Commit 4511cde0 by Vik Paruchuri

reverted capa module after accidental changes

parent 7b4b9a7e
@@ -44,10 +44,10 @@ def only_one(lst, default="", process=lambda x: x):
def parse_timedelta(time_str):
"""
time_str: A string with the following components:
-<D> day[s] (optional)
-<H> hour[s] (optional)
-<M> minute[s] (optional)
-<S> second[s] (optional)
+<D> day[s] (optional)
+<H> hour[s] (optional)
+<M> minute[s] (optional)
+<S> second[s] (optional)
Returns a datetime.timedelta parsed from the string
"""
@@ -79,7 +79,7 @@ class CapaModule(XModule):
js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
-],
+],
'js': [resource_string(__name__, 'js/src/capa/imageinput.js'),
resource_string(__name__, 'js/src/capa/schematic.js')]}
@@ -89,7 +89,7 @@ class CapaModule(XModule):
def __init__(self, system, location, definition, descriptor, instance_state=None,
shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor, instance_state,
-shared_state, **kwargs)
+shared_state, **kwargs)
self.attempts = 0
self.max_attempts = None
@@ -100,7 +100,7 @@ class CapaModule(XModule):
if display_due_date_string is not None:
self.display_due_date = dateutil.parser.parse(display_due_date_string)
#log.debug("Parsed " + display_due_date_string +
# " to " + str(self.display_due_date))
# " to " + str(self.display_due_date))
else:
self.display_due_date = None
@@ -109,7 +109,7 @@ class CapaModule(XModule):
self.grace_period = parse_timedelta(grace_period_string)
self.close_date = self.display_due_date + self.grace_period
#log.debug("Then parsed " + grace_period_string +
# " to closing date" + str(self.close_date))
# " to closing date" + str(self.close_date))
else:
self.grace_period = None
self.close_date = self.display_due_date
@@ -137,9 +137,9 @@ class CapaModule(XModule):
elif self.rerandomize == "per_student" and hasattr(self.system, 'id'):
# TODO: This line is badly broken:
# (1) We're passing student ID to xmodule.
-# (2) There aren't bins of students. -- we only want 10 or 20 randomizations, and want to assign students
-# to these bins, and may not want cohorts. So e.g. hash(your-id, problem_id) % num_bins.
-# - analytics really needs small number of bins.
+# (2) There aren't bins of students. -- we only want 10 or 20 randomizations, and want to assign students
+# to these bins, and may not want cohorts. So e.g. hash(your-id, problem_id) % num_bins.
+# - analytics really needs small number of bins.
self.seed = system.id
else:
self.seed = None
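
The TODO above describes the intended fix in words: hash the student and problem ids into a small, fixed number of randomization bins instead of seeding per student. A hypothetical sketch of that idea follows; none of these names exist in the module, and the bin count is an assumed value.

```python
# Hypothetical sketch of the binning idea from the TODO above.
import hashlib

NUM_RANDOMIZATION_BINS = 20  # assumed; the TODO only says "10 or 20 randomizations"

def randomization_bin(student_id, problem_id, num_bins=NUM_RANDOMIZATION_BINS):
    """Map (student, problem) to one of a small number of seed bins."""
    key = '{0}:{1}'.format(student_id, problem_id).encode('utf-8')
    return int(hashlib.md5(key).hexdigest(), 16) % num_bins
```

A stable digest is used rather than Python's built-in `hash()` so bin assignment stays consistent across processes, which matters for the analytics concern in the comment.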
@@ -148,7 +148,7 @@ class CapaModule(XModule):
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(),
-instance_state, seed=self.seed, system=self.system)
+instance_state, seed=self.seed, system=self.system)
except Exception as err:
msg = 'cannot create LoncapaProblem {loc}: {err}'.format(
loc=self.location.url(), err=err)
@@ -220,10 +220,10 @@ class CapaModule(XModule):
'element_id': self.location.html_id(),
'id': self.id,
'ajax_url': self.system.ajax_url,
-})
+})
def get_problem_html(self, encapsulate=True):
-'''Return html for the problem. Adds check, reset, save buttons
+'''Return html for the problem. Adds check, reset, save buttons
as necessary based on the problem config and state.'''
try:
@@ -242,15 +242,15 @@ class CapaModule(XModule):
html = msg
else:
# We're in non-debug mode, and possibly even in production. We want
-# to avoid bricking of problem as much as possible
+# to avoid bricking of problem as much as possible
# Presumably, student submission has corrupted LoncapaProblem HTML.
-# First, pull down all student answers
+# First, pull down all student answers
student_answers = self.lcp.student_answers
answer_ids = student_answers.keys()
# Some inputtypes, such as dynamath, have additional "hidden" state that
-# is not exposed to the student. Keep those hidden
+# is not exposed to the student. Keep those hidden
# TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id
hidden_state_keywords = ['dynamath']
for answer_id in answer_ids:
@@ -258,17 +258,17 @@ class CapaModule(XModule):
if answer_id.find(hidden_state_keyword) >= 0:
student_answers.pop(answer_id)
-# Next, generate a fresh LoncapaProblem
+# Next, generate a fresh LoncapaProblem
self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(),
-state=None, # Tabula rasa
-seed=self.seed, system=self.system)
+state=None, # Tabula rasa
+seed=self.seed, system=self.system)
# Prepend a scary warning to the student
-warning = '<div class="capa_reset">'\
-'<h2>Warning: The problem has been reset to its initial state!</h2>'\
-'The problem\'s state was corrupted by an invalid submission. '\
-'The submission consisted of:'\
-'<ul>'
+warning = '<div class="capa_reset">'\
+'<h2>Warning: The problem has been reset to its initial state!</h2>'\
+'The problem\'s state was corrupted by an invalid submission. ' \
+'The submission consisted of:'\
+'<ul>'
for student_answer in student_answers.values():
if student_answer != '':
warning += '<li>' + cgi.escape(student_answer) + '</li>'
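
The `hidden_state_keywords` loop above filters by substring search; the TODO suggests keying a regex to the end of `answer_id` instead. A minimal sketch of that alternative, with the suffix pattern assumed from the comment rather than taken from real answer ids:

```python
# Sketch of the regex-based filtering suggested by the TODO above; the existing
# code uses answer_id.find(keyword) >= 0 instead.
import re

HIDDEN_STATE_PATTERN = re.compile(r'dynamath$')  # assumed: 'dynamath' is a suffix of the answer_id

def visible_answers(student_answers):
    """Return a copy of the answers dict with hidden inputtype state dropped."""
    return {answer_id: answer
            for answer_id, answer in student_answers.items()
            if not HIDDEN_STATE_PATTERN.search(answer_id)}
```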
@@ -292,11 +292,11 @@ class CapaModule(XModule):
# check button is context-specific.
# Put a "Check" button if unlimited attempts or still some left
-if self.max_attempts is None or self.attempts < self.max_attempts-1:
+if self.max_attempts is None or self.attempts < self.max_attempts-1:
check_button = "Check"
else:
# Will be final check so let user know that
check_button = "Final Check"
check_button = "Final Check"
reset_button = True
save_button = True
@@ -363,8 +363,8 @@ class CapaModule(XModule):
Returns a json dictionary:
{ 'progress_changed' : True/False,
-'progress' : 'none'/'in_progress'/'done',
-<other request-specific values here > }
+'progress' : 'none'/'in_progress'/'done',
+<other request-specific values here > }
'''
handlers = {
'problem_get': self.get_problem,
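
The truncated `handlers` dict above is a plain handler-table dispatch: the chosen handler returns the request-specific values, the progress keys from the docstring are merged in, and the result is serialized for the AJAX caller. Below is a generic sketch of that pattern; only `'problem_get'` appears in the hunk, so the other keys and the progress bookkeeping are assumptions for illustration.

```python
# Generic sketch of the handler-table dispatch described in the docstring above.
import json

def dispatch_ajax(module, dispatch, get):
    handlers = {
        'problem_get': module.get_problem,      # shown in the hunk
        'problem_check': module.check_problem,  # key names assumed
        'problem_reset': module.reset_problem,
    }
    if dispatch not in handlers:
        return json.dumps({'error': 'Unknown action: %s' % dispatch})
    result = handlers[dispatch](get)  # request-specific values
    # The real method recomputes progress before/after the handler runs; placeholder here.
    result.update({'progress_changed': False, 'progress': 'in_progress'})
    return json.dumps(result)
```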
@@ -426,10 +426,10 @@ class CapaModule(XModule):
def update_score(self, get):
"""
Delivers grading response (e.g. from asynchronous code checking) to
-the capa problem, so its score can be updated
+the capa problem, so its score can be updated
'get' must have a field 'response' which is a string that contains the
-grader's response
+grader's response
No ajax return is needed. Return empty dict.
"""
@@ -437,7 +437,7 @@ class CapaModule(XModule):
score_msg = get['xqueue_body']
self.lcp.update_score(score_msg, queuekey)
-return dict() # No AJAX return is needed
+return dict() # No AJAX return is needed
def get_answer(self, get):
'''
@@ -453,8 +453,8 @@ class CapaModule(XModule):
else:
answers = self.lcp.get_question_answers()
-# answers (eg <solution>) may have embedded images
-# but be careful, some problems are using non-string answer dicts
+# answers (eg <solution>) may have embedded images
+# but be careful, some problems are using non-string answer dicts
new_answers = dict()
for answer_id in answers:
try:
@@ -471,8 +471,8 @@ class CapaModule(XModule):
''' Return results of get_problem_html, as a simple dict for json-ing.
{ 'html': <the-html> }
-Used if we want to reconfirm we have the right thing e.g. after
-several AJAX calls.
+Used if we want to reconfirm we have the right thing e.g. after
+several AJAX calls.
'''
return {'html': self.get_problem_html(encapsulate=False)}
@@ -500,11 +500,11 @@ class CapaModule(XModule):
def check_problem(self, get):
''' Checks whether answers to a problem are correct, and
-returns a map of correct/incorrect answers:
+returns a map of correct/incorrect answers:
-{'success' : bool,
-'contents' : html}
-'''
+{'success' : bool,
+'contents' : html}
+'''
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url()
@@ -527,11 +527,11 @@ class CapaModule(XModule):
# Problem queued. Students must wait a specified waittime before they are allowed to submit
if self.lcp.is_queued():
current_time = datetime.datetime.now()
-prev_submit_time = self.lcp.get_recentmost_queuetime()
+prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.system.xqueue['waittime']
if (current_time-prev_submit_time).total_seconds() < waittime_between_requests:
msg = 'You must wait at least %d seconds between submissions' % waittime_between_requests
-return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback
+return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback
try:
old_state = self.lcp.get_state()
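
The waittime throttle earlier in this hunk is plain wall-clock arithmetic on `datetime` objects. A self-contained illustration with made-up values; the 5-second waittime stands in for `self.system.xqueue['waittime']`.

```python
# Self-contained illustration of the waittime check above; the timestamps and
# the 5-second waittime are made-up values.
import datetime

waittime_between_requests = 5  # seconds; normally self.system.xqueue['waittime']
prev_submit_time = datetime.datetime(2012, 9, 1, 12, 0, 0)
current_time = datetime.datetime(2012, 9, 1, 12, 0, 3)

elapsed = (current_time - prev_submit_time).total_seconds()  # 3.0
if elapsed < waittime_between_requests:
    print('You must wait at least %d seconds between submissions' % waittime_between_requests)
```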
@@ -540,13 +540,13 @@ class CapaModule(XModule):
except StudentInputError as inst:
# TODO (vshnayder): why is this line here?
#self.lcp = LoncapaProblem(self.definition['data'],
-# id=lcp_id, state=old_state, system=self.system)
+# id=lcp_id, state=old_state, system=self.system)
log.exception("StudentInputError in capa_module:problem_check")
return {'success': inst.message}
except Exception, err:
# TODO: why is this line here?
#self.lcp = LoncapaProblem(self.definition['data'],
-# id=lcp_id, state=old_state, system=self.system)
+# id=lcp_id, state=old_state, system=self.system)
if self.system.DEBUG:
msg = "Error checking problem: " + str(err)
msg += '\nTraceback:\n' + traceback.format_exc()
@@ -564,14 +564,14 @@ class CapaModule(XModule):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
-# 'success' will always be incorrect
+# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
-event_info['attempts'] = self.attempts
+event_info['attempts'] = self.attempts
self.system.track_function('save_problem_check', event_info)
-if hasattr(self.system,'psychometrics_handler'): # update PsychometricsData using callback
-self.system.psychometrics_handler(self.get_instance_state())
+if hasattr(self.system,'psychometrics_handler'): # update PsychometricsData using callback
+self.system.psychometrics_handler(self.get_instance_state())
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
@@ -610,15 +610,15 @@ class CapaModule(XModule):
self.lcp.student_answers = answers
-# TODO: should this be save_problem_fail? Looks like success to me...
+# TODO: should this be save_problem_fail? Looks like success to me...
self.system.track_function('save_problem_fail', event_info)
return {'success': True}
def reset_problem(self, get):
''' Changes problem state to unfinished -- removes student answers,
-and causes problem to rerender itself.
+and causes problem to rerender itself.
-Returns problem html as { 'html' : html-string }.
+Returns problem html as { 'html' : html-string }.
'''
event_info = dict()
event_info['old_state'] = self.lcp.get_state()
@@ -643,8 +643,8 @@ class CapaModule(XModule):
self.lcp.seed = None
self.lcp = LoncapaProblem(self.definition['data'],
-self.location.html_id(), self.lcp.get_state(),
-system=self.system)
+self.location.html_id(), self.lcp.get_state(),
+system=self.system)
event_info['new_state'] = self.lcp.get_state()
self.system.track_function('reset_problem', event_info)
@@ -665,7 +665,7 @@ class CapaDescriptor(RawDescriptor):
template_dir_name = 'problem'
# Capa modules have some additional metadata:
-# TODO (vshnayder): do problems have any other metadata? Do they
+# TODO (vshnayder): do problems have any other metadata? Do they
# actually use type and points?
metadata_attributes = RawDescriptor.metadata_attributes + ('type', 'points')
@@ -677,13 +677,13 @@ class CapaDescriptor(RawDescriptor):
return [
'problems/' + path[8:],
path[8:],
-]
+]
def __init__(self, *args, **kwargs):
super(CapaDescriptor, self).__init__(*args, **kwargs)
weight_string = self.metadata.get('weight', None)
if weight_string:
self.weight = float(weight_string)
else:
-self.weight = None
\ No newline at end of file
+self.weight = None