Commit 528d1b03 by Jeff Ericson Committed by Giulio Gratta

Jeffs 3x merged into one

Conflicts:
	CHANGELOG.rst
parent 1e5d7bd3
......@@ -15,8 +15,24 @@ client error are correctly passed through to the client.
LMS: Improve performance of page load and thread list load for
discussion tab
LMS: The wiki markup cheatsheet dialog is now accessible to people with
disabilities. (LMS-1303)
Studio: Support targeted feedback, which allows for authors to provide explanations for
incorrect choice selections for multiple choice question choices that will automatically
display. These are intended to help steer a student to the correct answer. Thus, they are
best used for quizzes that allow multiple attempts. To provide targeted feedback, add an
element called <targetedfeedbackset> right before your <solution> or <solutionset>, and in
this element, provide a <targetedfeedback> for each feedback. Within <targetedfeedback>
you can specify your text explanation. Both the <targetedfeedback> and <choice> should have
the same explanation-id attribute.
Studio: Added feature to allow instructors to specify wait time between attempts
of the same quiz. In a problem's settings, instructors can specify how many
seconds students are locked out of submitting another attempt of the same quiz.
The timer starts as soon as they submit an attempt for grading. Note that this
does not prevent a student from starting to work on another quiz attempt. It only
prevents the students from submitting a bunch of attempts in rapid succession.
LMS: The wiki markup cheatsheet dialog is now accessible to screen readers.
(LMS-1303)
Common: Add skip links for accessibility to CMS and LMS. (LMS-1311)
......@@ -24,6 +40,20 @@ Studio: Change course overview page, checklists, assets, and course staff
management page URLs to a RESTful interface. Also removed "\listing", which
duplicated "\index".
Studio: Support answer pools for multiple choice question choices, so authors can provide
multiple incorrect and correct choices for a question and have 1 correct choice and n-1
incorrect choices randomly selected and shuffled before being presented to the student.
In XML: <multiplechoiceresponse answer-pool="4"> enables an answer pool of 4 choices: 1
correct choice and 3 incorrect choices. To provide multiple solution explanations, wrap
all solution elements within a <solutionset>, and make sure to add an attribute called
"explanation-id" to both the <solution> tag and its corresponding <choice> tag, and be
sure that the value for this "explanation-id" attribute is the same for both. Note that
this feature is only supported in the advanced XML problem editor, not the regular one.
Also note that if you want your question to have a different set of answers for different
attempts, be sure in the problem settings in Studio to set "Randomization" to "Always"
LMS: Fixed accessibility bug where users could not tab through wiki (LMS-1307)
Blades: When start time and end time are specified for a video, a visual range
will be shown on the time slider to highlight the place in the video that will
be played.
......
......@@ -13,7 +13,7 @@ MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
......@@ -39,6 +39,7 @@ def i_see_advanced_settings_with_values(step):
[PROBLEM_WEIGHT, "", False],
[RANDOMIZATION, "Never", False],
[SHOW_ANSWER, "Finished", False],
[TIMER_BETWEEN_ATTEMPTS, "0", False]
])
......
......@@ -98,3 +98,41 @@ class SolutionRenderer(object):
return etree.XML(html)
registry.register(SolutionRenderer)
#-----------------------------------------------------------------------------
class TargetedFeedbackRenderer(object):
    """
    Renders a <targetedfeedback> element as an HTML section/span pair.

    Targeted feedback is an extended explanation shown to students who picked
    a particular incorrect choice on a multiple-choice problem.
    """
    tags = ['targetedfeedback']

    def __init__(self, system, xml):
        # system: module runtime (used here only for its DEBUG flag)
        # xml: the <targetedfeedback> etree element to render
        self.system = system
        self.xml = xml

    def get_html(self):
        """
        Return the contents of this tag, rendered to html, as an etree element.
        """
        html = '<section class="targeted-feedback-span"><span>%s</span></section>' % (
            etree.tostring(self.xml))
        try:
            return etree.XML(html)
        except Exception as exc:
            # Outside DEBUG, surface the parse failure to the caller unchanged.
            if not self.system.DEBUG:
                raise
            # In DEBUG, render an inline error panel (with the offending
            # markup escaped) instead of failing the whole page.
            msg = '<html><div class="inline-error"><p>Error %s</p>' % (
                str(exc).replace('<', '&lt;'))
            msg += ('<p>Failed to construct targeted feedback from <pre>%s</pre></p>' %
                    html.replace('<', '&lt;'))
            msg += "</div></html>"
            log.error(msg)
            return etree.XML(msg)

registry.register(TargetedFeedbackRenderer)
......@@ -140,6 +140,14 @@ class CapaFields(object):
student_answers = Dict(help="Dictionary with the current student responses", scope=Scope.user_state)
done = Boolean(help="Whether the student has answered the problem", scope=Scope.user_state)
seed = Integer(help="Random seed for this student", scope=Scope.user_state)
last_submission_time = Date(help="Last submission time", scope=Scope.user_state)
submission_wait_seconds = Integer(
display_name="Timer Between Attempts",
help="Seconds a student must wait between submissions for a problem with multiple attempts.",
scope=Scope.settings,
default=0)
weight = Float(
display_name="Problem Weight",
help=("Defines the number of points each problem is worth. "
......@@ -297,6 +305,12 @@ class CapaModule(CapaFields, XModule):
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
def set_last_submission_time(self):
    """
    Record the moment this problem was checked.

    Stores the current UTC time in ``last_submission_time`` so a later
    check can enforce the configured wait between submissions.
    """
    checked_at = datetime.datetime.now(UTC())
    self.last_submission_time = checked_at
def get_score(self):
"""
Access the problem's score
......@@ -886,7 +900,7 @@ class CapaModule(CapaFields, XModule):
return {'grade': score['score'], 'max_grade': score['total']}
def check_problem(self, data):
def check_problem(self, data, override_time=False):
"""
Checks whether answers to a problem are correct
......@@ -901,6 +915,11 @@ class CapaModule(CapaFields, XModule):
answers = self.make_dict_of_responses(data)
event_info['answers'] = convert_files_to_filenames(answers)
# Can override current time
current_time = datetime.datetime.now(UTC())
if override_time is not False:
current_time = override_time
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
......@@ -915,23 +934,31 @@ class CapaModule(CapaFields, XModule):
# Problem queued. Students must wait a specified waittime before they are allowed to submit
if self.lcp.is_queued():
current_time = datetime.datetime.now(UTC())
prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.system.xqueue['waittime']
if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
msg = u'You must wait at least {wait} seconds between submissions'.format(
wait=waittime_between_requests)
return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback
# Wait time between resets
if self.last_submission_time is not None and self.submission_wait_seconds != 0:
if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
seconds_left = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
msg = u'You must wait at least {w} between submissions. {s} remaining.'.format(
w=self.pretty_print_seconds(self.submission_wait_seconds), s=self.pretty_print_seconds(seconds_left))
return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.set_last_submission_time()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("StudentInputError in capa_module:problem_check",
exc_info=True)
log.warning("StudentInputError in capa_module:problem_check", exc_info=True)
# If the user is a staff member, include
# the full exception, including traceback,
......@@ -976,9 +1003,31 @@ class CapaModule(CapaFields, XModule):
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
return {'success': success,
'contents': html,
}
return {'success': success, 'contents': html}
def pretty_print_seconds(self, num_seconds):
    """
    Return a human-readable string for a duration given in seconds.

    Examples: 0 -> "0 seconds", 1 -> "1 second", 90 -> "1 min, 30 sec",
    3600 -> "1 hr, 0 min", 3661 -> "1 hr, 1 min, 1 sec".

    Args:
        num_seconds (int): non-negative duration in seconds.

    Returns:
        str: the formatted duration.
    """
    if num_seconds < 60:
        # Only exactly 1 drops the "s" (the original produced "0 second").
        plural = "" if num_seconds == 1 else "s"
        return "%i second%s" % (num_seconds, plural)
    elif num_seconds < 60 * 60:
        min_display, sec_display = divmod(num_seconds, 60)
        if sec_display == 0:
            # min_display >= 1 in this branch, so only 1 is singular.
            plural = "" if min_display == 1 else "s"
            return "%i minute%s" % (min_display, plural)
        return "%i min, %i sec" % (min_display, sec_display)
    else:
        hr_display, remainder = divmod(num_seconds, 3600)
        min_display, sec_display = divmod(remainder, 60)
        if sec_display == 0:
            return "%i hr, %i min" % (hr_display, min_display)
        return "%i hr, %i min, %i sec" % (hr_display, min_display, sec_display)
def rescore_problem(self):
"""
......
......@@ -126,6 +126,23 @@ section.problem {
}
}
// Wrapper emitted by the capa TargetedFeedbackRenderer around each
// <targetedfeedback> element; the inner span carries the feedback text.
.targeted-feedback-span {
> span {
margin: $baseline 0;
display: block;
border: 1px solid #000;
padding: 9px 15px $baseline;
background: #fff;
position: relative;
box-shadow: inset 0 0 0 1px #eee;
border-radius: 3px;
// Collapse the box entirely when there is no feedback text to show.
&:empty {
display: none;
}
}
}
div {
p {
&.answer {
......@@ -628,6 +645,34 @@ section.problem {
}
}
// Feedback shown for an incorrect choice: red uppercase heading
// (first paragraph), explanation text below.
.detailed-targeted-feedback {
> p:first-child {
color: red;
text-transform: uppercase;
font-weight: bold;
font-style: normal;
font-size: 0.9em;
}
// Avoid trailing gap inside the feedback box.
p:last-child {
margin-bottom: 0;
}
}
// Same layout as above, but the green heading marks a correct choice.
.detailed-targeted-feedback-correct {
> p:first-child {
color: green;
text-transform: uppercase;
font-weight: bold;
font-style: normal;
font-size: 0.9em;
}
// Avoid trailing gap inside the feedback box.
p:last-child {
margin-bottom: 0;
}
}
div.capa_alert {
margin-top: $baseline;
padding: 8px 12px;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment