Commit c940814c by Colin-Fredericks

OSPR-535 Partial Credit

Squashed commit of the following:

commit 0f7c2af5f7b8caed575dd253a45299293b2729d7
Author: Colin-Fredericks <colin.fredericks@gmail.com>
Date:   Tue Jun 30 12:04:43 2015 -0400

    Forgot icon

commit b48970392741130f774709c54eb6e5ab0089812c
Author: Colin-Fredericks <colin.fredericks@gmail.com>
Date:   Tue Jun 30 11:49:57 2015 -0400

    OSPR-535 Partial Credit

    Squashed commit of the following:

    commit 6dd34f58f994e32d0d54bf1d67bffd04e0f8ef08
    Author: Colin-Fredericks <cof945@dhcp-140-247-184-176.fas.harvard.edu>
    Date:   Tue Jun 30 11:44:01 2015 -0400

        Fixing accidental overwrite.

    commit 1ff8fc4b0e83b90356e8e8dce1022f49bfd162cf
    Author: Colin-Fredericks <cof945@dhcp-140-247-184-176.fas.harvard.edu>
    Date:   Tue Jun 30 11:18:36 2015 -0400

        OSPR-535 Partial Credit

        Revised after first pull discussion.

Fixing scss typos

Fixing check/x display problem

Empty set is not []

Shuffling empty answer code to grade properly.

I don't think I ever wrote this in the first place...

Adding tests for MC and Checkbox

including proper partial-credit marking and scoring

Numerical and OptionResponse tests

Also a few improvements to NumericalResponse problem type and
exception-raising.

CustomResponse tests and more numerical tests

Increasing coverage and fixing typos

Exception added for pylint false positive

Hopefully fixing coverage issue

Retabulating line continuation

Bok Choy test for partial credit

Copypasta fix

Adding tooltip for partial credit

Improving and expanding comments

Minor fixes
parent 4e9b7ea9
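
For reference, a sketch of the course-author markup these changes enable (illustrative only, built from the attributes introduced in this diff, not taken from edX documentation): a checkbox problem opts into partial credit with partial_credit="EDC" (every decision counts) or partial_credit="halves" on the choiceresponse tag.

<problem>
  <choiceresponse partial_credit="EDC">
    <checkboxgroup>
      <choice correct="true">A correct choice</choice>
      <choice correct="true">Another correct choice</choice>
      <choice correct="false">An incorrect choice</choice>
      <choice correct="false">Another incorrect choice</choice>
    </checkboxgroup>
  </choiceresponse>
</problem>

Under EDC, a student who checks both correct boxes plus one incorrect box gets 3 of 4 decisions right and earns 75% of the points; under halves, that single error on a four-choice problem earns half credit.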
@@ -10,7 +10,7 @@ class CorrectMap(object):
in a capa problem. The response evaluation result for each answer_id includes
(correctness, npoints, msg, hint, hintmode).
- correctness : 'correct', 'incorrect', or 'partially-correct'
- npoints : None, or integer specifying number of points awarded for this answer_id
- msg : string (may have HTML) giving extra message response
(displayed below textline or textbox)
@@ -101,10 +101,23 @@ class CorrectMap(object):
self.set(k, **correct_map[k])
def is_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is correct OR partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct']
return None
def is_partially_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] == 'partially-correct'
return None
def is_queued(self, answer_id):
return answer_id in self.cmap and self.cmap[answer_id]['queuestate'] is not None
...
@@ -85,6 +85,7 @@ class Status(object):
names = {
'correct': _('correct'),
'incorrect': _('incorrect'),
'partially-correct': _('partially correct'),
'incomplete': _('incomplete'),
'unanswered': _('unanswered'),
'unsubmitted': _('unanswered'),
@@ -94,6 +95,7 @@ class Status(object):
# Translators: these are tooltips that indicate the state of an assessment question
'correct': _('This is correct.'),
'incorrect': _('This is incorrect.'),
'partially-correct': _('This is partially correct.'),
'unanswered': _('This is unanswered.'),
'unsubmitted': _('This is unanswered.'),
'queued': _('This is being processed.'),
@@ -896,7 +898,7 @@ class MatlabInput(CodeInput):
Right now, we only want this button to show up when a problem has not been
checked.
"""
if self.status in ['correct', 'incorrect', 'partially-correct']:
return False
else:
return True
...
@@ -139,6 +139,8 @@ class LoncapaResponse(object):
tags = None
hint_tag = None
has_partial_credit = False
credit_type = []
max_inputfields = None
allowed_inputfields = []
@@ -213,6 +215,18 @@ class LoncapaResponse(object):
self.default_answer_map[entry.get(
'id')] = contextualize_text(answer, self.context)
# Does this problem have partial credit?
# If so, what kind? Get it as a list of strings.
partial_credit = xml.xpath('.')[0].get('partial_credit', default=False)
if str(partial_credit).lower().strip() == 'false':
self.has_partial_credit = False
self.credit_type = []
else:
self.has_partial_credit = True
self.credit_type = partial_credit.split(',')
self.credit_type = [word.strip().lower() for word in self.credit_type]
if hasattr(self, 'setup_response'):
self.setup_response()
@@ -261,7 +275,6 @@ class LoncapaResponse(object):
new_cmap = self.get_score(student_answers)
self.get_hints(convert_files_to_filenames(
student_answers), new_cmap, old_cmap)
return new_cmap
def make_hint_div(self, hint_node, correct, student_answer, question_tag, def make_hint_div(self, hint_node, correct, student_answer, question_tag,
@@ -813,11 +826,23 @@ class ChoiceResponse(LoncapaResponse):
def setup_response(self):
self.assign_choice_names()
correct_xml = self.xml.xpath(
'//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id')
)
self.correct_choices = set([
choice.get('name') for choice in correct_xml
])
incorrect_xml = self.xml.xpath(
'//*[@id=$id]//choice[@correct="false"]',
id=self.xml.get('id')
)
self.incorrect_choices = set([
choice.get('name') for choice in incorrect_xml
])
def assign_choice_names(self):
"""
@@ -831,26 +856,154 @@ class ChoiceResponse(LoncapaResponse):
if not choice.get('id'):
choice.set("id", chr(ord("A") + index))
def grade_via_every_decision_counts(self, **kwargs):
"""
Calculates partial credit on the Every Decision Counts scheme.
For each correctly selected or correctly blank choice, score 1 point.
Divide by total number of choices.
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap.
"""
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
edc_max_grade = len(all_choices)
edc_current_grade = 0
good_answers = sum([1 for answer in student_answer if answer in self.correct_choices])
good_non_answers = sum([1 for blank in student_non_answers if blank in self.incorrect_choices])
edc_current_grade = good_answers + good_non_answers
return_grade = round(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
if edc_current_grade == edc_max_grade:
return CorrectMap(self.answer_id, correctness='correct')
elif edc_current_grade > 0:
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, correctness='incorrect', npoints=0)
def grade_via_halves(self, **kwargs):
"""
Calculates partial credit on the Halves scheme.
If no errors, full credit.
If one error, half credit as long as there are 3+ choices
If two errors, 1/4 credit as long as there are 5+ choices
(If not enough choices, no credit.)
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap
"""
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
halves_error_count = 0
incorrect_answers = sum([1 for answer in student_answer if answer in self.incorrect_choices])
missed_answers = sum([1 for blank in student_non_answers if blank in self.correct_choices])
halves_error_count = incorrect_answers + missed_answers
if halves_error_count == 0:
return_grade = self.get_max_score()
return CorrectMap(self.answer_id, correctness='correct', npoints=return_grade)
elif halves_error_count == 1 and len(all_choices) > 2:
return_grade = round(self.get_max_score() / 2.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
elif halves_error_count == 2 and len(all_choices) > 4:
return_grade = round(self.get_max_score() / 4.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, 'incorrect')
def grade_without_partial_credit(self, **kwargs):
"""
Standard grading for checkbox problems.
100% credit if all choices are correct; 0% otherwise
Arguments: student_answer, which is the items the student actually chose
"""
student_answer = kwargs['student_answer']
required_selected = len(self.correct_choices - student_answer) == 0
no_extra_selected = len(student_answer - self.correct_choices) == 0
correct = required_selected & no_extra_selected
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
# Setting up answer sets:
# all_choices: the full set of checkboxes
# student_answer: what the student actually chose (note no "s")
# student_non_answers: what they didn't choose
# self.correct_choices: boxes that should be checked
# self.incorrect_choices: boxes that should NOT be checked
all_choices = self.correct_choices.union(self.incorrect_choices)
student_answer = student_answers.get(self.answer_id, [])
if not isinstance(student_answer, list):
student_answer = [student_answer]
# When a student leaves all the boxes unmarked, edX throws an error.
# This line checks for blank answers so that we can throw "false".
# This is not ideal. "None apply" should be a valid choice.
# Sadly, this is not the place where we can fix that problem.
empty_answer = student_answer == []
if empty_answer:
return CorrectMap(self.answer_id, 'incorrect')
student_answer = set(student_answer)
student_non_answers = all_choices - student_answer
# No partial credit? Get grade right now.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answer=student_answer)
# This below checks to see whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="EDC" to count each choice for equal points (Every Decision Counts).
# Set partial_credit="halves" to take half credit off for each error.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'edc': self.grade_via_every_decision_counts,
'halves': self.grade_via_halves,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Checkbox problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
all_choices=all_choices,
student_answer=student_answer,
student_non_answers=student_non_answers
)
def get_answers(self):
return {self.answer_id: list(self.correct_choices)}
@@ -996,6 +1149,14 @@ class MultipleChoiceResponse(LoncapaResponse):
multi_device_support = True
def setup_response(self):
"""
Collects information from the XML for later use.
correct_choices is a list of the correct choices.
partial_choices is a list of the partially-correct choices.
partial_values is a list of the scores that go with those
choices, defaulting to 0.5 if no value is specified.
"""
# call secondary setup for MultipleChoice questions, to set name
# attributes
self.mc_setup_response()
@@ -1010,9 +1171,20 @@ class MultipleChoiceResponse(LoncapaResponse):
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).upper() == "TRUE"
]
if self.has_partial_credit:
self.partial_choices = [
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
self.partial_values = [
float(choice.get('point_value', default='0.5')) # Default partial credit: 50%
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
def get_extended_hints(self, student_answer_dict, new_cmap):
"""
Extract any hints in a <choicegroup> matching the student's answers
@@ -1082,16 +1254,80 @@ class MultipleChoiceResponse(LoncapaResponse):
self.do_shuffle(self.xml, problem)
self.do_answer_pool(self.xml, problem)
def grade_via_points(self, **kwargs):
"""
Calculates partial credit based on the Points scheme.
Answer choices marked "partial" are given partial credit.
Default is 50%; other amounts may be set in point_value attributes.
Arguments: student_answers
Returns: a CorrectMap
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
elif (
self.answer_id in student_answers
and student_answers[self.answer_id] in self.partial_choices
):
choice_index = self.partial_choices.index(student_answers[self.answer_id])
credit_amount = self.partial_values[choice_index]
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=credit_amount)
else:
return CorrectMap(self.answer_id, 'incorrect')
def grade_without_partial_credit(self, **kwargs):
"""
Standard grading for multiple-choice problems.
100% credit if choices are correct; 0% otherwise
Arguments: student_answers
Returns: a CorrectMap
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
"""
grade student response.
"""
# No partial credit? Grade it right away.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answers=student_answers)
# This below checks to see whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="points" to set specific point values for specific choices.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'points': self.grade_via_points,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Multiple Choice problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
student_answers=student_answers
)
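
For multiple choice, a sketch of the markup that grade_via_points expects (illustrative only; choice text invented): a choice marked correct="partial" earns 50% by default, or the fraction given in its point_value attribute.

<problem>
  <multiplechoiceresponse partial_credit="points">
    <choicegroup type="MultipleChoice">
      <choice correct="false">A wrong answer</choice>
      <choice correct="partial" point_value="0.25">A nearly-right answer</choice>
      <choice correct="true">The right answer</choice>
    </choicegroup>
  </multiplechoiceresponse>
</problem>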
def get_answers(self):
return {self.answer_id: self.correct_choices}
@@ -1351,23 +1587,163 @@ class OptionResponse(LoncapaResponse):
def setup_response(self):
self.answer_fields = self.inputfields
def grade_via_points(self, problem_map, student_answers):
"""
Grades dropdown problems with "points"-style partial credit.
Full credit for any fully correct answer.
Partial credit for any partially correct answer.
Amount is set by point_values attribute, defaults to 50%.
Returns a CorrectMap.
"""
answer_map = problem_map['correct']
cmap = CorrectMap()
for aid in answer_map:
# Set correct/incorrect first, check for partial credit later.
for word in answer_map[aid]:
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'correct')
break
else:
cmap.set(aid, 'incorrect')
# For partial credit:
partial_map = problem_map['partial']
points_map = problem_map['point_values']
if not cmap.is_correct(aid) and partial_map[aid] is not None:
for index, word in enumerate(partial_map[aid]):
# Set the correctness and point value
# for each answer id independently.
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'partially-correct')
cmap.set_property(aid, 'npoints', points_map[aid][index])
break
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def grade_without_partial_credit(self, problem_map, student_answers):
"""
Grades dropdown problems without partial credit.
Full credit for any correct answer, no credit otherwise.
Returns a CorrectMap.
"""
answer_map = problem_map['correct']
cmap = CorrectMap()
for aid in answer_map:
for word in answer_map[aid]:
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'correct')
break
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def get_score(self, student_answers):
problem_map = self.get_problem_attributes()
# If no partial credit, grade it right now.
if not self.has_partial_credit:
return self.grade_without_partial_credit(problem_map, student_answers)
# This below checks to see whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="points" to allow credit for listed alternative answers.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'points': self.grade_via_points,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Dropdown problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
problem_map=problem_map,
student_answers=student_answers
)
def get_problem_attributes(self):
"""
This returns a dict built out of three smaller dictionaries.
Keys are:
"correct":
A dictionary with problem ids as keys.
Entries are lists of the correct answers for that id.
"partial":
A dictionary with problem ids as keys.
Entries are lists of the partially-correct answers for that id.
"point_values":
Matches the "partial" one, but gives point values instead.
Defaults to 50% credit.
"""
default_credit = 0.5
problem_map = dict()
for target in ['correct', 'partial', 'point_values']:
small_map = dict([
(af.get('id'), contextualize_text(
af.get(target, default=None),
self.context
))
for af in self.answer_fields
])
for answer_id in small_map:
if small_map[answer_id] is not None:
# Split on commas and strip whitespace
# to allow for multiple options.
small_map[answer_id] = small_map[answer_id].split(',')
for index, word in enumerate(small_map[answer_id]):
# Pick out whether we're getting numbers or strings.
if target in ['point_values']:
small_map[answer_id][index] = float(word.strip())
else:
small_map[answer_id][index] = str(word.strip())
# If we find nothing and we're looking for points, return the default.
elif target == 'point_values':
if problem_map['partial'][answer_id] is not None:
num_partial = len(problem_map['partial'][answer_id])
small_map[answer_id] = [default_credit] * num_partial
else:
small_map[answer_id] = []
# Add a copy of the in-loop map to the big map.
problem_map[target] = dict(small_map)
return problem_map
def get_answers(self):
"""
Returns a dictionary with problem ids as keys.
Each entry is a list of the correct answers for that id.
"""
return self.get_problem_attributes()['correct']
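
For dropdowns, get_problem_attributes() reads comma-separated correct, partial, and point_values attributes off each optioninput. A hypothetical example (option names invented; the options attribute follows the usual capa convention):

<problem>
  <optionresponse partial_credit="points">
    <optioninput options="('apple','banana','cherry')" correct="apple" partial="banana,cherry" point_values="0.75,0.25"/>
  </optionresponse>
</problem>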
def get_student_answer_variable_name(self, student_answers, aid):
"""
@@ -1491,6 +1867,14 @@ class NumericalResponse(LoncapaResponse):
if self.answer_id not in student_answers:
return CorrectMap(self.answer_id, 'incorrect')
# Make sure we're using an approved partial credit style.
# Currently implemented: 'close' and 'list'
if self.has_partial_credit:
graders = ['list', 'close']
for style in self.credit_type:
if style not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
student_answer = student_answers[self.answer_id]
_ = self.capa_system.i18n.ugettext
@@ -1525,6 +1909,30 @@ class NumericalResponse(LoncapaResponse):
except Exception:
raise general_exception
# End `evaluator` block -- we figured out the student's answer!
tree = self.xml
# What multiple of the tolerance is worth partial credit?
has_partial_range = tree.xpath('responseparam[@partial_range]')
if has_partial_range:
partial_range = float(has_partial_range[0].get('partial_range', default='2'))
else:
partial_range = 2
# Take in alternative answers that are worth partial credit.
has_partial_answers = tree.xpath('responseparam[@partial_answers]')
if has_partial_answers:
partial_answers = has_partial_answers[0].get('partial_answers').split(',')
for index, word in enumerate(partial_answers):
partial_answers[index] = word.strip()
partial_answers[index] = self.get_staff_ans(partial_answers[index])
else:
partial_answers = False
partial_score = 0.5
is_correct = 'incorrect'
if self.range_tolerance:
if isinstance(student_float, complex):
raise StudentInputError(_(u"You may not use complex numbers in range tolerance problems"))
@@ -1546,19 +1954,71 @@ class NumericalResponse(LoncapaResponse):
tolerance=float_info.epsilon,
relative_tolerance=True
):
is_correct = 'correct' if inclusion else 'incorrect'
break
else:
if boundaries[0] < student_float < boundaries[1]:
is_correct = 'correct'
else:
if self.has_partial_credit is False:
pass
elif 'close' in self.credit_type:
# Partial credit: 50% if the student is outside the specified boundaries,
# but within an extended set of boundaries.
extended_boundaries = []
boundary_range = boundaries[1] - boundaries[0]
extended_boundaries.append(boundaries[0] - partial_range * boundary_range)
extended_boundaries.append(boundaries[1] + partial_range * boundary_range)
if extended_boundaries[0] < student_float < extended_boundaries[1]:
is_correct = 'partially-correct'
else:
correct_float = self.get_staff_ans(self.correct_answer)

# Partial credit is available in three cases:
# If the student answer is within expanded tolerance of the actual answer,
# the student gets 50% credit. (Currently set as the default.)
# Set via partial_credit="close" in the numericalresponse tag.
#
# If the student answer is within regular tolerance of an alternative answer,
# the student gets 50% credit. (Same default.)
# Set via partial_credit="list"
#
# If the student answer is within expanded tolerance of an alternative answer,
# the student gets 25%. (We take the 50% and square it, at the moment.)
# Set via partial_credit="list,close" or "close, list" or the like.
if str(self.tolerance).endswith('%'):
expanded_tolerance = str(partial_range * float(str(self.tolerance)[:-1])) + '%'
else:
expanded_tolerance = partial_range * float(self.tolerance)
if compare_with_tolerance(student_float, correct_float, self.tolerance):
is_correct = 'correct'
elif self.has_partial_credit is False:
pass
elif 'list' in self.credit_type:
for value in partial_answers:
if compare_with_tolerance(student_float, value, self.tolerance):
is_correct = 'partially-correct'
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
break
elif compare_with_tolerance(student_float, value, expanded_tolerance):
is_correct = 'partially-correct'
partial_score = partial_score * partial_score
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
if is_correct == 'partially-correct':
return CorrectMap(self.answer_id, is_correct, npoints=partial_score)
else:
return CorrectMap(self.answer_id, is_correct)
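
A sketch of numericalresponse markup using both schemes introduced here (values invented): 'close' gives half credit within partial_range times the tolerance of the expected answer, 'list' gives half credit for the alternative answers in partial_answers, and the combination drops to 25% when the student is only within the expanded tolerance of an alternative answer.

<problem>
  <numericalresponse answer="100" partial_credit="close,list">
    <responseparam type="tolerance" default="5%" partial_range="3"/>
    <responseparam partial_answers="10,1000"/>
    <formulaequationinput/>
  </numericalresponse>
</problem>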
def compare_answer(self, ans1, ans2):
"""
@@ -1867,6 +2327,9 @@ class CustomResponse(LoncapaResponse):
code = None
expect = None
# Standard amount for partial credit if not otherwise specified:
default_pc = 0.5
def setup_response(self):
xml = self.xml
@@ -2043,7 +2506,12 @@ class CustomResponse(LoncapaResponse):
if grade_decimals:
npoints = max_points * grade_decimals[k]
else:
if correct[k] == 'correct':
npoints = max_points
elif correct[k] == 'partially-correct':
npoints = max_points * self.default_pc
else:
npoints = 0
correct_map.set(idset[k], correct[k], msg=messages[k],
npoints=npoints)
return correct_map
@@ -2084,13 +2552,30 @@ class CustomResponse(LoncapaResponse):
)
if isinstance(ret, dict):
# One kind of dictionary the check function can return has the
# form {'ok': BOOLEAN or STRING, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
# If there are multiple inputs, they all get marked
# to the same correct/incorrect value
if 'ok' in ret:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
ok_val = str(ret['ok']).lower().strip() if bool(ret['ok']) else 'false'
if ok_val == 'false':
correct = 'incorrect'
elif 'partial' in ok_val:
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset) # All inputs share the same mark.
# old version, no partial credit:
# correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
msg = ret.get('msg', None)
msg = self.clean_message_html(msg)
@@ -2102,9 +2587,14 @@ class CustomResponse(LoncapaResponse):
self.context['messages'][0] = msg
if 'grade_decimal' in ret:
decimal = float(ret['grade_decimal'])
else:
if correct[0] == 'correct':
decimal = 1.0
elif correct[0] == 'partially-correct':
decimal = self.default_pc
else:
decimal = 0.0
grade_decimals = [decimal] * len(idset)
self.context['grade_decimals'] = grade_decimals
@@ -2112,7 +2602,11 @@ class CustomResponse(LoncapaResponse):
# the form:
# { 'overall_message': STRING,
# 'input_list': [
#     {
# 'ok': BOOLEAN or STRING,
# 'msg': STRING,
# 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)
# },
# ...
# ]
# }
@@ -2129,16 +2623,35 @@ class CustomResponse(LoncapaResponse):
correct = []
messages = []
grade_decimals = []
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
for input_dict in input_list:
if str(input_dict['ok']).lower().strip() == "false" or not input_dict['ok']:
correct.append('incorrect')
elif 'partial' in str(input_dict['ok']).lower().strip():
correct.append('partially-correct')
else:
correct.append('correct')
# old version, no partial credit
# correct.append('correct'
# if input_dict['ok'] else 'incorrect')
msg = (self.clean_message_html(input_dict['msg'])
if 'msg' in input_dict else None)
messages.append(msg)
if 'grade_decimal' in input_dict:
decimal = input_dict['grade_decimal']
else:
if str(input_dict['ok']).lower().strip() == 'true':
decimal = 1.0
elif 'partial' in str(input_dict['ok']).lower().strip():
decimal = self.default_pc
else:
decimal = 0.0
grade_decimals.append(decimal)
self.context['messages'] = messages
@@ -2155,7 +2668,21 @@ class CustomResponse(LoncapaResponse):
)
else:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
if str(ret).lower().strip() == "false" or not bool(ret):
correct = 'incorrect'
elif 'partial' in str(ret).lower().strip():
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset)
# old version, no partial credit:
# correct = ['correct' if ret else 'incorrect'] * len(idset)
self.context['correct'] = correct
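
With these changes a CustomResponse check function can signal partial credit by returning a string containing "partial" in the 'ok' field, optionally with its own grade_decimal. A hypothetical sketch (check_even and its logic are invented for illustration):

<problem>
  <script type="loncapa/python">
def check_even(expect, ans):
    value = int(ans)
    if value % 4 == 0:
        return {'ok': True, 'msg': 'Divisible by four.'}
    elif value % 2 == 0:
        # Any 'ok' string containing "partial" marks the response partially correct.
        return {'ok': 'partial', 'msg': 'Even, but not divisible by four.', 'grade_decimal': 0.5}
    else:
        return {'ok': False, 'msg': 'Not an even number.'}
  </script>
  <customresponse cfn="check_even">
    <textline/>
  </customresponse>
</problem>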
...
@@ -17,7 +17,7 @@
<div id="input_${id}_preview" class="equation"></div>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</div>
@@ -7,6 +7,8 @@
<%
if status == 'correct':
correctness = 'correct'
elif status == 'partially-correct':
correctness = 'partially-correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
@@ -31,7 +33,7 @@
/> ${choice_description}
% if input_type == 'radio' and ( (isinstance(value, basestring) and (choice_id == value)) or (not isinstance(value, basestring) and choice_id in value) ):
% if status in ('correct', 'partially-correct', 'incorrect') and not show_correctness=='never':
<span class="sr status">${choice_description|h} - ${status.display_name}</span>
% endif
% endif
@@ -60,4 +62,4 @@
% if msg:
<span class="message">${msg|n}</span>
% endif
</form>
\ No newline at end of file
@@ -20,6 +20,8 @@
correctness = 'correct'
elif status == 'incorrect':
correctness = 'incorrect'
elif status == 'partially-correct':
correctness = 'partially-correct'
else:
correctness = None
%>
...
@@ -9,7 +9,7 @@
<div class="script_placeholder" data-src="/static/js/sylvester.js"></div>
<div class="script_placeholder" data-src="/static/js/crystallography.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="status ${status.classname}" id="status_${id}">
% endif
@@ -25,7 +25,7 @@
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</section>
@@ -2,7 +2,7 @@
<div class="script_placeholder" data-src="/static/js/capa/protex/protex.nocache.js?raw"/>
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -15,7 +15,7 @@
</p>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</section>
@@ -8,7 +8,7 @@
<div class="script_placeholder" data-src="${STATIC_URL}js/capa/drag_and_drop.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -26,7 +26,7 @@
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</div>
@@ -2,7 +2,7 @@
<div class="script_placeholder" data-src="/static/js/capa/genex/genex.nocache.js?raw"/>
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -16,7 +16,7 @@
</p>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</section>
...
<section id="editamoleculeinput_${id}" class="editamoleculeinput">
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -23,7 +23,7 @@
<div class="error_message" style="padding: 5px 5px 5px 5px; background-color:#FA6666; height:60px;width:400px; display: none"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</section>
@@ -20,7 +20,7 @@
<div class="script_placeholder" data-src="${jschannel_loader}"/>
<div class="script_placeholder" data-src="${jsinput_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -47,7 +47,7 @@
<div class="error_message" style="padding: 5px 5px 5px 5px; background-color:#FA6666; height:60px;width:400px; display: none"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
...
@@ -7,7 +7,7 @@
<div class="script_placeholder" data-src="${preprocessor['script_src']}"/>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
<div class="${status.classname} ${doinline}" id="status_${id}">
% endif
% if hidden:
@@ -50,7 +50,7 @@
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
</div>
% endif
...
@@ -11,7 +11,7 @@
<div class="script_placeholder" data-src="/static/js/vsepr/vsepr.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
@@ -28,7 +28,7 @@
% if msg:
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>
% endif
</section>
@@ -49,6 +49,9 @@ class ResponseXMLFactory(object):
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
Returns a string representation of the XML tree.
"""
@@ -58,6 +61,7 @@ class ResponseXMLFactory(object):
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
credit_type = kwargs.get('credit_type', None)
# The root is <problem>
root = etree.Element("problem")
@@ -75,6 +79,11 @@ class ResponseXMLFactory(object):
# Add the response(s)
for __ in range(int(num_responses)):
response_element = self.create_response_element(**kwargs)
# Set partial credit
if credit_type is not None:
response_element.set('partial_credit', str(credit_type))
root.append(response_element)
# Add input elements
@@ -132,6 +141,10 @@ class ResponseXMLFactory(object):
*choice_names*: List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
*points*: List of strings giving partial credit values (0-1)
for each choice. Interpreted as floats in problem.
If specified, ensure len(points) == len(choices)
"""
# Names of group elements
group_element_names = {
@@ -144,15 +157,23 @@ class ResponseXMLFactory(object):
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
points = kwargs.get('points', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert choice_type in group_element_names
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name, pointval) in zip(choices, choice_names, points):
choice_element = etree.SubElement(group_element, "choice")
if correct_val is True:
correctness = 'true'
elif correct_val is False:
correctness = 'false'
elif 'partial' in correct_val:
correctness = 'partial'
choice_element.set('correct', correctness)
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
@@ -161,6 +182,10 @@ class ResponseXMLFactory(object):
choice_element.text = str(name)
choice_element.set("name", str(name))
# Add point values for partially-correct choices.
if pointval:
choice_element.set("point_value", str(pointval))
return group_element
@@ -176,10 +201,22 @@ class NumericalResponseXMLFactory(ResponseXMLFactory):
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
*partial_range*: The multiplier for the tolerance that will
still provide partial credit in the "close" grading style
*partial_answers*: A string of comma-separated alternate
answers that will receive partial credit in the "list" style
""" """
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
credit_type = kwargs.get('credit_type', None)
partial_range = kwargs.get('partial_range', None)
partial_answers = kwargs.get('partial_answers', None)
response_element = etree.Element('numericalresponse')
@@ -193,6 +230,13 @@ class NumericalResponseXMLFactory(ResponseXMLFactory):
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
if partial_range is not None and 'close' in credit_type:
responseparam_element.set('partial_range', str(partial_range))
if partial_answers is not None and 'list' in credit_type:
# The line below throws a false positive pylint violation, so it's excepted.
responseparam_element = etree.SubElement(response_element, 'responseparam') # pylint: disable=E1101
responseparam_element.set('partial_answers', partial_answers)
return response_element
@@ -629,15 +673,25 @@ class OptionResponseXMLFactory(ResponseXMLFactory):
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: a string with comma-separated correct choices [REQUIRED]
*partial_option*: a string with comma-separated partially-correct choices
*point_values*: a string with comma-separated values (0-1) that give the
partial credit values in the "points" grading scheme.
Must have one per partial option.
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
partial_option = kwargs.get('partial_option', None)
point_values = kwargs.get('point_values', None)
credit_type = kwargs.get('credit_type', None)
assert options_list and correct_option
assert len(options_list) > 1
for option in correct_option.split(','):
assert option.strip() in options_list
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
@@ -651,6 +705,15 @@ class OptionResponseXMLFactory(ResponseXMLFactory):
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
# If we have 'points'-style partial credit...
if 'points' in str(credit_type):
# Set the "partial" attribute
optioninput_element.set('partial', str(partial_option))
# Set the "point_values" attribute, if it's specified.
if point_values is not None:
optioninput_element.set('point_values', str(point_values))
return optioninput_element
...
@@ -17,7 +17,7 @@ class CorrectMapTest(unittest.TestCase):
self.cmap = CorrectMap()
def test_set_input_properties(self):
# Set the correctmap properties for three inputs
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
@@ -41,15 +41,34 @@ class CorrectMapTest(unittest.TestCase):
queuestate=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='partially-correct',
npoints=3,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
# Assert that each input has the expected properties
self.assertTrue(self.cmap.is_correct('1_2_1'))
self.assertFalse(self.cmap.is_correct('2_2_1'))
self.assertTrue(self.cmap.is_correct('3_2_1'))
self.assertTrue(self.cmap.is_partially_correct('3_2_1'))
self.assertFalse(self.cmap.is_partially_correct('2_2_1'))
# Intentionally testing an item that's not in cmap.
self.assertFalse(self.cmap.is_partially_correct('9_2_1'))
self.assertEqual(self.cmap.get_correctness('1_2_1'), 'correct')
self.assertEqual(self.cmap.get_correctness('2_2_1'), 'incorrect')
self.assertEqual(self.cmap.get_correctness('3_2_1'), 'partially-correct')
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 3)
self.assertEqual(self.cmap.get_msg('1_2_1'), 'Test message')
self.assertEqual(self.cmap.get_msg('2_2_1'), None)
@@ -83,6 +102,8 @@ class CorrectMapTest(unittest.TestCase):
# 3) incorrect, 5 points
# 4) incorrect, None points
# 5) correct, 0 points
# 6) partially correct, 2.5 points
# 7) partially correct, None points
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
@@ -113,15 +134,30 @@ class CorrectMapTest(unittest.TestCase):
npoints=0
)
self.cmap.set(
answer_id='6_2_1',
correctness='partially-correct',
npoints=2.5
)
self.cmap.set(
answer_id='7_2_1',
correctness='partially-correct',
npoints=None
)
# Assert that we get the expected points # Assert that we get the expected points
# If points assigned --> npoints # If points assigned --> npoints
# If no points assigned and correct --> 1 point # If no points assigned and correct --> 1 point
# If no points assigned and partially correct --> 1 point
# If no points assigned and incorrect --> 0 points # If no points assigned and incorrect --> 0 points
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5.3) self.assertEqual(self.cmap.get_npoints('1_2_1'), 5.3)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 1) self.assertEqual(self.cmap.get_npoints('2_2_1'), 1)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 5) self.assertEqual(self.cmap.get_npoints('3_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('4_2_1'), 0) self.assertEqual(self.cmap.get_npoints('4_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('5_2_1'), 0) self.assertEqual(self.cmap.get_npoints('5_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('6_2_1'), 2.5)
self.assertEqual(self.cmap.get_npoints('7_2_1'), 1)
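The assertions above pin down the npoints fallback rule; a minimal sketch of that rule, inferred from these tests rather than taken from the shipped CorrectMap code:
def npoints_rule(correctness, npoints):
    # Explicit points always win; otherwise correct and partially-correct
    # answers default to 1 point and incorrect answers to 0.
    if npoints is not None:
        return npoints
    return 1 if correctness in ('correct', 'partially-correct') else 0
assert npoints_rule('partially-correct', 2.5) == 2.5   # like answer 6_2_1
assert npoints_rule('partially-correct', None) == 1    # like answer 7_2_1
assert npoints_rule('incorrect', None) == 0            # like answer 4_2_1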
def test_set_overall_message(self): def test_set_overall_message(self):
......
...@@ -82,6 +82,23 @@ class ResponseTest(unittest.TestCase): ...@@ -82,6 +82,23 @@ class ResponseTest(unittest.TestCase):
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect') self.assertEqual(result, 'incorrect')
def assert_multiple_partial(self, problem, correct_answers, incorrect_answers, partial_answers):
"""
Runs multiple asserts for varying correct, incorrect,
and partially correct answers, all passed as lists.
"""
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'correct')
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect')
for input_str in partial_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'partially-correct')
def _get_random_number_code(self): def _get_random_number_code(self):
"""Returns code to be used to generate a random result.""" """Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))" return "str(random.randint(0, 1e9))"
...@@ -103,6 +120,14 @@ class MultiChoiceResponseTest(ResponseTest): ...@@ -103,6 +120,14 @@ class MultiChoiceResponseTest(ResponseTest):
self.assert_grade(problem, 'choice_1', 'correct') self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'incorrect') self.assert_grade(problem, 'choice_2', 'incorrect')
def test_partial_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points')
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
def test_named_multiple_choice_grade(self): def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False], problem = self.build_problem(choices=[False, True, False],
choice_names=["foil_1", "foil_2", "foil_3"]) choice_names=["foil_1", "foil_2", "foil_3"])
...@@ -112,6 +137,38 @@ class MultiChoiceResponseTest(ResponseTest): ...@@ -112,6 +137,38 @@ class MultiChoiceResponseTest(ResponseTest):
self.assert_grade(problem, 'choice_foil_2', 'correct') self.assert_grade(problem, 'choice_foil_2', 'correct')
self.assert_grade(problem, 'choice_foil_3', 'incorrect') self.assert_grade(problem, 'choice_foil_3', 'incorrect')
def test_multiple_choice_valid_grading_schemes(self):
# Multiple Choice problems only allow one partial credit scheme.
# Change this test if that changes.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points,points')
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='bongo')
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
def test_partial_points_multiple_choice_grade(self):
problem = self.build_problem(
choices=['partial', 'partial', 'partial'],
credit_type='points',
points=['1', '0.6', '0']
)
# Ensure that we get the expected number of points
# Using assertAlmostEqual to avoid floating point issues
correct_map = problem.grade_answers({'1_2_1': 'choice_0'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 1)
correct_map = problem.grade_answers({'1_2_1': 'choice_1'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.6)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0)
class TrueFalseResponseTest(ResponseTest): class TrueFalseResponseTest(ResponseTest):
xml_factory_class = TrueFalseResponseXMLFactory xml_factory_class = TrueFalseResponseXMLFactory
...@@ -352,6 +409,77 @@ class OptionResponseTest(ResponseTest): ...@@ -352,6 +409,77 @@ class OptionResponseTest(ResponseTest):
# Options not in the list should be marked incorrect # Options not in the list should be marked incorrect
self.assert_grade(problem, "invalid_option", "incorrect") self.assert_grade(problem, "invalid_option", "incorrect")
def test_grade_multiple_correct(self):
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second,third"
)
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
self.assert_grade(problem, "second", "correct")
self.assert_grade(problem, "third", "correct")
def test_grade_partial_credit(self):
# Testing the "points" style.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points",
partial_option="third"
)
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
self.assert_grade(problem, "second", "correct")
self.assert_grade(problem, "third", "partially-correct")
def test_grade_partial_credit_with_points(self):
# Testing the "points" style with specified point values.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points",
partial_option="third",
point_values="0.3"
)
# Assert that we get the expected grades and scores
self.assert_grade(problem, "first", "incorrect")
correct_map = problem.grade_answers({'1_2_1': 'first'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0)
self.assert_grade(problem, "second", "correct")
correct_map = problem.grade_answers({'1_2_1': 'second'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 1)
self.assert_grade(problem, "third", "partially-correct")
correct_map = problem.grade_answers({'1_2_1': 'third'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.3)
def test_grade_partial_credit_valid_scheme(self):
# Only one type of partial credit currently allowed.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points,points",
partial_option="third"
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'second'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="bongo",
partial_option="third"
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'second'}
problem.grade_answers(input_dict)
def test_quote_option(self): def test_quote_option(self):
# Test that option response properly escapes quotes inside options strings # Test that option response properly escapes quotes inside options strings
problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"], problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"],
...@@ -383,6 +511,29 @@ class OptionResponseTest(ResponseTest): ...@@ -383,6 +511,29 @@ class OptionResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$a') self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$a')
def test_variable_options_partial_credit(self):
"""
Test that if variables are given in an option response, the correct map contains the answervariable value.
This is the partial-credit version.
"""
script = textwrap.dedent("""\
a = 1000
b = a*2
c = a*3
""")
problem = self.build_problem(
options=['$a', '$b', '$c'],
correct_option='$a',
partial_option='$b',
script=script,
credit_type='points',
)
input_dict = {'1_2_1': '2000'}
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$b')
class FormulaResponseTest(ResponseTest): class FormulaResponseTest(ResponseTest):
""" """
...@@ -1111,6 +1262,112 @@ class ChoiceResponseTest(ResponseTest): ...@@ -1111,6 +1262,112 @@ class ChoiceResponseTest(ResponseTest):
# No choice 3 exists --> mark incorrect # No choice 3 exists --> mark incorrect
self.assert_grade(problem, 'choice_3', 'incorrect') self.assert_grade(problem, 'choice_3', 'incorrect')
def test_checkbox_group_valid_grading_schemes(self):
# Checkbox-type problems only allow one partial credit scheme.
# Change this test if that changes.
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc,halves,bongo'
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='bongo'
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
def test_checkbox_group_partial_credit_grade(self):
# First: Every Decision Counts grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc'
)
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
# (partially correct if at least one choice is right)
# (incorrect if totally wrong)
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_0', 'partially-correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
# Second: Halves grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='halves'
)
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
# (partially correct on one error)
# (incorrect for more errors, at least with this # of choices.)
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'incorrect')
# Third: Halves grading style with more options
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True, False],
credit_type='halves'
)
# Check that we get the expected results
# (2 errors allowed with 5+ choices)
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_4'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3', 'choice_4'], 'incorrect')
def test_checkbox_group_partial_points_grade(self):
# Ensure that we get the expected number of points
# Using assertAlmostEqual to avoid floating point issues
# First: Every Decision Counts grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc'
)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.75)
# Second: Halves grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='halves'
)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.5)
# Third: Halves grading style with more options
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True, False],
credit_type='halves'
)
correct_map = problem.grade_answers({'1_2_1': 'choice_2,choice_4'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.25)
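The point values asserted above suggest how the two checkbox schemes score; a hedged sketch of that arithmetic, inferred from these expectations rather than copied from the actual grader:
def edc_score(num_choices, num_wrong_decisions):
    # 'Every Decision Counts': each checkbox is graded independently,
    # so credit is the fraction of decisions made correctly.
    return (num_choices - num_wrong_decisions) / float(num_choices)
def halves_score(num_errors):
    # 'Halves': each error halves the credit (incorrect once too many
    # errors are made for the number of choices).
    return 1.0 / (2 ** num_errors)
assert abs(edc_score(4, 1) - 0.75) < 1e-9   # choice_2 alone: one missed choice
assert abs(halves_score(1) - 0.5) < 1e-9    # one error with four choices
assert abs(halves_score(2) - 0.25) < 1e-9   # two errors with five choices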
def test_grade_with_no_checkbox_selected(self): def test_grade_with_no_checkbox_selected(self):
""" """
Test that answer marked as incorrect if no checkbox selected. Test that answer marked as incorrect if no checkbox selected.
...@@ -1171,7 +1428,7 @@ class NumericalResponseTest(ResponseTest): ...@@ -1171,7 +1428,7 @@ class NumericalResponseTest(ResponseTest):
# For simple things its not worth the effort. # For simple things its not worth the effort.
def test_grade_range_tolerance(self): def test_grade_range_tolerance(self):
problem_setup = [ problem_setup = [
# [given_asnwer, [list of correct responses], [list of incorrect responses]] # [given_answer, [list of correct responses], [list of incorrect responses]]
['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']], ['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']],
['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']], ['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']],
['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]], ['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]],
...@@ -1181,6 +1438,41 @@ class NumericalResponseTest(ResponseTest): ...@@ -1181,6 +1438,41 @@ class NumericalResponseTest(ResponseTest):
problem = self.build_problem(answer=given_answer) problem = self.build_problem(answer=given_answer)
self.assert_multiple_grade(problem, correct_responses, incorrect_responses) self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_range_tolerance_partial_credit(self):
problem_setup = [
# [given_answer,
# [list of correct responses],
# [list of incorrect responses],
# [list of partially correct responses]]
[
'[5, 7)',
['5', '6', '6.999'],
['0', '100'],
['4', '8']
],
[
'[1.6e-5, 1.9e24)',
['0.000016', '1.6*10^-5', '1.59e24'],
['-1e26', '1.9e26', '1.9*10^26'],
['0', '2e24']
],
[
'[0, 1.6e-5]',
['1.6*10^-5'],
['2'],
['1.9e-5', '-1e-6']
],
[
'(1.6e-5, 10]',
['2'],
['-20', '30'],
['-1', '12']
],
]
for given_answer, correct_responses, incorrect_responses, partial_responses in problem_setup:
problem = self.build_problem(answer=given_answer, credit_type='close')
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
def test_grade_range_tolerance_exceptions(self): def test_grade_range_tolerance_exceptions(self):
# no complex number in range tolerance staff answer # no complex number in range tolerance staff answer
problem = self.build_problem(answer='[1j, 5]') problem = self.build_problem(answer='[1j, 5]')
...@@ -1218,6 +1510,61 @@ class NumericalResponseTest(ResponseTest): ...@@ -1218,6 +1510,61 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "3.9", "4.1", "0"] incorrect_responses = ["", "3.9", "4.1", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses) self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_partial(self):
# First: "list"-style grading scheme.
problem = self.build_problem(
answer=4,
credit_type='list',
partial_answers='2,8,-4'
)
correct_responses = ["4", "4.0"]
incorrect_responses = ["1", "3", "4.1", "0", "-2"]
partial_responses = ["2", "2.0", "-4", "-4.0", "8", "8.0"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Second: "close"-style grading scheme. Default range is twice tolerance.
problem = self.build_problem(
answer=4,
tolerance=0.2,
credit_type='close'
)
correct_responses = ["4", "4.1", "3.9"]
incorrect_responses = ["1", "3", "4.5", "0", "-2"]
partial_responses = ["4.3", "3.7"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Third: "close"-style grading scheme with partial_range set.
problem = self.build_problem(
answer=4,
tolerance=0.2,
partial_range=3,
credit_type='close'
)
correct_responses = ["4", "4.1"]
incorrect_responses = ["1", "3", "0", "-2"]
partial_responses = ["4.5", "3.5"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Fourth: both "list"- and "close"-style grading schemes at once.
problem = self.build_problem(
answer=4,
tolerance=0.2,
partial_range=3,
credit_type='close,list',
partial_answers='2,8,-4'
)
correct_responses = ["4", "4.0"]
incorrect_responses = ["1", "3", "0", "-2"]
partial_responses = ["2", "2.1", "1.5", "8", "7.5", "8.1", "-4", "-4.15", "-3.5", "4.5", "3.5"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
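A sketch of the two numerical partial-credit checks implied by the expectations above (an inference, not the responder's code): 'close' appears to widen the tolerance by partial_range, seemingly defaulting to 2, while 'list' compares the response against each value in partial_answers. Both checks apply only after the regular correctness check has failed.
def close_partial(student, answer, tolerance, partial_range=2):
    # Partially correct when inside the widened tolerance band.
    return abs(student - answer) <= tolerance * partial_range
def list_partial(student, partial_answers):
    # Partially correct when the response matches one of the listed values.
    return any(abs(student - float(p)) < 1e-9 for p in partial_answers.split(','))
assert close_partial(4.3, 4, 0.2)        # marked partially correct above
assert not close_partial(4.5, 4, 0.2)    # outside the default 2x band
assert list_partial(-4.0, '2,8,-4')      # '-4.0' earns list-style credit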
def test_numerical_valid_grading_schemes(self):
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(answer=4, tolerance=0.1, credit_type='bongo')
input_dict = {'1_2_1': '4'}
with self.assertRaises(LoncapaProblemError):
problem.grade_answers(input_dict)
def test_grade_decimal_tolerance(self): def test_grade_decimal_tolerance(self):
problem = self.build_problem(answer=4, tolerance=0.1) problem = self.build_problem(answer=4, tolerance=0.1)
correct_responses = ["4.0", "4.00", "4.09", "3.91"] correct_responses = ["4.0", "4.00", "4.09", "3.91"]
...@@ -1444,11 +1791,18 @@ class CustomResponseTest(ResponseTest): ...@@ -1444,11 +1791,18 @@ class CustomResponseTest(ResponseTest):
# or an ordered list of answers (if there are multiple inputs) # or an ordered list of answers (if there are multiple inputs)
# #
# The function should return a dict of the form # The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING } (no 'grade_decimal' key to test that it's optional) # { 'ok': BOOL or STRING, 'msg': STRING } (no 'grade_decimal' key to test that it's optional)
# #
script = textwrap.dedent(""" script = textwrap.dedent("""
def check_func(expect, answer_given): def check_func(expect, answer_given):
return {'ok': answer_given == expect, 'msg': 'Message text'} partial_credit = '21'
if answer_given == expect:
retval = True
elif answer_given == partial_credit:
retval = 'partial'
else:
retval = False
return {'ok': retval, 'msg': 'Message text'}
""") """)
problem = self.build_problem(script=script, cfn="check_func", expect="42") problem = self.build_problem(script=script, cfn="check_func", expect="42")
...@@ -1465,6 +1819,18 @@ class CustomResponseTest(ResponseTest): ...@@ -1465,6 +1819,18 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(msg, "Message text") self.assertEqual(msg, "Message text")
self.assertEqual(npoints, 1) self.assertEqual(npoints, 1)
# Partially correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
npoints = correct_map.get_npoints('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertEqual(msg, "Message text")
self.assertTrue(0 <= npoints <= 1)
# Incorrect answer # Incorrect answer
input_dict = {'1_2_1': '0'} input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
...@@ -1486,14 +1852,24 @@ class CustomResponseTest(ResponseTest): ...@@ -1486,14 +1852,24 @@ class CustomResponseTest(ResponseTest):
# or an ordered list of answers (if there are multiple inputs) # or an ordered list of answers (if there are multiple inputs)
# #
# The function should return a dict of the form # The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT } # { 'ok': BOOL or STRING, 'msg': STRING, 'grade_decimal': FLOAT }
# #
script = textwrap.dedent(""" script = textwrap.dedent("""
def check_func(expect, answer_given): def check_func(expect, answer_given):
partial_credit = '21'
if answer_given == expect:
retval = True
score = 0.9
elif answer_given == partial_credit:
retval = 'partial'
score = 0.5
else:
retval = False
score = 0.1
return { return {
'ok': answer_given == expect, 'ok': retval,
'msg': 'Message text', 'msg': 'Message text',
'grade_decimal': 0.9 if answer_given == expect else 0.1, 'grade_decimal': score,
} }
""") """)
...@@ -1511,16 +1887,28 @@ class CustomResponseTest(ResponseTest): ...@@ -1511,16 +1887,28 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1) self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
# Partially Correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.5)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
def test_function_code_multiple_input_no_msg(self): def test_function_code_multiple_input_no_msg(self):
# Check functions also have the option of returning # Check functions also have the option of returning
# a single boolean value # a single boolean or string value
# If true, mark all the inputs correct # If true, mark all the inputs correct
# If one is true but not the other, mark all partially correct
# If false, mark all the inputs incorrect # If false, mark all the inputs incorrect
script = textwrap.dedent(""" script = textwrap.dedent("""
def check_func(expect, answer_given): def check_func(expect, answer_given):
return (answer_given[0] == expect and if answer_given[0] == expect and answer_given[1] == expect:
answer_given[1] == expect) retval = True
elif answer_given[0] == expect or answer_given[1] == expect:
retval = 'partial'
else:
retval = False
return retval
""") """)
problem = self.build_problem(script=script, cfn="check_func", problem = self.build_problem(script=script, cfn="check_func",
...@@ -1536,11 +1924,23 @@ class CustomResponseTest(ResponseTest): ...@@ -1536,11 +1924,23 @@ class CustomResponseTest(ResponseTest):
correctness = correct_map.get_correctness('1_2_2') correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'correct') self.assertEqual(correctness, 'correct')
# One answer incorrect -- expect both inputs marked incorrect # One answer incorrect -- expect both inputs marked partially correct
input_dict = {'1_2_1': '0', '1_2_2': '42'} input_dict = {'1_2_1': '0', '1_2_2': '42'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1') correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertTrue(0 <= correct_map.get_npoints('1_2_1') <= 1)
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'partially-correct')
self.assertTrue(0 <= correct_map.get_npoints('1_2_2') <= 1)
# Both answers incorrect -- expect both inputs marked incorrect
input_dict = {'1_2_1': '0', '1_2_2': '0'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'incorrect') self.assertEqual(correctness, 'incorrect')
correctness = correct_map.get_correctness('1_2_2') correctness = correct_map.get_correctness('1_2_2')
...@@ -1552,7 +1952,8 @@ class CustomResponseTest(ResponseTest): ...@@ -1552,7 +1952,8 @@ class CustomResponseTest(ResponseTest):
# the check function can return a dict of the form: # the check function can return a dict of the form:
# #
# {'overall_message': STRING, # {'overall_message': STRING,
# 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] } (no grade_decimal to test it's optional) # 'input_list': [{'ok': BOOL or STRING, 'msg': STRING}, ...] }
# (no grade_decimal to test it's optional)
# #
# 'overall_message' is displayed at the end of the response # 'overall_message' is displayed at the end of the response
# #
...@@ -1563,18 +1964,20 @@ class CustomResponseTest(ResponseTest): ...@@ -1563,18 +1964,20 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1) check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2) check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3) check3 = (int(answer_given[2]) == 3)
check4 = 'partial' if answer_given[3] == 'four' else False
return {'overall_message': 'Overall message', return {'overall_message': 'Overall message',
'input_list': [ 'input_list': [
{'ok': check1, 'msg': 'Feedback 1'}, {'ok': check1, 'msg': 'Feedback 1'},
{'ok': check2, 'msg': 'Feedback 2'}, {'ok': check2, 'msg': 'Feedback 2'},
{'ok': check3, 'msg': 'Feedback 3'} ] } {'ok': check3, 'msg': 'Feedback 3'},
{'ok': check4, 'msg': 'Feedback 4'} ] }
""") """)
problem = self.build_problem(script=script, problem = self.build_problem(script=script,
cfn="check_func", num_inputs=3) cfn="check_func", num_inputs=4)
# Grade the inputs (one input incorrect) # Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'} input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
# Expect that we receive the overall message (for the whole response) # Expect that we receive the overall message (for the whole response)
...@@ -1584,16 +1987,19 @@ class CustomResponseTest(ResponseTest): ...@@ -1584,16 +1987,19 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_4'), 'partially-correct')
# Expect that the inputs were given correct npoints # Expect that the inputs were given correct npoints
self.assertEqual(correct_map.get_npoints('1_2_1'), 0) self.assertEqual(correct_map.get_npoints('1_2_1'), 0)
self.assertEqual(correct_map.get_npoints('1_2_2'), 1) self.assertEqual(correct_map.get_npoints('1_2_2'), 1)
self.assertEqual(correct_map.get_npoints('1_2_3'), 1) self.assertEqual(correct_map.get_npoints('1_2_3'), 1)
self.assertTrue(0 <= correct_map.get_npoints('1_2_4') <= 1)
# Expect that we received messages for each individual input # Expect that we received messages for each individual input
self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1') self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1')
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2') self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3') self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
self.assertEqual(correct_map.get_msg('1_2_4'), 'Feedback 4')
def test_function_code_multiple_inputs_decimal_score(self): def test_function_code_multiple_inputs_decimal_score(self):
...@@ -1601,7 +2007,8 @@ class CustomResponseTest(ResponseTest): ...@@ -1601,7 +2007,8 @@ class CustomResponseTest(ResponseTest):
# the check function can return a dict of the form: # the check function can return a dict of the form:
# #
# {'overall_message': STRING, # {'overall_message': STRING,
# 'input_list': [{'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT}, ...] } # 'input_list': [{'ok': BOOL or STRING,
# 'msg': STRING, 'grade_decimal': FLOAT}, ...] }
# # # #
# 'input_list' contains dictionaries representing the correctness # 'input_list' contains dictionaries representing the correctness
# and message for each input. # and message for each input.
...@@ -1610,39 +2017,51 @@ class CustomResponseTest(ResponseTest): ...@@ -1610,39 +2017,51 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1) check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2) check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3) check3 = (int(answer_given[2]) == 3)
check4 = 'partial' if answer_given[3] == 'four' else False
score1 = 0.9 if check1 else 0.1 score1 = 0.9 if check1 else 0.1
score2 = 0.9 if check2 else 0.1 score2 = 0.9 if check2 else 0.1
score3 = 0.9 if check3 else 0.1 score3 = 0.9 if check3 else 0.1
score4 = 0.7 if check4 == 'partial' else 0.1
return { return {
'input_list': [ 'input_list': [
{'ok': check1, 'grade_decimal': score1, 'msg': 'Feedback 1'}, {'ok': check1, 'grade_decimal': score1, 'msg': 'Feedback 1'},
{'ok': check2, 'grade_decimal': score2, 'msg': 'Feedback 2'}, {'ok': check2, 'grade_decimal': score2, 'msg': 'Feedback 2'},
{'ok': check3, 'grade_decimal': score3, 'msg': 'Feedback 3'}, {'ok': check3, 'grade_decimal': score3, 'msg': 'Feedback 3'},
{'ok': check4, 'grade_decimal': score4, 'msg': 'Feedback 4'},
] ]
} }
""") """)
problem = self.build_problem(script=script, cfn="check_func", num_inputs=3) problem = self.build_problem(script=script, cfn="check_func", num_inputs=4)
# Grade the inputs (one input incorrect) # Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'} input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
# Expect that the inputs were graded individually # Expect that the inputs were graded individually
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_4'), 'partially-correct')
# Expect that the inputs were given correct npoints # Expect that the inputs were given correct npoints
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1) self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
self.assertEqual(correct_map.get_npoints('1_2_2'), 0.9) self.assertEqual(correct_map.get_npoints('1_2_2'), 0.9)
self.assertEqual(correct_map.get_npoints('1_2_3'), 0.9) self.assertEqual(correct_map.get_npoints('1_2_3'), 0.9)
self.assertEqual(correct_map.get_npoints('1_2_4'), 0.7)
def test_function_code_with_extra_args(self): def test_function_code_with_extra_args(self):
script = textwrap.dedent("""\ script = textwrap.dedent("""\
def check_func(expect, answer_given, options, dynamath): def check_func(expect, answer_given, options, dynamath):
assert options == "xyzzy", "Options was %r" % options assert options == "xyzzy", "Options was %r" % options
return {'ok': answer_given == expect, 'msg': 'Message text'} partial_credit = '21'
if answer_given == expect:
retval = True
elif answer_given == partial_credit:
retval = 'partial'
else:
retval = False
return {'ok': retval, 'msg': 'Message text'}
""") """)
problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath") problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
...@@ -1657,6 +2076,16 @@ class CustomResponseTest(ResponseTest): ...@@ -1657,6 +2076,16 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correctness, 'correct') self.assertEqual(correctness, 'correct')
self.assertEqual(msg, "Message text") self.assertEqual(msg, "Message text")
# Partially Correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertEqual(msg, "Message text")
# Incorrect answer # Incorrect answer
input_dict = {'1_2_1': '0'} input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
...@@ -1683,8 +2112,12 @@ class CustomResponseTest(ResponseTest): ...@@ -1683,8 +2112,12 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1) check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2) check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3) check3 = (int(answer_given[2]) == 3)
return {'ok': (check1 and check2 and check3), if (int(answer_given[0]) == -1) and check2 and check3:
'msg': 'Message text'} return {'ok': 'partial',
'msg': 'Message text'}
else:
return {'ok': (check1 and check2 and check3),
'msg': 'Message text'}
""") """)
problem = self.build_problem(script=script, problem = self.build_problem(script=script,
...@@ -1699,6 +2132,15 @@ class CustomResponseTest(ResponseTest): ...@@ -1699,6 +2132,15 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect') self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect') self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')
# Grade the inputs (one input partially correct)
input_dict = {'1_2_1': '-1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Everything marked partially correct
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'partially-correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'partially-correct')
# Grade the inputs (everything correct) # Grade the inputs (everything correct)
input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'} input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
$annotation-yellow: rgba(255,255,10,0.3); $annotation-yellow: rgba(255,255,10,0.3);
$color-copy-tip: rgb(100,100,100); $color-copy-tip: rgb(100,100,100);
$correct: $green-d1; $correct: $green-d1;
$partiallycorrect: $green-d1;
$incorrect: $red; $incorrect: $red;
// +Extends - Capa // +Extends - Capa
...@@ -75,6 +76,11 @@ h2 { ...@@ -75,6 +76,11 @@ h2 {
color: $correct; color: $correct;
} }
.feedback-hint-partially-correct {
margin-top: ($baseline/2);
color: $partiallycorrect;
}
.feedback-hint-incorrect { .feedback-hint-incorrect {
margin-top: ($baseline/2); margin-top: ($baseline/2);
color: $incorrect; color: $incorrect;
...@@ -174,6 +180,16 @@ div.problem { ...@@ -174,6 +180,16 @@ div.problem {
} }
} }
&.choicegroup_partially-correct {
@include status-icon($partiallycorrect, "\f00c");
border: 2px solid $partiallycorrect;
// keep the partial-credit color on hover.
&:hover {
border-color: $partiallycorrect;
}
}
&.choicegroup_incorrect { &.choicegroup_incorrect {
@include status-icon($incorrect, "\f00d"); @include status-icon($incorrect, "\f00d");
border: 2px solid $incorrect; border: 2px solid $incorrect;
...@@ -227,6 +243,11 @@ div.problem { ...@@ -227,6 +243,11 @@ div.problem {
@include status-icon($correct, "\f00c"); @include status-icon($correct, "\f00c");
} }
// CASE: partially correct answer
&.partially-correct {
@include status-icon($partiallycorrect, "\f00c");
}
// CASE: incorrect answer // CASE: incorrect answer
&.incorrect { &.incorrect {
@include status-icon($incorrect, "\f00d"); @include status-icon($incorrect, "\f00d");
...@@ -338,6 +359,19 @@ div.problem { ...@@ -338,6 +359,19 @@ div.problem {
} }
} }
&.partially-correct, &.ui-icon-check {
p.status {
display: inline-block;
width: 25px;
height: 20px;
background: url('../images/partially-correct-icon.png') center center no-repeat;
}
input {
border-color: $partiallycorrect;
}
}
&.processing { &.processing {
p.status { p.status {
display: inline-block; display: inline-block;
...@@ -713,7 +747,7 @@ div.problem { ...@@ -713,7 +747,7 @@ div.problem {
height: 46px; height: 46px;
} }
> .incorrect, .correct, .unanswered { > .incorrect, .partially-correct, .correct, .unanswered {
.status { .status {
display: inline-block; display: inline-block;
...@@ -734,6 +768,18 @@ div.problem { ...@@ -734,6 +768,18 @@ div.problem {
} }
} }
// CASE: partially correct answer
> .partially-correct {
input {
border: 2px solid $partiallycorrect;
}
.status {
@include status-icon($partiallycorrect, "\f00c");
}
}
// CASE: correct answer // CASE: correct answer
> .correct { > .correct {
...@@ -775,7 +821,7 @@ div.problem { ...@@ -775,7 +821,7 @@ div.problem {
.indicator-container { .indicator-container {
display: inline-block; display: inline-block;
.status.correct:after, .status.incorrect:after, .status.unanswered:after { .status.correct:after, .status.partially-correct:after, .status.incorrect:after, .status.unanswered:after {
@include margin-left(0); @include margin-left(0);
} }
} }
...@@ -941,6 +987,20 @@ div.problem { ...@@ -941,6 +987,20 @@ div.problem {
} }
} }
.detailed-targeted-feedback-partially-correct {
> p:first-child {
@extend %t-strong;
color: $partiallycorrect;
text-transform: uppercase;
font-style: normal;
font-size: 0.9em;
}
p:last-child {
margin-bottom: 0;
}
}
.detailed-targeted-feedback-correct { .detailed-targeted-feedback-correct {
> p:first-child { > p:first-child {
@extend %t-strong; @extend %t-strong;
...@@ -1135,6 +1195,14 @@ div.problem { ...@@ -1135,6 +1195,14 @@ div.problem {
} }
} }
.result-partially-correct {
background: url('../images/partially-correct-icon.png') left 20px no-repeat;
.result-actual-output {
color: #090;
}
}
.result-incorrect { .result-incorrect {
background: url('../images/incorrect-icon.png') left 20px no-repeat; background: url('../images/incorrect-icon.png') left 20px no-repeat;
...@@ -1340,6 +1408,14 @@ div.problem { ...@@ -1340,6 +1408,14 @@ div.problem {
} }
} }
label.choicetextgroup_partially-correct, section.choicetextgroup_partially-correct {
@extend label.choicegroup_partially-correct;
input[type="text"] {
border-color: $partiallycorrect;
}
}
label.choicetextgroup_incorrect, section.choicetextgroup_incorrect { label.choicetextgroup_incorrect, section.choicetextgroup_incorrect {
@extend label.choicegroup_incorrect; @extend label.choicegroup_incorrect;
} }
......
...@@ -64,6 +64,12 @@ class ProblemPage(PageObject): ...@@ -64,6 +64,12 @@ class ProblemPage(PageObject):
""" """
self.q(css='div.problem div.capa_inputtype.textline input').fill(text) self.q(css='div.problem div.capa_inputtype.textline input').fill(text)
def fill_answer_numerical(self, text):
"""
Fill in the answer to a numerical problem.
"""
self.q(css='div.problem section.inputtype input').fill(text)
def click_check(self): def click_check(self):
""" """
Click the Check button! Click the Check button!
...@@ -84,6 +90,24 @@ class ProblemPage(PageObject): ...@@ -84,6 +90,24 @@ class ProblemPage(PageObject):
""" """
return self.q(css="div.problem div.capa_inputtype.textline div.correct span.status").is_present() return self.q(css="div.problem div.capa_inputtype.textline div.correct span.status").is_present()
def simpleprob_is_correct(self):
"""
Is there a "correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.correct span.status").is_present()
def simpleprob_is_partially_correct(self):
"""
Is there a "partially correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.partially-correct span.status").is_present()
def simpleprob_is_incorrect(self):
"""
Is there an "incorrect" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.incorrect span.status").is_present()
def click_clarification(self, index=0): def click_clarification(self, index=0):
""" """
Click on an inline icon that can be included in problem text using an HTML <clarification> element: Click on an inline icon that can be included in problem text using an HTML <clarification> element:
......
...@@ -213,3 +213,35 @@ class ProblemWithMathjax(ProblemsTest): ...@@ -213,3 +213,35 @@ class ProblemWithMathjax(ProblemsTest):
self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text) self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in problem hint") self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in problem hint")
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that partial credit appears properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<formulaequationinput label="The answer is 1. Partial credit for -1." />
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_check()
self.assertTrue(problem_page.simpleprob_is_partially_correct())