Commit a361191e by Renzo Lucioni

Merge pull request #9381 from edx/release-2015-08-18-conflict

Release 2015 08 18 conflict
parents 08b9b03b c5f5a58c
......@@ -10,7 +10,7 @@ class CorrectMap(object):
in a capa problem. The response evaluation result for each answer_id includes
(correctness, npoints, msg, hint, hintmode).
- correctness : 'correct', 'incorrect', or 'partially-correct'
- correctness : either 'correct' or 'incorrect'
- npoints : None, or integer specifying number of points awarded for this answer_id
- msg : string (may have HTML) giving extra message response
(displayed below textline or textbox)
......@@ -101,23 +101,10 @@ class CorrectMap(object):
self.set(k, **correct_map[k])
def is_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is correct OR partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct']
return None
def is_partially_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] == 'partially-correct'
return None
def is_queued(self, answer_id):
return answer_id in self.cmap and self.cmap[answer_id]['queuestate'] is not None
......
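Before this revert, is_correct treated a 'partially-correct' entry as correct, while is_partially_correct matched it exactly. A minimal standalone sketch of that behavior, assuming only the dict-of-dicts cmap layout shown in the hunk above (the names and data here are illustrative, not the module's API):

    # Sketch of the pre-revert checks; cmap maps answer_id to a dict
    # holding, among other keys, a 'correctness' string.
    cmap = {
        '1_2_1': {'correctness': 'correct'},
        '2_2_1': {'correctness': 'partially-correct'},
    }

    def is_correct(answer_id):
        # 'partially-correct' counts as correct here.
        if answer_id in cmap:
            return cmap[answer_id]['correctness'] in ('correct', 'partially-correct')
        return None

    assert is_correct('1_2_1') is True
    assert is_correct('2_2_1') is True
    assert is_correct('9_2_1') is None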
......@@ -85,7 +85,6 @@ class Status(object):
names = {
'correct': _('correct'),
'incorrect': _('incorrect'),
'partially-correct': _('partially correct'),
'incomplete': _('incomplete'),
'unanswered': _('unanswered'),
'unsubmitted': _('unanswered'),
......@@ -95,7 +94,6 @@ class Status(object):
# Translators: these are tooltips that indicate the state of an assessment question
'correct': _('This is correct.'),
'incorrect': _('This is incorrect.'),
'partially-correct': _('This is partially correct.'),
'unanswered': _('This is unanswered.'),
'unsubmitted': _('This is unanswered.'),
'queued': _('This is being processed.'),
......@@ -898,7 +896,7 @@ class MatlabInput(CodeInput):
Right now, we only want this button to show up when a problem has not been
checked.
"""
if self.status in ['correct', 'incorrect', 'partially-correct']:
if self.status in ['correct', 'incorrect']:
return False
else:
return True
......
......@@ -139,8 +139,6 @@ class LoncapaResponse(object):
tags = None
hint_tag = None
has_partial_credit = False
credit_type = []
max_inputfields = None
allowed_inputfields = []
......@@ -215,18 +213,6 @@ class LoncapaResponse(object):
self.default_answer_map[entry.get(
'id')] = contextualize_text(answer, self.context)
# Does this problem have partial credit?
# If so, what kind? Get it as a list of strings.
partial_credit = xml.xpath('.')[0].get('partial_credit', default=False)
if str(partial_credit).lower().strip() == 'false':
self.has_partial_credit = False
self.credit_type = []
else:
self.has_partial_credit = True
self.credit_type = partial_credit.split(',')
self.credit_type = [word.strip().lower() for word in self.credit_type]
if hasattr(self, 'setup_response'):
self.setup_response()
......@@ -275,6 +261,7 @@ class LoncapaResponse(object):
new_cmap = self.get_score(student_answers)
self.get_hints(convert_files_to_filenames(
student_answers), new_cmap, old_cmap)
# log.debug('new_cmap = %s' % new_cmap)
return new_cmap
def make_hint_div(self, hint_node, correct, student_answer, question_tag,
......@@ -826,23 +813,11 @@ class ChoiceResponse(LoncapaResponse):
def setup_response(self):
self.assign_choice_names()
correct_xml = self.xml.xpath(
'//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id')
)
self.correct_choices = set([
choice.get('name') for choice in correct_xml
])
incorrect_xml = self.xml.xpath(
'//*[@id=$id]//choice[@correct="false"]',
id=self.xml.get('id')
)
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id'))
self.incorrect_choices = set([
choice.get('name') for choice in incorrect_xml
])
self.correct_choices = set([choice.get(
'name') for choice in correct_xml])
def assign_choice_names(self):
"""
......@@ -856,154 +831,26 @@ class ChoiceResponse(LoncapaResponse):
if not choice.get('id'):
choice.set("id", chr(ord("A") + index))
def grade_via_every_decision_counts(self, **kwargs):
"""
Calculates partial credit on the Every Decision Counts scheme.
For each correctly selected or correctly blank choice, score 1 point.
Divide by total number of choices.
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap.
"""
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
edc_max_grade = len(all_choices)
edc_current_grade = 0
good_answers = sum([1 for answer in student_answer if answer in self.correct_choices])
good_non_answers = sum([1 for blank in student_non_answers if blank in self.incorrect_choices])
edc_current_grade = good_answers + good_non_answers
return_grade = round(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
if edc_current_grade == edc_max_grade:
return CorrectMap(self.answer_id, correctness='correct')
elif edc_current_grade > 0:
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, correctness='incorrect', npoints=0)
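A worked example of the Every Decision Counts arithmetic described above, as a self-contained sketch rather than the class method (edc_score and its arguments are illustrative names):

    # One point per choice the student handled correctly (checked a
    # correct box or left an incorrect box blank), divided by the
    # total number of choices.
    def edc_score(all_choices, correct_choices, student_answer, max_score=1.0):
        student_answer = set(student_answer)
        correct_choices = set(correct_choices)
        incorrect_choices = set(all_choices) - correct_choices
        non_answers = set(all_choices) - student_answer
        good = len(student_answer & correct_choices) + len(non_answers & incorrect_choices)
        return round(max_score * float(good) / len(set(all_choices)), 2)

    # Four choices, two of them correct; checking just one correct box
    # scores 1 good answer + 2 good blanks = 3 of 4 points:
    assert edc_score('ABCD', 'CD', ['C']) == 0.75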
def grade_via_halves(self, **kwargs):
"""
Calculates partial credit on the Halves scheme.
If no errors, full credit.
If one error, half credit as long as there are 3+ choices
If two errors, 1/4 credit as long as there are 5+ choices
(If not enough choices, no credit.)
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap
"""
def get_score(self, student_answers):
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
halves_error_count = 0
incorrect_answers = sum([1 for answer in student_answer if answer in self.incorrect_choices])
missed_answers = sum([1 for blank in student_non_answers if blank in self.correct_choices])
halves_error_count = incorrect_answers + missed_answers
if halves_error_count == 0:
return_grade = self.get_max_score()
return CorrectMap(self.answer_id, correctness='correct', npoints=return_grade)
elif halves_error_count == 1 and len(all_choices) > 2:
return_grade = round(self.get_max_score() / 2.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
elif halves_error_count == 2 and len(all_choices) > 4:
return_grade = round(self.get_max_score() / 4.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, 'incorrect')
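The same scheme as a worked sketch: halves grades on the number of errors (wrongly checked plus wrongly blank boxes), and the error allowance grows with the number of choices (halves_score is an illustrative name):

    def halves_score(n_choices, n_errors, max_score=1.0):
        # 0 errors: full credit; 1 error with 3+ choices: half credit;
        # 2 errors with 5+ choices: quarter credit; otherwise nothing.
        if n_errors == 0:
            return max_score
        elif n_errors == 1 and n_choices > 2:
            return round(max_score / 2.0, 2)
        elif n_errors == 2 and n_choices > 4:
            return round(max_score / 4.0, 2)
        return 0.0

    assert halves_score(4, 1) == 0.5
    assert halves_score(4, 2) == 0.0   # quarter credit needs 5+ choices
    assert halves_score(5, 2) == 0.25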
student_answer = student_answers.get(self.answer_id, [])
def grade_without_partial_credit(self, **kwargs):
"""
Standard grading for checkbox problems.
100% credit if all choices are correct; 0% otherwise
Arguments: student_answer, which is the items the student actually chose
"""
if not isinstance(student_answer, list):
student_answer = [student_answer]
student_answer = kwargs['student_answer']
no_empty_answer = student_answer != []
student_answer = set(student_answer)
required_selected = len(self.correct_choices - student_answer) == 0
no_extra_selected = len(student_answer - self.correct_choices) == 0
correct = required_selected & no_extra_selected
correct = required_selected & no_extra_selected & no_empty_answer
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
# Setting up answer sets:
# all_choices: the full set of checkboxes
# student_answer: what the student actually chose (note no "s")
# student_non_answers: what they didn't choose
# self.correct_choices: boxes that should be checked
# self.incorrect_choices: boxes that should NOT be checked
all_choices = self.correct_choices.union(self.incorrect_choices)
student_answer = student_answers.get(self.answer_id, [])
if not isinstance(student_answer, list):
student_answer = [student_answer]
# When a student leaves all the boxes unmarked, edX throws an error.
# This line checks for blank answers so that we can throw "false".
# This is not ideal. "None apply" should be a valid choice.
# Sadly, this is not the place where we can fix that problem.
empty_answer = student_answer == []
if empty_answer:
return CorrectMap(self.answer_id, 'incorrect')
student_answer = set(student_answer)
student_non_answers = all_choices - student_answer
# No partial credit? Get grade right now.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answer=student_answer)
# The code below checks whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="EDC" to count each choice for equal points (Every Decision Counts).
# Set partial_credit="halves" to take half credit off for each error.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'edc': self.grade_via_every_decision_counts,
'halves': self.grade_via_halves,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Checkbox problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
all_choices=all_choices,
student_answer=student_answer,
student_non_answers=student_non_answers
)
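The dispatch idiom deleted here recurs in the checkbox, multiple choice, and dropdown responses: the partial_credit attribute selects exactly one grader from a dict, and anything unrecognized raises. A miniature, hypothetical version of that pattern:

    class LoncapaProblemError(Exception):
        pass

    def pick_grader(credit_type, graders):
        # Exactly one scheme at a time, and it must be a known one.
        if len(credit_type) > 1:
            raise LoncapaProblemError('Only one type of partial credit is allowed.')
        if credit_type[0] not in graders:
            raise LoncapaProblemError(
                'partial_credit attribute should be one of: ' + ','.join(graders))
        return graders[credit_type[0]]

    graders = {'edc': lambda: 'edc', 'halves': lambda: 'halves',
               'false': lambda: 'exact'}
    assert pick_grader(['halves'], graders)() == 'halves'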
def get_answers(self):
return {self.answer_id: list(self.correct_choices)}
......@@ -1149,14 +996,6 @@ class MultipleChoiceResponse(LoncapaResponse):
multi_device_support = True
def setup_response(self):
"""
Collects information from the XML for later use.
correct_choices is a list of the correct choices.
partial_choices is a list of the partially-correct choices.
partial_values is a list of the scores that go with those
choices, defaulting to 0.5 if no value is specified.
"""
# call secondary setup for MultipleChoice questions, to set name
# attributes
self.mc_setup_response()
......@@ -1171,19 +1010,8 @@ class MultipleChoiceResponse(LoncapaResponse):
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).upper() == "TRUE"
]
if self.has_partial_credit:
self.partial_choices = [
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
self.partial_values = [
float(choice.get('point_value', default='0.5')) # Default partial credit: 50%
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
]
def get_extended_hints(self, student_answer_dict, new_cmap):
"""
......@@ -1254,80 +1082,16 @@ class MultipleChoiceResponse(LoncapaResponse):
self.do_shuffle(self.xml, problem)
self.do_answer_pool(self.xml, problem)
def grade_via_points(self, **kwargs):
"""
Calculates partial credit based on the Points scheme.
Answer choices marked "partial" are given partial credit.
Default is 50%; other amounts may be set in point_value attributes.
Arguments: student_answers
Returns: a CorrectMap
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
elif (
self.answer_id in student_answers
and student_answers[self.answer_id] in self.partial_choices
):
choice_index = self.partial_choices.index(student_answers[self.answer_id])
credit_amount = self.partial_values[choice_index]
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=credit_amount)
else:
return CorrectMap(self.answer_id, 'incorrect')
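The points scheme above, reduced to a standalone sketch (points_score is an illustrative name): choices marked correct="partial" earn their point_value attribute, which defaults to 0.5:

    def points_score(chosen, correct_choices, partial_choices, partial_values):
        if chosen in correct_choices:
            return 'correct', 1.0
        if chosen in partial_choices:
            return 'partially-correct', partial_values[partial_choices.index(chosen)]
        return 'incorrect', 0.0

    # Three choices all marked partial, with point values 1, 0.6, and 0:
    assert points_score('choice_1', [], ['choice_0', 'choice_1', 'choice_2'],
                        [1.0, 0.6, 0.0]) == ('partially-correct', 0.6)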
def grade_without_partial_credit(self, **kwargs):
def get_score(self, student_answers):
"""
Standard grading for multiple-choice problems.
100% credit if choices are correct; 0% otherwise
Arguments: student_answers
Returns: a CorrectMap
grade student response.
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
"""
grade student response.
"""
# No partial credit? Grade it right away.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answers=student_answers)
# The code below checks whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="points" to set specific point values for specific choices.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'points': self.grade_via_points,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Multiple Choice problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
student_answers=student_answers
)
def get_answers(self):
return {self.answer_id: self.correct_choices}
......@@ -1587,163 +1351,23 @@ class OptionResponse(LoncapaResponse):
def setup_response(self):
self.answer_fields = self.inputfields
def grade_via_points(self, problem_map, student_answers):
"""
Grades dropdown problems with "points"-style partial credit.
Full credit for any fully correct answer.
Partial credit for any partially correct answer.
Amount is set by point_values attribute, defaults to 50%.
Returns a CorrectMap.
"""
answer_map = problem_map['correct']
cmap = CorrectMap()
for aid in answer_map:
# Set correct/incorrect first, check for partial credit later.
for word in answer_map[aid]:
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'correct')
break
else:
cmap.set(aid, 'incorrect')
# For partial credit:
partial_map = problem_map['partial']
points_map = problem_map['point_values']
if not cmap.is_correct(aid) and partial_map[aid] is not None:
for index, word in enumerate(partial_map[aid]):
# Set the correctness and point value
# for each answer id independently.
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'partially-correct')
cmap.set_property(aid, 'npoints', points_map[aid][index])
break
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def grade_without_partial_credit(self, problem_map, student_answers):
"""
Grades dropdown problems without partial credit.
Full credit for any correct answer, no credit otherwise.
Returns a CorrectMap.
"""
answer_map = problem_map['correct']
def get_score(self, student_answers):
cmap = CorrectMap()
for aid in answer_map:
for word in answer_map[aid]:
if aid in student_answers and student_answers[aid] == word:
cmap.set(aid, 'correct')
break
else:
cmap.set(aid, 'incorrect')
amap = self.get_answers()
for aid in amap:
if aid in student_answers and student_answers[aid] == amap[aid]:
cmap.set(aid, 'correct')
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def get_score(self, student_answers):
problem_map = self.get_problem_attributes()
# If no partial credit, grade it right now.
if not self.has_partial_credit:
return self.grade_without_partial_credit(problem_map, student_answers)
# The code below checks whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="points" to allow credit for listed alternative answers.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'points': self.grade_via_points,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Dropdown problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
problem_map=problem_map,
student_answers=student_answers
)
def get_problem_attributes(self):
"""
This returns a dict built out of three smaller dictionaries.
Keys are:
"correct":
A dictionary with problem ids as keys.
Entries are lists of the correct answers for that id.
"partial":
A dictionary with problem ids as keys.
Entries are lists of the partially-correct answers for that id.
"point_values":
Matches the "partial" one, but gives point values instead.
Defaults to 50% credit.
"""
default_credit = 0.5
problem_map = dict()
for target in ['correct', 'partial', 'point_values']:
small_map = dict([
(af.get('id'), contextualize_text(
af.get(target, default=None),
self.context
))
for af in self.answer_fields
])
for answer_id in small_map:
if small_map[answer_id] is not None:
# Split on commas and strip whitespace
# to allow for multiple options.
small_map[answer_id] = small_map[answer_id].split(',')
for index, word in enumerate(small_map[answer_id]):
# Pick out whether we're getting numbers or strings.
if target in ['point_values']:
small_map[answer_id][index] = float(word.strip())
else:
small_map[answer_id][index] = str(word.strip())
# If we find nothing and we're looking for points, return the default.
elif target == 'point_values':
if problem_map['partial'][answer_id] is not None:
num_partial = len(problem_map['partial'][answer_id])
small_map[answer_id] = [default_credit] * num_partial
else:
small_map[answer_id] = []
# Add a copy of the in-loop map to the big map.
problem_map[target] = dict(small_map)
return problem_map
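As an illustration (hypothetical ids and attribute values): for a dropdown with correct="blue", partial="teal,navy", and point_values="0.5,0.3", the method above would build roughly:

    problem_map = {
        'correct':      {'1_2_1': ['blue']},
        'partial':      {'1_2_1': ['teal', 'navy']},
        'point_values': {'1_2_1': [0.5, 0.3]},
    }
    # A student answering 'navy' would be marked partially-correct
    # with npoints = 0.3.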
def get_answers(self):
"""
Returns a dictionary with problem ids as keys.
Each entry is a list of the correct answers for that id.
"""
return self.get_problem_attributes()['correct']
amap = dict([(af.get('id'), contextualize_text(af.get(
'correct'), self.context)) for af in self.answer_fields])
return amap
def get_student_answer_variable_name(self, student_answers, aid):
"""
......@@ -1867,14 +1491,6 @@ class NumericalResponse(LoncapaResponse):
if self.answer_id not in student_answers:
return CorrectMap(self.answer_id, 'incorrect')
# Make sure we're using an approved partial credit style.
# Currently implemented: 'close' and 'list'
if self.has_partial_credit:
graders = ['list', 'close']
for style in self.credit_type:
if style not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
student_answer = student_answers[self.answer_id]
_ = self.capa_system.i18n.ugettext
......@@ -1909,30 +1525,6 @@ class NumericalResponse(LoncapaResponse):
except Exception:
raise general_exception
# End `evaluator` block -- we figured out the student's answer!
tree = self.xml
# What multiple of the tolerance is worth partial credit?
has_partial_range = tree.xpath('responseparam[@partial_range]')
if has_partial_range:
partial_range = float(has_partial_range[0].get('partial_range', default='2'))
else:
partial_range = 2
# Take in alternative answers that are worth partial credit.
has_partial_answers = tree.xpath('responseparam[@partial_answers]')
if has_partial_answers:
partial_answers = has_partial_answers[0].get('partial_answers').split(',')
for index, word in enumerate(partial_answers):
partial_answers[index] = word.strip()
partial_answers[index] = self.get_staff_ans(partial_answers[index])
else:
partial_answers = False
partial_score = 0.5
is_correct = 'incorrect'
if self.range_tolerance:
if isinstance(student_float, complex):
raise StudentInputError(_(u"You may not use complex numbers in range tolerance problems"))
......@@ -1954,71 +1546,19 @@ class NumericalResponse(LoncapaResponse):
tolerance=float_info.epsilon,
relative_tolerance=True
):
is_correct = 'correct' if inclusion else 'incorrect'
correct = inclusion
break
else:
if boundaries[0] < student_float < boundaries[1]:
is_correct = 'correct'
else:
if self.has_partial_credit is False:
pass
elif 'close' in self.credit_type:
# Partial credit: 50% if the student is outside the specified boundaries,
# but within an extended set of boundaries.
extended_boundaries = []
boundary_range = boundaries[1] - boundaries[0]
extended_boundaries.append(boundaries[0] - partial_range * boundary_range)
extended_boundaries.append(boundaries[1] + partial_range * boundary_range)
if extended_boundaries[0] < student_float < extended_boundaries[1]:
is_correct = 'partially-correct'
correct = boundaries[0] < student_float < boundaries[1]
else:
correct_float = self.get_staff_ans(self.correct_answer)
# Partial credit is available in three cases:
# If the student answer is within expanded tolerance of the actual answer,
# the student gets 50% credit. (Currently set as the default.)
# Set via partial_credit="close" in the numericalresponse tag.
#
# If the student answer is within regular tolerance of an alternative answer,
# the student gets 50% credit. (Same default.)
# Set via partial_credit="list"
#
# If the student answer is within expanded tolerance of an alternative answer,
# the student gets 25%. (We take the 50% and square it, at the moment.)
# Set via partial_credit="list,close" or "close, list" or the like.
if str(self.tolerance).endswith('%'):
expanded_tolerance = str(partial_range * float(str(self.tolerance)[:-1])) + '%'
else:
expanded_tolerance = partial_range * float(self.tolerance)
if compare_with_tolerance(student_float, correct_float, self.tolerance):
is_correct = 'correct'
elif self.has_partial_credit is False:
pass
elif 'list' in self.credit_type:
for value in partial_answers:
if compare_with_tolerance(student_float, value, self.tolerance):
is_correct = 'partially-correct'
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
break
elif compare_with_tolerance(student_float, value, expanded_tolerance):
is_correct = 'partially-correct'
partial_score = partial_score * partial_score
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
if is_correct == 'partially-correct':
return CorrectMap(self.answer_id, is_correct, npoints=partial_score)
correct = compare_with_tolerance(
student_float, correct_float, self.tolerance
)
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, is_correct)
return CorrectMap(self.answer_id, 'incorrect')
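A sketch of the removed 'close'/'list' rules, assuming a plain absolute tolerance in place of capa's compare_with_tolerance and the default partial_range of 2 (all names here are illustrative):

    def within(student, answer, tol):
        return abs(student - answer) <= tol

    def numerical_credit(student, answer, tol, credit_type=(),
                         alternatives=(), partial_range=2):
        if within(student, answer, tol):
            return 'correct', 1.0
        score = 0.5
        if 'list' in credit_type:
            for alt in alternatives:
                if within(student, alt, tol):
                    return 'partially-correct', score          # alternative hit: 50%
                if 'close' in credit_type and within(student, alt, partial_range * tol):
                    return 'partially-correct', score * score  # alternative, widened: 25%
        if 'close' in credit_type and within(student, answer, partial_range * tol):
            return 'partially-correct', score                  # widened tolerance: 50%
        return 'incorrect', 0.0

    assert numerical_credit(4.3, 4, 0.2, credit_type=('close',)) == ('partially-correct', 0.5)
    assert numerical_credit(8.0, 4, 0.2, credit_type=('list',),
                            alternatives=(2, 8, -4)) == ('partially-correct', 0.5)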
def compare_answer(self, ans1, ans2):
"""
......@@ -2327,9 +1867,6 @@ class CustomResponse(LoncapaResponse):
code = None
expect = None
# Standard amount for partial credit if not otherwise specified:
default_pc = 0.5
def setup_response(self):
xml = self.xml
......@@ -2506,12 +2043,7 @@ class CustomResponse(LoncapaResponse):
if grade_decimals:
npoints = max_points * grade_decimals[k]
else:
if correct[k] == 'correct':
npoints = max_points
elif correct[k] == 'partially-correct':
npoints = max_points * self.default_pc
else:
npoints = 0
npoints = max_points if correct[k] == 'correct' else 0
correct_map.set(idset[k], correct[k], msg=messages[k],
npoints=npoints)
return correct_map
......@@ -2552,30 +2084,13 @@ class CustomResponse(LoncapaResponse):
)
if isinstance(ret, dict):
# One kind of dictionary the check function can return has the
# form {'ok': BOOLEAN or STRING, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}
# form {'ok': BOOLEAN, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
# If there are multiple inputs, they all get marked
# to the same correct/incorrect value
if 'ok' in ret:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
ok_val = str(ret['ok']).lower().strip() if bool(ret['ok']) else 'false'
if ok_val == 'false':
correct = 'incorrect'
elif 'partial' in ok_val:
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset) # All inputs share the same mark.
# old version, no partial credit:
# correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
msg = ret.get('msg', None)
msg = self.clean_message_html(msg)
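The 'ok' convention deleted above, as a self-contained sketch (interpret_ok is an illustrative name): falsy values and the string "false" mean incorrect, any string containing "partial" means partial credit, and anything else truthy means correct:

    def interpret_ok(ok):
        ok_val = str(ok).lower().strip() if bool(ok) else 'false'
        if ok_val == 'false':
            return 'incorrect'
        if 'partial' in ok_val:
            return 'partially-correct'
        return 'correct'

    assert interpret_ok(True) == 'correct'
    assert interpret_ok('Partial') == 'partially-correct'
    assert interpret_ok('False') == 'incorrect'
    assert interpret_ok(0) == 'incorrect'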
......@@ -2587,14 +2102,9 @@ class CustomResponse(LoncapaResponse):
self.context['messages'][0] = msg
if 'grade_decimal' in ret:
decimal = float(ret['grade_decimal'])
decimal = ret['grade_decimal']
else:
if correct[0] == 'correct':
decimal = 1.0
elif correct[0] == 'partially-correct':
decimal = self.default_pc
else:
decimal = 0.0
decimal = 1.0 if ret['ok'] else 0.0
grade_decimals = [decimal] * len(idset)
self.context['grade_decimals'] = grade_decimals
......@@ -2602,11 +2112,7 @@ class CustomResponse(LoncapaResponse):
# the form:
# { 'overall_message': STRING,
# 'input_list': [
# {
# 'ok': BOOLEAN or STRING,
# 'msg': STRING,
# 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)
# },
# { 'ok': BOOLEAN, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)},
# ...
# ]
# }
......@@ -2623,35 +2129,16 @@ class CustomResponse(LoncapaResponse):
correct = []
messages = []
grade_decimals = []
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
for input_dict in input_list:
if str(input_dict['ok']).lower().strip() == "false" or not input_dict['ok']:
correct.append('incorrect')
elif 'partial' in str(input_dict['ok']).lower().strip():
correct.append('partially-correct')
else:
correct.append('correct')
# old version, no partial credit
# correct.append('correct'
# if input_dict['ok'] else 'incorrect')
correct.append('correct'
if input_dict['ok'] else 'incorrect')
msg = (self.clean_message_html(input_dict['msg'])
if 'msg' in input_dict else None)
messages.append(msg)
if 'grade_decimal' in input_dict:
decimal = input_dict['grade_decimal']
else:
if str(input_dict['ok']).lower().strip() == 'true':
decimal = 1.0
elif 'partial' in str(input_dict['ok']).lower().strip():
decimal = self.default_pc
else:
decimal = 0.0
decimal = 1.0 if input_dict['ok'] else 0.0
grade_decimals.append(decimal)
self.context['messages'] = messages
......@@ -2668,21 +2155,7 @@ class CustomResponse(LoncapaResponse):
)
else:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
if str(ret).lower().strip() == "false" or not bool(ret):
correct = 'incorrect'
elif 'partial' in str(ret).lower().strip():
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset)
# old version, no partial credit:
# correct = ['correct' if ret else 'incorrect'] * len(idset)
correct = ['correct' if ret else 'incorrect'] * len(idset)
self.context['correct'] = correct
......
......@@ -17,7 +17,7 @@
<div id="input_${id}_preview" class="equation"></div>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</div>
......@@ -7,8 +7,6 @@
<%
if status == 'correct':
correctness = 'correct'
elif status == 'partially-correct':
correctness = 'partially-correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
......@@ -33,7 +31,7 @@
/> ${choice_description}
% if input_type == 'radio' and ( (isinstance(value, basestring) and (choice_id == value)) or (not isinstance(value, basestring) and choice_id in value) ):
% if status in ('correct', 'partially-correct', 'incorrect') and not show_correctness=='never':
% if status in ('correct', 'incorrect') and not show_correctness=='never':
<span class="sr status">${choice_description|h} - ${status.display_name}</span>
% endif
% endif
......@@ -62,4 +60,4 @@
% if msg:
<span class="message">${msg|n}</span>
% endif
</form>
\ No newline at end of file
</form>
......@@ -20,8 +20,6 @@
correctness = 'correct'
elif status == 'incorrect':
correctness = 'incorrect'
elif status == 'partially-correct':
correctness = 'partially-correct'
else:
correctness = None
%>
......
......@@ -9,7 +9,7 @@
<div class="script_placeholder" data-src="/static/js/sylvester.js"></div>
<div class="script_placeholder" data-src="/static/js/crystallography.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="status ${status.classname}" id="status_${id}">
% endif
......@@ -25,7 +25,7 @@
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
......@@ -2,7 +2,7 @@
<div class="script_placeholder" data-src="/static/js/capa/protex/protex.nocache.js?raw"/>
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -15,7 +15,7 @@
</p>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
......@@ -8,7 +8,7 @@
<div class="script_placeholder" data-src="${STATIC_URL}js/capa/drag_and_drop.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -26,7 +26,7 @@
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</div>
......@@ -2,7 +2,7 @@
<div class="script_placeholder" data-src="/static/js/capa/genex/genex.nocache.js?raw"/>
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -16,7 +16,7 @@
</p>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
......
<section id="editamoleculeinput_${id}" class="editamoleculeinput">
<div class="script_placeholder" data-src="${applet_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -23,7 +23,7 @@
<div class="error_message" style="padding: 5px 5px 5px 5px; background-color:#FA6666; height:60px;width:400px; display: none"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
......@@ -20,7 +20,7 @@
<div class="script_placeholder" data-src="${jschannel_loader}"/>
<div class="script_placeholder" data-src="${jsinput_loader}"/>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -47,7 +47,7 @@
<div class="error_message" style="padding: 5px 5px 5px 5px; background-color:#FA6666; height:60px;width:400px; display: none"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
......
......@@ -7,7 +7,7 @@
<div class="script_placeholder" data-src="${preprocessor['script_src']}"/>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
% if status in ('unsubmitted', 'correct', 'incorrect', 'incomplete'):
<div class="${status.classname} ${doinline}" id="status_${id}">
% endif
% if hidden:
......@@ -50,7 +50,7 @@
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
% if status in ('unsubmitted', 'correct', 'incorrect', 'incomplete'):
</div>
% endif
......
......@@ -11,7 +11,7 @@
<div class="script_placeholder" data-src="/static/js/vsepr/vsepr.js"></div>
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
<div class="${status.classname}" id="status_${id}">
% endif
......@@ -28,7 +28,7 @@
% if msg:
<span class="message">${msg|n}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
......@@ -49,9 +49,6 @@ class ResponseXMLFactory(object):
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
Returns a string representation of the XML tree.
"""
......@@ -61,7 +58,6 @@ class ResponseXMLFactory(object):
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
credit_type = kwargs.get('credit_type', None)
# The root is <problem>
root = etree.Element("problem")
......@@ -79,11 +75,6 @@ class ResponseXMLFactory(object):
# Add the response(s)
for __ in range(int(num_responses)):
response_element = self.create_response_element(**kwargs)
# Set partial credit
if credit_type is not None:
response_element.set('partial_credit', str(credit_type))
root.append(response_element)
# Add input elements
......@@ -141,10 +132,6 @@ class ResponseXMLFactory(object):
*choice_names*: List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
*points*: List of strings giving partial credit values (0-1)
for each choice. Interpreted as floats in problem.
If specified, ensure len(points) == len(choices)
"""
# Names of group elements
group_element_names = {
......@@ -157,23 +144,15 @@ class ResponseXMLFactory(object):
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
points = kwargs.get('points', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert choice_type in group_element_names
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name, pointval) in zip(choices, choice_names, points):
for (correct_val, name) in zip(choices, choice_names):
choice_element = etree.SubElement(group_element, "choice")
if correct_val is True:
correctness = 'true'
elif correct_val is False:
correctness = 'false'
elif 'partial' in correct_val:
correctness = 'partial'
choice_element.set('correct', correctness)
choice_element.set("correct", "true" if correct_val else "false")
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
......@@ -182,10 +161,6 @@ class ResponseXMLFactory(object):
choice_element.text = str(name)
choice_element.set("name", str(name))
# Add point values for partially-correct choices.
if pointval:
choice_element.set("point_value", str(pointval))
return group_element
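For reference, the markup the removed branches produced, built with lxml directly (the choice text and point value here are made up):

    from lxml import etree

    group = etree.Element('checkboxgroup')
    choice = etree.SubElement(group, 'choice')
    choice.set('correct', 'partial')      # emitted when 'partial' in correct_val
    choice.set('point_value', '0.25')     # only set when a point value was given
    choice.text = 'foil_1'

    print(etree.tostring(group))
    # <checkboxgroup><choice correct="partial" point_value="0.25">foil_1</choice></checkboxgroup>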
......@@ -201,22 +176,10 @@ class NumericalResponseXMLFactory(ResponseXMLFactory):
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
*partial_range*: The multiplier for the tolerance that will
still provide partial credit in the "close" grading style
*partial_answers*: A string of comma-separated alternate
answers that will receive partial credit in the "list" style
"""
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
credit_type = kwargs.get('credit_type', None)
partial_range = kwargs.get('partial_range', None)
partial_answers = kwargs.get('partial_answers', None)
response_element = etree.Element('numericalresponse')
......@@ -230,13 +193,6 @@ class NumericalResponseXMLFactory(ResponseXMLFactory):
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
if partial_range is not None and 'close' in credit_type:
responseparam_element.set('partial_range', str(partial_range))
if partial_answers is not None and 'list' in credit_type:
# The line below triggers a false-positive pylint violation, so it is suppressed.
responseparam_element = etree.SubElement(response_element, 'responseparam') # pylint: disable=E1101
responseparam_element.set('partial_answers', partial_answers)
return response_element
......@@ -673,25 +629,15 @@ class OptionResponseXMLFactory(ResponseXMLFactory):
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: a string with comma-separated correct choices [REQUIRED]
*partial_option*: a string with comma-separated partially-correct choices
*point_values*: a string with comma-separated values (0-1) that give the
partial credit values in the "points" grading scheme.
Must have one per partial option.
*credit_type*: String of comma-separated words specifying the
partial credit grading scheme.
*correct_option*: the correct choice from the list of options [REQUIRED]
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
partial_option = kwargs.get('partial_option', None)
point_values = kwargs.get('point_values', None)
credit_type = kwargs.get('credit_type', None)
assert options_list and correct_option
assert len(options_list) > 1
for option in correct_option.split(','):
assert option.strip() in options_list
assert correct_option in options_list
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
......@@ -705,15 +651,6 @@ class OptionResponseXMLFactory(ResponseXMLFactory):
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
# If we have 'points'-style partial credit...
if 'points' in str(credit_type):
# Set the "partial" attribute
optioninput_element.set('partial', str(partial_option))
# Set the "point_values" attribute, if it's specified.
if point_values is not None:
optioninput_element.set('point_values', str(point_values))
return optioninput_element
......
......@@ -17,7 +17,7 @@ class CorrectMapTest(unittest.TestCase):
self.cmap = CorrectMap()
def test_set_input_properties(self):
# Set the correctmap properties for three inputs
# Set the correctmap properties for two inputs
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
......@@ -41,34 +41,15 @@ class CorrectMapTest(unittest.TestCase):
queuestate=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='partially-correct',
npoints=3,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
# Assert that each input has the expected properties
self.assertTrue(self.cmap.is_correct('1_2_1'))
self.assertFalse(self.cmap.is_correct('2_2_1'))
self.assertTrue(self.cmap.is_correct('3_2_1'))
self.assertTrue(self.cmap.is_partially_correct('3_2_1'))
self.assertFalse(self.cmap.is_partially_correct('2_2_1'))
# Intentionally testing an item that's not in cmap.
self.assertFalse(self.cmap.is_partially_correct('9_2_1'))
self.assertEqual(self.cmap.get_correctness('1_2_1'), 'correct')
self.assertEqual(self.cmap.get_correctness('2_2_1'), 'incorrect')
self.assertEqual(self.cmap.get_correctness('3_2_1'), 'partially-correct')
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 3)
self.assertEqual(self.cmap.get_msg('1_2_1'), 'Test message')
self.assertEqual(self.cmap.get_msg('2_2_1'), None)
......@@ -102,8 +83,6 @@ class CorrectMapTest(unittest.TestCase):
# 3) incorrect, 5 points
# 4) incorrect, None points
# 5) correct, 0 points
# 6) partially correct, 2.5 points
# 7) partially correct, None points
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
......@@ -134,30 +113,15 @@ class CorrectMapTest(unittest.TestCase):
npoints=0
)
self.cmap.set(
answer_id='6_2_1',
correctness='partially-correct',
npoints=2.5
)
self.cmap.set(
answer_id='7_2_1',
correctness='partially-correct',
npoints=None
)
# Assert that we get the expected points
# If points assigned --> npoints
# If no points assigned and correct --> 1 point
# If no points assigned and partially correct --> 1 point
# If no points assigned and incorrect --> 0 points
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5.3)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 1)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('4_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('5_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('6_2_1'), 2.5)
self.assertEqual(self.cmap.get_npoints('7_2_1'), 1)
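The scoring rules the comment above spells out, as a standalone sketch (get_npoints here is illustrative, not the CorrectMap method):

    def get_npoints(correctness, npoints=None):
        # Explicit points win; otherwise correct and partially correct
        # earn 1 point and incorrect earns 0.
        if npoints is not None:
            return npoints
        if correctness in ('correct', 'partially-correct'):
            return 1
        return 0

    assert get_npoints('correct', 5.3) == 5.3
    assert get_npoints('partially-correct') == 1
    assert get_npoints('incorrect') == 0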
def test_set_overall_message(self):
......
......@@ -82,23 +82,6 @@ class ResponseTest(unittest.TestCase):
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect')
def assert_multiple_partial(self, problem, correct_answers, incorrect_answers, partial_answers):
"""
Runs multiple asserts for varying correct, incorrect,
and partially correct answers, all passed as lists.
"""
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'correct')
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect')
for input_str in partial_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'partially-correct')
def _get_random_number_code(self):
"""Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))"
......@@ -120,14 +103,6 @@ class MultiChoiceResponseTest(ResponseTest):
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'incorrect')
def test_partial_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points')
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_0', 'incorrect')
self.assert_grade(problem, 'choice_1', 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False],
choice_names=["foil_1", "foil_2", "foil_3"])
......@@ -137,38 +112,6 @@ class MultiChoiceResponseTest(ResponseTest):
self.assert_grade(problem, 'choice_foil_2', 'correct')
self.assert_grade(problem, 'choice_foil_3', 'incorrect')
def test_multiple_choice_valid_grading_schemes(self):
# Multiple Choice problems only allow one partial credit scheme.
# Change this test if that changes.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points,points')
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(choices=[False, True, 'partial'], credit_type='bongo')
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
def test_partial_points_multiple_choice_grade(self):
problem = self.build_problem(
choices=['partial', 'partial', 'partial'],
credit_type='points',
points=['1', '0.6', '0']
)
# Ensure that we get the expected number of points
# Using assertAlmostEqual to avoid floating point issues
correct_map = problem.grade_answers({'1_2_1': 'choice_0'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 1)
correct_map = problem.grade_answers({'1_2_1': 'choice_1'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.6)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0)
class TrueFalseResponseTest(ResponseTest):
xml_factory_class = TrueFalseResponseXMLFactory
......@@ -409,77 +352,6 @@ class OptionResponseTest(ResponseTest):
# Options not in the list should be marked incorrect
self.assert_grade(problem, "invalid_option", "incorrect")
def test_grade_multiple_correct(self):
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second,third"
)
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
self.assert_grade(problem, "second", "correct")
self.assert_grade(problem, "third", "correct")
def test_grade_partial_credit(self):
# Testing the "points" style.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points",
partial_option="third"
)
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
self.assert_grade(problem, "second", "correct")
self.assert_grade(problem, "third", "partially-correct")
def test_grade_partial_credit_with_points(self):
# Testing the "points" style with specified point values.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points",
partial_option="third",
point_values="0.3"
)
# Assert that we get the expected grades and scores
self.assert_grade(problem, "first", "incorrect")
correct_map = problem.grade_answers({'1_2_1': 'first'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0)
self.assert_grade(problem, "second", "correct")
correct_map = problem.grade_answers({'1_2_1': 'second'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 1)
self.assert_grade(problem, "third", "partially-correct")
correct_map = problem.grade_answers({'1_2_1': 'third'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.3)
def test_grade_partial_credit_valid_scheme(self):
# Only one type of partial credit currently allowed.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="points,points",
partial_option="third"
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'second'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(
options=["first", "second", "third"],
correct_option="second",
credit_type="bongo",
partial_option="third"
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'second'}
problem.grade_answers(input_dict)
def test_quote_option(self):
# Test that option response properly escapes quotes inside options strings
problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"],
......@@ -511,29 +383,6 @@ class OptionResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$a')
def test_variable_options_partial_credit(self):
"""
Test that if variables are given in an option response, then the correct map must contain the answervariable value.
This is the partial-credit version.
"""
script = textwrap.dedent("""\
a = 1000
b = a*2
c = a*3
""")
problem = self.build_problem(
options=['$a', '$b', '$c'],
correct_option='$a',
partial_option='$b',
script=script,
credit_type='points',
)
input_dict = {'1_2_1': '2000'}
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$b')
class FormulaResponseTest(ResponseTest):
"""
......@@ -1262,112 +1111,6 @@ class ChoiceResponseTest(ResponseTest):
# No choice 3 exists --> mark incorrect
self.assert_grade(problem, 'choice_3', 'incorrect')
def test_checkbox_group_valid_grading_schemes(self):
# Checkbox-type problems only allow one partial credit scheme.
# Change this test if that changes.
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc,halves,bongo'
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='bongo'
)
with self.assertRaises(LoncapaProblemError):
input_dict = {'1_2_1': 'choice_1'}
problem.grade_answers(input_dict)
def test_checkbox_group_partial_credit_grade(self):
# First: Every Decision Counts grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc'
)
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
# (partially correct if at least one choice is right)
# (incorrect if totally wrong)
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_0', 'partially-correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
# Second: Halves grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='halves'
)
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
# (partially correct on one error)
# (incorrect for more errors, at least with this # of choices.)
self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'incorrect')
# Third: Halves grading style with more options
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True, False],
credit_type='halves'
)
# Check that we get the expected results
# (2 errors allowed with 5+ choices)
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_4'], 'incorrect')
self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
self.assert_grade(problem, 'choice_2', 'partially-correct')
self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3', 'choice_4'], 'incorrect')
def test_checkbox_group_partial_points_grade(self):
# Ensure that we get the expected number of points
# Using assertAlmostEqual to avoid floating point issues
# First: Every Decision Counts grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='edc'
)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.75)
# Second: Halves grading style
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True],
credit_type='halves'
)
correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.5)
# Third: Halves grading style with more options
problem = self.build_problem(
choice_type='checkbox',
choices=[False, False, True, True, False],
credit_type='halves'
)
correct_map = problem.grade_answers({'1_2_1': ['choice_2', 'choice_4']})
self.assertAlmostEqual(correct_map.get_npoints('1_2_1'), 0.25)
def test_grade_with_no_checkbox_selected(self):
"""
Test that the answer is marked incorrect if no checkbox is selected.
......@@ -1428,7 +1171,7 @@ class NumericalResponseTest(ResponseTest):
# For simple things it's not worth the effort.
def test_grade_range_tolerance(self):
problem_setup = [
# [given_answer, [list of correct responses], [list of incorrect responses]]
['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']],
['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']],
['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]],
......@@ -1438,41 +1181,6 @@ class NumericalResponseTest(ResponseTest):
problem = self.build_problem(answer=given_answer)
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_range_tolerance_partial_credit(self):
problem_setup = [
# [given_answer,
# [list of correct responses],
# [list of incorrect responses],
# [list of partially correct responses]]
[
'[5, 7)',
['5', '6', '6.999'],
['0', '100'],
['4', '8']
],
[
'[1.6e-5, 1.9e24)',
['0.000016', '1.6*10^-5', '1.59e24'],
['-1e26', '1.9e26', '1.9*10^26'],
['0', '2e24']
],
[
'[0, 1.6e-5]',
['1.6*10^-5'],
['2'],
['1.9e-5', '-1e-6']
],
[
'(1.6e-5, 10]',
['2'],
['-20', '30'],
['-1', '12']
],
]
for given_answer, correct_responses, incorrect_responses, partial_responses in problem_setup:
problem = self.build_problem(answer=given_answer, credit_type='close')
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
def test_grade_range_tolerance_exceptions(self):
# no complex number in range tolerance staff answer
problem = self.build_problem(answer='[1j, 5]')
......@@ -1510,61 +1218,6 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "3.9", "4.1", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_partial(self):
# First: "list"-style grading scheme.
problem = self.build_problem(
answer=4,
credit_type='list',
partial_answers='2,8,-4'
)
correct_responses = ["4", "4.0"]
incorrect_responses = ["1", "3", "4.1", "0", "-2"]
partial_responses = ["2", "2.0", "-4", "-4.0", "8", "8.0"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Second: "close"-style grading scheme. Default range is twice tolerance.
problem = self.build_problem(
answer=4,
tolerance=0.2,
credit_type='close'
)
correct_responses = ["4", "4.1", "3.9"]
incorrect_responses = ["1", "3", "4.5", "0", "-2"]
partial_responses = ["4.3", "3.7"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Third: "close"-style grading scheme with partial_range set.
problem = self.build_problem(
answer=4,
tolerance=0.2,
partial_range=3,
credit_type='close'
)
correct_responses = ["4", "4.1"]
incorrect_responses = ["1", "3", "0", "-2"]
partial_responses = ["4.5", "3.5"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
# Fourth: both "list"- and "close"-style grading schemes at once.
problem = self.build_problem(
answer=4,
tolerance=0.2,
partial_range=3,
credit_type='close,list',
partial_answers='2,8,-4'
)
correct_responses = ["4", "4.0"]
incorrect_responses = ["1", "3", "0", "-2"]
partial_responses = ["2", "2.1", "1.5", "8", "7.5", "8.1", "-4", "-4.15", "-3.5", "4.5", "3.5"]
self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
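Putting the two schemes above together, a numerical grader consistent with these assertions might look like the sketch below. The partial fraction, the widened window for listed answers, and all names are assumptions inferred from the test data, not capa's internals.
def grade_numerical(student, answer, tolerance, credit_types,
                    partial_answers=(), partial_range=2, partial_fraction=0.5):
    # 'close' widens the accepted window to partial_range * tolerance
    # (default twice the tolerance, per the comment above); 'list' grants
    # partial credit for any value near an entry of partial_answers.
    if abs(student - answer) <= tolerance:
        return 'correct', 1.0
    window = partial_range * tolerance
    if 'close' in credit_types and abs(student - answer) <= window:
        return 'partially-correct', partial_fraction
    if 'list' in credit_types and any(
            abs(student - extra) <= window for extra in partial_answers):
        return 'partially-correct', partial_fraction
    return 'incorrect', 0.0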
def test_numerical_valid_grading_schemes(self):
# 'bongo' is not a valid grading scheme.
problem = self.build_problem(answer=4, tolerance=0.1, credit_type='bongo')
input_dict = {'1_2_1': '4'}
with self.assertRaises(LoncapaProblemError):
problem.grade_answers(input_dict)
def test_grade_decimal_tolerance(self):
problem = self.build_problem(answer=4, tolerance=0.1)
correct_responses = ["4.0", "4.00", "4.09", "3.91"]
......@@ -1791,18 +1444,11 @@ class CustomResponseTest(ResponseTest):
# or an ordered list of answers (if there are multiple inputs)
#
# The function should return a dict of the form
# { 'ok': BOOL or STRING, 'msg': STRING } (no 'grade_decimal' key to test that it's optional)
# { 'ok': BOOL, 'msg': STRING } (no 'grade_decimal' key to test that it's optional)
#
script = textwrap.dedent("""
def check_func(expect, answer_given):
partial_credit = '21'
if answer_given == expect:
retval = True
elif answer_given == partial_credit:
retval = 'partial'
else:
retval = False
return {'ok': retval, 'msg': 'Message text'}
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
problem = self.build_problem(script=script, cfn="check_func", expect="42")
......@@ -1819,18 +1465,6 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(msg, "Message text")
self.assertEqual(npoints, 1)
# Partially correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
npoints = correct_map.get_npoints('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertEqual(msg, "Message text")
self.assertTrue(0 <= npoints <= 1)
# Incorrect answer
input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict)
......@@ -1852,24 +1486,14 @@ class CustomResponseTest(ResponseTest):
# or an ordered list of answers (if there are multiple inputs)
#
# The function should return a dict of the form
# { 'ok': BOOL or STRING, 'msg': STRING, 'grade_decimal': FLOAT }
# { 'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT }
#
script = textwrap.dedent("""
def check_func(expect, answer_given):
partial_credit = '21'
if answer_given == expect:
retval = True
score = 0.9
elif answer_given == partial_credit:
retval = 'partial'
score = 0.5
else:
retval = False
score = 0.1
return {
'ok': retval,
'ok': answer_given == expect,
'msg': 'Message text',
'grade_decimal': score,
'grade_decimal': 0.9 if answer_given == expect else 0.1,
}
""")
......@@ -1887,28 +1511,16 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
# Partially Correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.5)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
def test_function_code_multiple_input_no_msg(self):
# Check functions also have the option of returning
# a single boolean or string value
# a single boolean value
# If true, mark all the inputs correct
# If one is true but not the other, mark all partially correct
# If false, mark all the inputs incorrect
script = textwrap.dedent("""
def check_func(expect, answer_given):
if answer_given[0] == expect and answer_given[1] == expect:
retval = True
elif answer_given[0] == expect or answer_given[1] == expect:
retval = 'partial'
else:
retval = False
return retval
return (answer_given[0] == expect and
answer_given[1] == expect)
""")
problem = self.build_problem(script=script, cfn="check_func",
......@@ -1924,23 +1536,11 @@ class CustomResponseTest(ResponseTest):
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'correct')
# One answer incorrect -- expect both inputs marked partially correct
# One answer incorrect -- expect both inputs marked incorrect
input_dict = {'1_2_1': '0', '1_2_2': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertTrue(0 <= correct_map.get_npoints('1_2_1') <= 1)
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'partially-correct')
self.assertTrue(0 <= correct_map.get_npoints('1_2_2') <= 1)
# Both answers incorrect -- expect both inputs marked incorrect
input_dict = {'1_2_1': '0', '1_2_2': '0'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
self.assertEqual(correctness, 'incorrect')
correctness = correct_map.get_correctness('1_2_2')
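A sketch of how such a bare return value from check_func could be fanned out to every input of the response, following the marking rules in the comments above; the helper and its name are assumed for illustration, not capa's actual expansion logic:
def expand_overall_result(retval, answer_ids):
    # One overall result applies to every input: True -> correct,
    # 'partial' -> partially-correct, anything falsy -> incorrect.
    if retval == 'partial':
        correctness = 'partially-correct'
    elif retval:
        correctness = 'correct'
    else:
        correctness = 'incorrect'
    return {answer_id: correctness for answer_id in answer_ids}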
......@@ -1952,8 +1552,7 @@ class CustomResponseTest(ResponseTest):
# the check function can return a dict of the form:
#
# {'overall_message': STRING,
# 'input_list': [{'ok': BOOL or STRING, 'msg': STRING}, ...] }
# (no grade_decimal to test it's optional)
# 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] } (no grade_decimal to test it's optional)
#
# 'overall_message' is displayed at the end of the response
#
......@@ -1964,20 +1563,18 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
check4 = 'partial' if answer_given[3] == 'four' else False
return {'overall_message': 'Overall message',
'input_list': [
{'ok': check1, 'msg': 'Feedback 1'},
{'ok': check2, 'msg': 'Feedback 2'},
{'ok': check3, 'msg': 'Feedback 3'},
{'ok': check4, 'msg': 'Feedback 4'} ] }
{'ok': check3, 'msg': 'Feedback 3'} ] }
""")
problem = self.build_problem(script=script,
cfn="check_func", num_inputs=4)
cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'}
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Expect that we receive the overall message (for the whole response)
......@@ -1987,19 +1584,16 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_4'), 'partially-correct')
# Expect that the inputs were given correct npoints
self.assertEqual(correct_map.get_npoints('1_2_1'), 0)
self.assertEqual(correct_map.get_npoints('1_2_2'), 1)
self.assertEqual(correct_map.get_npoints('1_2_3'), 1)
self.assertTrue(0 <= correct_map.get_npoints('1_2_4') <= 1)
# Expect that we received messages for each individual input
self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1')
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
self.assertEqual(correct_map.get_msg('1_2_4'), 'Feedback 4')
def test_function_code_multiple_inputs_decimal_score(self):
......@@ -2007,8 +1601,7 @@ class CustomResponseTest(ResponseTest):
# the check function can return a dict of the form:
#
# {'overall_message': STRING,
# 'input_list': [{'ok': BOOL or STRING,
# 'msg': STRING, 'grade_decimal': FLOAT}, ...] }
# 'input_list': [{'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT}, ...] }
#
# 'input_list' contains dictionaries representing the correctness
# and message for each input.
......@@ -2017,51 +1610,39 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
check4 = 'partial' if answer_given[3] == 'four' else False
score1 = 0.9 if check1 else 0.1
score2 = 0.9 if check2 else 0.1
score3 = 0.9 if check3 else 0.1
score4 = 0.7 if check4 == 'partial' else 0.1
return {
'input_list': [
{'ok': check1, 'grade_decimal': score1, 'msg': 'Feedback 1'},
{'ok': check2, 'grade_decimal': score2, 'msg': 'Feedback 2'},
{'ok': check3, 'grade_decimal': score3, 'msg': 'Feedback 3'},
{'ok': check4, 'grade_decimal': score4, 'msg': 'Feedback 4'},
]
}
""")
problem = self.build_problem(script=script, cfn="check_func", num_inputs=4)
problem = self.build_problem(script=script, cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'}
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Expect that the inputs were graded individually
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_4'), 'partially-correct')
# Expect that the inputs were given correct npoints
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
self.assertEqual(correct_map.get_npoints('1_2_2'), 0.9)
self.assertEqual(correct_map.get_npoints('1_2_3'), 0.9)
self.assertEqual(correct_map.get_npoints('1_2_4'), 0.7)
def test_function_code_with_extra_args(self):
script = textwrap.dedent("""\
def check_func(expect, answer_given, options, dynamath):
assert options == "xyzzy", "Options was %r" % options
partial_credit = '21'
if answer_given == expect:
retval = True
elif answer_given == partial_credit:
retval = 'partial'
else:
retval = False
return {'ok': retval, 'msg': 'Message text'}
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
......@@ -2076,16 +1657,6 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correctness, 'correct')
self.assertEqual(msg, "Message text")
# Partially Correct answer
input_dict = {'1_2_1': '21'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'partially-correct')
self.assertEqual(msg, "Message text")
# Incorrect answer
input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict)
......@@ -2112,12 +1683,8 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
if (int(answer_given[0]) == -1) and check2 and check3:
return {'ok': 'partial',
'msg': 'Message text'}
else:
return {'ok': (check1 and check2 and check3),
'msg': 'Message text'}
return {'ok': (check1 and check2 and check3),
'msg': 'Message text'}
""")
problem = self.build_problem(script=script,
......@@ -2132,15 +1699,6 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')
# Grade the inputs (one input partially correct)
input_dict = {'1_2_1': '-1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Everything marked partially correct
self.assertEqual(correct_map.get_correctness('1_2_1'), 'partially-correct')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'partially-correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'partially-correct')
# Grade the inputs (everything correct)
input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
......
......@@ -24,7 +24,6 @@
$annotation-yellow: rgba(255,255,10,0.3);
$color-copy-tip: rgb(100,100,100);
$correct: $green-d1;
$partiallycorrect: $green-d1;
$incorrect: $red;
// +Extends - Capa
......@@ -76,11 +75,6 @@ h2 {
color: $correct;
}
.feedback-hint-partially-correct {
margin-top: ($baseline/2);
color: $partiallycorrect;
}
.feedback-hint-incorrect {
margin-top: ($baseline/2);
color: $incorrect;
......@@ -180,16 +174,6 @@ div.problem {
}
}
&.choicegroup_partially-correct {
@include status-icon($partiallycorrect, "\f00c");
border: 2px solid $partiallycorrect;
// keep green for partially correct answers on hover.
&:hover {
border-color: $partiallycorrect;
}
}
&.choicegroup_incorrect {
@include status-icon($incorrect, "\f00d");
border: 2px solid $incorrect;
......@@ -243,11 +227,6 @@ div.problem {
@include status-icon($correct, "\f00c");
}
// CASE: partially correct answer
&.partially-correct {
@include status-icon($partiallycorrect, "\f00c");
}
// CASE: incorrect answer
&.incorrect {
@include status-icon($incorrect, "\f00d");
......@@ -359,19 +338,6 @@ div.problem {
}
}
&.partially-correct, &.ui-icon-check {
p.status {
display: inline-block;
width: 25px;
height: 20px;
background: url('../images/partially-correct-icon.png') center center no-repeat;
}
input {
border-color: $partiallycorrect;
}
}
&.processing {
p.status {
display: inline-block;
......@@ -747,7 +713,7 @@ div.problem {
height: 46px;
}
> .incorrect, .partially-correct, .correct, .unanswered {
> .incorrect, .correct, .unanswered {
.status {
display: inline-block;
......@@ -768,18 +734,6 @@ div.problem {
}
}
// CASE: partially correct answer
> .partially-correct {
input {
border: 2px solid $partiallycorrect;
}
.status {
@include status-icon($partiallycorrect, "\f00c");
}
}
// CASE: correct answer
> .correct {
......@@ -821,7 +775,7 @@ div.problem {
.indicator-container {
display: inline-block;
.status.correct:after, .status.partially-correct:after, .status.incorrect:after, .status.unanswered:after {
.status.correct:after, .status.incorrect:after, .status.unanswered:after {
@include margin-left(0);
}
}
......@@ -987,20 +941,6 @@ div.problem {
}
}
.detailed-targeted-feedback-partially-correct {
> p:first-child {
@extend %t-strong;
color: $partiallycorrect;
text-transform: uppercase;
font-style: normal;
font-size: 0.9em;
}
p:last-child {
margin-bottom: 0;
}
}
.detailed-targeted-feedback-correct {
> p:first-child {
@extend %t-strong;
......@@ -1195,14 +1135,6 @@ div.problem {
}
}
.result-partially-correct {
background: url('../images/partially-correct-icon.png') left 20px no-repeat;
.result-actual-output {
color: #090;
}
}
.result-incorrect {
background: url('../images/incorrect-icon.png') left 20px no-repeat;
......@@ -1408,14 +1340,6 @@ div.problem {
}
}
label.choicetextgroup_partially-correct, section.choicetextgroup_partially-correct {
@extend label.choicegroup_partially-correct;
input[type="text"] {
border-color: $partiallycorrect;
}
}
label.choicetextgroup_incorrect, section.choicetextgroup_incorrect {
@extend label.choicegroup_incorrect;
}
......
......@@ -64,12 +64,6 @@ class ProblemPage(PageObject):
"""
self.q(css='div.problem div.capa_inputtype.textline input').fill(text)
def fill_answer_numerical(self, text):
"""
Fill in the answer to a numerical problem.
"""
self.q(css='div.problem section.inputtype input').fill(text)
def click_check(self):
"""
Click the Check button!
......@@ -90,24 +84,6 @@ class ProblemPage(PageObject):
"""
return self.q(css="div.problem div.capa_inputtype.textline div.correct span.status").is_present()
def simpleprob_is_correct(self):
"""
Is there a "correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.correct span.status").is_present()
def simpleprob_is_partially_correct(self):
"""
Is there a "partially correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.partially-correct span.status").is_present()
def simpleprob_is_incorrect(self):
"""
Is there an "incorrect" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.incorrect span.status").is_present()
def click_clarification(self, index=0):
"""
Click on an inline icon that can be included in problem text using an HTML <clarification> element:
......
......@@ -213,35 +213,3 @@ class ProblemWithMathjax(ProblemsTest):
self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in the problem hint")
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that the partial credit is appearing properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_check()
self.assertTrue(problem_page.simpleprob_is_partially_correct())
......@@ -2,6 +2,7 @@
This test file will run through some LMS test scenarios regarding access and navigation of the LMS
"""
import time
from mock import patch
from nose.plugins.attrib import attr
from django.conf import settings
......@@ -12,6 +13,7 @@ from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.factories import GlobalStaffFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.django import modulestore
@attr('shard_1')
......@@ -266,3 +268,45 @@ class TestNavigation(ModuleStoreTestCase, LoginEnrollmentTestCase):
kwargs={'course_id': test_course_id}
)
self.assert_request_status_code(302, url)
def test_proctoring_js_includes(self):
"""
Make sure that proctoring JS does not get included on
courseware pages if either the FEATURE flag is turned off
or the course is not proctored enabled
"""
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.test_course, True)
test_course_id = self.test_course.id.to_deprecated_string()
with patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': False}):
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
resp = self.client.get(url)
self.assertNotContains(resp, '/static/js/lms-proctoring.js')
with patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': True}):
url = reverse(
'courseware',
kwargs={'course_id': test_course_id}
)
resp = self.client.get(url)
self.assertNotContains(resp, '/static/js/lms-proctoring.js')
# now set up a course which is proctored enabled
self.test_course.enable_proctored_exams = True
self.test_course.save()
modulestore().update_item(self.test_course, self.user.id)
resp = self.client.get(url)
self.assertContains(resp, '/static/js/lms-proctoring.js')
......@@ -39,6 +39,34 @@ class TestProctoringDashboardViews(ModuleStoreTestCase):
"""
Test that the Proctoring Tab is in the Instructor Dashboard
"""
self.instructor.is_staff = True
self.instructor.save()
response = self.client.get(self.url)
self.assertTrue(self.proctoring_link in response.content)
self.assertTrue('Allowance Section' in response.content)
def test_no_tab_non_global_staff(self):
"""
Test that the Proctoring Tab is not in the Instructor Dashboard
for non-global-staff users
"""
self.instructor.is_staff = False
self.instructor.save()
response = self.client.get(self.url)
self.assertFalse(self.proctoring_link in response.content)
self.assertFalse('Allowance Section' in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': False})
def test_no_tab_flag_unset(self):
"""
Test that the Proctoring Tab is not in the Instructor Dashboard
if the feature flag 'ENABLE_PROCTORED_EXAMS' is unset.
"""
self.instructor.is_staff = True
self.instructor.save()
response = self.client.get(self.url)
self.assertFalse(self.proctoring_link in response.content)
self.assertFalse('Allowance Section' in response.content)
......@@ -143,7 +143,13 @@ def instructor_dashboard_2(request, course_id):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Proctoring tab
if settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and course.enable_proctored_exams:
# only global staff (user.is_staff) is allowed to see this tab
can_see_proctoring = (
settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and
course.enable_proctored_exams and
request.user.is_staff
)
if can_see_proctoring:
sections.append(_section_proctoring(course, access))
# Certificates panel
......
......@@ -10,6 +10,7 @@
this.perPage = options.per_page || 10;
this.username = options.username;
this.privileged = options.privileged;
this.staff = options.staff;
this.server_api = _.extend(
{
......@@ -26,11 +27,11 @@
model: TeamMembershipModel,
canUserCreateTeam: function() {
// Note: non-privileged users are automatically added to any team
// Note: non-staff and non-privileged users are automatically added to any team
// that they create. This means that if multiple team membership is
// disabled that they cannot create a new team when they already
// belong to one.
return this.privileged || this.length === 0;
return this.privileged || this.staff || this.length === 0;
}
});
return TeamMembershipCollection;
......
......@@ -15,6 +15,7 @@ define(["jquery", "backbone", "teams/js/teams_tab_factory"],
userInfo: {
username: 'test-user',
privileged: false,
staff: false,
team_memberships_data: null
}
});
......
......@@ -173,7 +173,7 @@ define([
AjaxHelpers.respondWithError(
requests,
400,
{'error_message': {'user_message': 'User message', 'developer_message': 'Developer message' }}
{'user_message': 'User message', 'developer_message': 'Developer message'}
);
expect(teamEditView.$('.wrapper-msg .copy').text().trim()).toBe("User message");
......
......@@ -130,7 +130,7 @@ define([
it('does not allow access if the user is neither privileged nor a team member', function () {
var teamsTabView = createTeamsTabView({
userInfo: TeamSpecHelpers.createMockUserInfo({ privileged: false })
userInfo: TeamSpecHelpers.createMockUserInfo({ privileged: false, staff: true })
});
expect(teamsTabView.readOnlyDiscussion({
attributes: { membership: [] }
......
......@@ -100,6 +100,15 @@ define([
verifyActions(teamsView);
});
it('shows actions for a staff user already in a team', function () {
var staffMembership = TeamSpecHelpers.createMockTeamMemberships(
TeamSpecHelpers.createMockTeamMembershipsData(1, 5),
{ privileged: false, staff: true }
),
teamsView = createTopicTeamsView({ teamMemberships: staffMembership });
verifyActions(teamsView);
});
/*
// TODO: make this ready for prime time
it('refreshes when the team membership changes', function() {
......
......@@ -89,7 +89,8 @@ define([
parse: true,
url: 'api/teams/team_memberships',
username: testUser,
privileged: false
privileged: false,
staff: false
}),
options)
);
......@@ -100,6 +101,7 @@ define([
{
username: testUser,
privileged: false,
staff: false,
team_memberships_data: createMockTeamMembershipsData(1, 5)
},
options
......
......@@ -125,9 +125,9 @@
})
.fail(function(data) {
var response = JSON.parse(data.responseText);
var message = gettext("An error occurred. Please try again.")
if ('error_message' in response && 'user_message' in response['error_message']){
message = response['error_message']['user_message'];
var message = gettext("An error occurred. Please try again.");
if ('user_message' in response){
message = response.user_message;
}
view.showMessage(message, message);
});
......
......@@ -92,6 +92,7 @@
course_id: this.courseID,
username: this.userInfo.username,
privileged: this.userInfo.privileged,
staff: this.userInfo.staff,
parse: true
}
).bootstrap();
......
......@@ -143,7 +143,8 @@ class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
'name': 'Public Profiles',
'description': 'Description for topic 6.'
},
]
],
'max_team_size': 1
}
cls.test_course_2 = CourseFactory.create(
org='MIT',
......@@ -185,6 +186,13 @@ class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
profile.year_of_birth = 1970
profile.save()
# This student is enrolled in the other course, but not yet a member of a team. This is to allow
# course_2 to use a max_team_size of 1 without breaking other tests on course_1
self.create_and_enroll_student(
courses=[self.test_course_2],
username='student_enrolled_other_course_not_on_team'
)
# 'solar team' is intentionally lower case to test case insensitivity in name ordering
self.test_team_1 = CourseTeamFactory.create(
name=u'sólar team',
......@@ -219,6 +227,14 @@ class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
self.test_team_5.add_user(self.users['student_enrolled_both_courses_other_team'])
self.test_team_6.add_user(self.users['student_enrolled_public_profile'])
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
def create_and_enroll_student(self, courses=None, username=None):
""" Creates a new student and enrolls that student in the course.
......@@ -507,14 +523,38 @@ class TestCreateTeamAPI(TeamAPITestCase):
self.post_create_team(status, data)
def test_student_in_team(self):
self.post_create_team(
response = self.post_create_team(
400,
{
'course_id': str(self.test_course_1.id),
'description': "You are already on a team in this course."
},
data=self.build_team_data(
name="Doomed team",
course=self.test_course_1,
description="Overly ambitious student"
),
user='student_enrolled'
)
self.assertEqual(
"You are already in a team in this course.",
json.loads(response.content)["user_message"]
)
@ddt.data('staff', 'course_staff', 'community_ta')
def test_privileged_create_multiple_teams(self, user):
""" Privileged users can create multiple teams, even if they are already in one. """
# First add the privileged user to a team.
self.post_create_membership(
200,
self.build_membership_data(user, self.test_team_1),
user=user
)
self.post_create_team(
data=self.build_team_data(
name="Another team",
course=self.test_course_1,
description="Privileged users are the best"
),
user=user
)
@ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
def test_bad_fields(self, kwargs):
......@@ -877,14 +917,6 @@ class TestListMembershipAPI(TeamAPITestCase):
class TestCreateMembershipAPI(TeamAPITestCase):
"""Test cases for the membership creation endpoint."""
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
@ddt.data(
(None, 401),
('student_inactive', 401),
......@@ -956,6 +988,14 @@ class TestCreateMembershipAPI(TeamAPITestCase):
)
self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
def test_over_max_team_size_in_course_2(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_other_course_not_on_team', self.test_team_5),
user='student_enrolled_other_course_not_on_team'
)
self.assertIn('full', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
......
......@@ -96,9 +96,13 @@ class TeamsDashboardView(View):
context = {
"course": course,
"topics": topics_serializer.data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"team_memberships_data": team_memberships_serializer.data,
},
"topic_url": reverse(
......@@ -372,14 +376,16 @@ class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
if CourseTeamMembership.user_in_team_for_course(request.user, course_key):
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not team_administrator and CourseTeamMembership.user_in_team_for_course(request.user, course_key):
error_message = build_api_error(
ugettext_noop('You are already in a team in this course.'),
course_id=course_id
)
return Response({
'error_message': error_message,
}, status=status.HTTP_400_BAD_REQUEST)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
......@@ -396,8 +402,7 @@ class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
if not (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key)):
if not team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
return Response(CourseTeamSerializer(team).data)
......@@ -914,6 +919,13 @@ class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
if course_module.teams_max_size is not None and team.users.count() >= course_module.teams_max_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
try:
membership = team.add_user(user)
except AlreadyOnTeamInCourse:
......
......@@ -33,9 +33,9 @@ specific_student_selected = selected(not staff_selected and masquerade.user_name
student_selected = selected(not staff_selected and not specific_student_selected and not masquerade_group_id)
include_proctoring = settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and course.enable_proctored_exams
%>
<%static:js group='proctoring'/>
% if include_proctoring:
<%static:js group='proctoring'/>
% for template_name in ["proctored-exam-status"]:
<script type="text/template" id="${template_name}-tpl">
<%static:include path="courseware/${template_name}.underscore" />
......
......@@ -58,7 +58,7 @@ git+https://github.com/edx/ecommerce-api-client.git@1.1.0#egg=ecommerce-api-clie
-e git+https://github.com/edx/edx-user-state-client.git@30c0ad4b9f57f8d48d6943eb585ec8a9205f4469#egg=edx-user-state-client
-e git+https://github.com/edx/edx-organizations.git@release-2015-08-03#egg=edx-organizations
git+https://github.com/edx/edx-proctoring.git@release-2015-08-18#egg=edx-proctoring==0.6.0
git+https://github.com/edx/edx-proctoring.git@0.6.2#egg=edx-proctoring==0.6.2
# Third Party XBlocks
-e git+https://github.com/mitodl/edx-sga@172a90fd2738f8142c10478356b2d9ed3e55334a#egg=edx-sga
......