Commit c2c84fe1 by gradyward

Addressed code review comments (test cases + formatting)

Did not do the refactor to minimize queries yet.
parent 3d3921b1
......@@ -785,7 +785,8 @@ class AssessmentPart(models.Model):
         Args:
             rubric_index (RubricIndex): The index of the rubric's data.
-            selected_criteria (list): list of criterion names
+            selected_criteria (list): names of criteria that have an option selected
+            criteria_feedback (list): names of criteria that have feedback on them

         Returns:
             None
......
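For context, here is a speculative sketch of a validator with this signature. It is not the committed implementation: the helper names find_criteria_without_options and find_missing_criteria come from the mocks in the new test below, but their exact call signatures, return types, and the error-message format are assumptions inferred from that test.

    # Speculative sketch, not the committed implementation. Helper names are
    # taken from the test's mocks; signatures and message format are assumed.
    @classmethod
    def _check_all_criteria_assessed(cls, rubric_index, selected_criteria, criteria_feedback):
        # Criteria that define no options can only be "assessed" via feedback.
        zero_option_names = set(
            criterion.name for criterion in rubric_index.find_criteria_without_options()
        )
        feedback_names = set(criteria_feedback)

        # A criterion is unassessed if it has options but none selected
        # (zero-option criteria with feedback are excused), or if it has
        # zero options and received no feedback.
        not_assessed = (
            rubric_index.find_missing_criteria(selected_criteria)
            - (zero_option_names & feedback_names)
        ) | (zero_option_names - feedback_names)

        if not_assessed:
            raise InvalidRubricSelection(
                u"The following criteria were not assessed: {}".format(
                    u", ".join(sorted(not_assessed))
                )
            )

Run against the fixtures in the test below, this rule yields exactly the expected unassessed set {'---', '--C', '-B-', 'AB-'}.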
......@@ -7,7 +7,7 @@ from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
+import mock
class AssessmentTest(CacheResetTest):
"""
......@@ -148,3 +148,41 @@ class AssessmentTest(CacheResetTest):
             criterion['options'] = []
         return rubric_from_dict(rubric_dict)
+
+    def test_check_all_criteria_assessed(self):
+        """
+        Runs a problem with 8 criteria (the 8 permutations of possible needs)
+        through this validator. Each criterion name encodes its state:
+            A -- Has an option selected for it.
+            B -- Has zero options.
+            C -- Has feedback given.
+        """
+        all_criteria = ['---', '--C', '-B-', '-BC', 'A--', 'A-C', 'AB-', 'ABC']
+        selected_criteria = [crit for crit in all_criteria if 'A' in crit]
+        zero_option_criteria_names = [crit for crit in all_criteria if 'B' in crit]
+        feedback_given_criteria = [crit for crit in all_criteria if 'C' in crit]
+        zero_option_criteria = []
+        for zoc in zero_option_criteria_names:
+            mock_criterion = mock.Mock()
+            mock_criterion.name = zoc
+            zero_option_criteria.append(mock_criterion)
+        fake_rubric_index = mock.Mock()
+        fake_rubric_index.find_criteria_without_options = mock.Mock(return_value=zero_option_criteria)
+        fake_rubric_index.find_missing_criteria = mock.Mock(return_value=(set(all_criteria) - set(selected_criteria)))
+        expected_not_assessed = {'---', '--C', '-B-', 'AB-'}
+        expected_assessed = set(all_criteria) - expected_not_assessed
+        error = False
+        try:
+            AssessmentPart._check_all_criteria_assessed(fake_rubric_index, selected_criteria, feedback_given_criteria)
+        except InvalidRubricSelection as ex:
+            for criterion in expected_not_assessed:
+                self.assertTrue(criterion in ex.message)
+            for criterion in expected_assessed:
+                self.assertFalse(criterion in ex.message)
+            error = True
+        self.assertTrue(error)
\ No newline at end of file
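Two asides on the new test. First, the mocks assign .name after construction rather than passing name= to mock.Mock() because name is a reserved constructor argument that labels the mock itself; mock.Mock(name=zoc) would not produce an object whose .name attribute equals zoc. Second, the try/except/flag pattern can be written more directly with assertRaises as a context manager (unittest, Python 2.7+); a sketch using the same fixtures as the test:

    # Equivalent check using the assertRaises context manager: the test fails
    # automatically if InvalidRubricSelection is never raised.
    with self.assertRaises(InvalidRubricSelection) as context:
        AssessmentPart._check_all_criteria_assessed(
            fake_rubric_index, selected_criteria, feedback_given_criteria
        )
    for criterion in expected_not_assessed:
        self.assertIn(criterion, context.exception.message)
    for criterion in expected_assessed:
        self.assertNotIn(criterion, context.exception.message)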
......@@ -56,8 +56,8 @@ class TestSelfApi(CacheResetTest):
"accuracy": "Like my sister's cutting comments about my weight, I may not have enjoyed the piece, but I cannot fault it for its factual nature."
}
OVERALL_FEEDBACK = "Unfortunately, the nature of being is too complex to comment, judge, or discern any one" + \
"arbitrary set of things over another."
OVERALL_FEEDBACK = (u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another.")
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
......
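One caveat with the new parenthesized form: adjacent string literals concatenate with no separator, so the first literal needs a trailing space (added above; the original concatenation had the same bug). A quick interpreter check:

    >>> u"discern any one" u"arbitrary set"
    u'discern any onearbitrary set'
    >>> u"discern any one " u"arbitrary set"
    u'discern any one arbitrary set'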
......@@ -146,12 +146,16 @@
{% endif %}
{% endfor %}
-{% if criterion.feedback %}
+{% if criterion.peer_feedback or criterion.self_feedback %}
<li class="answer--feedback ui-toggle-visibility {% if criterion.options %}is--collapsed{% endif %}">
{% if criterion.options %}
<h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
{% if criterion.self_feedback %}
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.peer_feedback|length|add:'1' }})</span>
{% else %}
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.peer_feedback|length }})</span>
{% endif %}
</h5>
{% endif %}
......@@ -170,7 +174,7 @@
{% if criterion.self_feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
{% trans "My Assessment" %}
{% trans "Your Assessment" %}
</h6>
<div class="feedback__value">
......@@ -186,7 +190,7 @@
</li>
{% endwith %}
{% endfor %}
-{% if peer_assessments %}
+{% if peer_assessments or self_assessment.feedback %}
<li class="question question--feedback ui-toggle-visibility">
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
......@@ -215,6 +219,23 @@
{% endif %}
{% endwith %}
{% endfor %}
+{% if self_assessment.feedback %}
+    <li class="answer self-evaluation--0" id="question--feedback__answer-0">
+        <h5 class="answer__title">
+            <span class="answer__source">
+                <span class="label sr">{% trans "Self assessment" %}: </span>
+                <span class="value">{% trans "Self assessment" %}</span>
+            </span>
+        </h5>
+        <div class="answer__value">
+            <h6 class="label sr">{% trans "Your assessment" %}: </h6>
+            <div class="value">
+                <p>{{ self_assessment.feedback }}</p>
+            </div>
+        </div>
+    </li>
+{% endif %}
</ul>
</li>
{% endif %}
......
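For reference, the template changes above imply a per-criterion context shaped roughly like the following. This is a hypothetical illustration, not code from the commit: peer_feedback as a list of strings explains the |length filter, and self_feedback as a single optional string explains the |add:'1' adjustment.

    # Hypothetical shape of the per-criterion template context assumed above.
    criterion_context = {
        "name": "accuracy",
        "options": [],                                         # zero-option criterion
        "peer_feedback": [u"Well argued.", u"Cite sources."],  # list, hence |length
        "self_feedback": u"I found my reasoning sound.",       # one string, hence |add:'1'
    }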
+{% spaceless %}
{% load i18n %}
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
......@@ -70,3 +71,4 @@
</li>
</ol>
</fieldset>
+{% endspaceless %}
\ No newline at end of file
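For context on the {% spaceless %} wrapper added here: it strips whitespace between HTML tags (not inside text nodes), shrinking the rendered markup. A minimal demonstration using Django's template engine, assuming Django settings are already configured:

    from django.template import Context, Template

    # {% spaceless %} removes whitespace between tags in the rendered output.
    template = Template(
        u"{% spaceless %}<ol>\n    <li>item</li>\n</ol>{% endspaceless %}"
    )
    print(template.render(Context()))  # -> <ol><li>item</li></ol>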