Commit aaf9bdb2 by Stephen Sanchez

Merge pull request #302 from edx/ormsbee/workflow_steps

Make peer assessment an optional step.
parents ecb8e976 2702e77b
...@@ -72,7 +72,7 @@ class PeerAssessmentInternalError(PeerAssessmentError): ...@@ -72,7 +72,7 @@ class PeerAssessmentInternalError(PeerAssessmentError):
pass pass
def is_complete(submission_uuid, requirements): def submitter_is_finished(submission_uuid, requirements):
try: try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid) workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None: if workflow.completed_at is not None:
...@@ -99,7 +99,7 @@ def get_score(submission_uuid, requirements): ...@@ -99,7 +99,7 @@ def get_score(submission_uuid, requirements):
dict with keys "points_earned" and "points_possible". dict with keys "points_earned" and "points_possible".
""" """
# User hasn't completed their own submission yet # User hasn't completed their own submission yet
if not is_complete(submission_uuid, requirements): if not submitter_is_finished(submission_uuid, requirements):
return None return None
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid) workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
...@@ -135,6 +135,10 @@ def get_score(submission_uuid, requirements): ...@@ -135,6 +135,10 @@ def get_score(submission_uuid, requirements):
} }
def assessment_is_finished(submission_uuid, requirements):
return bool(get_score(submission_uuid, requirements))
def create_assessment( def create_assessment(
scorer_submission_uuid, scorer_submission_uuid,
scorer_id, scorer_id,
......
...@@ -3,6 +3,7 @@ Public interface for self-assessment. ...@@ -3,6 +3,7 @@ Public interface for self-assessment.
""" """
import logging import logging
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError from submissions.api import get_submission_and_student, SubmissionNotFoundError
...@@ -21,13 +22,30 @@ SELF_TYPE = "SE" ...@@ -21,13 +22,30 @@ SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.self_api") logger = logging.getLogger("openassessment.assessment.self_api")
class SelfAssessmentRequestError(Exception): class SelfAssessmentError(Exception):
"""Generic Self Assessment Error
Raised when an error occurs while processing a request related to the
Self Assessment Workflow.
"""
pass
class SelfAssessmentRequestError(SelfAssessmentError):
""" """
There was a problem with the request for a self-assessment. There was a problem with the request for a self-assessment.
""" """
pass pass
class SelfAssessmentInternalError(SelfAssessmentError):
"""
There was an internal problem while accessing the self-assessment api.
"""
pass
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None): def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
""" """
Create a self-assessment for a submission. Create a self-assessment for a submission.
...@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s ...@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
assessment_dict = full_assessment_dict(assessment) assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, submission) _log_assessment(assessment, submission)
# Return the serialized assessment # Return the serialized assessment
return assessment_dict return assessment_dict
...@@ -140,21 +157,104 @@ def get_assessment(submission_uuid): ...@@ -140,21 +157,104 @@ def get_assessment(submission_uuid):
return serialized_assessment return serialized_assessment
def is_complete(submission_uuid): def submitter_is_finished(submission_uuid, requirements):
""" """
Check whether a self-assessment has been completed for a submission. Check whether a self-assessment has been completed for a submission.
Args: Args:
submission_uuid (str): The unique identifier of the submission. submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns: Returns:
bool True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
""" """
return Assessment.objects.filter( return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists() ).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
Args:
submission_uuid (str): The submission uuid is used to get the
assessments used to score this submission, and generate the
appropriate median score.
Returns:
(dict): A dictionary of rubric criterion names, with a median score of
the peer assessments.
Raises:
SelfAssessmentInternalError: If any error occurs while retrieving
information to form the median scores, an error is raised.
"""
try:
assessments = list(
Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1]
)
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
def _log_assessment(assessment, submission): def _log_assessment(assessment, submission):
""" """
Log the creation of a self-assessment. Log the creation of a self-assessment.
......
...@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC) ...@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC) WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC) THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
STEPS = ['peer', 'self']
@ddt @ddt
class TestPeerApi(CacheResetTest): class TestPeerApi(CacheResetTest):
...@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest): ...@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest):
'must_grade': REQUIRED_GRADED, 'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY 'must_be_graded_by': REQUIRED_GRADED_BY
} }
self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements)) self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))
def test_completeness(self): def test_completeness(self):
""" """
...@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest): ...@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest):
'must_grade': REQUIRED_GRADED, 'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY 'must_be_graded_by': REQUIRED_GRADED_BY
} }
self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements)) self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))
def test_find_active_assessments(self): def test_find_active_assessments(self):
buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
...@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest): ...@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest):
new_student_item["student_id"] = student new_student_item["student_id"] = student
submission = sub_api.create_submission(new_student_item, answer, date) submission = sub_api.create_submission(new_student_item, answer, date)
peer_api.create_peer_workflow(submission["uuid"]) peer_api.create_peer_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"]) workflow_api.create_workflow(submission["uuid"], STEPS)
return submission, new_student_item return submission, new_student_item
...@@ -9,7 +9,7 @@ import pytz ...@@ -9,7 +9,7 @@ import pytz
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission from submissions.api import create_submission
from openassessment.assessment.self_api import ( from openassessment.assessment.self_api import (
create_assessment, is_complete, SelfAssessmentRequestError, get_assessment create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
) )
...@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest): ...@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest):
# Now there should be a submission, but no self-assessment # Now there should be a submission, but no self-assessment
assessment = get_assessment(submission["uuid"]) assessment = get_assessment(submission["uuid"])
self.assertIs(assessment, None) self.assertIs(assessment, None)
self.assertFalse(is_complete(submission['uuid'])) self.assertFalse(submitter_is_finished(submission['uuid'], {}))
# Create a self-assessment for the submission # Create a self-assessment for the submission
assessment = create_assessment( assessment = create_assessment(
...@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest): ...@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest):
) )
# Self-assessment should be complete # Self-assessment should be complete
self.assertTrue(is_complete(submission['uuid'])) self.assertTrue(submitter_is_finished(submission['uuid'], {}))
# Retrieve the self-assessment # Retrieve the self-assessment
retrieved = get_assessment(submission["uuid"]) retrieved = get_assessment(submission["uuid"])
...@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest): ...@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest):
def test_is_complete_no_submission(self): def test_is_complete_no_submission(self):
# This submission uuid does not exist # This submission uuid does not exist
self.assertFalse(is_complete('abc1234')) self.assertFalse(submitter_is_finished('abc1234', {}))
...@@ -9,6 +9,7 @@ from submissions import api as sub_api ...@@ -9,6 +9,7 @@ from submissions import api as sub_api
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api, self_api from openassessment.assessment import peer_api, self_api
STEPS = ['peer', 'self']
class Command(BaseCommand): class Command(BaseCommand):
...@@ -131,7 +132,7 @@ class Command(BaseCommand): ...@@ -131,7 +132,7 @@ class Command(BaseCommand):
""" """
answer = {'text': " ".join(loremipsum.get_paragraphs(5))} answer = {'text': " ".join(loremipsum.get_paragraphs(5))}
submission = sub_api.create_submission(student_item, answer) submission = sub_api.create_submission(student_item, answer)
workflow_api.create_workflow(submission['uuid']) workflow_api.create_workflow(submission['uuid'], STEPS)
workflow_api.update_from_assessments( workflow_api.update_from_assessments(
submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}} submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
) )
......
...@@ -43,7 +43,7 @@ class UploadDataTest(TestCase): ...@@ -43,7 +43,7 @@ class UploadDataTest(TestCase):
} }
submission_text = "test submission {}".format(index) submission_text = "test submission {}".format(index)
submission = sub_api.create_submission(student_item, submission_text) submission = sub_api.create_submission(student_item, submission_text)
workflow_api.create_workflow(submission['uuid']) workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])
# Create and upload the archive of CSV files # Create and upload the archive of CSV files
# This should generate the files even though # This should generate the files even though
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
</article> </article>
<article class="submission__peer-evaluations step__content__section"> <article class="submission__peer-evaluations step__content__section">
<h3 class="submission__peer-evaluations__title">{% trans "Peer Assessments of Your Response" %}</h3> <h3 class="submission__peer-evaluations__title">{% trans "Assessments of Your Response" %}</h3>
<ol class="list submission__peer-evaluations__questions"> <ol class="list submission__peer-evaluations__questions">
{% for criterion in rubric_criteria %} {% for criterion in rubric_criteria %}
...@@ -143,135 +143,139 @@ ...@@ -143,135 +143,139 @@
</li> </li>
{% endwith %} {% endwith %}
{% endfor %} {% endfor %}
<li class="question question--feedback ui-toggle-visibility"> {% if peer_assessments %}
<h4 class="question__title ui-toggle-visibility__control"> <li class="question question--feedback ui-toggle-visibility">
<i class="ico icon-caret-right"></i> <h4 class="question__title ui-toggle-visibility__control">
<span class="question__title__copy">{% trans "Additional feedback on your response" %}</span> <i class="ico icon-caret-right"></i>
</h4> <span class="question__title__copy">{% trans "Additional feedback on your response" %}</span>
</h4>
<ul class="question__answers ui-toggle-visibility__content"> <ul class="question__answers ui-toggle-visibility__content">
{% for assessment in peer_assessments %} {% for assessment in peer_assessments %}
{% with peer_num=forloop.counter %} {% with peer_num=forloop.counter %}
{% if assessment.feedback %} {% if assessment.feedback %}
<li class="answer peer-evaluation--{{ peer_num }}" id="question--feedback__answer-{{ peer_num }}"> <li class="answer peer-evaluation--{{ peer_num }}" id="question--feedback__answer-{{ peer_num }}">
<h5 class="answer__title"> <h5 class="answer__title">
<span class="answer__source"> <span class="answer__source">
<span class="label sr">{% trans "Peer assessor" %}: </span> <span class="label sr">{% trans "Peer assessor" %}: </span>
<span class="value">{% blocktrans with peer_num=peer_num %}Peer {{ peer_num }}{% endblocktrans %}</span> <span class="value">{% blocktrans with peer_num=peer_num %}Peer {{ peer_num }}{% endblocktrans %}</span>
</span> </span>
</h5> </h5>
<div class="answer__value"> <div class="answer__value">
<h6 class="label sr">{% trans "Peer's assessment" %}: </h6> <h6 class="label sr">{% trans "Peer's assessment" %}: </h6>
<div class="value"> <div class="value">
<p>{{ assessment.feedback }}</p> <p>{{ assessment.feedback }}</p>
</div>
</div> </div>
</div> </li>
</li> {% endif %}
{% endif %} {% endwith %}
{% endwith %} {% endfor %}
{% endfor %} </ul>
</ul> </li>
</li> {% endif %}
</ol> </ol>
</article> </article>
<form id="submission__feedback" class="submission__feedback ui-toggle-visibility step__content__section is--collapsed" method="post"> {% if peer_assessments %}
<h3 class="submission__feedback__title ui-toggle-visibility__control"> <form id="submission__feedback" class="submission__feedback ui-toggle-visibility step__content__section is--collapsed" method="post">
<i class="ico icon-caret-right"></i> <h3 class="submission__feedback__title ui-toggle-visibility__control">
<span class="submission__feedback__title__copy">{% trans "Provide Feedback on Peer Assessments" %}</span> <i class="ico icon-caret-right"></i>
</h3> <span class="submission__feedback__title__copy">{% trans "Provide Feedback on Peer Assessments" %}</span>
</h3>
<div class="ui-toggle-visibility__content"> <div class="ui-toggle-visibility__content">
<div class="submission__feedback__content {{ has_submitted_feedback|yesno:"is--submitted," }}"> <div class="submission__feedback__content {{ has_submitted_feedback|yesno:"is--submitted," }}">
<span class="transition__status is--hidden" aria-hidden="true"> <span class="transition__status is--hidden" aria-hidden="true">
<span class="wrapper--anim"> <span class="wrapper--anim">
<i class="ico icon-refresh icon-spin"></i> <i class="ico icon-refresh icon-spin"></i>
<span class="copy">{% trans "Submitting Feedback" %}</span> <span class="copy">{% trans "Submitting Feedback" %}</span>
</span>
</span> </span>
</span>
<div class="message message--complete {{ has_submitted_feedback|yesno:",is--hidden" }}" <div class="message message--complete {{ has_submitted_feedback|yesno:",is--hidden" }}"
{{ has_submitted_feedback|yesno:'aria-hidden=false,aria-hidden=true' }}> {{ has_submitted_feedback|yesno:'aria-hidden=false,aria-hidden=true' }}>
<h3 class="message__title">{% trans "Your Feedback Has Been Submitted" %}</h3> <h3 class="message__title">{% trans "Your Feedback Has Been Submitted" %}</h3>
<div class="message__content"> <div class="message__content">
<p>{% trans "Your feedback has been submitted. Course staff will be able to see this feedback when they review course records." %}</p> <p>{% trans "Your feedback has been submitted. Course staff will be able to see this feedback when they review course records." %}</p>
</div>
</div> </div>
</div>
<div class="submission__feedback__instructions {{ has_submitted_feedback|yesno:"is--hidden," }}" <div class="submission__feedback__instructions {{ has_submitted_feedback|yesno:"is--hidden," }}"
{{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}> {{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}>
<p>{% trans "Course staff will be able to see any feedback that you provide here when they review course records." %}</p> <p>{% trans "Course staff will be able to see any feedback that you provide here when they review course records." %}</p>
</div>
<ol class="list list--fields submission__feedback__fields {{ has_submitted_feedback|yesno:"is--hidden," }}"
{{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}>
<li class="field field-group field--radio feedback__overall" id="feedback__overall">
<h4 class="field-group__label">{% trans "Select the statements below that best reflect your experience with peer assessments" %}:</h4>
<ol class="list--options">
<li class="option option--useful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--useful"
class="option__input feedback__overall__value"
value="These assessments were useful." />
<label class="option__label" for="feedback__overall__value--useful">{% trans "These assessments were useful." %}</label>
</li>
<li class="option option--notuseful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--notuseful"
class="option__input feedback__overall__value"
value="These assessments were not useful." />
<label class="option__label" for="feedback__overall__value--notuseful">{% trans "These assessments were not useful." %}</label>
</li>
<li class="option option--disagree">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--disagree"
class="option__input feedback__overall__value"
value="I disagree with one or more of the peer assessments of my response." />
<label class="option__label" for="feedback__overall__value--disagree">{% trans "I disagree with one or more of the peer assessments of my response." %}</label>
</li>
<li class="option option--inappropriate">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--inappropriate"
class="option__input feedback__overall__value"
value="Some comments I received were inappropriate." />
<label class="option__label" for="feedback__overall__value--inappropriate">{% trans "Some comments I received were inappropriate." %}</label>
</li>
</ol>
</li>
<li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">{% trans "Please provide any feedback on the grade or comments that you received from your peers." %}</label>
<textarea
id="feedback__remarks__value"
placeholder="{% trans "I feel the feedback I received was..." %}"
maxlength="100000"
>
{{ feedback_text }}
</textarea>
</li>
</ol>
<div class="submission__feedback__actions {{ has_submitted_feedback|yesno:"is--hidden," }}"
{{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}>
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not submit your feedback" %}</h3>
<div class="message__content"></div>
</div> </div>
<ul class="list list--actions submission__feedback__actions"> <ol class="list list--fields submission__feedback__fields {{ has_submitted_feedback|yesno:"is--hidden," }}"
<li class="list--actions__item"> {{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}>
<button type="submit" id="feedback__submit" class="action action--submit feedback__submit">{% trans "Submit Feedback on Peer Assessments" %}</button> <li class="field field-group field--radio feedback__overall" id="feedback__overall">
<h4 class="field-group__label">{% trans "Select the statements below that best reflect your experience with peer assessments" %}:</h4>
<ol class="list--options">
<li class="option option--useful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--useful"
class="option__input feedback__overall__value"
value="These assessments were useful." />
<label class="option__label" for="feedback__overall__value--useful">{% trans "These assessments were useful." %}</label>
</li>
<li class="option option--notuseful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--notuseful"
class="option__input feedback__overall__value"
value="These assessments were not useful." />
<label class="option__label" for="feedback__overall__value--notuseful">{% trans "These assessments were not useful." %}</label>
</li>
<li class="option option--disagree">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--disagree"
class="option__input feedback__overall__value"
value="I disagree with one or more of the peer assessments of my response." />
<label class="option__label" for="feedback__overall__value--disagree">{% trans "I disagree with one or more of the peer assessments of my response." %}</label>
</li>
<li class="option option--inappropriate">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--inappropriate"
class="option__input feedback__overall__value"
value="Some comments I received were inappropriate." />
<label class="option__label" for="feedback__overall__value--inappropriate">{% trans "Some comments I received were inappropriate." %}</label>
</li>
</ol>
</li> </li>
</ul> <li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">{% trans "Please provide any feedback on the grade or comments that you received from your peers." %}</label>
<textarea
id="feedback__remarks__value"
placeholder="{% trans "I feel the feedback I received was..." %}"
maxlength="100000"
>
{{ feedback_text }}
</textarea>
</li>
</ol>
<div class="submission__feedback__actions {{ has_submitted_feedback|yesno:"is--hidden," }}"
{{ has_submitted_feedback|yesno:'aria-hidden=true,aria-hidden=false' }}>
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not submit your feedback" %}</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions submission__feedback__actions">
<li class="list--actions__item">
<button type="submit" id="feedback__submit" class="action action--submit feedback__submit">{% trans "Submit Feedback on Peer Assessments" %}</button>
</li>
</ul>
</div>
</div> </div>
</div> </div>
</div> </form>
</form> {% endif %}
</div> </div>
</div> </div>
</div> </div>
......
...@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest): ...@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest):
} }
submission_text = "test submission {}".format(index) submission_text = "test submission {}".format(index)
submission = sub_api.create_submission(student_item, submission_text) submission = sub_api.create_submission(student_item, submission_text)
workflow_api.create_workflow(submission['uuid']) workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])
# Generate a CSV file for the submissions # Generate a CSV file for the submissions
output_streams = self._output_streams(['submission']) output_streams = self._output_streams(['submission'])
......
from django.contrib import admin from django.contrib import admin
from .models import AssessmentWorkflow from .models import AssessmentWorkflow, AssessmentWorkflowStep
class AssessmentWorkflowStepInline(admin.StackedInline):
model = AssessmentWorkflowStep
extra = 0
class AssessmentWorkflowAdmin(admin.ModelAdmin): class AssessmentWorkflowAdmin(admin.ModelAdmin):
"""Admin for the user's overall workflow through open assessment. """Admin for the user's overall workflow through open assessment.
...@@ -15,5 +20,6 @@ class AssessmentWorkflowAdmin(admin.ModelAdmin): ...@@ -15,5 +20,6 @@ class AssessmentWorkflowAdmin(admin.ModelAdmin):
) )
list_filter = ('status',) list_filter = ('status',)
search_fields = ('uuid', 'submission_uuid', 'course_id', 'item_id') search_fields = ('uuid', 'submission_uuid', 'course_id', 'item_id')
inlines = (AssessmentWorkflowStepInline,)
admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin) admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin)
...@@ -9,7 +9,7 @@ from django.db import DatabaseError ...@@ -9,7 +9,7 @@ from django.db import DatabaseError
from openassessment.assessment import peer_api from openassessment.assessment import peer_api
from submissions import api as sub_api from submissions import api as sub_api
from .models import AssessmentWorkflow from .models import AssessmentWorkflow, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer from .serializers import AssessmentWorkflowSerializer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -58,7 +58,7 @@ class AssessmentWorkflowNotFoundError(AssessmentWorkflowError): ...@@ -58,7 +58,7 @@ class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
pass pass
def create_workflow(submission_uuid): def create_workflow(submission_uuid, steps):
"""Begins a new assessment workflow. """Begins a new assessment workflow.
Create a new workflow that other assessments will record themselves against. Create a new workflow that other assessments will record themselves against.
...@@ -66,6 +66,8 @@ def create_workflow(submission_uuid): ...@@ -66,6 +66,8 @@ def create_workflow(submission_uuid):
Args: Args:
submission_uuid (str): The UUID for the submission that all our submission_uuid (str): The UUID for the submission that all our
assessments will be evaluating. assessments will be evaluating.
steps (list): List of steps that are part of the workflow, in the order
that the user must complete them. Example: `["peer", "self"]`
Returns: Returns:
dict: Assessment workflow information with the following dict: Assessment workflow information with the following
...@@ -85,7 +87,7 @@ def create_workflow(submission_uuid): ...@@ -85,7 +87,7 @@ def create_workflow(submission_uuid):
AssessmentWorkflowRequestError: If the `submission_uuid` passed in does AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
not exist or is of an invalid type. not exist or is of an invalid type.
AssessmentWorkflowInternalError: Unexpected internal error, such as the AssessmentWorkflowInternalError: Unexpected internal error, such as the
submissions app not being available or a database configuation submissions app not being available or a database configuration
problem. problem.
""" """
...@@ -98,7 +100,7 @@ def create_workflow(submission_uuid): ...@@ -98,7 +100,7 @@ def create_workflow(submission_uuid):
try: try:
submission_dict = sub_api.get_submission_and_student(submission_uuid) submission_dict = sub_api.get_submission_and_student(submission_uuid)
except sub_api.SubmissionNotFoundError as err: except sub_api.SubmissionNotFoundError:
err_msg = sub_err_msg("submission not found") err_msg = sub_err_msg("submission not found")
logger.error(err_msg) logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg) raise AssessmentWorkflowRequestError(err_msg)
...@@ -107,27 +109,51 @@ def create_workflow(submission_uuid): ...@@ -107,27 +109,51 @@ def create_workflow(submission_uuid):
logger.error(err_msg) logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg) raise AssessmentWorkflowRequestError(err_msg)
except sub_api.SubmissionInternalError as err: except sub_api.SubmissionInternalError as err:
err_msg = sub_err_msg(err)
logger.error(err) logger.error(err)
raise AssessmentWorkflowInternalError( raise AssessmentWorkflowInternalError(
u"retrieving submission {} failed with unknown error: {}" u"retrieving submission {} failed with unknown error: {}"
.format(submission_uuid, err) .format(submission_uuid, err)
) )
# Raise an error if they specify a step we don't recognize...
invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
if invalid_steps:
raise AssessmentWorkflowRequestError(
u"The following steps were not recognized: {}; Must be one of {}".format(
invalid_steps, AssessmentWorkflow.STEPS
)
)
# We're not using a serializer to deserialize this because the only variable # We're not using a serializer to deserialize this because the only variable
# we're getting from the outside is the submission_uuid, which is already # we're getting from the outside is the submission_uuid, which is already
# validated by this point. # validated by this point.
status = AssessmentWorkflow.STATUS.peer
if steps[0] == "peer":
try:
peer_api.create_peer_workflow(submission_uuid)
except peer_api.PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
elif steps[0] == "self":
status = AssessmentWorkflow.STATUS.self
try: try:
peer_api.create_peer_workflow(submission_uuid)
workflow = AssessmentWorkflow.objects.create( workflow = AssessmentWorkflow.objects.create(
submission_uuid=submission_uuid, submission_uuid=submission_uuid,
status=AssessmentWorkflow.STATUS.peer, status=status,
course_id=submission_dict['student_item']['course_id'], course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id'], item_id=submission_dict['student_item']['item_id'],
) )
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(steps)
]
workflow.steps.add(*workflow_steps)
except ( except (
DatabaseError, DatabaseError,
peer_api.PeerAssessmentError,
sub_api.SubmissionError sub_api.SubmissionError
) as err: ) as err:
err_msg = u"Could not create assessment workflow: {}".format(err) err_msg = u"Could not create assessment workflow: {}".format(err)
...@@ -298,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements): ...@@ -298,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements):
return _serialized_with_details(workflow, assessment_requirements) return _serialized_with_details(workflow, assessment_requirements)
def get_status_counts(course_id, item_id): def get_status_counts(course_id, item_id, steps):
""" """
Count how many workflows have each status, for a given item in a course. Count how many workflows have each status, for a given item in a course.
Kwargs: Kwargs:
course_id (unicode): The ID of the course. course_id (unicode): The ID of the course.
item_id (unicode): The ID of the item in the course. item_id (unicode): The ID of the item in the course.
steps (list): A list of assessment steps for this problem.
Returns: Returns:
list of dictionaries with keys "status" (str) and "count" (int) list of dictionaries with keys "status" (str) and "count" (int)
Example usage: Example usage:
>>> get_status_counts("ora2/1/1", "peer-assessment-problem") >>> get_status_counts("ora2/1/1", "peer-assessment-problem", ["peer"])
[ [
{"status": "peer", "count": 5}, {"status": "peer", "count": 5},
{"status": "self", "count": 10}, {"status": "self", "count": 10},
...@@ -327,7 +354,8 @@ def get_status_counts(course_id, item_id): ...@@ -327,7 +354,8 @@ def get_status_counts(course_id, item_id):
course_id=course_id, course_id=course_id,
item_id=item_id, item_id=item_id,
).count() ).count()
} for status in AssessmentWorkflow.STATUS_VALUES }
for status in steps + AssessmentWorkflow.STATUSES
] ]
......
...@@ -40,7 +40,6 @@ class Migration(SchemaMigration): ...@@ -40,7 +40,6 @@ class Migration(SchemaMigration):
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}), 'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}), 'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
......
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration introducing the AssessmentWorkflowStep model.

    Each row is one stage (e.g. "peer", "self") of an AssessmentWorkflow,
    ordered by ``order_num`` and linked via the ``steps`` related name.
    """

    def forwards(self, orm):
        """Create the ``workflow_assessmentworkflowstep`` table."""
        # Adding model 'AssessmentWorkflowStep'
        db.create_table('workflow_assessmentworkflowstep', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            # FK back to the parent workflow; reverse accessor is `workflow.steps`
            ('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='steps', to=orm['workflow.AssessmentWorkflow'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
            # Nullable timestamps: None means "not yet completed"
            ('submitter_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('assessment_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('order_num', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('workflow', ['AssessmentWorkflowStep'])

    def backwards(self, orm):
        """Drop the ``workflow_assessmentworkflowstep`` table."""
        # Deleting model 'AssessmentWorkflowStep'
        db.delete_table('workflow_assessmentworkflowstep')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in migration history. Do not edit by hand.
    models = {
        'workflow.assessmentworkflow': {
            'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        'workflow.assessmentworkflowstep': {
            'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'AssessmentWorkflowStep'},
            'assessment_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'submitter_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['workflow.AssessmentWorkflow']"})
        }
    }

    complete_apps = ['workflow']
\ No newline at end of file
...@@ -16,6 +16,7 @@ import importlib ...@@ -16,6 +16,7 @@ import importlib
from django.conf import settings from django.conf import settings
from django.db import models from django.db import models
from django_extensions.db.fields import UUIDField from django_extensions.db.fields import UUIDField
from django.utils.timezone import now
from model_utils import Choices from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel from model_utils.models import StatusModel, TimeStampedModel
...@@ -46,15 +47,20 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -46,15 +47,20 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
an after the fact recording of the last known state of that information so an after the fact recording of the last known state of that information so
we can search easily. we can search easily.
""" """
STATUS_VALUES = [ STEPS = [
"peer", # User needs to assess peer submissions "peer", # User needs to assess peer submissions
"self", # User needs to assess themselves "self", # User needs to assess themselves
]
STATUSES = [
"waiting", # User has done all necessary assessment but hasn't been "waiting", # User has done all necessary assessment but hasn't been
# graded yet -- we're waiting for assessments of their # graded yet -- we're waiting for assessments of their
# submission by others. # submission by others.
"done", # Complete "done", # Complete
] ]
STATUS_VALUES = STEPS + STATUSES
STATUS = Choices(*STATUS_VALUES) # implicit "status" field STATUS = Choices(*STATUS_VALUES) # implicit "status" field
submission_uuid = models.CharField(max_length=36, db_index=True, unique=True) submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
...@@ -81,23 +87,16 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -81,23 +87,16 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
return sub_api.get_latest_score_for_submission(self.submission_uuid) return sub_api.get_latest_score_for_submission(self.submission_uuid)
def status_details(self, assessment_requirements): def status_details(self, assessment_requirements):
return { status_dict = {}
"peer": { steps = self._get_steps()
"complete": self._is_peer_complete(assessment_requirements), for step in steps:
}, status_dict[step.name] = {
"self": { "complete": step.api().submitter_is_finished(
"complete": self._is_self_complete(), self.submission_uuid,
}, assessment_requirements.get(step.name, {})
} )
}
def _is_peer_complete(self, assessment_requirements): return status_dict
from openassessment.assessment import peer_api
peer_requirements = assessment_requirements["peer"]
return peer_api.is_complete(self.submission_uuid, peer_requirements)
def _is_self_complete(self):
from openassessment.assessment import self_api
return self_api.is_complete(self.submission_uuid)
def update_from_assessments(self, assessment_requirements): def update_from_assessments(self, assessment_requirements):
"""Query self and peer APIs and change our status if appropriate. """Query self and peer APIs and change our status if appropriate.
...@@ -130,64 +129,171 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -130,64 +129,171 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
specific requirements in this dict. specific requirements in this dict.
""" """
from openassessment.assessment import peer_api from openassessment.assessment import peer_api, self_api
# If we're done, we're done -- it doesn't matter if requirements have # If we're done, we're done -- it doesn't matter if requirements have
# changed because we've already written a score. # changed because we've already written a score.
if self.status == self.STATUS.done: if self.status == self.STATUS.done:
return return
# Have they completed the peer and self steps? # Update our AssessmentWorkflowStep models with the latest from our APIs
peer_complete = self._is_peer_complete(assessment_requirements) steps = self._get_steps()
self_complete = self._is_self_complete()
# Go through each step and update its status.
if peer_complete and self_complete: for step in steps:
# If they've completed both, they're at least waiting, possibly done step.update(self.submission_uuid, assessment_requirements)
new_status = self.STATUS.waiting
elif peer_complete: # Fetch name of the first step that the submitter hasn't yet completed.
# If they haven't done self assessment yet, that's their status new_status = next(
new_status = self.STATUS.self (step.name for step in steps if step.submitter_completed_at is None),
else: self.STATUS.waiting # if nothing's left to complete, we're waiting
# Default starting status is peer )
new_status = self.STATUS.peer
# If the submitter has done all they need to do, let's check to see if
# If we're at least waiting, let's check if we have a peer score and # all steps have been fully assessed (i.e. we can score it).
# can move all the way to done if (new_status == self.STATUS.waiting and
if new_status == self.STATUS.waiting: all(step.assessment_completed_at for step in steps)):
score = peer_api.get_score(
self.submission_uuid, assessment_requirements["peer"] # At this point, we're trying to give a score. We currently have a
) # very simple rule for this -- if it has a peer step, use that for
if score: # scoring. If not, use the self step. Later on, we may put more
sub_api.set_score( # interesting rules here.
step_names = [step.name for step in steps]
score = None
if self.STATUS.peer in step_names:
score = peer_api.get_score(
self.submission_uuid, self.submission_uuid,
score["points_earned"], assessment_requirements[self.STATUS.peer]
score["points_possible"]
) )
elif self.STATUS.self in step_names:
score = self_api.get_score(self.submission_uuid, {})
# This should be replaced by using the event tracking API, but if score:
# that's not quite ready yet. So we're making this temp hack. self.set_score(score)
emit_event({
"context": {
"course_id": self.course_id
},
"event": {
"submission_uuid": self.submission_uuid,
"points_earned": score["points_earned"],
"points_possible": score["points_possible"],
},
"event_source": "server",
"event_type": "openassessment.workflow.score",
"time": datetime.utcnow(),
})
new_status = self.STATUS.done new_status = self.STATUS.done
# Finally save our changes if the status has changed # Finally save our changes if the status has changed
if self.status != new_status: if self.status != new_status:
self.status = new_status self.status = new_status
self.save() self.save()
def _get_steps(self):
"""
Simple helper function for retrieving all the steps in the given
Workflow.
"""
steps = list(self.steps.all())
if not steps:
# If no steps exist for this AssessmentWorkflow, assume
# peer -> self for backwards compatibility
self.steps.add(
AssessmentWorkflowStep(name=self.STATUS.peer, order_num=0),
AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
)
steps = list(self.steps.all())
return steps
def set_score(self, score):
"""
Set a score for the workflow.
Scores are persisted via the Submissions API, separate from the Workflow
Data. Score is associated with the same submission_uuid as this workflow
Args:
score (dict): A dict containing 'points_earned' and
'points_possible'.
"""
sub_api.set_score(
self.submission_uuid,
score["points_earned"],
score["points_possible"]
)
# This should be replaced by using the event tracking API, but
# that's not quite ready yet. So we're making this temp hack.
emit_event({
"context": {
"course_id": self.course_id
},
"event": {
"submission_uuid": self.submission_uuid,
"points_earned": score["points_earned"],
"points_possible": score["points_possible"],
},
"event_source": "server",
"event_type": "openassessment.workflow.score",
"time": datetime.utcnow(),
})
class AssessmentWorkflowStep(models.Model):
    """A single stage within an overall AssessmentWorkflow.

    The same caveats apply here as to `AssessmentWorkflow`: what the database
    holds is usually, but not always, current. If the problem definition has
    changed a step's requirements, stored rows stay out of sync until someone
    views the problem again and triggers a workflow update.
    """
    workflow = models.ForeignKey(AssessmentWorkflow, related_name="steps")
    name = models.CharField(max_length=20)
    submitter_completed_at = models.DateTimeField(default=None, null=True)
    assessment_completed_at = models.DateTimeField(default=None, null=True)
    order_num = models.PositiveIntegerField()

    class Meta:
        ordering = ["workflow", "order_num"]

    def is_submitter_complete(self):
        """True once the submitter has finished their work for this step."""
        return self.submitter_completed_at is not None

    def is_assessment_complete(self):
        """True once this step's assessment of the submission is finished."""
        return self.assessment_completed_at is not None

    def api(self):
        """
        Return the assessment API module associated with this step.

        If no API matches this step's name, None is returned.
        """
        from openassessment.assessment import peer_api, self_api
        step_api = None
        if self.name == AssessmentWorkflow.STATUS.peer:
            step_api = peer_api
        elif self.name == AssessmentWorkflow.STATUS.self:
            step_api = self_api
        return step_api

    def update(self, submission_uuid, assessment_requirements):
        """
        Refresh this step's completion timestamps from its assessment API,
        using the requirements supplied by the Workflow API.

        Intended for internal use by update_from_assessments(); see that
        method's documentation for more details. A completed step is never
        revisited, even if requirements have since changed.
        """
        step_reqs = assessment_requirements.get(self.name, {})
        changed = False

        # Has the user completed their obligations for this step?
        if not self.is_submitter_complete():
            if self.api().submitter_is_finished(submission_uuid, step_reqs):
                self.submitter_completed_at = now()
                changed = True

        # Has the step received a score?
        if not self.is_assessment_complete():
            if self.api().assessment_is_finished(submission_uuid, step_reqs):
                self.assessment_completed_at = now()
                changed = True

        if changed:
            self.save()
# Just here to record thoughts for later: # Just here to record thoughts for later:
......
{
"peer": {
"steps": ["peer"],
"requirements": {
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"both": {
"steps": ["peer", "self"],
"requirements": {
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
},
"self": {
"steps": ["self"],
"requirements": {
"self": {}
}
}
}
\ No newline at end of file
from django.db import DatabaseError from django.db import DatabaseError
import ddt
from mock import patch from mock import patch
from nose.tools import raises from nose.tools import raises
from openassessment.assessment.models import PeerWorkflow
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment import peer_api
from openassessment.workflow.models import AssessmentWorkflow from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission from submissions.models import Submission
...@@ -18,18 +18,14 @@ ITEM_1 = { ...@@ -18,18 +18,14 @@ ITEM_1 = {
"item_type": "openassessment", "item_type": "openassessment",
} }
REQUIREMENTS = { @ddt.ddt
"peer": {
"must_grade": 5,
"must_be_graded_by": 3,
}
}
class TestAssessmentWorkflowApi(CacheResetTest): class TestAssessmentWorkflowApi(CacheResetTest):
def test_create_workflow(self): @ddt.file_data('data/assessments.json')
def test_create_workflow(self, data):
first_step = data["steps"][0] if data["steps"] else "peer"
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod") submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"]) workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow_keys = set(workflow.keys()) workflow_keys = set(workflow.keys())
self.assertEqual( self.assertEqual(
...@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest): ...@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest):
} }
) )
self.assertEqual(workflow["submission_uuid"], submission["uuid"]) self.assertEqual(workflow["submission_uuid"], submission["uuid"])
self.assertEqual(workflow["status"], "peer") self.assertEqual(workflow["status"], first_step)
workflow_from_get = workflow_api.get_workflow_for_submission( workflow_from_get = workflow_api.get_workflow_for_submission(
submission["uuid"], REQUIREMENTS submission["uuid"], data["requirements"]
) )
del workflow_from_get['status_details'] del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get) self.assertEqual(workflow, workflow_from_get)
def test_need_valid_submission_uuid(self): @ddt.file_data('data/assessments.json')
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist # submission doesn't exist
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError): with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow("xxxxxxxxxxx") workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])
# submission_uuid is the wrong type # submission_uuid is the wrong type
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError): with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow(123) workflow = workflow_api.create_workflow(123, data["steps"])
@patch.object(Submission.objects, 'get') @patch.object(Submission.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError) @raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_submissions_errors_wrapped(self, mock_get): def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!") mock_get.side_effect = Exception("Kaboom!")
workflow_api.create_workflow("zzzzzzzzzzzzzzz") workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])
@patch.object(AssessmentWorkflow.objects, 'create') @patch.object(AssessmentWorkflow.objects, 'create')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], data["steps"])
@patch.object(PeerWorkflow.objects, 'get_or_create')
@raises(workflow_api.AssessmentWorkflowInternalError) @raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_errors_wrapped(self, mock_create): def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!") mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble") submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"]) workflow_api.create_workflow(submission["uuid"], ["peer", "self"])
@patch.object(AssessmentWorkflow.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_exception_wrapped(self, data, mock_create):
mock_create.side_effect = Exception("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.update_from_assessments(submission["uuid"], data["steps"])
def test_get_assessment_workflow_expected_errors(self): @ddt.file_data('data/assessments.json')
def test_get_assessment_workflow_expected_errors(self, data):
with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError): with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS) workflow_api.get_workflow_for_submission("0000000000000", data["requirements"])
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError): with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow_api.get_workflow_for_submission(123, REQUIREMENTS) workflow_api.get_workflow_for_submission(123, data["requirements"])
@patch.object(Submission.objects, 'get') @patch.object(Submission.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError) @raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_get_errors_wrapped(self, mock_get): def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!") mock_get.side_effect = Exception("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "We talk TV!") submission = sub_api.create_submission(ITEM_1, "We talk TV!")
workflow = workflow_api.create_workflow(submission["uuid"]) workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS) workflow_api.get_workflow_for_submission(workflow["uuid"], {})
def test_get_status_counts(self): def test_get_status_counts(self):
# Initially, the counts should all be zero # Initially, the counts should all be zero
counts = workflow_api.get_status_counts("test/1/1", "peer-problem") counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, [ self.assertEqual(counts, [
{"status": "peer", "count": 0}, {"status": "peer", "count": 0},
{"status": "self", "count": 0}, {"status": "self", "count": 0},
...@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest): ...@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done") self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")
# Now the counts should be updated # Now the counts should be updated
counts = workflow_api.get_status_counts("test/1/1", "peer-problem") counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, [ self.assertEqual(counts, [
{"status": "peer", "count": 1}, {"status": "peer", "count": 1},
{"status": "self", "count": 2}, {"status": "self", "count": 2},
...@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest): ...@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest):
# Create a workflow in a different course, same user and item # Create a workflow in a different course, same user and item
# Counts should be the same # Counts should be the same
self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer") self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem") updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts) self.assertEqual(counts, updated_counts)
# Create a workflow in the same course, different item # Create a workflow in the same course, different item
# Counts should be the same # Counts should be the same
self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer") self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem") updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts) self.assertEqual(counts, updated_counts)
def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"): def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
...@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest): ...@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
"item_type": "openassessment", "item_type": "openassessment",
}, answer) }, answer)
workflow = workflow_api.create_workflow(submission['uuid']) workflow = workflow_api.create_workflow(submission['uuid'], ["peer", "self"])
workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid']) workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
workflow_model.status = status workflow_model.status = status
workflow_model.save() workflow_model.save()
...@@ -70,12 +70,26 @@ class GradeMixin(object): ...@@ -70,12 +70,26 @@ class GradeMixin(object):
Returns: Returns:
tuple of context (dict), template_path (string) tuple of context (dict), template_path (string)
""" """
feedback = peer_api.get_assessment_feedback(self.submission_uuid) # Peer specific stuff...
assessment_steps = self.assessment_steps
submission_uuid = workflow['submission_uuid']
if "peer-assessment" in assessment_steps:
feedback = peer_api.get_assessment_feedback(submission_uuid)
peer_assessments = peer_api.get_assessments(submission_uuid)
has_submitted_feedback = feedback is not None
else:
feedback = None
peer_assessments = []
has_submitted_feedback = False
if "self-assessment" in assessment_steps:
self_assessment = self_api.get_assessment(submission_uuid)
else:
self_assessment = None
feedback_text = feedback.get('feedback', '') if feedback else '' feedback_text = feedback.get('feedback', '') if feedback else ''
student_submission = sub_api.get_submission(workflow['submission_uuid']) student_submission = sub_api.get_submission(submission_uuid)
peer_assessments = peer_api.get_assessments(student_submission['uuid'])
self_assessment = self_api.get_assessment(student_submission['uuid'])
has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
# We retrieve the score from the workflow, which in turn retrieves # We retrieve the score from the workflow, which in turn retrieves
# the score for our current submission UUID. # the score for our current submission UUID.
...@@ -94,9 +108,14 @@ class GradeMixin(object): ...@@ -94,9 +108,14 @@ class GradeMixin(object):
} }
# Update the scores we will display to the user # Update the scores we will display to the user
# Note that we are updating a *copy* of the rubric criteria stored in the XBlock field # Note that we are updating a *copy* of the rubric criteria stored in
max_scores = peer_api.get_rubric_max_scores(self.submission_uuid) # the XBlock field
median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"]) max_scores = peer_api.get_rubric_max_scores(submission_uuid)
if "peer-assessment" in assessment_steps:
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
if median_scores is not None and max_scores is not None: if median_scores is not None and max_scores is not None:
for criterion in context["rubric_criteria"]: for criterion in context["rubric_criteria"]:
criterion["median_score"] = median_scores[criterion["name"]] criterion["median_score"] = median_scores[criterion["name"]]
...@@ -114,11 +133,17 @@ class GradeMixin(object): ...@@ -114,11 +133,17 @@ class GradeMixin(object):
Returns: Returns:
tuple of context (dict), template_path (string) tuple of context (dict), template_path (string)
""" """
def _is_incomplete(step):
return (
step in workflow["status_details"] and
not workflow["status_details"][step]["complete"]
)
incomplete_steps = [] incomplete_steps = []
if not workflow["status_details"]["peer"]["complete"]: if _is_incomplete("peer"):
incomplete_steps.append("Peer Assessment") incomplete_steps.append(_("Peer Assessment"))
if not workflow["status_details"]["self"]["complete"]: if _is_incomplete("self"):
incomplete_steps.append("Self Assessment") incomplete_steps.append(_("Self Assessment"))
return ( return (
'openassessmentblock/grade/oa_grade_incomplete.html', 'openassessmentblock/grade/oa_grade_incomplete.html',
...@@ -131,7 +156,8 @@ class GradeMixin(object): ...@@ -131,7 +156,8 @@ class GradeMixin(object):
Submit feedback on an assessment. Submit feedback on an assessment.
Args: Args:
data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode). data (dict): Can provide keys 'feedback_text' (unicode) and
'feedback_options' (list of unicode).
Kwargs: Kwargs:
suffix (str): Unused suffix (str): Unused
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
import datetime as dt import datetime as dt
import logging import logging
import dateutil
import pkg_resources import pkg_resources
import pytz import pytz
...@@ -239,7 +238,9 @@ class OpenAssessmentBlock( ...@@ -239,7 +238,9 @@ class OpenAssessmentBlock(
# Include release/due dates for each step in the problem # Include release/due dates for each step in the problem
context['step_dates'] = list() context['step_dates'] = list()
for step in ['submission', 'peer-assessment', 'self-assessment']:
steps = ['submission'] + self.assessment_steps
for step in steps:
# Get the dates as a student would see them # Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False) __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
...@@ -313,6 +314,10 @@ class OpenAssessmentBlock( ...@@ -313,6 +314,10 @@ class OpenAssessmentBlock(
load('static/xml/poverty_rubric_example.xml') load('static/xml/poverty_rubric_example.xml')
), ),
( (
"OpenAssessmentBlock (Self Only) Rubric",
load('static/xml/poverty_self_only_example.xml')
),
(
"OpenAssessmentBlock Censorship Rubric", "OpenAssessmentBlock Censorship Rubric",
load('static/xml/censorship_rubric_example.xml') load('static/xml/censorship_rubric_example.xml')
), ),
...@@ -333,6 +338,10 @@ class OpenAssessmentBlock( ...@@ -333,6 +338,10 @@ class OpenAssessmentBlock(
return update_from_xml(block, node, validator=validator(block, strict_post_release=False)) return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
@property
def assessment_steps(self):
return [asmnt['name'] for asmnt in self.rubric_assessments]
def render_assessment(self, path, context_dict=None): def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML """Render an Assessment Module's HTML
...@@ -421,18 +430,17 @@ class OpenAssessmentBlock( ...@@ -421,18 +430,17 @@ class OpenAssessmentBlock(
] ]
# Resolve unspecified dates and date strings to datetimes # Resolve unspecified dates and date strings to datetimes
start, due, date_ranges = resolve_dates(self.start, self.due, [submission_range] + assessment_ranges) start, due, date_ranges = resolve_dates(
self.start, self.due, [submission_range] + assessment_ranges
)
# Based on the step, choose the date range to consider open_range = (start, due)
# We hard-code this to the submission -> peer -> self workflow for now; assessment_steps = self.assessment_steps
# later, we can revisit to make this more flexible. if step == 'submission':
open_range = (start, due)
if step == "submission":
open_range = date_ranges[0] open_range = date_ranges[0]
if step == "peer-assessment": elif step in assessment_steps:
open_range = date_ranges[1] step_index = assessment_steps.index(step)
if step == "self-assessment": open_range = date_ranges[1 + step_index]
open_range = date_ranges[2]
# Course staff always have access to the problem # Course staff always have access to the problem
if course_staff is None: if course_staff is None:
......
import logging import logging
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from webob import Response
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment import peer_api from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import ( from openassessment.assessment.peer_api import (
PeerAssessmentInternalError, PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentRequestError,
...@@ -114,6 +117,8 @@ class PeerAssessmentMixin(object): ...@@ -114,6 +117,8 @@ class PeerAssessmentMixin(object):
number of assessments. number of assessments.
""" """
if "peer-assessment" not in self.assessment_steps:
return Response(u"")
continue_grading = data.params.get('continue_grading', False) continue_grading = data.params.get('continue_grading', False)
path, context_dict = self.peer_path_and_context(continue_grading) path, context_dict = self.peer_path_and_context(continue_grading)
return self.render_assessment(path, context_dict) return self.render_assessment(path, context_dict)
......
...@@ -2,6 +2,8 @@ import logging ...@@ -2,6 +2,8 @@ import logging
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from xblock.core import XBlock from xblock.core import XBlock
from webob import Response
from openassessment.assessment import self_api from openassessment.assessment import self_api
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from submissions import api as submission_api from submissions import api as submission_api
...@@ -24,6 +26,9 @@ class SelfAssessmentMixin(object): ...@@ -24,6 +26,9 @@ class SelfAssessmentMixin(object):
@XBlock.handler @XBlock.handler
def render_self_assessment(self, data, suffix=''): def render_self_assessment(self, data, suffix=''):
if "self-assessment" not in self.assessment_steps:
return Response(u"")
try: try:
path, context = self.self_path_and_context() path, context = self.self_path_and_context()
except: except:
......
...@@ -134,7 +134,59 @@ ...@@ -134,7 +134,59 @@
"score": "", "score": "",
"feedback_text": "", "feedback_text": "",
"student_submission": "", "student_submission": "",
"peer_assessments": [], "peer_assessments": [
{
"submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
"points_earned": 5,
"points_possible": 6,
"rubric": {
"criteria": [
{
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 1,
"points": 1,
"name": "Fair"
}
],
"points_possible": 2
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
}
]
}
}
],
"self_assessment": {}, "self_assessment": {},
"rubric_criteria": [], "rubric_criteria": [],
"has_submitted_feedback": false "has_submitted_feedback": false
...@@ -146,4 +198,4 @@ ...@@ -146,4 +198,4 @@
"context": {}, "context": {},
"output": "oa_edit.html" "output": "oa_edit.html"
} }
] ]
\ No newline at end of file
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.peerView=new OpenAssessment.PeerView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.peerView.load();this.renderSelfAssessmentStep();this.gradeView.load();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},renderSelfAssessmentStep:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var 
numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfAssess:function(){var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;view.toggleActionError("self",null);view.selfSubmitEnabled(false);this.server.selfAssess(optionsSelected).done(function(){view.peerView.load();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))},getStepActionsErrorMessage:function(){return $(".step__actions 
.message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. 
Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return 
$(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};OpenAssessment.PeerView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.PeerView.prototype={load:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},loadContinuedAssessment:function(){var 
view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlersForContinuedAssessment()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},installHandlers:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel,$.proxy(view.loadContinuedAssessment,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})},installHandlersForContinuedAssessment:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;var baseView=view.baseView;this.peerAssessRequest(function(){view.load();baseView.renderSelfAssessmentStep();baseView.gradeView.load();baseView.scrollToTop()})},continuedPeerAssess:function(){var view=this;var gradeView=this.baseView.gradeView;var 
baseView=view.baseView;view.peerAssessRequest(function(){view.loadContinuedAssessment();gradeView.load();baseView.scrollToTop()})},overallFeedback:function(overallFeedback){var selector="#assessment__rubric__question--feedback__value";if(typeof overallFeedback==="undefined"){return $(selector,this.element).val()}else{$(selector,this.element).val(overallFeedback)}},criterionFeedback:function(criterionFeedback){var selector="#peer-assessment--001__assessment textarea.answer__value";var feedback={};$(selector,this.element).each(function(index,sel){if(typeof criterionFeedback!=="undefined"){$(sel).val(criterionFeedback[sel.name]);feedback[sel.name]=criterionFeedback[sel.name]}else{feedback[sel.name]=$(sel).val()}});return feedback},optionsSelected:function(optionsSelected){var selector="#peer-assessment--001__assessment input[type=radio]";if(typeof optionsSelected==="undefined"){var options={};$(selector+":checked",this.element).each(function(index,sel){options[sel.name]=sel.value});return options}else{$(selector,this.element).prop("checked",false);$(selector,this.element).each(function(index,sel){if(optionsSelected.hasOwnProperty(sel.name)){if(sel.value==optionsSelected[sel.name]){$(sel).prop("checked",true)}}})}},peerAssessRequest:function(successFunction){var view=this;view.baseView.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(this.optionsSelected(),this.criterionFeedback(),this.overallFeedback()).done(successFunction).fail(function(errMsg){view.baseView.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})}};OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var 
view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return"If you leave this page without saving or submitting your response, "+"you'll lose any work you've done on the response."}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var 
isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}},save:function(){this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.peerView.load();this.baseView.gradeView.load();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");var 
payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,criterionFeedback,overallFeedback){var url=this.url("peer_assess");var payload=JSON.stringify({options_selected:optionsSelected,criterion_feedback:criterionFeedback,overall_feedback:overallFeedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be 
saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}} if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.peerView=new OpenAssessment.PeerView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.loadAssessmentModules();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},loadAssessmentModules:function(){this.peerView.load();this.renderSelfAssessmentStep();this.gradeView.load()},renderSelfAssessmentStep:function(){var 
view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfAssess:function(){var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;view.toggleActionError("self",null);view.selfSubmitEnabled(false);this.server.selfAssess(optionsSelected).done(function(){view.loadAssessmentModules();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var 
container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))},getStepActionsErrorMessage:function(){return $(".step__actions .message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. 
Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return 
$(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};OpenAssessment.PeerView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.PeerView.prototype={load:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},loadContinuedAssessment:function(){var 
view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlersForContinuedAssessment()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},installHandlers:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel,$.proxy(view.loadContinuedAssessment,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})},installHandlersForContinuedAssessment:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;var baseView=view.baseView;this.peerAssessRequest(function(){view.load();baseView.loadAssessmentModules();baseView.scrollToTop()})},continuedPeerAssess:function(){var view=this;var gradeView=this.baseView.gradeView;var 
baseView=view.baseView;view.peerAssessRequest(function(){view.loadContinuedAssessment();gradeView.load();baseView.scrollToTop()})},overallFeedback:function(overallFeedback){var selector="#assessment__rubric__question--feedback__value";if(typeof overallFeedback==="undefined"){return $(selector,this.element).val()}else{$(selector,this.element).val(overallFeedback)}},criterionFeedback:function(criterionFeedback){var selector="#peer-assessment--001__assessment textarea.answer__value";var feedback={};$(selector,this.element).each(function(index,sel){if(typeof criterionFeedback!=="undefined"){$(sel).val(criterionFeedback[sel.name]);feedback[sel.name]=criterionFeedback[sel.name]}else{feedback[sel.name]=$(sel).val()}});return feedback},optionsSelected:function(optionsSelected){var selector="#peer-assessment--001__assessment input[type=radio]";if(typeof optionsSelected==="undefined"){var options={};$(selector+":checked",this.element).each(function(index,sel){options[sel.name]=sel.value});return options}else{$(selector,this.element).prop("checked",false);$(selector,this.element).each(function(index,sel){if(optionsSelected.hasOwnProperty(sel.name)){if(sel.value==optionsSelected[sel.name]){$(sel).prop("checked",true)}}})}},peerAssessRequest:function(successFunction){var view=this;view.baseView.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(this.optionsSelected(),this.criterionFeedback(),this.overallFeedback()).done(successFunction).fail(function(errMsg){view.baseView.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})}};OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var 
view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return"If you leave this page without saving or submitting your response, "+"you'll lose any work you've done on the response."}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var 
isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}},save:function(){this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.loadAssessmentModules();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");var 
payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,criterionFeedback,overallFeedback){var url=this.url("peer_assess");var payload=JSON.stringify({options_selected:optionsSelected,criterion_feedback:criterionFeedback,overall_feedback:overallFeedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be 
saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}
\ No newline at end of file \ No newline at end of file
...@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() { ...@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() {
this.showLoadError = function(msg) {}; this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {}; this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {}; this.setUpCollapseExpand = function(sel) {};
this.renderSelfAssessmentStep = function() {};
this.scrollToTop = function() {}; this.scrollToTop = function() {};
this.gradeView = { load: function() {} }; this.loadAssessmentModules = function() {};
}; };
// Stubs // Stubs
......
...@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() { ...@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() {
// Stub base view // Stub base view
var StubBaseView = function() { var StubBaseView = function() {
this.loadAssessmentModules = function() {};
this.peerView = { load: function() {} }; this.peerView = { load: function() {} };
this.gradeView = { load: function() {} }; this.gradeView = { load: function() {} };
this.showLoadError = function(msg) {}; this.showLoadError = function(msg) {};
...@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() { ...@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() {
}).promise(); }).promise();
}); });
spyOn(view, 'load'); spyOn(view, 'load');
spyOn(baseView.peerView, 'load'); spyOn(baseView, 'loadAssessmentModules');
view.response('Test response'); view.response('Test response');
view.submit(); view.submit();
// Expect the current and next step to have been reloaded // Expect the current and next step to have been reloaded
expect(view.load).toHaveBeenCalled(); expect(view.load).toHaveBeenCalled();
expect(baseView.peerView.load).toHaveBeenCalled(); expect(baseView.loadAssessmentModules).toHaveBeenCalled();
}); });
it("enables the unsaved work warning when the user changes the response text", function() { it("enables the unsaved work warning when the user changes the response text", function() {
......
...@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = { ...@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
* Asynchronously load each sub-view into the DOM. Asynchronously load each sub-view into the DOM.
*/ **/
load: function() { load: function() {
this.responseView.load(); this.responseView.load();
this.peerView.load(); this.loadAssessmentModules();
this.renderSelfAssessmentStep();
this.gradeView.load();
// Set up expand/collapse for course staff debug, if available // Set up expand/collapse for course staff debug, if available
courseStaffDebug = $('.wrapper--staff-info'); courseStaffDebug = $('.wrapper--staff-info');
...@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = { ...@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
Refresh the Assessment Modules. This should be called any time an action is
performed by the user.
**/
loadAssessmentModules: function() {
this.peerView.load();
this.renderSelfAssessmentStep();
this.gradeView.load();
},
/**
Render the self-assessment step. Render the self-assessment step.
**/ **/
renderSelfAssessmentStep: function() { renderSelfAssessmentStep: function() {
...@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = { ...@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = {
this.server.selfAssess(optionsSelected).done( this.server.selfAssess(optionsSelected).done(
function() { function() {
view.peerView.load(); view.loadAssessmentModules();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.scrollToTop(); view.scrollToTop();
} }
).fail(function(errMsg) { ).fail(function(errMsg) {
...@@ -181,14 +187,14 @@ OpenAssessment.BaseView.prototype = { ...@@ -181,14 +187,14 @@ OpenAssessment.BaseView.prototype = {
toggleActionError: function(type, msg) { toggleActionError: function(type, msg) {
var element = this.element; var element = this.element;
var container = null; var container = null;
if (type == 'save') { if (type == 'save') {
container = '.response__submission__actions'; container = '.response__submission__actions';
} }
else if (type == 'submit' || type == 'peer' || type == 'self') { else if (type == 'submit' || type == 'peer' || type == 'self') {
container = '.step__actions'; container = '.step__actions';
} }
else if (type == 'feedback_assess') { else if (type == 'feedback_assess') {
container = '.submission__feedback__actions'; container = '.submission__feedback__actions';
} }
// If we don't have anywhere to put the message, just log it to the console // If we don't have anywhere to put the message, just log it to the console
...@@ -219,10 +225,10 @@ OpenAssessment.BaseView.prototype = { ...@@ -219,10 +225,10 @@ OpenAssessment.BaseView.prototype = {
$(container + ' .step__status__value .copy').html(gettext('Unable to Load')); $(container + ' .step__status__value .copy').html(gettext('Unable to Load'));
}, },
/** /**
* Get the contents of the Step Actions error message box, for unit test validation. * Get the contents of the Step Actions error message box, for unit test validation.
* *
* Step Actions are the UX-level parts of the student interaction flow - * Step Actions are the UX-level parts of the student interaction flow -
* Submission, Peer Assessment, and Self Assessment. Since steps are mutually * Submission, Peer Assessment, and Self Assessment. Since steps are mutually
* exclusive, only one error box should be rendered on screen at a time. * exclusive, only one error box should be rendered on screen at a time.
* *
......
...@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = { ...@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = {
var baseView = view.baseView; var baseView = view.baseView;
this.peerAssessRequest(function() { this.peerAssessRequest(function() {
view.load(); view.load();
baseView.renderSelfAssessmentStep(); baseView.loadAssessmentModules();
baseView.gradeView.load();
baseView.scrollToTop(); baseView.scrollToTop();
}); });
}, },
......
...@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = { ...@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = {
**/ **/
moveToNextStep: function() { moveToNextStep: function() {
this.load(); this.load();
this.baseView.peerView.load(); this.baseView.loadAssessmentModules();
this.baseView.gradeView.load();
// Disable the "unsaved changes" warning if the user // Disable the "unsaved changes" warning if the user
// tries to navigate to another page. // tries to navigate to another page.
......
<openassessment submission_due="2015-03-11T18:20">
<title>
Global Poverty
</title>
<rubric>
<prompt>
Given the state of the world today, what do you think should be done to combat poverty?
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion>
<name>concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explanation>
</option>
</criterion>
<criterion>
<name>clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation></explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation></explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation></explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation></explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explanation>
</option>
</criterion>
<criterion>
<name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation></explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation></explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation></explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation></explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation></explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation></explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
...@@ -124,7 +124,7 @@ class SubmissionMixin(object): ...@@ -124,7 +124,7 @@ class SubmissionMixin(object):
student_sub_dict = {'text': student_sub} student_sub_dict = {'text': student_sub}
submission = api.create_submission(student_item_dict, student_sub_dict) submission = api.create_submission(student_item_dict, student_sub_dict)
workflow_api.create_workflow(submission["uuid"]) self.create_workflow(submission["uuid"])
self.submission_uuid = submission["uuid"] self.submission_uuid = submission["uuid"]
# Emit analytics event... # Emit analytics event...
......
...@@ -10,7 +10,9 @@ ...@@ -10,7 +10,9 @@
{ {
"name": "self-assessment" "name": "self-assessment"
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"peer_only": { "peer_only": {
"valid": false, "valid": false,
...@@ -20,15 +22,19 @@ ...@@ -20,15 +22,19 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"self_only": { "self_only": {
"valid": false, "valid": true,
"assessments": [ "assessments": [
{ {
"name": "self-assessment" "name": "self-assessment"
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"self_before_peer": { "self_before_peer": {
"valid": false, "valid": false,
...@@ -41,7 +47,9 @@ ...@@ -41,7 +47,9 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"peer_then_peer": { "peer_then_peer": {
"valid": false, "valid": false,
...@@ -56,6 +64,8 @@ ...@@ -56,6 +64,8 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
} }
} }
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
<assessment name="peer-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" />
</assessments>
</openassessment>
{ {
"empty_dict": { "empty_dict": {
"assessment": {} "assessments": [{}],
}, "current_assessments": null,
"must_be_graded_by_zero": { "is_released": false
"assessment": {
"name": "self-assessment",
"must_grade": 1,
"must_be_graded_by": 0
}
}, },
"unsupported_type": { "unsupported_type": {
"assessment": { "assessments": [
"name": "unsupported-assessment", {
"must_grade": 5, "name": "peer-assessment",
"must_be_graded_by": 3 "must_grade": 5,
} "must_be_graded_by": 3
},
{
"name": "self-assessment"
},
{
"name": "unsupported-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
}, },
"no_type": { "no_type": {
"assessment": { "assessments": [
"must_grade": 5, {
"must_be_graded_by": 3 "name": "self-assessment"
} },
{
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
}, },
"unsupported_unicode_type": { "unsupported_unicode_type": {
"assessment": { "assessments": [
"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽", {
"must_grade": 5, "name": "self-assessment"
"must_be_graded_by": 3 },
} {
"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
}, },
"no_must_grade": { "no_must_grade": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_be_graded_by": 3 "name": "peer-assessment",
} "must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}, },
"no_must_be_graded_by": { "no_must_be_graded_by": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_grade": 5 "name": "peer-assessment",
} "must_grade": 5
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}, },
"must_grade_less_than_must_be_graded_by": { "must_grade_less_than_must_be_graded_by": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_grade": 4, "name": "peer-assessment",
"must_be_graded_by": 5 "must_grade": 4,
} "must_be_graded_by": 5
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}, },
"must_grade_zero": { "must_grade_zero": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_grade": 0, "name": "peer-assessment",
"must_be_graded_by": 0 "must_grade": 0,
} "must_be_graded_by": 0
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}, },
"must_be_graded_by_zero": { "must_be_graded_by_zero": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_grade": 1, "name": "peer-assessment",
"must_be_graded_by": 0 "must_grade": 1,
} "must_be_graded_by": 0
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"remove_peer_mid_flight": {
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": [
{
"name": "self-assessment"
}
],
"is_released": true
},
"swap_peer_and_self_mid_flight": {
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": [
{
"name": "self-assessment"
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"is_released": true
} }
} }
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Only Self Assessment</title>
<prompt> <prompt>
Given the state of the world today, what do you think should be done to Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words. combat poverty? Please answer in a short essay of 200-300 words.
......
{ {
"peer": { "peer_then_self": {
"assessment": { "assessments": [
"name": "peer-assessment", {
"must_grade": 5, "name": "peer-assessment",
"must_be_graded_by": 3 "must_grade": 5,
} "must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}, },
"self": { "self_only": {
"assessment": { "assessments": [
"name": "self-assessment", {
"must_grade": 2, "name": "self-assessment"
"must_be_graded_by": 1 }
} ],
"current_assessments": null,
"is_released": false
}, },
"must_be_graded_by_equals_must_grade": { "must_be_graded_by_equals_must_grade": {
"assessment": { "assessments": [
"name": "self-assessment", {
"must_grade": 1, "name": "peer-assessment",
"must_be_graded_by": 1 "must_grade": 1,
} "must_be_graded_by": 1
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
} }
} }
...@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase):
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก' SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
STEPS = ['peer', 'self']
@scenario('data/grade_scenario.xml', user_id='Greggs') @scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock): def test_render_grade(self, xblock):
# Submit, assess, and render the grade view # Submit, assess, and render the grade view
...@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase):
scorer['student_id'] = scorer_name scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text}) scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid']) workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers)) submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
......
...@@ -92,12 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -92,12 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase):
# Test that we enforce that there are exactly two assessments, # Test that we enforce that there are exactly two assessments,
# peer ==> self # peer ==> self
# If and when we remove this restriction, this test can be deleted. # If and when we remove this restriction, this test can be deleted.
@data('data/invalid_assessment_combo_order.xml', 'data/invalid_assessment_combo_peer_only.xml')
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_update_xml_invalid_assessment_combo(self, xblock): def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
request = json.dumps({'xml': self.load_fixture_str('data/invalid_assessment_combo.xml')}) request = json.dumps(
{'xml': self.load_fixture_str(invalid_workflow)}
)
resp = self.request(xblock, 'update_xml', request, response_format='json') resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
self.assertIn("must have exactly two assessments", resp['msg'].lower()) self.assertIn("for this assignment", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment')) @data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
......
...@@ -14,27 +14,26 @@ class AssessmentValidationTest(TestCase): ...@@ -14,27 +14,26 @@ class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json') @ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data): def test_valid_assessment(self, data):
success, msg = validate_assessments([data['assessment']]) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertTrue(success) self.assertTrue(success)
self.assertEqual(msg, u'') self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json') @ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data): def test_invalid_assessment(self, data):
success, msg = validate_assessments([data['assessment']]) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertFalse(success) self.assertFalse(success)
self.assertGreater(len(msg), 0) self.assertGreater(len(msg), 0)
def test_no_assessments(self): def test_no_assessments(self):
success, msg = validate_assessments([]) success, msg = validate_assessments([], [], False)
self.assertFalse(success) self.assertFalse(success)
self.assertGreater(len(msg), 0) self.assertGreater(len(msg), 0)
# Currently, we enforce the restriction that there must be # Make sure only legal assessment combinations are allowed. For now, that's
# exactly two assessments, in the order (a) peer, then (b) self. # (peer -> self), and (self)
# If and when we remove that restriction, this test can be deleted.
@ddt.file_data('data/assessment_combo.json') @ddt.file_data('data/assessment_combo.json')
def test_enforce_peer_then_self(self, data): def test_enforce_assessment_combo_restrictions(self, data):
success, msg = validate_assessments(data['assessments'], enforce_peer_then_self=True) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertEqual(success, data['valid'], msg=msg) self.assertEqual(success, data['valid'], msg=msg)
if not success: if not success:
......
...@@ -43,33 +43,49 @@ def _duplicates(items): ...@@ -43,33 +43,49 @@ def _duplicates(items):
return set(x for x in items if counts[x] > 1) return set(x for x in items if counts[x] > 1)
def validate_assessments(assessments, enforce_peer_then_self=False): def validate_assessments(assessments, current_assessments, is_released):
""" """
Check that the assessment dict is semantically valid. Check that the assessment dict is semantically valid.
Valid assessment steps are currently:
* peer, then self
* self only
If a question has been released, the type and number of assessment steps
cannot be changed.
Args: Args:
assessments (list of dict): list of serialized assessment models. assessments (list of dict): list of serialized assessment models.
current_assessments (list of dict): list of the current serialized
Kwargs: assessment models. Used to determine if the assessment configuration
enforce_peer_then_self (bool): If True, enforce the requirement that there has changed since the question had been released.
must be exactly two assessments: first, a peer-assessment, then a self-assessment. is_released (boolean) : True if the question has been released.
Returns: Returns:
tuple (is_valid, msg) where tuple (is_valid, msg) where
is_valid is a boolean indicating whether the assessment is semantically valid is_valid is a boolean indicating whether the assessment is semantically valid
and msg describes any validation errors found. and msg describes any validation errors found.
""" """
if enforce_peer_then_self: def _self_only(assessments):
if len(assessments) != 2: return len(assessments) == 1 and assessments[0].get('name') == 'self-assessment'
return (False, _("This problem must have exactly two assessments."))
if assessments[0].get('name') != 'peer-assessment': def _peer_then_self(assessments):
return (False, _("The first assessment must be a peer assessment.")) return (
if assessments[1].get('name') != 'self-assessment': len(assessments) == 2 and
return (False, _("The second assessment must be a self assessment.")) assessments[0].get('name') == 'peer-assessment' and
assessments[1].get('name') == 'self-assessment'
)
if len(assessments) == 0: if len(assessments) == 0:
return (False, _("This problem must include at least one assessment.")) return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self)
if not (_self_only(assessments) or _peer_then_self(assessments)):
return (
False,
_("For this assignment, you can set either a peer assessment followed by a self assessment or a self assessment only.")
)
for assessment_dict in assessments: for assessment_dict in assessments:
# Supported assessment # Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']: if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
...@@ -89,6 +105,15 @@ def validate_assessments(assessments, enforce_peer_then_self=False): ...@@ -89,6 +105,15 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
if must_grade < must_be_graded_by: if must_grade < must_be_graded_by:
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.')) return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
if is_released:
if len(assessments) != len(current_assessments):
return (False, _("The number of assessments cannot be changed after the problem has been released."))
names = [assessment.get('name') for assessment in assessments]
current_names = [assessment.get('name') for assessment in current_assessments]
if names != current_names:
return (False, _("The assessment type cannot be changed after the problem has been released."))
return (True, u'') return (True, u'')
...@@ -188,7 +213,12 @@ def validator(oa_block, strict_post_release=True): ...@@ -188,7 +213,12 @@ def validator(oa_block, strict_post_release=True):
""" """
def _inner(rubric_dict, submission_dict, assessments): def _inner(rubric_dict, submission_dict, assessments):
success, msg = validate_assessments(assessments, enforce_peer_then_self=True) current_assessments = oa_block.rubric_assessments
success, msg = validate_assessments(
assessments,
current_assessments,
strict_post_release and oa_block.is_released()
)
if not success: if not success:
return (False, msg) return (False, msg)
......
...@@ -8,6 +8,25 @@ class WorkflowMixin(object): ...@@ -8,6 +8,25 @@ class WorkflowMixin(object):
def handle_workflow_info(self, data, suffix=''): def handle_workflow_info(self, data, suffix=''):
return self.get_workflow_info() return self.get_workflow_info()
def create_workflow(self, submission_uuid):
steps = self._create_step_list()
workflow_api.create_workflow(submission_uuid, steps)
def _create_step_list(self):
def _convert_rubric_assessment_name(ra_name):
"""'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
short_name, suffix = ra_name.split("-")
return short_name
# rubric_assessments stores names as "self-assessment",
# "peer-assessment", while the model is expecting "self", "peer".
# Therefore, this conversion step. We should refactor later to
# standardize.
return [
_convert_rubric_assessment_name(ra["name"])
for ra in self.rubric_assessments
]
def workflow_requirements(self): def workflow_requirements(self):
""" """
Retrieve the requirements from each assessment module Retrieve the requirements from each assessment module
...@@ -93,6 +112,7 @@ class WorkflowMixin(object): ...@@ -93,6 +112,7 @@ class WorkflowMixin(object):
status_counts = workflow_api.get_status_counts( status_counts = workflow_api.get_status_counts(
course_id=student_item['course_id'], course_id=student_item['course_id'],
item_id=student_item['item_id'], item_id=student_item['item_id'],
steps=self._create_step_list(),
) )
num_submissions = sum(item['count'] for item in status_counts) num_submissions = sum(item['count'] for item in status_counts)
return status_counts, num_submissions return status_counts, num_submissions
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment