import logging
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import (
    PeerAssessmentInternalError, PeerAssessmentRequestError,
    PeerAssessmentWorkflowError
)
import openassessment.workflow.api as workflow_api
from .resolve_dates import DISTANT_FUTURE

logger = logging.getLogger(__name__)


class PeerAssessmentMixin(object):
    """The Peer Assessment Mixin for all Peer Functionality.

    Abstracts all functionality and handlers associated with Peer Assessment.
    All Peer Assessment API calls should be contained within this Mixin as
    well.

    PeerAssessmentMixin is a Mixin for the OpenAssessmentBlock. Functions in
    the PeerAssessmentMixin call into the OpenAssessmentBlock functions and
    will not work outside of OpenAssessmentBlock.

    """

    @XBlock.json_handler
    def peer_assess(self, data, suffix=''):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed at this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `feedback` (unicode): Written feedback for the submission.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.

        """
        # Validate the request
        if 'feedback' not in data:
            return {'success': False, 'msg': _('Must provide feedback in the assessment')}

        if 'options_selected' not in data:
            return {'success': False, 'msg': _('Must provide options selected in the assessment')}

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            rubric_dict = {
                'criteria': self.rubric_criteria
            }
            assessment_dict = {
                "feedback": data['feedback'],
                "options_selected": data["options_selected"],
            }

            try:
                assessment = peer_api.create_assessment(
                    self.submission_uuid,
                    self.get_student_item_dict()["student_id"],
                    assessment_dict,
                    rubric_dict,
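                    # The "Graded By" setting: how many peer assessments each
                    # submission must receive before it is fully graded.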
                    assessment_ui_model['must_be_graded_by']
                )
                # Emit analytics event...
                self.runtime.publish(
                    self,
                    "openassessmentblock.peer_assess",
                    {
                        "feedback": assessment["feedback"],
                        "rubric": {
                            "content_hash": assessment["rubric"]["content_hash"],
                        },
                        "scorer_id": assessment["scorer_id"],
                        "score_type": assessment["score_type"],
                        "scored_at": assessment["scored_at"],
                        "submission_uuid": assessment["submission_uuid"],
                        "parts": [
                            {
                                "option": {
                                    "name": part["option"]["name"],
                                    "points": part["option"]["points"]
                                }
                            }
                            for part in assessment["parts"]
                        ]
                    }
                )
            except PeerAssessmentRequestError as ex:
                return {'success': False, 'msg': ex.message}
            except PeerAssessmentInternalError:
                msg = _("Internal error occurred while creating the assessment")
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Update both the workflow that the submission we're assessing
            # belongs to, as well as our own (e.g. have we evaluated enough?)
            try:
                if assessment:
                    self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
                self.update_workflow_status()
            except workflow_api.AssessmentWorkflowError:
                msg = _('Could not update workflow status.')
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {'success': True, 'msg': u''}

        else:
            return {'success': False, 'msg': _('Could not load peer assessment.')}

    @XBlock.handler
    def render_peer_assessment(self, data, suffix=''):
        """Renders the Peer Assessment HTML section of the XBlock

        Generates the peer assessment HTML for the first section of an Open
        Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
        more information on rendering XBlock sections.

        Args:
            data (dict): May contain an attribute 'continue_grading', which
                allows a student to continue grading peers past the required
                number of assessments.

        """
        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        finished = False
        problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")

        context_dict = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes"  # TODO: Need to configure this.
        }

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context_dict['peer_due'] = due_date

        workflow = self.get_workflow_info()
        if workflow is None:
            return self.render_assessment(path, context_dict)
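        # "Continue grading" (turbo mode) lets a student keep assessing peers
        # after completing the required number of assessments.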
        continue_grading = (
            data.params.get('continue_grading', False)
            and workflow["status_details"]["peer"]["complete"]
        )

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            finished, count = peer_api.has_finished_required_evaluating(
                student_item,
                assessment["must_grade"]
            )
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if continue_grading:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & review another response"
                )
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move onto next step"
                )
            else:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move to response #{}"
                ).format(count + 2)

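        # Choose which template to render based on whether the step is closed
        # and on the student's current workflow status.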
        if reason == 'due' and problem_closed:
            path = 'openassessmentblock/peer/oa_peer_closed.html'
        elif reason == 'start' and problem_closed:
            context_dict["peer_start"] = start_date
            path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        elif workflow.get("status") == "peer":
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_assessment.html'
                context_dict["peer_submission"] = peer_sub
            else:
                path = 'openassessmentblock/peer/oa_peer_waiting.html'
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                context_dict["peer_submission"] = peer_sub
            else:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
        elif workflow.get("status") == "done" or finished:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        return self.render_assessment(path, context_dict)

    def get_peer_submission(self, student_item_dict, assessment):
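        """Fetch a submission for the given student to assess.

        Args:
            student_item_dict (dict): The student item for the current user.
            assessment (dict): The peer-assessment module definition; its
                'must_be_graded_by' setting is passed through to the peer API.

        Returns:
            dict: The submission to assess, or False if no submission is
                available or a workflow error occurs.

        """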
        peer_submission = False
        try:
            peer_submission = peer_api.get_submission_to_assess(
                student_item_dict,
                assessment["must_be_graded_by"],
                True
            )
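            # Emit an analytics event recording which submission was served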
            self.runtime.publish(
                self,
                "openassessmentblock.get_peer_submission",
                {
                    "requesting_student_id": student_item_dict["student_id"],
                    "course_id": student_item_dict["course_id"],
                    "item_id": student_item_dict["item_id"],
                    "submission_returned_uuid": (
                        peer_submission["uuid"] if peer_submission else None
                    )
                }
            )
        except PeerAssessmentWorkflowError as err:
            logger.exception(err)

        return peer_submission