Commit c80522da by Stephen Sanchez

Merge pull request #44 from edx/sanchez/refactor-ui-models

Removing the UI Models in favor of a simple dictionary.
parents 435f6d5d fdbf7a10
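In short, the diff replaces attribute access on dedicated UI model classes with plain dictionary lookups: the static UI attributes now live in a module-level UI_MODELS dict and are merged into each configured assessment at render time. A minimal before/after sketch (identifiers taken from the diff below, values illustrative only):

# Before: configuration lived on UI model instances.
assessment = PeerAssessmentUIModel()
assessment.must_grade = 5
must_grade = int(assessment.must_grade)

# After: each assessment is a plain dict.
assessment = {"assessment_type": "peer-assessment", "must_grade": 5}
must_grade = int(assessment["must_grade"])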
@@ -14,7 +14,6 @@ from xblock.fragment import Fragment
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.ui_models import PeerAssessmentUIModel
from scenario_parser import ScenarioParser
@@ -114,16 +113,47 @@ DEFAULT_RUBRIC_CRITERIA = [
}
]
DEFAULT_PEER_ASSESSMENT = PeerAssessmentUIModel()
DEFAULT_PEER_ASSESSMENT.name = "peer-assessment"
DEFAULT_PEER_ASSESSMENT.start_datetime = datetime.datetime.now().isoformat()
DEFAULT_PEER_ASSESSMENT.must_grade = 5
DEFAULT_PEER_ASSESSMENT.must_be_graded_by = 3
UI_MODELS = {
"submission": {
"assessment_type": "submission",
"name": "submission",
"class_id": "",
"navigation_text": "Your response to this problem",
"title": "Your Response"
},
"peer-assessment": {
"assessment_type": "peer-assessment",
"namne": "peer-assessment",
"class_id": "",
"navigation_text": "Your assessment(s) of peer responses",
"title": "Assess Peers' Responses"
},
"self-assessment": {
"assessment_type": "self-assessment",
"name": "self-assessment",
"class_id": "",
"navigation_text": "Your assessment of your response",
"title": "Assess Your Response"
}
}
"""
The Default Peer Assessment is created as an example of how this XBlock can be
configured. If no configuration is specified, this is the default assessment
module(s) associated with the XBlock.
"""
DEFAULT_PEER_ASSESSMENT = {
"start_datetime": datetime.datetime.now().isoformat(),
"must_grade": 5,
"must_be_graded_by": 3,
}
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
]
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
@@ -227,7 +257,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
general frame of the Open Ended Assessment Question.
"""
trace = self.get_xblock_trace()
ui_models = self._create_ui_models()
grade_state = self.get_grade_state()
# All data we intend to pass to the front end.
context_dict = {
@@ -236,7 +266,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
"question": self.prompt,
"rubric_instructions": self.rubric_instructions,
"rubric_criteria": self.rubric_criteria,
"rubric_assessments": [assessment.create_ui_model() for assessment in self.rubric_assessments],
"rubric_assessments": ui_models,
"grade_state": grade_state,
}
@@ -248,6 +278,21 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
frag.initialize_js('OpenAssessmentBlock')
return frag
def _create_ui_models(self):
"""Combine UI attributes and XBlock configuration into a UI model.
This method takes all configuration for this XBlock instance and appends
UI attributes to create a UI Model for rendering all assessment modules.
This allows a clean separation of static UI attributes from persistent
XBlock configuration.
"""
ui_models = [UI_MODELS["submission"]]
for assessment in self.rubric_assessments:
ui_model = UI_MODELS[assessment["assessment_type"]]
ui_models.append(dict(assessment, **ui_model))
return ui_models
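As a rough illustration of the merge above (input values assumed for the example, not taken from a real scenario): dict(assessment, **ui_model) starts from the persisted configuration and overlays the static UI attributes, with the UI_MODELS keys winning on any collision (e.g. "name").

# Hypothetical configured peer assessment, e.g. as produced by ScenarioParser.
assessment = {
    "assessment_type": "peer-assessment",
    "name": "my-peer-step",
    "must_grade": 5,
    "must_be_graded_by": 3,
}
ui_model = UI_MODELS[assessment["assessment_type"]]
merged = dict(assessment, **ui_model)
# merged keeps the configuration ("must_grade": 5, "must_be_graded_by": 3)
# and gains the static UI attributes ("title", "navigation_text", ...);
# "name" is overwritten here by the UI_MODELS value "peer-assessment".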
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench.
@@ -46,8 +46,8 @@ class PeerAssessmentMixin(object):
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment_ui_model.must_grade),
int(assessment_ui_model.must_be_graded_by),
int(assessment_ui_model["must_grade"]),
int(assessment_ui_model["must_be_graded_by"]),
assessment_dict,
rubric_dict,
)
@@ -77,12 +77,12 @@ class PeerAssessmentMixin(object):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
student_item_dict, assessment.must_be_graded_by
student_item_dict, assessment["must_be_graded_by"]
)
peer_submission = peer_api.get_submission_to_assess(
student_item_dict,
assessment.must_be_graded_by
assessment["must_be_graded_by"]
)
except PeerAssessmentWorkflowError:
@@ -94,5 +94,5 @@ class PeerAssessmentMixin(object):
"""Get a configured assessment module by name.
"""
for assessment in self.rubric_assessments:
if assessment.name == mixin_name:
if assessment["name"] == mixin_name:
return assessment
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
from openassessment.xblock.ui_models import PeerAssessmentUIModel, SelfAssessmentUIModel, SubmissionUIModel
class ScenarioParser(object):
@@ -82,21 +81,23 @@ class ScenarioParser(object):
must_grade="5"
must_be_graded_by="3" />
</peer-assessment>"""
assessment_list = [SubmissionUIModel()]
assessment_list = []
for asmnt in assessments:
assessment = None
assessment_type = asmnt.tag
if 'peer-assessment' == assessment_type:
assessment = PeerAssessmentUIModel()
assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
assessment = {
"must_grade": int(asmnt.attrib.get('must_grade', 1)),
"must_be_graded_by": int(asmnt.attrib.get('must_be_graded_by', 0))
}
elif 'self-assessment' == assessment_type:
assessment = SelfAssessmentUIModel()
assessment = {}
if assessment:
assessment.name = asmnt.attrib.get('name', '')
assessment.start_datetime = asmnt.attrib.get('start', None)
assessment.due_datetime = asmnt.attrib.get('due', None)
if assessment is not None:
assessment["assessment_type"] = assessment_type
assessment["name"] = asmnt.attrib.get('name', None)
assessment["start_datetime"] = asmnt.attrib.get('start', None)
assessment["due_datetime"] = asmnt.attrib.get('due', None)
assessment_list.append(assessment)
return assessment_list
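For the <peer-assessment> XML fragment shown in the docstring above, the parser would now return a dictionary along these lines (a sketch of the expected shape based on the code, not captured output):

{
    "assessment_type": "peer-assessment",
    "name": None,              # None unless a name attribute is present
    "start_datetime": None,
    "due_datetime": None,
    "must_grade": 5,
    "must_be_graded_by": 3,
}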
@@ -75,22 +75,16 @@ class TestScenarioParser(TestCase):
assessments_xml = etree.fromstring(assessments)
parsed_list = self.test_parser.get_assessments(assessments_xml)
# Need to capture Submissions in Tests
self.assertEqual(parsed_list[0].assessment_type, 'submission')
# Self assessments take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[1].assessment_type, 'self-assessment')
self.assertEqual(parsed_list[1].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[1].must_grade, 1)
self.assertEqual(parsed_list[1].must_be_graded_by, 0)
self.assertEqual(parsed_list[0]["assessment_type"], 'self-assessment')
self.assertEqual(parsed_list[0]["name"], '0382e03c808e4f2bb12dfdd2d45d5c4b')
# Peer assessments are more interesting
self.assertEqual(parsed_list[2].assessment_type, 'peer-assessment')
self.assertEqual(parsed_list[2].name, '')
self.assertEqual(parsed_list[2].must_grade, 5)
self.assertEqual(parsed_list[2].must_be_graded_by, 3)
self.assertEqual(parsed_list[1]["assessment_type"], 'peer-assessment')
self.assertEqual(parsed_list[1]["must_grade"], 5)
self.assertEqual(parsed_list[1]["must_be_graded_by"], 3)
# We can parse arbitrary workflow descriptions as a list of assessments.
# Whether or not the workflow system can use them is another matter
self.assertEqual(parsed_list[3].assessment_type, 'self-assessment')
self.assertEqual(parsed_list[2]["assessment_type"], 'self-assessment')
"""UI Models constructed by the Open Assessment XBlock to generate HTML.
These Models should be fully constructed before reaching any templates used by
the XBlock, such that the templates should only have to render based on the
information provided. If any logic exists in the templates, it likely should
be refactored into the XBlock, and the results stored in these models.
"""
class SubmissionUIModel(object):
"""All data to be displayed to the front end regarding submissions.
All the data required to generate the Submission HTML.
"""
def __init__(self):
self.assessment_type = "submission"
self.name = "submission"
self.navigation_text = "Your response to this problem"
self.title = "Your Response"
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"navigation_text": self.navigation_text,
"title": self.title
}
class AssessmentUIModel(object):
"""Generic Assessment UI Model.
Common attributes for displaying Assessment sections of the front end.
"""
def __init__(self):
self.assessment_type = None
self.name = ''
self.start_datetime = None
self.due_datetime = None
self.must_grade = 1
self.must_be_graded_by = 0
self.navigation_text = ""
self.title = ""
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"start_datetime": self.start_datetime,
"due_datetime": self.due_datetime,
"must_grade": self.must_grade,
"must_be_graded_by": self.must_be_graded_by,
"navigation_text": self.navigation_text,
"title": self.title
}
class PeerAssessmentUIModel(AssessmentUIModel):
"""All data required to display the Peer Assessment front end.
Attributes and data specific to rendering the Peer Assessment section of
the front end.
"""
def __init__(self):
super(PeerAssessmentUIModel, self).__init__()
self.assessment_type = "peer-assessment"
self.title = "Assess Peers' Responses"
self.navigation_text = "Your assessment(s) of peer responses"
class SelfAssessmentUIModel(AssessmentUIModel):
"""All data required to display the Self Assessment front end.
Attributes and data specific to rendering the Self Assessment section of
the front end.
"""
def __init__(self):
super(SelfAssessmentUIModel, self).__init__()
self.assessment_type = "self-assessment"
self.navigation_text = "Your assessment of your response"
self.title = "Assess Your Response"
\ No newline at end of file