2702e77b
Commit
2702e77b
authored
May 01, 2014
by
Stephen Sanchez
Updating tests and some simple cleanup.
Parent: 30a5cf60
Showing 30 changed files with 587 additions and 261 deletions.
apps/openassessment/assessment/peer_api.py (+2, -6)
apps/openassessment/assessment/self_api.py (+64, -9)
apps/openassessment/assessment/test/test_peer.py (+4, -3)
apps/openassessment/assessment/test/test_self.py (+4, -4)
apps/openassessment/management/commands/create_oa_submissions.py (+2, -1)
apps/openassessment/management/tests/test_upload_oa_data.py (+1, -1)
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html (+2, -3)
apps/openassessment/tests/test_data.py (+1, -1)
apps/openassessment/workflow/api.py (+25, -15)
apps/openassessment/workflow/models.py (+75, -48)
apps/openassessment/workflow/test/data/assessments.json (+28, -0)
apps/openassessment/workflow/test/test_api.py (+47, -31)
apps/openassessment/xblock/grade_mixin.py (+4, -8)
apps/openassessment/xblock/openassessmentblock.py (+11, -12)
apps/openassessment/xblock/static/js/fixtures/templates.json (+55, -2)
apps/openassessment/xblock/static/js/openassessment.min.js (+0, -0)
apps/openassessment/xblock/static/js/spec/oa_peer.js (+1, -2)
apps/openassessment/xblock/static/js/spec/oa_response.js (+3, -2)
apps/openassessment/xblock/static/js/src/oa_base.js (+14, -8)
apps/openassessment/xblock/static/js/src/oa_peer.js (+1, -2)
apps/openassessment/xblock/static/js/src/oa_response.js (+1, -2)
apps/openassessment/xblock/static/xml/poverty_self_only_example.xml (+0, -1)
apps/openassessment/xblock/test/data/assessment_combo.json (+15, -5)
apps/openassessment/xblock/test/data/invalid_assessments.json (+142, -45)
apps/openassessment/xblock/test/data/valid_assessments.json (+37, -25)
apps/openassessment/xblock/test/test_grade.py (+3, -1)
apps/openassessment/xblock/test/test_studio.py (+8, -12)
apps/openassessment/xblock/test/test_validation.py (+4, -4)
apps/openassessment/xblock/validation.py (+27, -6)
apps/openassessment/xblock/workflow_mixin.py (+6, -2)
apps/openassessment/assessment/peer_api.py

@@ -73,11 +73,6 @@ class PeerAssessmentInternalError(PeerAssessmentError):
-def submitter_is_finished(submission_uuid, requirements):
-    """Temporary bridge method."""
-    return is_complete(submission_uuid, requirements)
-
-
-def is_complete(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         if workflow.completed_at is not None:

@@ -104,7 +99,7 @@ def get_score(submission_uuid, requirements):
         dict with keys "points_earned" and "points_possible".
     """
     # User hasn't completed their own submission yet
-    if not is_complete(submission_uuid, requirements):
+    if not submitter_is_finished(submission_uuid, requirements):
         return None
     workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)

@@ -139,6 +134,7 @@ def get_score(submission_uuid, requirements):
         "points_possible": items[0].assessment.points_possible,
     }

+def assessment_is_finished(submission_uuid, requirements):
+    return bool(get_score(submission_uuid, requirements))
apps/openassessment/assessment/self_api.py

@@ -3,6 +3,7 @@ Public interface for self-assessment.
 """
 import logging
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from dogapi import dog_stats_api
 from submissions.api import get_submission_and_student, SubmissionNotFoundError

@@ -21,13 +22,30 @@ SELF_TYPE = "SE"
 logger = logging.getLogger("openassessment.assessment.self_api")

-class SelfAssessmentRequestError(Exception):
+class SelfAssessmentError(Exception):
+    """Generic Self Assessment Error
+
+    Raised when an error occurs while processing a request related to the
+    Self Assessment Workflow.
+    """
+    pass
+
+
+class SelfAssessmentRequestError(SelfAssessmentError):
     """
     There was a problem with the request for a self-assessment.
     """
     pass

+
+class SelfAssessmentInternalError(SelfAssessmentError):
+    """
+    There was an internal problem while accessing the self-assessment api.
+    """
+    pass
+

 def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
     """
     Create a self-assessment for a submission.

@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     assessment_dict = full_assessment_dict(assessment)
     _log_assessment(assessment, submission)
-
     # Return the serialized assessment
     return assessment_dict

@@ -139,28 +156,65 @@ def get_assessment(submission_uuid):
     return serialized_assessment

-def submitter_is_finished(submission_uuid, requirements):
-    """Temporary bridge method."""
-    return is_complete(submission_uuid, requirements)
-
-
-def is_complete(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, requirements):
+    """
+    Check whether a self-assessment has been completed for a submission.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+    Returns:
+        bool
+        True if the submitter has assessed their answer
+    Examples:
+        >>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
+    """
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()

 def assessment_is_finished(submission_uuid, requirements):
-    return is_complete(submission_uuid, requirements)
+    """
+    Check whether a self-assessment has been completed. For self-assessment,
+    this function is synonymous with submitter_is_finished.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+    Returns:
+        True if the assessment is complete.
+    Examples:
+        >>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
+    """
+    return submitter_is_finished(submission_uuid, requirements)

 def get_score(submission_uuid, requirements):
+    """
+    Get the score for this particular assessment.
+
+    Args:
+        submission_uuid (str): The unique identifier for the submission
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+    Returns:
+        A dict of points earned and points possible for the given submission.
+        Returns None if no score can be determined yet.
+    Examples:
+        >>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        {
+            'points_earned': 5,
+            'points_possible': 10
+        }
+    """
     assessment = get_assessment(submission_uuid)
     if not assessment:
         return None

@@ -170,6 +224,7 @@ def get_score(submission_uuid, requirements):
         "points_possible": assessment["points_possible"]
     }

 def get_assessment_scores_by_criteria(submission_uuid):
     """Get the median score for each rubric criterion
apps/openassessment/assessment/test/test_peer.py

@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
 WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
 THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
+STEPS = ['peer', 'self']

 @ddt
 class TestPeerApi(CacheResetTest):

@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest):
         requirements = {'must_grade': REQUIRED_GRADED, 'must_be_graded_by': REQUIRED_GRADED_BY}
-        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))

     def test_completeness(self):
         """

@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest):
         requirements = {'must_grade': REQUIRED_GRADED, 'must_be_graded_by': REQUIRED_GRADED_BY}
-        self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))

     def test_find_active_assessments(self):
         buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")

@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest):
         new_student_item["student_id"] = student
         submission = sub_api.create_submission(new_student_item, answer, date)
         peer_api.create_peer_workflow(submission["uuid"])
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], STEPS)
         return submission, new_student_item
apps/openassessment/assessment/test/test_self.py

@@ -9,7 +9,7 @@ import pytz
 from openassessment.test_utils import CacheResetTest
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
-    create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
+    create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
 )

@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest):
         # Now there should be a submission, but no self-assessment
         assessment = get_assessment(submission["uuid"])
         self.assertIs(assessment, None)
-        self.assertFalse(is_complete(submission['uuid']))
+        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

         # Create a self-assessment for the submission
         assessment = create_assessment(

@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest):
         )
         # Self-assessment should be complete
-        self.assertTrue(is_complete(submission['uuid']))
+        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

         # Retrieve the self-assessment
         retrieved = get_assessment(submission["uuid"])

@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest):
     def test_is_complete_no_submission(self):
         # This submission uuid does not exist
-        self.assertFalse(is_complete('abc1234'))
+        self.assertFalse(submitter_is_finished('abc1234', {}))
apps/openassessment/management/commands/create_oa_submissions.py

@@ -9,6 +9,7 @@ from submissions import api as sub_api
 from openassessment.workflow import api as workflow_api
 from openassessment.assessment import peer_api, self_api

+STEPS = ['peer', 'self']

 class Command(BaseCommand):

@@ -131,7 +132,7 @@ class Command(BaseCommand):
         """
         answer = {'text': " ".join(loremipsum.get_paragraphs(5))}
         submission = sub_api.create_submission(student_item, answer)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], STEPS)
         workflow_api.update_from_assessments(
             submission['uuid'],
             {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
         )
apps/openassessment/management/tests/test_upload_oa_data.py

@@ -43,7 +43,7 @@ class UploadDataTest(TestCase):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Create and upload the archive of CSV files
         # This should generate the files even though
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html

@@ -143,8 +143,7 @@
             </li>
             {% endwith %}
             {% endfor %}
-            {% if feedback_text %}
+            {% if peer_assessments %}
             <li class="question question--feedback ui-toggle-visibility">
                 <h4 class="question__title ui-toggle-visibility__control">
                     <i class="ico icon-caret-right"></i>

@@ -175,7 +174,7 @@
                 {% endfor %}
                 </ul>
             </li>
-            {% endif %}
             {% endif %}
         </ol>
     </article>
apps/openassessment/tests/test_data.py

@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Generate a CSV file for the submissions
         output_streams = self._output_streams(['submission'])
apps/openassessment/workflow/api.py

@@ -87,7 +87,7 @@ def create_workflow(submission_uuid, steps):
         AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
             not exist or is of an invalid type.
         AssessmentWorkflowInternalError: Unexpected internal error, such as the
-            submissions app not being available or a database configuation
+            submissions app not being available or a database configuration
             problem.
     """

@@ -100,7 +100,7 @@ def create_workflow(submission_uuid, steps):
     try:
         submission_dict = sub_api.get_submission_and_student(submission_uuid)
-    except sub_api.SubmissionNotFoundError as err:
+    except sub_api.SubmissionNotFoundError:
         err_msg = sub_err_msg("submission not found")
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)

@@ -109,7 +109,6 @@ def create_workflow(submission_uuid, steps):
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)
     except sub_api.SubmissionInternalError as err:
-        err_msg = sub_err_msg(err)
         logger.error(err)
         raise AssessmentWorkflowInternalError(
             u"retrieving submission {} failed with unknown error: {}"

@@ -117,22 +116,32 @@ def create_workflow(submission_uuid, steps):
     )

     # Raise an error if they specify a step we don't recognize...
-    for step in steps:
-        if step not in AssessmentWorkflow.STEPS:
-            raise AssessmentWorkflowRequestError(
-                u"Step not recognized: {}; Must be one of: {}".format(
-                    step, AssessmentWorkflow.STEPS
-                )
-            )
+    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
+    if invalid_steps:
+        raise AssessmentWorkflowRequestError(
+            u"The following steps were not recognized: {}; Must be one of {}".format(
+                invalid_steps, AssessmentWorkflow.STEPS
+            )
+        )

     # We're not using a serializer to deserialize this because the only variable
     # we're getting from the outside is the submission_uuid, which is already
     # validated by this point.
+    status = AssessmentWorkflow.STATUS.peer
+    if steps[0] == "peer":
+        try:
+            peer_api.create_peer_workflow(submission_uuid)
+        except peer_api.PeerAssessmentError as err:
+            err_msg = u"Could not create assessment workflow: {}".format(err)
+            logger.exception(err_msg)
+            raise AssessmentWorkflowInternalError(err_msg)
+    elif steps[0] == "self":
+        status = AssessmentWorkflow.STATUS.self
     try:
-        peer_api.create_peer_workflow(submission_uuid)
         workflow = AssessmentWorkflow.objects.create(
             submission_uuid=submission_uuid,
-            status=AssessmentWorkflow.STATUS.peer,
+            status=status,
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )

@@ -145,7 +154,6 @@ def create_workflow(submission_uuid, steps):
         workflow.steps.add(*workflow_steps)
     except (DatabaseError, peer_api.PeerAssessmentError, sub_api.SubmissionError) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)

@@ -316,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements):
     return _serialized_with_details(workflow, assessment_requirements)

-def get_status_counts(course_id, item_id):
+def get_status_counts(course_id, item_id, steps):
     """
     Count how many workflows have each status, for a given item in a course.

     Kwargs:
         course_id (unicode): The ID of the course.
         item_id (unicode): The ID of the item in the course.
+        steps (list): A list of assessment steps for this problem.

     Returns:
         list of dictionaries with keys "status" (str) and "count" (int)

     Example usage:
-        >>> get_status_counts("ora2/1/1", "peer-assessment-problem")
+        >>> get_status_counts("ora2/1/1", "peer-assessment-problem", ["peer"])
         [
             {"status": "peer", "count": 5},
             {"status": "self", "count": 10},

@@ -345,7 +354,8 @@ def get_status_counts(course_id, item_id):
             course_id=course_id,
             item_id=item_id,
         ).count()
         }
-        for status in AssessmentWorkflow.STATUS_VALUES
+        for status in steps + AssessmentWorkflow.STATUSES
     ]
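With these changes, callers must pass the problem's step list when creating a workflow or counting statuses, and the first step determines the workflow's starting status. A sketch of the new call shapes (the uuid is an example value reused from the fixtures in this commit):

    from openassessment.workflow import api as workflow_api

    submission_uuid = "52d2158a-c568-11e3-b9b9-28cfe9182465"  # example value

    # The first step ("peer" here) becomes the initial status; unrecognized
    # steps raise AssessmentWorkflowRequestError.
    workflow = workflow_api.create_workflow(submission_uuid, ["peer", "self"])

    # Status counts are now computed over the problem's own steps plus the
    # terminal statuses ("waiting", "done").
    counts = workflow_api.get_status_counts(
        "ora2/1/1", "peer-assessment-problem", ["peer", "self"]
    )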
apps/openassessment/workflow/models.py

@@ -52,13 +52,15 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         "self",  # User needs to assess themselves
     ]

-    STATUS_VALUES = STEPS + [
+    STATUSES = [
         "waiting",  # User has done all necessary assessment but hasn't been
                     # graded yet -- we're waiting for assessments of their
                     # submission by others.
         "done",  # Complete
     ]

+    STATUS_VALUES = STEPS + STATUSES
+
     STATUS = Choices(*STATUS_VALUES)  # implicit "status" field

     submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)

@@ -85,22 +87,15 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return sub_api.get_latest_score_for_submission(self.submission_uuid)

     def status_details(self, assessment_requirements):
-        from openassessment.assessment import peer_api, self_api
         status_dict = {}
-        if "peer" in assessment_requirements:
-            status_dict["peer"] = {
-                "complete": peer_api.submitter_is_finished(
-                    self.submission_uuid,
-                    assessment_requirements["peer"]
-                )
-            }
-        if "self" in assessment_requirements:
-            status_dict["self"] = {
-                "complete": self_api.submitter_is_finished(self.submission_uuid, {})
-            }
+        steps = self._get_steps()
+        for step in steps:
+            status_dict[step.name] = {
+                "complete": step.api().submitter_is_finished(
+                    self.submission_uuid,
+                    assessment_requirements.get(step.name, {})
+                )
+            }
         return status_dict

     def update_from_assessments(self, assessment_requirements):

@@ -142,7 +137,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             return

         # Update our AssessmentWorkflowStep models with the latest from our APIs
-        steps = self.update_steps(assessment_requirements)
+        steps = self._get_steps()
+
+        # Go through each step and update its status.
+        for step in steps:
+            step.update(self.submission_uuid, assessment_requirements)

         # Fetch name of the first step that the submitter hasn't yet completed.
         new_status = next(

@@ -178,10 +177,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         self.status = new_status
         self.save()

-    def update_steps(self, assessment_requirements):
-        from openassessment.assessment import peer_api, self_api
+    def _get_steps(self):
+        """
+        Simple helper function for retrieving all the steps in the given
+        Workflow.
+        """
         steps = list(self.steps.all())
         if not steps:
             # If no steps exist for this AssessmentWorkflow, assume

@@ -191,39 +191,20 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
                 AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
             )
             steps = list(self.steps.all())
+        return steps

-        # Mapping of step names to the APIs that power them
-        steps_to_apis = {
-            self.STATUS.self: self_api,
-            self.STATUS.peer: peer_api
-        }
-
-        # Go through each step and update its status. Note that because we take
-        # the philosophy that once you're done, you're done. That means
-        for step in steps:
-            step_changed = False
-            step_api = steps_to_apis[step.name]
-            step_reqs = assessment_requirements.get(step.name, {})
-
-            # Has the user completed their obligations for this step?
-            if (step.submitter_completed_at is None and
-                    step_api.submitter_is_finished(self.submission_uuid, step_reqs)):
-                step.submitter_completed_at = now()
-                step_changed = True
-
-            # Has the step received a score?
-            if (step.assessment_completed_at is None and
-                    step_api.assessment_is_finished(self.submission_uuid, step_reqs)):
-                step.assessment_completed_at = now()
-                step_changed = True
-
-            if step_changed:
-                step.save()
-
-        return steps

     def set_score(self, score):
         """
+        Set a score for the workflow.
+
+        Scores are persisted via the Submissions API, separate from the Workflow
+        Data. Score is associated with the same submission_uuid as this workflow
+
+        Args:
+            score (dict): A dict containing 'points_earned' and
+                'points_possible'.
+        """
         sub_api.set_score(
             self.submission_uuid,
             score["points_earned"],

@@ -264,10 +245,56 @@ class AssessmentWorkflowStep(models.Model):
     assessment_completed_at = models.DateTimeField(default=None, null=True)
     order_num = models.PositiveIntegerField()

     # Store the score for this step as well?

     class Meta:
         ordering = ["workflow", "order_num"]

+    def is_submitter_complete(self):
+        return self.submitter_completed_at is not None
+
+    def is_assessment_complete(self):
+        return self.assessment_completed_at is not None
+
+    def api(self):
+        """
+        Returns an API associated with this workflow step. If no API is
+        associated with this workflow step, None is returned.
+        """
+        from openassessment.assessment import peer_api, self_api
+        api = None
+        if self.name == AssessmentWorkflow.STATUS.self:
+            api = self_api
+        elif self.name == AssessmentWorkflow.STATUS.peer:
+            api = peer_api
+        return api
+
+    def update(self, submission_uuid, assessment_requirements):
+        """
+        Updates the AssessmentWorkflowStep models with the requirements
+        specified from the Workflow API.
+
+        Intended for internal use by update_from_assessments(). See
+        update_from_assessments() documentation for more details.
+        """
+        # Once a step is completed, it will not be revisited based on updated
+        # requirements.
+        step_changed = False
+        step_reqs = assessment_requirements.get(self.name, {})
+
+        # Has the user completed their obligations for this step?
+        if (not self.is_submitter_complete() and
+                self.api().submitter_is_finished(submission_uuid, step_reqs)):
+            self.submitter_completed_at = now()
+            step_changed = True
+
+        # Has the step received a score?
+        if (not self.is_assessment_complete() and
+                self.api().assessment_is_finished(submission_uuid, step_reqs)):
+            self.assessment_completed_at = now()
+            step_changed = True
+
+        if step_changed:
+            self.save()

 # Just here to record thoughts for later:
 #
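The net effect is that the workflow no longer hard-codes a peer/self mapping: each step row resolves its own backing API and updates itself. A condensed sketch of the delegation path introduced here (restating the diff above, not new behavior):

    # Inside AssessmentWorkflow.update_from_assessments (condensed):
    steps = self._get_steps()   # AssessmentWorkflowStep rows, defaults created if empty
    for step in steps:
        step.update(self.submission_uuid, assessment_requirements)

    # Inside AssessmentWorkflowStep.update (condensed):
    step_reqs = assessment_requirements.get(self.name, {})
    if (not self.is_submitter_complete() and
            self.api().submitter_is_finished(submission_uuid, step_reqs)):
        self.submitter_completed_at = now()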
apps/openassessment/workflow/test/data/assessments.json (new file, 0 → 100644)

{
    "peer": {
        "steps": ["peer"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            }
        }
    },
    "both": {
        "steps": ["peer", "self"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            },
            "self": {}
        }
    },
    "self": {
        "steps": ["self"],
        "requirements": {
            "self": {}
        }
    }
}
\ No newline at end of file
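This fixture drives the parameterized workflow tests below via the ddt package: @ddt.file_data loads the JSON and invokes the test once per top-level key. A sketch of that pattern (class and test names follow the diff below):

    import ddt

    @ddt.ddt
    class TestAssessmentWorkflowApi(CacheResetTest):

        @ddt.file_data('data/assessments.json')
        def test_create_workflow(self, data):
            # Called three times, with data bound to the "peer", "both",
            # and "self" scenarios in turn.
            steps = data["steps"]
            requirements = data["requirements"]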
apps/openassessment/workflow/test/test_api.py

 from django.db import DatabaseError
+import ddt
 from mock import patch
 from nose.tools import raises
+from openassessment.assessment.models import PeerWorkflow
 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment import peer_api
 from openassessment.workflow.models import AssessmentWorkflow
 from submissions.models import Submission

@@ -18,18 +18,14 @@ ITEM_1 = {
     "item_type": "openassessment",
 }

-REQUIREMENTS = {
-    "peer": {
-        "must_grade": 5,
-        "must_be_graded_by": 3,
-    }
-}

+@ddt.ddt
 class TestAssessmentWorkflowApi(CacheResetTest):

-    def test_create_workflow(self):
+    @ddt.file_data('data/assessments.json')
+    def test_create_workflow(self, data):
+        first_step = data["steps"][0] if data["steps"] else "peer"
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow = workflow_api.create_workflow(submission["uuid"])
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

         workflow_keys = set(workflow.keys())
         self.assertEqual(

@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             }
         )
         self.assertEqual(workflow["submission_uuid"], submission["uuid"])
-        self.assertEqual(workflow["status"], "peer")
+        self.assertEqual(workflow["status"], first_step)

         workflow_from_get = workflow_api.get_workflow_for_submission(
-            submission["uuid"], REQUIREMENTS
+            submission["uuid"], data["requirements"]
         )
         del workflow_from_get['status_details']
         self.assertEqual(workflow, workflow_from_get)

-    def test_need_valid_submission_uuid(self):
+    @ddt.file_data('data/assessments.json')
+    def test_need_valid_submission_uuid(self, data):
         # submission doesn't exist
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow("xxxxxxxxxxx")
+            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])

         # submission_uuid is the wrong type
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow(123)
+            workflow = workflow_api.create_workflow(123, data["steps"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_submissions_errors_wrapped(self, mock_get):
+    def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
-        workflow_api.create_workflow("zzzzzzzzzzzzzzz")
+        workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])

     @patch.object(AssessmentWorkflow.objects, 'create')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_errors_wrapped(self, mock_create):
+    def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], data["steps"])

     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_errors_wrapped(self, mock_create):
+    def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], ["peer", "self"])

     @patch.object(AssessmentWorkflow.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
+    def test_unexpected_exception_wrapped(self, data, mock_create):
+        mock_create.side_effect = Exception("Kaboom!")
+        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        workflow_api.update_from_assessments(submission["uuid"], data["steps"])

-    def test_get_assessment_workflow_expected_errors(self):
+    @ddt.file_data('data/assessments.json')
+    def test_get_assessment_workflow_expected_errors(self, data):
         with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
-            workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS)
+            workflow_api.get_workflow_for_submission("0000000000000", data["requirements"])
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow_api.get_workflow_for_submission(123, REQUIREMENTS)
+            workflow_api.get_workflow_for_submission(123, data["requirements"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_get_errors_wrapped(self, mock_get):
+    def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "We talk TV!")
-        workflow = workflow_api.create_workflow(submission["uuid"])
-        workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
+        workflow_api.get_workflow_for_submission(workflow["uuid"], {})

     def test_get_status_counts(self):
         # Initially, the counts should all be zero
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 0},
             {"status": "self", "count": 0},

@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")

         # Now the counts should be updated
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 1},
             {"status": "self", "count": 2},

@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         # Create a workflow in a different course, same user and item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

         # Create a workflow in the same course, different item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

     def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):

@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             "item_type": "openassessment",
         }, answer)

-        workflow = workflow_api.create_workflow(submission['uuid'])
+        workflow = workflow_api.create_workflow(submission['uuid'], ["peer", "self"])
         workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
         workflow_model.status = status
         workflow_model.save()
apps/openassessment/xblock/grade_mixin.py

@@ -71,13 +71,13 @@ class GradeMixin(object):
         tuple of context (dict), template_path (string)
     """
     # Peer specific stuff...
-    assessment_steps = [asmnt['name'] for asmnt in self.rubric_assessments]
+    assessment_steps = self.assessment_steps
     submission_uuid = workflow['submission_uuid']

     if "peer-assessment" in assessment_steps:
         feedback = peer_api.get_assessment_feedback(submission_uuid)
         peer_assessments = peer_api.get_assessments(submission_uuid)
-        has_submitted_feedback = peer_api.get_assessment_feedback(submission_uuid) is not None
+        has_submitted_feedback = feedback is not None
     else:
         feedback = None
         peer_assessments = []

@@ -121,10 +121,6 @@ class GradeMixin(object):
             criterion["median_score"] = median_scores[criterion["name"]]
             criterion["total_value"] = max_scores[criterion["name"]]

-        from pprint import pprint
-        pprint(self_assessment)
-
         return ('openassessmentblock/grade/oa_grade_complete.html', context)

     def render_grade_incomplete(self, workflow):

@@ -145,9 +141,9 @@ class GradeMixin(object):
         incomplete_steps = []
         if _is_incomplete("peer"):
-            incomplete_steps.append("Peer Assessment")
+            incomplete_steps.append(_("Peer Assessment"))
         if _is_incomplete("self"):
-            incomplete_steps.append("Self Assessment")
+            incomplete_steps.append(_("Self Assessment"))

         return ('openassessmentblock/grade/oa_grade_incomplete.html',
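Both this mixin and openassessmentblock.py below now read self.assessment_steps instead of rebuilding the name list inline. Judging from the inline expression it replaces, the property presumably looks like the following sketch (the property definition itself is not part of this diff):

    @property
    def assessment_steps(self):
        # Assumption: mirrors the list comprehension removed above.
        return [asmnt['name'] for asmnt in self.rubric_assessments]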
apps/openassessment/xblock/openassessmentblock.py

@@ -2,7 +2,6 @@
 import datetime as dt
 import logging
-import dateutil
 import pkg_resources
 import pytz

@@ -239,7 +238,9 @@ class OpenAssessmentBlock(
         # Include release/due dates for each step in the problem
         context['step_dates'] = list()
-        for step in ['submission', 'peer-assessment', 'self-assessment']:
+
+        steps = ['submission'] + self.assessment_steps
+        for step in steps:
             # Get the dates as a student would see them
             __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)

@@ -432,16 +433,14 @@ class OpenAssessmentBlock(
         start, due, date_ranges = resolve_dates(
             self.start, self.due, [submission_range] + assessment_ranges
         )

-        step_ranges = {"submission": date_ranges[0]}
-        for step_num, asmnt in enumerate(self.rubric_assessments, start=1):
-            step_ranges[asmnt["name"]] = date_ranges[step_num]
-
         # Based on the step, choose the date range to consider
         # We hard-code this to the submission -> peer -> self workflow for now;
         # later, we can revisit to make this more flexible.
         open_range = (start, due)
-        if step in ["submission", "peer-assessment", "self-assessment"]:
-            open_range = step_ranges[step]
+        assessment_steps = self.assessment_steps
+        if step == 'submission':
+            open_range = date_ranges[0]
+        elif step in assessment_steps:
+            step_index = assessment_steps.index(step)
+            open_range = date_ranges[1 + step_index]

         # Course staff always have access to the problem
         if course_staff is None:
apps/openassessment/xblock/static/js/fixtures/templates.json

@@ -134,7 +134,59 @@
         "score": "",
         "feedback_text": "",
         "student_submission": "",
-        "peer_assessments": [],
+        "peer_assessments": [
+            {
+                "submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
+                "points_earned": 5,
+                "points_possible": 6,
+                "rubric": {
+                    "criteria": [
+                        {
+                            "name": "Criterion 1",
+                            "prompt": "Prompt 1",
+                            "order_num": 0,
+                            "feedback": "optional",
+                            "options": [
+                                {"order_num": 2, "points": 2, "name": "Good"}
+                            ],
+                            "points_possible": 2
+                        },
+                        {
+                            "name": "Criterion 2",
+                            "prompt": "Prompt 2",
+                            "order_num": 1,
+                            "options": [
+                                {"order_num": 1, "points": 1, "name": "Fair"}
+                            ],
+                            "points_possible": 2
+                        },
+                        {
+                            "name": "Criterion 3",
+                            "prompt": "Prompt 3",
+                            "order_num": 2,
+                            "feedback": "optional",
+                            "options": [
+                                {"order_num": 2, "points": 2, "name": "Good"}
+                            ],
+                            "points_possible": 2
+                        }
+                    ]
+                }
+            }
+        ],
         "self_assessment": {},
         "rubric_criteria": [],
         "has_submitted_feedback": false

@@ -146,4 +198,4 @@
         "context": {},
         "output": "oa_edit.html"
     }
-]
+]
\ No newline at end of file
apps/openassessment/xblock/static/js/openassessment.min.js

(Minified build output; diff collapsed.)
apps/openassessment/xblock/static/js/spec/oa_peer.js

@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() {
         this.showLoadError = function(msg) {};
         this.toggleActionError = function(msg, step) {};
         this.setUpCollapseExpand = function(sel) {};
-        this.renderSelfAssessmentStep = function() {};
         this.scrollToTop = function() {};
-        this.gradeView = { load: function() {} };
+        this.loadAssessmentModules = function() {};
     };

     // Stubs
apps/openassessment/xblock/static/js/spec/oa_response.js

@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() {
     // Stub base view
     var StubBaseView = function() {
+        this.loadAssessmentModules = function() {};
         this.peerView = { load: function() {} };
         this.gradeView = { load: function() {} };
         this.showLoadError = function(msg) {};

@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() {
         }).promise();
     });

     spyOn(view, 'load');
-    spyOn(baseView.peerView, 'load');
+    spyOn(baseView, 'loadAssessmentModules');

     view.response('Test response');
     view.submit();

     // Expect the current and next step to have been reloaded
     expect(view.load).toHaveBeenCalled();
-    expect(baseView.peerView.load).toHaveBeenCalled();
+    expect(baseView.loadAssessmentModules).toHaveBeenCalled();
 });

 it("enables the unsaved work warning when the user changes the response text", function() {
apps/openassessment/xblock/static/js/src/oa_base.js

@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = {
     },

     /**
-    * Asynchronously load each sub-view into the DOM.
-    */
+    Asynchronously load each sub-view into the DOM.
+    **/
     load: function() {
         this.responseView.load();
-        this.peerView.load();
-        this.renderSelfAssessmentStep();
-        this.gradeView.load();
+        this.loadAssessmentModules();

         // Set up expand/collapse for course staff debug, if available
         courseStaffDebug = $('.wrapper--staff-info');

@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = {
     },

+    /**
+    Refresh the Assessment Modules. This should be called any time an action is
+    performed by the user.
+    **/
+    loadAssessmentModules: function() {
+        this.peerView.load();
+        this.renderSelfAssessmentStep();
+        this.gradeView.load();
+    },
+
     /**
     Render the self-assessment step.
     **/
     renderSelfAssessmentStep: function() {

@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = {
         this.server.selfAssess(optionsSelected).done(
             function() {
-                view.peerView.load();
-                view.renderSelfAssessmentStep();
-                view.gradeView.load();
+                view.loadAssessmentModules();
                 view.scrollToTop();
             }
         ).fail(function(errMsg) {
apps/openassessment/xblock/static/js/src/oa_peer.js

@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = {
         var baseView = view.baseView;
         this.peerAssessRequest(function() {
             view.load();
-            baseView.renderSelfAssessmentStep();
-            baseView.gradeView.load();
+            baseView.loadAssessmentModules();
             baseView.scrollToTop();
         });
     },
apps/openassessment/xblock/static/js/src/oa_response.js

@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = {
     **/
     moveToNextStep: function() {
         this.load();
-        this.baseView.peerView.load();
-        this.baseView.gradeView.load();
+        this.baseView.loadAssessmentModules();

         // Disable the "unsaved changes" warning if the user
         // tries to navigate to another page.
apps/openassessment/xblock/static/xml/poverty_self_only_example.xml

 <?xml version="1.0" encoding="UTF-8" standalone="no" ?>
 <openassessment
-    submission_due="2015-03-11T18:20"
 >
     <title>Global Poverty
apps/openassessment/xblock/test/data/assessment_combo.json

@@ -10,7 +10,9 @@
             {"name": "self-assessment"}
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "peer_only": {
         "valid": false,

@@ -20,7 +22,9 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "self_only": {
         "valid": true,

@@ -28,7 +32,9 @@
             {"name": "self-assessment"}
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "self_before_peer": {
         "valid": false,

@@ -41,7 +47,9 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "peer_then_peer": {
         "valid": false,

@@ -56,6 +64,8 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     }
 }
apps/openassessment/xblock/test/data/invalid_assessments.json

 {
     "empty_dict": {
-        "assessment": {}
+        "assessments": [{}],
+        "current_assessments": null,
+        "is_released": false
     },
-    "must_be_graded_by_zero": {
-        "assessment": {"name": "self-assessment", "must_grade": 1, "must_be_graded_by": 0}
-    },
     "unsupported_type": {
-        "assessment": {"name": "unsupported-assessment", "must_grade": 5, "must_be_graded_by": 3}
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
+            {"name": "self-assessment"},
+            {"name": "unsupported-assessment", "must_grade": 5, "must_be_graded_by": 3}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_type": {
-        "assessment": {"must_grade": 5, "must_be_graded_by": 3}
+        "assessments": [
+            {"name": "self-assessment"},
+            {"must_grade": 5, "must_be_graded_by": 3}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "unsupported_unicode_type": {
-        "assessment": {"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽", "must_grade": 5, "must_be_graded_by": 3}
+        "assessments": [
+            {"name": "self-assessment"},
+            {"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽", "must_grade": 5, "must_be_graded_by": 3}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_grade": {
-        "assessment": {"name": "peer-assessment", "must_be_graded_by": 3}
+        "assessments": [
+            {"name": "peer-assessment", "must_be_graded_by": 3},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_be_graded_by": {
-        "assessment": {"name": "peer-assessment", "must_grade": 5}
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 5},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_less_than_must_be_graded_by": {
-        "assessment": {"name": "peer-assessment", "must_grade": 4, "must_be_graded_by": 5}
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 4, "must_be_graded_by": 5},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_zero": {
-        "assessment": {"name": "peer-assessment", "must_grade": 0, "must_be_graded_by": 0}
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 0, "must_be_graded_by": 0},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_be_graded_by_zero": {
-        "assessment": {"name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 0}
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 0},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
+    "remove_peer_mid_flight": {
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": [
+            {"name": "self-assessment"}
+        ],
+        "is_released": true
+    },
+    "swap_peer_and_self_mid_flight": {
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": [
+            {"name": "self-assessment"},
+            {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3}
+        ],
+        "is_released": true
+    }
 }
apps/openassessment/xblock/test/data/valid_assessments.json

 {
-    "peer_then_self": [
-        {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
-        {"name": "self-assessment"}
-    ],
-    "self_only": [
-        {"name": "self-assessment"}
-    ],
-    "must_be_graded_by_equals_must_grade": [
-        {"name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 1},
-        {"name": "self-assessment"}
-    ]
+    "peer_then_self": {
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "self_only": {
+        "assessments": [
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "must_be_graded_by_equals_must_grade": {
+        "assessments": [
+            {"name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 1},
+            {"name": "self-assessment"}
+        ],
+        "current_assessments": null,
+        "is_released": false
+    }
 }
apps/openassessment/xblock/test/test_grade.py

@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase):
     SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

+    STEPS = ['peer', 'self']
+
     @scenario('data/grade_scenario.xml', user_id='Greggs')
     def test_render_grade(self, xblock):
         # Submit, assess, and render the grade view

@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase):
             scorer['student_id'] = scorer_name
             scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
-            workflow_api.create_workflow(scorer_sub['uuid'])
+            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
             submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
apps/openassessment/xblock/test/test_studio.py

@@ -92,19 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase):
     # Test that we enforce that there are exactly two assessments,
     # peer ==> self
     # If and when we remove this restriction, this test can be deleted.
+    @data('data/invalid_assessment_combo_order.xml', 'data/invalid_assessment_combo_peer_only.xml')
     @scenario('data/basic_scenario.xml')
-    def test_update_xml_invalid_assessment_combo(self, xblock):
-        invalid_workflows = ['invalid_assessment_combo_order', 'invalid_assessment_combo_peer_only']
-        for invalid_workflow in invalid_workflows:
-            request = json.dumps(
-                {'xml': self.load_fixture_str('data/{}.xml'.format(invalid_workflow))}
-            )
-            resp = self.request(xblock, 'update_xml', request, response_format='json')
-            self.assertFalse(resp['success'])
-            self.assertIn("supported assessment flows are", resp['msg'].lower())
+    def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
+        request = json.dumps({'xml': self.load_fixture_str(invalid_workflow)})
+        resp = self.request(xblock, 'update_xml', request, response_format='json')
+        self.assertFalse(resp['success'])
+        self.assertIn("for this assignment", resp['msg'].lower())

     @data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
     @scenario('data/basic_scenario.xml')
apps/openassessment/xblock/test/test_validation.py

@@ -14,18 +14,18 @@ class AssessmentValidationTest(TestCase):
     @ddt.file_data('data/valid_assessments.json')
     def test_valid_assessment(self, data):
-        success, msg = validate_assessments(data)
+        success, msg = validate_assessments(
+            data["assessments"], data["current_assessments"], data["is_released"]
+        )
         self.assertTrue(success)
         self.assertEqual(msg, u'')

     @ddt.file_data('data/invalid_assessments.json')
     def test_invalid_assessment(self, data):
-        success, msg = validate_assessments([data['assessment']])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

     def test_no_assessments(self):
-        success, msg = validate_assessments([])
+        success, msg = validate_assessments([], [], False)
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

@@ -33,7 +33,7 @@ class AssessmentValidationTest(TestCase):
     # (peer -> self), and (self)
     @ddt.file_data('data/assessment_combo.json')
     def test_enforce_assessment_combo_restrictions(self, data):
-        success, msg = validate_assessments(data['assessments'])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
         self.assertEqual(success, data['valid'], msg=msg)
         if not success:
apps/openassessment/xblock/validation.py

@@ -43,7 +43,7 @@ def _duplicates(items):
     return set(x for x in items if counts[x] > 1)

-def validate_assessments(assessments):
+def validate_assessments(assessments, current_assessments, is_released):
     """
     Check that the assessment dict is semantically valid.

@@ -51,8 +51,15 @@ def validate_assessments(assessments):
         * peer, then self
         * self only

+    If a question has been released, the type and number of assessment steps
+    cannot be changed.
+
     Args:
         assessments (list of dict): list of serialized assessment models.
+        current_assessments (list of dict): list of the current serialized
+            assessment models. Used to determine if the assessment configuration
+            has changed since the question had been released.
+        is_released (boolean) : True if the question has been released.

     Returns:
         tuple (is_valid, msg) where

@@ -69,16 +76,16 @@ def validate_assessments(assessments):
             assessments[1].get('name') == 'self-assessment'
         )

+    if len(assessments) == 0:
+        return (False, _("This problem must include at least one assessment."))
+
     # Right now, there are two allowed scenarios: (peer -> self) and (self)
     if not (_self_only(assessments) or _peer_then_self(assessments)):
         return (
             False,
-            _("The only supported assessment flows are (peer, self) or (self).")
+            _("For this assignment, you can set either a peer assessment followed by a self assessment or a self assessment only.")
         )

-    if len(assessments) == 0:
-        return (False, _("This problem must include at least one assessment."))
-
     for assessment_dict in assessments:
         # Supported assessment
         if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:

@@ -98,6 +105,15 @@ def validate_assessments(assessments):
         if must_grade < must_be_graded_by:
             return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))

+    if is_released:
+        if len(assessments) != len(current_assessments):
+            return (False, _("The number of assessments cannot be changed after the problem has been released."))
+
+        names = [assessment.get('name') for assessment in assessments]
+        current_names = [assessment.get('name') for assessment in current_assessments]
+        if names != current_names:
+            return (False, _("The assessment type cannot be changed after the problem has been released."))
+
     return (True, u'')

@@ -197,7 +213,12 @@ def validator(oa_block, strict_post_release=True):
     """
     def _inner(rubric_dict, submission_dict, assessments):
-        success, msg = validate_assessments(assessments)
+        current_assessments = oa_block.rubric_assessments
+        success, msg = validate_assessments(
+            assessments,
+            current_assessments,
+            strict_post_release and oa_block.is_released()
+        )
         if not success:
             return (False, msg)
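The released-problem guard means the same function now validates both new and edited configurations. A small sketch of the resulting behavior (fixture-style dicts, illustrative only):

    from openassessment.xblock.validation import validate_assessments

    current = [{"name": "self-assessment"}]
    proposed = [
        {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
        {"name": "self-assessment"},
    ]

    validate_assessments(proposed, current, False)  # (True, u'')  not yet released
    validate_assessments(current, current, True)    # (True, u'')  released, unchanged
    validate_assessments(proposed, current, True)   # (False, "The number of assessments cannot be changed ...")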
apps/openassessment/xblock/workflow_mixin.py

@@ -9,6 +9,10 @@ class WorkflowMixin(object):
         return self.get_workflow_info()

     def create_workflow(self, submission_uuid):
+        steps = self._create_step_list()
+        workflow_api.create_workflow(submission_uuid, steps)
+
+    def _create_step_list(self):
         def _convert_rubric_assessment_name(ra_name):
             """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
             short_name, suffix = ra_name.split("-")

@@ -18,11 +22,10 @@ class WorkflowMixin(object):
         # "peer-assessment", while the model is expecting "self", "peer".
         # Therefore, this conversion step. We should refactor later to
         # standardize.
-        steps = [
+        return [
             _convert_rubric_assessment_name(ra["name"])
             for ra in self.rubric_assessments
         ]
-        workflow_api.create_workflow(submission_uuid, steps)

     def workflow_requirements(self):
         """

@@ -109,6 +112,7 @@ class WorkflowMixin(object):
         status_counts = workflow_api.get_status_counts(
             course_id=student_item['course_id'],
             item_id=student_item['item_id'],
+            steps=self._create_step_list(),
         )
         num_submissions = sum(item['count'] for item in status_counts)
         return status_counts, num_submissions
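The helper extracted here converts XBlock assessment names to the short step names the workflow models expect; per its docstring, the mapping is simply the text before the hyphen. A sketch consistent with that docstring (the inner function's return line falls outside the hunk, so it is an assumption):

    def _convert_rubric_assessment_name(ra_name):
        """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
        short_name, suffix = ra_name.split("-")
        return short_name  # assumption: the docstring implies the short name is returned

    assert _convert_rubric_assessment_name("peer-assessment") == "peer"
    assert _convert_rubric_assessment_name("self-assessment") == "self"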