edx / edx-ora2 · Commits

Commit aaf9bdb2, authored May 06, 2014 by Stephen Sanchez
Merge pull request #302 from edx/ormsbee/workflow_steps

Make peer assessment an optional step.

Parents: ecb8e976, 2702e77b
Showing 39 changed files with 976 additions and 201 deletions.
apps/openassessment/assessment/peer_api.py  +6 −2
apps/openassessment/assessment/self_api.py  +105 −5
apps/openassessment/assessment/test/test_peer.py  +4 −3
apps/openassessment/assessment/test/test_self.py  +4 −4
apps/openassessment/management/commands/create_oa_submissions.py  +2 −1
apps/openassessment/management/tests/test_upload_oa_data.py  +1 −1
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html  +5 −1
apps/openassessment/tests/test_data.py  +1 −1
apps/openassessment/workflow/admin.py  +7 −1
apps/openassessment/workflow/api.py  +38 −10
apps/openassessment/workflow/migrations/0002_auto__add_field_assessmentworkflow_course_id__add_field_assessmentwork.py  +0 −1
apps/openassessment/workflow/migrations/0003_auto__add_assessmentworkflowstep.py  +53 −0
apps/openassessment/workflow/models.py  +147 −41
apps/openassessment/workflow/test/data/assessments.json  +28 −0
apps/openassessment/workflow/test/test_api.py  +47 −31
apps/openassessment/xblock/grade_mixin.py  +39 −13
apps/openassessment/xblock/openassessmentblock.py  +19 −11
apps/openassessment/xblock/peer_assessment_mixin.py  +5 −0
apps/openassessment/xblock/self_assessment_mixin.py  +5 −0
apps/openassessment/xblock/static/js/fixtures/templates.json  +53 −1
apps/openassessment/xblock/static/js/openassessment.min.js  +0 −0
apps/openassessment/xblock/static/js/spec/oa_peer.js  +1 −2
apps/openassessment/xblock/static/js/spec/oa_response.js  +3 −2
apps/openassessment/xblock/static/js/src/oa_base.js  +14 −8
apps/openassessment/xblock/static/js/src/oa_peer.js  +1 −2
apps/openassessment/xblock/static/js/src/oa_response.js  +1 −2
apps/openassessment/xblock/static/xml/poverty_self_only_example.xml  +109 −0
apps/openassessment/xblock/submission_mixin.py  +1 −1
apps/openassessment/xblock/test/data/assessment_combo.json  +16 −6
apps/openassessment/xblock/test/data/invalid_assessment_combo_order.xml  +22 −0
apps/openassessment/xblock/test/data/invalid_assessment_combo_peer_only.xml  +21 −0
apps/openassessment/xblock/test/data/invalid_assessments.json  +113 −16
apps/openassessment/xblock/test/data/optional_assessments_self_only.xml  +1 −1
apps/openassessment/xblock/test/data/valid_assessments.json  +25 −9
apps/openassessment/xblock/test/test_grade.py  +3 −1
apps/openassessment/xblock/test/test_studio.py  +6 −3
apps/openassessment/xblock/test/test_validation.py  +7 −8
apps/openassessment/xblock/validation.py  +43 −13
apps/openassessment/xblock/workflow_mixin.py  +20 −0
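The thread running through all of these files is that the workflow's step list is now passed in explicitly when the workflow is created, instead of being hard-coded to peer-then-self. A minimal usage sketch under the new create_workflow(submission_uuid, steps) signature shown in the diffs below (the submission dict is illustrative):

    from openassessment.workflow import api as workflow_api

    # A self-assessment-only problem no longer needs a peer step.
    workflow = workflow_api.create_workflow(submission["uuid"], ["self"])

    # A combined problem lists its steps in the order the user must complete them.
    workflow = workflow_api.create_workflow(submission["uuid"], ["peer", "self"])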
apps/openassessment/assessment/peer_api.py
@@ -72,7 +72,7 @@ class PeerAssessmentInternalError(PeerAssessmentError):
     pass

-def is_complete(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         if workflow.completed_at is not None:
@@ -99,7 +99,7 @@ def get_score(submission_uuid, requirements):
         dict with keys "points_earned" and "points_possible".
     """
     # User hasn't completed their own submission yet
-    if not is_complete(submission_uuid, requirements):
+    if not submitter_is_finished(submission_uuid, requirements):
         return None

     workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
@@ -135,6 +135,10 @@ def get_score(submission_uuid, requirements):
     }

+def assessment_is_finished(submission_uuid, requirements):
+    return bool(get_score(submission_uuid, requirements))
+
 def create_assessment(
     scorer_submission_uuid,
     scorer_id,
apps/openassessment/assessment/self_api.py
@@ -3,6 +3,7 @@ Public interface for self-assessment.
 """
 import logging

+from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from dogapi import dog_stats_api
 from submissions.api import get_submission_and_student, SubmissionNotFoundError
@@ -21,13 +22,30 @@ SELF_TYPE = "SE"
 logger = logging.getLogger("openassessment.assessment.self_api")

-class SelfAssessmentRequestError(Exception):
+class SelfAssessmentError(Exception):
     """Generic Self Assessment Error

     Raised when an error occurs while processing a request related to the
     Self Assessment Workflow.
     """
     pass

+class SelfAssessmentRequestError(SelfAssessmentError):
+    """
+    There was a problem with the request for a self-assessment.
+    """
+    pass
+
+class SelfAssessmentInternalError(SelfAssessmentError):
+    """
+    There was an internal problem while accessing the self-assessment api.
+    """
+    pass
+
 def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
     """
     Create a self-assessment for a submission.
@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, submission)

     # Return the serialized assessment
     return assessment_dict
@@ -140,21 +157,104 @@ def get_assessment(submission_uuid):
     return serialized_assessment

-def is_complete(submission_uuid):
+def submitter_is_finished(submission_uuid, requirements):
+    """
+    Check whether a self-assessment has been completed for a submission.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+
+    Returns:
+        bool: True if the submitter has assessed their answer
+
+    Examples:
+        >>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
+    """
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()

+def assessment_is_finished(submission_uuid, requirements):
+    """
+    Check whether a self-assessment has been completed. For self-assessment,
+    this function is synonymous with submitter_is_finished.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+
+    Returns:
+        True if the assessment is complete.
+
+    Examples:
+        >>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
+    """
+    return submitter_is_finished(submission_uuid, requirements)
+
+def get_score(submission_uuid, requirements):
+    """
+    Get the score for this particular assessment.
+
+    Args:
+        submission_uuid (str): The unique identifier for the submission
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+
+    Returns:
+        A dict of points earned and points possible for the given submission.
+        Returns None if no score can be determined yet.
+
+    Examples:
+        >>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        {
+            'points_earned': 5,
+            'points_possible': 10
+        }
+    """
+    assessment = get_assessment(submission_uuid)
+    if not assessment:
+        return None
+
+    return {
+        "points_earned": assessment["points_earned"],
+        "points_possible": assessment["points_possible"]
+    }
+
+def get_assessment_scores_by_criteria(submission_uuid):
+    """Get the median score for each rubric criterion
+
+    Args:
+        submission_uuid (str): The submission uuid is used to get the
+            assessments used to score this submission, and generate the
+            appropriate median score.
+
+    Returns:
+        (dict): A dictionary of rubric criterion names, with a median score of
+            the peer assessments.
+
+    Raises:
+        SelfAssessmentInternalError: If any error occurs while retrieving
+            information to form the median scores, an error is raised.
+    """
+    try:
+        assessments = list(
+            Assessment.objects.filter(
+                score_type=SELF_TYPE, submission_uuid=submission_uuid
+            ).order_by('-scored_at')[:1]
+        )
+        scores = Assessment.scores_by_criterion(assessments)
+        return Assessment.get_median_score_dict(scores)
+    except DatabaseError:
+        error_message = _(
+            u"Error getting self assessment scores for {}"
+        ).format(submission_uuid)
+        logger.exception(error_message)
+        raise SelfAssessmentInternalError(error_message)
+
 def _log_assessment(assessment, submission):
     """
     Log the creation of a self-assessment.
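With submitter_is_finished, assessment_is_finished, and get_score, self_api now exposes the same completion interface as peer_api, which is what lets the workflow treat steps interchangeably. A sketch of how a caller might drive it (the UUID is the illustrative one from the docstrings above):

    from openassessment.assessment import self_api

    uuid = '222bdf3d-a88e-11e3-859e-040ccee02800'  # illustrative UUID
    # Self-assessment has no step requirements, so pass an empty dict.
    if self_api.submitter_is_finished(uuid, {}):
        score = self_api.get_score(uuid, {})
        if score:
            print("{points_earned}/{points_possible}".format(**score))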
apps/openassessment/assessment/test/test_peer.py
@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
 WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
 THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
+STEPS = ['peer', 'self']

 @ddt
 class TestPeerApi(CacheResetTest):
@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest):
             'must_grade': REQUIRED_GRADED,
             'must_be_graded_by': REQUIRED_GRADED_BY
         }
-        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))

     def test_completeness(self):
         """
@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest):
             'must_grade': REQUIRED_GRADED,
             'must_be_graded_by': REQUIRED_GRADED_BY
         }
-        self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))

     def test_find_active_assessments(self):
         buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest):
         new_student_item["student_id"] = student
         submission = sub_api.create_submission(new_student_item, answer, date)
         peer_api.create_peer_workflow(submission["uuid"])
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], STEPS)
         return submission, new_student_item
apps/openassessment/assessment/test/test_self.py
@@ -9,7 +9,7 @@ import pytz
 from openassessment.test_utils import CacheResetTest
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
-    create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
+    create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
 )
@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest):
         # Now there should be a submission, but no self-assessment
         assessment = get_assessment(submission["uuid"])
         self.assertIs(assessment, None)
-        self.assertFalse(is_complete(submission['uuid']))
+        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

         # Create a self-assessment for the submission
         assessment = create_assessment(
@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest):
         )

         # Self-assessment should be complete
-        self.assertTrue(is_complete(submission['uuid']))
+        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

         # Retrieve the self-assessment
         retrieved = get_assessment(submission["uuid"])
@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest):
     def test_is_complete_no_submission(self):
         # This submission uuid does not exist
-        self.assertFalse(is_complete('abc1234'))
+        self.assertFalse(submitter_is_finished('abc1234', {}))
apps/openassessment/management/commands/create_oa_submissions.py
@@ -9,6 +9,7 @@ from submissions import api as sub_api
 from openassessment.workflow import api as workflow_api
 from openassessment.assessment import peer_api, self_api

+STEPS = ['peer', 'self']

 class Command(BaseCommand):
@@ -131,7 +132,7 @@ class Command(BaseCommand):
         """
         answer = {'text': " ".join(loremipsum.get_paragraphs(5))}

         submission = sub_api.create_submission(student_item, answer)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], STEPS)
         workflow_api.update_from_assessments(
             submission['uuid'],
             {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
         )
apps/openassessment/management/tests/test_upload_oa_data.py
@@ -43,7 +43,7 @@ class UploadDataTest(TestCase):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Create and upload the archive of CSV files
         # This should generate the files even though
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html
@@ -30,7 +30,7 @@
 </article>

 <article class="submission__peer-evaluations step__content__section">
-    <h3 class="submission__peer-evaluations__title">{% trans "Peer Assessments of Your Response" %}</h3>
+    <h3 class="submission__peer-evaluations__title">{% trans "Assessments of Your Response" %}</h3>

     <ol class="list submission__peer-evaluations__questions">
         {% for criterion in rubric_criteria %}
@@ -143,6 +143,7 @@
             </li>
             {% endwith %}
         {% endfor %}
+        {% if peer_assessments %}
         <li class="question question--feedback ui-toggle-visibility">
             <h4 class="question__title ui-toggle-visibility__control">
                 <i class="ico icon-caret-right"></i>
@@ -173,9 +174,11 @@
             {% endfor %}
             </ul>
         </li>
+        {% endif %}
     </ol>
 </article>

+{% if peer_assessments %}
 <form id="submission__feedback" class="submission__feedback ui-toggle-visibility step__content__section is--collapsed" method="post">
     <h3 class="submission__feedback__title ui-toggle-visibility__control">
         <i class="ico icon-caret-right"></i>
@@ -272,6 +275,7 @@
             </div>
         </div>
 </form>
+{% endif %}
 </div>
 </div>
 </div>
apps/openassessment/tests/test_data.py
@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Generate a CSV file for the submissions
         output_streams = self._output_streams(['submission'])
apps/openassessment/workflow/admin.py
 from django.contrib import admin

-from .models import AssessmentWorkflow
+from .models import AssessmentWorkflow, AssessmentWorkflowStep

+class AssessmentWorkflowStepInline(admin.StackedInline):
+    model = AssessmentWorkflowStep
+    extra = 0

 class AssessmentWorkflowAdmin(admin.ModelAdmin):
     """Admin for the user's overall workflow through open assessment.
@@ -15,5 +20,6 @@ class AssessmentWorkflowAdmin(admin.ModelAdmin):
     )
     list_filter = ('status',)
     search_fields = ('uuid', 'submission_uuid', 'course_id', 'item_id')
+    inlines = (AssessmentWorkflowStepInline,)

 admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin)
apps/openassessment/workflow/api.py
@@ -9,7 +9,7 @@ from django.db import DatabaseError
 from openassessment.assessment import peer_api
 from submissions import api as sub_api

-from .models import AssessmentWorkflow
+from .models import AssessmentWorkflow, AssessmentWorkflowStep
 from .serializers import AssessmentWorkflowSerializer

 logger = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
     pass

-def create_workflow(submission_uuid):
+def create_workflow(submission_uuid, steps):
     """Begins a new assessment workflow.

     Create a new workflow that other assessments will record themselves against.
@@ -66,6 +66,8 @@ def create_workflow(submission_uuid):
     Args:
         submission_uuid (str): The UUID for the submission that all our
             assessments will be evaluating.
+        steps (list): List of steps that are part of the workflow, in the order
+            that the user must complete them. Example: `["peer", "self"]`

     Returns:
         dict: Assessment workflow information with the following
@@ -85,7 +87,7 @@ def create_workflow(submission_uuid):
         AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
             not exist or is of an invalid type.
         AssessmentWorkflowInternalError: Unexpected internal error, such as the
-            submissions app not being available or a database configuation
+            submissions app not being available or a database configuration
             problem.
     """
@@ -98,7 +100,7 @@ def create_workflow(submission_uuid):
     try:
         submission_dict = sub_api.get_submission_and_student(submission_uuid)
-    except sub_api.SubmissionNotFoundError as err:
+    except sub_api.SubmissionNotFoundError:
         err_msg = sub_err_msg("submission not found")
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)
@@ -107,27 +109,51 @@ def create_workflow(submission_uuid):
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)
     except sub_api.SubmissionInternalError as err:
         err_msg = sub_err_msg(err)
         logger.error(err)
         raise AssessmentWorkflowInternalError(
             u"retrieving submission {} failed with unknown error: {}"
             .format(submission_uuid, err)
         )

+    # Raise an error if they specify a step we don't recognize...
+    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
+    if invalid_steps:
+        raise AssessmentWorkflowRequestError(
+            u"The following steps were not recognized: {}; Must be one of {}".format(
+                invalid_steps, AssessmentWorkflow.STEPS
+            )
+        )
+
     # We're not using a serializer to deserialize this because the only variable
     # we're getting from the outside is the submission_uuid, which is already
     # validated by this point.
+    status = AssessmentWorkflow.STATUS.peer
+    if steps[0] == "peer":
         try:
             peer_api.create_peer_workflow(submission_uuid)
         except peer_api.PeerAssessmentError as err:
             err_msg = u"Could not create assessment workflow: {}".format(err)
             logger.exception(err_msg)
             raise AssessmentWorkflowInternalError(err_msg)
+    elif steps[0] == "self":
+        status = AssessmentWorkflow.STATUS.self

     try:
         workflow = AssessmentWorkflow.objects.create(
             submission_uuid=submission_uuid,
-            status=AssessmentWorkflow.STATUS.peer,
+            status=status,
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
+        workflow_steps = [
+            AssessmentWorkflowStep(workflow=workflow, name=step, order_num=i)
+            for i, step in enumerate(steps)
+        ]
+        workflow.steps.add(*workflow_steps)
     except (DatabaseError, peer_api.PeerAssessmentError, sub_api.SubmissionError) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
@@ -298,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements):
     return _serialized_with_details(workflow, assessment_requirements)

-def get_status_counts(course_id, item_id):
+def get_status_counts(course_id, item_id, steps):
     """
     Count how many workflows have each status, for a given item in a course.

     Kwargs:
         course_id (unicode): The ID of the course.
         item_id (unicode): The ID of the item in the course.
+        steps (list): A list of assessment steps for this problem.

     Returns:
         list of dictionaries with keys "status" (str) and "count" (int)

     Example usage:
-        >>> get_status_counts("ora2/1/1", "peer-assessment-problem")
+        >>> get_status_counts("ora2/1/1", "peer-assessment-problem", ["peer"])
         [
             {"status": "peer", "count": 5},
             {"status": "self", "count": 10},
@@ -327,7 +354,8 @@ def get_status_counts(course_id, item_id):
                 course_id=course_id,
                 item_id=item_id,
             ).count()
         }
-        for status in AssessmentWorkflow.STATUS_VALUES
+        for status in steps + AssessmentWorkflow.STATUSES
     ]
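Because create_workflow now validates the step list up front, callers get a request error before anything is written. A small sketch of the failure mode, using a hypothetical unrecognized step name:

    from openassessment.workflow import api as workflow_api

    try:
        # "grammar" is hypothetical; only "peer" and "self" are in AssessmentWorkflow.STEPS.
        workflow_api.create_workflow(submission["uuid"], ["peer", "grammar"])
    except workflow_api.AssessmentWorkflowRequestError as err:
        print(err)  # lists the unrecognized steps; no workflow row is created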
apps/openassessment/workflow/migrations/0002_auto__add_field_assessmentworkflow_course_id__add_field_assessmentwork.py
@@ -40,7 +40,6 @@ class Migration(SchemaMigration):
             'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
             'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
             'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
-            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
             'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
             'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
             'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
apps/openassessment/workflow/migrations/0003_auto__add_assessmentworkflowstep.py
new file mode 100644

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentWorkflowStep'
        db.create_table('workflow_assessmentworkflowstep', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='steps', to=orm['workflow.AssessmentWorkflow'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('submitter_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('assessment_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('order_num', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('workflow', ['AssessmentWorkflowStep'])

    def backwards(self, orm):
        # Deleting model 'AssessmentWorkflowStep'
        db.delete_table('workflow_assessmentworkflowstep')

    models = {
        'workflow.assessmentworkflow': {
            'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        'workflow.assessmentworkflowstep': {
            'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'AssessmentWorkflowStep'},
            'assessment_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'submitter_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['workflow.AssessmentWorkflow']"})
        }
    }

    complete_apps = ['workflow']
\ No newline at end of file
apps/openassessment/workflow/models.py
@@ -16,6 +16,7 @@ import importlib
 from django.conf import settings
 from django.db import models
 from django_extensions.db.fields import UUIDField
+from django.utils.timezone import now
 from model_utils import Choices
 from model_utils.models import StatusModel, TimeStampedModel
@@ -46,15 +47,20 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
     an after the fact recording of the last known state of that information so
     we can search easily.
     """
-    STATUS_VALUES = [
+    STEPS = [
         "peer",  # User needs to assess peer submissions
         "self",  # User needs to assess themselves
+    ]
+    STATUSES = [
         "waiting",  # User has done all necessary assessment but hasn't been
                     # graded yet -- we're waiting for assessments of their
                     # submission by others.
         "done",  # Complete
     ]
+    STATUS_VALUES = STEPS + STATUSES
     STATUS = Choices(*STATUS_VALUES)  # implicit "status" field

     submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
@@ -81,23 +87,16 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return sub_api.get_latest_score_for_submission(self.submission_uuid)

     def status_details(self, assessment_requirements):
-        return {
-            "peer": {
-                "complete": self._is_peer_complete(assessment_requirements),
-            },
-            "self": {
-                "complete": self._is_self_complete(),
-            },
-        }
-
-    def _is_peer_complete(self, assessment_requirements):
-        from openassessment.assessment import peer_api
-        peer_requirements = assessment_requirements["peer"]
-        return peer_api.is_complete(self.submission_uuid, peer_requirements)
-
-    def _is_self_complete(self):
-        from openassessment.assessment import self_api
-        return self_api.is_complete(self.submission_uuid)
+        status_dict = {}
+        steps = self._get_steps()
+        for step in steps:
+            status_dict[step.name] = {
+                "complete": step.api().submitter_is_finished(
+                    self.submission_uuid,
+                    assessment_requirements.get(step.name, {})
+                )
+            }
+        return status_dict

     def update_from_assessments(self, assessment_requirements):
         """Query self and peer APIs and change our status if appropriate.
@@ -130,34 +129,82 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             specific requirements in this dict.
         """
-        from openassessment.assessment import peer_api
+        from openassessment.assessment import peer_api, self_api

         # If we're done, we're done -- it doesn't matter if requirements have
         # changed because we've already written a score.
         if self.status == self.STATUS.done:
             return

-        # Have they completed the peer and self steps?
-        peer_complete = self._is_peer_complete(assessment_requirements)
-        self_complete = self._is_self_complete()
-
-        if peer_complete and self_complete:
-            # If they've completed both, they're at least waiting, possibly done
-            new_status = self.STATUS.waiting
-        elif peer_complete:
-            # If they haven't done self assessment yet, that's their status
-            new_status = self.STATUS.self
-        else:
-            # Default starting status is peer
-            new_status = self.STATUS.peer
-
-        # If we're at least waiting, let's check if we have a peer score and
-        # can move all the way to done
-        if new_status == self.STATUS.waiting:
+        # Update our AssessmentWorkflowStep models with the latest from our APIs
+        steps = self._get_steps()
+
+        # Go through each step and update its status.
+        for step in steps:
+            step.update(self.submission_uuid, assessment_requirements)
+
+        # Fetch name of the first step that the submitter hasn't yet completed.
+        new_status = next(
+            (step.name for step in steps if step.submitter_completed_at is None),
+            self.STATUS.waiting  # if nothing's left to complete, we're waiting
+        )
+
+        # If the submitter has done all they need to do, let's check to see if
+        # all steps have been fully assessed (i.e. we can score it).
+        if (new_status == self.STATUS.waiting and
+                all(step.assessment_completed_at for step in steps)):
+
+            # At this point, we're trying to give a score. We currently have a
+            # very simple rule for this -- if it has a peer step, use that for
+            # scoring. If not, use the self step. Later on, we may put more
+            # interesting rules here.
+            step_names = [step.name for step in steps]
+            score = None
+            if self.STATUS.peer in step_names:
                 score = peer_api.get_score(
-                    self.submission_uuid, assessment_requirements["peer"]
+                    self.submission_uuid,
+                    assessment_requirements[self.STATUS.peer]
                 )
+            elif self.STATUS.self in step_names:
+                score = self_api.get_score(self.submission_uuid, {})
+
             if score:
                 self.set_score(score)
                 new_status = self.STATUS.done

         # Finally save our changes if the status has changed
         if self.status != new_status:
             self.status = new_status
             self.save()

+    def _get_steps(self):
+        """
+        Simple helper function for retrieving all the steps in the given
+        Workflow.
+        """
+        steps = list(self.steps.all())
+        if not steps:
+            # If no steps exist for this AssessmentWorkflow, assume
+            # peer -> self for backwards compatibility
+            self.steps.add(
+                AssessmentWorkflowStep(name=self.STATUS.peer, order_num=0),
+                AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
+            )
+            steps = list(self.steps.all())
+        return steps

     def set_score(self, score):
         """
         Set a score for the workflow.

         Scores are persisted via the Submissions API, separate from the Workflow
         Data. Score is associated with the same submission_uuid as this workflow

         Args:
             score (dict): A dict containing 'points_earned' and
                 'points_possible'.
         """
         sub_api.set_score(
             self.submission_uuid,
             score["points_earned"],
@@ -180,14 +227,73 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             "time": datetime.utcnow(),
         })
-        new_status = self.STATUS.done
-
-        # Finally save our changes if the status has changed
-        if self.status != new_status:
-            self.status = new_status
-            self.save()

+class AssessmentWorkflowStep(models.Model):
+    """An individual step in the overall workflow process.
+
+    Similar caveats apply to this class as apply to `AssessmentWorkflow`. What
+    we're storing in the database is usually but not always current information.
+    In particular, if the problem definition has changed the requirements for a
+    particular step in the workflow, then what is in the database will be out of
+    sync until someone views this problem again (which will trigger a workflow
+    update to occur).
+    """
+    workflow = models.ForeignKey(AssessmentWorkflow, related_name="steps")
+    name = models.CharField(max_length=20)
+    submitter_completed_at = models.DateTimeField(default=None, null=True)
+    assessment_completed_at = models.DateTimeField(default=None, null=True)
+    order_num = models.PositiveIntegerField()
+
+    class Meta:
+        ordering = ["workflow", "order_num"]
+
+    def is_submitter_complete(self):
+        return self.submitter_completed_at is not None
+
+    def is_assessment_complete(self):
+        return self.assessment_completed_at is not None
+
+    def api(self):
+        """
+        Returns an API associated with this workflow step. If no API is
+        associated with this workflow step, None is returned.
+        """
+        from openassessment.assessment import peer_api, self_api
+        api = None
+        if self.name == AssessmentWorkflow.STATUS.self:
+            api = self_api
+        elif self.name == AssessmentWorkflow.STATUS.peer:
+            api = peer_api
+        return api
+
+    def update(self, submission_uuid, assessment_requirements):
+        """
+        Updates the AssessmentWorkflowStep models with the requirements
+        specified from the Workflow API.
+
+        Intended for internal use by update_from_assessments(). See
+        update_from_assessments() documentation for more details.
+        """
+        # Once a step is completed, it will not be revisited based on updated
+        # requirements.
+        step_changed = False
+        step_reqs = assessment_requirements.get(self.name, {})
+
+        # Has the user completed their obligations for this step?
+        if (not self.is_submitter_complete() and
+                self.api().submitter_is_finished(submission_uuid, step_reqs)):
+            self.submitter_completed_at = now()
+            step_changed = True
+
+        # Has the step received a score?
+        if (not self.is_assessment_complete() and
+                self.api().assessment_is_finished(submission_uuid, step_reqs)):
+            self.assessment_completed_at = now()
+            step_changed = True
+
+        if step_changed:
+            self.save()

 # Just here to record thoughts for later:
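The heart of the new update_from_assessments is a first-match scan over the ordered steps: the workflow's status is simply the first step the submitter hasn't finished, or "waiting" when none remain. A standalone sketch of that idiom, with a hypothetical Step tuple standing in for the AssessmentWorkflowStep model:

    from collections import namedtuple

    # Hypothetical stand-in for the AssessmentWorkflowStep model.
    Step = namedtuple("Step", ["name", "submitter_completed_at"])

    steps = [Step("peer", "2014-05-06T00:00:00Z"), Step("self", None)]

    # First step the submitter hasn't completed; "waiting" if nothing is left.
    new_status = next(
        (step.name for step in steps if step.submitter_completed_at is None),
        "waiting"
    )
    assert new_status == "self"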
apps/openassessment/workflow/test/data/assessments.json
new file mode 100644

{
    "peer": {
        "steps": ["peer"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            }
        }
    },
    "both": {
        "steps": ["peer", "self"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            },
            "self": {}
        }
    },
    "self": {
        "steps": ["self"],
        "requirements": {
            "self": {}
        }
    }
}
\ No newline at end of file
apps/openassessment/workflow/test/test_api.py
 from django.db import DatabaseError

 import ddt
 from mock import patch
 from nose.tools import raises

 from openassessment.assessment.models import PeerWorkflow
 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment import peer_api
 from openassessment.workflow.models import AssessmentWorkflow
 from submissions.models import Submission
@@ -18,18 +18,14 @@ ITEM_1 = {
     "item_type": "openassessment",
 }

-REQUIREMENTS = {
-    "peer": {
-        "must_grade": 5,
-        "must_be_graded_by": 3,
-    }
-}

+@ddt.ddt
 class TestAssessmentWorkflowApi(CacheResetTest):

-    def test_create_workflow(self):
+    @ddt.file_data('data/assessments.json')
+    def test_create_workflow(self, data):
+        first_step = data["steps"][0] if data["steps"] else "peer"
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow = workflow_api.create_workflow(submission["uuid"])
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

         workflow_keys = set(workflow.keys())
         self.assertEqual(
@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         )

         self.assertEqual(workflow["submission_uuid"], submission["uuid"])
-        self.assertEqual(workflow["status"], "peer")
+        self.assertEqual(workflow["status"], first_step)

         workflow_from_get = workflow_api.get_workflow_for_submission(
-            submission["uuid"], REQUIREMENTS
+            submission["uuid"], data["requirements"]
         )

         del workflow_from_get['status_details']
         self.assertEqual(workflow, workflow_from_get)

-    def test_need_valid_submission_uuid(self):
+    @ddt.file_data('data/assessments.json')
+    def test_need_valid_submission_uuid(self, data):
         # submission doesn't exist
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow("xxxxxxxxxxx")
+            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])

         # submission_uuid is the wrong type
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow(123)
+            workflow = workflow_api.create_workflow(123, data["steps"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_submissions_errors_wrapped(self, mock_get):
+    def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
-        workflow_api.create_workflow("zzzzzzzzzzzzzzz")
+        workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])

     @patch.object(AssessmentWorkflow.objects, 'create')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_errors_wrapped(self, mock_create):
+    def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], data["steps"])

+    @patch.object(PeerWorkflow.objects, 'get_or_create')
+    @raises(workflow_api.AssessmentWorkflowInternalError)
+    def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
+        mock_create.side_effect = DatabaseError("Kaboom!")
+        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        workflow_api.create_workflow(submission["uuid"], ["peer", "self"])

     @patch.object(AssessmentWorkflow.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_exception_wrapped(self, mock_create):
+    def test_unexpected_exception_wrapped(self, data, mock_create):
         mock_create.side_effect = Exception("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.update_from_assessments(submission["uuid"], REQUIREMENTS)
+        workflow_api.update_from_assessments(submission["uuid"], data["steps"])

-    def test_get_assessment_workflow_expected_errors(self):
+    @ddt.file_data('data/assessments.json')
+    def test_get_assessment_workflow_expected_errors(self, data):
         with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
-            workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS)
+            workflow_api.get_workflow_for_submission("0000000000000", data["requirements"])
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow_api.get_workflow_for_submission(123, REQUIREMENTS)
+            workflow_api.get_workflow_for_submission(123, data["requirements"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_get_errors_wrapped(self, mock_get):
+    def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "We talk TV!")
-        workflow = workflow_api.create_workflow(submission["uuid"])
-        workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
+        workflow_api.get_workflow_for_submission(workflow["uuid"], {})

     def test_get_status_counts(self):
         # Initially, the counts should all be zero
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 0},
             {"status": "self", "count": 0},
@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")

         # Now the counts should be updated
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 1},
             {"status": "self", "count": 2},
@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         # Create a workflow in a different course, same user and item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

         # Create a workflow in the same course, different item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

     def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             "item_type": "openassessment",
         }, answer)

-        workflow = workflow_api.create_workflow(submission['uuid'])
+        workflow = workflow_api.create_workflow(submission['uuid'], ["peer", "self"])

         workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
         workflow_model.status = status
         workflow_model.save()
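The @ddt.file_data decorator is what lets one test body run against the peer-only, self-only, and combined configurations in data/assessments.json. A minimal sketch of the pattern, assuming the ddt package and a JSON file laid out like the fixture above:

    import unittest
    import ddt

    @ddt.ddt
    class ExampleTest(unittest.TestCase):

        # Runs once per top-level key in the JSON file ("peer", "both", "self"),
        # with `data` bound to that key's value.
        @ddt.file_data('data/assessments.json')
        def test_has_steps(self, data):
            self.assertIn("steps", data)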
apps/openassessment/xblock/grade_mixin.py
@@ -70,12 +70,26 @@ class GradeMixin(object):
         Returns:
             tuple of context (dict), template_path (string)
         """
-        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
+        # Peer specific stuff...
+        assessment_steps = self.assessment_steps
+        submission_uuid = workflow['submission_uuid']
+
+        if "peer-assessment" in assessment_steps:
+            feedback = peer_api.get_assessment_feedback(submission_uuid)
+            peer_assessments = peer_api.get_assessments(submission_uuid)
+            has_submitted_feedback = feedback is not None
+        else:
+            feedback = None
+            peer_assessments = []
+            has_submitted_feedback = False
+
+        if "self-assessment" in assessment_steps:
+            self_assessment = self_api.get_assessment(submission_uuid)
+        else:
+            self_assessment = None
+
         feedback_text = feedback.get('feedback', '') if feedback else ''
-        student_submission = sub_api.get_submission(workflow['submission_uuid'])
-        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
-        self_assessment = self_api.get_assessment(student_submission['uuid'])
-        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
+        student_submission = sub_api.get_submission(submission_uuid)

         # We retrieve the score from the workflow, which in turn retrieves
         # the score for our current submission UUID.
@@ -94,9 +108,14 @@ class GradeMixin(object):
         }

         # Update the scores we will display to the user
-        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
-        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
-        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
+        # Note that we are updating a *copy* of the rubric criteria stored in
+        # the XBlock field
+        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
+        if "peer-assessment" in assessment_steps:
+            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
+        elif "self-assessment" in assessment_steps:
+            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
+
         if median_scores is not None and max_scores is not None:
             for criterion in context["rubric_criteria"]:
                 criterion["median_score"] = median_scores[criterion["name"]]
@@ -114,11 +133,17 @@ class GradeMixin(object):
         Returns:
             tuple of context (dict), template_path (string)
         """
+        def _is_incomplete(step):
+            return (
+                step in workflow["status_details"] and
+                not workflow["status_details"][step]["complete"]
+            )
+
         incomplete_steps = []
-        if not workflow["status_details"]["peer"]["complete"]:
-            incomplete_steps.append("Peer Assessment")
-        if not workflow["status_details"]["self"]["complete"]:
-            incomplete_steps.append("Self Assessment")
+        if _is_incomplete("peer"):
+            incomplete_steps.append(_("Peer Assessment"))
+        if _is_incomplete("self"):
+            incomplete_steps.append(_("Self Assessment"))

         return (
             'openassessmentblock/grade/oa_grade_incomplete.html',
@@ -131,7 +156,8 @@ class GradeMixin(object):
         Submit feedback on an assessment.

         Args:
-            data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode).
+            data (dict): Can provide keys 'feedback_text' (unicode) and
+                'feedback_options' (list of unicode).

         Kwargs:
             suffix (str): Unused
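The _is_incomplete helper matters once peer is optional: a step that isn't part of the workflow at all should not be reported as incomplete. A small sketch of that behavior, using an assumed shape for the workflow's status_details dict:

    status_details = {"self": {"complete": False}}  # assumed shape; no "peer" key

    def _is_incomplete(step):
        return step in status_details and not status_details[step]["complete"]

    assert _is_incomplete("self") is True
    assert _is_incomplete("peer") is False  # absent steps are skipped, not flagged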
apps/openassessment/xblock/openassessmentblock.py
@@ -2,7 +2,6 @@
 import datetime as dt
 import logging
-import dateutil

 import pkg_resources
 import pytz
@@ -239,7 +238,9 @@ class OpenAssessmentBlock(
         # Include release/due dates for each step in the problem
         context['step_dates'] = list()
-        for step in ['submission', 'peer-assessment', 'self-assessment']:
+
+        steps = ['submission'] + self.assessment_steps
+        for step in steps:
             # Get the dates as a student would see them
             __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
@@ -313,6 +314,10 @@ class OpenAssessmentBlock(
                 load('static/xml/poverty_rubric_example.xml')
             ),
             (
+                "OpenAssessmentBlock (Self Only) Rubric",
+                load('static/xml/poverty_self_only_example.xml')
+            ),
+            (
                 "OpenAssessmentBlock Censorship Rubric",
                 load('static/xml/censorship_rubric_example.xml')
             ),
@@ -333,6 +338,10 @@ class OpenAssessmentBlock(
         return update_from_xml(block, node, validator=validator(block, strict_post_release=False))

+    @property
+    def assessment_steps(self):
+        return [asmnt['name'] for asmnt in self.rubric_assessments]
+
     def render_assessment(self, path, context_dict=None):
         """Render an Assessment Module's HTML
@@ -421,18 +430,17 @@ class OpenAssessmentBlock(
         ]

         # Resolve unspecified dates and date strings to datetimes
-        start, due, date_ranges = resolve_dates(
-            self.start, self.due, [submission_range] + assessment_ranges
-        )
+        start, due, date_ranges = resolve_dates(self.start, self.due, [submission_range] + assessment_ranges)

         # Based on the step, choose the date range to consider
-        # We hard-code this to the submission -> peer -> self workflow for now;
-        # later, we can revisit to make this more flexible.
         open_range = (start, due)
-        if step == "submission":
+        assessment_steps = self.assessment_steps
+        if step == 'submission':
             open_range = date_ranges[0]
-        if step == "peer-assessment":
-            open_range = date_ranges[1]
-        if step == "self-assessment":
-            open_range = date_ranges[2]
+        elif step in assessment_steps:
+            step_index = assessment_steps.index(step)
+            open_range = date_ranges[1 + step_index]

         # Course staff always have access to the problem
         if course_staff is None:
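is_closed now derives each step's date range from its position in assessment_steps rather than from a hard-coded submission -> peer -> self order. A sketch of the lookup with illustrative values:

    assessment_steps = ["self-assessment"]  # e.g. a self-only problem
    date_ranges = [
        ("2014-05-01", "2014-05-07"),       # index 0: submission window
        ("2014-05-07", "2014-05-14"),       # index 1: first assessment step
    ]

    step = "self-assessment"
    if step == "submission":
        open_range = date_ranges[0]
    elif step in assessment_steps:
        # Assessment ranges start right after the submission range.
        open_range = date_ranges[1 + assessment_steps.index(step)]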
apps/openassessment/xblock/peer_assessment_mixin.py
 import logging

 from django.utils.translation import ugettext as _
+from webob import Response
 from xblock.core import XBlock

 from openassessment.assessment import peer_api
 from openassessment.assessment.peer_api import (
     PeerAssessmentInternalError, PeerAssessmentRequestError,
@@ -114,6 +117,8 @@ class PeerAssessmentMixin(object):
         number of assessments.
         """
+        if "peer-assessment" not in self.assessment_steps:
+            return Response(u"")
+
         continue_grading = data.params.get('continue_grading', False)
         path, context_dict = self.peer_path_and_context(continue_grading)
         return self.render_assessment(path, context_dict)
apps/openassessment/xblock/self_assessment_mixin.py
@@ -2,6 +2,8 @@ import logging
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
+from webob import Response
+
 from openassessment.assessment import self_api
 from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api
@@ -24,6 +26,9 @@ class SelfAssessmentMixin(object):
     @XBlock.handler
     def render_self_assessment(self, data, suffix=''):
+        if "self-assessment" not in self.assessment_steps:
+            return Response(u"")
+
         try:
             path, context = self.self_path_and_context()
         except:
apps/openassessment/xblock/static/js/fixtures/templates.json
@@ -134,7 +134,59 @@
     "score": "",
     "feedback_text": "",
     "student_submission": "",
-    "peer_assessments": [],
+    "peer_assessments": [
+        {
+            "submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
+            "points_earned": 5,
+            "points_possible": 6,
+            "rubric": {
+                "criteria": [
+                    {
+                        "name": "Criterion 1",
+                        "prompt": "Prompt 1",
+                        "order_num": 0,
+                        "feedback": "optional",
+                        "options": [
+                            { "order_num": 2, "points": 2, "name": "Good" }
+                        ],
+                        "points_possible": 2
+                    },
+                    {
+                        "name": "Criterion 2",
+                        "prompt": "Prompt 2",
+                        "order_num": 1,
+                        "options": [
+                            { "order_num": 1, "points": 1, "name": "Fair" }
+                        ],
+                        "points_possible": 2
+                    },
+                    {
+                        "name": "Criterion 3",
+                        "prompt": "Prompt 3",
+                        "order_num": 2,
+                        "feedback": "optional",
+                        "options": [
+                            { "order_num": 2, "points": 2, "name": "Good" }
+                        ],
+                        "points_possible": 2
+                    }
+                ]
+            }
+        }
+    ],
     "self_assessment": {},
     "rubric_criteria": [],
     "has_submitted_feedback": false
apps/openassessment/xblock/static/js/openassessment.min.js
(Diff collapsed: generated minified file.)
apps/openassessment/xblock/static/js/spec/oa_peer.js
@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() {
         this.showLoadError = function(msg) {};
         this.toggleActionError = function(msg, step) {};
         this.setUpCollapseExpand = function(sel) {};
-        this.renderSelfAssessmentStep = function() {};
         this.scrollToTop = function() {};
-        this.gradeView = { load: function() {} };
+        this.loadAssessmentModules = function() {};
     };

     // Stubs
apps/openassessment/xblock/static/js/spec/oa_response.js
@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() {
     // Stub base view
     var StubBaseView = function() {
+        this.loadAssessmentModules = function() {};
         this.peerView = { load: function() {} };
         this.gradeView = { load: function() {} };
         this.showLoadError = function(msg) {};
@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() {
             }).promise();
         });

         spyOn(view, 'load');
-        spyOn(baseView.peerView, 'load');
+        spyOn(baseView, 'loadAssessmentModules');
         view.response('Test response');
         view.submit();

         // Expect the current and next step to have been reloaded
         expect(view.load).toHaveBeenCalled();
-        expect(baseView.peerView.load).toHaveBeenCalled();
+        expect(baseView.loadAssessmentModules).toHaveBeenCalled();
     });

     it("enables the unsaved work warning when the user changes the response text", function() {
apps/openassessment/xblock/static/js/src/oa_base.js
@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = {
     },

     /**
-     * Asynchronously load each sub-view into the DOM.
-     */
+    Asynchronously load each sub-view into the DOM.
+    **/
     load: function() {
         this.responseView.load();
-        this.peerView.load();
-        this.renderSelfAssessmentStep();
-        this.gradeView.load();
+        this.loadAssessmentModules();

         // Set up expand/collapse for course staff debug, if available
         courseStaffDebug = $('.wrapper--staff-info');
@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = {
     },

     /**
+    Refresh the Assessment Modules. This should be called any time an action is
+    performed by the user.
+    **/
+    loadAssessmentModules: function() {
+        this.peerView.load();
+        this.renderSelfAssessmentStep();
+        this.gradeView.load();
+    },
+
+    /**
     Render the self-assessment step.
     **/
     renderSelfAssessmentStep: function() {
@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = {
         this.server.selfAssess(optionsSelected).done(
             function() {
-                view.peerView.load();
-                view.renderSelfAssessmentStep();
-                view.gradeView.load();
+                view.loadAssessmentModules();
                 view.scrollToTop();
             }
         ).fail(function(errMsg) {
apps/openassessment/xblock/static/js/src/oa_peer.js
@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = {
         var baseView = view.baseView;
         this.peerAssessRequest(function() {
             view.load();
-            baseView.renderSelfAssessmentStep();
-            baseView.gradeView.load();
+            baseView.loadAssessmentModules();
             baseView.scrollToTop();
         });
     },
apps/openassessment/xblock/static/js/src/oa_response.js
@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = {
     **/
     moveToNextStep: function() {
         this.load();
-        this.baseView.peerView.load();
-        this.baseView.gradeView.load();
+        this.baseView.loadAssessmentModules();

         // Disable the "unsaved changes" warning if the user
         // tries to navigate to another page.
apps/openassessment/xblock/static/xml/poverty_self_only_example.xml
new file mode 100644

<openassessment submission_due="2015-03-11T18:20">
  <title>Global Poverty</title>
  <rubric>
    <prompt>
      Given the state of the world today, what do you think should be done to combat poverty?
      Read for conciseness, clarity of thought, and form.
    </prompt>
    <criterion>
      <name>concise</name>
      <prompt>How concise is it?</prompt>
      <option points="0">
        <name>Neal Stephenson (late)</name>
        <explanation>
          In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
          While hilarious, in recent years his work has been anything but 'concise'.
        </explanation>
      </option>
      <option points="1">
        <name>HP Lovecraft</name>
        <explanation>
          If the author wrote something cyclopean that staggers the mind, score it thus.
        </explanation>
      </option>
      <option points="3">
        <name>Robert Heinlein</name>
        <explanation>
          Tight prose that conveys a wealth of information about the world in relatively
          few words. Example, "The door irised open and he stepped inside."
        </explanation>
      </option>
      <option points="4">
        <name>Neal Stephenson (early)</name>
        <explanation>
          When Stephenson still had an editor, his prose was dense, with anecdotes about
          nitrox abuse implying main characters' whole life stories.
        </explanation>
      </option>
      <option points="5">
        <name>Earnest Hemingway</name>
        <explanation>
          Score the work this way if it makes you weep, and the removal of a single
          word would make you sneer.
        </explanation>
      </option>
    </criterion>
    <criterion>
      <name>clear-headed</name>
      <prompt>How clear is the thinking?</prompt>
      <option points="0"><name>Yogi Berra</name><explanation></explanation></option>
      <option points="1"><name>Hunter S. Thompson</name><explanation></explanation></option>
      <option points="2"><name>Robert Heinlein</name><explanation></explanation></option>
      <option points="3"><name>Isaac Asimov</name><explanation></explanation></option>
      <option points="10">
        <name>Spock</name>
        <explanation>
          Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
          and unemotional examination of the facts. This is the only item explained in this category,
          to show that explained and unexplained items can be mixed.
        </explanation>
      </option>
    </criterion>
    <criterion>
      <name>form</name>
      <prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
      <option points="0"><name>lolcats</name><explanation></explanation></option>
      <option points="1"><name>Facebook</name><explanation></explanation></option>
      <option points="2"><name>Reddit</name><explanation></explanation></option>
      <option points="3"><name>metafilter</name><explanation></explanation></option>
      <option points="4"><name>Usenet, 1996</name><explanation></explanation></option>
      <option points="5"><name>The Elements of Style</name><explanation></explanation></option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="self-assessment" />
  </assessments>
</openassessment>
apps/openassessment/xblock/submission_mixin.py
@@ -124,7 +124,7 @@ class SubmissionMixin(object):
             student_sub_dict = {'text': student_sub}
             submission = api.create_submission(student_item_dict, student_sub_dict)
-            workflow_api.create_workflow(submission["uuid"])
+            self.create_workflow(submission["uuid"])
             self.submission_uuid = submission["uuid"]

             # Emit analytics event...
apps/openassessment/xblock/test/data/assessment_combo.json
@@ -10,7 +10,9 @@
             { "name": "self-assessment" }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "peer_only": {
         "valid": false,
@@ -20,15 +22,19 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "self_only": {
-        "valid": false,
+        "valid": true,
         "assessments": [
             { "name": "self-assessment" }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "self_before_peer": {
         "valid": false,
@@ -41,7 +47,9 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "peer_then_peer": {
         "valid": false,
@@ -56,6 +64,8 @@
                 "must_grade": 5,
                 "must_be_graded_by": 3
             }
-        ]
+        ],
+        "current_assessments": null,
+        "is_released": false
     }
 }
apps/openassessment/xblock/test/data/invalid_assessment_combo_order.xml
0 → 100644
View file @ aaf9bdb2
<openassessment>
  <title>Open Assessment Test</title>
  <prompt>
    Given the state of the world today, what do you think should be done to
    combat poverty? Please answer in a short essay of 200-300 words.
  </prompt>
  <rubric>
    <prompt>Read for conciseness, clarity of thought, and form.</prompt>
    <criterion>
      <name>Concise</name>
      <prompt>How concise is it?</prompt>
      <option points="0">
        <name>Neal Stephenson (late)</name>
        <explanation>Neal Stephenson explanation</explanation>
      </option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="self-assessment" />
    <assessment name="peer-assessment" />
  </assessments>
</openassessment>
apps/openassessment/xblock/test/data/invalid_assessment_combo_peer_only.xml
0 → 100644
View file @ aaf9bdb2
<openassessment>
  <title>Open Assessment Test</title>
  <prompt>
    Given the state of the world today, what do you think should be done to
    combat poverty? Please answer in a short essay of 200-300 words.
  </prompt>
  <rubric>
    <prompt>Read for conciseness, clarity of thought, and form.</prompt>
    <criterion>
      <name>Concise</name>
      <prompt>How concise is it?</prompt>
      <option points="0">
        <name>Neal Stephenson (late)</name>
        <explanation>Neal Stephenson explanation</explanation>
      </option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="peer-assessment" />
  </assessments>
</openassessment>
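Together with invalid_assessment_combo_order.xml above, this fixture pins down the new combination rules: self-before-peer and peer-only must both be rejected. A minimal sketch of the check these fixtures exercise, assuming validate_assessments as defined in validation.py later in this commit (import path inferred from the repo layout):

from openassessment.xblock.validation import validate_assessments

# Peer-only: rejected, since the only legal combinations are
# (peer -> self) and (self only).
peer_only = [{"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3}]
success, msg = validate_assessments(peer_only, None, False)
assert not success  # msg names the allowed combinations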
apps/openassessment/xblock/test/data/invalid_assessments.json
View file @ aaf9bdb2
 {
     "empty_dict": {
-        "assessment": {}
-    },
-    "must_be_graded_by_zero": {
-        "assessment": { "name": "self-assessment", "must_grade": 1, "must_be_graded_by": 0 }
+        "assessments": [{}],
+        "current_assessments": null,
+        "is_released": false
     },
     "unsupported_type": {
-        "assessment": { "name": "unsupported-assessment", "must_grade": 5, "must_be_graded_by": 3 }
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 },
+            { "name": "self-assessment" },
+            { "name": "unsupported-assessment", "must_grade": 5, "must_be_graded_by": 3 }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_type": {
-        "assessment": { "must_grade": 5, "must_be_graded_by": 3 }
+        "assessments": [
+            { "name": "self-assessment" },
+            { "must_grade": 5, "must_be_graded_by": 3 }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "unsupported_unicode_type": {
-        "assessment": { "name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽", "must_grade": 5, "must_be_graded_by": 3 }
+        "assessments": [
+            { "name": "self-assessment" },
+            { "name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽", "must_grade": 5, "must_be_graded_by": 3 }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_grade": {
-        "assessment": { "name": "peer-assessment", "must_be_graded_by": 3 }
+        "assessments": [
+            { "name": "peer-assessment", "must_be_graded_by": 3 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_be_graded_by": {
-        "assessment": { "name": "peer-assessment", "must_grade": 5 }
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 5 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_less_than_must_be_graded_by": {
-        "assessment": { "name": "peer-assessment", "must_grade": 4, "must_be_graded_by": 5 }
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 4, "must_be_graded_by": 5 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_zero": {
-        "assessment": { "name": "peer-assessment", "must_grade": 0, "must_be_graded_by": 0 }
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 0, "must_be_graded_by": 0 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
+    "must_be_graded_by_zero": {
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 0 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "remove_peer_mid_flight": {
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": [
+            { "name": "self-assessment" }
+        ],
+        "is_released": true
+    },
+    "swap_peer_and_self_mid_flight": {
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": [
+            { "name": "self-assessment" },
+            { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 }
+        ],
+        "is_released": true
+    }
 }
apps/openassessment/xblock/test/data/invalid_assessment_combo.xml → apps/openassessment/xblock/test/data/optional_assessments_self_only.xml
View file @ aaf9bdb2
<openassessment>
-  <title>Open Assessment Test</title>
+  <title>Only Self Assessment</title>
   <prompt>
     Given the state of the world today, what do you think should be done to
     combat poverty? Please answer in a short essay of 200-300 words.
...
...
apps/openassessment/xblock/test/data/valid_assessments.json
View file @ aaf9bdb2
 {
-    "peer": {
-        "assessment": { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 }
+    "peer_then_self": {
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
-    "self": {
-        "assessment": { "name": "self-assessment", "must_grade": 2, "must_be_graded_by": 1 }
+    "self_only": {
+        "assessments": [
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_be_graded_by_equals_must_grade": {
-        "assessment": { "name": "self-assessment", "must_grade": 1, "must_be_graded_by": 1 }
+        "assessments": [
+            { "name": "peer-assessment", "must_grade": 1, "must_be_graded_by": 1 },
+            { "name": "self-assessment" }
+        ],
+        "current_assessments": null,
+        "is_released": false
     }
 }
apps/openassessment/xblock/test/test_grade.py
View file @ aaf9bdb2
...
@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase):
     SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

+    STEPS = ['peer', 'self']
+
     @scenario('data/grade_scenario.xml', user_id='Greggs')
     def test_render_grade(self, xblock):
         # Submit, assess, and render the grade view
...
@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase):
             scorer['student_id'] = scorer_name
             scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
-            workflow_api.create_workflow(scorer_sub['uuid'])
+            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
             submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
...
apps/openassessment/xblock/test/test_studio.py
View file @ aaf9bdb2
...
@@ -92,12 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase):
     # Test that we enforce that there are exactly two assessments,
     # peer ==> self
     # If and when we remove this restriction, this test can be deleted.
+    @data('data/invalid_assessment_combo_order.xml', 'data/invalid_assessment_combo_peer_only.xml')
     @scenario('data/basic_scenario.xml')
-    def test_update_xml_invalid_assessment_combo(self, xblock):
-        request = json.dumps({'xml': self.load_fixture_str('data/invalid_assessment_combo.xml')})
+    def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
+        request = json.dumps(
+            {'xml': self.load_fixture_str(invalid_workflow)}
+        )
         resp = self.request(xblock, 'update_xml', request, response_format='json')
         self.assertFalse(resp['success'])
-        self.assertIn("must have exactly two assessments", resp['msg'].lower())
+        self.assertIn("for this assignment", resp['msg'].lower())

     @data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
     @scenario('data/basic_scenario.xml')
...
apps/openassessment/xblock/test/test_validation.py
View file @ aaf9bdb2
...
@@ -14,27 +14,26 @@ class AssessmentValidationTest(TestCase):
     @ddt.file_data('data/valid_assessments.json')
     def test_valid_assessment(self, data):
-        success, msg = validate_assessments([data['assessment']])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
         self.assertTrue(success)
         self.assertEqual(msg, u'')

     @ddt.file_data('data/invalid_assessments.json')
     def test_invalid_assessment(self, data):
-        success, msg = validate_assessments([data['assessment']])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

     def test_no_assessments(self):
-        success, msg = validate_assessments([])
+        success, msg = validate_assessments([], [], False)
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

-    # Currently, we enforce the restriction that there must be
-    # exactly two assessments, in the order (a) peer, then (b) self.
-    # If and when we remove that restriction, this test can be deleted.
+    # Make sure only legal assessment combinations are allowed. For now, that's
+    # (peer -> self), and (self)
     @ddt.file_data('data/assessment_combo.json')
-    def test_enforce_peer_then_self(self, data):
-        success, msg = validate_assessments(data['assessments'], enforce_peer_then_self=True)
+    def test_enforce_assessment_combo_restrictions(self, data):
+        success, msg = validate_assessments(
+            data["assessments"], data["current_assessments"], data["is_released"]
+        )
         self.assertEqual(success, data['valid'], msg=msg)
         if not success:
...
apps/openassessment/xblock/validation.py
View file @ aaf9bdb2
...
@@ -43,33 +43,49 @@ def _duplicates(items):
     return set(x for x in items if counts[x] > 1)


-def validate_assessments(assessments, enforce_peer_then_self=False):
+def validate_assessments(assessments, current_assessments, is_released):
     """
     Check that the assessment dict is semantically valid.

+    Valid assessment steps are currently:
+    * peer, then self
+    * self only
+
+    If a question has been released, the type and number of assessment steps
+    cannot be changed.
+
     Args:
         assessments (list of dict): list of serialized assessment models.
-
-    Kwargs:
-        enforce_peer_then_self (bool): If True, enforce the requirement that there
-            must be exactly two assessments: first, a peer-assessment, then a self-assessment.
+        current_assessments (list of dict): list of the current serialized
+            assessment models. Used to determine if the assessment configuration
+            has changed since the question had been released.
+        is_released (boolean): True if the question has been released.

     Returns:
         tuple (is_valid, msg) where
             is_valid is a boolean indicating whether the assessment is semantically valid
             and msg describes any validation errors found.
     """
-    if enforce_peer_then_self:
-        if len(assessments) != 2:
-            return (False, _("This problem must have exactly two assessments."))
-        if assessments[0].get('name') != 'peer-assessment':
-            return (False, _("The first assessment must be a peer assessment."))
-        if assessments[1].get('name') != 'self-assessment':
-            return (False, _("The second assessment must be a self assessment."))
+    def _self_only(assessments):
+        return len(assessments) == 1 and assessments[0].get('name') == 'self-assessment'
+
+    def _peer_then_self(assessments):
+        return (
+            len(assessments) == 2
+            and assessments[0].get('name') == 'peer-assessment'
+            and assessments[1].get('name') == 'self-assessment'
+        )

     if len(assessments) == 0:
         return (False, _("This problem must include at least one assessment."))

+    # Right now, there are two allowed scenarios: (peer -> self) and (self)
+    if not (_self_only(assessments) or _peer_then_self(assessments)):
+        return (False, _(
+            "For this assignment, you can set either a peer assessment followed by "
+            "a self assessment or a self assessment only."
+        ))
+
     for assessment_dict in assessments:
         # Supported assessment
         if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
...
@@ -89,6 +105,15 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
         if must_grade < must_be_graded_by:
             return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))

+    if is_released:
+        if len(assessments) != len(current_assessments):
+            return (False, _("The number of assessments cannot be changed after the problem has been released."))
+
+        names = [assessment.get('name') for assessment in assessments]
+        current_names = [assessment.get('name') for assessment in current_assessments]
+        if names != current_names:
+            return (False, _("The assessment type cannot be changed after the problem has been released."))
+
     return (True, u'')
...
@@ -188,7 +213,12 @@ def validator(oa_block, strict_post_release=True):
     """
     def _inner(rubric_dict, submission_dict, assessments):
-        success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
+        current_assessments = oa_block.rubric_assessments
+        success, msg = validate_assessments(
+            assessments,
+            current_assessments,
+            strict_post_release and oa_block.is_released()
+        )
         if not success:
             return (False, msg)
...
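To make the new post-release guard concrete, a short usage sketch — validate_assessments and its error messages are as in the diff above, the import path is inferred from the repo layout, and the two configurations are illustrative:

from openassessment.xblock.validation import validate_assessments

released = [
    {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
    {"name": "self-assessment"},
]
proposed = [{"name": "self-assessment"}]

# Dropping the peer step after release changes the number of assessments,
# so this fails even though self-only is a legal combination pre-release.
success, msg = validate_assessments(proposed, released, True)
assert not success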
apps/openassessment/xblock/workflow_mixin.py
View file @ aaf9bdb2
...
@@ -8,6 +8,25 @@ class WorkflowMixin(object):
     def handle_workflow_info(self, data, suffix=''):
         return self.get_workflow_info()

+    def create_workflow(self, submission_uuid):
+        steps = self._create_step_list()
+        workflow_api.create_workflow(submission_uuid, steps)
+
+    def _create_step_list(self):
+        def _convert_rubric_assessment_name(ra_name):
+            """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
+            short_name, suffix = ra_name.split("-")
+            return short_name
+
+        # rubric_assessments stores names as "self-assessment",
+        # "peer-assessment", while the model is expecting "self", "peer".
+        # Therefore, this conversion step. We should refactor later to
+        # standardize.
+        return [
+            _convert_rubric_assessment_name(ra["name"])
+            for ra in self.rubric_assessments
+        ]
+
     def workflow_requirements(self):
         """
         Retrieve the requirements from each assessment module
...
@@ -93,6 +112,7 @@ class WorkflowMixin(object):
         status_counts = workflow_api.get_status_counts(
             course_id=student_item['course_id'],
             item_id=student_item['item_id'],
+            steps=self._create_step_list(),
         )
         num_submissions = sum(item['count'] for item in status_counts)
         return status_counts, num_submissions
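The conversion comment above can be pinned down with a small sketch; the data shapes follow those used throughout this commit:

rubric_assessments = [
    {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
    {"name": "self-assessment"},
]

# Mirrors _convert_rubric_assessment_name: keep the part before
# "-assessment", since the workflow models expect "peer"/"self".
steps = [ra["name"].split("-")[0] for ra in rubric_assessments]
assert steps == ["peer", "self"]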