edx / edx-ora2

Commit e7126bfd, authored May 08, 2014 by Will Daly
Merge pull request #316 from edx/will/refactor-peer-api

Peer API Refactor

Parents: a03a2336, 1049bd8b
Showing 8 changed files with 405 additions and 470 deletions
apps/openassessment/assessment/errors.py           +72   -0
apps/openassessment/assessment/models.py           +285  -0
apps/openassessment/assessment/peer_api.py          +16  -420
apps/openassessment/assessment/self_api.py           +3  -24
apps/openassessment/assessment/test/test_peer.py    +23  -23
apps/openassessment/assessment/test/test_self.py     +2  -1
apps/openassessment/workflow/api.py                  +2  -1
apps/openassessment/xblock/grade_mixin.py            +2  -1
apps/openassessment/assessment/errors.py  (new file, 0 → 100644)
"""
Errors for the assessment app.
"""
import
copy
class
PeerAssessmentError
(
Exception
):
"""Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the
Peer Assessment Workflow.
"""
pass
class
PeerAssessmentRequestError
(
PeerAssessmentError
):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or incorrect
information which does not allow the request to be processed.
"""
def
__init__
(
self
,
field_errors
):
Exception
.
__init__
(
self
,
repr
(
field_errors
))
self
.
field_errors
=
copy
.
deepcopy
(
field_errors
)
class
PeerAssessmentWorkflowError
(
PeerAssessmentError
):
"""Error indicating a step in the workflow cannot be completed,
Raised when the action taken cannot be completed in the workflow. This can
occur based on parameters specific to the Submission, User, or Peer Scorers.
"""
pass
class
PeerAssessmentInternalError
(
PeerAssessmentError
):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
the actions or parameters given to the API.
"""
pass
class
SelfAssessmentError
(
Exception
):
"""Generic Self Assessment Error
Raised when an error occurs while processing a request related to the
Self Assessment Workflow.
"""
pass
class
SelfAssessmentRequestError
(
SelfAssessmentError
):
"""
There was a problem with the request for a self-assessment.
"""
pass
class
SelfAssessmentInternalError
(
SelfAssessmentError
):
"""
There was an internal problem while accessing the self-assessment api.
"""
pass
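The new errors module gives both APIs a shared exception hierarchy. As a rough sketch (not part of this commit), a caller could lean on the base classes to collapse its error handling; the record_peer_review helper below is hypothetical, while create_peer_workflow_item and field_errors come from this diff.

# Sketch only: collapsing error handling onto the shared base classes.
# record_peer_review is hypothetical; create_peer_workflow_item is the real
# public function shown in the peer_api.py diff below.
from openassessment.assessment import peer_api
from openassessment.assessment.errors import (
    PeerAssessmentError, PeerAssessmentRequestError
)


def record_peer_review(scorer_submission_uuid, submission_uuid):
    try:
        peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
    except PeerAssessmentRequestError as err:
        # field_errors is populated by PeerAssessmentRequestError.__init__ above.
        return {"success": False, "errors": err.field_errors}
    except PeerAssessmentError:
        # Workflow and internal errors both subclass PeerAssessmentError,
        # so one clause covers the remaining failure modes.
        return {"success": False, "errors": {}}
    return {"success": True, "errors": {}}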
apps/openassessment/assessment/models.py
@@ -14,13 +14,21 @@ from collections import defaultdict
from copy import deepcopy
from hashlib import sha1
import json
import random
from datetime import timedelta

from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django.db import DatabaseError
import math

from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError

import logging


logger = logging.getLogger("openassessment.assessment.models")


class InvalidOptionSelection(Exception):
    """
@@ -526,6 +534,9 @@ class PeerWorkflow(models.Model):
    created for each assessment made by this student.
    """
    # Amount of time before a lease on a submission expires
    TIME_LIMIT = timedelta(hours=8)

    student_id = models.CharField(max_length=40, db_index=True)
    item_id = models.CharField(max_length=128, db_index=True)
    course_id = models.CharField(max_length=40, db_index=True)
@@ -537,6 +548,280 @@ class PeerWorkflow(models.Model):
    class Meta:
        ordering = ["created_at", "id"]

    @classmethod
    def get_by_submission_uuid(cls, submission_uuid):
        """
        Retrieve the Peer Workflow associated with the given submission UUID.

        Args:
            submission_uuid (str): The string representation of the UUID belonging
                to the associated Peer Workflow.

        Returns:
            workflow (PeerWorkflow): The most recent peer workflow associated with
                this submission UUID.

        Raises:
            PeerAssessmentWorkflowError: Thrown when no workflow can be found for
                the associated submission UUID. This should always exist before a
                student is allow to request submissions for peer assessment.

        Examples:
            >>> PeerWorkflow.get_workflow_by_submission_uuid("abc123")
            {
                'student_id': u'Bob',
                'item_id': u'type_one',
                'course_id': u'course_1',
                'submission_uuid': u'1',
                'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
            }

        """
        try:
            return cls.objects.get(submission_uuid=submission_uuid)
        except cls.DoesNotExist:
            return None
        except DatabaseError:
            error_message = _(
                u"Error finding workflow for submission UUID {}. Workflow must be "
                u"created for submission before beginning peer assessment."
                .format(submission_uuid)
            )
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)

    @classmethod
    def create_item(cls, scorer_workflow, submission_uuid):
        """
        Create a new peer workflow for a student item and submission.

        Args:
            scorer_workflow (PeerWorkflow): The peer workflow associated with the scorer.
            submission_uuid (str): The submission associated with this workflow.

        Raises:
            PeerAssessmentInternalError: Raised when there is an internal error
                creating the Workflow.
        """
        peer_workflow = cls.get_by_submission_uuid(submission_uuid)

        try:
            workflow_item, __ = PeerWorkflowItem.objects.get_or_create(
                scorer=scorer_workflow,
                author=peer_workflow,
                submission_uuid=submission_uuid
            )
            workflow_item.started_at = now()
            workflow_item.save()
            return workflow_item
        except DatabaseError:
            error_message = _(
                u"An internal error occurred while creating a new peer workflow "
                u"item for workflow {}"
                .format(scorer_workflow)
            )
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)

    def find_active_assessments(self):
        """Given a student item, return an active assessment if one is found.

        Before retrieving a new submission for a peer assessor, check to see if that
        assessor already has a submission out for assessment. If an unfinished
        assessment is found that has not expired, return the associated submission.

        TODO: If a user begins an assessment, then resubmits, this will never find
        the unfinished assessment. Is this OK?

        Args:
            workflow (PeerWorkflow): See if there is an associated active assessment
                for this PeerWorkflow.

        Returns:
            submission_uuid (str): The submission_uuid for the submission that the
                student has open for active assessment.

        """
        oldest_acceptable = now() - self.TIME_LIMIT
        workflows = self.graded.filter(assessment__isnull=True, started_at__gt=oldest_acceptable)
        return workflows[0].submission_uuid if workflows else None

    def get_submission_for_review(self, graded_by):
        """
        Find a submission for peer assessment. This function will find the next
        submission that requires assessment, excluding any submission that has been
        completely graded, or is actively being reviewed by other students.

        Args:
            graded_by (unicode): Student ID of the scorer.

        Returns:
            submission_uuid (str): The submission_uuid for the submission to review.

        Raises:
            PeerAssessmentInternalError: Raised when there is an error retrieving
                the workflows or workflow items for this request.

        """
        timeout = (now() - self.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
        # The follow query behaves as the Peer Assessment Queue. This will
        # find the next submission (via PeerWorkflow) in this course / question
        # that:
        #  1) Does not belong to you
        #  2) Does not have enough completed assessments
        #  3) Is not something you have already scored.
        #  4) Does not have a combination of completed assessments or open
        #     assessments equal to or more than the requirement.
        try:
            peer_workflows = list(PeerWorkflow.objects.raw(
                "select pw.id, pw.submission_uuid "
                "from assessment_peerworkflow pw "
                "where pw.item_id=%s "
                "and pw.course_id=%s "
                "and pw.student_id<>%s "
                "and pw.grading_completed_at is NULL "
                "and pw.id not in ("
                "   select pwi.author_id "
                "   from assessment_peerworkflowitem pwi "
                "   where pwi.scorer_id=%s "
                "   and pwi.assessment_id is not NULL "
                ") "
                "and ("
                "   select count(pwi.id) as c "
                "   from assessment_peerworkflowitem pwi "
                "   where pwi.author_id=pw.id "
                "   and (pwi.assessment_id is not NULL or pwi.started_at > %s) "
                ") < %s "
                "order by pw.created_at, pw.id "
                "limit 1; ",
                [self.item_id, self.course_id, self.student_id, self.id, timeout, graded_by]
            ))
            if not peer_workflows:
                return None

            return peer_workflows[0].submission_uuid
        except DatabaseError:
            error_message = _(
                u"An internal error occurred while retrieving a peer submission "
                u"for student {}"
                .format(self)
            )
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)

    def get_submission_for_over_grading(self):
        """
        Retrieve the next submission uuid for over grading in peer assessment.
        """
        # The follow query behaves as the Peer Assessment Over Grading Queue. This
        # will find a random submission (via PeerWorkflow) in this course / question
        # that:
        #  1) Does not belong to you
        #  2) Is not something you have already scored
        try:
            query = list(PeerWorkflow.objects.raw(
                "select pw.id, pw.submission_uuid "
                "from assessment_peerworkflow pw "
                "where course_id=%s "
                "and item_id=%s "
                "and student_id<>%s "
                "and pw.id not in ( "
                "select pwi.author_id "
                "from assessment_peerworkflowitem pwi "
                "where pwi.scorer_id=%s); ",
                [self.course_id, self.item_id, self.student_id, self.id]
            ))
            workflow_count = len(query)
            if workflow_count < 1:
                return None

            random_int = random.randint(0, workflow_count - 1)
            random_workflow = query[random_int]

            return random_workflow.submission_uuid
        except DatabaseError:
            error_message = _(
                u"An internal error occurred while retrieving a peer submission "
                u"for student {}"
                .format(self)
            )
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)

    def get_latest_open_workflow_item(self):
        """
        Return the latest open workflow item for this workflow.

        Returns:
            A PeerWorkflowItem that is open for assessment.
            None if no item is found.
        """
        workflow_query = self.graded.filter(
            assessment__isnull=True
        ).order_by("-started_at", "-id")
        items = list(workflow_query[:1])
        return items[0] if items else None

    def close_active_assessment(self, submission_uuid, assessment, num_required_grades):
        """
        Updates a workflow item on the student's workflow with the associated
        assessment. When a workflow item has an assessment, it is considered
        finished.

        Args:
            submission_uuid (str): The submission the scorer is grading.
            assessment (PeerAssessment): The associate assessment for this action.
            graded_by (int): The required number of grades the peer workflow
                requires to be considered complete.

        Returns:
            None

        """
        try:
            item_query = self.graded.filter(
                submission_uuid=submission_uuid
            ).order_by("-started_at", "-id")
            items = list(item_query[:1])
            if not items:
                raise PeerAssessmentWorkflowError(_(
                    u"No open assessment was found for student {} while assessing "
                    u"submission UUID {}."
                    .format(self.student_id, submission_uuid)
                ))
            item = items[0]
            item.assessment = assessment
            item.save()

            if (not item.author.grading_completed_at
                    and item.author.graded_by.filter(assessment__isnull=False).count() >= num_required_grades):
                item.author.grading_completed_at = now()
                item.author.save()
        except (DatabaseError, PeerWorkflowItem.DoesNotExist):
            error_message = _(
                u"An internal error occurred while retrieving a workflow item for "
                u"student {}. Workflow Items are created when submissions are "
                u"pulled for assessment."
                .format(self.student_id)
            )
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)

    def num_peers_graded(self):
        """
        Returns the number of peers the student owning the workflow has graded.

        Returns:
            integer
        """
        return self.graded.filter(assessment__isnull=False).count()

    def __repr__(self):
        return (
            "PeerWorkflow(student_id={0.student_id}, item_id={0.item_id}, "
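The methods added above move the peer assessment queue logic onto the PeerWorkflow model itself. Below is a minimal sketch of how they compose, mirroring the flow that peer_api.get_submission_to_assess() follows further down; the UUID string and the graded_by count of 3 are illustrative values, not from the commit.

# Illustrative sketch: how the new PeerWorkflow methods fit together.
# Mirrors peer_api.get_submission_to_assess() below; the UUID string and the
# graded_by value are invented for the example.
from openassessment.assessment.models import PeerWorkflow

scorer_workflow = PeerWorkflow.get_by_submission_uuid("scorer-submission-uuid")
if scorer_workflow is not None:
    # Reuse an unexpired, unfinished assessment if the scorer already has one open.
    peer_submission_uuid = scorer_workflow.find_active_assessments()
    if peer_submission_uuid is None:
        # Otherwise pull the next submission off the peer assessment queue.
        peer_submission_uuid = scorer_workflow.get_submission_for_review(graded_by=3)
    if peer_submission_uuid is not None:
        # Lease the submission to this scorer; the lease expires after TIME_LIMIT (8 hours).
        PeerWorkflow.create_item(scorer_workflow, peer_submission_uuid)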
apps/openassessment/assessment/peer_api.py
@@ -4,9 +4,7 @@ The Peer Assessment Workflow API exposes all public actions required to complete
 the workflow for a given submission.
 """
-import copy
 import logging
-from datetime import timedelta

 from django.utils import timezone
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
@@ -21,55 +19,14 @@ from openassessment.assessment.serializers import (
     AssessmentSerializer,
     AssessmentFeedbackSerializer,
     RubricSerializer,
     full_assessment_dict,
     rubric_from_dict,
     serialize_assessments,
 )
+from openassessment.assessment.errors import (
+    PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
+)
 from submissions import api as sub_api

 logger = logging.getLogger("openassessment.assessment.peer_api")

 PEER_TYPE = "PE"
-TIME_LIMIT = timedelta(hours=8)
-
-
-class PeerAssessmentError(Exception):
-    """Generic Peer Assessment Error
-    Raised when an error occurs while processing a request related to the
-    Peer Assessment Workflow.
-    """
-    pass
-
-
-class PeerAssessmentRequestError(PeerAssessmentError):
-    """Error indicating insufficient or incorrect parameters in the request.
-    Raised when the request does not contain enough information, or incorrect
-    information which does not allow the request to be processed.
-    """
-    def __init__(self, field_errors):
-        Exception.__init__(self, repr(field_errors))
-        self.field_errors = copy.deepcopy(field_errors)
-
-
-class PeerAssessmentWorkflowError(PeerAssessmentError):
-    """Error indicating a step in the workflow cannot be completed,
-    Raised when the action taken cannot be completed in the workflow. This can
-    occur based on parameters specific to the Submission, User, or Peer Scorers.
-    """
-    pass
-
-
-class PeerAssessmentInternalError(PeerAssessmentError):
-    """Error indicating an internal problem independent of API use.
-    Raised when an internal error has occurred. This should be independent of
-    the actions or parameters given to the API.
-    """
-    pass
-
-
 def submitter_is_finished(submission_uuid, requirements):
@@ -77,7 +34,7 @@ def submitter_is_finished(submission_uuid, requirements):
     workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
     if workflow.completed_at is not None:
         return True
-    elif _num_peers_graded(workflow) >= requirements["must_grade"]:
+    elif workflow.num_peers_graded() >= requirements["must_grade"]:
         workflow.completed_at = timezone.now()
         workflow.save()
         return True

@@ -209,7 +166,7 @@ def create_assessment(
         scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)

-        peer_workflow_item = _get_latest_open_workflow_item(scorer_workflow)
+        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
         if peer_workflow_item is None:
             message = _(
                 u"There are no open assessments associated with the scorer's "
@@ -243,7 +200,7 @@ def create_assessment(
         AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)

         # Close the active assessment
-        _close_active_assessment(scorer_workflow, peer_submission_uuid, assessment, num_required_grades)
+        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)

         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, scorer_workflow)
@@ -370,11 +327,11 @@ def has_finished_required_evaluating(submission_uuid, required_assessments):
         True, 3

     """
-    workflow = _get_workflow_by_submission_uuid(submission_uuid)
+    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
     done = False
     peers_graded = 0
     if workflow:
-        peers_graded = _num_peers_graded(workflow)
+        peers_graded = workflow.num_peers_graded()
         done = (peers_graded >= required_assessments)

     return done, peers_graded
@@ -444,10 +401,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
         raise PeerAssessmentInternalError(error_message)


-def get_submission_to_assess(
-        submission_uuid,
-        graded_by,
-        over_grading=False):
+def get_submission_to_assess(submission_uuid, graded_by, over_grading=False):
     """Get a submission to peer evaluate.

     Retrieves a submission for assessment for the given student. This will
@@ -495,23 +449,23 @@ def get_submission_to_assess(
         }

     """
-    workflow = _get_workflow_by_submission_uuid(submission_uuid)
+    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
     if not workflow:
         raise PeerAssessmentWorkflowError(_(
             u"A Peer Assessment Workflow does not exist for the specified "
             u"student."))
-    peer_submission_uuid = _find_active_assessments(workflow)
+    peer_submission_uuid = workflow.find_active_assessments()
     # If there is an active assessment for this user, get that submission,
     # otherwise, get the first assessment for review, otherwise, if over grading
     # is turned on, get the first submission available for over grading.
     if peer_submission_uuid is None:
-        peer_submission_uuid = _get_submission_for_review(workflow, graded_by)
+        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
     if peer_submission_uuid is None and over_grading:
-        peer_submission_uuid = _get_submission_for_over_grading(workflow)
+        peer_submission_uuid = workflow.get_submission_for_over_grading()
     if peer_submission_uuid:
         try:
             submission_data = sub_api.get_submission(peer_submission_uuid)
-            _create_peer_workflow_item(workflow, peer_submission_uuid)
+            PeerWorkflow.create_item(workflow, peer_submission_uuid)
             _log_workflow(peer_submission_uuid, workflow, over_grading)
             return submission_data
         except sub_api.SubmissionNotFoundError:
@@ -592,8 +546,8 @@ def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
         PeerAssessmentWorkflowError: Could not find the workflow for the student.
         PeerAssessmentInternalError: Could not create the peer workflow item.
     """
-    workflow = _get_workflow_by_submission_uuid(scorer_submission_uuid)
-    _create_peer_workflow_item(workflow, submission_uuid)
+    workflow = PeerWorkflow.get_by_submission_uuid(scorer_submission_uuid)
+    PeerWorkflow.create_item(workflow, submission_uuid)


 def get_assessment_feedback(submission_uuid):
@@ -681,364 +635,6 @@ def set_assessment_feedback(feedback_dict):
         raise PeerAssessmentInternalError(msg)


-def _get_workflow_by_submission_uuid(submission_uuid):
-    """Get the Peer Workflow associated with the given submission UUID.
-
-    If available, returns the Peer Workflow associated with the given
-    submission UUID.
-
-    Args:
-        submission_uuid (str): The string representation of the UUID belonging
-            to the associated Peer Workflow.
-
-    Returns:
-        workflow (PeerWorkflow): The most recent peer workflow associated with
-            this submission UUID.
-
-    Raises:
-        PeerAssessmentWorkflowError: Thrown when no workflow can be found for
-            the associated submission UUID. This should always exist before a
-            student is allow to request submissions for peer assessment.
-
-    Examples:
-        >>> workflow = _get_workflow_by_submission_uuid("abc123")
-        {
-            'student_id': u'Bob',
-            'item_id': u'type_one',
-            'course_id': u'course_1',
-            'submission_uuid': u'1',
-            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
-        }
-
-    """
-    try:
-        return PeerWorkflow.objects.get(submission_uuid=submission_uuid)
-    except PeerWorkflow.DoesNotExist:
-        return None
-    except DatabaseError:
-        error_message = _(
-            u"Error finding workflow for submission UUID {}. Workflow must be "
-            u"created for submission before beginning peer assessment."
-            .format(submission_uuid)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentWorkflowError(error_message)
-
-
-def _create_peer_workflow_item(workflow, submission_uuid):
-    """Create a new peer workflow for a student item and submission.
-
-    Creates a unique peer workflow for a student item, associated with a
-    submission.
-
-    Args:
-        workflow (PeerWorkflow): The peer workflow associated with the scorer.
-        submission_uuid (str): The submission associated with this workflow.
-
-    Raises:
-        PeerAssessmentInternalError: Raised when there is an internal error
-            creating the Workflow.
-
-    Examples:
-        >>> student_item_dict = dict(
-        >>>    item_id="item_1",
-        >>>    course_id="course_1",
-        >>>    item_type="type_one",
-        >>>    student_id="Bob",
-        >>> )
-        >>> workflow = _get_workflow_by_submission_uuid(student_item_dict)
-        >>> _create_peer_workflow_item(workflow, "1")
-
-    """
-    try:
-        peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
-        workflow_item, __ = PeerWorkflowItem.objects.get_or_create(
-            scorer=workflow,
-            author=peer_workflow,
-            submission_uuid=submission_uuid
-        )
-        workflow_item.started_at = timezone.now()
-        workflow_item.save()
-        return workflow_item
-    except DatabaseError:
-        error_message = _(
-            u"An internal error occurred while creating a new peer workflow "
-            u"item for workflow {}"
-            .format(workflow)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
-
-
-def _find_active_assessments(workflow):
-    """Given a student item, return an active assessment if one is found.
-
-    Before retrieving a new submission for a peer assessor, check to see if that
-    assessor already has a submission out for assessment. If an unfinished
-    assessment is found that has not expired, return the associated submission.
-
-    TODO: If a user begins an assessment, then resubmits, this will never find
-    the unfinished assessment. Is this OK?
-
-    Args:
-        workflow (PeerWorkflow): See if there is an associated active assessment
-            for this PeerWorkflow.
-
-    Returns:
-        submission_uuid (str): The submission_uuid for the submission that the
-            student has open for active assessment.
-
-    Examples:
-        >>> student_item_dict = dict(
-        >>>    item_id="item_1",
-        >>>    course_id="course_1",
-        >>>    item_type="type_one",
-        >>>    student_id="Bob",
-        >>> )
-        >>> workflow = _get_workflow_by_submission_uuid(student_item_dict)
-        >>> _find_active_assessments(student_item_dict)
-        "1"
-
-    """
-    workflows = workflow.graded.filter(
-        assessment__isnull=True,
-        started_at__gt=timezone.now() - TIME_LIMIT
-    )
-    return workflows[0].submission_uuid if workflows else None
-
-
-def _get_submission_for_review(workflow, graded_by, over_grading=False):
-    """Get the next submission for peer assessment
-
-    Find a submission for peer assessment. This function will find the next
-    submission that requires assessment, excluding any submission that has been
-    completely graded, or is actively being reviewed by other students.
-
-    Args:
-        workflow (PeerWorkflow): Used to determine the next submission to get
-            for peer assessment. Iterates over all workflows that have the same
-            course_id and item_id as the student_item_dict, excluding any
-            workflow which has the same student_id.
-
-    Returns:
-        submission_uuid (str): The submission_uuid for the submission to review.
-
-    Raises:
-        PeerAssessmentInternalError: Raised when there is an error retrieving
-            the workflows or workflow items for this request.
-
-    Examples:
-        >>> student_item_dict = dict(
-        >>>    item_id="item_1",
-        >>>    course_id="course_1",
-        >>>    item_type="type_one",
-        >>>    student_id="Bob",
-        >>> )
-        >>> _find_active_assessments(student_item_dict)
-        "1"
-
-    """
-    timeout = (timezone.now() - TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
-    # The follow query behaves as the Peer Assessment Queue. This will
-    # find the next submission (via PeerWorkflow) in this course / question
-    # that:
-    #  1) Does not belong to you
-    #  2) Does not have enough completed assessments
-    #  3) Is not something you have already scored.
-    #  4) Does not have a combination of completed assessments or open
-    #     assessments equal to or more than the requirement.
-    try:
-        peer_workflows = list(PeerWorkflow.objects.raw(
-            "select pw.id, pw.submission_uuid "
-            "from assessment_peerworkflow pw "
-            "where pw.item_id=%s "
-            "and pw.course_id=%s "
-            "and pw.student_id<>%s "
-            "and pw.grading_completed_at is NULL "
-            "and pw.id not in ("
-            "   select pwi.author_id "
-            "   from assessment_peerworkflowitem pwi "
-            "   where pwi.scorer_id=%s "
-            "   and pwi.assessment_id is not NULL "
-            ") "
-            "and ("
-            "   select count(pwi.id) as c "
-            "   from assessment_peerworkflowitem pwi "
-            "   where pwi.author_id=pw.id "
-            "   and (pwi.assessment_id is not NULL or pwi.started_at > %s) "
-            ") < %s "
-            "order by pw.created_at, pw.id "
-            "limit 1; ",
-            [workflow.item_id, workflow.course_id, workflow.student_id, workflow.id, timeout, graded_by]
-        ))
-        if not peer_workflows:
-            return None
-
-        return peer_workflows[0].submission_uuid
-    except DatabaseError:
-        error_message = _(
-            u"An internal error occurred while retrieving a peer submission "
-            u"for student {}"
-            .format(workflow)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
-
-
-def _get_submission_for_over_grading(workflow):
-    """Retrieve the next submission uuid for over grading
-
-    Gets the next submission uuid for over grading in peer assessment.
-    """
-    # The follow query behaves as the Peer Assessment Over Grading Queue. This
-    # will find a random submission (via PeerWorkflow) in this course / question
-    # that:
-    #  1) Does not belong to you
-    #  2) Is not something you have already scored
-    try:
-        query = list(PeerWorkflow.objects.raw(
-            "select pw.id, pw.submission_uuid "
-            "from assessment_peerworkflow pw "
-            "where course_id=%s "
-            "and item_id=%s "
-            "and student_id<>%s "
-            "and pw.id not in ( "
-            "select pwi.author_id "
-            "from assessment_peerworkflowitem pwi "
-            "where pwi.scorer_id=%s); ",
-            [workflow.course_id, workflow.item_id, workflow.student_id, workflow.id]
-        ))
-        workflow_count = len(query)
-        if workflow_count < 1:
-            return None
-
-        random_int = random.randint(0, workflow_count - 1)
-        random_workflow = query[random_int]
-
-        return random_workflow.submission_uuid
-    except DatabaseError:
-        error_message = _(
-            u"An internal error occurred while retrieving a peer submission "
-            u"for student {}"
-            .format(workflow)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
-
-
-def _get_latest_open_workflow_item(workflow):
-    """Gets the latest open workflow item for a given workflow.
-
-    If there is an open workflow item for the given workflow, return this item.
-
-    Args:
-        workflow (PeerWorkflow): The scorer's workflow.
-
-    Returns:
-        A PeerWorkflowItem that is open for assessment. None if no item is
-        found.
-
-    Examples:
-        >>> workflow = _get_workflow_by_submission_uuid("abc123")
-        >>> _get_latest_open_workflow_item(workflow)
-        {
-            'student_id': u'Bob',
-            'item_id': u'type_one',
-            'course_id': u'course_1',
-            'submission_uuid': u'1',
-            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
-        }
-
-    """
-    workflow_query = workflow.graded.filter(
-        assessment__isnull=True
-    ).order_by("-started_at", "-id")
-    items = list(workflow_query[:1])
-    return items[0] if items else None
-
-
-def _close_active_assessment(workflow, submission_uuid, assessment, num_required_grades):
-    """Associate the work item with a complete assessment.
-
-    Updates a workflow item on the student's workflow with the associated
-    assessment. When a workflow item has an assessment, it is considered
-    finished.
-
-    Args:
-        workflow (PeerWorkflow): The scorer's workflow
-        submission_uuid (str): The submission the scorer is grading.
-        assessment (PeerAssessment): The associate assessment for this action.
-        graded_by (int): The required number of grades the peer workflow
-            requires to be considered complete.
-
-    Examples:
-        >>> workflow = _get_workflow_by_submission_uuid("abc123")
-        >>> assessment = Assessment.objects.all()[0]
-        >>> _close_active_assessment(workflow, "1", assessment, 3)
-
-    """
-    try:
-        item_query = workflow.graded.filter(
-            submission_uuid=submission_uuid
-        ).order_by("-started_at", "-id")
-        items = list(item_query[:1])
-        if not items:
-            raise PeerAssessmentWorkflowError(_(
-                u"No open assessment was found for student {} while assessing "
-                u"submission UUID {}."
-                .format(workflow.student_id, submission_uuid)
-            ))
-        item = items[0]
-        item.assessment = assessment
-        item.save()
-
-        if (not item.author.grading_completed_at
-                and item.author.graded_by.filter(assessment__isnull=False).count() >= num_required_grades):
-            item.author.grading_completed_at = timezone.now()
-            item.author.save()
-    except (DatabaseError, PeerWorkflowItem.DoesNotExist):
-        error_message = _(
-            u"An internal error occurred while retrieving a workflow item for "
-            u"student {}. Workflow Items are created when submissions are "
-            u"pulled for assessment."
-            .format(workflow.student_id)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentWorkflowError(error_message)
-
-
-def _num_peers_graded(workflow):
-    """Returns the number of peers the student owning the workflow has graded.
-
-    Determines if the student has graded enough peers.
-
-    Args:
-        workflow (PeerWorkflow): The workflow associated with the current
-            student.
-
-    Returns:
-        True if the student is done peer assessing, False if not.
-
-    Examples:
-        >>> student_item_dict = dict(
-        >>>    item_id="item_1",
-        >>>    course_id="course_1",
-        >>>    item_type="type_one",
-        >>>    student_id="Bob",
-        >>> )
-        >>> workflow = _get_workflow_by_submission_uuid(student_item_dict)
-        >>> _num_peers_graded(workflow, 3)
-        True
-
-    """
-    return workflow.graded.filter(assessment__isnull=False).count()
-
-
 def _log_assessment(assessment, scorer_workflow):
     """
     Log the creation of a peer assessment.
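The public surface of peer_api is unchanged by the refactor; only the private helpers above were folded into PeerWorkflow. As a hedged usage sketch of the entry point whose signature appears in this diff, where the UUID literal and requirement counts are made up:

# Sketch only: the public entry point keeps its signature after the refactor.
from openassessment.assessment import peer_api
from openassessment.assessment.errors import PeerAssessmentWorkflowError

try:
    submission = peer_api.get_submission_to_assess(
        "scorer-submission-uuid",  # the scorer's own submission UUID
        graded_by=3,               # how many peers must grade each submission
        over_grading=False,        # True falls back to the over-grading queue
    )
except PeerAssessmentWorkflowError:
    # Raised when no PeerWorkflow exists yet for the scorer's submission.
    submission = None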
apps/openassessment/assessment/self_api.py
@@ -14,6 +14,9 @@ from openassessment.assessment.serializers import (
 from openassessment.assessment.models import (
     Assessment, AssessmentPart, InvalidOptionSelection
 )
+from openassessment.assessment.errors import (
+    SelfAssessmentRequestError, SelfAssessmentInternalError
+)

 # Assessments are tagged as "self-evaluation"

@@ -22,30 +25,6 @@ SELF_TYPE = "SE"
 logger = logging.getLogger("openassessment.assessment.self_api")


-class SelfAssessmentError(Exception):
-    """Generic Self Assessment Error
-    Raised when an error occurs while processing a request related to the
-    Self Assessment Workflow.
-    """
-    pass
-
-
-class SelfAssessmentRequestError(SelfAssessmentError):
-    """
-    There was a problem with the request for a self-assessment.
-    """
-    pass
-
-
-class SelfAssessmentInternalError(SelfAssessmentError):
-    """
-    There was an internal problem while accessing the self-assessment api.
-    """
-    pass
-
-
 def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
     """
     Create a self-assessment for a submission.
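For self-assessment the change is import-only: the exception classes now live in openassessment.assessment.errors. A small sketch of an updated caller follows, assuming self_api.submitter_is_finished takes the same (submission_uuid, requirements) arguments as its peer_api counterpart shown earlier; the helper function itself is hypothetical.

# Sketch only: callers now import the self-assessment errors from the shared
# errors module. is_self_assessment_complete is hypothetical.
from openassessment.assessment import self_api
from openassessment.assessment.errors import SelfAssessmentRequestError


def is_self_assessment_complete(submission_uuid):
    try:
        return self_api.submitter_is_finished(submission_uuid, {})
    except SelfAssessmentRequestError:
        return False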
apps/openassessment/assessment/test/test_peer.py
@@ -796,20 +796,20 @@ class TestPeerApi(CacheResetTest):
         xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

         # Check for a workflow for Buffy.
-        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
+        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])
         self.assertIsNotNone(buffy_workflow)

         # Check to see if Buffy is actively reviewing Xander's submission.
         # She isn't so we should get back no uuid.
-        submission_uuid = peer_api._find_active_assessments(buffy_workflow)
+        submission_uuid = buffy_workflow.find_active_assessments()
         self.assertIsNone(submission_uuid)

         # Buffy is going to review Xander's submission, so create a workflow
         # item for Buffy.
-        peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
+        PeerWorkflow.create_item(buffy_workflow, xander_answer["uuid"])

         # Check to see if Buffy is still actively reviewing Xander's submission.
-        submission_uuid = peer_api._find_active_assessments(buffy_workflow)
+        submission_uuid = buffy_workflow.find_active_assessments()
         self.assertEqual(xander_answer["uuid"], submission_uuid)

     def test_get_workflow_by_uuid(self):

@@ -818,7 +818,7 @@ class TestPeerApi(CacheResetTest):
         self._create_student_and_submission("Willow", "Willow's answer")
         buffy_answer_two, _ = self._create_student_and_submission("Buffy", "Buffy's answer")

-        workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer_two['uuid'])
+        workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer_two['uuid'])
         self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid)
         self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)

@@ -827,10 +827,10 @@ class TestPeerApi(CacheResetTest):
         xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
         self._create_student_and_submission("Willow", "Willow's answer")

-        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
+        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])

         # Get the next submission for review
-        submission_uuid = peer_api._get_submission_for_review(buffy_workflow, 3)
+        submission_uuid = buffy_workflow.get_submission_for_review(3)
         self.assertEqual(xander_answer["uuid"], submission_uuid)

     def test_get_submission_for_over_grading(self):
@@ -838,19 +838,19 @@ class TestPeerApi(CacheResetTest):
         xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
         willow_answer, _ = self._create_student_and_submission("Willow", "Willow's answer")

-        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
-        xander_workflow = peer_api._get_workflow_by_submission_uuid(xander_answer['uuid'])
-        willow_workflow = peer_api._get_workflow_by_submission_uuid(willow_answer['uuid'])
+        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])
+        xander_workflow = PeerWorkflow.get_by_submission_uuid(xander_answer['uuid'])
+        willow_workflow = PeerWorkflow.get_by_submission_uuid(willow_answer['uuid'])

         # Get a bunch of workflow items opened up.
-        peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
-        peer_api._create_peer_workflow_item(willow_workflow, xander_answer["uuid"])
-        peer_api._create_peer_workflow_item(xander_workflow, xander_answer["uuid"])
-        peer_api._create_peer_workflow_item(buffy_workflow, willow_answer["uuid"])
-        peer_api._create_peer_workflow_item(xander_workflow, willow_answer["uuid"])
+        PeerWorkflow.create_item(buffy_workflow, xander_answer["uuid"])
+        PeerWorkflow.create_item(willow_workflow, xander_answer["uuid"])
+        PeerWorkflow.create_item(xander_workflow, xander_answer["uuid"])
+        PeerWorkflow.create_item(buffy_workflow, willow_answer["uuid"])
+        PeerWorkflow.create_item(xander_workflow, willow_answer["uuid"])

         # Get the next submission for review
-        submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow)
+        submission_uuid = xander_workflow.get_submission_for_over_grading()
         if not (buffy_answer["uuid"] == submission_uuid or
                 willow_answer["uuid"] == submission_uuid):
             self.fail("Submission was not Buffy or Willow's.")

@@ -912,7 +912,7 @@ class TestPeerApi(CacheResetTest):
         xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

         # Create a workflow for Buffy.
-        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
+        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])

         # Get a workflow item opened up.
         submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)

@@ -930,7 +930,7 @@ class TestPeerApi(CacheResetTest):
         assessment = Assessment.objects.filter(
             scorer_id=assessment_dict["scorer_id"],
             scored_at=assessment_dict["scored_at"])[0]
-        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)
+        buffy_workflow.close_active_assessment(xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

         item = PeerWorkflowItem.objects.get(submission_uuid=xander_answer['uuid'])
         self.assertEqual(xander_answer["uuid"], submission["uuid"])
@@ -940,9 +940,9 @@ class TestPeerApi(CacheResetTest):
     @raises(peer_api.PeerAssessmentInternalError)
     def test_failure_to_get_review_submission(self, mock_filter):
         tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        tim_workflow = peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
+        tim_workflow = PeerWorkflow.get_by_submission_uuid(tim_answer['uuid'])
         mock_filter.side_effect = DatabaseError("Oh no.")
-        peer_api._get_submission_for_review(tim_workflow, 3)
+        tim_workflow.get_submission_for_review(3)

     @patch.object(AssessmentFeedback.objects, 'get')
     @raises(peer_api.PeerAssessmentInternalError)

@@ -986,7 +986,7 @@ class TestPeerApi(CacheResetTest):
     def test_failure_to_get_latest_workflow(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Oh no.")
         tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
+        PeerWorkflow.get_by_submission_uuid(tim_answer['uuid'])

     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(peer_api.PeerAssessmentInternalError)

@@ -994,12 +994,12 @@
         mock_filter.side_effect = DatabaseError("Oh no.")
         self._create_student_and_submission("Tim", "Tim's answer", MONDAY)

-    @patch.object(PeerWorkflow.objects, 'get')
+    @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_create_workflow_item_error(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Oh no.")
         tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        peer_api._create_peer_workflow_item(tim, tim_answer['uuid'])
+        PeerWorkflow.create_item(tim, tim_answer['uuid'])

     def test_get_submission_to_evaluate(self):
         submission, __ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
apps/openassessment/assessment/test/test_self.py

@@ -9,8 +9,9 @@ import pytz
 from openassessment.test_utils import CacheResetTest
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
-    create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
+    create_assessment, submitter_is_finished, get_assessment
 )
+from openassessment.assessment.errors import SelfAssessmentRequestError


 class TestSelfApi(CacheResetTest):
apps/openassessment/workflow/api.py

@@ -8,6 +8,7 @@ import logging
 from django.db import DatabaseError

 from openassessment.assessment import peer_api
+from openassessment.assessment.errors import PeerAssessmentError
 from submissions import api as sub_api
 from .models import AssessmentWorkflow, AssessmentWorkflowStep
 from .serializers import AssessmentWorkflowSerializer

@@ -131,7 +132,7 @@ def create_workflow(submission_uuid, steps):
     if steps[0] == "peer":
         try:
             peer_api.create_peer_workflow(submission_uuid)
-        except peer_api.PeerAssessmentError as err:
+        except PeerAssessmentError as err:
             err_msg = u"Could not create assessment workflow: {}".format(err)
             logger.exception(err_msg)
             raise AssessmentWorkflowInternalError(err_msg)
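The workflow API now catches PeerAssessmentError imported from the shared errors module rather than through the peer_api namespace, and callers are unaffected. Below is a sketch, with an invented submission UUID and step list, of the create_workflow call this hunk guards.

# Sketch only: callers of the workflow API are unaffected by the import change.
# The submission UUID and the step list are invented for the example.
from openassessment.workflow import api as workflow_api

try:
    workflow_api.create_workflow("some-submission-uuid", ["peer", "self"])
except workflow_api.AssessmentWorkflowInternalError:
    # Raised by the hunk above when peer_api.create_peer_workflow() fails.
    pass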
apps/openassessment/xblock/grade_mixin.py

@@ -9,6 +9,7 @@ from xblock.core import XBlock
 from openassessment.assessment import peer_api
 from openassessment.assessment import self_api
+from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from submissions import api as sub_api

@@ -55,7 +56,7 @@ class GradeMixin(object):
                 path = 'openassessmentblock/grade/oa_grade_not_started.html'
             else:  # status is 'self' or 'peer', which implies that the workflow is incomplete
                 path, context = self.render_grade_incomplete(workflow)
-        except (sub_api.SubmissionError, peer_api.PeerAssessmentError, self_api.SelfAssessmentRequestError):
+        except (sub_api.SubmissionError, PeerAssessmentError, SelfAssessmentError):
             return self.render_error(_(u"An unexpected error occurred."))
         else:
             return self.render_assessment(path, context)