edx / edx-ora2
Commit acb51e34
authored Mar 31, 2014 by Stephen Sanchez
Updating the over-grading query to be more performant, and to select a random submission
parent 84dd9255
Showing 4 changed files with 67 additions and 59 deletions:
apps/openassessment/assessment/peer_api.py (+33, -43)
apps/openassessment/assessment/test/test_peer.py (+3, -1)
apps/openassessment/xblock/peer_assessment_mixin.py (+4, -6)
apps/openassessment/xblock/test/test_peer.py (+27, -9)

apps/openassessment/assessment/peer_api.py
@@ -11,6 +11,8 @@ from django.utils import timezone
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from dogapi import dog_stats_api
+from django.db.models import Q
+import random
 from openassessment.assessment.models import (
     Assessment, AssessmentFeedback, AssessmentPart,
@@ -140,7 +142,7 @@ def create_assessment(
         scorer_id,
         assessment_dict,
         rubric_dict,
-        graded_by,
+        num_required_grades,
         scored_at=None):
     """Creates an assessment on the given submission.
@@ -155,9 +157,10 @@ def create_assessment(
         is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
-        graded_by (int): The required number of assessments a submission
-            requires before it is completed. If this number of assessments is
-            reached, the grading_completed_at timestamp is set for the Workflow.
+        num_required_grades (int): The required number of assessments a
+            submission requires before it is completed. If this number of
+            assessments is reached, the grading_completed_at timestamp is set
+            for the Workflow.

     Kwargs:
         scored_at (datetime): Optional argument to override the time in which
@@ -233,7 +236,7 @@ def create_assessment(
                 "assessment cannot be submitted unless the associated "
                 "submission came from the peer workflow."
             ))
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment, graded_by)
+        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, student_item, scorer_item)
@@ -905,53 +908,34 @@ def _get_submission_for_over_grading(workflow):
     """Retrieve the next submission uuid for over grading

     Gets the next submission uuid for over grading in peer assessment.
-    Specifically, this will construct a query that:
-    1) selects all the peer workflows for the current course and item,
-        excluding the current student
-    2) checks all the assessments associated with those workflows, excluding
-        the current student's assessments, and any workflows connected to them.
-    3) checks to see if any unfinished assessments are expired
-    4) Groups all the workflows with their collective assessments
-    5) Orders them but their total assessments
-    6) Returns the workflow with the fewest assessments.
     """
     # The follow query behaves as the Peer Assessment Over Grading Queue. This
-    # will find the next submission (via PeerWorkflow) in this course / question
+    # will find a random submission (via PeerWorkflow) in this course / question
     # that:
     # 1) Does not belong to you
     # 2) Is not something you have already scored
-    # 3) Has the fewest current assessments.
     try:
-        peer_workflows = list(PeerWorkflow.objects.raw(
-            "select pw.id, pw.submission_uuid, ("
-            " select count(pwi.id) as c "
-            " from assessment_peerworkflowitem pwi "
-            " where pwi.author_id=pw.id "
-            ") as c "
+        query = list(PeerWorkflow.objects.raw(
+            "select pw.id, pw.submission_uuid "
             "from assessment_peerworkflow pw "
-            "where pw.item_id=%s "
-            "and pw.course_id=%s "
-            "and pw.student_id<>%s "
-            "and pw.id not in ("
-            " select pwi.author_id "
-            " from assessment_peerworkflowitem pwi "
-            " where pwi.scorer_id=%s "
-            ") "
-            "order by c, pw.created_at, pw.id "
-            "limit 1; ",
-            [workflow.item_id, workflow.course_id, workflow.student_id, workflow.id]
+            "where course_id=%s "
+            "and item_id=%s "
+            "and student_id<>%s "
+            "and pw.id not in ( "
+            "select pwi.author_id "
+            "from assessment_peerworkflowitem pwi "
+            "where pwi.scorer_id=%s); ",
+            [workflow.course_id, workflow.item_id, workflow.student_id, workflow.id]
         ))
-        if not peer_workflows:
+        workflow_count = len(query)
+        if workflow_count < 1:
             return None

-        return peer_workflows[0].submission_uuid
+        random_int = random.randint(0, workflow_count - 1)
+        random_workflow = query[random_int]
+        return random_workflow.submission_uuid
     except DatabaseError:
         error_message = _(
             u"An internal error occurred while retrieving a peer submission "
@@ -961,7 +945,12 @@ def _get_submission_for_over_grading(workflow):
         raise PeerAssessmentInternalError(error_message)


-def _close_active_assessment(workflow, submission_uuid, assessment, graded_by):
+def _close_active_assessment(
+        workflow,
+        submission_uuid,
+        assessment,
+        num_required_grades):
     """Associate the work item with a complete assessment.

     Updates a workflow item on the student's workflow with the associated
@@ -990,7 +979,8 @@ def _close_active_assessment(workflow, submission_uuid, assessment, graded_by):
     try:
         item = workflow.graded.get(submission_uuid=submission_uuid)
         item.assessment = assessment
-        if item.author.graded_by.all().count() >= graded_by:
+        if (not item.author.grading_completed_at and
+                item.author.graded_by.all().count() >= num_required_grades):
             item.author.grading_completed_at = timezone.now()
             item.author.save()
         item.save()
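
Taken together, the peer_api.py changes swap the "fewest assessments first" ordering (the correlated count subquery plus order by ... limit 1) for fetching every eligible workflow and picking one uniformly at random in Python, and they guard grading_completed_at so it is only stamped once. A minimal standalone sketch of the new selection step, assuming only that each candidate row carries a submission_uuid attribute (pick_random_submission and Row are illustrative names, not part of the repository's API):

    import random
    from collections import namedtuple

    Row = namedtuple("Row", ["submission_uuid"])  # stand-in for a raw-query row

    def pick_random_submission(candidates):
        """Sketch of the commit's selection logic over raw-query results."""
        workflow_count = len(candidates)
        if workflow_count < 1:
            return None
        # randint is inclusive on both ends, so the last valid index is count - 1.
        random_int = random.randint(0, workflow_count - 1)
        return candidates[random_int].submission_uuid

    print(pick_random_submission([Row("uuid-1"), Row("uuid-2"), Row("uuid-3")]))

random.choice(candidates) would make the same uniform pick in one call; the randint indexing above simply mirrors the committed code.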

apps/openassessment/assessment/test/test_peer.py
@@ -530,7 +530,9 @@ class TestPeerApi(CacheResetTest):
         #Get the next submission for review
         submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow)
-        self.assertEqual(buffy_answer["uuid"], submission_uuid)
+        if not (buffy_answer["uuid"] == submission_uuid or
+                willow_answer["uuid"] == submission_uuid):
+            self.fail("Submission was not Buffy or Willow's.")

     def test_create_assessment_feedback(self):
         tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
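
Because the pick is now random, this test can no longer assert a single expected author and instead accepts either candidate. A compact alternative to the if/self.fail pattern, sketched with hypothetical UUID values, is assertIn against the set of valid answers:

    import unittest

    class OverGradingPickTest(unittest.TestCase):
        def test_pick_is_one_of_the_candidates(self):
            # Hypothetical stand-ins for buffy_answer["uuid"] / willow_answer["uuid"].
            valid_uuids = {"buffy-uuid", "willow-uuid"}
            submission_uuid = "willow-uuid"  # stand-in for the API call's result
            self.assertIn(submission_uuid, valid_uuids)

    if __name__ == "__main__":
        unittest.main()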

apps/openassessment/xblock/peer_assessment_mixin.py
@@ -184,14 +184,14 @@ class PeerAssessmentMixin(object):
                 context_dict["peer_start"] = self.format_datetime_string(date)
                 path = 'openassessmentblock/peer/oa_peer_unavailable.html'
             elif workflow.get("status") == "peer":
-                peer_sub = self.get_peer_submission(student_item, assessment, submissions_closed)
+                peer_sub = self.get_peer_submission(student_item, assessment)
                 if peer_sub:
                     path = 'openassessmentblock/peer/oa_peer_assessment.html'
                     context_dict["peer_submission"] = peer_sub
                 else:
                     path = 'openassessmentblock/peer/oa_peer_waiting.html'
             elif continue_grading and student_item:
-                peer_sub = self.get_peer_submission(student_item, assessment, continue_grading)
+                peer_sub = self.get_peer_submission(student_item, assessment)
                 if peer_sub:
                     path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                     context_dict["peer_submission"] = peer_sub
@@ -205,16 +205,14 @@ class PeerAssessmentMixin(object):
-    def get_peer_submission(self, student_item_dict, assessment, over_grading):
-        submissions_closed, __, __ = self.is_closed(step="submission")
+    def get_peer_submission(self, student_item_dict, assessment):
         peer_submission = False
         try:
             peer_submission = peer_api.get_submission_to_assess(
                 student_item_dict,
                 assessment["must_be_graded_by"],
-                over_grading
+                True
             )
             self.runtime.publish(
                 self,
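
With over_grading hardcoded to True, get_peer_submission no longer needs a caller-supplied flag or the is_closed lookup. A runnable sketch of the resulting call shape, assuming get_submission_to_assess keeps this three-argument form; peer_api is stubbed here, and the real method (per the diff above) also publishes a runtime event and handles errors:

    class _StubPeerApi(object):
        @staticmethod
        def get_submission_to_assess(student_item_dict, must_be_graded_by, over_grading):
            return {"uuid": "stub-uuid"}  # stand-in for a real submission dict

    peer_api = _StubPeerApi()

    def get_peer_submission(student_item_dict, assessment):
        # Sketch only: over_grading is now always True, so callers no longer
        # thread submissions_closed / continue_grading through.
        return peer_api.get_submission_to_assess(
            student_item_dict,
            assessment["must_be_graded_by"],
            True  # over_grading: always draw from the over-grading pool
        )

    print(get_peer_submission({"student_id": "Bob"}, {"must_be_graded_by": 3}))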

apps/openassessment/xblock/test/test_peer.py
@@ -60,7 +60,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             1
         )

-        # If Over Grading is on, this should now return Sally's response to Bob.
+        # If Over Grading is on, this should now return Sally or Hal's response
+        # to Bob.
         submission = xblock.create_submission(student_item, u"Bob's answer")
         workflow_info = xblock.get_workflow_info()
         self.assertEqual(workflow_info["status"], u'peer')
@@ -73,7 +74,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

         #Validate Peer Rendering.
-        self.assertIn("Sally".encode('utf-8'), peer_response.body)
+        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
+                        "Hal".encode('utf-8') in peer_response.body)

     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_assess_handler(self, xblock):
@@ -212,10 +214,19 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

+        hal_response = "Hal".encode('utf-8') in peer_response.body
+        sally_response = "Sally".encode('utf-8') in peer_response.body
         # Validate Peer Rendering.
-        self.assertIn("Sally".encode('utf-8'), peer_response.body)
+        if hal_response:
+            peer_uuid = hal_sub['uuid']
+        elif sally_response:
+            peer_uuid = sally_sub['uuid']
+        else:
+            self.fail("Response was neither Hal or Sally's submission.")

         peer_api.create_assessment(
-            sally_sub['uuid'],
+            peer_uuid,
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -229,10 +240,17 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

-        # Validate Peer Rendering.
-        self.assertIn("Hal".encode('utf-8'), peer_response.body)
+        # Validate Peer Rendering. Check that if Sally or Hal were selected
+        # the first time around, the other is selected this time.
+        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
+            peer_uuid = hal_sub['uuid']
+        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
+            peer_uuid = sally_sub['uuid']
+        else:
+            self.fail("Response was neither Hal or Sally's submission.")

         peer_api.create_assessment(
-            hal_sub['uuid'],
+            peer_uuid,
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -245,4 +263,4 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         peer_response = xblock.render_peer_assessment(request)
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
-        self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
\ No newline at end of file
+        self.assertIn("Complete".encode('utf-8'), peer_response.body)
\ No newline at end of file
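
The reworked test encodes a small invariant worth stating plainly: with exactly two candidate submissions (Hal's and Sally's) and no repeats per scorer, whichever author is shown first, the other must be shown second. A standalone sketch of that invariant with hypothetical UUIDs:

    import random

    def test_both_candidates_shown_once():
        candidates = {"hal-uuid", "sally-uuid"}
        first = random.choice(sorted(candidates))  # stand-in for the first render
        second = (candidates - {first}).pop()      # the only submission left
        assert {first, second} == candidates

    test_both_candidates_shown_once()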