Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
E
edx-ora2
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
edx
edx-ora2
Commits
f4b4a5ff
Commit
f4b4a5ff
authored
Jul 01, 2014
by
Will Daly
Browse files
Options
Browse Files
Download
Plain Diff
Merge pull request #471 from edx/will/assessment-score-bug
BUGFIX: Too many assessments used in score.
parents
2f360d23
b2a97208
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
95 additions
and
4 deletions
+95
-4
openassessment/assessment/api/peer.py
+13
-4
openassessment/assessment/test/test_peer.py
+82
-0
No files found.
openassessment/assessment/api/peer.py
View file @
f4b4a5ff
...
...
@@ -81,7 +81,16 @@ def assessment_is_finished(submission_uuid, requirements):
"""
if
requirements
is
None
:
return
False
return
bool
(
get_score
(
submission_uuid
,
requirements
))
workflow
=
PeerWorkflow
.
get_by_submission_uuid
(
submission_uuid
)
if
workflow
is
None
:
return
False
scored_items
=
workflow
.
graded_by
.
filter
(
assessment__submission_uuid
=
submission_uuid
,
assessment__score_type
=
PEER_TYPE
)
return
scored_items
.
count
()
>=
requirements
[
"must_be_graded_by"
]
def
on_start
(
submission_uuid
):
...
...
@@ -151,12 +160,12 @@ def get_score(submission_uuid, requirements):
if
workflow
is
None
:
return
None
#
This query will use the ordering defined by the assessment model
#
(descending scored_at, then descending id)
#
Retrieve the assessments in ascending order by score date,
#
because we want to use the *first* one(s) for the score.
items
=
workflow
.
graded_by
.
filter
(
assessment__submission_uuid
=
submission_uuid
,
assessment__score_type
=
PEER_TYPE
)
.
order_by
(
'assessment'
)
)
.
order_by
(
'
-
assessment'
)
submission_finished
=
items
.
count
()
>=
requirements
[
"must_be_graded_by"
]
if
not
submission_finished
:
...
...
openassessment/assessment/test/test_peer.py
View file @
f4b4a5ff
...
...
@@ -1183,6 +1183,88 @@ class TestPeerApi(CacheResetTest):
with
self
.
assertRaises
(
peer_api
.
PeerAssessmentWorkflowError
):
peer_api
.
get_submission_to_assess
(
"no_such_submission"
,
"scorer ID"
)
def test_too_many_assessments_counted_in_score_bug(self):
    """
    Regression test: a score could be calculated from more assessments
    than the problem definition required ("must_be_graded_by").

    Calling `assessment_is_finished()` used to implicitly mark peer
    workflow items as scored via `get_score()`, so a later assessment
    could be pulled into the score as well.
    """
    # Set the required number of assessments to one for this scenario.
    required_graded_by = 1
    requirements = {
        'must_grade': 1,
        'must_be_graded_by': required_graded_by,
    }

    # Create some submissions and students
    bob_sub, bob = self._create_student_and_submission('Bob', 'Bob submission')
    tim_sub, tim = self._create_student_and_submission('Tim', 'Tim submission')

    # Bob assesses someone else, satisfying his requirements
    peer_api.get_submission_to_assess(bob_sub['uuid'], bob['student_id'])
    peer_api.create_assessment(
        bob_sub['uuid'],
        bob['student_id'],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        required_graded_by,
    )

    # Tim grades Bob, so now Bob has one assessment
    peer_api.get_submission_to_assess(tim_sub['uuid'], tim['student_id'])
    peer_api.create_assessment(
        tim_sub['uuid'],
        tim['student_id'],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        required_graded_by,
    )

    # Here, the XBlock would update the workflow,
    # which would check the peer API to see if the student has
    # enough assessments.
    # Part of the bug was that this would call `get_score()` which
    # implicitly marked peer workflow items as scored.
    peer_api.assessment_is_finished(bob_sub['uuid'], requirements)

    # Sue creates a submission
    sue_sub, sue = self._create_student_and_submission('Sue', 'Sue submission')

    # Sue grades the only person in the queue, who is Tim,
    # because Tim still needs an assessment.
    peer_api.get_submission_to_assess(sue_sub['uuid'], sue['student_id'])
    peer_api.create_assessment(
        sue_sub['uuid'],
        sue['student_id'],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        required_graded_by,
    )

    # Sue grades the only person she hasn't graded yet (Bob)
    peer_api.get_submission_to_assess(sue_sub['uuid'], sue['student_id'])
    peer_api.create_assessment(
        sue_sub['uuid'],
        sue['student_id'],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        required_graded_by,
    )

    # This used to pull a second assessment into the score,
    # which was the bug.
    peer_api.get_score(bob_sub['uuid'], requirements)

    # Retrieve the assessments used to generate the score.
    # Only the first assessment (Tim's) should have been used.
    scored_assessments = peer_api.get_assessments(bob_sub['uuid'], scored_only=True)
    self.assertEqual(len(scored_assessments), 1)
    self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
@staticmethod
def
_create_student_and_submission
(
student
,
answer
,
date
=
None
):
new_student_item
=
STUDENT_ITEM
.
copy
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment