edx / edx-ora2

Commit b42fd746 authored Mar 13, 2014 by Stephen Sanchez
Merge pull request #130 from edx/sanchez/fix-has-finished-eval
Fixing the count problem on peer api, completed assessments.
Parents: f9ce3e6c, a3f86171
Showing 3 changed files with 24 additions and 1 deletion:

    apps/openassessment/assessment/models.py           +4   -0
    apps/openassessment/assessment/peer_api.py          +1   -1
    apps/openassessment/assessment/test/test_peer.py    +19  -0
apps/openassessment/assessment/models.py

@@ -417,6 +417,10 @@ class PeerWorkflowItem(models.Model):
    associated workflow represents the scorer of the given submission, and the
    assessment represents the completed assessment for this work item.
    Assessments are represented as their ID, defaulting to -1. This is done to
    optimized complex queries against PeerWorkflowItems with the Assessments
    indexed, whereas a Null reference would be costly.
    """
    scorer_id = models.ForeignKey(PeerWorkflow, related_name='items')
    submission_uuid = models.CharField(max_length=128, db_index=True)
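The docstring added above motivates storing the assessment as its integer ID with a default of -1 rather than a nullable reference. As a hedged illustration only (the helper names below are hypothetical; the `items` related name and the -1 sentinel come from this commit's diff), the query pattern it enables looks roughly like this, assuming a PeerWorkflow instance:

    # Hypothetical helpers sketching the docstring's rationale: with -1 stored in
    # an indexed integer column, "has this item been assessed?" becomes an indexed
    # comparison instead of a NULL check on a reference.
    def completed_items(workflow):
        """Items whose assessment ID has actually been recorded."""
        return workflow.items.exclude(assessment=-1)

    def pending_items(workflow):
        """Items that were assigned for grading but not yet assessed."""
        return workflow.items.filter(assessment=-1)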
apps/openassessment/assessment/peer_api.py

@@ -332,7 +332,7 @@ def has_finished_required_evaluating(student_item_dict, required_assessments):
     count = 0
     if workflow:
         done = _check_student_done_grading(workflow, required_assessments)
-        count = workflow.items.all().count()
+        count = workflow.items.all().exclude(assessment=-1).count()
     return done, count
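For context, here is a hedged sketch of how a caller might consume the (done, count) pair returned by has_finished_required_evaluating. The wrapper and its name are hypothetical and not part of this commit; the call signature and the openassessment.assessment package path are taken from the test added below.

    # Hypothetical caller showing the effect of the fix: `count` now reflects only
    # completed assessments, so a student who has merely been handed submissions
    # to grade is no longer reported as having graded them.
    from openassessment.assessment import peer_api

    def peer_step_complete(student_item_dict, required_assessments):
        """True once the student has finished the required peer assessments."""
        finished, count = peer_api.has_finished_required_evaluating(
            student_item_dict, required_assessments
        )
        # `finished` already encodes the requirement; `count` is handy for display,
        # e.g. "you have completed 1 of 3 required assessments".
        return finished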
apps/openassessment/assessment/test/test_peer.py

@@ -155,6 +155,25 @@ class TestPeerApi(TestCase):
        self.assertEqual(1, len(assessments))
        self.assertEqual(assessments[0]["scored_at"], MONDAY)

    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)

        peer_api.create_assessment(sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT)

        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)

    def test_peer_assessment_workflow(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
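The new test exercises the fix end to end through peer_api. As a standalone restatement (plain Python, no Django or test fixtures; FakeItem and the assessment ID 42 are illustrative only), the counting change boils down to:

    # A PeerWorkflowItem is created as soon as a submission is handed out for
    # grading, so counting every item over-counts; only items whose assessment
    # is no longer the -1 sentinel are actually complete.
    class FakeItem(object):
        def __init__(self, assessment=-1):
            self.assessment = assessment

    items = [FakeItem()]                                        # Bob is handed Tim's submission
    assert len(items) == 1                                      # pre-fix count: already 1
    assert len([i for i in items if i.assessment != -1]) == 0   # post-fix count: 0

    items[0].assessment = 42                                    # Bob records an assessment
    assert len([i for i in items if i.assessment != -1]) == 1   # now it counts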