edx / edx-ora2 · Commits

Commit 47490d5d
Authored Jan 29, 2016 by Eric Fischer
Merge pull request #860 from edx/efischer/get_peer_scores
Always calculate peer median
Parents: 76e560e8, bd54c648
Showing 3 changed files with 148 additions and 50 deletions:

    openassessment/xblock/grade_mixin.py      +8  -8
    openassessment/xblock/test/base.py        +60 -42
    openassessment/xblock/test/test_grade.py  +80 -0
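In broad strokes, per the hunks below: grade_mixin.py previously built the "Peer Median Grade" part only when at least one peer assessment existed; it now builds the part whenever a peer step is configured, falling back to a per-criterion "Waiting for peer reviews" label until must_be_graded_by assessments arrive. For orientation, the "peer median" is the median of the option points peers selected for each criterion. A minimal sketch of that idea, assuming a conventional median (the actual computation lives in edx-ora2's assessment APIs and is not part of this diff):

# Illustrative only -- not the edx-ora2 implementation.
def peer_median(points):
    """Median of the option points peers selected for one criterion."""
    if not points:
        return None  # no peer assessments yet: caller shows a placeholder
    ordered = sorted(points)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0

print(peer_median([2, 3, 3]))  # 3
print(peer_median([]))         # None -> "Waiting for peer reviews"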
openassessment/xblock/grade_mixin.py
@@ -333,7 +333,7 @@ class GradeMixin(object):
         # Fetch all the unique assessment parts
         criterion_name = criterion['name']
         staff_assessment_part = _get_assessment_part(_('Staff Grade'), criterion_name, staff_assessment)
-        if len(peer_assessments) > 0:
+        if "peer-assessment" in assessment_steps:
             peer_assessment_part = {
                 'title': _('Peer Median Grade'),
                 'criterion': criterion,
@@ -347,11 +347,6 @@ class GradeMixin(object):
                     for index, peer_assessment in enumerate(peer_assessments)
                 ],
             }
-        elif "peer-assessment" in assessment_steps:
-            peer_assessment_part = {
-                'title': _('Peer Median Grade'),
-                'option': {'label': _('Waiting for peer reviews')}
-            }
         else:
             peer_assessment_part = None
         example_based_assessment_part = _get_assessment_part(
@@ -449,7 +444,12 @@ class GradeMixin(object):
     # - the median score, and no explanation (it is too verbose to show an aggregate).
     options = median_options()
     if len(options) == 0:
-        return None
+        # If we weren't able to get a median option when there should be one, show the following message
+        # This happens when there are less than must_be_graded_by assessments made for the user
+        if len(criterion['options']) > 0:
+            return {'label': _('Waiting for peer reviews')}
+        else:
+            return None
     if len(options) == 1:
         return options[0]
     return {
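The new fallback distinguishes two empty-median cases: a criterion that has options but too few assessments shows "Waiting for peer reviews", while a feedback-only criterion (no options at all) shows nothing. A condensed, self-contained restatement (hypothetical helper name; `_` stands in for ugettext, and the merged multi-option label is an assumption about the elided `return {` branch):

# Sketch of the fallback branch above, not code from grade_mixin.py.
def median_cell(options, criterion, _=lambda s: s):
    """Decide what the Peer Median Grade cell should show for one criterion."""
    if len(options) == 0:
        # Fewer than must_be_graded_by peers have assessed this criterion.
        if len(criterion['options']) > 0:
            return {'label': _('Waiting for peer reviews')}
        return None  # feedback-only criterion: nothing to wait for
    if len(options) == 1:
        return options[0]
    # Several options tie at the median; assume the real code merges their labels.
    return {'label': ' / '.join(option['label'] for option in options)}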
@@ -478,7 +478,7 @@ class GradeMixin(object):
                 'title': _('Staff Comments'),
                 'feedback': feedback
             })
-        if peer_assessments:
+        if peer_assessments and len(peer_assessments) >= self.workflow_requirements()['peer']['must_be_graded_by']:
             individual_feedback = []
             for peer_index, peer_assessment in enumerate(peer_assessments):
                 individual_feedback.append({
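The same must_be_graded_by threshold now gates individual peer comments, which were previously shown as soon as any peer assessment existed; suppressing them keeps the comments consistent with the withheld median score. Restated as a standalone predicate (hypothetical name, sketch only):

def show_individual_peer_feedback(peer_assessments, requirements):
    """True once enough peers have graded for their comments to be displayed."""
    required = requirements['peer']['must_be_graded_by']
    return bool(peer_assessments) and len(peer_assessments) >= required

# e.g. with must_be_graded_by = 2:
assert not show_individual_peer_feedback([{}], {'peer': {'must_be_graded_by': 2}})
assert show_individual_peer_feedback([{}, {}], {'peer': {'must_be_graded_by': 2}})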
openassessment/xblock/test/base.py
@@ -329,56 +329,74 @@ class SubmitAssessmentsMixin(object):
         student_id = student_item['student_id']
         submission = xblock.create_submission(student_item, submission_text)

-        # Create submissions and assessments from other users
-        scorer_submissions = []
-        for scorer_name, assessment in zip(peers, peer_assessments):
-            # Create a submission for each scorer for the same problem
-            scorer = copy.deepcopy(student_item)
-            scorer['student_id'] = scorer_name
-            scorer_sub = submissions_api.create_submission(scorer, {'text': submission_text})
-            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
-            submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
-            # Store the scorer's submission so our user can assess it later
-            scorer_submissions.append(scorer_sub)
-            # Create an assessment of the user's submission
+        if len(peers) > 0:
+            # Create submissions and (optionally) assessments from other users
+            must_be_graded_by = xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
+            scorer_subs = self.create_peer_submissions(student_item, peers, submission_text)
             if not waiting_for_peer:
-                peer_api.create_assessment(
-                    scorer_sub['uuid'],
-                    scorer_name,
-                    assessment['options_selected'],
-                    assessment['criterion_feedback'],
-                    assessment['overall_feedback'],
-                    {'criteria': xblock.rubric_criteria},
-                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
-                )
-
-        # Have our user make assessments (so she can get a score)
-        for assessment in peer_assessments:
-            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
-            peer_api.create_assessment(
-                submission['uuid'],
-                student_id,
-                assessment['options_selected'],
-                assessment['criterion_feedback'],
-                assessment['overall_feedback'],
-                {'criteria': xblock.rubric_criteria},
-                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
-            )
+                for scorer_sub, scorer_name, assessment in zip(scorer_subs, peers, peer_assessments):
+                    self.create_peer_assessment(
+                        scorer_sub, scorer_name, submission, assessment,
+                        xblock.rubric_criteria, must_be_graded_by
+                    )
+
+            # Have our user make assessments (so she can get a score)
+            for i, assessment in enumerate(peer_assessments):
+                self.create_peer_assessment(
+                    submission, student_id, scorer_subs[i], assessment,
+                    xblock.rubric_criteria, must_be_graded_by
+                )

         # Have the user submit a self-assessment (so she can get a score)
         if self_assessment is not None:
-            self_api.create_assessment(
-                submission['uuid'],
-                student_id,
-                self_assessment['options_selected'],
-                self_assessment['criterion_feedback'],
-                self_assessment['overall_feedback'],
-                {'criteria': xblock.rubric_criteria}
-            )
+            self.create_self_assessment(submission, student_id, self_assessment, xblock.rubric_criteria)

         return submission

+    def create_peer_submissions(self, student_item, peer_names, submission_text):
+        """Create len(peer_names) submissions, and return them."""
+        returned_subs = []
+        for peer in peer_names:
+            scorer = copy.deepcopy(student_item)
+            scorer['student_id'] = peer
+            scorer_sub = submissions_api.create_submission(scorer, {'text': submission_text})
+            returned_subs.append(scorer_sub)
+            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
+        return returned_subs
+
+    def create_peer_assessment(self, scorer_sub, scorer, sub_to_assess, assessment, criteria, grading_requirements):
+        """Create a peer assessment of submission sub_to_assess by scorer."""
+        peer_api.create_peer_workflow_item(scorer_sub['uuid'], sub_to_assess['uuid'])
+        peer_api.create_assessment(
+            scorer_sub['uuid'],
+            scorer,
+            assessment['options_selected'],
+            assessment['criterion_feedback'],
+            assessment['overall_feedback'],
+            {'criteria': criteria},
+            grading_requirements
+        )
+
+    def create_self_assessment(self, submission, student_id, assessment, criteria):
+        """Submit a self assessment using the information given."""
+        self_api.create_assessment(
+            submission['uuid'],
+            student_id,
+            assessment['options_selected'],
+            assessment['criterion_feedback'],
+            assessment['overall_feedback'],
+            {'criteria': criteria}
+        )
+
     @staticmethod
     def set_staff_access(xblock):
         xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
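The staging logic that each test previously inlined now lives in three reusable mixin helpers: create_peer_submissions, create_peer_assessment, and create_self_assessment. A design note: create_peer_assessment pairs the scorer with the submission explicitly through peer_api.create_peer_workflow_item, whereas the old inline code relied on peer_api.get_submission_to_assess to make that assignment implicitly. Explicit pairing is what lets the test below withhold the final peer assessment until exactly the right moment.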
openassessment/xblock/test/test_grade.py
@@ -277,6 +277,86 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
         else:
             self.assertIsNone(assessment.get('points', None))

+    @scenario('data/grade_scenario.xml', user_id='Bernard')
+    def test_peer_update_after_override(self, xblock):
+        # Note that much of the logic from self.create_submission_and_assessments is duplicated here;
+        # this is necessary to allow us to put off the final peer submission to the right point in time
+
+        # Create a submission from the user
+        student_item = xblock.get_student_item_dict()
+        student_id = student_item['student_id']
+        submission = xblock.create_submission(student_item, self.SUBMISSION)
+
+        # Create submissions from other users
+        scorer_subs = self.create_peer_submissions(student_item, self.PEERS, self.SUBMISSION)
+
+        # Create all but the last peer assessment of the current user; no peer grade will be available
+        graded_by = xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
+        for scorer_sub, scorer_name, assessment in zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)[:-1]:
+            self.create_peer_assessment(
+                scorer_sub, scorer_name, submission, assessment,
+                xblock.rubric_criteria, graded_by
+            )
+
+        # Have our user make assessments
+        for i, assessment in enumerate(PEER_ASSESSMENTS):
+            self.create_peer_assessment(
+                submission, student_id, scorer_subs[i], assessment,
+                xblock.rubric_criteria, graded_by
+            )
+
+        # Have the user submit a self-assessment
+        self.create_self_assessment(submission, student_id, SELF_ASSESSMENT, xblock.rubric_criteria)
+
+        # Submit a staff assessment
+        self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)
+
+        # Get the grade details
+        def peer_data():
+            """We'll need to do this more than once, so it's defined in a local function for later reference"""
+            workflow_info = xblock.get_workflow_info()
+            _, context = xblock.render_grade_complete(workflow_info)
+            grade_details = context['grade_details']
+            feedback_num = sum(
+                1 for item in grade_details['additional_feedback'] if item['title'].startswith('Peer')
+            )
+            return [
+                next(
+                    assessment['option']
+                    for assessment in criterion['assessments']
+                    if assessment['title'] == u'Peer Median Grade'
+                )
+                for criterion in grade_details['criteria']
+            ], feedback_num
+
+        peer_scores, peer_feedback_num = peer_data()
+
+        # Verify that no peer score is shown, and comments are being suppressed
+        self.assertTrue(all([option['label'] == u'Waiting for peer reviews' for option in peer_scores]))
+        self.assertEqual(peer_feedback_num, 0)
+
+        # Submit final peer grade
+        self.create_peer_assessment(
+            scorer_subs[-1], self.PEERS[-1], submission, PEER_ASSESSMENTS[-1],
+            xblock.rubric_criteria, graded_by
+        )
+
+        # Get grade information again, it should be updated
+        updated_peer_scores, updated_peer_feedback_num = peer_data()
+
+        # Verify that points and feedback are present now that enough peers have graded
+        self.assertTrue(all([option.get('points', None) is not None for option in updated_peer_scores]))
+        self.assertGreater(updated_peer_feedback_num, 0)
+
     @scenario('data/grade_scenario.xml', user_id='Bob')
     def test_assessment_does_not_match_rubric(self, xblock):
         # Get to the grade complete section
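For readers without the rendered page in front of them, here is a minimal illustration of the grade_details shape that peer_data() walks; only the keys mirror the real context, the values are invented:

# Invented values; only the keys match the context used by the test above.
grade_details = {
    'criteria': [
        {'assessments': [
            {'title': u'Peer Median Grade', 'option': {'label': u'Waiting for peer reviews'}},
        ]},
    ],
    'additional_feedback': [],  # peer feedback entries appear here once shown
}

peer_options = [
    next(a['option'] for a in criterion['assessments'] if a['title'] == u'Peer Median Grade')
    for criterion in grade_details['criteria']
]
assert peer_options == [{'label': u'Waiting for peer reviews'}]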