edx / edx-ora2 · Commits

Commit bd79e1c4 authored Mar 03, 2014 by Will Daly
Provide feedback in peer assessment
parent 3e07e49f
Showing 20 changed files with 493 additions and 64 deletions
apps/openassessment/peer/api.py  +39 -29
apps/openassessment/peer/models.py  +5 -1
apps/openassessment/peer/serializers.py  +1 -0
apps/openassessment/peer/test/test_api.py  +17 -0
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html  +11 -2
apps/openassessment/xblock/grade_mixin.py  +9 -2
apps/openassessment/xblock/peer_assessment_mixin.py  +46 -16
apps/openassessment/xblock/static/js/spec/oa_edit.js  +2 -2
apps/openassessment/xblock/static/js/spec/oa_server.js  +47 -2
apps/openassessment/xblock/static/js/src/oa_base.js  +0 -0
apps/openassessment/xblock/static/js/src/oa_server.js  +46 -1
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml  +0 -5
apps/openassessment/xblock/test/data/assessment_scenario.xml  +49 -0
apps/openassessment/xblock/test/data/basic_scenario.xml  +1 -2
apps/openassessment/xblock/test/data/grade_scenario.xml  +49 -0
apps/openassessment/xblock/test/test_grade.py  +62 -0
apps/openassessment/xblock/test/test_peer.py  +89 -0
apps/submissions/models.py  +4 -0
apps/submissions/serializers.py  +13 -1
apps/submissions/tests/test_api.py  +3 -1
apps/openassessment/peer/api.py
@@ -7,6 +7,7 @@ the workflow for a given submission.
 import copy
 import logging

+from django.utils.translation import ugettext as _
 from django.db import DatabaseError

 from openassessment.peer.models import Assessment

@@ -87,16 +88,15 @@ def create_assessment(
             required for the student to receive a score for their submission.
         must_be_graded_by (int): The number of assessments
             required on the submission for it to be scored.
-        assessment_dict (dict): All related information for the assessment. An
-            assessment contains points_earned, points_possible, and feedback.
+        assessment_dict (dict): All related information for the assessment. The dictionary
+            must have the following keys: "options_selected" (mapping of criterion names to option values),
+            and "feedback" (string of written feedback for the submission).
         scored_at (datetime): Optional argument to override the time in which
             the assessment took place. If not specified, scored_at is set to
             now.

     Returns:
-        dict: The dictionary representing the assessment. This includes the
-            points earned, points possible, time scored, scorer id, score type,
-            and feedback.
+        dict: the Assessment model, serialized as a dict.

     Raises:
         PeerAssessmentRequestError: Raised when the submission_id is invalid, or

@@ -107,33 +107,33 @@ def create_assessment(
     Examples:
         >>> assessment_dict = dict(
-        >>>    points_earned=[1, 0, 3, 2],
-        >>>    points_possible=12,
+        >>>    options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
         >>>    feedback="Your submission was thrilling.",
         >>> )
-        >>> create_assessment("1", "Tim", assessment_dict)
-        {
-            'points_earned': 6,
-            'points_possible': 12,
-            'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
-            'scorer_id': u"Tim",
-            'feedback': u'Your submission was thrilling.'
-        }
+        >>> create_assessment("1", "Tim", 3, 2, assessment_dict, rubric_dict)
     """
     try:
         submission = Submission.objects.get(uuid=submission_uuid)
         rubric = rubric_from_dict(rubric_dict)
         option_ids = rubric.options_ids(assessment_dict["options_selected"])

+        # Validate that the selected options matched the rubric
+        # and raise an error if this is not the case
+        if None in option_ids:
+            raise PeerAssessmentRequestError(_("Selected options do not match the rubric options."))
+
+        feedback = assessment_dict.get('feedback', u'')
+
         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
             "submission": submission.pk,
             "score_type": PEER_TYPE,
+            "feedback": feedback,
             "parts": [{"option": option_id} for option_id in option_ids]
         }

-        if scored_at:
+        if scored_at is not None:
             peer_assessment["scored_at"] = scored_at

         peer_serializer = AssessmentSerializer(data=peer_assessment)
...
@@ -152,23 +152,33 @@ def create_assessment(
         )

         # Check if the grader is finished and has enough assessments
-        scorer_item = StudentItem.objects.get(
-            student_id=scorer_id,
-            item_id=student_item.item_id,
-            course_id=student_item.course_id,
-            item_type=student_item.item_type
-        )
+        try:
+            scorer_item = StudentItem.objects.get(
+                student_id=scorer_id,
+                item_id=student_item.item_id,
+                course_id=student_item.course_id,
+                item_type=student_item.item_type
+            )
+        except StudentItem.DoesNotExist:
+            raise PeerAssessmentWorkflowError(
+                _("You must make a submission before assessing another student"))

         scorer_submissions = Submission.objects.filter(
             student_item=scorer_item
         ).order_by("-attempt_number")

-        _score_if_finished(
-            scorer_item,
-            scorer_submissions[0],
-            must_grade,
-            must_be_graded_by
-        )
+        if len(scorer_submissions) > 0:
+            _score_if_finished(
+                scorer_item,
+                scorer_submissions[0],
+                must_grade,
+                must_be_graded_by
+            )
+        # Currently, this condition is unreachable, since the only way to create a StudentItem is to
+        # create a submission for that student. We check anyway just in case this invariant changes.
+        else:
+            raise PeerAssessmentWorkflowError(
+                _("You must make at least one submission before assessing another student"))

         return peer_serializer.data
     except DatabaseError:
...
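For orientation, the call sequence exercised by the new tests is roughly the sketch below. It mirrors test_peer.py and test_api.py in this commit; STUDENT_ITEM, ANSWER_ONE, and RUBRIC_DICT are test fixtures that are not shown in this diff, so treat them as placeholders rather than part of the API.

    from submissions import api as sub_api
    from openassessment.peer import api as peer_api

    # The student being assessed needs a submission...
    submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)

    # ...and, after this commit, so does the scorer; otherwise
    # create_assessment raises PeerAssessmentWorkflowError.
    scorer_item = dict(STUDENT_ITEM, student_id="Tim")
    sub_api.create_submission(scorer_item, ANSWER_ONE)

    # New signature: must_grade and must_be_graded_by are explicit arguments, and
    # assessment_dict carries options_selected + feedback instead of raw point values.
    assessment = peer_api.create_assessment(
        submission["uuid"],   # submission being assessed
        "Tim",                # scorer_id
        3,                    # must_grade
        2,                    # must_be_graded_by
        {
            "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
            "feedback": u"Your submission was thrilling.",
        },
        RUBRIC_DICT,          # e.g. {'criteria': self.rubric_criteria} in the XBlock mixin
    )
    assert assessment["feedback"] == u"Your submission was thrilling."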
apps/openassessment/peer/models.py
@@ -81,6 +81,9 @@ class Rubric(models.Model):
             options_selected (dict): Mapping of criteria names to the names of
                 the option that was selected for that criterion.

+        Returns:
+            list of option ids (set to None if the selected option does not match the rubric)
+
         Examples:
             >>> options_selected = {"secret": "yes", "safe": "no"}
             >>> rubric.options_ids(options_selected)

@@ -97,6 +100,8 @@ class Rubric(models.Model):
         return [
             crit_to_all_opts[crit][opt]
+            if crit in crit_to_all_opts and opt in crit_to_all_opts[crit]
+            else None
             for crit, opt in options_selected.items()
         ]

@@ -186,7 +191,6 @@ class Assessment(models.Model):
     scorer_id = models.CharField(max_length=40, db_index=True)
     score_type = models.CharField(max_length=2)
-    # TODO: move this to its own model
     feedback = models.TextField(max_length=10000, default="")

     class Meta:
...
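The added conditional in that list comprehension is the whole validation hook: any criterion or option name that is not in the rubric maps to None, which create_assessment then rejects. A minimal plain-Python sketch of the same lookup (the crit_to_all_opts dictionary and the option ids are illustrative, not the model API):

    # {criterion_name: {option_name: option_id}}, as Rubric.options_ids builds it internally
    crit_to_all_opts = {
        "secret": {"yes": 10, "no": 11},
        "safe": {"yes": 12, "no": 13},
    }

    def options_ids(options_selected):
        return [
            crit_to_all_opts[crit][opt]
            if crit in crit_to_all_opts and opt in crit_to_all_opts[crit]
            else None
            for crit, opt in options_selected.items()
        ]

    print(options_ids({"secret": "yes", "safe": "no"}))      # [10, 13]
    print(options_ids({"secret": "yes", "bogus": "maybe"}))  # [10, None] -> rejected by create_assessment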
apps/openassessment/peer/serializers.py
@@ -123,6 +123,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
             'scored_at',
             'scorer_id',
             'score_type',
+            'feedback',

             # Foreign Key
             'parts',
...
apps/openassessment/peer/test/test_api.py
@@ -122,6 +122,7 @@ class TestApi(TestCase):
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)
+        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])

     @file_data('valid_assessments.json')
     def test_get_assessments(self, assessment_dict):

@@ -288,6 +289,22 @@ class TestApi(TestCase):
         self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
         self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))

+    @raises(peer_api.PeerAssessmentWorkflowError)
+    def test_assess_before_submitting(self):
+        # Create a submission for another student
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+
+        # Attempt to create the assessment from another student without first making a submission
+        peer_api.create_assessment(
+            submission["uuid"],
+            "another_student",
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
+            ASSESSMENT_DICT,
+            RUBRIC_DICT,
+            MONDAY
+        )
+
     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
...
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html
@@ -6,6 +6,16 @@
         <span class="grade__value">{{ score.points_earned }}</span> out of
         <span class="grade__potential">{{ score.points_possible }}</span>
     </div>

+    <!-- PLACEHOLDER: @talbs, you can overwrite this with the actual template -->
+    <div class="openassessment__feedback">
+        {% for asmnt in assessments %}
+        <div class="openassessment__peer_assessment_{{ forloop.counter }}">
+            <div>Peer #{{ forloop.counter }}</div>
+            <div>Feedback: {{ asmnt.feedback }}</div>
+        </div>
+        {% endfor %}
+    </div>
+
     <div class="grade__actions">
         <ul class="list list--actions">
             <li class="list--actions__item">

@@ -13,4 +23,4 @@
             </li>
         </ul>
     </div>
 </div>
\ No newline at end of file
apps/openassessment/xblock/grade_mixin.py
 from xblock.core import XBlock

+from openassessment.peer.api import get_assessments
 from submissions.api import get_score

@@ -23,7 +24,14 @@ class GradeMixin(object):
         student_item = self.get_student_item_dict()
         scores = get_score(student_item)
         if scores:
-            context = {"score": scores[0]}
+            context = {
+                "score": scores[0],
+                "assessments": [],
+            }
+
+            # Look up assessment feedback
+            for assessment in get_assessments(scores[0]['submission_uuid']):
+                context['assessments'].append(assessment)
         else:
             path = 'openassessmentblock/grade/oa_grade_waiting.html'
     elif not problem_open and date == "due":

@@ -33,4 +41,3 @@ class GradeMixin(object):
         # TODO: How do we determine which modules are incomplete?
         return self.render_assessment(path, context)
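For reference, the context the grade view now passes to oa_grade_complete.html above looks roughly like the sketch below; the keys come from the mixin, while the values are illustrative.

    context = {
        "score": {
            "points_earned": 11,
            "points_possible": 12,
            "submission_uuid": "8d8c5674-...",   # new Score field, used to look up assessments
        },
        "assessments": [
            {"scorer_id": "McNulty", "score_type": "PE", "feedback": u"Excellent work!"},
            {"scorer_id": "Freamon", "score_type": "PE", "feedback": u"Good job!"},
        ],
    }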
apps/openassessment/xblock/peer_assessment_mixin.py
+import logging
+
+from django.utils.translation import ugettext as _
 from xblock.core import XBlock

 from openassessment.peer import api as peer_api
-from openassessment.peer.api import PeerAssessmentWorkflowError
+from openassessment.peer.api import (
+    PeerAssessmentWorkflowError, PeerAssessmentRequestError, PeerAssessmentInternalError
+)
+
+logger = logging.getLogger(__name__)

 class PeerAssessmentMixin(object):

@@ -25,37 +33,59 @@ class PeerAssessmentMixin(object):
         Args:
             data (dict): A dictionary containing information required to create
-                a new peer assessment. Expecting attributes "points_earned",
-                "total_value", and "submission_uuid". If these attributes are
-                not available, a new assessment cannot be stored.
+                a new peer assessment. This dict should have the following attributes:
+                `submission_uuid` (string): The unique identifier for the submission being assessed.
+                `options_selected` (dict): Dictionary mapping criterion names to option values.
+                `feedback` (unicode): Written feedback for the submission.

         Returns:
-            (tuple): A tuple containing the dictionary representation of the
-                newly created assessment, and a "Success" string.
+            Dict with keys "success" (bool) indicating success/failure
+            and "msg" (unicode) containing additional information if an error occurs.
         """
+        # Validate the request
+        if 'feedback' not in data:
+            return {'success': False, 'msg': _('Must provide feedback in the assessment')}
+
+        if 'options_selected' not in data:
+            return {'success': False, 'msg': _('Must provide options selected in the assessment')}
+
+        if 'submission_uuid' not in data:
+            return {'success': False, 'msg': _('Must provide submission uuid for the assessment')}
+
         assessment_ui_model = self.get_assessment_module('peer-assessment')
         if assessment_ui_model:
             rubric_dict = {
                 'criteria': self.rubric_criteria
             }
             assessment_dict = {
-                "feedback": "Not yet implemented.",
+                "feedback": data['feedback'],
                 "options_selected": data["options_selected"],
             }
-            assessment = peer_api.create_assessment(
-                data["submission_uuid"],
-                self.get_student_item_dict()["student_id"],
-                int(assessment_ui_model["must_grade"]),
-                int(assessment_ui_model["must_be_graded_by"]),
-                assessment_dict,
-                rubric_dict,
-            )
+            try:
+                assessment = peer_api.create_assessment(
+                    data["submission_uuid"],
+                    self.get_student_item_dict()["student_id"],
+                    int(assessment_ui_model["must_grade"]),
+                    int(assessment_ui_model["must_be_graded_by"]),
+                    assessment_dict,
+                    rubric_dict,
+                )
+            except PeerAssessmentRequestError as ex:
+                return {'success': False, 'msg': ex.message}
+            except PeerAssessmentInternalError as ex:
+                logger.exception()
+                return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}

             # Temp kludge until we fix JSON serialization for datetime
             assessment["scored_at"] = str(assessment["scored_at"])
-            return {}
+            return {'success': True, 'msg': u''}
+        else:
+            return {'success': False, 'msg': _('Could not load peer assessment.')}

     @XBlock.handler
...
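Taken together with the JavaScript changes below, the assess handler now expects a JSON body carrying submission_uuid, options_selected, and feedback, and always answers with a success/msg pair. A small sketch of that request/response contract (the payload values are illustrative):

    import json

    # Body the client POSTs to the XBlock 'assess' handler.
    payload = json.dumps({
        "submission_uuid": "abc1234",
        "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
        "feedback": "Excellent job!",
    })

    # Responses produced by the mixin above:
    ok = {"success": True, "msg": u""}
    missing_key = {"success": False, "msg": u"Must provide feedback in the assessment"}
    bad_options = {"success": False, "msg": u"Selected options do not match the rubric options."}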
apps/openassessment/xblock/static/js/spec/oa_edit.js
-/*
+/**
 Tests for OA XBlock editing.
-*/
+**/

 describe("OpenAssessment.StudioUI", function() {
...
apps/openassessment/xblock/static/js/spec/oa_server.js
@@ -51,7 +51,7 @@ describe("OpenAssessment.Server", function() {
         });
     });

-    it("Sends a submission the XBlock", function() {
+    it("Sends a submission to the XBlock", function() {
         // Status, student ID, attempt number
         stubAjax(true, [true, 1, 2]);

@@ -69,7 +69,28 @@ describe("OpenAssessment.Server", function() {
         expect($.ajax).toHaveBeenCalledWith({
             url: '/submit',
             type: "POST",
-            data: { submission: "This is only a test" }
+            data: JSON.stringify({ submission: "This is only a test" })
         });
     });

+    it("Sends an assessment to the XBlock", function() {
+        stubAjax(true, {success: true, msg: ''});
+
+        var success = false;
+        var options = {clarity: "Very clear", precision: "Somewhat precise"}
+        server.assess("abc1234", options, "Excellent job!").done(function() {
+            success = true;
+        });
+        expect(success).toBe(true);
+
+        expect($.ajax).toHaveBeenCalledWith({
+            url: '/assess',
+            type: "POST",
+            data: JSON.stringify({
+                submission_uuid: "abc1234",
+                options_selected: options,
+                feedback: "Excellent job!"
+            })
+        });
+    });
+
@@ -183,4 +204,28 @@ describe("OpenAssessment.Server", function() {
         expect(receivedMsg).toEqual("Test error");
     });

+    it("informs the caller of a server error when sending an assessment", function() {
+        stubAjax(true, {success: false, msg: 'Test error!'});
+
+        var receivedMsg = null;
+        var options = {clarity: "Very clear", precision: "Somewhat precise"}
+        server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
+            receivedMsg = msg;
+        });
+
+        expect(receivedMsg).toEqual("Test error!");
+    });
+
+    it("informs the caller of an AJAX error when sending an assessment", function() {
+        stubAjax(false, null);
+
+        var receivedMsg = null;
+        var options = {clarity: "Very clear", precision: "Somewhat precise"}
+        server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
+            receivedMsg = msg;
+        });
+
+        expect(receivedMsg).toEqual("Could not contact server.");
+    });
 });
apps/openassessment/xblock/static/js/src/oa_base.js
(This diff is collapsed and not shown.)
apps/openassessment/xblock/static/js/src/oa_server.js
@@ -85,7 +85,7 @@ OpenAssessment.Server.prototype = {
             $.ajax({
                 type: "POST",
                 url: url,
-                data: { submission: submission }
+                data: JSON.stringify({ submission: submission })
             }).done(function(data) {
                 var success = data[0];
                 if (success) {

@@ -105,6 +105,51 @@ OpenAssessment.Server.prototype = {
     },

     /**
+    Send an assessment to the XBlock.
+
+    Args:
+        submissionId (string): The UUID of the submission.
+        optionsSelected (object literal): Keys are criteria names,
+            values are the option text the user selected for the criterion.
+        feedback (string): Written feedback on the submission.
+
+    Returns:
+        A JQuery promise, which resolves with no args if successful
+        and fails with an error message otherise.
+
+    Example:
+        var options = { clarity: "Very clear", precision: "Somewhat precise" };
+        var feedback = "Good job!";
+        server.assess("abc123", options, feedback).done(
+            function() { console.log("Success!"); }
+        ).fail(
+            function(errorMsg) { console.log(errorMsg); }
+        );
+    **/
+    assess: function(submissionId, optionsSelected, feedback) {
+        var url = this.url('assess');
+        var payload = JSON.stringify({
+            submission_uuid: submissionId,
+            options_selected: optionsSelected,
+            feedback: feedback
+        });
+        return $.Deferred(function(defer) {
+            $.ajax({ type: "POST", url: url, data: payload }).done(
+                function(data) {
+                    if (data.success) {
+                        defer.resolve();
+                    }
+                    else {
+                        defer.rejectWith(this, [data.msg]);
+                    }
+                }
+            ).fail(function(data) {
+                defer.rejectWith(this, ['Could not contact server.']);
+            });
+        }).promise()
+    },
+
+    /**
     Load the XBlock's XML definition from the server.

     Returns:
...
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml
@@ -60,11 +60,6 @@
         </criterion>
     </rubric>
     <assessments>
-        <assessment name="self-assessment"
-                    start="2014-12-20T19:00-7:00"
-                    due="2014-12-21T22:22-7:00"
-                    must_grade="5"
-                    must_be_graded_by="3" />
         <assessment name="peer-assessment"
                     start="2014-12-20T19:00-7:00"
                     due="2014-12-21T22:22-7:00"
...
apps/openassessment/xblock/test/data/assessment_scenario.xml
0 → 100644
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment"
                    start="2014-12-20T19:00"
                    due="2014-12-21T22:22"
                    must_grade="5"
                    must_be_graded_by="3" />
    </assessments>
</openassessment>
apps/openassessment/xblock/test/data/basic_scenario.xml
@@ -84,11 +84,10 @@
         </criterion>
     </rubric>
     <assessments>
-        <peer-assessment name="peer-assessment"
+        <assessment name="peer-assessment"
                     start="2014-12-20T19:00"
                     due="2014-12-21T22:22"
                     must_grade="5"
                     must_be_graded_by="3" />
-        <self-assessment name="self-assessment" />
     </assessments>
 </openassessment>
apps/openassessment/xblock/test/data/grade_scenario.xml
0 → 100644
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment"
                    start="2014-12-20T19:00"
                    due="2014-12-21T22:22"
                    must_grade="2"
                    must_be_graded_by="2" />
    </assessments>
</openassessment>
apps/openassessment/xblock/test/test_grade.py
0 → 100644
# -*- coding: utf-8 -*-
"""
Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario


class TestGrade(XBlockHandlerTestCase):

    ASSESSMENTS = [
        {
            'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
        },
        {
            'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            'feedback': u'Good job!',
        },
    ]

    SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

    @scenario('data/grade_scenario.xml', user_id='Greggs')
    def test_render_grade(self, xblock):
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        scorer_submissions = []
        for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
            # Create a submission for each scorer
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name
            scorer_sub = submission_api.create_submission(scorer, self.SUBMISSION)

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            peer_api.create_assessment(
                submission['uuid'], scorer_name, 2, 2,
                assessment, {'criteria': xblock.rubric_criteria}
            )

        # Have our user make assessments (so she can get a score)
        for submission in scorer_submissions:
            peer_api.create_assessment(
                submission['uuid'], 'Greggs', 2, 2,
                self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
            )

        # Render the view
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))

        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
        self.assertIn(u'Good job!', resp.decode('utf-8'))
apps/openassessment/xblock/test/test_peer.py
0 → 100644
# -*- coding: utf-8 -*-
"""
Tests for peer assessment handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario


class TestPeerAssessment(XBlockHandlerTestCase):

    ASSESSMENT = {
        'submission_uuid': None,
        'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
        'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
    }

    SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_assess_handler(self, xblock):
        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        submission_api.create_submission(another_student, self.SUBMISSION)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
        self.assertTrue(resp['success'])

        # Retrieve the assessment and check that it matches what we sent
        actual = peer_api.get_assessments(submission['uuid'])
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
        self.assertEqual(actual[0]['points_earned'], 5)
        self.assertEqual(actual[0]['points_possible'], 6)
        self.assertEqual(actual[0]['scorer_id'], 'Bob')
        self.assertEqual(actual[0]['score_type'], 'PE')
        self.assertEqual(len(actual[0]['parts']), 2)

        parts = sorted(actual[0]['parts'])
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
        self.assertEqual(actual[0]['feedback'], assessment['feedback'])

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_assess_rubric_option_mismatch(self, xblock):
        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        submission_api.create_submission(another_student, self.SUBMISSION)

        # Submit an assessment, but mutate the options selected so they do NOT match the rubric
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        assessment['options_selected']['invalid'] = 'not a part of the rubric!'
        resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')

        # Expect an error response
        self.assertFalse(resp['success'])

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_missing_keys_in_request(self, xblock):
        for missing in ['feedback', 'submission_uuid', 'options_selected']:
            assessment = copy.deepcopy(self.ASSESSMENT)
            del assessment[missing]
            resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
            self.assertEqual(resp['success'], False)
apps/submissions/models.py
@@ -101,6 +101,10 @@ class Score(models.Model):
     points_possible = models.PositiveIntegerField(default=0)
     created_at = models.DateTimeField(editable=False, default=now, db_index=True)

+    @property
+    def submission_uuid(self):
+        return self.submission.uuid
+
     def __repr__(self):
         return repr(dict(
             student_item=self.student_item,
...
apps/submissions/serializers.py
@@ -26,6 +26,18 @@ class SubmissionSerializer(serializers.ModelSerializer):
 class ScoreSerializer(serializers.ModelSerializer):

+    submission_uuid = serializers.Field(source='submission_uuid')
+
     class Meta:
         model = Score
-        fields = ('student_item', 'submission', 'points_earned', 'points_possible', 'created_at')
+        fields = (
+            'student_item',
+            'submission',
+            'points_earned',
+            'points_possible',
+            'created_at',
+
+            # Computed
+            'submission_uuid',
+        )
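The new read-only submission_uuid field is what lets grade_mixin.py above look up peer assessments from a score. A rough sketch of a serialized Score after this change (field values are illustrative; the exact field set is the Meta.fields tuple above):

    serialized_score = {
        "student_item": 2,
        "submission": 7,
        "points_earned": 11,
        "points_possible": 12,
        "created_at": "2014-03-03T12:00:00Z",
        # Computed from score.submission.uuid via the new model property
        "submission_uuid": "8d8c5674-...",
    }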
apps/submissions/tests/test_api.py
@@ -131,9 +131,11 @@ class TestApi(TestCase):
         self._assert_score(score, 11, 12)

     def test_get_score(self):
-        self.test_create_score()
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.set_score(STUDENT_ITEM, submission, 11, 12)
         scores = api.get_score(STUDENT_ITEM)
         self._assert_score(scores[0], 11, 12)
+        self.assertEqual(scores[0]['submission_uuid'], submission['uuid'])

     def test_get_score_no_student_id(self):
         student_item = copy.deepcopy(STUDENT_ITEM)
...