edx / edx-ora2
Commit 4086a497, authored Mar 04, 2014 by Will Daly
Merge pull request #73 from edx/will/tim-230

Provide feedback in peer assessment

Parents: 25a63c91, bd79e1c4
Showing 20 changed files with 685 additions and 200 deletions.
apps/openassessment/peer/api.py  (+39 / -29)
apps/openassessment/peer/models.py  (+5 / -1)
apps/openassessment/peer/serializers.py  (+1 / -0)
apps/openassessment/peer/test/test_api.py  (+17 / -0)
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html  (+11 / -2)
apps/openassessment/xblock/grade_mixin.py  (+9 / -2)
apps/openassessment/xblock/peer_assessment_mixin.py  (+46 / -16)
apps/openassessment/xblock/static/js/spec/oa_edit.js  (+2 / -2)
apps/openassessment/xblock/static/js/spec/oa_server.js  (+47 / -2)
apps/openassessment/xblock/static/js/src/oa_base.js  (+192 / -136)
apps/openassessment/xblock/static/js/src/oa_server.js  (+46 / -1)
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml  (+0 / -5)
apps/openassessment/xblock/test/data/assessment_scenario.xml  (+49 / -0)
apps/openassessment/xblock/test/data/basic_scenario.xml  (+1 / -2)
apps/openassessment/xblock/test/data/grade_scenario.xml  (+49 / -0)
apps/openassessment/xblock/test/test_grade.py  (+62 / -0)
apps/openassessment/xblock/test/test_peer.py  (+89 / -0)
apps/submissions/models.py  (+4 / -0)
apps/submissions/serializers.py  (+13 / -1)
apps/submissions/tests/test_api.py  (+3 / -1)
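Taken together, these changes carry a grader's selected rubric options and written feedback end to end: browser, XBlock handler, peer API, Assessment model, and finally the grade view. Below is a minimal, hedged sketch (not part of the diff) of the new create_assessment contract described in the updated docstring in apps/openassessment/peer/api.py; the criterion and option names, and the rubric_dict contents, are placeholders for illustration only.

    from openassessment.peer import api as peer_api

    # The XBlock mixin builds rubric_dict as {'criteria': self.rubric_criteria};
    # the nested structure below is an illustrative stand-in.
    rubric_dict = {
        "criteria": [
            {"name": "clarity", "prompt": "How clear is it?", "options": [
                {"name": "Very clear", "points": 2, "explanation": "Easy to follow."},
            ]},
        ],
    }

    assessment_dict = {
        # Maps criterion names to the name of the option the grader selected
        "options_selected": {"clarity": "Very clear"},
        # New in this merge: free-form written feedback travels with the assessment
        "feedback": u"Your submission was thrilling.",
    }

    # (submission_uuid, scorer_id, must_grade, must_be_graded_by, assessment_dict, rubric_dict)
    assessment = peer_api.create_assessment("1", "Tim", 3, 2, assessment_dict, rubric_dict)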
apps/openassessment/peer/api.py

@@ -7,6 +7,7 @@ the workflow for a given submission.
 import copy
 import logging

 from django.utils.translation import ugettext as _
 from django.db import DatabaseError

 from openassessment.peer.models import Assessment

@@ -87,16 +88,15 @@ def create_assessment(
             required for the student to receive a score for their submission.
         must_be_graded_by (int): The number of assessments
             required on the submission for it to be scored.
-        assessment_dict (dict): All related information for the assessment. An
-            assessment contains points_earned, points_possible, and feedback.
+        assessment_dict (dict): All related information for the assessment. The dictionary
+            must have the following keys: "options_selected" (mapping of criterion names to option values),
+            and "feedback" (string of written feedback for the submission).
         scored_at (datetime): Optional argument to override the time in which
             the assessment took place. If not specified, scored_at is set to
             now.

     Returns:
-        dict: The dictionary representing the assessment. This includes the
-            points earned, points possible, time scored, scorer id, score type,
-            and feedback.
+        dict: the Assessment model, serialized as a dict.

     Raises:
         PeerAssessmentRequestError: Raised when the submission_id is invalid, or

@@ -107,33 +107,33 @@ def create_assessment(
     Examples:
         >>> assessment_dict = dict(
-        >>>    points_earned=[1, 0, 3, 2],
-        >>>    points_possible=12,
+        >>>    options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
         >>>    feedback="Your submission was thrilling.",
         >>> )
-        >>> create_assessment("1", "Tim", assessment_dict)
-        {
-            'points_earned': 6,
-            'points_possible': 12,
-            'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
-            'scorer_id': u"Tim",
-            'feedback': u'Your submission was thrilling.'
-        }
+        >>> create_assessment("1", "Tim", 3, 2, assessment_dict, rubric_dict)

     """
     try:
         submission = Submission.objects.get(uuid=submission_uuid)
         rubric = rubric_from_dict(rubric_dict)
         option_ids = rubric.options_ids(assessment_dict["options_selected"])

         # Validate that the selected options matched the rubric
         # and raise an error if this is not the case
         if None in option_ids:
             raise PeerAssessmentRequestError(_("Selected options do not match the rubric options."))

         feedback = assessment_dict.get('feedback', u'')

         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
             "submission": submission.pk,
             "score_type": PEER_TYPE,
             "feedback": feedback,
             "parts": [{"option": option_id} for option_id in option_ids]
         }

-        if scored_at:
+        if scored_at is not None:
             peer_assessment["scored_at"] = scored_at

         peer_serializer = AssessmentSerializer(data=peer_assessment)

@@ -152,23 +152,33 @@ def create_assessment(
         )

         # Check if the grader is finished and has enough assessments
-        scorer_item = StudentItem.objects.get(
-            student_id=scorer_id,
-            item_id=student_item.item_id,
-            course_id=student_item.course_id,
-            item_type=student_item.item_type
-        )
+        try:
+            scorer_item = StudentItem.objects.get(
+                student_id=scorer_id,
+                item_id=student_item.item_id,
+                course_id=student_item.course_id,
+                item_type=student_item.item_type
+            )
+        except StudentItem.DoesNotExist:
+            raise PeerAssessmentWorkflowError(_("You must make a submission before assessing another student"))

         scorer_submissions = Submission.objects.filter(
             student_item=scorer_item
         ).order_by("-attempt_number")

-        _score_if_finished(scorer_item, scorer_submissions[0], must_grade, must_be_graded_by)
+        if len(scorer_submissions) > 0:
+            _score_if_finished(scorer_item, scorer_submissions[0], must_grade, must_be_graded_by)
+        # Currently, this condition is unreachable, since the only way to create a StudentItem is to
+        # create a submission for that student.  We check anyway just in case this invariant changes.
+        else:
+            raise PeerAssessmentWorkflowError(_("You must make at least one submission before assessing another student"))

         return peer_serializer.data
     except DatabaseError:
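As a hedged illustration of the validation added above (variable and option names are placeholders, not from the diff): a selection that cannot be matched against the rubric makes options_ids return a None entry, so create_assessment raises PeerAssessmentRequestError before any Assessment is saved.

    from openassessment.peer import api as peer_api

    # Placeholder rubric_dict; the real caller passes {'criteria': self.rubric_criteria}.
    rubric_dict = {"criteria": []}

    bad_assessment = {
        "options_selected": {"clarity": "Not an option in this rubric"},
        "feedback": u"Looks good overall.",
    }
    try:
        peer_api.create_assessment("1", "Tim", 3, 2, bad_assessment, rubric_dict)
    except peer_api.PeerAssessmentRequestError:
        pass  # "Selected options do not match the rubric options."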
apps/openassessment/peer/models.py

@@ -81,6 +81,9 @@ class Rubric(models.Model):
             options_selected (dict): Mapping of criteria names to the names of
                 the option that was selected for that criterion.

+        Returns:
+            list of option ids (set to None if the selected option does not match the rubric)

         Examples:
             >>> options_selected = {"secret": "yes", "safe": "no"}
             >>> rubric.options_ids(options_selected)

@@ -97,6 +100,8 @@
         return [
             crit_to_all_opts[crit][opt]
             if crit in crit_to_all_opts and opt in crit_to_all_opts[crit]
             else None
             for crit, opt in options_selected.items()
         ]

@@ -186,7 +191,6 @@ class Assessment(models.Model):
     scorer_id = models.CharField(max_length=40, db_index=True)
     score_type = models.CharField(max_length=2)

     # TODO: move this to its own model
     feedback = models.TextField(max_length=10000, default="")

     class Meta:
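A hedged illustration of the options_ids contract documented above (the criterion and option names are placeholders): any selection that does not correspond to a criterion and option in the rubric maps to None, which is exactly what create_assessment checks for.

    # Illustrative only; assumes a rubric whose "clarity" criterion has a "Very clear" option.
    option_ids = rubric.options_ids({
        "clarity": "Very clear",        # matches the rubric -> the option's id
        "not_a_criterion": "whatever",  # no such criterion  -> None
    })
    assert None in option_ids           # signals a rubric mismatch to the caller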
apps/openassessment/peer/serializers.py

@@ -123,6 +123,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
             'scored_at',
             'scorer_id',
             'score_type',
+            'feedback',

             # Foreign Key
             'parts',
apps/openassessment/peer/test/test_api.py

@@ -122,6 +122,7 @@ class TestApi(TestCase):
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)
+        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])

     @file_data('valid_assessments.json')
     def test_get_assessments(self, assessment_dict):

@@ -288,6 +289,22 @@ class TestApi(TestCase):
         self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
         self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))

+    @raises(peer_api.PeerAssessmentWorkflowError)
+    def test_assess_before_submitting(self):
+        # Create a submission for another student
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+
+        # Attempt to create the assessment from another student without first making a submission
+        peer_api.create_assessment(
+            submission["uuid"],
+            "another_student",
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
+            ASSESSMENT_DICT,
+            RUBRIC_DICT,
+            MONDAY
+        )

     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html

@@ -6,6 +6,16 @@
         <span class="grade__value">{{ score.points_earned }}</span>
         out of
         <span class="grade__potential">{{ score.points_possible }}</span>
     </div>

+    <!-- PLACEHOLDER: @talbs, you can overwrite this with the actual template -->
+    <div class="openassessment__feedback">
+        {% for asmnt in assessments %}
+        <div class="openassessment__peer_assessment_{{ forloop.counter }}">
+            <div>Peer #{{ forloop.counter }}</div>
+            <div>Feedback: {{ asmnt.feedback }}</div>
+        </div>
+        {% endfor %}
+    </div>

     <div class="grade__actions">
         <ul class="list list--actions">
             <li class="list--actions__item">

@@ -13,4 +23,4 @@
         </li>
     </ul>
 </div>
-</div>
\ No newline at end of file
+</div>
apps/openassessment/xblock/grade_mixin.py

 from xblock.core import XBlock

+from openassessment.peer.api import get_assessments
 from submissions.api import get_score

@@ -23,7 +24,14 @@ class GradeMixin(object):
             student_item = self.get_student_item_dict()
             scores = get_score(student_item)
             if scores:
-                context = {"score": scores[0]}
+                context = {
+                    "score": scores[0],
+                    "assessments": [],
+                }
+
+                # Look up assessment feedback
+                for assessment in get_assessments(scores[0]['submission_uuid']):
+                    context['assessments'].append(assessment)
             else:
                 path = 'openassessmentblock/grade/oa_grade_waiting.html'
         elif not problem_open and date == "due":

@@ -33,4 +41,3 @@ class GradeMixin(object):
         # TODO: How do we determine which modules are incomplete?
         return self.render_assessment(path, context)
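Roughly, the context handed to oa_grade_complete.html now carries the peer assessments alongside the score. The sketch below is a hedged illustration of that shape based on the mixin change above; the field values are placeholders, and each entry in "assessments" is simply a serialized Assessment dict returned by get_assessments.

    # Illustrative template context built by GradeMixin when a score exists:
    context = {
        "score": {
            "points_earned": 11,
            "points_possible": 12,
            "submission_uuid": "abc1234",   # used to look up peer assessments
        },
        # NEW: one serialized assessment per peer, so the template can show each peer's feedback
        "assessments": [
            {"scorer_id": "Tim", "score_type": "PE", "feedback": u"Your submission was thrilling."},
        ],
    }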
apps/openassessment/xblock/peer_assessment_mixin.py

 import logging

 from django.utils.translation import ugettext as _
 from xblock.core import XBlock

 from openassessment.peer import api as peer_api
-from openassessment.peer.api import PeerAssessmentWorkflowError
+from openassessment.peer.api import (
+    PeerAssessmentWorkflowError, PeerAssessmentRequestError, PeerAssessmentInternalError
+)

 logger = logging.getLogger(__name__)


 class PeerAssessmentMixin(object):

@@ -25,37 +33,59 @@ class PeerAssessmentMixin(object):
         Args:
             data (dict): A dictionary containing information required to create
-                a new peer assessment. Expecting attributes "points_earned",
-                "total_value", and "submission_uuid". If these attributes are
-                not available, a new assessment cannot be stored.
+                a new peer assessment. This dict should have the following attributes:
+                `submission_uuid` (string): The unique identifier for the submission being assessed.
+                `options_selected` (dict): Dictionary mapping criterion names to option values.
+                `feedback` (unicode): Written feedback for the submission.

         Returns:
-            (tuple): A tuple containing the dictionary representation of the
-                newly created assessment, and a "Success" string.
+            Dict with keys "success" (bool) indicating success/failure
+            and "msg" (unicode) containing additional information if an error occurs.
         """
+        # Validate the request
+        if 'feedback' not in data:
+            return {'success': False, 'msg': _('Must provide feedback in the assessment')}
+
+        if 'options_selected' not in data:
+            return {'success': False, 'msg': _('Must provide options selected in the assessment')}
+
+        if 'submission_uuid' not in data:
+            return {'success': False, 'msg': _('Must provide submission uuid for the assessment')}
+
         assessment_ui_model = self.get_assessment_module('peer-assessment')
         if assessment_ui_model:
             rubric_dict = {
                 'criteria': self.rubric_criteria
             }
             assessment_dict = {
-                "feedback": "Not yet implemented.",
+                "feedback": data['feedback'],
                 "options_selected": data["options_selected"],
             }
-            assessment = peer_api.create_assessment(
-                data["submission_uuid"],
-                self.get_student_item_dict()["student_id"],
-                int(assessment_ui_model["must_grade"]),
-                int(assessment_ui_model["must_be_graded_by"]),
-                assessment_dict,
-                rubric_dict,
-            )
+            try:
+                assessment = peer_api.create_assessment(
+                    data["submission_uuid"],
+                    self.get_student_item_dict()["student_id"],
+                    int(assessment_ui_model["must_grade"]),
+                    int(assessment_ui_model["must_be_graded_by"]),
+                    assessment_dict,
+                    rubric_dict,
+                )
+            except PeerAssessmentRequestError as ex:
+                return {'success': False, 'msg': ex.message}
+            except PeerAssessmentInternalError as ex:
+                logger.exception()
+                return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}

             # Temp kludge until we fix JSON serialization for datetime
             assessment["scored_at"] = str(assessment["scored_at"])

-            return {}
+            return {'success': True, 'msg': u''}
+        else:
+            return {'success': False, 'msg': _('Could not load peer assessment.')}

     @XBlock.handler
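In other words, the assess handler now always returns a JSON-serializable status object instead of an empty dict. The sketch below is a hedged summary of that contract as implied by the handler code above; the payload values are placeholders.

    # Request body the client JSON-encodes and POSTs to the 'assess' handler:
    request_data = {
        "submission_uuid": "abc1234",
        "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
        "feedback": u"Excellent job!",
    }

    # Possible responses, per the validation and error handling above:
    #   {"success": True,  "msg": u""}                                        # assessment stored
    #   {"success": False, "msg": "Must provide feedback in the assessment"}  # missing key
    #   {"success": False, "msg": "<PeerAssessmentRequestError message>"}     # e.g. rubric mismatch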
apps/openassessment/xblock/static/js/spec/oa_edit.js

-/*
+/**
 Tests for OA XBlock editing.
-*/
+**/

 describe("OpenAssessment.StudioUI", function() {
apps/openassessment/xblock/static/js/spec/oa_server.js

@@ -51,7 +51,7 @@ describe("OpenAssessment.Server", function() {
         });
     });

-    it("Sends a submission the XBlock", function() {
+    it("Sends a submission to the XBlock", function() {
         // Status, student ID, attempt number
         stubAjax(true, [true, 1, 2]);

@@ -69,7 +69,28 @@ describe("OpenAssessment.Server", function() {
         expect($.ajax).toHaveBeenCalledWith({
             url: '/submit',
             type: "POST",
-            data: { submission: "This is only a test" }
+            data: JSON.stringify({ submission: "This is only a test" })
         });
     });

+    it("Sends an assessment to the XBlock", function() {
+        stubAjax(true, { success: true, msg: '' });
+
+        var success = false;
+        var options = { clarity: "Very clear", precision: "Somewhat precise" }
+        server.assess("abc1234", options, "Excellent job!").done(function() {
+            success = true;
+        });
+
+        expect(success).toBe(true);
+        expect($.ajax).toHaveBeenCalledWith({
+            url: '/assess',
+            type: "POST",
+            data: JSON.stringify({
+                submission_uuid: "abc1234",
+                options_selected: options,
+                feedback: "Excellent job!"
+            })
+        });
+    });

@@ -183,4 +204,28 @@ describe("OpenAssessment.Server", function() {
         expect(receivedMsg).toEqual("Test error");
     });

+    it("informs the caller of a server error when sending an assessment", function() {
+        stubAjax(true, { success: false, msg: 'Test error!' });
+
+        var receivedMsg = null;
+        var options = { clarity: "Very clear", precision: "Somewhat precise" }
+        server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
+            receivedMsg = msg;
+        });
+
+        expect(receivedMsg).toEqual("Test error!");
+    });
+
+    it("informs the caller of an AJAX error when sending an assessment", function() {
+        stubAjax(false, null);
+
+        var receivedMsg = null;
+        var options = { clarity: "Very clear", precision: "Somewhat precise" }
+        server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
+            receivedMsg = msg;
+        });
+
+        expect(receivedMsg).toEqual("Could not contact server.");
+    });
 });
apps/openassessment/xblock/static/js/src/oa_base.js  (rewritten: +192 / -136)

Removed (previous procedural implementation):

/* START Javascript for OpenAssessmentXBlock. */
function OpenAssessmentBlock(runtime, element) {
    var handlerUrl = runtime.handlerUrl(element, 'submit');
    var renderSubmissionUrl = runtime.handlerUrl(element, 'render_submission');
    var renderPeerUrl = runtime.handlerUrl(element, 'render_peer_assessment');
    var renderSelfUrl = runtime.handlerUrl(element, 'render_self_assessment');
    var renderGradeUrl = runtime.handlerUrl(element, 'render_grade');

    var submissionListItem = '#openassessment__response';
    var peerListItem = '#openassessment__peer-assessment';
    var selfListItem = '#openassessment__self-assessment';
    var gradeListItem = '#openassessment__grade';

    /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */

    /*
     * Utility functions
     */
    function collapse(element) {
        element.addClass("is--collapsed");
    }

    function expand(element) {
        element.addClass("is--collapsed");
    }

    /*
     * Submission Functions
     */
    function render_submissions(data) {
        $(submissionListItem, element).replaceWith(data);
        $('#step--response__submit', element).click(function(eventObject) {
            $.ajax({
                type: "POST",
                url: handlerUrl,
                data: JSON.stringify({"submission": $('#submission__answer__value', element).val()}),
                success: function(data) {
                    $.ajax({
                        type: "POST",
                        url: renderPeerUrl,
                        dataType: "html",
                        success: function(data) { render_peer_assessment(data); }
                    });
                    collapse($(submissionListItem, element));
                    $.ajax({
                        type: "POST",
                        url: renderSubmissionUrl,
                        success: function(data) {
                            render_submissions(data);
                            collapse($(submissionListItem, element));
                        }
                    });
                }
            });
        });
    }

    /*
     * Peer Assessment Functions
     */
    function render_peer_assessment(data) {
        $(peerListItem, element).replaceWith(data);

        function prepare_assessment_post(element) {
            var selector = $("input[type=radio]:checked", element);
            var criteriaChoices = {};
            var values = [];
            for (var i = 0; i < selector.length; i++) {
                values[i] = selector[i].value;
                criteriaChoices[selector[i].name] = selector[i].value
            }
            return {
                "submission_uuid": $("span#peer_submission_uuid")[0].innerText,
                "points_earned": values,
                "options_selected": criteriaChoices
            };
        }

        $('#peer-assessment--001__assessment__submit', element).click(function(eventObject) {
            eventObject.preventDefault();
            $.ajax({
                type: "POST",
                url: runtime.handlerUrl(element, 'assess'),
                /* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
                data: JSON.stringify(prepare_assessment_post(element)),
                success: function(data) {
                    $.ajax({
                        type: "POST",
                        url: renderSelfUrl,
                        dataType: "html",
                        success: function(data) {
                            $(selfListItem, element).replaceWith(data);
                        }
                    });
                    $.ajax({
                        type: "POST",
                        url: renderPeerUrl,
                        dataType: "html",
                        success: function(data) {
                            render_peer_assessment(data);
                        }
                    });
                }
            });
        });
    }

    $(function($) {
        /* Here's where you'd do things on page load. */
        $.ajax({
            type: "POST",
            url: renderSubmissionUrl,
            dataType: "html",
            success: function(data) {
                render_submissions(data);
            }
        });
        $.ajax({
            type: "POST",
            url: renderPeerUrl,
            dataType: "html",
            success: function(data) {
                $(peerListItem, element).replaceWith(data);
                collapse($(peerListItem, element));
            }
        });
        $.ajax({
            type: "POST",
            url: renderSelfUrl,
            dataType: "html",
            success: function(data) {
                $(selfListItem, element).replaceWith(data);
                collapse($(selfListItem, element));
            }
        });
        $.ajax({
            type: "POST",
            dataType: "html",
            url: renderGradeUrl,
            success: function(data) {
                $(gradeListItem, element).replaceWith(data);
                collapse($(gradeListItem, element));
            }
        });
    });
}
/* END Javascript for OpenAssessmentXBlock. */

Added (new OpenAssessment.BaseUI implementation):

/* JavaScript for student-facing views of Open Assessment XBlock */

/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
    OpenAssessment = {};
}

/**
Interface for student-facing views.

Args:
    runtime (Runtime): an XBlock runtime instance.
    element (DOM element): The DOM element representing this XBlock.
    server (OpenAssessment.Server): The interface to the XBlock server.

Returns:
    OpenAssessment.BaseUI
**/
OpenAssessment.BaseUI = function(runtime, element, server) {
    this.runtime = runtime;
    this.element = element;
    this.server = server;
}

OpenAssessment.BaseUI.prototype = {

    /**
    Collapse one of the steps in the workflow.

    Args:
        stepName (string): The name of the step to expand/collapse.
        expanded (bool): If true, expand the step; otherwise, collapse it.
    **/
    setExpanded: function(stepName, expanded) {
        var el = $("#openassessment__" + stepName, this.element);
        if (expanded) {
            el.removeClass('is--collapsed');
        }
        else {
            el.addClass('is--collapsed');
        }
    },

    /**
    Asynchronously load each sub-view into the DOM.
    **/
    load: function() {
        this.renderSubmissionStep(true);
        this.renderPeerAssessmentStep(false);
        this.renderSelfAssessmentStep(false);
        this.renderGradeStep(false);
    },

    /**
    Render the submission step.

    Args:
        expanded (bool): If true, expand the step.
    **/
    renderSubmissionStep: function(expand) {
        var ui = this;
        this.server.render('submission').done(
            function(html) {
                // Load the HTML
                var sel = $('#openassessment__response', ui.element);
                sel.replaceWith(html);
                ui.setExpanded('response', expand);

                // Install a click handler for submission
                $('#step--response__submit', ui.element).click(
                    function(eventObject) { ui.submit(); }
                );
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    },

    /**
    Render the peer-assessment step.

    Args:
        expand (bool): If true, expand the step.
    **/
    renderPeerAssessmentStep: function(expand) {
        var ui = this;
        this.server.render('peer_assessment').done(
            function(html) {
                // Load the HTML
                var sel = $('#openassessment__peer-assessment', ui.element);
                sel.replaceWith(html);
                ui.setExpanded('peer-assessment', expand);

                // Install a click handler for assessment
                $('#peer-assessment--001__assessment__submit', ui.element).click(
                    function(eventObject) {
                        // Override default form submission
                        eventObject.preventDefault();

                        // Handle the click
                        ui.assess();
                    }
                );
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    },

    /**
    Render the self-assessment step.

    Args:
        expand (bool): If true, expand the step.
    **/
    renderSelfAssessmentStep: function(expand) {
        var ui = this;
        this.server.render('self_assessment').done(
            function(html) {
                $('#openassessment__self-assessment', ui.element).replaceWith(html);
                ui.setExpanded('self-assessment', expand);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    },

    /**
    Render the grade step.

    Args:
        expand (bool): If true, expand the step.
    **/
    renderGradeStep: function(expand) {
        var ui = this;
        this.server.render('grade').done(
            function(html) {
                $('#openassessment__grade', ui.element).replaceWith(html);
                ui.setExpanded('grade', expand);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    },

    /**
    Send a submission to the server and update the UI.
    **/
    submit: function() {
        // Send the submission to the server
        var submission = $('#submission__answer__value', this.element).val();
        var ui = this;
        this.server.submit(submission).done(
            // When we have successfully sent the submission, expand the next step
            function(studentId, attemptNum) {
                ui.renderPeerAssessmentStep(true);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    },

    /**
    Send an assessment to the server and update the UI.
    **/
    assess: function() {
        // Retrieve assessment info from the DOM
        var submissionId = $("span#peer_submission_uuid", this.element)[0].innerText;
        var optionsSelected = {};
        $("input[type=radio]:checked", this.element).each(
            function(index, sel) {
                optionsSelected[sel.name] = sel.value;
            }
        );
        var feedback = $('#assessment__rubric__question--004__value', this.element).val();

        // Send the assessment to the server
        var ui = this;
        this.server.assess(submissionId, optionsSelected, feedback).done(
            function() {
                // When we have successfully sent the assessment, expand the next step
                ui.renderSelfAssessmentStep(true);
                ui.renderGradeStep(true);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
    }
}

/* XBlock JavaScript entry point for OpenAssessmentXBlock. */
function OpenAssessmentBlock(runtime, element) {
    /**
    Render views within the base view on page load.
    **/
    $(function($) {
        var server = new OpenAssessment.Server(runtime, element);
        var ui = new OpenAssessment.BaseUI(runtime, element, server);
        ui.load();
    });
}
apps/openassessment/xblock/static/js/src/oa_server.js

@@ -85,7 +85,7 @@ OpenAssessment.Server.prototype = {
         $.ajax({
             type: "POST",
             url: url,
-            data: { submission: submission }
+            data: JSON.stringify({ submission: submission })
         }).done(function(data) {
             var success = data[0];
             if (success) {

@@ -105,6 +105,51 @@ OpenAssessment.Server.prototype = {
     },

+    /**
+    Send an assessment to the XBlock.
+
+    Args:
+        submissionId (string): The UUID of the submission.
+        optionsSelected (object literal): Keys are criteria names,
+            values are the option text the user selected for the criterion.
+        feedback (string): Written feedback on the submission.
+
+    Returns:
+        A JQuery promise, which resolves with no args if successful
+        and fails with an error message otherwise.
+
+    Example:
+        var options = { clarity: "Very clear", precision: "Somewhat precise" };
+        var feedback = "Good job!";
+        server.assess("abc123", options, feedback).done(
+            function() { console.log("Success!"); }
+        ).fail(
+            function(errorMsg) { console.log(errorMsg); }
+        );
+    **/
+    assess: function(submissionId, optionsSelected, feedback) {
+        var url = this.url('assess');
+        var payload = JSON.stringify({
+            submission_uuid: submissionId,
+            options_selected: optionsSelected,
+            feedback: feedback
+        });
+        return $.Deferred(function(defer) {
+            $.ajax({ type: "POST", url: url, data: payload }).done(
+                function(data) {
+                    if (data.success) {
+                        defer.resolve();
+                    }
+                    else {
+                        defer.rejectWith(this, [data.msg]);
+                    }
+                }
+            ).fail(function(data) {
+                defer.rejectWith(this, ['Could not contact server.']);
+            });
+        }).promise()
+    },

     /**
     Load the XBlock's XML definition from the server.

     Returns:
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml

@@ -60,11 +60,6 @@
         </criterion>
     </rubric>
     <assessments>
-        <assessment name="self-assessment"
-            start="2014-12-20T19:00-7:00"
-            due="2014-12-21T22:22-7:00"
-            must_grade="5"
-            must_be_graded_by="3" />
         <assessment name="peer-assessment"
             start="2014-12-20T19:00-7:00"
             due="2014-12-21T22:22-7:00"
apps/openassessment/xblock/test/data/assessment_scenario.xml  (new file)

<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment"
            start="2014-12-20T19:00"
            due="2014-12-21T22:22"
            must_grade="5"
            must_be_graded_by="3" />
    </assessments>
</openassessment>
apps/openassessment/xblock/test/data/basic_scenario.xml

@@ -84,11 +84,10 @@
         </criterion>
     </rubric>
     <assessments>
-        <peer-assessment name="peer-assessment"
+        <assessment name="peer-assessment"
             start="2014-12-20T19:00"
             due="2014-12-21T22:22"
             must_grade="5"
             must_be_graded_by="3" />
         <self-assessment name="self-assessment" />
     </assessments>
 </openassessment>
apps/openassessment/xblock/test/data/grade_scenario.xml  (new file)

<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment"
            start="2014-12-20T19:00"
            due="2014-12-21T22:22"
            must_grade="2"
            must_be_graded_by="2" />
    </assessments>
</openassessment>
apps/openassessment/xblock/test/test_grade.py  (new file)

# -*- coding: utf-8 -*-
"""
Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario


class TestGrade(XBlockHandlerTestCase):

    ASSESSMENTS = [
        {
            'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
        },
        {
            'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            'feedback': u'Good job!',
        },
    ]

    SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

    @scenario('data/grade_scenario.xml', user_id='Greggs')
    def test_render_grade(self, xblock):
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        scorer_submissions = []
        for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):

            # Create a submission for each scorer
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name
            scorer_sub = submission_api.create_submission(scorer, self.SUBMISSION)

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            peer_api.create_assessment(
                submission['uuid'], scorer_name, 2, 2,
                assessment, {'criteria': xblock.rubric_criteria}
            )

        # Have our user make assessments (so she can get a score)
        for submission in scorer_submissions:
            peer_api.create_assessment(
                submission['uuid'], 'Greggs', 2, 2,
                self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
            )

        # Render the view
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))

        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
        self.assertIn(u'Good job!', resp.decode('utf-8'))
apps/openassessment/xblock/test/test_peer.py  (new file)

# -*- coding: utf-8 -*-
"""
Tests for peer assessment handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario


class TestPeerAssessment(XBlockHandlerTestCase):

    ASSESSMENT = {
        'submission_uuid': None,
        'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
        'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
    }

    SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_assess_handler(self, xblock):

        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        submission_api.create_submission(another_student, self.SUBMISSION)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
        self.assertTrue(resp['success'])

        # Retrieve the assessment and check that it matches what we sent
        actual = peer_api.get_assessments(submission['uuid'])
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
        self.assertEqual(actual[0]['points_earned'], 5)
        self.assertEqual(actual[0]['points_possible'], 6)
        self.assertEqual(actual[0]['scorer_id'], 'Bob')
        self.assertEqual(actual[0]['score_type'], 'PE')
        self.assertEqual(len(actual[0]['parts']), 2)

        parts = sorted(actual[0]['parts'])
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
        self.assertEqual(actual[0]['feedback'], assessment['feedback'])

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_assess_rubric_option_mismatch(self, xblock):

        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'
        submission = submission_api.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        submission_api.create_submission(another_student, self.SUBMISSION)

        # Submit an assessment, but mutate the options selected so they do NOT match the rubric
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        assessment['options_selected']['invalid'] = 'not a part of the rubric!'
        resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')

        # Expect an error response
        self.assertFalse(resp['success'])

    @scenario('data/assessment_scenario.xml', user_id='Bob')
    def test_missing_keys_in_request(self, xblock):
        for missing in ['feedback', 'submission_uuid', 'options_selected']:
            assessment = copy.deepcopy(self.ASSESSMENT)
            del assessment[missing]
            resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
            self.assertEqual(resp['success'], False)
apps/submissions/models.py

@@ -101,6 +101,10 @@ class Score(models.Model):
     points_possible = models.PositiveIntegerField(default=0)
     created_at = models.DateTimeField(editable=False, default=now, db_index=True)

+    @property
+    def submission_uuid(self):
+        return self.submission.uuid

     def __repr__(self):
         return repr(dict(
             student_item=self.student_item,
apps/submissions/serializers.py

@@ -26,6 +26,18 @@ class SubmissionSerializer(serializers.ModelSerializer):

 class ScoreSerializer(serializers.ModelSerializer):

+    submission_uuid = serializers.Field(source='submission_uuid')

     class Meta:
         model = Score
-        fields = ('student_item', 'submission', 'points_earned', 'points_possible', 'created_at')
+        fields = (
+            'student_item',
+            'submission',
+            'points_earned',
+            'points_possible',
+            'created_at',
+
+            # Computed
+            'submission_uuid',
+        )
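With submission_uuid exposed as a computed field, a serialized score now carries the submission's UUID alongside the existing fields, which is what lets the grade view look up the peer assessments. A hedged sketch of the serializer output (all values are placeholders):

    # Illustrative output of ScoreSerializer after this change:
    serialized_score = {
        "student_item": 1,
        "submission": 1,
        "points_earned": 11,
        "points_possible": 12,
        "created_at": "2014-03-04T12:00:00Z",
        "submission_uuid": "abc1234",   # computed from Score.submission.uuid
    }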
apps/submissions/tests/test_api.py

@@ -131,9 +131,11 @@ class TestApi(TestCase):
         self._assert_score(score, 11, 12)

     def test_get_score(self):
-        self.test_create_score()
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.set_score(STUDENT_ITEM, submission, 11, 12)
         scores = api.get_score(STUDENT_ITEM)
         self._assert_score(scores[0], 11, 12)
+        self.assertEqual(scores[0]['submission_uuid'], submission['uuid'])

     def test_get_score_no_student_id(self):
         student_item = copy.deepcopy(STUDENT_ITEM)