Project: edx / edx-ora2

Commit 8bfd03c5
Authored Mar 17, 2014 by Stephen Sanchez
Parents: 376a7bd8, 6d7d4fd8

Merge pull request #142 from edx/jrbl/231-feedback-on-assessments

Jrbl/231 feedback on assessments

Showing 16 changed files with 357 additions and 18 deletions (+357, -18).
Files changed:

  apps/openassessment/assessment/admin.py  (+2, -2)
  apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedback.py  (+122, -0)
  apps/openassessment/assessment/models.py  (+15, -0)
  apps/openassessment/assessment/peer_api.py  (+81, -2)
  apps/openassessment/assessment/serializers.py  (+18, -1)
  apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html  (+1, -1)
  apps/openassessment/xblock/grade_mixin.py  (+42, -0)
  apps/openassessment/xblock/static/js/openassessment.min.js  (+0, -0)
  apps/openassessment/xblock/static/js/src/oa_base.js  (+28, -3)
  apps/openassessment/xblock/static/js/src/oa_server.js  (+38, -1)
  apps/openassessment/xblock/static/xml/censorship_rubric_example.xml  (+2, -2)
  apps/openassessment/xblock/submission_mixin.py  (+4, -2)
  apps/submissions/api.py  (+1, -3)
  apps/submissions/models.py  (+1, -0)
  apps/submissions/serializers.py  (+1, -0)
  scripts/workbench.sh  (+1, -1)
apps/openassessment/assessment/admin.py

 from django.contrib import admin

-from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem
+from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem

 admin.site.register(Assessment)
 admin.site.register(AssessmentPart)
+admin.site.register(AssessmentFeedback)
 admin.site.register(Rubric)
 admin.site.register(Criterion)
 admin.site.register(CriterionOption)
 admin.site.register(PeerWorkflow)
 admin.site.register(PeerWorkflowItem)
apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedback.py  (new file, mode 100644)

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('submission_uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
            ('helpfulness', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('feedback', self.gf('django.db.models.fields.TextField')(default='', max_length=10000)),
        ))
        db.send_create_signal('assessment', ['AssessmentFeedback'])

        # Adding M2M table for field assessments on 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback_assessments', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('assessmentfeedback', models.ForeignKey(orm['assessment.assessmentfeedback'], null=False)),
            ('assessment', models.ForeignKey(orm['assessment.assessment'], null=False))
        ))
        db.create_unique('assessment_assessmentfeedback_assessments', ['assessmentfeedback_id', 'assessment_id'])

    def backwards(self, orm):
        # Deleting model 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback')

        # Removing M2M table for field assessments on 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback_assessments')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.Submission']"})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'helpfulness': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scorer_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'submissions.studentitem': {
            'Meta': {'unique_together': "(('course_id', 'student_id', 'item_id'),)", 'object_name': 'StudentItem'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'submissions.submission': {
            'Meta': {'ordering': "['-submitted_at', '-id']", 'object_name': 'Submission'},
            'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'attempt_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'student_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.StudentItem']"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'blank': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
apps/openassessment/assessment/models.py

@@ -373,6 +373,21 @@ class AssessmentPart(models.Model):
         return self.option.criterion.points_possible


+class AssessmentFeedback(models.Model):
+    """A response to a submission's feedback, judging accuracy or helpfulness."""
+    submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
+    assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
+    HELPFULNESS_CHOICES = (
+        (0, 'These results were not at all helpful'),
+        (1, 'These results were somewhat helpful'),
+        (2, 'These results were helpful'),
+        (3, 'These results were very helpful'),
+        (4, 'These results were extremely helpful'),
+    )
+    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
+    feedback = models.TextField(max_length=10000, default="")
+
+
 class PeerWorkflow(models.Model):
     """Internal Model for tracking Peer Assessment Workflow
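For orientation, here is a minimal Django-shell sketch of how the new AssessmentFeedback model could be exercised. It is not part of the commit; the submission UUID and feedback text are placeholders, and it assumes peer Assessment rows already exist for that submission.

    from openassessment.assessment.models import Assessment, AssessmentFeedback

    feedback, _created = AssessmentFeedback.objects.get_or_create(
        submission_uuid="placeholder-submission-uuid",
        defaults={"feedback": u"The rubric scores seemed fair.", "helpfulness": 3},
    )
    # Attach the peer assessments this feedback refers to (M2M table added by migration 0002).
    feedback.assessments.add(
        *Assessment.objects.filter(submission__uuid=feedback.submission_uuid)
    )
    print(feedback.get_helpfulness_display())  # "These results were very helpful"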
apps/openassessment/assessment/peer_api.py

@@ -13,9 +13,9 @@ from django.db import DatabaseError
 from django.db.models import Q
 from pytz import UTC

-from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem
+from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, rubric_from_dict, get_assessment_review)
+    AssessmentSerializer, rubric_from_dict, get_assessment_review, AssessmentFeedbackSerializer)
 from submissions.models import Submission, StudentItem
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer

@@ -889,3 +889,82 @@ def _check_submission_graded(submission_uuid, must_be_graded_by):
     return PeerWorkflowItem.objects.filter(
         submission_uuid=submission_uuid
     ).exclude(assessment=-1).count() >= must_be_graded_by
+
+
+def get_assessment_feedback(submission_uuid):
+    """Retrieve a feedback object for an assessment whether it exists or not.
+
+    Gets or creates a new Assessment Feedback model for the given submission.
+
+    Args:
+        submission_uuid: The submission we want to create assessment feedback
+            for.
+
+    Returns:
+        The assessment feedback object that exists, or a newly created model.
+
+    Raises:
+        PeerAssessmentInternalError: Raised when the AssessmentFeedback cannot
+            be created or retrieved because of internal exceptions.
+
+    """
+    try:
+        feedback = AssessmentFeedback.objects.get(submission_uuid=submission_uuid)
+        return AssessmentFeedbackSerializer(feedback).data
+    except AssessmentFeedback.DoesNotExist:
+        return None
+    except DatabaseError:
+        error_message = (
+            u"An error occurred retrieving assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+
+def set_assessment_feedback(must_grade, feedback_dict):
+    """Set a feedback object for an assessment to have some new values.
+
+    Sets or updates the assessment feedback with the given values in the dict.
+
+    Args:
+        must_grade (int): The required number of assessments for the associated
+            submission.
+        feedback_dict (dict): A dictionary of all the values to update or create
+            a new assessment feedback.
+
+    Returns:
+        The modified or created feedback.
+    """
+    submission_uuid = feedback_dict.get('submission_uuid')
+    if not submission_uuid:
+        error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
+        logger.error(error_message)
+        raise PeerAssessmentRequestError(error_message)
+    try:
+        assessments = Assessment.objects.filter(submission__uuid=submission_uuid, score_type="PE")
+    except DatabaseError:
+        error_message = (
+            u"An error occurred getting database state to set assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+    feedback = AssessmentFeedbackSerializer(data=feedback_dict)
+    if not feedback.is_valid():
+        raise PeerAssessmentRequestError(feedback.errors)
+
+    try:
+        feedback_model = feedback.save()
+        # Assessments associated with feedback must be saved after the row is
+        # committed to the database in order to associate the PKs across both
+        # tables.
+        feedback_model.assessments.add(*[assessment.id for assessment in assessments[:must_grade]])
+    except DatabaseError:
+        error_message = (
+            u"An error occurred saving assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+    return feedback.data
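A hedged usage sketch of the two new peer_api functions follows. It is illustrative rather than part of the commit: submission_uuid is a placeholder, and must_grade would normally come from the peer-assessment problem definition.

    from openassessment.assessment import peer_api

    submission_uuid = "placeholder-submission-uuid"

    # Record (or update) the learner's feedback on the peer reviews they received.
    saved = peer_api.set_assessment_feedback(3, {
        'submission_uuid': submission_uuid,
        'feedback': u"The comments I received were specific and useful.",
        'helpfulness': 3,
    })

    # Later reads return the serialized feedback, or None if nothing was recorded.
    existing = peer_api.get_assessment_feedback(submission_uuid)
    if existing is not None:
        print(existing['feedback'])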
apps/openassessment/assessment/serializers.py

@@ -8,7 +8,7 @@ from copy import deepcopy
 from django.utils.translation import ugettext as _
 from rest_framework import serializers
 from openassessment.assessment.models import (
-    Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
+    Assessment, AssessmentFeedback, AssessmentPart, Criterion, CriterionOption, Rubric
 )

@@ -271,3 +271,20 @@ def rubric_from_dict(rubric_dict):
     rubric = rubric_serializer.save()

     return rubric
+
+
+class AssessmentFeedbackSerializer(serializers.ModelSerializer):
+    submission_uuid = serializers.CharField(source='submission_uuid')
+    helpfulness = serializers.IntegerField(source='helpfulness')
+    feedback = serializers.CharField(source='feedback')
+    assessments = AssessmentSerializer(many=True, default=None, required=False)
+
+    class Meta:
+        model = AssessmentFeedback
+        fields = (
+            'submission_uuid',
+            'helpfulness',
+            'feedback',
+            'assessments',
+        )
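As a quick illustration (not part of the commit), the new serializer validates a plain dict the same way peer_api.set_assessment_feedback does, using the DRF 2.x is_valid()/save() flow; the field values below are placeholders.

    from openassessment.assessment.serializers import AssessmentFeedbackSerializer

    serializer = AssessmentFeedbackSerializer(data={
        'submission_uuid': 'placeholder-submission-uuid',
        'helpfulness': 2,
        'feedback': u"Reviews were short but accurate.",
    })
    if serializer.is_valid():
        feedback_model = serializer.save()  # persists and returns the AssessmentFeedback instance
    else:
        print(serializer.errors)            # per-field validation messages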
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html

@@ -134,7 +134,7 @@
 <ol class="list list--fields submission__feeedback__content">
     <li class="field field--textarea feedback__remarks" id="feedback__remarks">
         <label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
-        <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was..."></textarea>
+        <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
     </li>
 </ol>
apps/openassessment/xblock/grade_mixin.py

 import copy

+from django.utils.translation import ugettext as _
+
 from xblock.core import XBlock

 from openassessment.assessment import peer_api

@@ -20,6 +23,8 @@ class GradeMixin(object):
         status = workflow.get('status')
         context = {}
         if status == "done":
+            feedback = peer_api.get_assessment_feedback(self.submission_uuid)
+            feedback_text = feedback.get('feedback', '') if feedback else ''
             max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
             path = 'openassessmentblock/grade/oa_grade_complete.html'
             assessment_ui_model = self.get_assessment_module('peer-assessment')

@@ -40,6 +45,7 @@ class GradeMixin(object):
                 student_submission["uuid"],
                 assessment_ui_model["must_be_graded_by"]
             )
+            context["feedback_text"] = feedback_text
             context["student_submission"] = student_submission
             context["peer_assessments"] = peer_assessments
             context["self_assessment"] = self_assessment

@@ -65,3 +71,39 @@ class GradeMixin(object):
             path = 'openassessmentblock/grade/oa_grade_incomplete.html'

         return self.render_assessment(path, context)
+
+    @XBlock.json_handler
+    def feedback_submit(self, data, suffix=''):
+        """Attach the Assessment Feedback text to some submission."""
+        assessment_ui_model = self.get_assessment_module('peer-assessment') or {}
+        assessment_feedback = data.get('feedback', '')
+        if not assessment_feedback:
+            return {
+                'success': False,
+                'msg': _(u"No feedback given, so none recorded")
+            }
+        try:
+            peer_api.set_assessment_feedback(
+                assessment_ui_model['must_grade'],
+                {
+                    'submission_uuid': self.submission_uuid,
+                    'feedback': assessment_feedback,
+                    'helpfulness': 0
+                }
+            )
+        except (
+            peer_api.PeerAssessmentInternalError,
+            peer_api.PeerAssessmentRequestError
+        ):
+            return {
+                'success': False,
+                'msg': _(
+                    u"Assessment Feedback could not be saved due to an internal "
+                    u"server error."
+                ),
+            }
+        return {'success': True, 'msg': _(u"Feedback saved!")}
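For reference, a hypothetical client-side call to the new feedback_submit handler, showing the request and response shapes it expects. The handler URL and the use of the requests library are illustrative assumptions, not part of the commit; the real client call lives in oa_server.js below.

    import json
    import requests

    # Placeholder handler URL; in practice the XBlock runtime generates this.
    handler_url = "http://localhost:8000/handler/feedback_submit"

    resp = requests.post(handler_url, data=json.dumps({"feedback": u"Clear and actionable reviews."}))
    result = resp.json()
    # Per the handler above:
    #   {"success": true,  "msg": "Feedback saved!"}                      on success
    #   {"success": false, "msg": "No feedback given, so none recorded"}  when feedback is empty
    assert "success" in result and "msg" in result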
apps/openassessment/xblock/static/js/openassessment.min.js

(Minified bundle; this diff is collapsed.)
apps/openassessment/xblock/static/js/src/oa_base.js

@@ -98,6 +98,7 @@ OpenAssessment.BaseUI.prototype = {
                     ui.save();
                 }
             );
         }
     ).fail(function(errMsg) {
         ui.showLoadError('response');

@@ -150,6 +151,7 @@ OpenAssessment.BaseUI.prototype = {
                     ui.peerAssess();
                 }
             );
         }
     ).fail(function(errMsg) {
         ui.showLoadError('peer-assessment');

@@ -251,14 +253,21 @@ OpenAssessment.BaseUI.prototype = {
             function(html) {
                 // Load the HTML
                 $('#openassessment__grade', ui.element).replaceWith(html);

                 // Install a click handler for collapse/expand
                 var sel = $('#openassessment__grade', ui.element);
                 ui.setUpCollapseExpand(sel);
+
+                // Install a click handler for assessment feedback
+                sel.find('#feedback__submit').click(function(eventObject) {
+                    eventObject.preventDefault();
+                    ui.feedback_assess();
+                });
             }
         ).fail(function(errMsg) {
             ui.showLoadError('grade', errMsg);
         });
     },

@@ -308,8 +317,24 @@ OpenAssessment.BaseUI.prototype = {
     },

     /**
-    * Send an assessment to the server and update the UI.
-    */
+    Send assessment feedback to the server and update the UI.
+    **/
+    feedback_assess: function() {
+        // Send the submission to the server
+        var feedback = $('#feedback__remarks__value', this.element).val();
+        var ui = this;
+        this.server.feedback_submit(feedback).done(
+            // When we have successfully sent the submission, textarea no longer editable
+            console.log("Feedback to the assessments submitted, thanks!")
+        ).fail(function(errMsg) {
+            // TODO: display to the user
+            ui.toggleActionError('feedback_assess', errMsg);
+        });
+    },
+
+    /**
+    Send an assessment to the server and update the UI.
+    **/
     peerAssess: function() {
         var ui = this;
         ui.peerAssessRequest(function() {
apps/openassessment/xblock/static/js/src/oa_server.js

@@ -161,9 +161,46 @@ OpenAssessment.Server.prototype = {
         }).promise();
     },

+    /**
+     * Send feedback on assessments to the XBlock.
+     * Args:
+     *      feedback: The feedback given on a series of assessments associated
+     *          with this current submission.
+     *
+     * Returns:
+     *      A JQuery promise, which resolves with no args if successful and
+     *      fails with an error message otherwise.
+     *
+     * Example:
+     *      server.feedback_submit("I dislike my reviews.").done(
+     *          console.log("Success!");
+     *      ).fail(function(errMsg) {
+     *          console.log("Error: " + errMsg);
+     *      });
+     */
+    feedback_submit: function(feedback) {
+        var url = this.url('feedback_submit');
+        var payload = JSON.stringify({feedback: feedback});
+        return $.Deferred(function(defer) {
+            $.ajax({ type: "POST", url: url, data: payload }).done(
+                function(data) {
+                    if (data.success) {
+                        defer.resolve();
+                    }
+                    else {
+                        defer.rejectWith(this, [data.msg]);
+                    }
+                }
+            ).fail(function(data) {
+                defer.rejectWith(this, ['Could not contact server.']);
+            });
+        }).promise()
+    },
+
     /**
     Send a peer assessment to the XBlock.

     Args:
         submissionId (string): The UUID of the submission.
         optionsSelected (object literal): Keys are criteria names,
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml

@@ -61,8 +61,8 @@
     <assessment name="peer-assessment"
                 start="2014-12-20T19:00-7:00"
                 due="2014-12-21T22:22-7:00"
-                must_grade="5"
-                must_be_graded_by="3" />
+                must_grade="1"
+                must_be_graded_by="1" />
     <assessment name="self-assessment" />
   </assessments>
 </openassessment>
apps/openassessment/xblock/submission_mixin.py

 import copy
 import logging

-import dateutil
+from django.utils.translation import ugettext as _
+
 from xblock.core import XBlock

 from submissions import api
 from openassessment.assessment import peer_api
 from openassessment.workflow import api as workflow_api
apps/submissions/api.py

@@ -8,9 +8,7 @@ import logging
 from django.db import DatabaseError
 from django.utils.encoding import force_unicode

-from submissions.serializers import (
-    SubmissionSerializer, StudentItemSerializer, ScoreSerializer
-)
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer
 from submissions.models import Submission, StudentItem, Score, ScoreSummary

 logger = logging.getLogger(__name__)
apps/submissions/models.py

@@ -157,3 +157,4 @@ class ScoreSummary(models.Model):
                 u"Error while updating score summary for student item {}"
                 .format(score.student_item)
             )
apps/submissions/serializers.py

@@ -42,3 +42,4 @@ class ScoreSerializer(serializers.ModelSerializer):
             # Computed
             'submission_uuid',
         )
scripts/workbench.sh

@@ -11,7 +11,7 @@ export DJANGO_SETTINGS_MODULE="settings.dev"

 # Create the database
 echo "Updating the database..."
-python manage.py syncdb --migrate --noinput -v 0
+python manage.py syncdb --migrate -v 0

 echo "Starting server..."
 python manage.py runserver_plus "${@:1}"