Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
E
edx-ora2
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
edx
edx-ora2
Commits
c51eb79c
Commit
c51eb79c
authored
Mar 13, 2014
by
Stephen Sanchez
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Changed some models and apis around. Some small tweaks to the UI. Migration.
parent
c6a73bfe
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
233 additions
and
66 deletions
+233
-66
apps/openassessment/assessment/admin.py
+1
-2
apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedback.py
+122
-0
apps/openassessment/assessment/models.py
+15
-15
apps/openassessment/assessment/peer_api.py
+63
-2
apps/openassessment/assessment/serializers.py
+5
-5
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html
+1
-1
apps/openassessment/xblock/grade_mixin.py
+22
-9
apps/openassessment/xblock/static/js/src/oa_base.js
+2
-3
apps/openassessment/xblock/static/js/src/oa_server.js
+2
-5
apps/submissions/api.py
+0
-24
No files found.
apps/openassessment/assessment/admin.py
View file @
c51eb79c
...
...
@@ -4,10 +4,9 @@ from openassessment.assessment.models import Assessment, AssessmentFeedback, Ass
# Register the assessment models with the Django admin so they can be
# inspected and edited through the admin UI.
# NOTE: each model may be registered only once -- a duplicate
# admin.site.register() call raises admin.sites.AlreadyRegistered.
admin.site.register(Assessment)
admin.site.register(AssessmentPart)
admin.site.register(AssessmentFeedback)
admin.site.register(Rubric)
admin.site.register(Criterion)
admin.site.register(CriterionOption)
admin.site.register(PeerWorkflow)
admin.site.register(PeerWorkflowItem)
apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedback.py
0 → 100644
View file @
c51eb79c
# -*- coding: utf-8 -*-
import
datetime
from
south.db
import
db
from
south.v2
import
SchemaMigration
from
django.db
import
models
class Migration(SchemaMigration):
    """Auto-generated South migration: adds the AssessmentFeedback model and
    the M2M join table linking it to Assessment."""

    def forwards(self, orm):
        # Adding model 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('submission_uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
            ('helpfulness', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('feedback', self.gf('django.db.models.fields.TextField')(default='', max_length=10000)),
        ))
        db.send_create_signal('assessment', ['AssessmentFeedback'])

        # Adding M2M table for field assessments on 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback_assessments', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('assessmentfeedback', models.ForeignKey(orm['assessment.assessmentfeedback'], null=False)),
            ('assessment', models.ForeignKey(orm['assessment.assessment'], null=False))
        ))
        db.create_unique('assessment_assessmentfeedback_assessments', ['assessmentfeedback_id', 'assessment_id'])

    def backwards(self, orm):
        # Deleting model 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback')

        # Removing M2M table for field assessments on 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback_assessments')

    # Frozen ORM snapshot used by South to resolve the orm[...] lookups above.
    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.Submission']"})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'helpfulness': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scorer_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'submissions.studentitem': {
            'Meta': {'unique_together': "(('course_id', 'student_id', 'item_id'),)", 'object_name': 'StudentItem'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'submissions.submission': {
            'Meta': {'ordering': "['-submitted_at', '-id']", 'object_name': 'Submission'},
            'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'attempt_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'student_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.StudentItem']"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'blank': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
apps/openassessment/assessment/models.py
View file @
c51eb79c
...
...
@@ -373,6 +373,21 @@ class AssessmentPart(models.Model):
return
self
.
option
.
criterion
.
points_possible
class AssessmentFeedback(models.Model):
    """A response to a submission's feedback, judging accuracy or helpfulness."""
    # One feedback record per submission (enforced by unique=True).
    submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
    # The assessments this feedback refers to.
    assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback')

    HELPFULNESS_CHOICES = (
        (0, 'These results were not at all helpful'),
        (1, 'These results were somewhat helpful'),
        (2, 'These results were helpful'),
        (3, 'These results were very helpful'),
        (4, 'These results were extremely helpful'),
    )
    # Default 2 corresponds to 'These results were helpful'.
    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
    # NOTE(review): max_length on a TextField is enforced by forms only, not
    # at the database level.
    feedback = models.TextField(max_length=10000, default="")
class
PeerWorkflow
(
models
.
Model
):
"""Internal Model for tracking Peer Assessment Workflow
...
...
@@ -439,18 +454,3 @@ class PeerWorkflowItem(models.Model):
    def __unicode__(self):
        # Reuse repr() so the unicode form matches the debug representation.
        return repr(self)
class AssessmentFeedback(models.Model):
    """A response to a submission's feedback, judging accuracy or helpfulness."""
    # Peer workflow items whose assessments this feedback covers.
    # NOTE(review): this diff also shows a variant of AssessmentFeedback keyed
    # on submission_uuid with an M2M to Assessment; this version appears to be
    # the one being removed in the commit -- confirm which is current.
    peerworkflows = models.ManyToManyField(PeerWorkflowItem)

    HELPFULNESS_CHOICES = (
        (0, 'These results were not at all helpful'),
        (1, 'These results were somewhat helpful'),
        (2, 'These results were helpful'),
        (3, 'These results were very helpful'),
        (4, 'These results were extremely helpful'),
    )
    # Default 2 corresponds to 'These results were helpful'.
    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
    # NOTE(review): max_length on a TextField is enforced by forms only.
    feedback = models.TextField(max_length=10000, default="")
apps/openassessment/assessment/peer_api.py
View file @
c51eb79c
...
...
@@ -13,9 +13,9 @@ from django.db import DatabaseError
from
django.db.models
import
Q
from
pytz
import
UTC
from
openassessment.assessment.models
import
Assessment
,
InvalidOptionSelection
,
PeerWorkflow
,
PeerWorkflowItem
from
openassessment.assessment.models
import
Assessment
,
InvalidOptionSelection
,
PeerWorkflow
,
PeerWorkflowItem
,
AssessmentFeedback
from
openassessment.assessment.serializers
import
(
AssessmentSerializer
,
rubric_from_dict
,
get_assessment_review
)
AssessmentSerializer
,
rubric_from_dict
,
get_assessment_review
,
AssessmentFeedbackSerializer
)
from
submissions.models
import
Submission
,
StudentItem
from
submissions.serializers
import
SubmissionSerializer
,
StudentItemSerializer
...
...
@@ -889,3 +889,63 @@ def _check_submission_graded(submission_uuid, must_be_graded_by):
return
PeerWorkflowItem
.
objects
.
filter
(
submission_uuid
=
submission_uuid
)
.
exclude
(
assessment
=-
1
)
.
count
()
>=
must_be_graded_by
def get_assessment_feedback(submission_uuid):
    """Retrieve a feedback object for an assessment whether it exists or not.

    Gets or creates a new AssessmentFeedback model for the given submission.

    Args:
        submission_uuid: The submission we want to create assessment feedback
            for.

    Returns:
        dict: The serialized assessment feedback that exists, or a newly
            created one.

    Raises:
        PeerAssessmentInternalError: Raised when the AssessmentFeedback cannot
            be created or retrieved because of internal exceptions.

    """
    try:
        # submission_uuid is unique on the model, so this yields exactly one
        # feedback row per submission.  The created flag is not needed here.
        feedback, _ = AssessmentFeedback.objects.get_or_create(
            submission_uuid=submission_uuid
        )
        return AssessmentFeedbackSerializer(feedback).data
    except DatabaseError:
        error_message = (
            u"An error occurred retrieving assessment feedback for {}."
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
def set_assessment_feedback(must_grade, feedback_dict):
    """Set a feedback object for an assessment to have some new values.

    Sets or updates the assessment feedback with the given values in the
    dict, and associates it with the first `must_grade` peer assessments of
    the submission.

    Args:
        must_grade (int): The required number of assessments for the
            associated submission.
        feedback_dict (dict): A dictionary of all the values to update or
            create a new assessment feedback.  Must contain a
            'submission_uuid' key.

    Returns:
        The modified or created feedback.

    Raises:
        PeerAssessmentRequestError: Raised when the feedback or submission
            cannot be found for the given uuid, or when the supplied values
            fail serializer validation.

    """
    submission_uuid = feedback_dict['submission_uuid']
    try:
        feedback_model = AssessmentFeedback.objects.get(
            submission_uuid=submission_uuid
        )
        submission = Submission.objects.get(uuid=submission_uuid)
    except (AssessmentFeedback.DoesNotExist, Submission.DoesNotExist):
        # Surface a module-level error type instead of leaking raw ORM
        # exceptions to callers (consistent with get_assessment_feedback).
        raise PeerAssessmentRequestError(
            u"Cannot find assessment feedback for submission {}.".format(
                submission_uuid
            )
        )

    # Only the peer ("PE") assessments that counted toward the grade -- the
    # first `must_grade` of them -- are associated with the feedback.
    feedback_dict['assessments'] = [
        assessment.pk
        for assessment in Assessment.objects.filter(
            submission=submission, score_type="PE"
        )[:must_grade]
    ]
    feedback = AssessmentFeedbackSerializer(feedback_model, data=feedback_dict)
    if not feedback.is_valid():
        raise PeerAssessmentRequestError(feedback.errors)
    feedback.save()
    return feedback.data
apps/openassessment/assessment/serializers.py
View file @
c51eb79c
...
...
@@ -277,9 +277,9 @@ class AssessmentFeedbackSerializer(serializers.ModelSerializer):
class
Meta
:
model
=
AssessmentFeedback
fields
=
(
'id'
,
'peerworkflows
'
,
'helpfulness'
,
'feedback'
,
)
fields
=
(
'submission_uuid
'
,
'helpfulness'
,
'feedback'
,
)
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html
View file @
c51eb79c
...
...
@@ -134,7 +134,7 @@
<ol
class=
"list list--fields submission__feeedback__content"
>
<li
class=
"field field--textarea feedback__remarks"
id=
"feedback__remarks"
>
<label
for=
"feedback__remarks__value"
>
Please provide any thoughts or comments on the feedback you received from your peers here.
</label>
<textarea
id=
"feedback__remarks__value"
placeholder=
"I feel the feedback I received was..."
></textarea>
<textarea
id=
"feedback__remarks__value"
placeholder=
"I feel the feedback I received was..."
>
{{ feedback_text }}
</textarea>
</li>
</ol>
...
...
apps/openassessment/xblock/grade_mixin.py
View file @
c51eb79c
...
...
@@ -4,8 +4,6 @@ from django.utils.translation import ugettext as _
from
xblock.core
import
XBlock
from
openassessment.assessment
import
peer_api
from
submissions
import
api
as
submissions_api
class
GradeMixin
(
object
):
...
...
@@ -30,6 +28,7 @@ class GradeMixin(object):
status
=
workflow
.
get
(
'status'
)
context
=
{}
if
status
==
"done"
:
feedback
=
peer_api
.
get_assessment_feedback
(
self
.
submission_uuid
)
max_scores
=
peer_api
.
get_rubric_max_scores
(
self
.
submission_uuid
)
path
=
'openassessmentblock/grade/oa_grade_complete.html'
assessment_ui_model
=
self
.
get_assessment_module
(
'peer-assessment'
)
...
...
@@ -50,6 +49,7 @@ class GradeMixin(object):
student_submission
[
"uuid"
],
assessment_ui_model
[
"must_be_graded_by"
]
)
context
[
"feedback_text"
]
=
feedback
.
get
(
'feedback'
,
''
)
context
[
"student_submission"
]
=
student_submission
context
[
"peer_assessments"
]
=
peer_assessments
context
[
"self_assessment"
]
=
self_assessment
...
...
@@ -79,12 +79,25 @@ class GradeMixin(object):
@XBlock.json_handler
def
feedback_submit
(
self
,
data
,
suffix
=
''
):
"""Attach the Assessment Feedback text to some submission."""
submission_uuid
=
self
.
submission_uuid
assessment_ui_model
=
self
.
get_assessment_module
(
'peer-assessment'
)
or
{}
assessment_feedback
=
data
.
get
(
'feedback'
,
''
)
raise
Exception
,
"jrbl everything worked up to here"
# DEBUG
if
not
assessment_feedback
:
return
{
'success'
:
False
,
'msg'
:
_
(
u"No feedback given, so none recorded"
)}
feedback_dict
=
submissions_api
.
get_assessment_feedback
(
submission_uuid
)
feedback_dict
[
'feedback'
]
=
assessment_feedback
__
=
submissions_api
.
set_assessment_feedback
(
feedback_dict
)
return
{
'success'
:
True
,
'msg'
:
_
(
u"Feedback saved!"
)}
return
{
'success'
:
False
,
'msg'
:
_
(
u"No feedback given, so none recorded"
)
}
peer_api
.
set_assessment_feedback
(
assessment_ui_model
.
get
(
'must_grade'
,
0
),
{
'submission_uuid'
:
self
.
submission_uuid
,
'feedback'
:
assessment_feedback
,
'helpfulness'
:
0
}
)
return
{
'success'
:
True
,
'msg'
:
_
(
u"Feedback saved!"
)
}
apps/openassessment/xblock/static/js/src/oa_base.js
View file @
c51eb79c
...
...
@@ -253,7 +253,7 @@ OpenAssessment.BaseUI.prototype = {
function
(
html
)
{
// Load the HTML
$
(
'#openassessment__grade'
,
ui
.
element
).
replaceWith
(
html
);
// Install a click handler for collapse/expand
var
sel
=
$
(
'#openassessment__grade'
,
ui
.
element
);
ui
.
setUpCollapseExpand
(
sel
);
...
...
@@ -322,10 +322,9 @@ OpenAssessment.BaseUI.prototype = {
feedback_assess
:
function
()
{
// Send the submission to the server
var
feedback
=
$
(
'#feedback__remarks__value'
,
this
.
element
).
val
();
var
ui
=
this
;
this
.
server
.
feedback_submit
(
feedback
).
done
(
// When we have successfully sent the submission, textarea no longer editable
console
.
log
(
"Feedback to the assessments submitted, thanks!"
)
// JRBL: FIXME: TODO: make this true
console
.
log
(
"Feedback to the assessments submitted, thanks!"
)
).
fail
(
function
(
errMsg
)
{
// TODO: display to the user
console
.
log
(
errMsg
);
...
...
apps/openassessment/xblock/static/js/src/oa_server.js
View file @
c51eb79c
...
...
@@ -162,9 +162,8 @@ OpenAssessment.Server.prototype = {
},
/**
Send feedback on assessments to the XBlock.
FIXME: JRBL: write documentation
**/
* Send feedback on assessments to the XBlock.
*/
feedback_submit
:
function
(
feedback
)
{
var
url
=
this
.
url
(
'feedback_submit'
);
var
payload
=
JSON
.
stringify
({
...
...
@@ -175,11 +174,9 @@ OpenAssessment.Server.prototype = {
function
(
data
)
{
if
(
data
.
success
)
{
defer
.
resolve
();
alert
(
"resolved!"
);
}
else
{
defer
.
rejectWith
(
this
,
[
data
.
msg
]);
alert
(
"rejected!"
);
}
}
).
fail
(
function
(
data
)
{
...
...
apps/submissions/api.py
View file @
c51eb79c
...
...
@@ -10,8 +10,6 @@ from django.utils.encoding import force_unicode
from
submissions.serializers
import
SubmissionSerializer
,
StudentItemSerializer
,
ScoreSerializer
from
submissions.models
import
Submission
,
StudentItem
,
Score
,
ScoreSummary
from
openassessment.assessment.serializers
import
AssessmentFeedbackSerializer
from
openassessment.assessment.models
import
AssessmentFeedback
logger
=
logging
.
getLogger
(
__name__
)
...
...
@@ -489,25 +487,3 @@ def _get_or_create_student_item(student_item_dict):
student_item_dict
)
logger
.
exception
(
error_message
)
raise
SubmissionInternalError
(
error_message
)
def get_assessment_feedback(submission_uuid):
    """Retrieve a feedback object for an assessment whether it exists or not.

    Args:
        submission_uuid: uuid of the submission whose feedback to fetch.

    Returns:
        dict: the serialized AssessmentFeedback, created on first access.

    Raises:
        DatabaseError: Re-raised (with traceback preserved) when the feedback
            cannot be read or created.
    """
    try:
        submission = Submission.objects.get(uuid=submission_uuid)
        # NOTE(review): AssessmentFeedback shows no `submission` field in the
        # visible models -- confirm this lookup against the model definition.
        feedback_obj, _ = AssessmentFeedback.objects.get_or_create(
            submission=submission
        )
        return AssessmentFeedbackSerializer(feedback_obj).data
    except DatabaseError:
        error_message = (
            u"An error occurred retrieving assessment feedback for {}."
            .format(submission_uuid)
        )
        logger.exception(error_message)
        # Bare raise re-raises the original exception with its traceback;
        # the old `raise DatabaseError, msg` form was Python-2-only and
        # discarded the traceback.
        raise
def set_assessment_feedback(feedback_dict):
    """Set a feedback object for an assessment to have some new values.

    Args:
        feedback_dict (dict): requires 'submission_uuid', 'id', 'helpfulness'
            and 'feedback' keys.

    Returns:
        dict: the serialized, updated AssessmentFeedback.

    Raises:
        ValueError: If the feedback row does not belong to the submission.
    """
    # BUG FIX: Submission's field is `uuid` (see its usage elsewhere in this
    # codebase); filtering on `submission_uuid=` raised FieldError.
    submission = Submission.objects.get(uuid=feedback_dict['submission_uuid'])
    feedback_obj = AssessmentFeedback.objects.get(pk=feedback_dict['id'])
    if feedback_obj.submission != submission:
        # ValueError subclasses Exception, so existing broad handlers still
        # catch it; the old `raise Exception, "..."` form was Python-2-only.
        raise ValueError("Can't re-associate a piece of feedback")
    feedback_obj.helpfulness = feedback_dict['helpfulness']
    feedback_obj.feedback = feedback_dict['feedback']
    feedback_obj.save()
    return AssessmentFeedbackSerializer(feedback_obj).data
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment