edx / edx-ora2 · Commit ce6e0b93
Authored Mar 18, 2014 by Joe Blaylock; committed by Will Daly, Mar 25, 2014
Allow multiple feedback options to be selected
Parent: 50583fbc
Showing 19 changed files, with 753 additions and 182 deletions.
apps/openassessment/assessment/admin.py (+7, -1)
apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedbackoption__del_field_assessmentfeedback_feedb.py (+134, -0)
apps/openassessment/assessment/models.py (+51, -10)
apps/openassessment/assessment/peer_api.py (+45, -41)
apps/openassessment/assessment/serializers.py (+19, -6)
apps/openassessment/assessment/test/test_models.py (+96, -1)
apps/openassessment/assessment/test/test_peer.py (+15, -9)
apps/openassessment/assessment/test/test_serializers.py (+36, -2)
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html (+49, -25)
apps/openassessment/xblock/grade_mixin.py (+26, -31)
apps/openassessment/xblock/static/js/fixtures/grade_complete.html (+55, -0)
apps/openassessment/xblock/static/js/openassessment.min.js (+0, -0)
apps/openassessment/xblock/static/js/spec/oa_base.js (+36, -0)
apps/openassessment/xblock/static/js/spec/oa_server.js (+42, -0)
apps/openassessment/xblock/static/js/src/oa_base.js (+19, -14)
apps/openassessment/xblock/static/js/src/oa_server.js (+14, -15)
apps/openassessment/xblock/static/xml/poverty_rubric_example.xml (+2, -2)
apps/openassessment/xblock/submission_mixin.py (+1, -1)
apps/openassessment/xblock/test/test_grade.py (+106, -24)
apps/openassessment/assessment/admin.py

```diff
 from django.contrib import admin
-from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem
+from openassessment.assessment.models import (
+    Assessment, AssessmentPart, Rubric,
+    AssessmentFeedback, AssessmentFeedbackOption,
+    Criterion, CriterionOption,
+    PeerWorkflow, PeerWorkflowItem,
+)

 admin.site.register(Assessment)
 admin.site.register(AssessmentPart)
 admin.site.register(AssessmentFeedback)
+admin.site.register(AssessmentFeedbackOption)
 admin.site.register(Rubric)
 admin.site.register(Criterion)
 admin.site.register(CriterionOption)
```
apps/openassessment/assessment/migrations/0002_auto__add_assessmentfeedbackoption__del_field_assessmentfeedback_feedb.py (new file, mode 100644)

```python
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentFeedbackOption'
        db.create_table('assessment_assessmentfeedbackoption', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('text', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
        ))
        db.send_create_signal('assessment', ['AssessmentFeedbackOption'])

        # Deleting field 'AssessmentFeedback.feedback'
        db.delete_column('assessment_assessmentfeedback', 'feedback')

        # Deleting field 'AssessmentFeedback.helpfulness'
        db.delete_column('assessment_assessmentfeedback', 'helpfulness')

        # Adding field 'AssessmentFeedback.feedback_text'
        db.add_column('assessment_assessmentfeedback', 'feedback_text',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000),
                      keep_default=False)

        # Adding M2M table for field options on 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback_options', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('assessmentfeedback', models.ForeignKey(orm['assessment.assessmentfeedback'], null=False)),
            ('assessmentfeedbackoption', models.ForeignKey(orm['assessment.assessmentfeedbackoption'], null=False))
        ))
        db.create_unique('assessment_assessmentfeedback_options', ['assessmentfeedback_id', 'assessmentfeedbackoption_id'])

    def backwards(self, orm):
        # Deleting model 'AssessmentFeedbackOption'
        db.delete_table('assessment_assessmentfeedbackoption')

        # Adding field 'AssessmentFeedback.feedback'
        db.add_column('assessment_assessmentfeedback', 'feedback',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=10000),
                      keep_default=False)

        # Adding field 'AssessmentFeedback.helpfulness'
        db.add_column('assessment_assessmentfeedback', 'helpfulness',
                      self.gf('django.db.models.fields.IntegerField')(default=2),
                      keep_default=False)

        # Deleting field 'AssessmentFeedback.feedback_text'
        db.delete_column('assessment_assessmentfeedback', 'feedback_text')

        # Removing M2M table for field options on 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback_options')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': "orm['assessment.AssessmentFeedbackOption']", 'symmetrical': 'False'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedbackoption': {
            'Meta': {'object_name': 'AssessmentFeedbackOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
```
apps/openassessment/assessment/models.py

```diff
@@ -405,19 +405,60 @@ class AssessmentPart(models.Model):
         ])


+class AssessmentFeedbackOption(models.Model):
+    """
+    Option a student can select to provide feedback on the feedback they received.
+
+    `AssessmentFeedback` stands in a one-to-many relationship with `AssessmentFeedbackOption`s:
+    a student can select zero or more `AssessmentFeedbackOption`s when providing feedback.
+
+    Over time, we may decide to add, delete, or reword assessment feedback options.
+    To preserve data integrity, we will always get-or-create `AssessmentFeedbackOption`s
+    based on the option text.
+    """
+    text = models.CharField(max_length=255, unique=True)
+
+
 class AssessmentFeedback(models.Model):
-    """A response to a submission's feedback, judging accuracy or helpfulness."""
+    """
+    Feedback on feedback. When students receive their grades, they
+    can provide feedback on how they were assessed, to be reviewed by course staff.
+
+    This consists of free-form written feedback
+    ("Please provide any thoughts or comments on the feedback you received from your peers")
+    as well as zero or more feedback options
+    ("Please select the statements below that reflect what you think of this peer grading experience")
+    """
     submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
     assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
-    HELPFULNESS_CHOICES = (
-        (0, 'These results were not at all helpful'),
-        (1, 'These results were somewhat helpful'),
-        (2, 'These results were helpful'),
-        (3, 'These results were very helpful'),
-        (4, 'These results were extremely helpful'),
-    )
-    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
-    feedback = models.TextField(max_length=10000, default="")
+    feedback_text = models.TextField(max_length=10000, default="")
+    options = models.ManyToManyField(AssessmentFeedbackOption, related_name='assessment_feedback', default=None)
+
+    def add_options(self, selected_options):
+        """
+        Select feedback options for this assessment.
+        Students can select zero or more options.
+
+        Note: you *must* save the model before calling this method.
+
+        Args:
+            option_text_list (list of unicode): List of options that the user selected.
+
+        Raises:
+            DatabaseError
+        """
+        # First, retrieve options that already exist
+        options = list(AssessmentFeedbackOption.objects.filter(text__in=selected_options))
+
+        # If there are additional options that do not yet exist, create them
+        new_options = [text for text in selected_options if text not in [opt.text for opt in options]]
+        for new_option_text in new_options:
+            options.append(AssessmentFeedbackOption.objects.create(text=new_option_text))
+
+        # Add all options to the feedback model
+        # Note that we've already saved each of the AssessmentFeedbackOption models,
+        # so they have primary keys (required for adding to a many-to-many relationship)
+        self.options.add(*options)


 class PeerWorkflow(models.Model):
```
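The get-or-create behavior described in the `add_options` docstring is the crux of the commit: option rows are deduplicated by their text, so re-selecting or re-submitting an option never creates conflicting rows. A minimal usage sketch, assuming a configured Django environment with the `assessment` app's tables in place (the UUID and option text here are illustrative, not from the commit):

```python
# Minimal sketch of AssessmentFeedback.add_options (assumes Django is set up;
# 'example-uuid' and the option text below are illustrative values).
from openassessment.assessment.models import AssessmentFeedback, AssessmentFeedbackOption

# The model must be saved before add_options, since the m2m rows need its primary key.
feedback = AssessmentFeedback.objects.create(
    submission_uuid='example-uuid',
    feedback_text='The assessments were fair.',
)

# Selecting the same text twice reuses the existing option row.
feedback.add_options(['These assessments were useful.'])
feedback.add_options(['These assessments were useful.'])

assert feedback.options.count() == 1
assert AssessmentFeedbackOption.objects.filter(text='These assessments were useful.').count() == 1
```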
apps/openassessment/assessment/peer_api.py

```diff
@@ -933,19 +933,17 @@ def _num_peers_graded(workflow):
 def get_assessment_feedback(submission_uuid):
-    """Retrieve a feedback object for an assessment whether it exists or not.
-
-    Gets or creates a new Assessment Feedback model for the given submission.
+    """
+    Retrieve a feedback on an assessment.

     Args:
-        submission_uuid: The submission we want to create assessment feedback
-            for.
+        submission_uuid: The submission we want to retrieve assessment feedback for.

     Returns:
-        The assessment feedback object that exists, or a newly created model.
+        dict or None

     Raises:
-        PeerAssessmentInternalError: Raised when the AssessmentFeedback cannot
-            be created or retrieved because of internal exceptions.
+        PeerAssessmentInternalError: Error occurred while retrieving the feedback.
     """
     try:
         feedback = AssessmentFeedback.objects.get(
...
@@ -964,46 +962,52 @@ def get_assessment_feedback(submission_uuid):
 def set_assessment_feedback(feedback_dict):
-    """Set a feedback object for an assessment to have some new values.
+    """
+    Set a feedback object for an assessment to have some new values.

     Sets or updates the assessment feedback with the given values in the dict.

     Args:
         feedback_dict (dict): A dictionary of all the values to update or create
             a new assessment feedback.

     Returns:
-        The modified or created feedback.
+        None
+
+    Raises:
+        PeerAssessmentRequestError
+        PeerAssessmentInternalError
     """
     submission_uuid = feedback_dict.get('submission_uuid')
-    if not submission_uuid:
-        error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
-        logger.error(error_message)
-        raise PeerAssessmentRequestError(error_message)
-
-    try:
-        assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
-    except DatabaseError:
-        error_message = (
-            u"An error occurred getting database state to set assessment feedback for {}."
-            .format(submission_uuid)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
-
-    feedback = AssessmentFeedbackSerializer(data=feedback_dict)
-    if not feedback.is_valid():
-        raise PeerAssessmentRequestError(feedback.errors)
+    feedback_text = feedback_dict.get('feedback_text')
+    selected_options = feedback_dict.get('options', list())

     try:
-        feedback_model = feedback.save()
-        # Assessments associated with feedback must be saved after the row is
-        # committed to the database in order to associated the PKs across both
-        # tables.
-        if submission_uuid:
-            feedback_model.assessments.add(*assessments)
+        # Get or create the assessment model for this submission
+        # If we receive an integrity error, assume that someone else is trying to create
+        # another feedback model for this submission, and raise an exception.
+        if submission_uuid:
+            feedback, created = AssessmentFeedback.objects.get_or_create(submission_uuid=submission_uuid)
+        else:
+            error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
+            logger.error(error_message)
+            raise PeerAssessmentRequestError(error_message)
+
+        # Update the feedback text
+        if feedback_text is not None:
+            feedback.feedback_text = feedback_text
+
+        # Save the feedback model.  We need to do this before setting m2m relations.
+        if created or feedback_text is not None:
+            feedback.save()
+
+        # Associate the feedback with selected options
+        feedback.add_options(selected_options)
+
+        # Associate the feedback with scored assessments
+        assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
+        feedback.assessments.add(*assessments)
     except DatabaseError:
-        msg = (
-            u"Error occurred while creating or updating feedback on assessment: {}"
-            .format(feedback_dict)
-        )
-        logger.exception(msg)
-        raise PeerAssessmentInternalError(msg)
-
-    return feedback.data
+        error_message = (
+            u"An error occurred saving assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
```
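Taken together, the two API functions now form a simple write/read round trip keyed on the submission UUID. A sketch of how a caller might use them, assuming scored assessments already exist for the submission (the UUID is a placeholder, not from the commit):

```python
# Sketch of the peer_api round trip (placeholder UUID; assumes scored
# assessments already exist for this submission in a configured Django app).
from openassessment.assessment import peer_api

submission_uuid = 'example-uuid'

# Create or update feedback; returns None, and raises PeerAssessmentRequestError
# or PeerAssessmentInternalError on bad input or database errors.
peer_api.set_assessment_feedback({
    'submission_uuid': submission_uuid,
    'feedback_text': u'I disliked my assessment',
    'options': [u'These assessments were not useful.'],
})

# Read it back as a serialized dict (None if no feedback exists yet).
feedback = peer_api.get_assessment_feedback(submission_uuid)
```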
apps/openassessment/assessment/serializers.py

```diff
@@ -10,8 +10,10 @@ from django.core.cache import cache
 from django.utils.translation import ugettext as _
 from rest_framework import serializers
 from openassessment.assessment.models import (
-    Assessment, AssessmentFeedback, AssessmentPart, Criterion, CriterionOption, Rubric,
-    PeerWorkflowItem, PeerWorkflow
+    Assessment, AssessmentPart, Criterion, CriterionOption, Rubric,
+    AssessmentFeedback, AssessmentFeedbackOption, PeerWorkflowItem, PeerWorkflow
 )

 logger = logging.getLogger(__name__)
...
@@ -299,15 +301,26 @@ def rubric_from_dict(rubric_dict):
     return rubric


+class AssessmentFeedbackOptionSerializer(serializers.ModelSerializer):
+    """
+    Serialize an `AssessmentFeedbackOption` model.
+    """
+    class Meta:
+        model = AssessmentFeedbackOption
+        fields = ('text',)
+
+
 class AssessmentFeedbackSerializer(serializers.ModelSerializer):
-    submission_uuid = serializers.CharField(source='submission_uuid')
-    helpfulness = serializers.IntegerField(source='helpfulness')
-    feedback = serializers.CharField(source='feedback')
+    """
+    Serialize feedback in response to an assessment.
+    """
     assessments = AssessmentSerializer(many=True, default=None, required=False)
+    options = AssessmentFeedbackOptionSerializer(many=True, default=None, required=False)

     class Meta:
         model = AssessmentFeedback
-        fields = ('submission_uuid', 'helpfulness', 'feedback', 'assessments',)
+        fields = ('submission_uuid', 'feedback_text', 'assessments', 'options')


 class PeerWorkflowSerializer(serializers.ModelSerializer):
```
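For reference, a sketch of the nested output this serializer produces, mirroring the expectations in `test_serializers.py` below (the UUID and strings are illustrative; the commented result shape is an assumption based on those tests):

```python
# Sketch: serializer output shape (illustrative values; mirrors the test file).
from openassessment.assessment.models import AssessmentFeedback
from openassessment.assessment.serializers import AssessmentFeedbackSerializer

feedback = AssessmentFeedback.objects.create(
    submission_uuid='abc123', feedback_text='Test feedback'
)
feedback.add_options(['I liked my assessment'])

serialized = AssessmentFeedbackSerializer(feedback).data
# serialized resembles:
# {
#     'submission_uuid': 'abc123',
#     'feedback_text': 'Test feedback',
#     'assessments': [],
#     'options': [{'text': 'I liked my assessment'}],
# }
```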
apps/openassessment/assessment/test/test_models.py

```diff
+# -*- coding: utf-8 -*-
 """
 Tests for assessment models.
 """

 from django.test import TestCase
 from openassessment.assessment.models import (
-    Rubric, Criterion, CriterionOption, InvalidOptionSelection
+    Rubric, Criterion, CriterionOption, InvalidOptionSelection,
+    AssessmentFeedback, AssessmentFeedbackOption,
 )
...
@@ -103,3 +105,96 @@ class TestRubricOptionIds(TestCase):
             "test criterion 2": "test option 2",
             "test criterion 3": "test option 1",
         })
+
+
+class AssessmentFeedbackTest(TestCase):
+    """
+    Tests for assessment feedback.
+    This is feedback that students give in response to the peer assessments they receive.
+    """
+
+    def setUp(self):
+        self.feedback = AssessmentFeedback.objects.create(
+            submission_uuid='test_submission',
+            feedback_text='test feedback',
+        )
+
+    def test_default_options(self):
+        self.assertEqual(self.feedback.options.count(), 0)
+
+    def test_add_options_all_new(self):
+        # We haven't created any feedback options yet, so these should be created.
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Check the feedback options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+    def test_add_options_some_new(self):
+        # Create one feedback option in the database
+        AssessmentFeedbackOption.objects.create(text='I liked my assessment')
+
+        # Add feedback options.  The one that's new should be created.
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Check the feedback options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+    def test_add_options_empty(self):
+        # No options
+        self.feedback.add_options([])
+        self.assertEqual(len(self.feedback.options.all()), 0)
+
+        # Add an option
+        self.feedback.add_options(['test'])
+        self.assertEqual(len(self.feedback.options.all()), 1)
+
+        # Add an empty list of options
+        self.feedback.add_options([])
+        self.assertEqual(len(self.feedback.options.all()), 1)
+
+    def test_add_options_duplicates(self):
+        # Add some options, which will be created
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Add some more options, one of which is a duplicate
+        self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
+
+        # There should be three options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 3)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+        self.assertEqual(options[2].text, 'I disliked my assessment')
+
+        # There should be only three options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
+
+    def test_add_options_all_old(self):
+        # Add some options, which will be created
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Add some more options, all of which are duplicates
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # There should be two options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+        # There should be two options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
+
+    def test_unicode(self):
+        # Create options with unicode
+        self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
+
+        # There should be two options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
```
apps/openassessment/assessment/test/test_peer.py

```diff
@@ -509,18 +509,25 @@ class TestPeerApi(TestCase):
         )
         feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
         self.assertIsNone(feedback)
-        feedback = peer_api.set_assessment_feedback(
+        peer_api.set_assessment_feedback(
             {
                 'submission_uuid': tim_sub['uuid'],
-                'helpfulness': 0,
-                'feedback': 'Bob is a jerk!'
+                'feedback_text': 'Bob is a jerk!',
+                'options': [
+                    'I disliked this assessment',
+                    'I felt this assessment was unfair',
+                ]
             }
         )
-        self.assertIsNotNone(feedback)
-        self.assertEquals(feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])

         saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
-        self.assertEquals(feedback, saved_feedback)
+        self.assertIsNot(saved_feedback, None)
+        self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
+        self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
+        self.assertItemsEqual(saved_feedback['options'], [
+            {'text': 'I disliked this assessment'},
+            {'text': 'I felt this assessment was unfair'},
+        ])
+        self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])

     def test_close_active_assessment(self):
         buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
...
@@ -576,8 +583,7 @@ class TestPeerApi(TestCase):
         peer_api.set_assessment_feedback(
             {
                 'submission_uuid': tim_answer['uuid'],
-                'helpfulness': 0,
-                'feedback': 'Boo',
+                'feedback_text': 'Boo',
             }
         )
```
apps/openassessment/assessment/test/test_serializers.py

```diff
@@ -4,9 +4,10 @@ import os.path
 from ddt import ddt, file_data
 from django.test import TestCase
-from openassessment.assessment.models import Criterion, CriterionOption, Rubric
+from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
 from openassessment.assessment.serializers import (
-    InvalidRubric, RubricSerializer, rubric_from_dict
+    InvalidRubric, RubricSerializer, rubric_from_dict, AssessmentFeedbackSerializer
 )


 def json_data(filename):
...
@@ -82,3 +83,36 @@ class TestCriterionOptionDeserialization(TestCase):
             ]
         }
     )
+
+
+class TestAssessmentFeedbackSerializer(TestCase):
+
+    def test_serialize(self):
+        feedback = AssessmentFeedback.objects.create(
+            submission_uuid='abc123', feedback_text='Test feedback'
+        )
+        feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        serialized = AssessmentFeedbackSerializer(feedback).data
+        self.assertItemsEqual(serialized, {
+            'submission_uuid': 'abc123',
+            'feedback_text': 'Test feedback',
+            'options': [
+                {'text': 'I liked my assessment'},
+                {'text': 'I thought my assessment was unfair'},
+            ],
+            'assessments': [],
+        })
+
+    def test_empty_options(self):
+        feedback = AssessmentFeedback.objects.create(
+            submission_uuid='abc123', feedback_text='Test feedback'
+        )
+        serialized = AssessmentFeedbackSerializer(feedback).data
+        self.assertItemsEqual(serialized, {
+            'submission_uuid': 'abc123',
+            'feedback_text': 'Test feedback',
+            'options': [],
+            'assessments': [],
+        })
```
apps/openassessment/templates/openassessmentblock/grade/oa_grade_complete.html

```diff
@@ -134,7 +134,6 @@
                     {% endwith %}
                     {% endfor %}
                 </ul>
             </li>
         </ol>
     </article>
@@ -147,31 +146,56 @@
             <p>Course staff will be able to see any feedback that you provide here when they review course records.</p>
         </div>
-        <div class="submission__feedback__elements">
-            <ol class="list list--fields submission__feeedback__fields">
-                <li class="field field--select feedback__overall" id="feedback__overall">
-                    <label for="feedback__overall__value">Overall how do you consider your peers’ assessments of your response?</label>
-                    <select id="feedback__overall__value">
-                        <option value="This assessment was useful">This assessment was useful</option>
-                        <option value="This assessment was not useful">This assessment was not useful</option>
-                        <option value="I disagree with this assessment">I disagree with this assessment</option>
-                        <option value="This assessment was inappropriate">This assessment was inappropriate</option>
-                    </select>
-                </li>
-                <li class="field field--textarea feedback__remarks" id="feedback__remarks">
-                    <label for="feedback__remarks__value">Provide any thoughts or comments on the feedback you received from your peers here.</label>
-                    <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
-                </li>
-            </ol>
-            <div class="submission__feeedback__actions">
-                <ul class="list list--actions submission__feeedback__actions">
-                    <li class="list--actions__item">
-                        <button type="submit" id="feedback__submit" class="action action--submit feedback__submit">Submit Feedback On Peer Evaluations</button>
-                    </li>
-                </ul>
-            </div>
-        </div>
+        <ol class="list list--fields submission__feedback__fields">
+            <li class="field field--radio feedback__overall" id="feedback__overall">
+                <h4>Please select the statements below that reflect what you think of this peer grading experience:</h4>
+                <ol class="list--options">
+                    <li class="option option--useful">
+                        <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--useful" class="feedback__overall__value" value="These assessments were useful." />
+                        <label for="feedback__overall__value--useful">These assessments were useful.</label>
+                    </li>
+                    <li class="option option--notuseful">
+                        <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--notuseful" class="feedback__overall__value" value="These assessments were not useful." />
+                        <label for="feedback__overall__value--notuseful">These assessments were not useful.</label>
+                    </li>
+                    <li class="option option--disagree">
+                        <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--disagree" class="feedback__overall__value" value="I disagree with the ways that my peers assessed me." />
+                        <label for="feedback__overall__value--notuseful">I disagree with the ways that my peers assessed me.</label>
+                    </li>
+                    <li class="option option--inappropriate">
+                        <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--inappropriate" class="feedback__overall__value" value="I received some inappropriate comments." />
+                        <label for="feedback__overall__value--notuseful">I received some inappropriate comments.</label>
+                    </li>
+                </ol>
+            </li>
+            <li class="field field--textarea feedback__remarks" id="feedback__remarks">
+                <label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
+                <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
+            </li>
+        </ol>
+        <div class="submission__feeedback__actions">
+            <ul class="list list--actions submission__feeedback__actions">
+                <li class="list--actions__item">
+                    <button type="submit" id="feedback__submit" class="action action--submit feedback__submit">Submit Feedback On Peer Evaluations</button>
+                </li>
+            </ul>
+        </div>
     </div>
 </form>
 </div>
```
apps/openassessment/xblock/grade_mixin.py

```diff
@@ -71,35 +71,30 @@ class GradeMixin(object):
         return self.render_assessment(path, context)

     @XBlock.json_handler
-    def feedback_submit(self, data, suffix=''):
-        """Attach the Assessment Feedback text to some submission."""
-        assessment_feedback = data.get('feedback', '')
-        if not assessment_feedback:
-            return {
-                'success': False,
-                'msg': _(u"No feedback given, so none recorded")
-            }
+    def submit_feedback(self, data, suffix=''):
+        """
+        Submit feedback on an assessment.
+
+        Args:
+            data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode).
+
+        Kwargs:
+            suffix (str): Unused
+
+        Returns:
+            Dict with keys 'success' (bool) and 'msg' (unicode)
+        """
+        feedback_text = data.get('feedback_text', u'')
+        feedback_options = data.get('feedback_options', list())
+
         try:
-            peer_api.set_assessment_feedback(
-                {
-                    'submission_uuid': self.submission_uuid,
-                    'feedback': assessment_feedback,
-                    'helpfulness': 0
-                }
-            )
-        except (
-            peer_api.PeerAssessmentInternalError,
-            peer_api.PeerAssessmentRequestError
-        ):
-            return {
-                'success': False,
-                'msg': _(
-                    u"Assessment Feedback could not be saved due to an internal "
-                    u"server error."
-                ),
-            }
-        return {
-            'success': True, 'msg': _(u"Feedback saved!")
-        }
+            peer_api.set_assessment_feedback({
+                'submission_uuid': self.submission_uuid,
+                'feedback_text': feedback_text,
+                'options': feedback_options,
+            })
+        except (peer_api.PeerAssessmentInternalError, peer_api.PeerAssessmentRequestError):
+            return {'success': False, 'msg': _(u"Assessment feedback could not be saved.")}
+        else:
+            return {'success': True, 'msg': _(u"Feedback saved!")}
```
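The handler rename also changes the client contract: the JSON body now carries `feedback_text` and `feedback_options` instead of the old `feedback`/`helpfulness` pair. A sketch of the payload a client would POST to the `submit_feedback` handler (the strings are illustrative):

```python
# Sketch: request body for the renamed 'submit_feedback' XBlock handler.
import json

payload = json.dumps({
    'feedback_text': u'I disliked my assessment',
    'feedback_options': [u'These assessments were not useful.'],
})
# On success the handler returns {'success': True, 'msg': u'Feedback saved!'}.
```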
apps/openassessment/xblock/static/js/fixtures/grade_complete.html (new file, mode 100644)

```html
<div id='openassessment-base'>
    <form id="submission__feeedback" class="submission__feeedback" method="post">
        <h3 class="submission__feeedback__title">Give Feedback On Peer Evaluations</h3>
        <div class="submission__feeedback__content">
            <div class="submission__feeedback__instructions">
                <p>Course staff will be able to see any feedback that you provide here when they review course records.</p>
            </div>
            <ol class="list list--fields submission__feedback__fields">
                <li class="field field--radio feedback__overall" id="feedback__overall">
                    <h4>Please select the statements below that reflect what you think of this peer grading experience:</h4>
                    <ol class="list--options">
                        <li class="option option--useful">
                            <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--useful" class="feedback__overall__value" value="These assessments were useful." />
                            <label for="feedback__overall__value--useful">These assessments were useful.</label>
                        </li>
                        <li class="option option--notuseful">
                            <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--notuseful" class="feedback__overall__value" value="These assessments were not useful." />
                            <label for="feedback__overall__value--notuseful">These assessments were not useful.</label>
                        </li>
                        <li class="option option--disagree">
                            <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--disagree" class="feedback__overall__value" value="I disagree with the ways that my peers assessed me." />
                            <label for="feedback__overall__value--notuseful">I disagree with the ways that my peers assessed me.</label>
                        </li>
                        <li class="option option--inappropriate">
                            <input type="checkbox" name="feedback__overall__value" id="feedback__overall__value--inappropriate" class="feedback__overall__value" value="I received some inappropriate comments." />
                            <label for="feedback__overall__value--notuseful">I received some inappropriate comments.</label>
                        </li>
                    </ol>
                </li>
                <li class="field field--textarea feedback__remarks" id="feedback__remarks">
                    <label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
                    <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
                </li>
            </ol>
        </div>
    </form>
</div>
```
apps/openassessment/xblock/static/js/openassessment.min.js
(Minified build artifact; the diff was collapsed in the original view and is not reproduced here.)
apps/openassessment/xblock/static/js/spec/oa_base.js

```diff
@@ -35,6 +35,15 @@ describe("OpenAssessment.BaseUI", function() {
             defer.resolveWith(this, [server.fragments[component]]);
         }).promise();
     };

+    this.submitFeedbackOnAssessment = function(text, options) {
+        // Store the args we receive so we can check them later
+        this.feedbackText = text;
+        this.feedbackOptions = options;
+
+        // Return a promise that always resolves successfully
+        return $.Deferred(function(defer) { defer.resolve() }).promise();
+    };
 };

 // Stub runtime
...
@@ -102,4 +111,31 @@ describe("OpenAssessment.BaseUI", function() {
             expect(server.selfAssess).toHaveBeenCalled();
         });
     });
+
+    it("Sends feedback on a submission to the server", function() {
+        jasmine.getFixtures().fixturesPath = 'base/fixtures';
+        loadFixtures('grade_complete.html');
+
+        // Simulate user feedback
+        $('#feedback__remarks__value').val('I disliked the feedback I received.');
+        $('#feedback__overall__value--notuseful').attr('checked', 'checked');
+        $('#feedback__overall__value--disagree').attr('checked', 'checked');
+
+        // Create a new stub server
+        server = new StubServer();
+
+        // Create the object under test
+        var el = $("#openassessment-base").get(0);
+        ui = new OpenAssessment.BaseUI(runtime, el, server);
+
+        // Submit feedback on an assessment
+        ui.submitFeedbackOnAssessment();
+
+        // Expect that the feedback was retrieved from the DOM and sent to the server
+        expect(server.feedbackText).toEqual('I disliked the feedback I received.');
+        expect(server.feedbackOptions).toEqual([
+            'These assessments were not useful.',
+            'I disagree with the ways that my peers assessed me.'
+        ]);
+    });
 });
```
apps/openassessment/xblock/static/js/spec/oa_server.js

```diff
@@ -106,6 +106,26 @@ describe("OpenAssessment.Server", function() {
         });
     });

+    it("Sends feedback on an assessment to the XBlock", function() {
+        stubAjax(true, {success: true, msg: ''});
+
+        var success = false;
+        var options = ["Option 1", "Option 2"];
+        server.submitFeedbackOnAssessment("test feedback", options).done(function() {
+            success = true;
+        });
+
+        expect(success).toBe(true);
+        expect($.ajax).toHaveBeenCalledWith({
+            url: '/submit_feedback',
+            type: "POST",
+            data: JSON.stringify({
+                feedback_text: "test feedback",
+                feedback_options: options,
+            })
+        });
+    });
+
     it("loads the XBlock's XML definition", function() {
         stubAjax(true, { success: true, xml: "<openassessment />" });
...
@@ -291,4 +311,26 @@ describe("OpenAssessment.Server", function() {
         expect(receivedMsg).toEqual("Test error");
     });
+
+    it("informs the caller of an AJAX error when sending feedback on submission", function() {
+        stubAjax(false, null);
+
+        var receivedMsg = null;
+        var options = ["Option 1", "Option 2"];
+        server.submitFeedbackOnAssessment("test feedback", options).fail(
+            function(errMsg) { receivedMsg = errMsg; }
+        );
+
+        expect(receivedMsg).toEqual("Could not contact server.");
+    });
+
+    it("informs the caller of a server error when sending feedback on submission", function() {
+        stubAjax(true, {success: false, msg: "Test error"});
+
+        var receivedMsg = null;
+        var options = ["Option 1", "Option 2"];
+        server.submitFeedbackOnAssessment("test feedback", options).fail(
+            function(errMsg) { receivedMsg = errMsg; }
+        );
+
+        expect(receivedMsg).toEqual("Test error");
+    });
 });
```
apps/openassessment/xblock/static/js/src/oa_base.js

```diff
@@ -260,8 +260,8 @@ OpenAssessment.BaseUI.prototype = {
             // Install a click handler for assessment feedback
             sel.find('#feedback__submit').click(function(eventObject) {
                 eventObject.preventDefault();
-                ui.feedback_assess();
+                ui.submitFeedbackOnAssessment();
             });
         }
     ).fail(function(errMsg) {
...
@@ -275,11 +275,11 @@ OpenAssessment.BaseUI.prototype = {
     **/
     save: function() {
         // Retrieve the student's response from the DOM
-        var submission = $('#submission__answer__value', this.element).val();
         var ui = this;
-        this.setSaveStatus('Saving...');
-        this.toggleActionError('save', null);
-        this.server.save(submission).done(function() {
+        var submission = $('#submission__answer__value', ui.element).val();
+        ui.setSaveStatus('Saving...');
+        ui.toggleActionError('save', null);
+        ui.server.save(submission).done(function() {
             ui.setSaveStatus("Saved but not submitted");
         }).fail(function(errMsg) {
             ui.setSaveStatus('Error');
...
@@ -302,10 +302,10 @@ OpenAssessment.BaseUI.prototype = {
     **/
     submit: function() {
         // Send the submission to the server
-        var submission = $('#submission__answer__value', this.element).val();
         var ui = this;
-        this.toggleActionError('response', null);
-        this.server.submit(submission).done(
+        var submission = $('#submission__answer__value', ui.element).val();
+        ui.toggleActionError('response', null);
+        ui.server.submit(submission).done(
             // When we have successfully sent the submission, expand the next step
             function(studentId, attemptNum) {
                 ui.renderSubmissionStep();
...
@@ -319,14 +319,19 @@ OpenAssessment.BaseUI.prototype = {
     /**
     Send assessment feedback to the server and update the UI.
    **/
-    feedback_assess: function() {
+    submitFeedbackOnAssessment: function() {
         // Send the submission to the server
-        var feedback = $('#feedback__remarks__value', this.element).val();
         var ui = this;
-        this.server.feedback_submit(feedback).done(
+        var text = $('#feedback__remarks__value', ui.element).val();
+        var options = $.map(
+            $('.feedback__overall__value:checked', ui.element),
+            function(element, index) { return $(element).val(); }
+        );
+        ui.server.submitFeedbackOnAssessment(text, options).done(
             // When we have successfully sent the submission, textarea no longer editable
-            console.log("Feedback to the assessments submitted, thanks!")
-            // TODO
-        ).fail(function(errMsg) {
+            function() {
+                console.log("Feedback to the assessments submitted, thanks!");
+            }).fail(function(errMsg) {
             // TODO: display to the user
             ui.toggleActionError('feedback_assess', errMsg);
         });
```
apps/openassessment/xblock/static/js/src/oa_server.js

```diff
@@ -161,42 +161,41 @@ OpenAssessment.Server.prototype = {
         }).promise();
     },

     /**
      * Send feedback on assessments to the XBlock.
      * Args:
-     *      feedback: The feedback given on a series of assessments associated
-     *          with this current submission.
+     *      text (string): Written feedback from the student.
+     *      options (list of strings): One or more options the student selected.
      *
      * Returns:
      *      A JQuery promise, which resolves with no args if successful and
      *      fails with an error message otherwise.
      *
      * Example:
-     *      server.feedback_submit("I dislike my reviews.").done(
+     *      server.submit_feedback(
+     *          "Good feedback!", ["I liked the feedback I received"]
+     *      ).done(function() {
      *          console.log("Success!");
-     *      ).fail(function(errMsg) {
+     *      }).fail(function(errMsg) {
      *          console.log("Error: " + errMsg);
      *      });
      */
-    feedback_submit: function(feedback) {
-        var url = this.url('feedback_submit');
+    submitFeedbackOnAssessment: function(text, options) {
+        var url = this.url('submit_feedback');
         var payload = JSON.stringify({
-            feedback: feedback
+            'feedback_text': text,
+            'feedback_options': options
         });
         return $.Deferred(function(defer) {
             $.ajax({ type: "POST", url: url, data: payload }).done(
                 function(data) {
-                    if (data.success) { defer.resolve(); }
+                    if (data.success) {
+                        defer.resolve();
+                    }
                     else {
                         defer.rejectWith(this, [data.msg]);
                     }
                 }
             ).fail(function(data) {
                 defer.rejectWith(this, ['Could not contact server.']);
             });
-        }).promise()
+        }).promise();
     },

     /**
```
View file @
ce6e0b93
...
@@ -107,8 +107,8 @@
...
@@ -107,8 +107,8 @@
<assessment
name=
"peer-assessment"
<assessment
name=
"peer-assessment"
start=
"2014-03-11T10:00-18:10"
start=
"2014-03-11T10:00-18:10"
due=
"2014-12-21T22:22-7:00"
due=
"2014-12-21T22:22-7:00"
must_grade=
"
3
"
must_grade=
"
1
"
must_be_graded_by=
"
3
"
/>
must_be_graded_by=
"
1
"
/>
<assessment
name=
"self-assessment"
/>
<assessment
name=
"self-assessment"
/>
</assessments>
</assessments>
</openassessment>
</openassessment>
apps/openassessment/xblock/submission_mixin.py

```diff
@@ -120,7 +120,7 @@ class SubmissionMixin(object):
             student_sub_dict = {'text': student_sub}
             submission = api.create_submission(student_item_dict, student_sub_dict)
-            workflow = workflow_api.create_workflow(submission["uuid"])
+            workflow_api.create_workflow(submission["uuid"])
             self.submission_uuid = submission["uuid"]
             return submission
```
...
apps/openassessment/xblock/test/test_grade.py
View file @
ce6e0b93
...
@@ -4,11 +4,18 @@ Tests for grade handlers in Open Assessment XBlock.
...
@@ -4,11 +4,18 @@ Tests for grade handlers in Open Assessment XBlock.
"""
"""
import
copy
import
copy
import
json
import
json
from
submissions
import
api
as
sub_api
from
openassessment.workflow
import
api
as
workflow_api
from
openassessment.assessment
import
peer_api
,
self_api
from
openassessment.assessment
import
peer_api
,
self_api
from
.base
import
XBlockHandlerTestCase
,
scenario
from
.base
import
XBlockHandlerTestCase
,
scenario
class
TestGrade
(
XBlockHandlerTestCase
):
class
TestGrade
(
XBlockHandlerTestCase
):
"""
View-level tests for the XBlock grade handlers.
"""
PEERS
=
[
'McNulty'
,
'Moreland'
]
ASSESSMENTS
=
[
ASSESSMENTS
=
[
{
{
...
@@ -25,19 +32,107 @@ class TestGrade(XBlockHandlerTestCase):
...
@@ -25,19 +32,107 @@ class TestGrade(XBlockHandlerTestCase):
@scenario
(
'data/grade_scenario.xml'
,
user_id
=
'Greggs'
)
@scenario
(
'data/grade_scenario.xml'
,
user_id
=
'Greggs'
)
def
test_render_grade
(
self
,
xblock
):
def
test_render_grade
(
self
,
xblock
):
# Submit, assess, and render the grade view
self
.
_create_submission_and_assessments
(
xblock
,
self
.
SUBMISSION
,
self
.
PEERS
,
self
.
ASSESSMENTS
,
self
.
ASSESSMENTS
[
0
]
)
resp
=
self
.
request
(
xblock
,
'render_grade'
,
json
.
dumps
(
dict
()))
# Verify that feedback from each scorer appears in the view
self
.
assertIn
(
u'єאςєɭɭєภՇ ฬ๏гк!'
,
resp
.
decode
(
'utf-8'
))
self
.
assertIn
(
u'Good job!'
,
resp
.
decode
(
'utf-8'
))
@scenario
(
'data/grade_scenario.xml'
,
user_id
=
'Greggs'
)
def
test_submit_feedback
(
self
,
xblock
):
# Create submissions and assessments
self
.
_create_submission_and_assessments
(
xblock
,
self
.
SUBMISSION
,
self
.
PEERS
,
self
.
ASSESSMENTS
,
self
.
ASSESSMENTS
[
0
]
)
# Submit feedback on the assessments
payload
=
json
.
dumps
({
'feedback_text'
:
u'I disliked my assessment'
,
'feedback_options'
:
[
u'Option 1'
,
u'Option 2'
],
})
resp
=
self
.
request
(
xblock
,
'submit_feedback'
,
payload
,
response_format
=
'json'
)
self
.
assertTrue
(
resp
[
'success'
])
# Verify that the feedback was created in the database
feedback
=
peer_api
.
get_assessment_feedback
(
xblock
.
submission_uuid
)
self
.
assertIsNot
(
feedback
,
None
)
self
.
assertEqual
(
feedback
[
'feedback_text'
],
u'I disliked my assessment'
)
self
.
assertItemsEqual
(
feedback
[
'options'
],
[{
'text'
:
u'Option 1'
},
{
'text'
:
u'Option 2'
}]
)
@scenario
(
'data/grade_scenario.xml'
,
user_id
=
'Bob'
)
def
test_submit_feedback_no_options
(
self
,
xblock
):
# Create submissions and assessments
self
.
_create_submission_and_assessments
(
xblock
,
self
.
SUBMISSION
,
self
.
PEERS
,
self
.
ASSESSMENTS
,
self
.
ASSESSMENTS
[
0
]
)
# Submit feedback on the assessments with no options specified
payload
=
json
.
dumps
({
'feedback_text'
:
u'I disliked my assessment'
,
'feedback_options'
:
[],
})
resp
=
self
.
request
(
xblock
,
'submit_feedback'
,
payload
,
response_format
=
'json'
)
self
.
assertTrue
(
resp
[
'success'
])
# Verify that the feedback was created in the database
feedback
=
peer_api
.
get_assessment_feedback
(
xblock
.
submission_uuid
)
self
.
assertIsNot
(
feedback
,
None
)
self
.
assertItemsEqual
(
feedback
[
'options'
],
[])
@scenario
(
'data/grade_scenario.xml'
,
user_id
=
'Bob'
)
def
test_submit_feedback_invalid_options
(
self
,
xblock
):
# Create submissions and assessments
self
.
_create_submission_and_assessments
(
xblock
,
self
.
SUBMISSION
,
self
.
PEERS
,
self
.
ASSESSMENTS
,
self
.
ASSESSMENTS
[
0
]
)
# Options should be a list, not a string
payload
=
json
.
dumps
({
'feedback_text'
:
u'I disliked my assessment'
,
'feedback_options'
:
u'should be a list!'
,
})
resp
=
self
.
request
(
xblock
,
'submit_feedback'
,
payload
,
response_format
=
'json'
)
self
.
assertFalse
(
resp
[
'success'
])
self
.
assertGreater
(
len
(
resp
[
'msg'
]),
0
)
def
_create_submission_and_assessments
(
self
,
xblock
,
submission_text
,
peers
,
peer_assessments
,
self_assessment
):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
Args:
xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
submission_text (unicode): Text of the submission from the user.
peers (list of unicode): List of user IDs of peers who will assess the user.
peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
self_assessment (dict): Dict of assessment for self-assessment.
Returns:
None
"""
# Create a submission from the user
# Create a submission from the user
student_item
=
xblock
.
get_student_item_dict
()
student_item
=
xblock
.
get_student_item_dict
()
submission
=
xblock
.
create_submission
(
student_item
,
self
.
SUBMISSION
)
student_id
=
student_item
[
'student_id'
]
xblock
.
get_workflow_info
()
submission
=
xblock
.
create_submission
(
student_item
,
submission_text
)
# Create submissions and assessments from other users
scorer_submissions
=
[]
scorer_submissions
=
[]
for
scorer_name
,
assessment
in
zip
([
'McNulty'
,
'Freamon'
],
self
.
ASSESSMENTS
):
for
scorer_name
,
assessment
in
zip
(
peers
,
peer_assessments
):
# Create a submission for each scorer
# Create a submission for each scorer for the same problem
scorer
=
copy
.
deepcopy
(
student_item
)
scorer
=
copy
.
deepcopy
(
student_item
)
scorer
[
'student_id'
]
=
scorer_name
scorer
[
'student_id'
]
=
scorer_name
scorer_sub
=
xblock
.
create_submission
(
scorer
,
self
.
SUBMISSION
)
xblock
.
get_workflow_info
()
scorer_sub
=
sub_api
.
create_submission
(
scorer
,
{
'text'
:
submission_text
})
submission
=
peer_api
.
get_submission_to_assess
(
scorer
,
2
)
workflow_api
.
create_workflow
(
scorer_sub
[
'uuid'
])
submission
=
peer_api
.
get_submission_to_assess
(
scorer
,
len
(
peers
))
# Store the scorer's submission so our user can assess it later
# Store the scorer's submission so our user can assess it later
scorer_submissions
.
append
(
scorer_sub
)
scorer_submissions
.
append
(
scorer_sub
)
...
@@ -47,28 +142,15 @@ class TestGrade(XBlockHandlerTestCase):
...
@@ -47,28 +142,15 @@ class TestGrade(XBlockHandlerTestCase):
assessment
,
{
'criteria'
:
xblock
.
rubric_criteria
}
assessment
,
{
'criteria'
:
xblock
.
rubric_criteria
}
)
)
# Since xblock.create_submission sets the xblock's submission_uuid,
# we need to set it back to the proper user for this test.
xblock
.
submission_uuid
=
submission
[
"uuid"
]
# Have our user make assessments (so she can get a score)
# Have our user make assessments (so she can get a score)
for
_
in
range
(
2
)
:
for
asmnt
in
peer_assessments
:
new_submission
=
peer_api
.
get_submission_to_assess
(
student_item
,
2
)
new_submission
=
peer_api
.
get_submission_to_assess
(
student_item
,
len
(
peers
)
)
peer_api
.
create_assessment
(
peer_api
.
create_assessment
(
new_submission
[
'uuid'
],
'Greggs'
,
new_submission
[
'uuid'
],
student_id
,
asmnt
,
{
'criteria'
:
xblock
.
rubric_criteria
}
self
.
ASSESSMENTS
[
0
],
{
'criteria'
:
xblock
.
rubric_criteria
}
)
)
# Have the user submit a self-assessment (so she can get a score)
# Have the user submit a self-assessment (so she can get a score)
self_api
.
create_assessment
(
self_api
.
create_assessment
(
submission
[
'uuid'
],
'Greggs'
,
submission
[
'uuid'
],
student_id
,
self_assessment
[
'options_selected'
],
self
.
ASSESSMENTS
[
0
][
'options_selected'
],
{
'criteria'
:
xblock
.
rubric_criteria
}
{
'criteria'
:
xblock
.
rubric_criteria
}
)
)
# Render the view
resp
=
self
.
request
(
xblock
,
'render_grade'
,
json
.
dumps
(
dict
()))
# Verify that feedback from each scorer appears in the view
self
.
assertIn
(
u'єאςєɭɭєภՇ ฬ๏гк!'
,
resp
.
decode
(
'utf-8'
))
self
.
assertIn
(
u'Good job!'
,
resp
.
decode
(
'utf-8'
))