edx / edx-ora2
Commit 84dd9255 authored Mar 28, 2014 by Stephen Sanchez

Updating the Assessment Models

parent c2f6aceb
Showing 8 changed files with 157 additions and 21 deletions.

apps/openassessment/assessment/migrations/0005_auto__del_field_peerworkflow_graded_count__add_field_peerworkflow_grad.py (+106, -0)
apps/openassessment/assessment/models.py (+1, -1)
apps/openassessment/assessment/peer_api.py (+26, -11)
apps/openassessment/assessment/test/test_peer.py (+0, -0)
apps/openassessment/management/commands/create_oa_submissions.py (+7, -1)
apps/openassessment/xblock/peer_assessment_mixin.py (+1, -0)
apps/openassessment/xblock/test/test_grade.py (+4, -2)
apps/openassessment/xblock/test/test_peer.py (+12, -6)
apps/openassessment/assessment/migrations/0005_auto__del_field_peerworkflow_graded_count__add_field_peerworkflow_grad.py (new file, mode 100644)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'PeerWorkflow.graded_count'
        db.delete_column('assessment_peerworkflow', 'graded_count')

        # Adding field 'PeerWorkflow.grading_completed_at'
        db.add_column('assessment_peerworkflow', 'grading_completed_at',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'PeerWorkflow.graded_count'
        db.add_column('assessment_peerworkflow', 'graded_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)

        # Deleting field 'PeerWorkflow.grading_completed_at'
        db.delete_column('assessment_peerworkflow', 'grading_completed_at')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedbackoption': {
            'Meta': {'object_name': 'AssessmentFeedbackOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
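Not part of the commit: a South schema migration like this one is applied with Django's standard management commands. A minimal sketch, assuming a configured settings module with South installed and that the prior migration is numbered 0004:

    from django.core.management import call_command

    # Apply pending migrations for the assessment app (runs forwards()).
    call_command('migrate', 'assessment')

    # Roll back one step to the previous migration (runs backwards()).
    call_command('migrate', 'assessment', '0004')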
apps/openassessment/assessment/models.py
...
@@ -501,7 +501,7 @@ class PeerWorkflow(models.Model):
     submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
     created_at = models.DateTimeField(default=now, db_index=True)
     completed_at = models.DateTimeField(null=True, db_index=True)
-    graded_count = models.PositiveIntegerField(default=0, db_index=True)
+    grading_completed_at = models.DateTimeField(null=True, db_index=True)
 
     class Meta:
         ordering = ["created_at", "id"]
...
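The practical effect of the model change: whether a submission can still receive peer grades is no longer a counter comparison but a NULL check on a timestamp. A minimal sketch of the kind of query this enables (hypothetical helper, not part of this commit; import path assumed from the repo layout):

    from openassessment.assessment.models import PeerWorkflow

    def gradable_workflows(course_id, item_id):
        # grading_completed_at is only stamped once enough peers have graded,
        # so an unset timestamp means the submission is still eligible.
        return PeerWorkflow.objects.filter(
            course_id=course_id,
            item_id=item_id,
            grading_completed_at__isnull=True,
        )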
apps/openassessment/assessment/peer_api.py
...
@@ -75,9 +75,15 @@ class PeerAssessmentInternalError(PeerAssessmentError):
 def is_complete(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
+        if workflow.completed_at is not None:
+            return True
+        elif _num_peers_graded(workflow) >= requirements["must_grade"]:
+            workflow.completed_at = timezone.now()
+            workflow.save()
+            return True
+        return False
     except PeerWorkflow.DoesNotExist:
         return False
-    return _num_peers_graded(workflow) >= requirements["must_grade"]
 
 
 def get_score(submission_uuid, requirements):
...
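The reworked is_complete is now also a lazy writer: the first call that observes enough peers graded stamps workflow.completed_at. A usage sketch (requirements carries "must_grade" per the diff; the threshold 3 and the import are illustrative):

    from openassessment.assessment import peer_api

    requirements = {'must_grade': 3}
    done = peer_api.is_complete(submission_uuid, requirements)
    # On the first call where the student has graded >= 3 peers, the function
    # records workflow.completed_at before returning True; later calls
    # short-circuit on the timestamp.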
@@ -121,8 +127,6 @@ def get_score(submission_uuid, requirements):
         scored_item.scored = True
         scored_item.save()
-    workflow.completed_at = timezone.now()
-    workflow.save()
 
     return {
         "points_earned": sum(
             get_assessment_median_scores(submission_uuid).values()
...
@@ -131,7 +135,13 @@ def get_score(submission_uuid, requirements):
     }
 
 
-def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict, scored_at=None):
+def create_assessment(
+        submission_uuid,
+        scorer_id,
+        assessment_dict,
+        rubric_dict,
+        graded_by,
+        scored_at=None):
     """Creates an assessment on the given submission.
 
     Assessments are created based on feedback associated with a particular
...
@@ -145,6 +155,9 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
         is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
+        graded_by (int): The required number of assessments a submission
+            requires before it is completed. If this number of assessments is
+            reached, the grading_completed_at timestamp is set for the Workflow.
 
     Kwargs:
         scored_at (datetime): Optional argument to override the time in which
...
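Callers now supply the graded_by threshold as a fifth positional argument, as the updated tests later in this commit do. An illustrative call (values are examples):

    peer_api.create_assessment(
        submission['uuid'],                    # submission being assessed
        scorer_id,                             # student doing the assessing
        assessment_dict,                       # selected options and feedback
        {'criteria': xblock.rubric_criteria},  # rubric definition
        3,                                     # graded_by: peer grades required
                                               # before grading_completed_at is set
    )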
@@ -220,7 +233,7 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
             "assessment cannot be submitted unless the associated "
             "submission came from the peer workflow."
         ))
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment)
+        _close_active_assessment(scorer_workflow, submission_uuid, assessment, graded_by)
 
         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, student_item, scorer_item)
...
@@ -852,7 +865,7 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
             "where pw.item_id=%s "
             "and pw.course_id=%s "
             "and pw.student_id<>%s "
-            "and pw.graded_count < %s "
+            "and pw.grading_completed_at is NULL "
             "and pw.id not in ("
             " select pwi.author_id "
             " from assessment_peerworkflowitem pwi "
...
@@ -870,7 +883,6 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
                 workflow.item_id,
                 workflow.course_id,
                 workflow.student_id,
-                graded_by,
                 workflow.id,
                 timeout,
                 graded_by
...
@@ -949,7 +961,7 @@ def _get_submission_for_over_grading(workflow):
         raise PeerAssessmentInternalError(error_message)
 
 
-def _close_active_assessment(workflow, submission_uuid, assessment):
+def _close_active_assessment(workflow, submission_uuid, assessment, graded_by):
     """Associate the work item with a complete assessment.
 
     Updates a workflow item on the student's workflow with the associated
...
@@ -960,6 +972,8 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         workflow (PeerWorkflow): The scorer's workflow
         submission_uuid (str): The submission the scorer is grading.
         assessment (PeerAssessment): The associate assessment for this action.
+        graded_by (int): The required number of grades the peer workflow
+            requires to be considered complete.
 
     Examples:
         >>> student_item_dict = dict(
...
@@ -970,14 +984,15 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         >>> )
         >>> workflow = _get_latest_workflow(student_item_dict)
         >>> assessment = Assessment.objects.all()[0]
-        >>> _close_active_assessment(workflow, "1", assessment)
+        >>> _close_active_assessment(workflow, "1", assessment, 3)
 
     """
     try:
         item = workflow.graded.get(submission_uuid=submission_uuid)
         item.assessment = assessment
-        item.author.graded_count += 1
-        item.author.save()
+        if item.author.graded_by.all().count() >= graded_by:
+            item.author.grading_completed_at = timezone.now()
+            item.author.save()
         item.save()
     except (DatabaseError, PeerWorkflowItem.DoesNotExist):
         error_message = _(
...
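Taken together, the two PeerWorkflow timestamps now split the old counter's job: completed_at tracks the grading the author must do (stamped in is_complete), while grading_completed_at tracks the grades the author's submission must receive (stamped here). A sketch of the resulting checks, using the fields as declared in models.py:

    def author_has_graded_enough(workflow):
        # Stamped once the author has assessed the required number of peers.
        return workflow.completed_at is not None

    def submission_fully_graded(workflow):
        # Stamped once enough peers have assessed the author's submission.
        return workflow.grading_completed_at is not None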
apps/openassessment/assessment/test/test_peer.py

This diff is collapsed.
apps/openassessment/management/commands/create_oa_submissions.py
...
@@ -97,7 +97,13 @@ class Command(BaseCommand):
                 'options_selected': options_selected,
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
-            peer_api.create_assessment(submission_uuid, scorer_id, assessment, rubric)
+            peer_api.create_assessment(
+                submission_uuid,
+                scorer_id,
+                assessment,
+                rubric,
+                self.NUM_PEER_ASSESSMENTS
+            )
 
             # Create a self-assessment
             print "-- Creating self assessment"
...
apps/openassessment/xblock/peer_assessment_mixin.py
...
@@ -69,6 +69,7 @@ class PeerAssessmentMixin(object):
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
+                assessment_ui_model['must_be_graded_by']
             )
 
             # Emit analytics event...
             self.runtime.publish(
...
apps/openassessment/xblock/test/test_grade.py
...
@@ -139,14 +139,16 @@ class TestGrade(XBlockHandlerTestCase):
         # Create an assessment of the user's submission
         peer_api.create_assessment(
             submission['uuid'],
             scorer_name,
-            assessment, {'criteria': xblock.rubric_criteria}
+            assessment, {'criteria': xblock.rubric_criteria},
+            xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
         )
 
         # Have our user make assessments (so she can get a score)
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria}
+                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
 
         # Have the user submit a self-assessment (so she can get a score)
...
apps/openassessment/xblock/test/test_peer.py
...
@@ -44,7 +44,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # Now Sally will assess Hal.
...
@@ -55,7 +56,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # If Over Grading is on, this should now return Sally's response to Bob.
...
@@ -182,7 +184,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sally_sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # Now Sally will assess Hal.
...
@@ -193,7 +196,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             hal_sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # If Over Grading is on, this should now return Sally's response to Bob.
...
@@ -214,7 +218,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sally_sub['uuid'],
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # Validate Submission Rendering.
...
@@ -230,7 +235,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             hal_sub['uuid'],
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
 
         # A Final over grading will not return anything.
...