Commit c14df4da, authored Mar 18, 2014 by Will Daly
Unlink assessment models from submission models
Parent: d964ea41
Showing 8 changed files with 149 additions and 64 deletions.
apps/openassessment/assessment/migrations/0003_auto__del_field_assessment_submission__add_field_assessment_submission.py  (+96, -0)
apps/openassessment/assessment/models.py  (+6, -15)
apps/openassessment/assessment/peer_api.py  (+25, -22)
apps/openassessment/assessment/self_api.py  (+4, -11)
apps/openassessment/assessment/serializers.py  (+4, -7)
apps/openassessment/xblock/test/test_peer.py  (+1, -1)
fixtures/assessments.json  (+7, -8)
manage.py  (+6, -0)
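The change these files implement: an Assessment no longer holds a foreign key to a Submission row, only the submission's UUID, so callers filter on the UUID directly instead of resolving a Submission object first. A rough sketch of the old and new query patterns, taken from the lines removed and added in peer_api.py below (variable names are the ones used there):

    # Old pattern: look up the Submission row, then filter assessments through the FK.
    submission = Submission.objects.get(uuid=submission_uuid)
    assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)

    # New pattern: filter assessments directly on the indexed submission_uuid field.
    assessments = Assessment.objects.filter(submission_uuid=submission_uuid, score_type=PEER_TYPE)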
apps/openassessment/assessment/migrations/0003_auto__del_field_assessment_submission__add_field_assessment_submission.py
New file (mode 100644)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'Assessment.submission'
        db.delete_column('assessment_assessment', 'submission_id')

        # Adding field 'Assessment.submission_uuid'
        db.add_column('assessment_assessment', 'submission_uuid',
                      self.gf('django.db.models.fields.CharField')(default="", max_length=128, db_index=True),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'Assessment.submission'
        db.add_column('assessment_assessment', 'submission',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['submissions.Submission']),
                      keep_default=False)

        # Deleting field 'Assessment.submission_uuid'
        db.delete_column('assessment_assessment', 'submission_uuid')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'helpfulness': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scorer_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
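Assuming a standard South setup (this project predates Django's built-in migrations), the file above would be applied with the usual python manage.py migrate assessment. Note that forwards() adds submission_uuid with default="" on existing rows; this commit does not appear to include a data migration to backfill the UUID from the old foreign key.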
apps/openassessment/assessment/models.py
@@ -20,8 +20,6 @@ from django.utils.timezone import now
 from django.utils.translation import ugettext as _
 import math
 
-from submissions.models import Submission
-
 
 class InvalidOptionSelection(Exception):
     """
@@ -220,7 +218,7 @@ class Assessment(models.Model):
     objects that map to each :class:`Criterion` in the :class:`Rubric` we're
     assessing against.
     """
-    submission = models.ForeignKey(Submission)
+    submission_uuid = models.CharField(max_length=128, db_index=True)
     rubric = models.ForeignKey(Rubric)
     scored_at = models.DateTimeField(default=now, db_index=True)
@@ -241,10 +239,6 @@ class Assessment(models.Model):
     def points_possible(self):
         return self.rubric.points_possible
 
-    @property
-    def submission_uuid(self):
-        return self.submission.uuid
-
     def __unicode__(self):
         return u"Assessment {}".format(self.id)
@@ -313,7 +307,7 @@ class Assessment(models.Model):
         return median_score
 
     @classmethod
-    def scores_by_criterion(cls, submission, must_be_graded_by):
+    def scores_by_criterion(cls, submission_uuid, must_be_graded_by):
         """Create a dictionary of lists for scores associated with criterion
 
         Create a key value in a dict with a list of values, for every criterion
@@ -324,20 +318,17 @@ class Assessment(models.Model):
         of scores.
 
         Args:
-            submission (Submission): Obtain assessments associated with this
-                submission
-            must_be_graded_by (int): The number of assessments to include in
-                this score analysis.
+            submission_uuid (str): Obtain assessments associated with this submission.
+            must_be_graded_by (int): The number of assessments to include in this score analysis.
 
         Examples:
-            >>> Assessment.scores_by_criterion(submission, 3)
+            >>> Assessment.scores_by_criterion('abcd', 3)
            {
                "foo": [1, 2, 3],
                "bar": [6, 7, 8]
            }
 
        """
-        assessments = cls.objects.filter(submission=submission).order_by("scored_at")[:must_be_graded_by]
+        assessments = cls.objects.filter(submission_uuid=submission_uuid).order_by("scored_at")[:must_be_graded_by]
         scores = defaultdict(list)
         for assessment in assessments:
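With submission_uuid now a real column on Assessment, the median-score helpers chain together on the UUID alone. A minimal sketch mirroring get_assessment_median_scores in peer_api.py below (the UUID value is illustrative):

    # Collect per-criterion score lists from the first N assessments of a submission,
    # then reduce each criterion's list to its median.
    scores = Assessment.scores_by_criterion("beb581f0-a9da-11e3-9b83-080027880ca6", 3)
    # e.g. {"foo": [1, 2, 3], "bar": [6, 7, 8]}
    medians = Assessment.get_median_score_dict(scores)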
apps/openassessment/assessment/peer_api.py
@@ -78,12 +78,22 @@ def is_complete(submission_uuid, requirements):
 
 def get_score(submission_uuid, requirements):
+    """
+    Retrieve a score for a submission if requirements have been satisfied.
+
+    Args:
+        submission_uuid (str): The UUID of the submission.
+        requirements (dict): Description of requirements for receiving a score,
+            specific to the particular kind of submission (e.g. self or peer).
+
+    Returns:
+        dict with keys "points_earned" and "points_possible".
+    """
     # User hasn't completed their own submission yet
     if not is_complete(submission_uuid, requirements):
         return None
 
-    submission = Submission.objects.get(uuid=submission_uuid)
-    assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
+    assessments = Assessment.objects.filter(submission_uuid=submission_uuid, score_type=PEER_TYPE)
     submission_finished = _check_submission_graded(submission_uuid, requirements["must_be_graded_by"])
     if not submission_finished:
@@ -92,7 +102,7 @@ def get_score(submission_uuid, requirements):
     return {
         "points_earned": sum(
             get_assessment_median_scores(
-                submission.uuid, requirements["must_be_graded_by"]
+                submission_uuid, requirements["must_be_graded_by"]
             ).values()
         ),
         "points_possible": assessments[0].points_possible,
@@ -157,7 +167,7 @@ def create_assessment(
     peer_assessment = {
         "rubric": rubric.id,
         "scorer_id": scorer_id,
-        "submission": submission.pk,
+        "submission_uuid": submission.uuid,
         "score_type": PEER_TYPE,
         "feedback": feedback,
         "parts": [{"option": option_id} for option_id in option_ids]
@@ -228,8 +238,7 @@ def get_rubric_max_scores(submission_uuid):
         the submission, or its associated rubric.
     """
     try:
-        submission = Submission.objects.get(uuid=submission_uuid)
-        assessments = Assessment.objects.filter(submission=submission).order_by("-scored_at", "-id")
+        assessments = Assessment.objects.filter(submission_uuid=submission_uuid).order_by("-scored_at", "-id")
         if assessments:
             return {
                 criterion.name: criterion.points_possible
@@ -246,7 +255,7 @@ def get_rubric_max_scores(submission_uuid):
         raise PeerAssessmentInternalError(error_message)
 
 
-def get_assessment_median_scores(submission_id, must_be_graded_by):
+def get_assessment_median_scores(submission_uuid, must_be_graded_by):
     """Get the median score for each rubric criterion
 
     For a given assessment, collect the median score for each criterion on the
@@ -263,10 +272,8 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     assessments are used.
 
     Args:
-        submission_id (str): The submission uuid to get all rubric criterion
-            median scores.
-        must_be_graded_by (int): The number of assessments to include in this
-            score analysis.
+        submission_uuid (str): The submission uuid to get all rubric criterion median scores.
+        must_be_graded_by (int): The number of assessments to include in this score analysis.
 
     Returns:
         (dict): A dictionary of rubric criterion names, with a median score of
@@ -279,13 +286,10 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     # Create a key value in a dict with a list of values, for every criterion
     # found in an assessment.
     try:
-        submission = Submission.objects.get(uuid=submission_id)
-        scores = Assessment.scores_by_criterion(submission, must_be_graded_by)
+        scores = Assessment.scores_by_criterion(submission_uuid, must_be_graded_by)
         return Assessment.get_median_score_dict(scores)
     except DatabaseError:
-        error_message = _(u"Error getting assessment median scores {}".format(submission_id))
+        error_message = _(u"Error getting assessment median scores {}".format(submission_uuid))
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -337,7 +341,7 @@ def has_finished_required_evaluating(student_item_dict, required_assessments):
     return done, count
 
 
-def get_assessments(submission_id):
+def get_assessments(submission_uuid):
     """Retrieve the assessments for a submission.
 
     Retrieves all the assessments for a submissions. This API returns related
@@ -345,8 +349,8 @@ def get_assessments(submission_id):
     assessments associated with this submission will not be returned.
 
     Args:
-        submission_id (str): The submission all the requested assessments are
-            associated with. Required.
+        submission_uuid (str): The UUID of the submission all the
+            requested assessments are associated with.
 
     Returns:
         list(dict): A list of dictionaries, where each dictionary represents a
@@ -379,11 +383,10 @@ def get_assessments(submission_id):
     """
     try:
-        submission = Submission.objects.get(uuid=submission_id)
-        return get_assessment_review(submission)
+        return get_assessment_review(submission_uuid)
     except DatabaseError:
         error_message = _(
-            u"Error getting assessments for submission {}".format(submission_id)
+            u"Error getting assessments for submission {}".format(submission_uuid)
         )
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
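A hedged usage sketch for the reworked peer API; the UUID is illustrative, and only the "must_be_graded_by" key is shown because that is the only requirements key get_score reads in this diff:

    from openassessment.assessment import peer_api

    requirements = {"must_be_graded_by": 3}
    score = peer_api.get_score("beb581f0-a9da-11e3-9b83-080027880ca6", requirements)
    if score is not None:
        points_earned = score["points_earned"]
        points_possible = score["points_possible"]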
apps/openassessment/assessment/self_api.py
@@ -11,9 +11,6 @@ from openassessment.assessment.serializers import (
 )
 from openassessment.assessment.models import Assessment, InvalidOptionSelection
 
-# TODO -- remove once Dave's changes land
-from submissions.models import Submission
-
 
 # Assessments are tagged as "self-evaluation"
 SELF_TYPE = "SE"
@@ -46,8 +43,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
         SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
     """
     # Check that there are not any assessments for this submission
-    # TODO -- change key lookup for submission UUID once Dave's changes land
-    if Assessment.objects.filter(submission__uuid=submission_uuid, score_type=SELF_TYPE).exists():
+    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
         raise SelfAssessmentRequestError(_("Self assessment already exists for this submission"))
 
     # Check that the student is allowed to assess this submission
@@ -75,8 +71,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     self_assessment = {
         "rubric": rubric.id,
         "scorer_id": user_id,
-        # TODO -- replace once Dave adds submission_uuid as a field on the assessment
-        "submission": Submission.objects.get(uuid=submission_uuid).pk,
+        "submission_uuid": submission_uuid,
         "score_type": SELF_TYPE,
         "feedback": u"",
         "parts": [{"option": option_id} for option_id in option_ids],
@@ -131,13 +126,11 @@ def get_submission_and_assessment(submission_uuid):
     # between checking the number of self-assessments and creating a new self-assessment.
     # To be safe, we retrieve just the most recent submission.
     assessments = Assessment.objects.filter(
-        score_type=SELF_TYPE, submission__uuid=submission_uuid
+        score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).order_by('-scored_at')
 
     if assessments.exists():
-        # TODO -- remove once Dave's changes land
         assessment_dict = full_assessment_dict(assessments[0])
-        assessment_dict['submission_uuid'] = submission_uuid
         return submission, assessment_dict
     else:
         return submission, None
@@ -154,5 +147,5 @@ def is_complete(submission_uuid):
         bool
     """
     return Assessment.objects.filter(
-        score_type=SELF_TYPE, submission__uuid=submission_uuid
+        score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()
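The self-assessment API keeps the same signatures but now filters on the model field directly. A rough usage sketch (the UUID is illustrative):

    from openassessment.assessment import self_api

    # True once a self-assessment exists for this submission (see is_complete above).
    done = self_api.is_complete("beb581f0-a9da-11e3-9b83-080027880ca6")

    # The submission plus its most recent self-assessment dict, or None if there is none.
    submission, assessment = self_api.get_submission_and_assessment(
        "beb581f0-a9da-11e3-9b83-080027880ca6"
    )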
apps/openassessment/assessment/serializers.py
@@ -108,7 +108,6 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
 
 class AssessmentSerializer(serializers.ModelSerializer):
     """Serializer for :class:`Assessment`."""
-    submission_uuid = serializers.Field(source='submission_uuid')
     parts = AssessmentPartSerializer(required=True, many=True)
     points_earned = serializers.Field(source='points_earned')
@@ -117,7 +116,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
     class Meta:
         model = Assessment
         fields = (
-            'submission',  # will go away shortly
+            'submission_uuid',
             'rubric',
             'scored_at',
             'scorer_id',
@@ -128,13 +127,12 @@ class AssessmentSerializer(serializers.ModelSerializer):
             'parts',
 
             # Computed, not part of the model
-            'submission_uuid',
             'points_earned',
             'points_possible',
         )
 
 
-def get_assessment_review(submission):
+def get_assessment_review(submission_uuid):
     """Get all information pertaining to an assessment for review.
 
     Given an assessment serializer, return a serializable formatted model of
@@ -142,8 +140,7 @@ def get_assessment_review(submission):
     associated rubric.
 
     Args:
-        submission (Submission): The Submission Model object to get
-            assessment reviews for.
+        submission_uuid (str): The UUID of the submission whose assessment reviews we want to retrieve.
 
     Returns:
         (list): A list of assessment reviews, combining assessments with
@@ -186,7 +183,7 @@ def get_assessment_review(submission):
     """
     return [
         full_assessment_dict(assessment)
-        for assessment in Assessment.objects.filter(submission=submission)
+        for assessment in Assessment.objects.filter(submission_uuid=submission_uuid)
     ]
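Since submission_uuid is now a model field, the serializer no longer needs a computed Field for it, and get_assessment_review takes the UUID directly. A small sketch (the UUID is illustrative; the available keys are the ones listed in Meta.fields above):

    from openassessment.assessment.serializers import get_assessment_review

    reviews = get_assessment_review("beb581f0-a9da-11e3-9b83-080027880ca6")
    for review in reviews:
        # Each entry is a serialized assessment, e.g. with points_earned / points_possible.
        earned = review["points_earned"]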
apps/openassessment/xblock/test/test_peer.py
@@ -90,7 +90,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         xblock.create_submission(another_student, self.SUBMISSION)
 
         xblock.get_workflow_info()
 
         peer_api.get_submission_to_assess(another_student, 3)
 
         # Submit an assessment and expect a successful response
         assessment = copy.deepcopy(self.ASSESSMENT)
fixtures/assessments.json
@@ -213,7 +213,7 @@
     "fields": {
         "scorer_id": "3",
         "feedback": "Test feedback",
-        "submission": 8,
+        "submission_uuid": "beb581f0-a9da-11e3-9b83-080027880ca6",
         "score_type": "PE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:38:26Z"
@@ -225,7 +225,7 @@
     "fields": {
         "scorer_id": "2",
         "feedback": "",
-        "submission": 8,
+        "submission_uuid": "beb581f0-a9da-11e3-9b83-080027880ca6",
         "score_type": "SE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:38:13Z"
@@ -237,7 +237,7 @@
     "fields": {
         "scorer_id": "2",
         "feedback": "Test feedback",
-        "submission": 9,
+        "submission_uuid": "c18b952c-a9da-11e3-9ccf-080027880ca6",
         "score_type": "PE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:38:08Z"
@@ -249,7 +249,7 @@
     "fields": {
         "scorer_id": "2",
         "feedback": "",
-        "submission": 6,
+        "submission_uuid": "9af86840-a9da-11e3-b156-080027880ca6",
         "score_type": "SE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:37:37Z"
@@ -261,7 +261,7 @@
     "fields": {
         "scorer_id": "2",
         "feedback": "Test feedback",
-        "submission": 7,
+        "submission_uuid": "ac0539f6-a9da-11e3-af02-080027880ca6",
         "score_type": "PE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:37:33Z"
@@ -273,7 +273,7 @@
     "fields": {
         "scorer_id": "2",
         "feedback": "Test feedback",
-        "submission": 4,
+        "submission_uuid": "7e8ededc-a9da-11e3-89ce-080027880ca6",
         "score_type": "PE",
         "rubric": 2,
         "scored_at": "2014-03-12T11:36:19Z"
@@ -534,4 +534,4 @@
         "submission_uuid": "beb581f0-a9da-11e3-9b83-080027880ca6"
     }
 }
-]
\ No newline at end of file
+]
manage.py
#!/usr/bin/env python
import sys
import os

if __name__ == "__main__":

    # Default to dev settings for convenience
    if os.environ.get('DJANGO_SETTINGS_MODULE') is None:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.dev'

    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
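With the default above, management commands such as python manage.py migrate or python manage.py runserver fall back to settings.dev unless DJANGO_SETTINGS_MODULE is already set in the environment.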