edx / edx-ora2 · Commits
Commit bd56d5c3 authored Mar 31, 2014 by Will Daly
Add datadog instrumentation

parent 831d2495
Showing 8 changed files with 138 additions and 38 deletions (+138 -38)
apps/openassessment/assessment/models.py           +13  -0
apps/openassessment/assessment/peer_api.py          +0   -0
apps/openassessment/assessment/self_api.py          +44  -20
apps/openassessment/assessment/test/test_peer.py    +1   -1
apps/openassessment/workflow/api.py                 +5   -1
apps/submissions/api.py                             +73  -16
pylintrc                                            +1   -0
requirements/base.txt                               +1   -0
apps/openassessment/assessment/models.py
...
...
@@ -260,6 +260,19 @@ class Assessment(models.Model):
     def points_possible(self):
         return self.rubric.points_possible
 
+    def to_float(self):
+        """
+        Calculate the score percentage (points earned / points possible).
+
+        Returns:
+            float or None
+        """
+        if self.points_possible == 0:
+            return None
+        else:
+            return float(self.points_earned) / self.points_possible
+
     def __unicode__(self):
         return u"Assessment {}".format(self.id)
...
...
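The new Assessment.to_float() helper guards against rubrics worth zero points before computing the ratio fed to the Datadog histograms further down. A minimal standalone sketch of that behavior, using made-up point values rather than a real Assessment row:

# Illustration only: mirrors the to_float() logic from the hunk above,
# detached from the Django model. The numbers are hypothetical.
def score_percentage(points_earned, points_possible):
    if points_possible == 0:
        return None  # avoid ZeroDivisionError for zero-point rubrics
    return float(points_earned) / points_possible

assert score_percentage(8, 10) == 0.8
assert score_percentage(0, 0) is None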
apps/openassessment/assessment/peer_api.py
This diff is collapsed.
apps/openassessment/assessment/self_api.py
...
...
@@ -2,14 +2,12 @@
 Public interface for self-assessment.
 """
 import logging
 
 from django.core.cache import cache
 from django.utils.translation import ugettext as _
-from submissions.api import (
-    get_submission_and_student, get_submission,
-    SubmissionNotFoundError, SubmissionRequestError
-)
+from dogapi import dog_stats_api
+from submissions.api import get_submission_and_student, SubmissionNotFoundError
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, InvalidRubric, RubricSerializer,
+    AssessmentSerializer, InvalidRubric,
     full_assessment_dict, rubric_from_dict, serialize_assessments
 )
 from openassessment.assessment.models import (
...
...
@@ -98,22 +96,9 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
         # validation, which would otherwise require two DB queries per
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)
         assessment_dict = full_assessment_dict(assessment)
+        _log_assessment(assessment, submission)
 
-        logger.info(
-            u"Created self-assessment {assessment_id} for student {user} on "
-            u"submission {submission_uuid}, course {course_id}, item {item_id} "
-            u"with rubric {rubric_content_hash}"
-            .format(
-                assessment_id=assessment.id,
-                user=user_id,
-                submission_uuid=submission_uuid,
-                course_id=submission['student_item']['course_id'],
-                item_id=submission['student_item']['item_id'],
-                rubric_content_hash=rubric.content_hash
-            )
-        )
 
         # Return the serialized assessment
         return assessment_dict
...
...
@@ -168,3 +153,42 @@ def is_complete(submission_uuid):
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()
+
+
+def _log_assessment(assessment, submission):
+    """
+    Log the creation of a self-assessment.
+
+    Args:
+        assessment (Assessment): The assessment model.
+        submission (dict): The serialized submission model.
+
+    Returns:
+        None
+
+    """
+    logger.info(
+        u"Created self-assessment {assessment_id} for student {user} on "
+        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"with rubric {rubric_content_hash}"
+        .format(
+            assessment_id=assessment.id,
+            user=submission['student_item']['student_id'],
+            submission_uuid=submission['uuid'],
+            course_id=submission['student_item']['course_id'],
+            item_id=submission['student_item']['item_id'],
+            rubric_content_hash=assessment.rubric.content_hash
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=submission['student_item']['course_id']),
+        u"item_id:{item_id}".format(item_id=submission['student_item']['item_id']),
+        u"type:self"
+    ]
+
+    score_percentage = assessment.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('openassessment.assessment.score_precentage', score_percentage, tags=tags)
+
+    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
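The two dogapi calls at the end of _log_assessment are the core of the instrumentation: one histogram for the score percentage and one counter per assessment, both tagged with course and item identifiers. A rough standalone sketch of the same calls follows; the tag values are placeholders, and dog_stats_api generally needs to be started once at process startup (for example with an API key) before metrics are actually reported:

# Sketch of the dogapi usage pattern from the diff; not part of the commit.
from dogapi import dog_stats_api

# Assumed setup: without a prior dog_stats_api.start(api_key=...) call made
# elsewhere at startup, these statements are effectively no-ops.
tags = [
    u"course_id:example/course/2014",   # placeholder course id
    u"item_id:self-assessment-1",       # placeholder item id
    u"type:self",
]

# Histogram of score percentages (the 'score_precentage' spelling is taken
# verbatim from the commit) and a per-assessment counter.
dog_stats_api.histogram('openassessment.assessment.score_precentage', 0.8, tags=tags)
dog_stats_api.increment('openassessment.assessment.count', tags=tags)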
apps/openassessment/assessment/test/test_peer.py
...
...
@@ -699,7 +699,7 @@ class TestPeerApi(CacheResetTest):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
         peer_api.get_assessments(tim["uuid"])
 
-    @patch.object(Submission.objects, 'get')
+    @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_assessment_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
...
...
apps/openassessment/workflow/api.py
...
...
@@ -125,7 +125,11 @@ def create_workflow(submission_uuid):
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
-    except (DatabaseError, peer_api.PeerAssessmentError) as err:
+    except (
+        DatabaseError,
+        peer_api.PeerAssessmentError,
+        sub_api.SubmissionError
+    ) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
         logger.exception(err_msg)
         raise AssessmentWorkflowInternalError(err_msg)
...
...
apps/submissions/api.py
...
...
@@ -9,6 +9,7 @@ from django.core.cache import cache
 from django.conf import settings
 from django.db import IntegrityError, DatabaseError
 from django.utils.encoding import force_unicode
+from dogapi import dog_stats_api
 
 from submissions.serializers import (
     SubmissionSerializer, StudentItemSerializer, ScoreSerializer, JsonFieldError
...
...
@@ -141,17 +142,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
         submission_serializer.save()
 
         sub_data = submission_serializer.data
-        logger.info(
-            u"Created submission uuid={submission_uuid} for "
-            u"(course_id={course_id}, item_id={item_id}, "
-            u"anonymous_student_id={anonymous_student_id})"
-            .format(
-                submission_uuid=sub_data["uuid"],
-                course_id=student_item_dict["course_id"],
-                item_id=student_item_dict["item_id"],
-                anonymous_student_id=student_item_dict["student_id"]
-            )
-        )
+        _log_submission(sub_data, student_item_dict)
 
         return sub_data
...
...
@@ -500,15 +491,81 @@ def set_score(submission_uuid, points_earned, points_possible):
     # In this case, we assume that someone else has already created
     # a score summary and ignore the error.
     try:
-        score.save()
-        logger.info(
-            "Score of ({}/{}) set for submission {}"
-            .format(points_earned, points_possible, submission_uuid)
-        )
+        score_model = score.save()
+        _log_score(score_model)
     except IntegrityError:
         pass
+
+
+def _log_submission(submission, student_item):
+    """
+    Log the creation of a submission.
+
+    Args:
+        submission (dict): The serialized submission model.
+        student_item (dict): The serialized student item model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created submission uuid={submission_uuid} for "
+        u"(course_id={course_id}, item_id={item_id}, "
+        u"anonymous_student_id={anonymous_student_id})"
+        .format(
+            submission_uuid=submission["uuid"],
+            course_id=student_item["course_id"],
+            item_id=student_item["item_id"],
+            anonymous_student_id=student_item["student_id"]
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=student_item['course_id']),
+        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"item_type:{item_type}".format(item_type=student_item['item_type']),
+    ]
+
+    dog_stats_api.histogram('submissions.submission.size', len(submission['answer']), tags=tags)
+    dog_stats_api.increment('submissions.submission.count', tags=tags)
+
+
+def _log_score(score):
+    """
+    Log the creation of a score.
+
+    Args:
+        score (Score): The score model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        "Score of ({}/{}) set for submission {}"
+        .format(score.points_earned, score.points_possible, score.submission.uuid)
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=score.student_item.course_id),
+        u"item_id:{item_id}".format(item_id=score.student_item.item_id),
+        u"item_type:{item_type}".format(item_type=score.student_item.item_type),
+    ]
+
+    time_delta = score.created_at - score.submission.created_at
+    dog_stats_api.histogram(
+        'submissions.score.seconds_since_submission',
+        time_delta.total_seconds(),
+        tags=tags
+    )
+
+    score_percentage = score.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('submissions.score.score_percentage', score_percentage, tags=tags)
+
+    dog_stats_api.increment('submissions.score.count', tags=tags)
+
+
 def _get_or_create_student_item(student_item_dict):
     """Gets or creates a Student Item that matches the values specified.
...
...
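_log_score above derives a grading-latency metric by subtracting the submission's created_at timestamp from the score's, then reporting the difference in seconds. A small self-contained sketch of that calculation with hypothetical timestamps:

# Illustration of the seconds_since_submission arithmetic; timestamps are made up.
from datetime import datetime, timedelta

submission_created_at = datetime(2014, 3, 31, 12, 0, 0)
score_created_at = submission_created_at + timedelta(hours=2, minutes=30)

# Same arithmetic as _log_score: a timedelta converted to total seconds before
# being sent to the 'submissions.score.seconds_since_submission' histogram.
time_delta = score_created_at - submission_created_at
assert time_delta.total_seconds() == 9000.0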
pylintrc
...
...
@@ -94,6 +94,7 @@ generated-members=
   aq_parent,
   objects,
   DoesNotExist,
+  MultipleObjectsReturned,
   can_read,
   can_write,
   get_url,
...
...
requirements/base.txt
...
...
@@ -4,6 +4,7 @@ git+https://github.com/edx/xblock-sdk.git@50ed1646d24f6f0a21d6d0bb074e3b7c8a78fd
 # Third Party Requirements
 defusedxml==0.4.1
+dogapi==1.2.1
 django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0
...
...