edx / edx-ora2 commit bd56d5c3

Add datadog instrumentation

Authored Mar 31, 2014 by Will Daly
Parent: 831d2495

Showing 8 changed files with 317 additions and 160 deletions (+317 / -160)
apps/openassessment/assessment/models.py           +13   -0
apps/openassessment/assessment/peer_api.py          +179  -122
apps/openassessment/assessment/self_api.py          +44   -20
apps/openassessment/assessment/test/test_peer.py    +1    -1
apps/openassessment/workflow/api.py                 +5    -1
apps/submissions/api.py                             +73   -16
pylintrc                                            +1    -0
requirements/base.txt                               +1    -0
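Every file below follows the same instrumentation pattern: build a small list of course/item tags, then report a counter with dog_stats_api.increment or a distribution with dog_stats_api.histogram. A minimal, standalone sketch of that pattern, with made-up tag values (the real metric names appear in the hunks below, and starting/flushing the Datadog client is assumed to be configured elsewhere in the deployment):

    # Sketch only: the increment/histogram-with-tags pattern this commit applies.
    from dogapi import dog_stats_api

    course_id = u"edX/Demo/2014"        # illustrative values, not from the commit
    item_id = u"peer-assessment-1"

    tags = [
        u"course_id:{}".format(course_id),
        u"item_id:{}".format(item_id),
        u"type:peer",
    ]

    # Count an event and record a distribution, both sliced by the tags above.
    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
    dog_stats_api.histogram('openassessment.assessment.score_percentage', 0.75, tags=tags)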
apps/openassessment/assessment/models.py
@@ -260,6 +260,19 @@ class Assessment(models.Model):
     def points_possible(self):
         return self.rubric.points_possible

+    def to_float(self):
+        """
+        Calculate the score percentage (points earned / points possible).
+
+        Returns:
+            float or None
+        """
+        if self.points_possible == 0:
+            return None
+        else:
+            return float(self.points_earned) / self.points_possible
+
     def __unicode__(self):
         return u"Assessment {}".format(self.id)
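The zero-points guard matters because a rubric can be worth zero points; dividing would raise ZeroDivisionError, so to_float returns None and the metrics code later in this change only records the score_percentage histogram when the result is not None. A standalone illustration of the intended behaviour (not the model method itself, which lives on the Django model above):

    # Illustration only: mirrors the logic of Assessment.to_float added above.
    def to_float(points_earned, points_possible):
        if points_possible == 0:
            return None
        return float(points_earned) / points_possible

    print(to_float(3, 4))   # 0.75
    print(to_float(0, 0))   # None -- callers skip the score_percentage histogram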
apps/openassessment/assessment/peer_api.py
@@ -8,10 +8,9 @@ import copy
 import logging

 from datetime import timedelta
 from django.utils import timezone
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
-from django.db.models import Q
+from dogapi import dog_stats_api

 from openassessment.assessment.models import (
     Assessment, AssessmentFeedback, AssessmentPart,
@@ -23,8 +22,6 @@ from openassessment.assessment.serializers import (
 )

+from submissions import api as sub_api
-from submissions.api import get_submission_and_student
-from submissions.models import Submission, StudentItem
-from submissions.serializers import SubmissionSerializer, StudentItemSerializer

 logger = logging.getLogger("openassessment.assessment.peer_api")
@@ -134,12 +131,7 @@ def get_score(submission_uuid, requirements):
     }


-def create_assessment(
-        submission_uuid,
-        scorer_id,
-        assessment_dict,
-        rubric_dict,
-        scored_at=None):
+def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict, scored_at=None):
     """Creates an assessment on the given submission.

     Assessments are created based on feedback associated with a particular

@@ -177,7 +169,7 @@ def create_assessment(
     >>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
     """
     try:
-        submission = Submission.objects.get(uuid=submission_uuid)
+        submission = sub_api.get_submission_and_student(submission_uuid)
         rubric = rubric_from_dict(rubric_dict)

         # Validate that the selected options matched the rubric
@@ -192,7 +184,7 @@ def create_assessment(
         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
-            "submission_uuid": submission.uuid,
+            "submission_uuid": submission_uuid,
             "score_type": PEER_TYPE,
             "feedback": feedback,
         }
@@ -212,23 +204,12 @@ def create_assessment(
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)

-        student_item = submission.student_item
-        student_item_dict = StudentItemSerializer(student_item).data
-
-        try:
-            scorer_item = StudentItem.objects.get(
-                student_id=scorer_id,
-                item_id=student_item.item_id,
-                course_id=student_item.course_id,
-                item_type=student_item.item_type
-            )
-        except StudentItem.DoesNotExist:
-            raise PeerAssessmentWorkflowError(_(
-                "You must make a submission before assessing another student."
-            ))
-        scorer_item_dict = StudentItemSerializer(scorer_item).data
-        scorer_workflow = _get_latest_workflow(scorer_item_dict)
-        workflow = _get_latest_workflow(student_item_dict)
+        student_item = submission['student_item']
+        scorer_item = copy.deepcopy(student_item)
+        scorer_item['student_id'] = scorer_id
+
+        scorer_workflow = _get_latest_workflow(scorer_item)
+        workflow = _get_latest_workflow(student_item)

         if not scorer_workflow:
             raise PeerAssessmentWorkflowError(_(
@@ -241,20 +222,7 @@ def create_assessment(
         # Close the active assessment
         _close_active_assessment(scorer_workflow, submission_uuid, assessment)

         assessment_dict = full_assessment_dict(assessment)
-
-        logger.info(
-            u"Created peer-assessment {assessment_id} for student {user} on "
-            u"submission {submission_uuid}, course {course_id}, item {item_id} "
-            u"with rubric {rubric_content_hash}; scored by {scorer}"
-            .format(
-                assessment_id=assessment.id,
-                user=student_item_dict['student_id'],
-                submission_uuid=submission_uuid,
-                course_id=student_item_dict['course_id'],
-                item_id=student_item_dict['item_id'],
-                rubric_content_hash=rubric.content_hash,
-                scorer=scorer_id,
-            )
-        )
+        _log_assessment(assessment, student_item, scorer_item)

         return assessment_dict
     except DatabaseError:
@@ -297,8 +265,6 @@ def get_rubric_max_scores(submission_uuid):
             criterion["name"]: criterion["points_possible"]
             for criterion in rubric_dict["criteria"]
         }
-    except Submission.DoesNotExist:
-        return None
     except DatabaseError:
         error_message = _(
             u"Error getting rubric options max scores for submission uuid "
@@ -531,17 +497,9 @@ def get_submission_to_assess(
     try:
         submission_data = sub_api.get_submission(submission_uuid)
         _create_peer_workflow_item(workflow, submission_uuid)
-        logger.info(
-            u"Retrieved submission {} ({}, {}) to be assessed by {}"
-            .format(
-                submission_uuid,
-                student_item_dict["course_id"],
-                student_item_dict["item_id"],
-                student_item_dict["student_id"],
-            )
-        )
+        _log_workflow(submission_uuid, student_item_dict, over_grading)
         return submission_data
-    except sub_api.SubmissionDoesNotExist:
+    except sub_api.SubmissionNotFoundError:
         error_message = _(
             u"Could not find a submission with the uuid {} for student {} "
             u"in the peer workflow."
@@ -575,6 +533,7 @@ def create_peer_workflow(submission_uuid):
         student item and submission.

     Raises:
+        SubmissionError: There was an error retrieving the submission.
         PeerAssessmentInternalError: Raised when there is an internal error
             creating the Workflow.
@@ -583,11 +542,11 @@ def create_peer_workflow(submission_uuid):
     """
     try:
-        submission = Submission.objects.get(uuid=submission_uuid)
+        submission = sub_api.get_submission_and_student(submission_uuid)
         workflow = PeerWorkflow.objects.get_or_create(
-            student_id=submission.student_item.student_id,
-            course_id=submission.student_item.course_id,
-            item_id=submission.student_item.item_id,
+            student_id=submission['student_item']['student_id'],
+            course_id=submission['student_item']['course_id'],
+            item_id=submission['student_item']['item_id'],
             submission_uuid=submission_uuid
         )
         return workflow
@@ -626,6 +585,91 @@ def create_peer_workflow_item(scorer, submission_uuid):
     _create_peer_workflow_item(workflow, submission_uuid)


+def get_assessment_feedback(submission_uuid):
+    """
+    Retrieve a feedback on an assessment.
+
+    Args:
+        submission_uuid: The submission we want to retrieve assessment feedback for.
+
+    Returns:
+        dict or None
+
+    Raises:
+        PeerAssessmentInternalError: Error occurred while retrieving the feedback.
+    """
+    try:
+        feedback = AssessmentFeedback.objects.get(submission_uuid=submission_uuid)
+        return AssessmentFeedbackSerializer(feedback).data
+    except AssessmentFeedback.DoesNotExist:
+        return None
+    except DatabaseError:
+        error_message = (
+            u"An error occurred retrieving assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+
+def set_assessment_feedback(feedback_dict):
+    """
+    Set a feedback object for an assessment to have some new values.
+
+    Sets or updates the assessment feedback with the given values in the dict.
+
+    Args:
+        feedback_dict (dict): A dictionary of all the values to update or create
+            a new assessment feedback.
+
+    Returns:
+        None
+
+    Raises:
+        PeerAssessmentRequestError
+        PeerAssessmentInternalError
+    """
+    submission_uuid = feedback_dict.get('submission_uuid')
+    feedback_text = feedback_dict.get('feedback_text')
+    selected_options = feedback_dict.get('options', list())
+
+    if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE:
+        error_message = u"Assessment feedback too large."
+        raise PeerAssessmentRequestError(error_message)
+
+    try:
+        # Get or create the assessment model for this submission
+        # If we receive an integrity error, assume that someone else is trying to create
+        # another feedback model for this submission, and raise an exception.
+        if submission_uuid:
+            feedback, created = AssessmentFeedback.objects.get_or_create(submission_uuid=submission_uuid)
+        else:
+            error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
+            logger.error(error_message)
+            raise PeerAssessmentRequestError(error_message)
+
+        # Update the feedback text
+        if feedback_text is not None:
+            feedback.feedback_text = feedback_text
+
+        # Save the feedback model. We need to do this before setting m2m relations.
+        if created or feedback_text is not None:
+            feedback.save()
+
+        # Associate the feedback with selected options
+        feedback.add_options(selected_options)
+
+        # Associate the feedback with scored assessments
+        assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
+        feedback.assessments.add(*assessments)
+
+    except DatabaseError:
+        msg = u"Error occurred while creating or updating feedback on assessment: {}".format(feedback_dict)
+        logger.exception(msg)
+        raise PeerAssessmentInternalError(msg)
+
+
 def _get_latest_workflow(student_item_dict):
     """Given a student item, return the current workflow for this student.
@@ -972,86 +1016,99 @@ def _num_peers_graded(workflow):
     return workflow.graded.filter(assessment__isnull=False).count()

(The previous definitions of get_assessment_feedback and set_assessment_feedback are deleted from this location; their unchanged bodies now appear earlier in the file, in the hunk above. Two new Datadog logging helpers take their place:)

+def _log_assessment(assessment, student_item, scorer_item):
+    """
+    Log the creation of a peer assessment.
+
+    Args:
+        assessment (Assessment): The assessment model that was created.
+        student_item (dict): The serialized student item model of the student being scored.
+        scorer_item (dict): The serialized student item model of the student creating the assessment.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created peer-assessment {assessment_id} for student {user} on "
+        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"with rubric {rubric_content_hash}; scored by {scorer}"
+        .format(
+            assessment_id=assessment.id,
+            user=student_item['student_id'],
+            submission_uuid=assessment.submission_uuid,
+            course_id=student_item['course_id'],
+            item_id=student_item['item_id'],
+            rubric_content_hash=assessment.rubric.content_hash,
+            scorer=scorer_item['student_id'],
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=student_item['course_id']),
+        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"type:peer",
+    ]
+
+    score_percentage = assessment.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('openassessment.assessment.score_percentage', score_percentage, tags=tags)
+
+    # Calculate the time spent assessing
+    # This is the time from when the scorer retrieved the submission
+    # (created the peer workflow item) to when they completed an assessment.
+    # By this point, the assessment *should* have an associated peer workflow item,
+    # but if not, we simply skip the event.
+    try:
+        workflow_item = assessment.peerworkflowitem_set.get()
+    except (
+        PeerWorkflowItem.DoesNotExist,
+        PeerWorkflowItem.MultipleObjectsReturned,
+        DatabaseError
+    ):
+        msg = u"Could not retrieve peer workflow item for assessment: {assessment}".format(
+            assessment=assessment.id
+        )
+        logger.exception(msg)
+        workflow_item = None
+
+    if workflow_item is not None:
+        time_delta = assessment.scored_at - workflow_item.started_at
+        dog_stats_api.histogram(
+            'openassessment.assessment.seconds_spent_assessing',
+            time_delta.total_seconds(),
+            tags=tags
+        )
+
+    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
+
+
+def _log_workflow(submission_uuid, student_item, over_grading):
+    """
+    Log the creation of a peer-assessment workflow.
+
+    Args:
+        submission_uuid (str): The UUID of the submission being assessed.
+        student_item (dict): The serialized student item of the student making the assessment.
+        over_grading (bool): Whether over-grading is enabled.
+    """
+    logger.info(
+        u"Retrieved submission {} ({}, {}) to be assessed by {}"
+        .format(
+            submission_uuid,
+            student_item["course_id"],
+            student_item["item_id"],
+            student_item["student_id"],
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=student_item['course_id']),
+        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"type:peer"
+    ]
+
+    if over_grading:
+        tags.append(u"overgrading")
+
+    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
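For context on the seconds_spent_assessing histogram above: the value is the gap between PeerWorkflowItem.started_at (when the scorer pulled the submission) and Assessment.scored_at (when the assessment was completed). A standalone illustration of that calculation with made-up timestamps:

    # Illustration only: how the seconds_spent_assessing value is derived.
    from datetime import datetime, timedelta

    started_at = datetime(2014, 3, 31, 14, 0, 0)                 # scorer retrieved the submission
    scored_at = started_at + timedelta(minutes=12, seconds=30)   # assessment completed

    time_delta = scored_at - started_at
    print(time_delta.total_seconds())   # 750.0 -- the value sent to the histogram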
apps/openassessment/assessment/self_api.py
@@ -2,14 +2,12 @@
 Public interface for self-assessment.
 """
 import logging

-from django.core.cache import cache
 from django.utils.translation import ugettext as _
-from submissions.api import (
-    get_submission_and_student, get_submission, SubmissionNotFoundError,
-    SubmissionRequestError
-)
+from dogapi import dog_stats_api
+from submissions.api import get_submission_and_student, SubmissionNotFoundError
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, InvalidRubric, RubricSerializer,
+    AssessmentSerializer, InvalidRubric, full_assessment_dict,
     rubric_from_dict, serialize_assessments
 )
 from openassessment.assessment.models import (
@@ -98,22 +96,9 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
         # validation, which would otherwise require two DB queries per
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)
+        assessment_dict = full_assessment_dict(assessment)
+        _log_assessment(assessment, submission)
-
-        logger.info(
-            u"Created self-assessment {assessment_id} for student {user} on "
-            u"submission {submission_uuid}, course {course_id}, item {item_id} "
-            u"with rubric {rubric_content_hash}"
-            .format(
-                assessment_id=assessment.id,
-                user=user_id,
-                submission_uuid=submission_uuid,
-                course_id=submission['student_item']['course_id'],
-                item_id=submission['student_item']['item_id'],
-                rubric_content_hash=rubric.content_hash
-            )
-        )

         # Return the serialized assessment
         return assessment_dict
@@ -168,3 +153,42 @@ def is_complete(submission_uuid):
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()
+
+
+def _log_assessment(assessment, submission):
+    """
+    Log the creation of a self-assessment.
+
+    Args:
+        assessment (Assessment): The assessment model.
+        submission (dict): The serialized submission model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created self-assessment {assessment_id} for student {user} on "
+        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"with rubric {rubric_content_hash}"
+        .format(
+            assessment_id=assessment.id,
+            user=submission['student_item']['student_id'],
+            submission_uuid=submission['uuid'],
+            course_id=submission['student_item']['course_id'],
+            item_id=submission['student_item']['item_id'],
+            rubric_content_hash=assessment.rubric.content_hash
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=submission['student_item']['course_id']),
+        u"item_id:{item_id}".format(item_id=submission['student_item']['item_id']),
+        u"type:self"
+    ]
+
+    score_percentage = assessment.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('openassessment.assessment.score_precentage', score_percentage, tags=tags)
+
+    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
apps/openassessment/assessment/test/test_peer.py
@@ -699,7 +699,7 @@ class TestPeerApi(CacheResetTest):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
         peer_api.get_assessments(tim["uuid"])

-    @patch.object(Submission.objects, 'get')
+    @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_assessment_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
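If one wanted to cover the new instrumentation in this test module, the mock-based @patch.object pattern it already uses could be reused. A hypothetical sketch (the test class, test name, and data below are illustrative and not part of this commit; it assumes the reconstructed helper signature peer_api._log_workflow(submission_uuid, student_item, over_grading) shown above, and CacheResetTest as already imported by this module):

    # Hypothetical sketch: verify the peer-workflow counter is emitted with the expected tags.
    from mock import patch

    from openassessment.assessment import peer_api


    class TestPeerInstrumentation(CacheResetTest):

        @patch.object(peer_api.dog_stats_api, 'increment')
        def test_log_workflow_increments_counter(self, mock_increment):
            student_item = {
                "student_id": "Tim",
                "course_id": "demo/course/2014",
                "item_id": "peer-assessment-1",
            }
            peer_api._log_workflow("some-uuid", student_item, over_grading=True)
            mock_increment.assert_called_once_with(
                'openassessment.assessment.peer_workflow.count',
                tags=[
                    u"course_id:demo/course/2014",
                    u"item_id:peer-assessment-1",
                    u"type:peer",
                    u"overgrading",
                ]
            )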
apps/openassessment/workflow/api.py
@@ -125,7 +125,11 @@ def create_workflow(submission_uuid):
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
-    except (DatabaseError, peer_api.PeerAssessmentError) as err:
+    except (
+        DatabaseError,
+        peer_api.PeerAssessmentError,
+        sub_api.SubmissionError
+    ) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
         logger.exception(err_msg)
         raise AssessmentWorkflowInternalError(err_msg)
apps/submissions/api.py
@@ -9,6 +9,7 @@ from django.core.cache import cache
 from django.conf import settings
 from django.db import IntegrityError, DatabaseError
 from django.utils.encoding import force_unicode
+from dogapi import dog_stats_api

 from submissions.serializers import (
     SubmissionSerializer, StudentItemSerializer, ScoreSerializer, JsonFieldError
@@ -141,17 +142,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
     submission_serializer.save()

     sub_data = submission_serializer.data
-    logger.info(
-        u"Created submission uuid={submission_uuid} for "
-        u"(course_id={course_id}, item_id={item_id}, "
-        u"anonymous_student_id={anonymous_student_id})"
-        .format(
-            submission_uuid=sub_data["uuid"],
-            course_id=student_item_dict["course_id"],
-            item_id=student_item_dict["item_id"],
-            anonymous_student_id=student_item_dict["student_id"]
-        )
-    )
+    _log_submission(sub_data, student_item_dict)

     return sub_data
@@ -500,15 +491,81 @@ def set_score(submission_uuid, points_earned, points_possible):
     # In this case, we assume that someone else has already created
     # a score summary and ignore the error.
     try:
-        score.save()
-        logger.info(
-            "Score of ({}/{}) set for submission {}".format(
-                points_earned, points_possible, submission_uuid
-            )
-        )
+        score_model = score.save()
+        _log_score(score_model)
     except IntegrityError:
         pass


+def _log_submission(submission, student_item):
+    """
+    Log the creation of a submission.
+
+    Args:
+        submission (dict): The serialized submission model.
+        student_item (dict): The serialized student item model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created submission uuid={submission_uuid} for "
+        u"(course_id={course_id}, item_id={item_id}, "
+        u"anonymous_student_id={anonymous_student_id})"
+        .format(
+            submission_uuid=submission["uuid"],
+            course_id=student_item["course_id"],
+            item_id=student_item["item_id"],
+            anonymous_student_id=student_item["student_id"]
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=student_item['course_id']),
+        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"item_type:{item_type}".format(item_type=student_item['item_type']),
+    ]
+
+    dog_stats_api.histogram('submissions.submission.size', len(submission['answer']), tags=tags)
+    dog_stats_api.increment('submissions.submission.count', tags=tags)
+
+
+def _log_score(score):
+    """
+    Log the creation of a score.
+
+    Args:
+        score (Score): The score model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        "Score of ({}/{}) set for submission {}".format(
+            score.points_earned, score.points_possible, score.submission.uuid
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=score.student_item.course_id),
+        u"item_id:{item_id}".format(item_id=score.student_item.item_id),
+        u"item_type:{item_type}".format(item_type=score.student_item.item_type),
+    ]
+
+    time_delta = score.created_at - score.submission.created_at
+    dog_stats_api.histogram(
+        'submissions.score.seconds_since_submission',
+        time_delta.total_seconds(),
+        tags=tags
+    )
+
+    score_percentage = score.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('submissions.score.score_percentage', score_percentage, tags=tags)
+
+    dog_stats_api.increment('submissions.score.count', tags=tags)
+
+
 def _get_or_create_student_item(student_item_dict):
     """Gets or creates a Student Item that matches the values specified.
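One small reading note for the submissions.submission.size histogram added above: the value is Python's len() of the stored answer object, so it means character count if the answer is a string but key count if it is a dict. A tiny illustration:

    # Illustration only: what len(submission['answer']) yields for different answer shapes.
    print(len(u"My essay text"))            # 13 -- characters, for a string answer
    print(len({"text": u"My essay text"}))  # 1  -- keys, for a dict answer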
pylintrc
@@ -94,6 +94,7 @@ generated-members=
     aq_parent,
     objects,
     DoesNotExist,
+    MultipleObjectsReturned,
     can_read,
     can_write,
     get_url,
requirements/base.txt
@@ -4,6 +4,7 @@ git+https://github.com/edx/xblock-sdk.git@50ed1646d24f6f0a21d6d0bb074e3b7c8a78fd

 # Third Party Requirements
 defusedxml==0.4.1
+dogapi==1.2.1
 django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0