Commit bd56d5c3 by Will Daly

Add datadog instrumentation

parent 831d2495
@@ -260,6 +260,19 @@ class Assessment(models.Model):
     def points_possible(self):
         return self.rubric.points_possible
 
+    def to_float(self):
+        """
+        Calculate the score percentage (points earned / points possible).
+
+        Returns:
+            float or None
+        """
+        if self.points_possible == 0:
+            return None
+        else:
+            return float(self.points_earned) / self.points_possible
+
     def __unicode__(self):
         return u"Assessment {}".format(self.id)

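For reference, to_float() returns None rather than raising ZeroDivisionError
when a rubric is worth zero points, so callers must handle a missing
percentage. A minimal standalone sketch of that contract (FakeAssessment is
a hypothetical stand-in for the Django model):

    class FakeAssessment(object):
        """Stand-in for Assessment, just enough to exercise to_float()."""
        def __init__(self, points_earned, points_possible):
            self.points_earned = points_earned
            self.points_possible = points_possible

        def to_float(self):
            # Mirrors the method added above: guard the zero-point case.
            if self.points_possible == 0:
                return None
            return float(self.points_earned) / self.points_possible

    assert FakeAssessment(3, 4).to_float() == 0.75
    assert FakeAssessment(3, 0).to_float() is None
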
@@ -2,14 +2,12 @@
 Public interface for self-assessment.
 """
 import logging
-from django.core.cache import cache
-from django.utils.translation import ugettext as _
-from submissions.api import (
-    get_submission_and_student, get_submission,
-    SubmissionNotFoundError, SubmissionRequestError
-)
+from dogapi import dog_stats_api
+from submissions.api import get_submission_and_student, SubmissionNotFoundError
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, InvalidRubric, RubricSerializer,
+    AssessmentSerializer, InvalidRubric,
     full_assessment_dict, rubric_from_dict, serialize_assessments
 )
 from openassessment.assessment.models import (

@@ -98,22 +96,9 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     # validation, which would otherwise require two DB queries per
     # option to do validation. We already validated these options above.
     AssessmentPart.add_to_assessment(assessment, option_ids)
 
     assessment_dict = full_assessment_dict(assessment)
+    _log_assessment(assessment, submission)
 
-    logger.info(
-        u"Created self-assessment {assessment_id} for student {user} on "
-        u"submission {submission_uuid}, course {course_id}, item {item_id} "
-        u"with rubric {rubric_content_hash}"
-        .format(
-            assessment_id=assessment.id,
-            user=user_id,
-            submission_uuid=submission_uuid,
-            course_id=submission['student_item']['course_id'],
-            item_id=submission['student_item']['item_id'],
-            rubric_content_hash=rubric.content_hash
-        )
-    )
     # Return the serialized assessment
     return assessment_dict

@@ -168,3 +153,42 @@ def is_complete(submission_uuid):
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()
+
+
+def _log_assessment(assessment, submission):
+    """
+    Log the creation of a self-assessment.
+
+    Args:
+        assessment (Assessment): The assessment model.
+        submission (dict): The serialized submission model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created self-assessment {assessment_id} for student {user} on "
+        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"with rubric {rubric_content_hash}"
+        .format(
+            assessment_id=assessment.id,
+            user=submission['student_item']['student_id'],
+            submission_uuid=submission['uuid'],
+            course_id=submission['student_item']['course_id'],
+            item_id=submission['student_item']['item_id'],
+            rubric_content_hash=assessment.rubric.content_hash
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=submission['student_item']['course_id']),
+        u"item_id:{item_id}".format(item_id=submission['student_item']['item_id']),
+        u"type:self"
+    ]
+
+    score_percentage = assessment.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram('openassessment.assessment.score_percentage', score_percentage, tags=tags)
+
+    dog_stats_api.increment('openassessment.assessment.count', tags=tags)

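These helpers assume dog_stats_api has been started during process startup;
the diff only adds metric calls, not client configuration. A minimal setup
sketch, assuming dogapi 1.x's stats API (the key below is a placeholder,
not a real credential):

    from dogapi import dog_stats_api

    # One-time initialization, e.g. in Django settings or a WSGI entry point.
    dog_stats_api.start(
        api_key='PLACEHOLDER_DATADOG_API_KEY',
        flush_interval=10,      # seconds between flushes to Datadog
        roll_up_interval=10     # aggregation window for counters/histograms
    )

    # After start(), calls like those in _log_assessment are aggregated
    # in-process and flushed on the configured interval:
    dog_stats_api.increment('openassessment.assessment.count', tags=[u'type:self'])
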
@@ -699,7 +699,7 @@ class TestPeerApi(CacheResetTest):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
         peer_api.get_assessments(tim["uuid"])
 
-    @patch.object(Submission.objects, 'get')
+    @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_assessment_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")

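The patch target moves from Submission.objects.get to
PeerWorkflow.objects.get_or_create, presumably so the simulated DatabaseError
is raised on the lookup the assessment-creation path actually performs. A
minimal sketch of the side_effect pattern itself, using a stand-in manager so
it runs without Django or a database:

    from mock import patch

    class FakeManager(object):
        """Stand-in for a Django model manager."""
        def get_or_create(self, **kwargs):
            return object(), True

    manager = FakeManager()
    with patch.object(manager, 'get_or_create') as mock_goc:
        mock_goc.side_effect = RuntimeError("Bad things happened")
        try:
            manager.get_or_create(student_id='tim')
        except RuntimeError as err:
            print("patched call raised: {}".format(err))
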
@@ -125,7 +125,11 @@ def create_workflow(submission_uuid):
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
-    except (DatabaseError, peer_api.PeerAssessmentError) as err:
+    except (
+        DatabaseError,
+        peer_api.PeerAssessmentError,
+        sub_api.SubmissionError
+    ) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
         logger.exception(err_msg)
         raise AssessmentWorkflowInternalError(err_msg)

@@ -9,6 +9,7 @@ from django.core.cache import cache
 from django.conf import settings
 from django.db import IntegrityError, DatabaseError
 from django.utils.encoding import force_unicode
+from dogapi import dog_stats_api
 from submissions.serializers import (
     SubmissionSerializer, StudentItemSerializer, ScoreSerializer, JsonFieldError

@@ -141,17 +142,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
     submission_serializer.save()
 
     sub_data = submission_serializer.data
-    logger.info(
-        u"Created submission uuid={submission_uuid} for "
-        u"(course_id={course_id}, item_id={item_id}, "
-        u"anonymous_student_id={anonymous_student_id})"
-        .format(
-            submission_uuid=sub_data["uuid"],
-            course_id=student_item_dict["course_id"],
-            item_id=student_item_dict["item_id"],
-            anonymous_student_id=student_item_dict["student_id"]
-        )
-    )
+    _log_submission(sub_data, student_item_dict)
+
     return sub_data

@@ -500,15 +491,81 @@ def set_score(submission_uuid, points_earned, points_possible):
     # In this case, we assume that someone else has already created
     # a score summary and ignore the error.
     try:
-        score.save()
-        logger.info(
-            "Score of ({}/{}) set for submission {}"
-            .format(points_earned, points_possible, submission_uuid)
-        )
+        score_model = score.save()
+        _log_score(score_model)
     except IntegrityError:
         pass
+
+
+def _log_submission(submission, student_item):
+    """
+    Log the creation of a submission.
+
+    Args:
+        submission (dict): The serialized submission model.
+        student_item (dict): The serialized student item model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        u"Created submission uuid={submission_uuid} for "
+        u"(course_id={course_id}, item_id={item_id}, "
+        u"anonymous_student_id={anonymous_student_id})"
+        .format(
+            submission_uuid=submission["uuid"],
+            course_id=student_item["course_id"],
+            item_id=student_item["item_id"],
+            anonymous_student_id=student_item["student_id"]
+        )
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=student_item['course_id']),
+        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"item_type:{item_type}".format(item_type=student_item['item_type']),
+    ]
+
+    dog_stats_api.histogram('submissions.submission.size', len(submission['answer']), tags=tags)
+    dog_stats_api.increment('submissions.submission.count', tags=tags)
+
+
+def _log_score(score):
+    """
+    Log the creation of a score.
+
+    Args:
+        score (Score): The score model.
+
+    Returns:
+        None
+    """
+    logger.info(
+        "Score of ({}/{}) set for submission {}"
+        .format(score.points_earned, score.points_possible, score.submission.uuid)
+    )
+
+    tags = [
+        u"course_id:{course_id}".format(course_id=score.student_item.course_id),
+        u"item_id:{item_id}".format(item_id=score.student_item.item_id),
+        u"item_type:{item_type}".format(item_type=score.student_item.item_type),
+    ]
+
+    time_delta = score.created_at - score.submission.created_at
+    dog_stats_api.histogram(
+        'submissions.score.seconds_since_submission',
+        time_delta.total_seconds(),
+        tags=tags
+    )
+
+    score_percentage = score.to_float()
+    if score_percentage is not None:
+        dog_stats_api.histogram(
+            'submissions.score.score_percentage',
+            score_percentage,
+            tags=tags
+        )
+
+    dog_stats_api.increment('submissions.score.count', tags=tags)
+
 
 def _get_or_create_student_item(student_item_dict):
     """Gets or creates a Student Item that matches the values specified.

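The seconds_since_submission histogram is plain datetime subtraction between
the score's and submission's created_at fields. A sketch with hypothetical
timestamps in place of the real model attributes:

    import datetime

    # Hypothetical values standing in for Submission.created_at and
    # Score.created_at (both auto-populated on the models).
    submitted_at = datetime.datetime(2014, 3, 1, 12, 0, 0)
    scored_at = datetime.datetime(2014, 3, 1, 12, 30, 0)

    time_delta = scored_at - submitted_at
    print(time_delta.total_seconds())  # 1800.0 -> value sent to the histogram
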
@@ -94,6 +94,7 @@ generated-members=
     aq_parent,
     objects,
     DoesNotExist,
+    MultipleObjectsReturned,
     can_read,
     can_write,
     get_url,

@@ -4,6 +4,7 @@ git+https://github.com/edx/xblock-sdk.git@50ed1646d24f6f0a21d6d0bb074e3b7c8a78fd
 
 # Third Party Requirements
 defusedxml==0.4.1
+dogapi==1.2.1
 django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0