Commit 395d162d by David Ormsbee

Merge pull request #229 from edx/ormsbee/analytics_events

Add event and INFO level logging.
parents d32de309 9fbe33bf
......@@ -59,3 +59,6 @@ ora2db
testdb
fixture_err.log
apps/openassessment/xblock/static/js/fixtures/*.html
# logging
logs/*.log*
......@@ -26,7 +26,7 @@ from submissions.api import get_submission_and_student
from submissions.models import Submission, StudentItem
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
logger = logging.getLogger(__name__)
logger = logging.getLogger("openassessment.assessment.peer_api")
PEER_TYPE = "PE"
TIME_LIMIT = timedelta(hours=8)
......@@ -240,8 +240,23 @@ def create_assessment(
"submission came from the peer workflow."))
# Close the active assessment
_close_active_assessment(scorer_workflow, submission_uuid, assessment)
assessment_dict = full_assessment_dict(assessment)
logger.info(
u"Created peer-assessment {assessment_id} for student {user} on "
u"submission {submission_uuid}, course {course_id}, item {item_id} "
u"with rubric {rubric_content_hash}; scored by {scorer}"
.format(
assessment_id=assessment.id,
user=student_item_dict['student_id'],
submission_uuid=submission_uuid,
course_id=student_item_dict['course_id'],
item_id=student_item_dict['item_id'],
rubric_content_hash=rubric.content_hash,
scorer=scorer_id,
)
)
return full_assessment_dict(assessment)
return assessment_dict
except DatabaseError:
error_message = _(
u"An error occurred while creating assessment {} for submission: "
......@@ -516,6 +531,15 @@ def get_submission_to_assess(
try:
submission_data = sub_api.get_submission(submission_uuid)
_create_peer_workflow_item(workflow, submission_uuid)
logger.info(
u"Retrieved submission {} ({}, {}) to be assessed by {}"
.format(
submission_uuid,
student_item_dict["course_id"],
student_item_dict["item_id"],
student_item_dict["student_id"],
)
)
return submission_data
except sub_api.SubmissionDoesNotExist:
error_message = _(
......@@ -526,6 +550,14 @@ def get_submission_to_assess(
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
else:
logger.info(
u"No submission found for {} to assess ({}, {})"
.format(
student_item_dict["student_id"],
student_item_dict["course_id"],
student_item_dict["item_id"],
)
)
return None
......
"""
Public interface for self-assessment.
"""
import logging
from django.core.cache import cache
from django.utils.translation import ugettext as _
from submissions.api import (
......@@ -19,6 +20,8 @@ from openassessment.assessment.models import (
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.self_api")
class SelfAssessmentRequestError(Exception):
"""
......@@ -96,8 +99,24 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
# option to do validation. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids)
assessment_dict = full_assessment_dict(assessment)
logger.info(
u"Created self-assessment {assessment_id} for student {user} on "
u"submission {submission_uuid}, course {course_id}, item {item_id} "
u"with rubric {rubric_content_hash}"
.format(
assessment_id=assessment.id,
user=user_id,
submission_uuid=submission_uuid,
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
rubric_content_hash=rubric.content_hash
)
)
# Return the serialized assessment
return full_assessment_dict(assessment)
return assessment_dict
def get_assessment(submission_uuid):
......@@ -124,7 +143,16 @@ def get_assessment(submission_uuid):
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1])
return serialized_assessments[0] if serialized_assessments else None
if not serialized_assessments:
logger.info(
u"No self-assessment found for submission {}".format(submission_uuid)
)
return None
serialized_assessment = serialized_assessments[0]
logger.info(u"Retrieved self-assessment for submission {}".format(submission_uuid))
return serialized_assessment
def is_complete(submission_uuid):
......
......@@ -9,6 +9,10 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.workflow --auto
"""
import logging
import importlib
from django.conf import settings
from django.db import models
from django_extensions.db.fields import UUIDField
from model_utils import Choices
......@@ -16,6 +20,17 @@ from model_utils.models import StatusModel, TimeStampedModel
from submissions import api as sub_api
logger = logging.getLogger('openassessment.workflow.models')
# This will (hopefully soon) be replaced with calls to the event-tracking API:
# https://github.com/edx/event-tracking
if hasattr(settings, "EDX_TIM") and "EVENT_LOGGER" in settings.EDX_TIM:
func_path = settings.EDX_TIM["EVENT_LOGGER"]
module_name, func_name = func_path.rsplit('.', 1)
emit_event = getattr(importlib.import_module(module_name), func_name)
else:
emit_event = lambda event: logger.info("Event: " + unicode(event))
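For illustration, a deployment that wants real analytics events would point EDX_TIM["EVENT_LOGGER"] at an importable function; the code above resolves that dotted path with importlib when this module is imported, and otherwise falls back to plain INFO logging. A minimal settings sketch, with a hypothetical dotted path:

    # settings sketch -- "my_analytics.tracking.emit" is a hypothetical path
    EDX_TIM = {
        "EVENT_LOGGER": "my_analytics.tracking.emit",
    }

    # which makes emit_event roughly equivalent to:
    #     from my_analytics.tracking import emit as emit_event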
class AssessmentWorkflow(TimeStampedModel, StatusModel):
"""Tracks the open-ended assessment status of a student submission.
......@@ -147,8 +162,16 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
score["points_earned"],
score["points_possible"]
)
emit_event({
"event_type": "openassessment.workflow.score",
"submission_uuid": self.submission_uuid,
"points_earned": score["points_earned"],
"points_possible": score["points_possible"]
})
new_status = self.STATUS.done
# Finally save our changes if the status has changed
if self.status != new_status:
self.status = new_status
......
"""
This is just a dummy event logger to test our ability to dynamically change this
value based on configuration. All this should go away when we start using the
edx-analytics-approved library (once that's ready for use in production).
"""
def fake_event_logger(event):
print event
from django.test import TestCase
from mock import patch
from nose.tools import raises
from openassessment.workflow.models import emit_event
from openassessment.workflow.test.events import fake_event_logger
class TestEmitEvent(TestCase):
def test_emit_wired_correctly(self):
self.assertEqual(emit_event, fake_event_logger)
......@@ -145,4 +145,13 @@ class GradeMixin(object):
except (peer_api.PeerAssessmentInternalError, peer_api.PeerAssessmentRequestError):
return {'success': False, 'msg': _(u"Assessment feedback could not be saved.")}
else:
self.runtime.publish(
self,
"openassessmentblock.submit_feedback_on_assessments",
{
'submission_uuid': self.submission_uuid,
'feedback_text': feedback_text,
'options': feedback_options,
}
)
return {'success': True, 'msg': _(u"Feedback saved!")}
......@@ -70,6 +70,30 @@ class PeerAssessmentMixin(object):
assessment_dict,
rubric_dict,
)
# Emit analytics event...
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
except PeerAssessmentRequestError as ex:
return {'success': False, 'msg': ex.message}
except PeerAssessmentInternalError as ex:
......@@ -191,6 +215,19 @@ class PeerAssessmentMixin(object):
assessment["must_be_graded_by"],
over_grading
)
self.runtime.publish(
self,
"openassessmentblock.get_peer_submission",
{
"requesting_student_id": student_item_dict["student_id"],
"course_id": student_item_dict["course_id"],
"item_id": student_item_dict["item_id"],
"submission_returned_uuid": (
peer_submission["uuid"] if peer_submission else None
)
}
)
except PeerAssessmentWorkflowError as err:
logger.exception(err)
return peer_submission
......@@ -85,13 +85,35 @@ class SelfAssessmentMixin(object):
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
try:
self_api.create_assessment(
assessment = self_api.create_assessment(
data['submission_uuid'],
self.get_student_item_dict()['student_id'],
data['options_selected'],
{"criteria": self.rubric_criteria}
)
self.runtime.publish(
self,
"openassessmentblock.self_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
# After we've created the self-assessment, we need to update the workflow.
self.update_workflow_status()
except self_api.SelfAssessmentRequestError as ex:
......
......@@ -104,6 +104,13 @@ class SubmissionMixin(object):
try:
self.saved_response = unicode(data['submission'])
self.has_saved = True
# Emit analytics event...
self.runtime.publish(
self,
"openassessmentblock.save_submission",
{"saved_response": self.saved_response}
)
except:
return {'success': False, 'msg': _(u"Could not save response submission")}
else:
......@@ -120,6 +127,20 @@ class SubmissionMixin(object):
submission = api.create_submission(student_item_dict, student_sub_dict)
workflow_api.create_workflow(submission["uuid"])
self.submission_uuid = submission["uuid"]
# Emit analytics event...
self.runtime.publish(
self,
"openassessmentblock.create_submission",
{
"submission_uuid": submission["uuid"],
"attempt_number": submission["attempt_number"],
"created_at": submission["created_at"],
"submitted_at": submission["submitted_at"],
"answer": submission["answer"],
}
)
return submission
@staticmethod
......
......@@ -6,6 +6,7 @@ import copy
import logging
from django.core.cache import cache
from django.conf import settings
from django.db import IntegrityError, DatabaseError
from django.utils.encoding import force_unicode
......@@ -14,8 +15,7 @@ from submissions.serializers import (
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary
logger = logging.getLogger(__name__)
logger = logging.getLogger("submissions.api")
class SubmissionError(Exception):
......@@ -140,7 +140,21 @@ def create_submission(student_item_dict, answer, submitted_at=None,
raise SubmissionRequestError(submission_serializer.errors)
submission_serializer.save()
return submission_serializer.data
sub_data = submission_serializer.data
logger.info(
u"Created submission uuid={submission_uuid} for "
u"(course_id={course_id}, item_id={item_id}, "
u"anonymous_student_id={anonymous_student_id})"
.format(
submission_uuid=sub_data["uuid"],
course_id=student_item_dict["course_id"],
item_id=student_item_dict["item_id"],
anonymous_student_id=student_item_dict["student_id"]
)
)
return sub_data
except JsonFieldError:
error_message = u"Could not serialize JSON field in submission {} for student item {}".format(
model_kwargs, student_item_dict
......@@ -192,6 +206,7 @@ def get_submission(submission_uuid):
cached_submission_data = None
if cached_submission_data:
logger.info("Get submission {} (cached)".format(submission_uuid))
return cached_submission_data
try:
......@@ -199,6 +214,7 @@ def get_submission(submission_uuid):
submission_data = SubmissionSerializer(submission).data
cache.set(cache_key, submission_data)
except Submission.DoesNotExist:
logger.error("Submission {} not found.".format(submission_uuid))
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
......@@ -208,6 +224,7 @@ def get_submission(submission_uuid):
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
logger.info("Get submission {}".format(submission_uuid))
return submission_data
......@@ -415,7 +432,7 @@ def get_latest_score_for_submission(submission_uuid):
return ScoreSerializer(score).data
def set_score(submission_uuid, score, points_possible):
def set_score(submission_uuid, points_earned, points_possible):
"""Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
......@@ -426,8 +443,7 @@ def set_score(submission_uuid, score, points_possible):
dictionary must contain a course_id, student_id, and item_id.
submission_uuid (str): The submission associated with this score.
submission_uuid (str): UUID for the submission (must exist).
score (int): The score to associate with the given submission and
student item.
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular
student item.
......@@ -468,7 +484,7 @@ def set_score(submission_uuid, score, points_possible):
data={
"student_item": submission_model.student_item.pk,
"submission": submission_model.pk,
"points_earned": score,
"points_earned": points_earned,
"points_possible": points_possible,
}
)
......@@ -485,6 +501,10 @@ def set_score(submission_uuid, score, points_possible):
# a score summary and ignore the error.
try:
score.save()
logger.info(
"Score of ({}/{}) set for submission {}"
.format(points_earned, points_possible, submission_uuid)
)
except IntegrityError:
pass
......
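To make the renamed set_score signature concrete, callers now name the earned points explicitly instead of the old score positional. A short sketch, assuming submission_uuid identifies an existing submission (e.g. the "uuid" key returned by create_submission above):

    from submissions import api as sub_api

    def record_example_score(submission_uuid):
        # 8 of 10 points; keyword arguments match the renamed parameters
        sub_api.set_score(submission_uuid, points_earned=8, points_possible=10)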
Log files:
apps_info.log = INFO level logging for all edx-ora2 apps and OpenAssessmentBlock
apps_debug.log = same as above, except DEBUG level
errors.log = all ERROR and CRITICAL logs, stack traces
events.log = Analytics events from the xblock-sdk workbench runtime's publish()
trace.log = The kitchen sink. Massive because of SQL debug logs from Django.
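For reference, application code reaches these files simply by asking for a logger under the openassessment or submissions namespaces; a minimal sketch (the module name is illustrative, and routing follows the LOGGING dict added later in this diff):

    import logging

    # Matched to the "openassessment" logger config by dotted-name prefix:
    # apps_info.log keeps INFO and above, apps_debug.log keeps DEBUG and above,
    # and records also propagate to the root trace/errors handlers.
    logger = logging.getLogger("openassessment.assessment.peer_api")
    logger.info(u"example message routed through the handlers above")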
# edX Internal Requirements
git+https://github.com/edx/XBlock.git@2ac249d5af0cd42adf766bfb1c6858354bbcccd9#egg=XBlock
git+https://github.com/ormsbee/xblock-sdk.git@4f62e508#egg=xblock-sdk
git+https://github.com/edx/XBlock.git@3b6e4218bd326f84dbeb0baed7b2b7813ffea3dd#egg=XBlock
git+https://github.com/edx/xblock-sdk.git@50ed1646d24f6f0a21d6d0bb074e3b7c8a78fd5a#egg=xblock-sdk
# Third Party Requirements
defusedxml==0.4.1
......
......@@ -138,35 +138,6 @@ INSTALLED_APPS = (
'openassessment.assessment',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
},
}
# TODO: add config for XBLOCK_WORKBENCH { SCENARIO_CLASSES }
WORKBENCH = {
'reset_state_on_restart': False,
......@@ -178,3 +149,7 @@ CACHES = {
'LOCATION': 'default_loc_mem',
},
}
EDX_TIM = {
}
\ No newline at end of file
......@@ -29,3 +29,70 @@ CACHES = {
'TIMEOUT': 60 * 60 * 8
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'apps_info': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'logs/apps_info.log',
'formatter': 'simple',
},
'apps_debug': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/apps_debug.log',
'formatter': 'simple',
},
'trace': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/trace.log',
'formatter': 'simple',
'maxBytes': 1000000,
'backupCount': 2,
},
'events': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'logs/events.log',
'formatter': 'simple',
},
'errors': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': 'logs/errors.log',
'formatter': 'simple',
}
},
'formatters': {
'simple': {
'format': '%(asctime)s %(name)s [%(levelname)s] %(message)s'
}
},
'loggers': {
'': {
'handlers': ['trace', 'errors'],
'propagate': True,
},
'openassessment': {
'handlers': ['apps_debug', 'apps_info'],
'propagate': True,
},
'submissions': {
'handlers': ['apps_debug', 'apps_info'],
'propagate': True,
},
'workbench.runtime': {
'handlers': ['apps_debug', 'apps_info', 'events'],
'propagate': True,
}
},
}
......@@ -43,3 +43,4 @@ LETTUCE_SERVER_PORT = 8005
# Install test-specific Django apps
INSTALLED_APPS += ('django_nose', 'lettuce.django',)
EDX_TIM["EVENT_LOGGER"] = "openassessment.workflow.test.events.fake_event_logger"