Commit 531eb80e by Calen Pennington

Merge remote-tracking branch 'origin/master' into feature/alex/poll-merged

Conflicts:
	cms/djangoapps/contentstore/tests/test_contentstore.py
	cms/djangoapps/contentstore/views.py
	common/lib/xmodule/xmodule/combined_open_ended_module.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
	common/lib/xmodule/xmodule/peer_grading_module.py
	common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
	common/lib/xmodule/xmodule/tests/test_self_assessment.py
	lms/djangoapps/open_ended_grading/tests.py
parents 5379a9fd 0b2226b0
@@ -12,7 +12,7 @@ profile=no
 # Add files or directories to the blacklist. They should be base names, not
 # paths.
-ignore=CVS
+ignore=CVS, migrations

 # Pickle collected data for later comparisons.
 persistent=yes
@@ -33,7 +33,11 @@ load-plugins=
 # can either give multiple identifier separated by comma (,) or put this option
 # multiple time (only on the command line, not in the configuration file where
 # it should appear only once).
-disable=E1102,W0142
+disable=
+    # W0141: Used builtin function 'map'
+    # W0142: Used * or ** magic
+    # R0903: Too few public methods (1/2)
+    W0141,W0142,R0903

 [REPORTS]
@@ -43,7 +47,7 @@ disable=E1102,W0142
 output-format=text

 # Include message's id in output
-include-ids=no
+include-ids=yes

 # Put messages in a separate file for each module / package specified on the
 # command line instead of printing them on stdout. Reports (if any) will be
@@ -97,7 +101,7 @@ bad-functions=map,filter,apply,input
 module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

 # Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$

 # Regular expression which should only match correct class names
 class-rgx=[A-Z_][a-zA-Z0-9]+$
@@ -106,7 +110,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$
 function-rgx=[a-z_][a-z0-9_]{2,30}$

 # Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
+method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$

 # Regular expression which should only match correct instance attribute names
 attr-rgx=[a-z_][a-z0-9_]{2,30}$
...
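For reference, the relaxed method-rgx above is aimed at unittest-style method names that the old pattern rejected, since the old pattern allowed only lowercase names of at most 31 characters. A minimal sketch of what the new pattern accepts (the regex is copied from the hunk; the sample names are illustrative only):

import re

METHOD_RGX = r"([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$"

# Camel-case unittest hooks and long snake_case test names now pass;
# none of these matched the old [a-z_][a-z0-9_]{2,30}$ pattern.
for name in ["setUp", "setUpClass", "tearDown", "test_auto_cohorting_randomization"]:
    assert re.match(METHOD_RGX, name), name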
@@ -114,7 +114,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
         self.assertTrue(sequential.location.url() in chapter.children)

         self.client.post(reverse('delete_item'),
-                         json.dumps({'id': sequential.location.url(), 'delete_children': 'true'}),
+                         json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}),
                          "application/json")

         found = False
...
@@ -643,7 +643,7 @@ def delete_item(request):
         modulestore('direct').delete_item(item.location)

         # cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
+        if delete_all_versions:
             parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
             for parent_loc in parent_locs:
...
@@ -120,7 +120,8 @@ def get_cohort(user, course_id):
         return None

     choices = course.auto_cohort_groups
-    if len(choices) == 0:
+    n = len(choices)
+    if n == 0:
         # Nowhere to put user
         log.warning("Course %s is auto-cohorted, but there are no"
                     " auto_cohort_groups specified",
@@ -128,7 +129,14 @@ def get_cohort(user, course_id):
         return None

     # Put user in a random group, creating it if needed
-    group_name = random.choice(choices)
+    choice = random.randrange(0, n)
+    group_name = choices[choice]
+
+    # Victor: we are seeing very strange behavior on prod, where almost all users
+    # end up in the same group. Log at INFO to try to figure out what's going on.
+    log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
+        user, group_name,choice))
+
     group, created = CourseUserGroup.objects.get_or_create(
         course_id=course_id,
         group_type=CourseUserGroup.COHORT,
...
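Functionally, choices[random.randrange(0, n)] selects a group the same way random.choice(choices) would; the value of the rewrite is that the raw index is kept so the INFO line can show whether the random draw itself is skewed on prod. A standalone sketch of the idea (the function name and logger setup are illustrative, not from the codebase):

import logging
import random

log = logging.getLogger(__name__)

def pick_auto_cohort_group(choices):
    # Keep the drawn index around, rather than calling random.choice(),
    # so the selection can be logged and audited later.
    choice = random.randrange(0, len(choices))
    group_name = choices[choice]
    log.info("DEBUG: adding user to cohort {0}. choice={1}".format(group_name, choice))
    return group_name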
@@ -6,7 +6,7 @@ from django.test.utils import override_settings

 from course_groups.models import CourseUserGroup
 from course_groups.cohorts import (get_cohort, get_course_cohorts,
-                                   is_commentable_cohorted)
+                                   is_commentable_cohorted, get_cohort_by_name)

 from xmodule.modulestore.django import modulestore, _MODULESTORES
@@ -180,6 +180,37 @@ class TestCohorts(django.test.TestCase):
                          "user2 should still be in originally placed cohort")

+    def test_auto_cohorting_randomization(self):
+        """
+        Make sure get_cohort() randomizes properly.
+        """
+        course = modulestore().get_course("edX/toy/2012_Fall")
+        self.assertEqual(course.id, "edX/toy/2012_Fall")
+        self.assertFalse(course.is_cohorted)
+
+        groups = ["group_{0}".format(n) for n in range(5)]
+        self.config_course_cohorts(course, [], cohorted=True,
+                                   auto_cohort=True,
+                                   auto_cohort_groups=groups)
+
+        # Assign 100 users to cohorts
+        for i in range(100):
+            user = User.objects.create(username="test_{0}".format(i),
+                                       email="a@b{0}.com".format(i))
+            get_cohort(user, course.id)
+
+        # Now make sure that the assignment was at least vaguely random:
+        # each cohort should have at least 1, and fewer than 50 students.
+        # (with 5 groups, probability of 0 users in any group is about
+        # .8**100= 2.0e-10)
+        for cohort_name in groups:
+            cohort = get_cohort_by_name(course.id, cohort_name)
+            num_users = cohort.users.count()
+            self.assertGreater(num_users, 1)
+            self.assertLess(num_users, 50)
+
     def test_get_course_cohorts(self):
         course1_id = 'a/b/c'
         course2_id = 'e/f/g'
...
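The probability quoted in the new test's comment holds up, assuming each of the 100 users is assigned independently and uniformly: a specific group is left empty with probability (4/5)^100 ≈ 2.0e-10, and a union bound over the 5 groups keeps the chance that any group is empty near 1e-9, so the assertions should essentially never flake. A quick check:

p_one_group_empty = (4.0 / 5.0) ** 100      # ~2.04e-10, the ".8**100" figure in the comment
p_any_group_empty = 5 * p_one_group_empty   # union bound over the 5 groups, ~1.0e-9
print(p_one_group_empty, p_any_group_empty)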
@@ -89,7 +89,6 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):

     def __init__(self, system, location, descriptor, model_data):
         XModule.__init__(self, system, location, descriptor, model_data)
-
         """
         Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -152,13 +151,13 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
         attributes = self.student_attributes + self.settings_attributes

         static_data = {
-            'rewrite_content_links' : self.rewrite_content_links,
+            'rewrite_content_links': self.rewrite_content_links,
         }
-        instance_state = { k: getattr(self,k) for k in attributes}
+        instance_state = {k: getattr(self, k) for k in attributes}
         self.child_descriptor = descriptors[version_index](self.system)
         self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(self.data), self.system)
         self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
-                                                   instance_state = instance_state, static_data= static_data, attributes=attributes)
+                                                   instance_state=instance_state, static_data=static_data, attributes=attributes)
         self.save_instance_data()

     def get_html(self):
@@ -190,9 +189,9 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
     def save_instance_data(self):
         for attribute in self.student_attributes:
-            child_attr = getattr(self.child_module,attribute)
+            child_attr = getattr(self.child_module, attribute)
             if child_attr != getattr(self, attribute):
-                setattr(self,attribute, getattr(self.child_module,attribute))
+                setattr(self, attribute, getattr(self.child_module, attribute))

 class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
...
@@ -89,7 +89,7 @@ class FolditModule(FolditFields, XModule):
         from foldit.models import Score

         leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)]
-        leaders.sort(key=lambda x: x[1])
+        leaders.sort(key=lambda x: -x[1])

         return leaders
...
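Negating the sort key flips the Foldit leaderboard to descending score order, i.e. highest scores first. A small illustration with made-up data:

leaders = [("alice", 10), ("bob", 30), ("carol", 20)]

old_order = sorted(leaders, key=lambda x: x[1])    # ascending: lowest score first
new_order = sorted(leaders, key=lambda x: -x[1])   # descending: highest score first

print(new_order)  # [('bob', 30), ('carol', 20), ('alice', 10)]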
@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
     """
     Interface to staff grading backend.
     """
+
     def __init__(self, config, system):
         config['system'] = system
         super(ControllerQueryService, self).__init__(config)
@@ -75,15 +76,16 @@ class ControllerQueryService(GradingService):
         response = self.post(self.take_action_on_flags_url, params)
         return response

+
 def convert_seconds_to_human_readable(seconds):
     if seconds < 60:
         human_string = "{0} seconds".format(seconds)
     elif seconds < 60 * 60:
-        human_string = "{0} minutes".format(round(seconds/60,1))
+        human_string = "{0} minutes".format(round(seconds / 60, 1))
-    elif seconds < (24*60*60):
+    elif seconds < (24 * 60 * 60):
-        human_string = "{0} hours".format(round(seconds/(60*60),1))
+        human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
     else:
-        human_string = "{0} days".format(round(seconds/(60*60*24),1))
+        human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))

     eta_string = "{0}".format(human_string)
     return eta_string
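The convert_seconds_to_human_readable changes above are whitespace-only, so behavior is unchanged. For reference, rough expected outputs (note this is Python 2 code, so integer inputs go through integer division before round):

convert_seconds_to_human_readable(45)        # "45 seconds"
convert_seconds_to_human_readable(90.0)      # "1.5 minutes" (an int 90 gives "1.0 minutes" under integer division)
convert_seconds_to_human_readable(7200)      # "2.0 hours"
convert_seconds_to_human_readable(172800)    # "2.0 days"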
@@ -19,6 +19,7 @@ class GradingService(object):
     """
     Interface to staff grading backend.
     """
+
     def __init__(self, config):
         self.username = config['username']
         self.password = config['password']
...
@@ -5,6 +5,7 @@ to send them to S3.
 try:
     from PIL import Image
     ENABLE_PIL = True
 except:
     ENABLE_PIL = False
@@ -51,6 +52,7 @@ class ImageProperties(object):
     """
     Class to check properties of an image and to validate if they are allowed.
     """
+
     def __init__(self, image_data):
         """
         Initializes class variables
@@ -141,6 +143,7 @@ class URLProperties(object):
     Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
     links to the peer grading image functionality of the external grading service.
     """
+
     def __init__(self, url_string):
         self.url_string = url_string
@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
         return True, public_url
     except:
         #This is a dev_facing_error
-        error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
+        error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
+            bucketname.lower())
         log.error(error_message)
         return False, error_message
...
@@ -77,7 +77,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             self.send_to_grader(self.latest_answer(), system)
             self.child_created = False
-
     def _parse(self, oeparam, prompt, rubric, system):
         '''
         Parse OpenEndedResponse XML:
@@ -104,7 +103,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         # response types)
         except TypeError, ValueError:
             #This is a dev_facing_error
-            log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
+            log.exception(
+                "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
+                    grader_payload))

         self.initial_display = find_with_default(oeparam, 'initial_display', '')
         self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
@@ -148,7 +149,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
             if tag not in survey_responses:
                 #This is a student_facing_error
-                return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
+                return {'success': False,
+                        'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
+                            tag)}
         try:
             submission_id = int(survey_responses['submission_id'])
             grader_id = int(survey_responses['grader_id'])
@@ -266,7 +269,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         return True

-
     def get_answers(self):
         """
         Gets and shows the answer for this problem.
@@ -300,7 +302,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         # We want to display available feedback in a particular order.
         # This dictionary specifies which goes first--lower first.
-        priorities = { # These go at the start of the feedback
+        priorities = {# These go at the start of the feedback
             'spelling': 0,
             'grammar': 1,
             # needs to be after all the other feedback
@@ -411,7 +413,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         return feedback_template, rubric_scores

-
     def _parse_score_msg(self, score_msg, system, join_feedback=True):
         """
         Grader reply is a JSON-dump of the following dict
@@ -437,12 +438,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             'valid': False,
             'score': 0,
             'feedback': '',
-            'rubric_scores' : [[0]],
-            'grader_types' : [''],
-            'feedback_items' : [''],
-            'feedback_dicts' : [{}],
-            'grader_ids' : [0],
-            'submission_ids' : [0],
+            'rubric_scores': [[0]],
+            'grader_types': [''],
+            'feedback_items': [''],
+            'feedback_dicts': [{}],
+            'grader_ids': [0],
+            'submission_ids': [0],
         }
         try:
             score_result = json.loads(score_msg)
@@ -527,12 +528,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             'valid': True,
             'score': score,
             'feedback': feedback,
-            'rubric_scores' : rubric_scores,
-            'grader_types' : grader_types,
-            'feedback_items' : feedback_items,
-            'feedback_dicts' : feedback_dicts,
-            'grader_ids' : grader_ids,
-            'submission_ids' : submission_ids,
+            'rubric_scores': rubric_scores,
+            'grader_types': grader_types,
+            'feedback_items': feedback_items,
+            'feedback_dicts': feedback_dicts,
+            'grader_ids': grader_ids,
+            'submission_ids': submission_ids,
         }

     def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
@@ -585,7 +586,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             #This is a dev_facing_error
             log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
             #This is a dev_facing_error
-            return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})

         before = self.get_progress()
         d = handlers[dispatch](get, system)
@@ -679,7 +680,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             correct = ""

         previous_answer = self.initial_display
-
         context = {
             'prompt': self.child_prompt,
             'previous_answer': previous_answer,
@@ -692,7 +692,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             'child_type': 'openended',
             'correct': correct,
             'accept_file_upload': self.accept_file_upload,
-            'eta_message' : eta_string,
+            'eta_message': eta_string,
         }
         html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
         return html
@@ -726,7 +726,9 @@ class OpenEndedDescriptor():
         for child in ['openendedparam']:
             if len(xml_object.xpath(child)) != 1:
                 #This is a staff_facing_error
-                raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+                raise ValueError(
+                    "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                        child))

         def parse(k):
             """Assumes that xml_object has child k"""
...
@@ -100,7 +100,8 @@ class OpenEndedChild(object):
         # completion (doesn't matter if you self-assessed correct/incorrect).
         if system.open_ended_grading_interface:
             self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
-            self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
+            self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
+                                                                                 system)
         else:
             self.peer_gs = MockPeerGradingService()
             self.controller_qs = None
@@ -142,7 +143,9 @@ class OpenEndedChild(object):
             return True, {
                 'success': False,
                 #This is a student_facing_error
-                'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.child_attempts, self.max_attempts)
+                'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
+                    self.child_attempts, self.max_attempts
+                )
             }
         else:
             return False, {}
@@ -298,7 +301,7 @@ class OpenEndedChild(object):
         @return: Boolean correct.
         """
         correct = False
-        if(isinstance(score, (int, long, float, complex))):
+        if (isinstance(score, (int, long, float, complex))):
             score_ratio = int(score) / float(self.max_score())
             correct = (score_ratio >= 0.66)
         return correct
@@ -332,7 +335,8 @@ class OpenEndedChild(object):
         try:
             image_data.seek(0)
-            success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
+            success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
+                                                                              self.s3_interface)
         except:
             log.exception("Could not upload image to S3.")
@@ -444,16 +448,18 @@ class OpenEndedChild(object):
             success = True
         except:
             #This is a dev_facing_error
-            log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
+            log.error("Could not contact external open ended graders for location {0} and student {1}".format(
+                self.location_string, student_id))
             #This is a student_facing_error
             error_message = "Could not contact the graders. Please notify course staff."
             return success, allowed_to_submit, error_message
-        if count_graded>=count_required:
+        if count_graded >= count_required:
             return success, allowed_to_submit, ""
         else:
             allowed_to_submit = False
             #This is a student_facing_error
-            error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
+            error_message = error_string.format(count_required - count_graded, count_graded, count_required,
+                                                student_sub_count)
             return success, allowed_to_submit, error_message

     def get_eta(self):
@@ -468,7 +474,7 @@ class OpenEndedChild(object):
         success = response['success']
         if isinstance(success, basestring):
-            success = (success.lower()=="true")
+            success = (success.lower() == "true")
         if success:
             eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
@@ -477,6 +483,3 @@ class OpenEndedChild(object):
             eta_string = ""

         return eta_string
-
-
-
@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
     """
     Interface with the grading controller for peer grading
     """
+
     def __init__(self, config, system):
         config['system'] = system
         super(PeerGradingService, self).__init__(config)
@@ -39,7 +40,8 @@ class PeerGradingService(GradingService):
                              {'location': problem_location, 'grader_id': grader_id})
         return self.try_to_decode(self._render_rubric(response))

-    def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
+    def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
+                   submission_flagged):
         data = {'grader_id': grader_id,
                 'submission_id': submission_id,
                 'score': score,
@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
             pass
         return text

+
 """
 This is a mock peer grading service that can be used for unit tests
 without making actual service calls to the grading controller
...
@@ -75,7 +75,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
         return html
-
     def handle_ajax(self, dispatch, get, system):
         """
         This is called by courseware.module_render, to handle an AJAX call.
@@ -97,7 +96,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
             #This is a dev_facing_error
             log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
             #This is a dev_facing_error
-            return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})

         before = self.get_progress()
         d = handlers[dispatch](get, system)
@@ -161,7 +160,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
-
     def save_answer(self, get, system):
         """
         After the answer is submitted, show the rubric.
@@ -226,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         try:
             score = int(get['assessment'])
             score_list = get.getlist('score_list[]')
-            for i in xrange(0,len(score_list)):
+            for i in xrange(0, len(score_list)):
                 score_list[i] = int(score_list[i])
         except ValueError:
             #This is a dev_facing_error
@@ -310,7 +308,9 @@ class SelfAssessmentDescriptor():
         for child in expected_children:
             if len(xml_object.xpath(child)) != 1:
                 #This is a staff_facing_error
-                raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+                raise ValueError(
+                    "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                        child))

         def parse(k):
             """Assumes that xml_object has child k"""
...
@@ -55,7 +55,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
         #We need to set the location here so the child modules can use it
         system.set('location', location)
         self.system = system
-        if(self.system.open_ended_grading_interface):
+        if (self.system.open_ended_grading_interface):
             self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
         else:
             self.peer_gs = MockPeerGradingService()
@@ -145,7 +145,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
             #This is a dev_facing_error
             log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
             #This is a dev_facing_error
-            return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})

         d = handlers[dispatch](get)
@@ -182,7 +182,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
         except:
             success, response = self.query_data_for_location()
             if not success:
-                log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format(
+                log.exception(
+                    "No instance data found and could not get data from controller for loc {0} student {1}".format(
                         self.system.location.url(), self.system.anonymous_student_id
                     ))
                 return None
@@ -262,7 +263,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
             error: if there was an error in the submission, this is the error message
         """
-        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
+        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
+                        'submission_flagged'])
         success, message = self._check_required(get, required)
         if not success:
             return self._err_response(message)
@@ -421,7 +423,9 @@ class PeerGradingModule(PeerGradingFields, XModule):
             return response
         except GradingServiceError:
             #This is a dev_facing_error
-            log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
+            log.exception(
+                "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
+                    location, submission_id, submission_key, grader_id))
             #This is a student_facing_error
             return self._err_response('There was an error saving your score. Please notify course staff.')
@@ -501,7 +505,6 @@ class PeerGradingModule(PeerGradingFields, XModule):
                 problem['due'] = None
                 problem['closed'] = False

         ajax_url = self.ajax_url
-
         html = self.system.render_template('peer_grading/peer_grading.html', {
             'course_id': self.system.course_id,
@@ -524,7 +527,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
         if self.use_for_single_location not in TRUE_DICT:
             #This is an error case, because it must be set to use a single location to be called without get parameters
             #This is a dev_facing_error
-            log.error("Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
+            log.error(
+                "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
             return {'html': "", 'success': False}

         problem_location = self.link_to_location
...
@@ -14,6 +14,7 @@ from datetime import datetime
 from . import test_system

 import test_util_open_ended
+
 """
 Tests for the various pieces of the CombinedOpenEndedGrading system
@@ -46,9 +47,9 @@ class OpenEndedChildTest(unittest.TestCase):
         'display_name': 'Name',
         'accept_file_upload': False,
         'close_date': None,
-        's3_interface' : "",
-        'open_ended_grading_interface' : {},
-        'skip_basic_checks' : False,
+        's3_interface': "",
+        'open_ended_grading_interface': {},
+        'skip_basic_checks': False,
     }

     definition = Mock()
     descriptor = Mock()
@@ -63,17 +64,14 @@ class OpenEndedChildTest(unittest.TestCase):
         answer = self.openendedchild.latest_answer()
         self.assertEqual(answer, "")

-
     def test_latest_score_empty(self):
         answer = self.openendedchild.latest_score()
         self.assertEqual(answer, None)

-
     def test_latest_post_assessment_empty(self):
         answer = self.openendedchild.latest_post_assessment(self.test_system)
         self.assertEqual(answer, "")

-
     def test_new_history_entry(self):
         new_answer = "New Answer"
         self.openendedchild.new_history_entry(new_answer)
@@ -99,7 +97,6 @@ class OpenEndedChildTest(unittest.TestCase):
         score = self.openendedchild.latest_score()
         self.assertEqual(score, 4)

-
     def test_record_latest_post_assessment(self):
         new_answer = "New Answer"
         self.openendedchild.new_history_entry(new_answer)
@@ -124,13 +121,11 @@ class OpenEndedChildTest(unittest.TestCase):
         self.assertEqual(score['score'], new_score)
         self.assertEqual(score['total'], self.static_data['max_score'])

-
     def test_reset(self):
         self.openendedchild.reset(self.test_system)
         state = json.loads(self.openendedchild.get_instance_state())
         self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)

-
     def test_is_last_response_correct(self):
         new_answer = "New Answer"
         self.openendedchild.new_history_entry(new_answer)
@@ -165,11 +160,11 @@ class OpenEndedModuleTest(unittest.TestCase):
         'max_score': max_score,
         'display_name': 'Name',
         'accept_file_upload': False,
-        'rewrite_content_links' : "",
+        'rewrite_content_links': "",
         'close_date': None,
-        's3_interface' : test_util_open_ended.S3_INTERFACE,
-        'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
-        'skip_basic_checks' : False,
+        's3_interface': test_util_open_ended.S3_INTERFACE,
+        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+        'skip_basic_checks': False,
     }
     oeparam = etree.XML('''
@@ -188,7 +183,8 @@ class OpenEndedModuleTest(unittest.TestCase):
         self.test_system.location = self.location
         self.mock_xqueue = MagicMock()
         self.mock_xqueue.send_to_queue.return_value = (None, "Message")
-        self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 1}
+        self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
+                                   'waittime': 1}
         self.openendedmodule = OpenEndedModule(self.test_system, self.location,
                                                self.definition, self.descriptor, self.static_data, self.metadata)
@@ -301,12 +297,12 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
         'rubric': rubric,
         'max_score': max_score,
         'display_name': 'Name',
-        'accept_file_upload' : False,
-        'rewrite_content_links' : "",
-        'close_date' : "",
-        's3_interface' : test_util_open_ended.S3_INTERFACE,
-        'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
-        'skip_basic_checks' : False,
+        'accept_file_upload': False,
+        'rewrite_content_links': "",
+        'close_date': "",
+        's3_interface': test_util_open_ended.S3_INTERFACE,
+        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+        'skip_basic_checks': False,
     }
     oeparam = etree.XML('''
...
@@ -10,8 +10,8 @@ from . import test_system

 import test_util_open_ended


 class SelfAssessmentTest(unittest.TestCase):
     rubric = '''<rubric><rubric>
         <category>
         <description>Response Quality</description>
@@ -46,9 +46,9 @@ class SelfAssessmentTest(unittest.TestCase):
             'display_name': "Name",
             'accept_file_upload': False,
             'close_date': None,
-            's3_interface' : test_util_open_ended.S3_INTERFACE,
-            'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
-            'skip_basic_checks' : False,
+            's3_interface': test_util_open_ended.S3_INTERFACE,
+            'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+            'skip_basic_checks': False,
         }

         self.module = SelfAssessmentModule(test_system(), self.location,
@@ -62,13 +62,14 @@ class SelfAssessmentTest(unittest.TestCase):
     def test_self_assessment_flow(self):
         responses = {'assessment': '0', 'score_list[]': ['0', '0']}

         def get_fake_item(name):
             return responses[name]

-        def get_data_for_location(self,location,student):
+        def get_data_for_location(self, location, student):
             return {
-                'count_graded' : 0,
-                'count_required' : 0,
+                'count_graded': 0,
+                'count_required': 0,
                 'student_sub_count': 0,
             }
@@ -87,7 +88,6 @@ class SelfAssessmentTest(unittest.TestCase):
         self.module.save_assessment(mock_query_dict, self.module.system)
         self.assertEqual(self.module.child_state, self.module.DONE)

-
         d = self.module.reset({})
         self.assertTrue(d['success'])
         self.assertEqual(self.module.child_state, self.module.INITIAL)
...
 OPEN_ENDED_GRADING_INTERFACE = {
-    'url' : 'http://127.0.0.1:3033/',
-    'username' : 'incorrect',
-    'password' : 'incorrect',
-    'staff_grading' : 'staff_grading',
-    'peer_grading' : 'peer_grading',
-    'grading_controller' : 'grading_controller'
+    'url': 'http://127.0.0.1:3033/',
+    'username': 'incorrect',
+    'password': 'incorrect',
+    'staff_grading': 'staff_grading',
+    'peer_grading': 'peer_grading',
+    'grading_controller': 'grading_controller'
 }

 S3_INTERFACE = {
-    'aws_access_key' : "",
-    'aws_secret_key' : "",
-    "aws_bucket_name" : "",
+    'aws_access_key': "",
+    'aws_secret_key': "",
+    "aws_bucket_name": "",
 }
\ No newline at end of file
...
'''
This is a one-off command aimed at fixing a temporary problem encountered where partial credit was awarded for
code problems, but the resulting score (or grade) was mistakenly set to zero because of a bug in
CorrectMap.get_npoints().
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from courseware.models import StudentModule
from capa.correctmap import CorrectMap
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
The fix here is to recalculate the score/grade based on the partial credit.
To narrow down the set of problems that might need fixing, the StudentModule
objects to be checked is filtered down to those:
created < '2013-03-08 15:45:00' (the problem must have been answered before the fix was installed,
on Prod and Edge)
modified > '2013-03-07 20:18:00' (the problem must have been visited after the bug was introduced)
state like '%"npoints": 0.%' (the problem must have some form of partial credit).
'''
num_visited = 0
num_changed = 0
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save_changes',
default=False,
help='Persist the changes that were encountered. If not set, no changes are saved.'), )
def fix_studentmodules(self, save_changes):
'''Identify the list of StudentModule objects that might need fixing, and then fix each one'''
modules = StudentModule.objects.filter(modified__gt='2013-03-07 20:18:00',
created__lt='2013-03-08 15:45:00',
state__contains='"npoints": 0.')
for module in modules:
self.fix_studentmodule_grade(module, save_changes)
def fix_studentmodule_grade(self, module, save_changes):
''' Fix the grade assigned to a StudentModule'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
state_dict = json.loads(module_state)
self.num_visited += 1
# LoncapaProblem.get_score() checks student_answers -- if there are none, we will return a grade of 0
# Check that this is the case, but do so sooner, before we do any of the other grading work.
student_answers = state_dict['student_answers']
if (not student_answers) or len(student_answers) == 0:
# we should not have a grade here:
if module.grade != 0:
LOG.error("No answer found but grade {grade} exists for {type} module {id} for student {student} "
"in course {course_id}".format(grade=module.grade,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
else:
LOG.debug("No answer and no grade found for {type} module {id} for student {student} "
"in course {course_id}".format(grade=module.grade,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
# load into a CorrectMap, as done in LoncapaProblem.__init__():
correct_map = CorrectMap()
if 'correct_map' in state_dict:
correct_map.set_dict(state_dict['correct_map'])
# calculate score the way LoncapaProblem.get_score() works, by deferring to
# CorrectMap's get_npoints implementation.
correct = 0
for key in correct_map:
correct += correct_map.get_npoints(key)
if module.grade == correct:
# nothing to change
LOG.debug("Grade matches for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
elif save_changes:
# make the change
LOG.info("Grade changing from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
module.grade = correct
module.save()
self.num_changed += 1
else:
# don't make the change, but log that the change would be made
LOG.info("Grade would change from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
self.num_changed += 1
def handle(self, **options):
'''Handle management command request'''
save_changes = options['save_changes']
LOG.info("Starting run: save_changes = {0}".format(save_changes))
self.fix_studentmodules(save_changes)
LOG.info("Finished run: updating {0} of {1} modules".format(self.num_changed, self.num_visited))
@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
     ('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
     ('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
     ('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
 )

 def staff_grading_notifications(course, user):
@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
         #Non catastrophic error, so no real action
         notifications = {}
         #This is a dev_facing_error
-        log.info("Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id, student_id))
+        log.info(
+            "Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
+                                                                                                             student_id))

     if pending_grading:
         img_path = "/static/images/grading_notification.png"
@@ -87,7 +89,9 @@ def peer_grading_notifications(course, user):
         #Non catastrophic error, so no real action
         notifications = {}
         #This is a dev_facing_error
-        log.info("Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id, student_id))
+        log.info(
+            "Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
+                                                                                                            student_id))

     if pending_grading:
         img_path = "/static/images/grading_notification.png"
@@ -119,7 +123,9 @@ def combined_notifications(course, user):
         return notification_dict

     min_time_to_query = user.last_login
-    last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified')
+    last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
+                                                    modified__gt=min_time_to_query).values('modified').order_by(
+        '-modified')
     last_module_seen_count = last_module_seen.count()

     if last_module_seen_count > 0:
@@ -131,7 +137,8 @@ def combined_notifications(course, user):
     img_path = ""
     try:
-        controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed)
+        controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
+                                                                          last_time_viewed)
         log.debug(controller_response)
         notifications = json.loads(controller_response)
         if notifications['success']:
@@ -141,7 +148,9 @@ def combined_notifications(course, user):
         #Non catastrophic error, so no real action
         notifications = {}
         #This is a dev_facing_error
-        log.exception("Problem with getting notifications from controller query service for course {0} user {1}.".format(course_id, student_id))
+        log.exception(
+            "Problem with getting notifications from controller query service for course {0} user {1}.".format(
+                course_id, student_id))

     if pending_grading:
         img_path = "/static/images/grading_notification.png"
@@ -165,7 +174,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):

 def create_key_name(student_id, course_id, notification_type):
-    key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id)
+    key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
+                                                          student=student_id)
     return key_name
...
@@ -15,6 +15,7 @@ class StaffGrading(object):
     """
     Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
     """
+
     def __init__(self, course):
         self.course = course
...
@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
 STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'

+
 class MockStaffGradingService(object):
     """
     A simple mockup of a staff grading service, testing.
     """
+
     def __init__(self):
         self.cnt = 0
@@ -45,13 +47,16 @@ class MockStaffGradingService(object):
         return json.dumps({'success': True,
                            'problem_list': [
                                json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
-                                           'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
+                                           'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
+                                           'min_for_ml': 10}),
                                json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
-                                           'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
+                                           'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
+                                           'min_for_ml': 10})
                            ]})

-    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
+    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
+                   submission_flagged):
         return self.get_next(course_id, 'fake location', grader_id)
@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
     """
     Interface to staff grading backend.
     """
+
     def __init__(self, config):
         config['system'] = ModuleSystem(
             ajax_url=None,
@@ -121,7 +127,8 @@ class StaffGradingService(GradingService):
         return json.dumps(self._render_rubric(response))

-    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
+    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
+                   submission_flagged):
         """
         Save a score and feedback for a submission.
@@ -304,7 +311,7 @@ def save_grade(request, course_id):
     if request.method != 'POST':
         raise Http404

-    required = set(['score', 'feedback', 'submission_id', 'location','submission_flagged', 'rubric_scores[]'])
+    required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
     actual = set(request.POST.keys())
     missing = required - actual
     if len(missing) > 0:
@@ -314,7 +321,6 @@ def save_grade(request, course_id):
     grader_id = unique_id_for_user(request.user)
     p = request.POST

     location = p['location']
-
     skipped = 'skipped' in p
@@ -329,7 +335,9 @@ def save_grade(request, course_id):
                                            p['submission_flagged'])
     except GradingServiceError:
         #This is a dev_facing_error
-        log.exception("Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(request, course_id))
+        log.exception(
"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
request, course_id))
#This is a staff_facing_error #This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
...@@ -337,13 +345,16 @@ def save_grade(request, course_id): ...@@ -337,13 +345,16 @@ def save_grade(request, course_id):
result = json.loads(result_json) result = json.loads(result_json)
except ValueError: except ValueError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(result_json)) log.exception(
"save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
result_json))
#This is a staff_facing_error #This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False): if not result.get('success', False):
#This is a dev_facing_error #This is a dev_facing_error
log.warning('Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json)) log.warning(
'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade. # Ok, save_grade seemed to work. Get the next submission to grade.
......
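The wrapped log.exception calls above keep .format() for building the message. As a stylistic aside only (not what this commit does), the logging module also accepts lazy %-style arguments, which defers string construction until the record is actually emitted; a sketch under that assumption, with hypothetical local names:

    import logging

    log = logging.getLogger(__name__)

    def _log_save_grade_failure(request, course_id):
        # Same message as the wrapped call above, but passed as lazy %-style
        # arguments so the string is only built if the record is emitted.
        log.exception(
            "Error saving grade in the staff grading interface in open ended "
            "grading. Request: %s Course ID: %s", request, course_id)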
...@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem ...@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string from mitxmako.shortcuts import render_to_string
import logging import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
from django.test.utils import override_settings from django.test.utils import override_settings
from django.http import QueryDict from django.http import QueryDict
...@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader): ...@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the access control and error handling logic -- all the actual work is on the
backend. backend.
''' '''
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader): ...@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall" self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id) self.toy = modulestore().get_course(self.course_id)
def make_instructor(course): def make_instructor(course):
group_name = _course_staff_group_name(course.location) group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name) g = Group.objects.create(name=group_name)
...@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the access control and error handling logic -- all the actual work is on the
backend. backend.
''' '''
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -145,16 +149,16 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -145,16 +149,16 @@ class TestPeerGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall" self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id) self.toy = modulestore().get_course(self.course_id)
location = "i4x://edX/toy/peergrading/init" location = "i4x://edX/toy/peergrading/init"
model_data = {'data' : "<peergrading/>"} model_data = {'data': "<peergrading/>"}
self.mock_service = peer_grading_service.MockPeerGradingService() self.mock_service = peer_grading_service.MockPeerGradingService()
self.system = ModuleSystem( self.system = ModuleSystem(
ajax_url=location, ajax_url=location,
track_function=None, track_function=None,
get_module = None, get_module=None,
render_template=render_to_string, render_template=render_to_string,
replace_urls=None, replace_urls=None,
xblock_model_data= {}, xblock_model_data={},
s3_interface = test_util_open_ended.S3_INTERFACE, s3_interface=test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
) )
self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system, location, model_data) self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system, location, model_data)
...@@ -192,8 +196,10 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -192,8 +196,10 @@ class TestPeerGradingService(ct.PageLoader):
} }
qdict = MagicMock() qdict = MagicMock()
def fake_get_item(key): def fake_get_item(key):
return data[key] return data[key]
qdict.__getitem__.side_effect = fake_get_item qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item qdict.getlist = fake_get_item
qdict.keys = data.keys qdict.keys = data.keys
...@@ -254,8 +260,10 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -254,8 +260,10 @@ class TestPeerGradingService(ct.PageLoader):
} }
qdict = MagicMock() qdict = MagicMock()
def fake_get_item(key): def fake_get_item(key):
return data[key] return data[key]
qdict.__getitem__.side_effect = fake_get_item qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item qdict.getlist = fake_get_item
qdict.keys = data.keys qdict.keys = data.keys
......
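The test hunks above only add blank lines around the fake_get_item helper; the underlying pattern, standing in for a QueryDict with a MagicMock, is reproduced here as a standalone sketch with illustrative data values:

    from mock import MagicMock  # or unittest.mock.MagicMock on newer Pythons

    data = {
        'location': 'i4x://edX/toy/peergrading/init',  # illustrative values only
        'submission_id': '1',
        'rubric_scores[]': ['0', '1'],
    }

    qdict = MagicMock()

    def fake_get_item(key):
        return data[key]

    # __getitem__, getlist and keys are all routed to the plain dict so the
    # mock behaves like the QueryDict that request.POST would normally provide.
    qdict.__getitem__.side_effect = fake_get_item
    qdict.getlist = fake_get_item
    qdict.keys = data.keys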
...@@ -57,22 +57,24 @@ def _reverse_without_slash(url_name, course_id): ...@@ -57,22 +57,24 @@ def _reverse_without_slash(url_name, course_id):
ajax_url = reverse(url_name, kwargs={'course_id': course_id}) ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url return ajax_url
DESCRIPTION_DICT = { DESCRIPTION_DICT = {
'Peer Grading': "View all problems that require peer assessment in this particular course.", 'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.", 'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.", 'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate." 'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
} }
ALERT_DICT = { ALERT_DICT = {
'Peer Grading': "New submissions to grade", 'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade", 'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned", 'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review" 'Flagged Submissions': "Submissions have been flagged for review"
} }
STUDENT_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify course staff." STUDENT_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org" STAFF_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"
@cache_control(no_cache=True, no_store=True, must_revalidate=True) @cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id): def staff_grading(request, course_id):
""" """
...@@ -99,7 +101,7 @@ def peer_grading(request, course_id): ...@@ -99,7 +101,7 @@ def peer_grading(request, course_id):
#Get the current course #Get the current course
course = get_course_with_access(request.user, course_id, 'load') course = get_course_with_access(request.user, course_id, 'load')
course_id_parts = course.id.split("/") course_id_parts = course.id.split("/")
false_dict = [False,"False", "false", "FALSE"] false_dict = [False, "False", "false", "FALSE"]
#Reverse the base course url #Reverse the base course url
base_course_url = reverse('courses') base_course_url = reverse('courses')
...@@ -181,7 +183,7 @@ def student_problem_list(request, course_id): ...@@ -181,7 +183,7 @@ def student_problem_list(request, course_id):
except: except:
#This is a student_facing_error #This is a student_facing_error
eta_string = "Error getting ETA." eta_string = "Error getting ETA."
problem_list[i].update({'eta_string' : eta_string}) problem_list[i].update({'eta_string': eta_string})
except GradingServiceError: except GradingServiceError:
#This is a student_facing_error #This is a student_facing_error
...@@ -325,12 +327,13 @@ def take_action_on_flags(request, course_id): ...@@ -325,12 +327,13 @@ def take_action_on_flags(request, course_id):
if request.method != 'POST': if request.method != 'POST':
raise Http404 raise Http404
required = ['submission_id', 'action_type', 'student_id'] required = ['submission_id', 'action_type', 'student_id']
for key in required: for key in required:
if key not in request.POST: if key not in request.POST:
#This is a staff_facing_error #This is a staff_facing_error
return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(key)}), return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
key)}),
mimetype="application/json") mimetype="application/json")
p = request.POST p = request.POST
...@@ -345,5 +348,7 @@ def take_action_on_flags(request, course_id): ...@@ -345,5 +348,7 @@ def take_action_on_flags(request, course_id):
return HttpResponse(response, mimetype="application/json") return HttpResponse(response, mimetype="application/json")
except GradingServiceError: except GradingServiceError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(submission_id, action_type, grader_id)) log.exception(
"Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, grader_id))
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
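Both save_grade and take_action_on_flags above check request.POST for a set of required keys before doing any work and return a staff-facing error if any are missing. A minimal sketch of that check, factored into a hypothetical helper (the function name is illustrative, not part of the commit):

    def missing_keys(post_data, required):
        # Returns the required keys absent from the POST data; the views above
        # build their staff-facing error message from the missing key(s).
        return [key for key in required if key not in post_data]

    # Example, mirroring take_action_on_flags:
    # missing = missing_keys(request.POST, ['submission_id', 'action_type', 'student_id'])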