Commit c0efb013 by David Ormsbee

Merge branch 'master' into jmpm-analytics

parents 8d4e0f83 faab0ac1
......@@ -12,7 +12,7 @@ profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
ignore=CVS, migrations
# Pickle collected data for later comparisons.
persistent=yes
......@@ -33,7 +33,11 @@ load-plugins=
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
disable=E1102,W0142
disable=
# W0141: Used builtin function 'map'
# W0142: Used * or ** magic
# R0903: Too few public methods (1/2)
W0141,W0142,R0903
[REPORTS]
......@@ -43,7 +47,7 @@ disable=E1102,W0142
output-format=text
# Include message's id in output
include-ids=no
include-ids=yes
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
......@@ -97,7 +101,7 @@ bad-functions=map,filter,apply,input
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$
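The two lowercase names added to const-rgx are the idiomatic Django module-level globals this codebase uses everywhere (e.g. `log = logging.getLogger(...)` in the modules below); a minimal sketch of what now passes without a naming warning:

import logging
log = logging.getLogger(__name__)  # previously flagged as an invalid constant name (C0103)
urlpatterns = []                   # likewise; both names are now whitelisted by const-rgx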
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
......@@ -106,7 +110,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$
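A quick check of the relaxed method-rgx (a sketch, assuming pylint's usual behavior of matching the pattern against the whole method name):

import re
METHOD_RGX = re.compile(r'([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$')
# ordinary snake_case still passes, and unittest's camelCase hooks no longer trip the check:
for name in ('save_answer', 'setUp', 'setUpClass', 'tearDownClass', 'assertGreater'):
    assert METHOD_RGX.match(name)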
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
......
1.8.7-p371
\ No newline at end of file
1.9.3-p374
......@@ -86,12 +86,14 @@ def signup(request):
csrf_token = csrf(request)['csrf_token']
return render_to_response('signup.html', {'csrf': csrf_token})
def old_login_redirect(request):
'''
Redirect to the active login url.
'''
return redirect('login', permanent=True)
@ssl_login_shortcut
@ensure_csrf_cookie
def login_page(request):
......@@ -104,6 +106,7 @@ def login_page(request):
'forgot_password_link': "//{base}/#forgot-password-modal".format(base=settings.LMS_BASE),
})
def howitworks(request):
if request.user.is_authenticated():
return index(request)
......@@ -112,6 +115,7 @@ def howitworks(request):
# ==== Views for any logged-in user ==================================
@login_required
@ensure_csrf_cookie
def index(request):
......@@ -145,6 +149,7 @@ def index(request):
# ==== Views with per-item permissions================================
def has_access(user, location, role=STAFF_ROLE_NAME):
'''
Return True if the user is allowed to access this piece of data
......@@ -393,6 +398,7 @@ def preview_component(request, location):
'editor': wrap_xmodule(component.get_html, component, 'xmodule_edit.html')(),
})
@expect_json
@login_required
@ensure_csrf_cookie
......@@ -636,6 +642,17 @@ def delete_item(request):
if item.location.revision is None and item.location.category == 'vertical' and delete_all_versions:
modulestore('direct').delete_item(item.location)
# cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
if delete_all_versions:
parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
for parent_loc in parent_locs:
parent = modulestore('direct').get_item(parent_loc)
item_url = item_loc.url()
if item_url in parent.definition["children"]:
parent.definition["children"].remove(item_url)
modulestore('direct').update_children(parent.location, parent.definition["children"])
return HttpResponse()
......@@ -709,6 +726,7 @@ def create_draft(request):
return HttpResponse()
@login_required
@expect_json
def publish_draft(request):
......@@ -738,6 +756,7 @@ def unpublish_unit(request):
return HttpResponse()
@login_required
@expect_json
def clone_item(request):
......@@ -768,8 +787,7 @@ def clone_item(request):
return HttpResponse(json.dumps({'id': dest_location.url()}))
#@login_required
#@ensure_csrf_cookie
def upload_asset(request, org, course, coursename):
'''
cdodge: this method allows for POST uploading of files into the course asset library, which will
......@@ -831,6 +849,7 @@ def upload_asset(request, org, course, coursename):
response['asset_url'] = StaticContent.get_url_path_from_location(content.location)
return response
'''
This view will return all CMS users who are editors for the specified course
'''
......@@ -863,6 +882,7 @@ def create_json_response(errmsg = None):
return resp
'''
This POST-back view will add a user - specified by email - to the list of editors for
the specified course
......@@ -895,6 +915,7 @@ def add_user(request, location):
return create_json_response()
'''
This POST-back view will remove a user - specified by email - from the list of editors for
the specified course
......@@ -926,6 +947,7 @@ def remove_user(request, location):
def landing(request, org, course, coursename):
return render_to_response('temp-course-landing.html', {})
@login_required
@ensure_csrf_cookie
def static_pages(request, org, course, coursename):
......@@ -1029,6 +1051,7 @@ def edit_tabs(request, org, course, coursename):
'components': components
})
def not_found(request):
return render_to_response('error.html', {'error': '404'})
......@@ -1064,6 +1087,7 @@ def course_info(request, org, course, name, provided_id=None):
'handouts_location': Location(['i4x', org, course, 'course_info', 'handouts']).url()
})
@expect_json
@login_required
@ensure_csrf_cookie
......@@ -1161,6 +1185,7 @@ def get_course_settings(request, org, course, name):
"section": "details"})
})
@login_required
@ensure_csrf_cookie
def course_config_graders_page(request, org, course, name):
......@@ -1184,6 +1209,7 @@ def course_config_graders_page(request, org, course, name):
'course_details': json.dumps(course_details, cls=CourseSettingsEncoder)
})
@login_required
@ensure_csrf_cookie
def course_config_advanced_page(request, org, course, name):
......@@ -1207,6 +1233,7 @@ def course_config_advanced_page(request, org, course, name):
'advanced_dict' : json.dumps(CourseMetadata.fetch(location)),
})
@expect_json
@login_required
@ensure_csrf_cookie
......@@ -1238,6 +1265,7 @@ def course_settings_updates(request, org, course, name, section):
return HttpResponse(json.dumps(manager.update_from_json(request.POST), cls=CourseSettingsEncoder),
mimetype="application/json")
@expect_json
@login_required
@ensure_csrf_cookie
......@@ -1272,7 +1300,7 @@ def course_grader_updates(request, org, course, name, grader_index=None):
return HttpResponse(json.dumps(CourseGradingModel.update_grader_from_json(Location(['i4x', org, course, 'course', name]), request.POST)),
mimetype="application/json")
## NB: expect_json failed on ["key", "key2"] and json payload
@login_required
@ensure_csrf_cookie
......@@ -1363,6 +1391,7 @@ def asset_index(request, org, course, name):
def edge(request):
return render_to_response('university_profiles/edge.html', {})
@login_required
@expect_json
def create_new_course(request):
......@@ -1418,6 +1447,7 @@ def create_new_course(request):
return HttpResponse(json.dumps({'id': new_course.location.url()}))
def initialize_course_tabs(course):
# set up the default tabs
# I've added this because when we add static tabs, the LMS either expects a None for the tabs list or
......@@ -1435,6 +1465,7 @@ def initialize_course_tabs(course):
modulestore('direct').update_metadata(course.location.url(), course.own_metadata)
@ensure_csrf_cookie
@login_required
def import_course(request, org, course, name):
......@@ -1512,6 +1543,7 @@ def import_course(request, org, course, name):
course_module.location.name])
})
@ensure_csrf_cookie
@login_required
def generate_export_course(request, org, course, name):
......@@ -1563,6 +1595,7 @@ def export_course(request, org, course, name):
'successful_import_redirect_url': ''
})
def event(request):
'''
A noop to swallow the analytics call so that cms methods don't spook and poor developers looking at
......
......@@ -10,7 +10,7 @@ class CourseMetadata(object):
'''
# __new_advanced_key__ is used by client not server; so, could argue against it being here
FILTERED_LIST = XModuleDescriptor.system_metadata_fields + ['start', 'end', 'enrollment_start', 'enrollment_end', 'tabs', 'graceperiod', '__new_advanced_key__']
@classmethod
def fetch(cls, course_location):
"""
......@@ -18,17 +18,17 @@ class CourseMetadata(object):
"""
if not isinstance(course_location, Location):
course_location = Location(course_location)
course = {}
descriptor = get_modulestore(course_location).get_item(course_location)
for k, v in descriptor.metadata.iteritems():
if k not in cls.FILTERED_LIST:
course[k] = v
return course
@classmethod
def update_from_json(cls, course_location, jsondict):
"""
......@@ -37,7 +37,7 @@ class CourseMetadata(object):
Ensures none of the fields are in the blacklist.
"""
descriptor = get_modulestore(course_location).get_item(course_location)
dirty = False
for k, v in jsondict.iteritems():
......@@ -45,26 +45,26 @@ class CourseMetadata(object):
if k not in cls.FILTERED_LIST and (k not in descriptor.metadata or descriptor.metadata[k] != v):
dirty = True
descriptor.metadata[k] = v
if dirty:
get_modulestore(course_location).update_metadata(course_location, descriptor.metadata)
# Could just generate and return a course obj w/o doing any db reads, but I put the reads in as a means to confirm
# it persisted correctly
return cls.fetch(course_location)
@classmethod
def delete_key(cls, course_location, payload):
'''
Remove the given metadata key(s) from the course. payload can be a single key or [key..]
'''
descriptor = get_modulestore(course_location).get_item(course_location)
for key in payload['deleteKeys']:
if key in descriptor.metadata:
del descriptor.metadata[key]
get_modulestore(course_location).update_metadata(course_location, descriptor.metadata)
return cls.fetch(course_location)
\ No newline at end of file
......@@ -65,23 +65,23 @@ def is_commentable_cohorted(course_id, commentable_id):
ans))
return ans
def get_cohorted_commentables(course_id):
"""
Given a course_id return a list of strings representing cohorted commentables
"""
course = courses.get_course_by_id(course_id)
if not course.is_cohorted:
# this is the easy case :)
ans = []
else:
else:
ans = course.cohorted_discussions
return ans
def get_cohort(user, course_id):
"""
Given a django User and a course_id, return the user's cohort in that
......@@ -120,7 +120,8 @@ def get_cohort(user, course_id):
return None
choices = course.auto_cohort_groups
if len(choices) == 0:
n = len(choices)
if n == 0:
# Nowhere to put user
log.warning("Course %s is auto-cohorted, but there are no"
" auto_cohort_groups specified",
......@@ -128,12 +129,19 @@ def get_cohort(user, course_id):
return None
# Put user in a random group, creating it if needed
group_name = random.choice(choices)
choice = random.randrange(0, n)
group_name = choices[choice]
# Victor: we are seeing very strange behavior on prod, where almost all users
# end up in the same group. Log at INFO to try to figure out what's going on.
log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
user, group_name, choice))
group, created = CourseUserGroup.objects.get_or_create(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=group_name)
user.course_groups.add(group)
return group
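The swap from random.choice(choices) to an explicit randrange draw is behavior-preserving; picking the index first just makes it available to the diagnostic log line above. A minimal sketch (hypothetical group names):

import random

choices = ["group_0", "group_1", "group_2"]
choice = random.randrange(0, len(choices))  # uniform over 0..len-1, same distribution
group_name = choices[choice]                # ...as random.choice(choices), but loggable
assert group_name in choices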
......
......@@ -6,7 +6,7 @@ from django.test.utils import override_settings
from course_groups.models import CourseUserGroup
from course_groups.cohorts import (get_cohort, get_course_cohorts,
is_commentable_cohorted)
is_commentable_cohorted, get_cohort_by_name)
from xmodule.modulestore.django import modulestore, _MODULESTORES
......@@ -168,7 +168,7 @@ class TestCohorts(django.test.TestCase):
self.assertEquals(get_cohort(user3, course.id), None,
"No groups->no auto-cohorting")
# Now make it different
self.config_course_cohorts(course, [], cohorted=True,
auto_cohort=True,
......@@ -180,6 +180,37 @@ class TestCohorts(django.test.TestCase):
"user2 should still be in originally placed cohort")
def test_auto_cohorting_randomization(self):
"""
Make sure get_cohort() randomizes properly.
"""
course = modulestore().get_course("edX/toy/2012_Fall")
self.assertEqual(course.id, "edX/toy/2012_Fall")
self.assertFalse(course.is_cohorted)
groups = ["group_{0}".format(n) for n in range(5)]
self.config_course_cohorts(course, [], cohorted=True,
auto_cohort=True,
auto_cohort_groups=groups)
# Assign 100 users to cohorts
for i in range(100):
user = User.objects.create(username="test_{0}".format(i),
email="a@b{0}.com".format(i))
get_cohort(user, course.id)
# Now make sure that the assignment was at least vaguely random:
# each cohort should have more than 1, and fewer than 50 students.
# (with 5 groups, probability of 0 users in any group is about
# .8**100= 2.0e-10)
for cohort_name in groups:
cohort = get_cohort_by_name(course.id, cohort_name)
num_users = cohort.users.count()
self.assertGreater(num_users, 1)
self.assertLess(num_users, 50)
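The arithmetic behind the probability comment above: each of the 100 users lands outside a given one of the 5 groups with probability 4/5, so a group ends up empty with probability 0.8 ** 100. A one-line check:

print("{0:.1e}".format(0.8 ** 100))  # -> 2.0e-10, as the comment says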
def test_get_course_cohorts(self):
course1_id = 'a/b/c'
course2_id = 'e/f/g'
......
......@@ -183,7 +183,7 @@ def evaluator(variables, functions, string, cs=False):
# 0.33k or -17
number = (Optional(minus | plus) + inner_number
+ Optional(CaselessLiteral("E") + Optional("-") + number_part)
+ Optional(CaselessLiteral("E") + Optional((plus | minus)) + number_part)
+ Optional(number_suffix))
number = number.setParseAction(number_parse_action) # Convert to number
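The added Optional((plus | minus)) lets the exponent carry an explicit sign, so spellings like "5e+1" now parse. A parser-independent sanity check that the newly accepted forms (the same strings test_exponential_answer exercises further down) all denote one value:

assert all(float(s) == 50.0 for s in ("5e1", "5e+1", "50e0", "500e-1"))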
......
......@@ -366,6 +366,12 @@ class ChoiceGroup(InputTypeBase):
self.choices = self.extract_choices(self.xml)
@classmethod
def get_attributes(cls):
return [Attribute("show_correctness", "always"),
Attribute("submitted_message", "Answer received.")]
def _extra_context(self):
return {'input_type': self.html_input_type,
'choices': self.choices,
......
<form class="choicegroup capa_inputtype" id="inputtype_${id}">
<div class="indicator_container">
% if input_type == 'checkbox' or not value:
% if status == 'unsubmitted':
% if status == 'unsubmitted' or show_correctness == 'never':
<span class="unanswered" style="display:inline-block;" id="status_${id}"></span>
% elif status == 'correct':
<span class="correct" id="status_${id}"></span>
......@@ -26,7 +26,7 @@
else:
correctness = None
%>
% if correctness:
% if correctness and not show_correctness=='never':
class="choicegroup_${correctness}"
% endif
% endif
......@@ -41,4 +41,7 @@
<span id="answer_${id}"></span>
</fieldset>
% if show_correctness == "never" and (value or status not in ['unsubmitted']):
<div class="capa_alert">${submitted_message}</div>
%endif
</form>
......@@ -102,6 +102,8 @@ class ChoiceGroupTest(unittest.TestCase):
'choices': [('foil1', '<text>This is foil One.</text>'),
('foil2', '<text>This is foil Two.</text>'),
('foil3', 'This is foil Three.'), ],
'show_correctness': 'always',
'submitted_message': 'Answer received.',
'name_array_suffix': expected_suffix, # what is this for??
}
......
......@@ -19,7 +19,7 @@ from capa.xqueue_interface import dateformat
class ResponseTest(unittest.TestCase):
""" Base class for tests of capa responses."""
xml_factory_class = None
def setUp(self):
......@@ -43,7 +43,7 @@ class ResponseTest(unittest.TestCase):
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect',
self.assertEqual(result, 'incorrect',
msg="%s should be marked incorrect" % str(input_str))
class MultiChoiceResponseTest(ResponseTest):
......@@ -61,7 +61,7 @@ class MultiChoiceResponseTest(ResponseTest):
def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False],
choice_names=["foil_1", "foil_2", "foil_3"])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_foil_1', 'incorrect')
self.assert_grade(problem, 'choice_foil_2', 'correct')
......@@ -117,7 +117,7 @@ class ImageResponseTest(ResponseTest):
# Anything inside the rectangle (and along the borders) is correct
# Everything else is incorrect
correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
"[10,15]", "[20,15]", "[15,10]", "[15,20]"]
incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
......@@ -259,7 +259,7 @@ class OptionResponseTest(ResponseTest):
xml_factory_class = OptionResponseXMLFactory
def test_grade(self):
problem = self.build_problem(options=["first", "second", "third"],
problem = self.build_problem(options=["first", "second", "third"],
correct_option="second")
# Assert that we get the expected grades
......@@ -374,8 +374,8 @@ class StringResponseTest(ResponseTest):
hints = [("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
("minnesota", "minn", "The state capital of Minnesota is St. Paul")]
problem = self.build_problem(answer="Michigan",
case_sensitive=False,
problem = self.build_problem(answer="Michigan",
case_sensitive=False,
hints=hints)
# We should get a hint for Wisconsin
......@@ -543,7 +543,7 @@ class ChoiceResponseTest(ResponseTest):
xml_factory_class = ChoiceResponseXMLFactory
def test_radio_group_grade(self):
problem = self.build_problem(choice_type='radio',
problem = self.build_problem(choice_type='radio',
choices=[False, True, False])
# Check that we get the expected results
......@@ -601,17 +601,17 @@ class NumericalResponseTest(ResponseTest):
correct_responses = ["4", "4.0", "4.00"]
incorrect_responses = ["", "3.9", "4.1", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_decimal_tolerance(self):
problem = self.build_problem(question_text="What is 2 + 2 approximately?",
explanation="The answer is 4",
answer=4,
tolerance=0.1)
correct_responses = ["4.0", "4.00", "4.09", "3.91"]
correct_responses = ["4.0", "4.00", "4.09", "3.91"]
incorrect_responses = ["", "4.11", "3.89", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_percent_tolerance(self):
problem = self.build_problem(question_text="What is 2 + 2 approximately?",
explanation="The answer is 4",
......@@ -642,6 +642,15 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "2.11", "1.89", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_exponential_answer(self):
problem = self.build_problem(question_text="What 5 * 10?",
explanation="The answer is 50",
answer="5e+1")
correct_responses = ["50", "50.0", "5e1", "5e+1", "50e0", "500e-1"]
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
......@@ -667,7 +676,7 @@ class CustomResponseTest(ResponseTest):
# The code can also set the global overall_message (str)
# to pass a message that applies to the whole response
inline_script = textwrap.dedent("""
messages[0] = "Test Message"
messages[0] = "Test Message"
overall_message = "Overall message"
""")
problem = self.build_problem(answer=inline_script)
......@@ -687,14 +696,14 @@ class CustomResponseTest(ResponseTest):
def test_function_code_single_input(self):
# For function code, we pass in these arguments:
#
#
# 'expect' is the expect attribute of the <customresponse>
#
# 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs)
#
#
# The function should return a dict of the form
#
# The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING }
#
script = textwrap.dedent("""
......@@ -727,7 +736,7 @@ class CustomResponseTest(ResponseTest):
def test_function_code_multiple_input_no_msg(self):
# Check functions also have the option of returning
# a single boolean value
# a single boolean value
# If true, mark all the inputs correct
# If false, mark all the inputs incorrect
script = textwrap.dedent("""
......@@ -736,7 +745,7 @@ class CustomResponseTest(ResponseTest):
answer_given[1] == expect)
""")
problem = self.build_problem(script=script, cfn="check_func",
problem = self.build_problem(script=script, cfn="check_func",
expect="42", num_inputs=2)
# Correct answer -- expect both inputs marked correct
......@@ -764,10 +773,10 @@ class CustomResponseTest(ResponseTest):
# If the <customresponse> has multiple inputs associated with it,
# the check function can return a dict of the form:
#
#
# {'overall_message': STRING,
# 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] }
#
#
# 'overall_message' is displayed at the end of the response
#
# 'input_list' contains dictionaries representing the correctness
......@@ -784,7 +793,7 @@ class CustomResponseTest(ResponseTest):
{'ok': check3, 'msg': 'Feedback 3'} ] }
""")
problem = self.build_problem(script=script,
problem = self.build_problem(script=script,
cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
......@@ -821,11 +830,11 @@ class CustomResponseTest(ResponseTest):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
return {'ok': (check1 and check2 and check3),
return {'ok': (check1 and check2 and check3),
'msg': 'Message text'}
""")
problem = self.build_problem(script=script,
problem = self.build_problem(script=script,
cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
......@@ -862,7 +871,7 @@ class CustomResponseTest(ResponseTest):
# Expect that an exception gets raised when we check the answer
with self.assertRaises(Exception):
problem.grade_answers({'1_2_1': '42'})
def test_invalid_dict_exception(self):
# Construct a script that passes back an invalid dict format
......
......@@ -10,7 +10,6 @@ from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import Comb
log = logging.getLogger("mitx.courseware")
VERSION_TUPLES = (
('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module),
)
......@@ -18,6 +17,7 @@ VERSION_TUPLES = (
DEFAULT_VERSION = 1
DEFAULT_VERSION = str(DEFAULT_VERSION)
class CombinedOpenEndedModule(XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
......@@ -60,7 +60,7 @@ class CombinedOpenEndedModule(XModule):
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
instance_state, shared_state, **kwargs)
instance_state, shared_state, **kwargs)
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
......@@ -129,13 +129,15 @@ class CombinedOpenEndedModule(XModule):
version_index = versions.index(self.version)
static_data = {
'rewrite_content_links' : self.rewrite_content_links,
'rewrite_content_links': self.rewrite_content_links,
}
self.child_descriptor = descriptors[version_index](self.system)
self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']), self.system)
self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']),
self.system)
self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data)
instance_state=json.dumps(instance_state), metadata=self.metadata,
static_data=static_data)
def get_html(self):
return self.child_module.get_html()
......
......@@ -356,7 +356,14 @@ class CourseDescriptor(SequenceDescriptor):
"""
Return the pdf_textbooks config, as a python object, or an empty list if not specified.
"""
return self.metadata.get('pdf_textbooks')
return self.metadata.get('pdf_textbooks', [])
@property
def html_textbooks(self):
"""
Return the html_textbooks config, as a python object, or an empty list if not specified.
"""
return self.metadata.get('html_textbooks', [])
@tabs.setter
def tabs(self, value):
......
......@@ -86,7 +86,10 @@ class FolditModule(XModule):
"""
from foldit.models import Score
return [(e['username'], e['score']) for e in Score.get_tops_n(10)]
leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)]
leaders.sort(key=lambda x: x[1])
return leaders
def get_html(self):
"""
......
......@@ -46,10 +46,10 @@ class XModuleCourseFactory(Factory):
new_course.metadata['start'] = stringify_time(gmtime())
new_course.tabs = [{"type": "courseware"},
{"type": "course_info", "name": "Course Info"},
{"type": "discussion", "name": "Discussion"},
{"type": "wiki", "name": "Wiki"},
{"type": "progress", "name": "Progress"}]
{"type": "course_info", "name": "Course Info"},
{"type": "discussion", "name": "Discussion"},
{"type": "wiki", "name": "Wiki"},
{"type": "progress", "name": "Progress"}]
# Update the data in the mongo datastore
store.update_metadata(new_course.location.url(), new_course.own_metadata)
......
......@@ -119,11 +119,11 @@ def test_equality():
# All the cleaning functions should do the same thing with these
general_pairs = [('', ''),
(' ', '_'),
('abc,', 'abc_'),
('ab fg!@//\\aj', 'ab_fg_aj'),
(u"ab\xA9", "ab_"), # no unicode allowed for now
]
(' ', '_'),
('abc,', 'abc_'),
('ab fg!@//\\aj', 'ab_fg_aj'),
(u"ab\xA9", "ab_"), # no unicode allowed for now
]
def test_clean():
......@@ -131,7 +131,7 @@ def test_clean():
('a:b', 'a_b'), # no colons in non-name components
('a-b', 'a-b'), # dashes ok
('a.b', 'a.b'), # dot ok
]
]
for input, output in pairs:
assert_equals(Location.clean(input), output)
......@@ -141,17 +141,17 @@ def test_clean_for_url_name():
('a:b', 'a:b'), # colons ok in names
('a-b', 'a-b'), # dashes ok in names
('a.b', 'a.b'), # dot ok in names
]
]
for input, output in pairs:
assert_equals(Location.clean_for_url_name(input), output)
def test_clean_for_html():
pairs = general_pairs + [
("a:b", "a_b"), # no colons for html use
("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.)
('a.b', 'a_b'), # no dots.
]
("a:b", "a_b"), # no colons for html use
("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.)
('a.b', 'a_b'), # no dots.
]
for input, output in pairs:
assert_equals(Location.clean_for_html(input), output)
......
......@@ -12,7 +12,7 @@ def check_path_to_location(modulestore):
("edX/toy/2012_Fall", "Overview", "Welcome", None)),
("i4x://edX/toy/chapter/Overview",
("edX/toy/2012_Fall", "Overview", None, None)),
)
)
course_id = "edX/toy/2012_Fall"
for location, expected in should_work:
......@@ -20,6 +20,6 @@ def check_path_to_location(modulestore):
not_found = (
"i4x://edX/toy/video/WelcomeX", "i4x://edX/toy/course/NotHome"
)
)
for location in not_found:
assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location)
......@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config, system):
config['system'] = system
super(ControllerQueryService, self).__init__(config)
......@@ -59,7 +60,7 @@ class ControllerQueryService(GradingService):
def get_flagged_problem_list(self, course_id):
params = {
'course_id': course_id,
}
}
response = self.get(self.flagged_problem_list_url, params)
return response
......@@ -70,20 +71,21 @@ class ControllerQueryService(GradingService):
'student_id': student_id,
'submission_id': submission_id,
'action_type': action_type
}
}
response = self.post(self.take_action_on_flags_url, params)
return response
def convert_seconds_to_human_readable(seconds):
if seconds < 60:
human_string = "{0} seconds".format(seconds)
elif seconds < 60 * 60:
human_string = "{0} minutes".format(round(seconds/60,1))
elif seconds < (24*60*60):
human_string = "{0} hours".format(round(seconds/(60*60),1))
human_string = "{0} minutes".format(round(seconds / 60, 1))
elif seconds < (24 * 60 * 60):
human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
else:
human_string = "{0} days".format(round(seconds/(60*60*24),1))
human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))
eta_string = "{0}".format(human_string)
return eta_string
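Hypothetical usage of the helper as reformatted above (the cleanup is whitespace-only; note that under Python 2 an int argument floor-divides, so pass floats when fractional readouts matter):

convert_seconds_to_human_readable(45)      # '45 seconds'
convert_seconds_to_human_readable(400.0)   # '6.7 minutes'
convert_seconds_to_human_readable(3700.0)  # '1.0 hours'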
......@@ -19,6 +19,7 @@ class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
......@@ -34,8 +35,8 @@ class GradingService(object):
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
{'username': self.username,
'password': self.password, })
response.raise_for_status()
......@@ -47,7 +48,7 @@ class GradingService(object):
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
allow_redirects=allow_redirects)
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
......@@ -63,8 +64,8 @@ class GradingService(object):
"""
log.debug(params)
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
allow_redirects=allow_redirects,
params=params)
try:
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
......@@ -92,7 +93,7 @@ class GradingService(object):
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into staff_grading backend. Response: %s",
r)
r)
# try again
response = operation()
response.raise_for_status()
......
......@@ -5,6 +5,7 @@ to send them to S3.
try:
from PIL import Image
ENABLE_PIL = True
except:
ENABLE_PIL = False
......@@ -51,6 +52,7 @@ class ImageProperties(object):
"""
Class to check properties of an image and to validate if they are allowed.
"""
def __init__(self, image_data):
"""
Initializes class variables
......@@ -92,7 +94,7 @@ class ImageProperties(object):
g = rgb[1]
b = rgb[2]
check_r = (r > 60)
check_g = (r * 0.4) < g < (r * 0.85)
check_g = (r * 0.4) < g < (r * 0.85)
check_b = (r * 0.2) < b < (r * 0.7)
colors_okay = check_r and check_b and check_g
except:
......@@ -141,6 +143,7 @@ class URLProperties(object):
Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
links to the peer grading image functionality of the external grading service.
"""
def __init__(self, url_string):
self.url_string = url_string
......@@ -212,7 +215,7 @@ def run_image_tests(image):
success = image_properties.run_tests()
except:
log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image,"
"or an issue with the deployment configuration of PIL/Pillow")
"or an issue with the deployment configuration of PIL/Pillow")
return success
......@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
return True, public_url
except:
#This is a dev_facing_error
error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
bucketname.lower())
log.error(error_message)
return False, error_message
......
......@@ -10,7 +10,7 @@ import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from xmodule.capa_module import ComplexEncoder
from xmodule.capa_module import ComplexEncoder
from xmodule.editing_module import EditingDescriptor
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
......@@ -77,7 +77,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(self.latest_answer(), system)
self.created = False
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
......@@ -104,7 +103,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# response types)
except (TypeError, ValueError):
#This is a dev_facing_error
log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
log.exception(
"Grader payload from external open ended grading server is not a json object! Object: {0}".format(
grader_payload))
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
......@@ -148,7 +149,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
#This is a student_facing_error
return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
return {'success': False,
'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
tag)}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
......@@ -188,7 +191,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
body=json.dumps(contents))
#Convert error to a success value
success = True
......@@ -222,8 +225,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.history)))
xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.queue_name)
lms_key=queuekey,
queue_name=self.queue_name)
contents = self.payload.copy()
......@@ -241,7 +244,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
......@@ -266,7 +269,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return True
def get_answers(self):
"""
Gets and shows the answer for this problem.
......@@ -300,7 +302,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = { # These go at the start of the feedback
priorities = {# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
......@@ -400,7 +402,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not response_items['success']:
return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
{'errors': feedback})
{'errors': feedback})
feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
'grader_type': response_items['grader_type'],
......@@ -411,7 +413,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return feedback_template, rubric_scores
def _parse_score_msg(self, score_msg, system, join_feedback=True):
"""
Grader reply is a JSON-dump of the following dict
......@@ -437,13 +438,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': False,
'score': 0,
'feedback': '',
'rubric_scores' : [[0]],
'grader_types' : [''],
'feedback_items' : [''],
'feedback_dicts' : [{}],
'grader_ids' : [0],
'submission_ids' : [0],
}
'rubric_scores': [[0]],
'grader_types': [''],
'feedback_items': [''],
'feedback_dicts': [{}],
'grader_ids': [0],
'submission_ids': [0],
}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
......@@ -470,7 +471,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
log.error(error_message)
fail['feedback'] = error_message
return fail
#This is to support peer grading
#This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
......@@ -527,12 +528,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': True,
'score': score,
'feedback': feedback,
'rubric_scores' : rubric_scores,
'grader_types' : grader_types,
'feedback_items' : feedback_items,
'feedback_dicts' : feedback_dicts,
'grader_ids' : grader_ids,
'submission_ids' : submission_ids,
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
}
def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
......@@ -545,7 +546,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return ""
feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system,
join_feedback=join_feedback)
join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
......@@ -585,7 +586,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
......@@ -679,7 +680,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
correct = ""
previous_answer = self.initial_display
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
......@@ -692,7 +692,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
'eta_message' : eta_string,
'eta_message': eta_string,
}
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
......@@ -723,7 +723,9 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
......
......@@ -74,7 +74,7 @@ class OpenEndedChild(object):
'done': 'Done',
}
def __init__(self, system, location, definition, descriptor, static_data,
def __init__(self, system, location, definition, descriptor, static_data,
instance_state=None, shared_state=None, **kwargs):
# Load instance state
if instance_state is not None:
......@@ -108,15 +108,14 @@ class OpenEndedChild(object):
self._max_score = static_data['max_score']
if system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
system)
else:
self.peer_gs = MockPeerGradingService()
self.controller_qs = None
self.controller_qs = None
self.system = system
self.location_string = location
try:
self.location_string = self.location_string.url()
......@@ -152,7 +151,8 @@ class OpenEndedChild(object):
return True, {
'success': False,
#This is a student_facing_error
'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.attempts, self.max_attempts)
'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
self.attempts, self.max_attempts)
}
else:
return False, {}
......@@ -180,8 +180,8 @@ class OpenEndedChild(object):
try:
answer = autolink_html(answer)
cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
clean_html = cleaner.clean_html(answer)
clean_html = re.sub(r'</p>$', '', re.sub(r'^<p>', '', clean_html))
except:
......@@ -282,7 +282,7 @@ class OpenEndedChild(object):
"""
#This is a dev_facing_error
log.warning("Open ended child state out sync. state: %r, get: %r. %s",
self.state, get, msg)
self.state, get, msg)
#This is a student_facing_error
return {'success': False,
'error': 'The problem state got out-of-sync. Please try reloading the page.'}
......@@ -308,7 +308,7 @@ class OpenEndedChild(object):
@return: Boolean correct.
"""
correct = False
if(isinstance(score, (int, long, float, complex))):
if (isinstance(score, (int, long, float, complex))):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
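A worked instance of the 0.66 threshold above (hypothetical scores):

assert 2 / 3.0 >= 0.66        # 2 out of a max score of 3 counts as correct
assert not (1 / 2.0 >= 0.66)  # 1 out of 2 does not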
......@@ -342,7 +342,8 @@ class OpenEndedChild(object):
try:
image_data.seek(0)
success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
self.s3_interface)
except:
log.exception("Could not upload image to S3.")
......@@ -404,9 +405,9 @@ class OpenEndedChild(object):
#In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
#a config issue (development vs deployment). For now, just treat this as a "success"
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
"but the image was not able to be uploaded to S3. This could indicate a config"
"issue with this deployment, but it could also indicate a problem with S3 or with the"
"student image itself.")
"but the image was not able to be uploaded to S3. This could indicate a config"
"issue with this deployment, but it could also indicate a problem with S3 or with the"
"student image itself.")
overall_success = True
elif not has_file_to_upload:
#If there is no file to upload, probably the student has embedded the link in the answer text
......@@ -445,7 +446,7 @@ class OpenEndedChild(object):
response = {}
#This is a student_facing_error
error_string = ("You need to peer grade {0} more in order to make another submission. "
"You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
"You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
try:
response = self.peer_gs.get_data_for_location(self.location_string, student_id)
count_graded = response['count_graded']
......@@ -454,16 +455,18 @@ class OpenEndedChild(object):
success = True
except:
#This is a dev_facing_error
log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
log.error("Could not contact external open ended graders for location {0} and student {1}".format(
self.location_string, student_id))
#This is a student_facing_error
error_message = "Could not contact the graders. Please notify course staff."
return success, allowed_to_submit, error_message
if count_graded>=count_required:
if count_graded >= count_required:
return success, allowed_to_submit, ""
else:
allowed_to_submit = False
#This is a student_facing_error
error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
error_message = error_string.format(count_required - count_graded, count_graded, count_required,
student_sub_count)
return success, allowed_to_submit, error_message
def get_eta(self):
......@@ -478,7 +481,7 @@ class OpenEndedChild(object):
success = response['success']
if isinstance(success, basestring):
success = (success.lower()=="true")
success = (success.lower() == "true")
if success:
eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
......@@ -487,6 +490,3 @@ class OpenEndedChild(object):
eta_string = ""
return eta_string
......@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
def __init__(self, config, system):
config['system'] = system
super(PeerGradingService, self).__init__(config)
......@@ -36,10 +37,11 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
{'location': problem_location, 'grader_id': grader_id})
return self.try_to_decode(self._render_rubric(response))
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
submission_flagged):
data = {'grader_id': grader_id,
'submission_id': submission_id,
'score': score,
......@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
pass
return text
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
......@@ -122,7 +125,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
calibration_essay_id, submission_key, score,
calibration_essay_id, submission_key, score,
feedback, rubric_scores):
return {'success': True, 'actual_score': 2}
......
......@@ -73,7 +73,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
return html
def handle_ajax(self, dispatch, get, system):
"""
This is called by courseware.module_render, to handle an AJAX call.
......@@ -95,7 +94,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
......@@ -159,7 +158,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
def save_answer(self, get, system):
"""
After the answer is submitted, show the rubric.
......@@ -224,7 +222,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
score = int(get['assessment'])
score_list = get.getlist('score_list[]')
for i in xrange(0,len(score_list)):
for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
#This is a dev_facing_error
......@@ -268,7 +266,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'allow_reset': self._allow_reset()}
def latest_post_assessment(self, system):
latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
try:
rubric_scores = json.loads(latest_post_assessment)
except:
......@@ -305,7 +303,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
......
......@@ -10,8 +10,8 @@ from . import test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
class SelfAssessmentTest(unittest.TestCase):
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
......@@ -24,7 +24,7 @@ class SelfAssessmentTest(unittest.TestCase):
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
}
location = Location(["i4x", "edX", "sa_test", "selfassessment",
"SampleQuestion"])
......@@ -41,22 +41,22 @@ class SelfAssessmentTest(unittest.TestCase):
'attempts': 2})
static_data = {
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks' : False,
}
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
}
self.module = SelfAssessmentModule(test_system(), self.location,
self.definition, self.descriptor,
static_data,
state, metadata=self.metadata)
self.definition, self.descriptor,
static_data,
state, metadata=self.metadata)
def test_get_html(self):
html = self.module.get_html(self.module.system)
......@@ -64,14 +64,15 @@ class SelfAssessmentTest(unittest.TestCase):
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
def get_fake_item(name):
return responses[name]
def get_data_for_location(self,location,student):
def get_data_for_location(self, location, student):
return {
'count_graded' : 0,
'count_required' : 0,
'student_sub_count': 0,
'count_graded': 0,
'count_required': 0,
'student_sub_count': 0,
}
mock_query_dict = MagicMock()
......@@ -82,20 +83,19 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"},
self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'},
self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
......
OPEN_ENDED_GRADING_INTERFACE = {
'url' : 'http://127.0.0.1:3033/',
'username' : 'incorrect',
'password' : 'incorrect',
'staff_grading' : 'staff_grading',
'peer_grading' : 'peer_grading',
'grading_controller' : 'grading_controller'
'url': 'http://127.0.0.1:3033/',
'username': 'incorrect',
'password': 'incorrect',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
S3_INTERFACE = {
'aws_access_key' : "",
'aws_secret_key' : "",
"aws_bucket_name" : "",
'aws_access_key': "",
'aws_secret_key': "",
"aws_bucket_name": "",
}
\ No newline at end of file
#! /bin/bash
set -e
set -x
git remote prune origin
# Reset the submodule, in case it changed
git submodule foreach 'git reset --hard HEAD'
# Set the IO encoding to UTF-8 so that askbot will start
export PYTHONIOENCODING=UTF-8
rake clobber
rake pep8 || echo "pep8 failed, continuing"
rake pylint || echo "pylint failed, continuing"
......@@ -32,18 +32,22 @@ if [ ! -d /mnt/virtualenvs/"$JOB_NAME" ]; then
virtualenv /mnt/virtualenvs/"$JOB_NAME"
fi
export PIP_DOWNLOAD_CACHE=/mnt/pip-cache
source /mnt/virtualenvs/"$JOB_NAME"/bin/activate
pip install -q -r pre-requirements.txt
pip install -q -r test-requirements.txt
yes w | pip install -q -r requirements.txt
yes w | pip install -q -r test-requirements.txt -r requirements.txt
rake clobber
rake pep8
rake pylint
TESTS_FAILED=0
rake test_cms[false] || TESTS_FAILED=1
rake test_lms[false] || TESTS_FAILED=1
rake test_common/lib/capa || TESTS_FAILED=1
rake test_common/lib/xmodule || TESTS_FAILED=1
# Don't run the lms jasmine tests for now because
# Don't run the lms jasmine tests for now because
# they mostly all fail anyhow
# rake phantomjs_jasmine_lms || true
rake phantomjs_jasmine_cms || TESTS_FAILED=1
......
'''
This is a one-off command aimed at fixing a temporary problem encountered where partial credit was awarded for
code problems, but the resulting score (or grade) was mistakenly set to zero because of a bug in
CorrectMap.get_npoints().
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from courseware.models import StudentModule
from capa.correctmap import CorrectMap
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
The fix here is to recalculate the score/grade based on the partial credit.
To narrow down the set of problems that might need fixing, the StudentModule
objects to be checked is filtered down to those:
created < '2013-03-08 15:45:00' (the problem must have been answered before the fix was installed,
on Prod and Edge)
modified > '2013-03-07 20:18:00' (the problem must have been visited after the bug was introduced)
state like '%"npoints": 0.%' (the problem must have some form of partial credit).
'''
num_visited = 0
num_changed = 0
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save_changes',
default=False,
help='Persist the changes that were encountered. If not set, no changes are saved.'), )
def fix_studentmodules(self, save_changes):
'''Identify the list of StudentModule objects that might need fixing, and then fix each one'''
modules = StudentModule.objects.filter(modified__gt='2013-03-07 20:18:00',
created__lt='2013-03-08 15:45:00',
state__contains='"npoints": 0.')
for module in modules:
self.fix_studentmodule_grade(module, save_changes)
def fix_studentmodule_grade(self, module, save_changes):
''' Fix the grade assigned to a StudentModule'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
state_dict = json.loads(module_state)
self.num_visited += 1
# LoncapaProblem.get_score() checks student_answers -- if there are none, we will return a grade of 0
# Check that this is the case, but do so sooner, before we do any of the other grading work.
student_answers = state_dict['student_answers']
if (not student_answers) or len(student_answers) == 0:
# we should not have a grade here:
if module.grade != 0:
LOG.error("No answer found but grade {grade} exists for {type} module {id} for student {student} "
"in course {course_id}".format(grade=module.grade,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
else:
LOG.debug("No answer and no grade found for {type} module {id} for student {student} "
"in course {course_id}".format(grade=module.grade,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
# load into a CorrectMap, as done in LoncapaProblem.__init__():
correct_map = CorrectMap()
if 'correct_map' in state_dict:
correct_map.set_dict(state_dict['correct_map'])
# calculate score the way LoncapaProblem.get_score() works, by deferring to
# CorrectMap's get_npoints implementation.
correct = 0
for key in correct_map:
correct += correct_map.get_npoints(key)
if module.grade == correct:
# nothing to change
LOG.debug("Grade matches for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
elif save_changes:
# make the change
LOG.info("Grade changing from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
module.grade = correct
module.save()
self.num_changed += 1
else:
# don't make the change, but log that the change would be made
LOG.info("Grade would change from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
self.num_changed += 1
def handle(self, **options):
'''Handle management command request'''
save_changes = options['save_changes']
LOG.info("Starting run: save_changes = {0}".format(save_changes))
self.fix_studentmodules(save_changes)
LOG.info("Finished run: updating {0} of {1} modules".format(self.num_changed, self.num_visited))
......@@ -130,6 +130,17 @@ def _pdf_textbooks(tab, user, course, active_page):
for index, textbook in enumerate(course.pdf_textbooks)]
return []
def _html_textbooks(tab, user, course, active_page):
"""
Generates one tab per textbook. Only displays if user is authenticated.
"""
if user.is_authenticated():
# since there can be more than one textbook, active_page is e.g. "book/0".
return [CourseTab(textbook['tab_title'], reverse('html_book', args=[course.id, index]),
active_page == "htmltextbook/{0}".format(index))
for index, textbook in enumerate(course.html_textbooks)]
return []
def _staff_grading(tab, user, course, active_page):
if has_access(user, course, 'staff'):
link = reverse('staff_grading', args=[course.id])
......@@ -209,6 +220,7 @@ VALID_TAB_TYPES = {
'external_link': TabImpl(key_checker(['name', 'link']), _external_link),
'textbooks': TabImpl(null_validator, _textbooks),
'pdf_textbooks': TabImpl(null_validator, _pdf_textbooks),
'html_textbooks': TabImpl(null_validator, _html_textbooks),
'progress': TabImpl(need_name, _progress),
'static_tab': TabImpl(key_checker(['name', 'url_slug']), _static_tab),
'peer_grading': TabImpl(null_validator, _peer_grading),
......
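For illustration, a hedged sketch of a course tabs policy that would activate the new type (values are illustrative, not taken from this diff; the 'html_textbooks' entry is validated by null_validator above and so needs no extra keys, while 'static_tab' is shown with the keys its key_checker requires):

# Hypothetical tabs policy excerpt:
tabs = [
    {"type": "textbooks"},
    {"type": "html_textbooks"},  # renders one tab per entry in course.html_textbooks
    {"type": "static_tab", "name": "Syllabus", "url_slug": "syllabus"},
]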
......@@ -18,7 +18,6 @@ import pystache_custom as pystache
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.search import path_to_location
log = logging.getLogger(__name__)
......@@ -166,7 +165,6 @@ def initialize_discussion_info(course):
# get all discussion models within this course_id
all_modules = modulestore().get_items(['i4x', course.location.org, course.location.course, 'discussion', None], course_id=course_id)
path_to_locations = {}
for module in all_modules:
skip_module = False
for key in ('id', 'discussion_category', 'for'):
......@@ -174,14 +172,6 @@ def initialize_discussion_info(course):
log.warning("Required key '%s' not in discussion %s, leaving out of category map" % (key, module.location))
skip_module = True
# cdodge: pre-compute the path_to_location. Note this can throw an exception for any
# dangling discussion modules
try:
path_to_locations[module.location] = path_to_location(modulestore(), course.id, module.location)
except NoPathToItem:
log.warning("Could not compute path_to_location for {0}. Perhaps this is an orphaned discussion module?!? Skipping...".format(module.location))
skip_module = True
if skip_module:
continue
......@@ -246,7 +236,6 @@ def initialize_discussion_info(course):
_DISCUSSIONINFO[course.id]['id_map'] = discussion_id_map
_DISCUSSIONINFO[course.id]['category_map'] = category_map
_DISCUSSIONINFO[course.id]['timestamp'] = datetime.now()
_DISCUSSIONINFO[course.id]['path_to_location'] = path_to_locations
class JsonResponse(HttpResponse):
......@@ -403,21 +392,8 @@ def get_courseware_context(content, course):
location = id_map[id]["location"].url()
title = id_map[id]["title"]
# cdodge: did we pre-compute, if so, then let's use that rather than recomputing
if 'path_to_location' in _DISCUSSIONINFO[course.id] and location in _DISCUSSIONINFO[course.id]['path_to_location']:
(course_id, chapter, section, position) = _DISCUSSIONINFO[course.id]['path_to_location'][location]
else:
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course.id, location)
except NoPathToItem:
# Object is not in the graph any longer, let's just get path to the base of the course
# so that we can at least return something to the caller
(course_id, chapter, section, position) = path_to_location(modulestore(), course.id, course.location)
url = reverse('courseware_position', kwargs={"course_id":course_id,
"chapter":chapter,
"section":section,
"position":position})
url = reverse('jump_to', kwargs={"course_id":course.location.course_id,
"location": location})
content_info = {"courseware_url": url, "courseware_title": title}
return content_info
......
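In short, the change above stops resolving a full (chapter, section, position) path when building discussion links and instead reverses the jump_to view, which resolves the location at click time. A minimal sketch of the new call, with illustrative values:

from django.core.urlresolvers import reverse

# Illustrative ids -- the exact URL shape depends on the jump_to route:
url = reverse('jump_to', kwargs={"course_id": "MITx/3.091x/2012_Fall",
                                 "location": "i4x://MITx/3.091x/discussion/welcome"})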
......@@ -59,7 +59,7 @@ class Score(models.Model):
scores = Score.objects \
.filter(puzzle_id__in=puzzles) \
.annotate(total_score=models.Sum('best_score')) \
.order_by('-total_score')[:n]
.order_by('total_score')[:n]
num = len(puzzles)
return [{'username': s.user.username,
......
......@@ -143,11 +143,12 @@ class FolditTestCase(TestCase):
def test_SetPlayerPuzzleScores_manyplayers(self):
"""
Check that when we send scores from multiple users, the correct order
of scores is displayed.
of scores is displayed. Note that, before being processed by
display_score, lower scores are better.
"""
puzzle_id = ['1']
player1_score = 0.07
player2_score = 0.08
player1_score = 0.08
player2_score = 0.02
response1 = self.make_puzzle_score_request(puzzle_id, player1_score,
self.user)
......@@ -164,8 +165,12 @@ class FolditTestCase(TestCase):
self.assertEqual(len(top_10), 2)
# Top score should be player2_score. Second should be player1_score
self.assertEqual(top_10[0]['score'], Score.display_score(player2_score))
self.assertEqual(top_10[1]['score'], Score.display_score(player1_score))
self.assertAlmostEqual(top_10[0]['score'],
Score.display_score(player2_score),
delta=0.5)
self.assertAlmostEqual(top_10[1]['score'],
Score.display_score(player1_score),
delta=0.5)
# Top score user should be self.user2.username
self.assertEqual(top_10[0]['username'], self.user2.username)
......
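A minimal sketch of the invariant these tests now exercise, assuming (per the docstring) that raw Foldit scores are lower-is-better until display_score rescales them:

# Ascending order -- matching order_by('total_score') above -- ranks the
# best (lowest) raw score first:
raw_scores = {'player1': 0.08, 'player2': 0.02}
leaderboard = sorted(raw_scores.items(), key=lambda item: item[1])
assert leaderboard[0][0] == 'player2'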
......@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
)
)
def staff_grading_notifications(course, user):
......@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info("Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id, student_id))
log.info(
"Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -80,7 +82,9 @@ def peer_grading_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info("Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id, student_id))
log.info(
"Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -105,7 +109,9 @@ def combined_notifications(course, user):
return notification_dict
min_time_to_query = user.last_login
last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified')
last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
modified__gt=min_time_to_query).values('modified').order_by(
'-modified')
last_module_seen_count = last_module_seen.count()
if last_module_seen_count > 0:
......@@ -117,7 +123,8 @@ def combined_notifications(course, user):
img_path = ""
try:
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed)
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
last_time_viewed)
log.debug(controller_response)
notifications = json.loads(controller_response)
if notifications['success']:
......@@ -127,7 +134,9 @@ def combined_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.exception("Problem with getting notifications from controller query service for course {0} user {1}.".format(course_id, student_id))
log.exception(
"Problem with getting notifications from controller query service for course {0} user {1}.".format(
course_id, student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -151,7 +160,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):
def create_key_name(student_id, course_id, notification_type):
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id)
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
student=student_id)
return key_name
......
......@@ -15,6 +15,7 @@ class StaffGrading(object):
"""
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
"""
def __init__(self, course):
self.course = course
......
......@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'
class MockStaffGradingService(object):
"""
A simple mockup of a staff grading service, for testing.
"""
def __init__(self):
self.cnt = 0
......@@ -43,15 +45,18 @@ class MockStaffGradingService(object):
def get_problem_list(self, course_id, grader_id):
self.cnt += 1
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
return self.get_next(course_id, 'fake location', grader_id)
......@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
config['system'] = ModuleSystem(None, None, None, render_to_string, None)
super(StaffGradingService, self).__init__(config)
......@@ -109,12 +115,13 @@ class StaffGradingService(GradingService):
GradingServiceError: something went wrong with the connection.
"""
response = self.get(self.get_next_url,
params={'location': location,
'grader_id': grader_id})
params={'location': location,
'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
"""
Save a score and feedback for a submission.
......@@ -253,14 +260,14 @@ def get_problem_list(request, course_id):
try:
response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user))
return HttpResponse(response,
mimetype="application/json")
mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url))
.format(staff_grading_service().url))
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE}))
'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
......@@ -272,7 +279,7 @@ def _get_next(course_id, grader_id, location):
except GradingServiceError:
#This is a dev facing error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url))
.format(staff_grading_service().url))
#This is a staff_facing_error
return json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE})
......@@ -297,7 +304,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
required = set(['score', 'feedback', 'submission_id', 'location','submission_flagged', 'rubric_scores[]'])
required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
......@@ -307,22 +314,23 @@ def save_grade(request, course_id):
grader_id = unique_id_for_user(request.user)
p = request.POST
location = p['location']
skipped = 'skipped' in p
skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'),
p['submission_flagged'])
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'),
p['submission_flagged'])
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(request, course_id))
log.exception(
"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
request, course_id))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
......@@ -330,13 +338,16 @@ def save_grade(request, course_id):
result = json.loads(result_json)
except ValueError:
#This is a dev_facing_error
log.exception("save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(result_json))
log.exception(
"save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
result_json))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False):
#This is a dev_facing_error
log.warning('Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
log.warning(
'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade.
......
......@@ -7,7 +7,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
from django.test import TestCase
from open_ended_grading import staff_grading_service
from xmodule.open_ended_grading_classes import peer_grading_service
from xmodule import peer_grading_module
from xmodule import peer_grading_module
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
......@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
import logging
log = logging.getLogger(__name__)
from django.test.utils import override_settings
from django.http import QueryDict
......@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
......@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id)
def make_instructor(course):
group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name)
......@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
......@@ -148,11 +152,12 @@ class TestPeerGradingService(ct.PageLoader):
self.mock_service = peer_grading_service.MockPeerGradingService()
self.system = ModuleSystem(location, None, None, render_to_string, None,
s3_interface = test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
s3_interface=test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
)
self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system)
self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "<peergrading/>", self.descriptor)
self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "<peergrading/>",
self.descriptor)
self.peer_module.peer_gs = self.mock_service
self.logout()
......@@ -175,18 +180,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_grade_success(self):
data = {
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
qdict = MagicMock()
def fake_get_item(key):
return data[key]
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
......@@ -237,18 +244,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_calibration_essay_success(self):
data = {
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
qdict = MagicMock()
def fake_get_item(key):
return data[key]
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
......
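The MagicMock wiring in the tests above fakes Django's QueryDict; a minimal standalone sketch of the same pattern:

from mock import MagicMock

data = {'score': 2, 'rubric_scores[]': [0, 0]}
qdict = MagicMock()
qdict.__getitem__.side_effect = data.__getitem__  # qdict['score'] -> 2
qdict.getlist = data.__getitem__                  # qdict.getlist('rubric_scores[]') -> [0, 0]
qdict.keys = data.keys                            # qdict.keys() -> the dict's keys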
......@@ -50,22 +50,24 @@ def _reverse_without_slash(url_name, course_id):
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
DESCRIPTION_DICT = {
'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
}
'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
}
ALERT_DICT = {
'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review"
}
'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review"
}
STUDENT_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
......@@ -92,10 +94,10 @@ def peer_grading(request, course_id):
#Get the current course
course = get_course_with_access(request.user, course_id, 'load')
course_id_parts = course.id.split("/")
false_dict = [False,"False", "false", "FALSE"]
false_dict = [False, "False", "false", "FALSE"]
#Reverse the base course url
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
#TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed
#to get_items is called revision. Is this the same as run?
......@@ -147,7 +149,7 @@ def student_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
......@@ -174,7 +176,7 @@ def student_problem_list(request, course_id):
except:
#This is a student_facing_error
eta_string = "Error getting ETA."
problem_list[i].update({'eta_string' : eta_string})
problem_list[i].update({'eta_string': eta_string})
except GradingServiceError:
#This is a student_facing_error
......@@ -215,7 +217,7 @@ def flagged_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_flagged_problem_list(course_id)
......@@ -243,14 +245,14 @@ def flagged_problem_list(request, course_id):
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id)
context = {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
......@@ -305,7 +307,7 @@ def combined_notifications(request, course_id):
}
return render_to_response('open_ended_problems/combined_notifications.html',
combined_dict
combined_dict
)
......@@ -318,13 +320,14 @@ def take_action_on_flags(request, course_id):
if request.method != 'POST':
raise Http404
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(key)}),
mimetype="application/json")
return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
key)}),
mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
......@@ -338,5 +341,7 @@ def take_action_on_flags(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(submission_id, action_type, grader_id))
log.exception(
"Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, grader_id))
return _err_response(STAFF_ERROR_MESSAGE)
from lxml import etree
# from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from mitxmako.shortcuts import render_to_response
from courseware.access import has_access
......@@ -15,6 +15,8 @@ def index(request, course_id, book_index, page=None):
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
......@@ -40,6 +42,8 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None):
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.pdf_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
def remap_static_url(original_url, course):
......@@ -57,13 +61,49 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None):
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
entry['url'] = remap_static_url(entry['url'], course)
return render_to_response('static_pdfbook.html',
{'book_index': book_index,
'course': course,
{'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
'staff_access': staff_access})
@login_required
def html_index(request, course_id, book_index, chapter=None, anchor_id=None):
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.html_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
def remap_static_url(original_url, course):
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
course.metadata['data_dir'],
course_namespace=course.location
)
# strip off the quotes again...
return output_url[1:-1]
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
return render_to_response('static_htmlbook.html',
{'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'anchor_id': anchor_id,
'staff_access': staff_access})
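The quote-wrapping in remap_static_url above exists because replace_static_urls rewrites /static/ references that appear inside quotes; a hedged trace of one call (the rewritten form is illustrative and depends on the course's data_dir):

# Illustrative only:
#   remap_static_url('/static/book/ch1.html', course)
#   1. wraps the URL:  "'/static/book/ch1.html'"
#   2. rewrites the quoted reference via replace_static_urls(...)
#   3. strips the added quotes from the result before returning it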
......@@ -158,6 +158,19 @@ div.book-wrapper {
img {
max-width: 100%;
}
div {
text-align: left;
line-height: 1.6em;
margin-left: 5px;
margin-right: 5px;
margin-top: 5px;
margin-bottom: 5px;
.Paragraph, h2 {
margin-top: 10px;
}
}
}
}
......
<%inherit file="main.html" />
<%namespace name='static' file='static_content.html'/>
<%block name="title"><title>${course.number} Textbook</title>
</%block>
<%block name="headextra">
<%static:css group='course'/>
<%static:js group='courseware'/>
</%block>
<%block name="js_extra">
<script type="text/javascript">
(function($) {
$.fn.myHTMLViewer = function(options) {
var urlToLoad = null;
if (options.url) {
urlToLoad = options.url;
}
var chapterUrls = null;
if (options.chapters) {
chapterUrls = options.chapters;
}
var chapterToLoad = 1;
if (options.chapterNum) {
// TODO: this should only be specified if there are
// chapters, and it should be in-bounds.
chapterToLoad = options.chapterNum;
}
var anchorToLoad = null;
if (options.anchor_id) {
anchorToLoad = options.anchor_id;
}
var loadUrl = function htmlViewLoadUrl(url, anchorId) {
// clear out previous load, if any:
var parentElement = document.getElementById('bookpage');
while (parentElement.hasChildNodes())
parentElement.removeChild(parentElement.lastChild);
// load new URL in:
$('#bookpage').load(url);
// if there is an anchor set, then go to that location:
if (anchorId != null) {
// TODO: add implementation....
}
};
var loadChapterUrl = function htmlViewLoadChapterUrl(chapterNum, anchorId) {
if (chapterNum < 1 || chapterNum > chapterUrls.length) {
return;
}
var chapterUrl = chapterUrls[chapterNum-1];
loadUrl(chapterUrl, anchorId);
};
// define navigation links for chapters:
if (chapterUrls != null) {
var loadChapterUrlHelper = function(i) {
return function(event) {
// when opening a new chapter, always open to the top:
loadChapterUrl(i, null);
};
};
for (var index = 1; index <= chapterUrls.length; index += 1) {
$("#htmlchapter-" + index).click(loadChapterUrlHelper(index));
}
}
// finally, load the appropriate url/page
if (urlToLoad != null) {
loadUrl(urlToLoad, anchorToLoad);
} else {
loadChapterUrl(chapterToLoad, anchorToLoad);
}
}
})(jQuery);
$(document).ready(function() {
var options = {};
%if 'url' in textbook:
options.url = "${textbook['url']}";
%endif
%if 'chapters' in textbook:
var chptrs = [];
%for chap in textbook['chapters']:
chptrs.push("${chap['url']}");
%endfor
options.chapters = chptrs;
%endif
%if chapter is not None:
options.chapterNum = ${chapter};
%endif
%if anchor_id is not None:
options.anchor_id = "${anchor_id}";
%endif
$('#outerContainer').myHTMLViewer(options);
});
</script>
</%block>
<%include file="/courseware/course_navigation.html" args="active_page='htmltextbook/{0}'.format(book_index)" />
<div id="outerContainer">
<div id="mainContainer" class="book-wrapper">
%if 'chapters' in textbook:
<section aria-label="Textbook Navigation" class="book-sidebar">
<ul id="booknav" class="treeview-booknav">
<%def name="print_entry(entry, index_value)">
<li id="htmlchapter-${index_value}">
<a class="chapter">
${entry.get('title')}
</a>
</li>
</%def>
%for (index, entry) in enumerate(textbook['chapters']):
${print_entry(entry, index+1)}
% endfor
</ul>
</section>
%endif
<section id="viewerContainer" class="book">
<section class="page">
<div id="bookpage" />
</section>
</section>
</div>
</div>
......@@ -280,6 +280,15 @@ if settings.COURSEWARE_ENABLED:
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/pdfbook/(?P<book_index>[^/]*)/chapter/(?P<chapter>[^/]*)/(?P<page>[^/]*)$',
'staticbook.views.pdf_index'),
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/htmlbook/(?P<book_index>[^/]*)/$',
'staticbook.views.html_index', name="html_book"),
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/htmlbook/(?P<book_index>[^/]*)/chapter/(?P<chapter>[^/]*)/$',
'staticbook.views.html_index'),
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/htmlbook/(?P<book_index>[^/]*)/chapter/(?P<chapter>[^/]*)/(?P<anchor_id>[^/]*)/$',
'staticbook.views.html_index'),
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/htmlbook/(?P<book_index>[^/]*)/(?P<anchor_id>[^/]*)/$',
'staticbook.views.html_index'),
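# Illustrative resolution of the htmlbook routes above (course id and
# segment values are hypothetical; only the first route is named):
#   /courses/MITx/3.091x/2012_Fall/htmlbook/0/               -> html_index(book_index='0')
#   /courses/MITx/3.091x/2012_Fall/htmlbook/0/chapter/2/     -> html_index(book_index='0', chapter='2')
#   /courses/MITx/3.091x/2012_Fall/htmlbook/0/chapter/2/s1/  -> html_index(book_index='0', chapter='2', anchor_id='s1')
#   /courses/MITx/3.091x/2012_Fall/htmlbook/0/intro/         -> html_index(book_index='0', anchor_id='intro')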
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/courseware/?$',
'courseware.views.index', name="courseware"),
url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/courseware/(?P<chapter>[^/]*)/$',
......
django==1.4.3
pip
numpy==1.6.2
scipy==0.11.0
Markdown==2.2.1
pygments==1.5
lxml==3.0.1
boto==2.6.0
mako==0.7.3
python-memcached==1.48
python-openid==2.2.5
path.py
django_debug_toolbar
fs==0.4.0
beautifulsoup==3.2.1
-r repo-requirements.txt
beautifulsoup4==4.1.3
feedparser==5.1.3
requests==0.14.2
http://sympy.googlecode.com/files/sympy-0.7.1.tar.gz
newrelic==1.8.0.13
glob2==0.3
pymongo==2.4.1
django_nose==1.1
nosexcover==1.0.7
rednose==0.3.3
GitPython==0.3.2.RC1
mock==0.8.0
PyYAML==3.10
South==0.7.6
pytz==2012h
beautifulsoup==3.2.1
boto==2.6.0
django-celery==3.0.11
django-countries==1.5
django-kombu==0.9.4
django-debug-toolbar-mongo
django-followit==0.0.3
django-jasmine==0.3.2
django-keyedcache==1.4-6
django-kombu==0.9.4
django-mako==0.1.5pre
django-masquerade==0.1.6
django-mptt==0.5.5
django-openid-auth==0.4
django-robots==0.9.1
django-sekizai==0.6.1
django-ses==0.4.1
django-storages==1.1.5
django-threaded-multihost==1.4-1
django-sekizai==0.6.1
django-mptt==0.5.5
sorl-thumbnail==11.12
networkx==1.7
pygraphviz==1.1
-r repo-requirements.txt
nltk==2.0.4
django-debug-toolbar-mongo
django==1.4.3
django_debug_toolbar
django_nose==1.1
dogapi==1.2.1
dogstatsd-python==0.2.1
MySQL-python==1.2.4c1
sphinx==1.1.3
factory_boy
Shapely==1.2.16
feedparser==5.1.3
fs==0.4.0
GitPython==0.3.2.RC1
glob2==0.3
http://sympy.googlecode.com/files/sympy-0.7.1.tar.gz
ipython==0.13.1
xmltodict==0.4.1
lxml==3.0.1
mako==0.7.3
Markdown==2.2.1
mock==0.8.0
MySQL-python==1.2.4c1
networkx==1.7
newrelic==1.8.0.13
nltk==2.0.4
nosexcover==1.0.7
numpy==1.6.2
paramiko==1.9.0
path.py
Pillow==1.7.8
dogapi==1.2.1
pip
pygments==1.5
pygraphviz==1.1
pymongo==2.4.1
python-memcached==1.48
python-openid==2.2.5
pytz==2012h
PyYAML==3.10
rednose==0.3.3
requests==0.14.2
scipy==0.11.0
Shapely==1.2.16
sorl-thumbnail==11.12
South==0.7.6
sphinx==1.1.3
xmltodict==0.4.1