Commit 6108529c by Usman Khalid

Merge pull request #2001 from edx/waheed/openended-problems-commands-2

Django command to post submissions to ora
parents 7d98231e 57710d72
......@@ -4,7 +4,7 @@ from lxml import etree
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule
from .x_module import XModule, module_attr
from xblock.fields import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
......@@ -510,3 +510,7 @@ class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version, CombinedOpenEndedDescriptor.track_changes])
return non_editable_fields
# Proxy to CombinedOpenEndedModule so that external callers don't have to know if they're working
# with a module or a descriptor
child_module = module_attr('child_module')
......@@ -363,6 +363,32 @@ class CombinedOpenEndedV1Module():
last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0)
self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1)
def create_task(self, task_state, task_xml):
    """Create task object for given task state and task xml.

    Looks up the descriptor and module classes registered for the task's
    root XML tag, parses the XML through the descriptor, and instantiates
    the child module with the stored serialized state.

    Args:
        task_state: serialized (JSON string) state for the task, as held
            in ``self.task_states``.
        task_xml: XML definition string for the task.

    Returns:
        The instantiated child task module (e.g. a self assessment or
        open ended module, depending on the XML tag).
    """
    tag_name = self.get_tag_name(task_xml)
    children = self.child_modules()
    # Both the descriptor and module classes are selected by tag name.
    task_descriptor = children['descriptors'][tag_name](self.system)
    task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)
    task = children['modules'][tag_name](
        self.system,
        self.location,
        task_parsed_xml,
        task_descriptor,
        self.static_data,
        instance_state=task_state,
    )
    return task
def get_task_number(self, task_number):
    """Return task object at task_index.

    Args:
        task_number: zero-based index into ``self.task_states``.

    Returns:
        The task module built from the stored state and XML via
        ``create_task``, or None when ``task_number`` is out of range.
        Negative indices are rejected explicitly; previously they were
        accepted and silently aliased tasks from the end of the list
        through Python's negative indexing.
    """
    # Explicit bounds check (0 <= n < len) instead of the old
    # "len > 0 and n < len" test, which let negative numbers through.
    if 0 <= task_number < len(self.task_states):
        task_state = self.task_states[task_number]
        task_xml = self.task_xml[task_number]
        return self.create_task(task_state, task_xml)
    return None
def reset_task_state(self, message=""):
"""
......
......@@ -18,6 +18,7 @@ from webob.multidict import MultiDict
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
......@@ -500,6 +501,27 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
def test_create_task(self):
    """Tasks built from stored state/XML map to the right module class."""
    combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])

    # First task XML is a self assessment, second an open ended problem.
    first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
    self.assertIsInstance(first_task, SelfAssessmentModule)

    second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
    self.assertIsInstance(second_task, OpenEndedModule)
def test_get_task_number(self):
    """In-range indices return typed tasks; out-of-range returns None."""
    combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])

    first_task = combinedoe.get_task_number(0)
    self.assertIsInstance(first_task, SelfAssessmentModule)

    second_task = combinedoe.get_task_number(1)
    self.assertIsInstance(second_task, OpenEndedModule)

    # Only two tasks exist, so index 2 is out of range.
    third_task = combinedoe.get_task_number(2)
    self.assertIsNone(third_task)
def test_update_task_states(self):
"""
See if we can update the task states properly
......
......@@ -774,6 +774,153 @@ INSTANCE_INCONSISTENT_STATE5 = serialize_open_ended_instance_state("""
}
""")
# State Initial
STATE_INITIAL = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history" : [],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": "A stored answer.",
"version": 1,
"child_history": [],
"max_score": 3,
"child_state": "initial"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "initial",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
STATE_ACCESSING = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history": [
{
"answer": "Here is an answer."
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": null,
"version": 1,
"child_history": [
{
"answer": "Here is an answer."
}
],
"max_score": 3,
"child_state": "assessing"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "assessing",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
STATE_POST_ASSESSMENT = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history": [
{
"answer": "Here is an answer."
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": null,
"version": 1,
"child_history": [
{
"answer": "Here is an answer."
}
],
"max_score": 3,
"post_assessment": {
"feedback" : {
"grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"child_state": "post_assessment"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "done",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
# Task state with self assessment only.
TEST_STATE_SA = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? 
Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"{\\\"submission_id\\\": 1461, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5414, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]
......
......@@ -10,6 +10,7 @@ from courseware.courses import get_course_by_id
from xmodule.modulestore.django import modulestore
from django.core.management.base import BaseCommand
from instructor.utils import DummyRequest
class Command(BaseCommand):
......@@ -37,7 +38,7 @@ class Command(BaseCommand):
if len(args) > 2:
get_raw_scores = args[2].lower() == 'raw'
request = self.DummyRequest()
request = DummyRequest()
try:
course = get_course_by_id(course_id)
except Exception:
......@@ -63,12 +64,3 @@ class Command(BaseCommand):
fp.close()
print "Done: %d records dumped" % len(datatable['data'])
class DummyRequest(object):
META = {}
def __init__(self):
return
def get_host(self):
return 'edx.mit.edu'
def is_secure(self):
return False
"""
Command to manually re-post open ended submissions to the grader.
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from courseware.courses import get_course
from instructor.utils import get_module_for_student
class Command(BaseCommand):
    """
    Command to manually re-post open ended submissions to the grader.

    Reads a course id, a problem location, and a text file of User.id
    values (one per line), then re-posts each listed student's submission
    for the selected task via ``post_submission_for_student``.
    """
    help = ("Usage: openended_post <course_id> <problem_location> <student_ids.txt> --dry-run --task-number=<task_number>\n"
            "The text file should contain a User.id in each line.")

    option_list = BaseCommand.option_list + (
        make_option('-n', '--dry-run',
                    action='store_true', dest='dry_run', default=False,
                    help="Do everything except send the submission to the grader. "),
        make_option('--task-number',
                    type='int', default=0,
                    help="Task number that needs to be submitted."),
    )

    def handle(self, *args, **options):
        """Parse arguments, validate the course/location, and post per student."""
        dry_run = options['dry_run']
        task_number = options['task_number']

        if len(args) == 3:
            course_id = args[0]
            location = args[1]
            # One User.id per line in the supplied text file.
            students_ids = [line.strip() for line in open(args[2])]
        else:
            print self.help
            return

        try:
            course = get_course(course_id)
        except ValueError as err:
            print err
            return

        # Fail fast if the problem location does not exist in the course.
        descriptor = modulestore().get_instance(course.id, location, depth=0)
        if descriptor is None:
            print "Location not found in course"
            return

        if dry_run:
            print "Doing a dry run."

        students = User.objects.filter(id__in=students_ids).order_by('username')
        print "Number of students: {0}".format(students.count())

        for student in students:
            post_submission_for_student(student, course, location, task_number, dry_run=dry_run)
def post_submission_for_student(student, course, location, task_number, dry_run=True):
    """If the student's task child_state is ASSESSING post submission to grader.

    Args:
        student: User whose submission should be re-posted.
        course: course object containing the problem.
        location: location of the combined open ended problem.
        task_number: index of the task within the problem.
        dry_run: when True, report what would be sent without contacting
            the grader (and return False).

    Returns:
        True only when the submission was actually sent to the grader;
        False on any warning, error, dry run, or exception.
    """
    print "{0}:{1}".format(student.id, student.username)
    try:
        module = get_module_for_student(student, course, location)
        if module is None:
            print " WARNING: No state found."
            return False

        latest_task = module.child_module.get_task_number(task_number)
        if latest_task is None:
            print " WARNING: No task state found."
            return False

        # Only open ended (externally graded) tasks can be re-posted.
        if not isinstance(latest_task, OpenEndedModule):
            print " ERROR: Not an OpenEndedModule task."
            return False

        latest_task_state = latest_task.child_state

        if latest_task_state == OpenEndedChild.INITIAL:
            print " WARNING: No submission."
        elif latest_task_state == OpenEndedChild.POST_ASSESSMENT or latest_task_state == OpenEndedChild.DONE:
            print " WARNING: Submission already graded."
        elif latest_task_state == OpenEndedChild.ASSESSING:
            # Waiting on a grade: this is the state we re-post for.
            latest_answer = latest_task.latest_answer()
            if dry_run:
                print " Skipped sending submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
            else:
                latest_task.send_to_grader(latest_answer, latest_task.system)
                print " Sent submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
                return True
        else:
            print "WARNING: Invalid task_state: {0}".format(latest_task_state)
    except Exception as err:  # pylint: disable=broad-except
        # Best-effort command: report the error and move to the next student.
        print err

    return False
"""
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
    """
    Command to get statistics about open ended problems.

    Given a course id and a problem location, tallies the state of the
    selected task across all enrolled students via
    ``calculate_task_statistics``.
    """
    help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"

    option_list = BaseCommand.option_list + (
        make_option('--task-number',
                    type='int', default=0,
                    help="Task number to get statistics about."),
    )

    def handle(self, *args, **options):
        """Handler for command."""
        task_number = options['task_number']

        if len(args) == 2:
            course_id = args[0]
            location = args[1]
        else:
            print self.help
            return

        try:
            course = get_course(course_id)
        except ValueError as err:
            print err
            return

        # Fail fast if the problem location does not exist in the course.
        descriptor = modulestore().get_instance(course.id, location, depth=0)
        if descriptor is None:
            print "Location {0} not found in course".format(location)
            return

        try:
            enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
            print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
            calculate_task_statistics(enrolled_students, course, location, task_number)
        except KeyboardInterrupt:
            # Allow Ctrl-C to abort the (potentially long) scan cleanly.
            print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
    """Print stats of students.

    Tallies the child_state of the task at ``task_number`` for each
    student's module at ``location``, prints a summary, and optionally
    writes per-student graded/ungraded rows to a timestamped CSV file.

    Args:
        students: iterable/queryset of User objects to inspect.
        course: course object containing the problem.
        location: location of the problem (string; rebound to a Location
            object for reporting below).
        task_number: index of the task to inspect.
        write_to_file: when True, dump graded/ungraded students to CSV.

    Returns:
        Dict mapping OpenEndedChild state names to counts.
    """
    stats = {
        OpenEndedChild.INITIAL: 0,
        OpenEndedChild.ASSESSING: 0,
        OpenEndedChild.POST_ASSESSMENT: 0,
        OpenEndedChild.DONE: 0
    }
    students_with_saved_answers = []
    students_with_ungraded_submissions = []  # pylint: disable=invalid-name
    students_with_graded_submissions = []  # pylint: disable=invalid-name
    students_with_no_state = []

    student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
    print "Total student modules: {0}".format(student_modules.count())

    for index, student_module in enumerate(student_modules):
        # Progress marker for long-running scans.
        if index % 100 == 0:
            print "--- {0} students processed ---".format(index)

        student = student_module.student
        print "{0}:{1}".format(student.id, student.username)

        module = get_module_for_student(student, course, location)
        if module is None:
            print " WARNING: No state found"
            students_with_no_state.append(student)
            continue

        latest_task = module.child_module.get_task_number(task_number)
        if latest_task is None:
            print " No task state found"
            students_with_no_state.append(student)
            continue

        task_state = latest_task.child_state
        # NOTE(review): raises KeyError if child_state is outside the four
        # states initialized above — confirm those cover all valid states.
        stats[task_state] += 1
        print " State: {0}".format(task_state)

        if task_state == OpenEndedChild.INITIAL:
            # INITIAL with a stored answer means "saved but not submitted".
            if latest_task.stored_answer is not None:
                students_with_saved_answers.append(student)
        elif task_state == OpenEndedChild.ASSESSING:
            students_with_ungraded_submissions.append(student)
        elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
            students_with_graded_submissions.append(student)

    # Rebind to a Location object for pretty-printing and CSV file naming.
    location = Location(location)
    print "----------------------------------"
    print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
    print "Course: {0}".format(course.id)
    print "Location: {0}".format(location)
    print "No state: {0}".format(len(students_with_no_state))
    print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
    print "Saved answers: {0}".format(len(students_with_saved_answers))
    print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
    print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
    print "----------------------------------"

    if write_to_file:
        filename = "stats.{0}.{1}".format(location.course, location.name)
        time_stamp = time.strftime("%Y%m%d-%H%M%S")
        with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
            writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
            for student in students_with_ungraded_submissions:
                writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, ''), student.username))
            for student in students_with_graded_submissions:
                writer.writerow(("graded", student.id, anonymous_id_for_user(student, ''), student.username))

    return stats
"""
Tests for the instructor app management commands.
"""
"""Test the openended_post management command."""
from datetime import datetime
import json
from mock import patch, ANY
from pytz import UTC
from django.test.utils import override_settings
import capa.xqueue_interface as xqueue_interface
from xmodule.modulestore import Location
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.tests.test_util_open_ended import (
STATE_INITIAL, STATE_ACCESSING, STATE_POST_ASSESSMENT
)
from courseware.courses import get_course_with_access
from courseware.tests.factories import StudentModuleFactory, UserFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from student.models import anonymous_id_for_user
from instructor.management.commands.openended_post import post_submission_for_student
from instructor.management.commands.openended_stats import calculate_task_statistics
from instructor.utils import get_module_for_student
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class OpenEndedPostTest(ModuleStoreTestCase):
    """Test the openended_post management command."""

    def setUp(self):
        # One student per interesting child_state of the open ended task:
        # initial (no submission), assessing (awaiting grade), and
        # post-assessment (already graded).
        self.course_id = "edX/open_ended/2012_Fall"
        self.problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
        self.self_assessment_task_number = 0
        self.open_ended_task_number = 1

        self.student_on_initial = UserFactory()
        self.student_on_accessing = UserFactory()
        self.student_on_post_assessment = UserFactory()

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_initial,
            grade=0,
            max_grade=1,
            state=STATE_INITIAL
        )

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_accessing,
            grade=0,
            max_grade=1,
            state=STATE_ACCESSING
        )

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_post_assessment,
            grade=0,
            max_grade=1,
            state=STATE_POST_ASSESSMENT
        )

    def test_post_submission_for_student_on_initial(self):
        """A student with no submission is never posted to the grader."""
        course = get_course_with_access(self.student_on_initial, self.course_id, 'load')

        dry_run_result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=True)
        self.assertFalse(dry_run_result)

        result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=False)
        self.assertFalse(result)

    def test_post_submission_for_student_on_accessing(self):
        """A student awaiting grading is re-posted with the expected payload."""
        course = get_course_with_access(self.student_on_accessing, self.course_id, 'load')

        # Dry runs never report success, even for a postable submission.
        dry_run_result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=True)
        self.assertFalse(dry_run_result)

        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")

            module = get_module_for_student(self.student_on_accessing, course, self.problem_location)
            task = module.child_module.get_task_number(self.open_ended_task_number)

            # Rebuild the exact xqueue payload the command should send.
            qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
            student_info = {'anonymous_student_id': anonymous_id_for_user(self.student_on_accessing, ''),
                            'submission_time': qtime}
            contents = task.payload.copy()
            contents.update({
                'max_score': 2,
                'student_info': json.dumps(student_info),
                'student_response': "Here is an answer.",
            })

            result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=False)
            self.assertTrue(result)

            mock_send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY)

    def test_post_submission_for_student_on_post_assessment(self):
        """An already-graded submission is not re-posted."""
        course = get_course_with_access(self.student_on_post_assessment, self.course_id, 'load')

        dry_run_result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=True)
        self.assertFalse(dry_run_result)

        result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=False)
        self.assertFalse(result)

    def test_post_submission_for_student_invalid_task(self):
        """Non open ended or out-of-range task numbers report failure."""
        course = get_course_with_access(self.student_on_accessing, self.course_id, 'load')

        # Task 0 is a self assessment, not an OpenEndedModule.
        result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.self_assessment_task_number, dry_run=False)
        self.assertFalse(result)

        out_of_bounds_task_number = 3
        result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, out_of_bounds_task_number, dry_run=False)
        self.assertFalse(result)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class OpenEndedStatsTest(ModuleStoreTestCase):
    """Test the openended_stats management command."""

    def setUp(self):
        # One student in each of three states: initial, assessing, and
        # post-assessment; task 1 is the open ended task under test.
        self.course_id = "edX/open_ended/2012_Fall"
        self.problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
        self.task_number = 1
        self.invalid_task_number = 3

        self.student_on_initial = UserFactory()
        self.student_on_accessing = UserFactory()
        self.student_on_post_assessment = UserFactory()

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_initial,
            grade=0,
            max_grade=1,
            state=STATE_INITIAL
        )

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_accessing,
            grade=0,
            max_grade=1,
            state=STATE_ACCESSING
        )

        StudentModuleFactory.create(
            course_id=self.course_id,
            module_state_key=self.problem_location,
            student=self.student_on_post_assessment,
            grade=0,
            max_grade=1,
            state=STATE_POST_ASSESSMENT
        )

        self.students = [self.student_on_initial, self.student_on_accessing, self.student_on_post_assessment]

    def test_calculate_task_statistics(self):
        """Each student lands in exactly one bucket; bad task index yields zeros."""
        course = get_course_with_access(self.student_on_accessing, self.course_id, 'load')

        stats = calculate_task_statistics(self.students, course, self.problem_location, self.task_number, write_to_file=False)
        self.assertEqual(stats[OpenEndedChild.INITIAL], 1)
        self.assertEqual(stats[OpenEndedChild.ASSESSING], 1)
        self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 1)
        self.assertEqual(stats[OpenEndedChild.DONE], 0)

        # Out-of-range task number: no task state found for anyone.
        stats = calculate_task_statistics(self.students, course, self.problem_location, self.invalid_task_number, write_to_file=False)
        self.assertEqual(stats[OpenEndedChild.INITIAL], 0)
        self.assertEqual(stats[OpenEndedChild.ASSESSING], 0)
        self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 0)
        self.assertEqual(stats[OpenEndedChild.DONE], 0)
......@@ -13,6 +13,7 @@ from courseware import grades, models
from courseware.courses import get_course_by_id
from django.contrib.auth.models import User
from instructor.utils import DummyRequest
class MyEncoder(JSONEncoder):
......@@ -38,15 +39,6 @@ def offline_grade_calculation(course_id):
enc = MyEncoder()
class DummyRequest(object):
META = {}
def __init__(self):
return
def get_host(self):
return 'edx.mit.edu'
def is_secure(self):
return False
print "%d enrolled students" % len(enrolled_students)
course = get_course_by_id(course_id)
......
"""
Helpers for instructor app.
"""
from xmodule.modulestore.django import modulestore
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module
class DummyRequest(object):
    """Dummy request.

    Minimal stand-in for an HTTP request object, used when rendering
    modules outside a real request/response cycle.
    """
    # Kept for backward compatibility with any code that reads the class
    # attribute directly; instances get their own dict in __init__.
    META = {}

    def __init__(self):
        # Per-instance mapping so mutations on one request do not leak into
        # other DummyRequest instances (previously a single class-level dict
        # was shared by every instance).
        self.META = {}
        self.session = {}
        self.user = None

    def get_host(self):
        """Return a default host."""
        return 'edx.mit.edu'

    def is_secure(self):
        """Always insecure."""
        return False
def get_module_for_student(student, course, location):
    """Return the module for the (student, location) using a DummyRequest.

    Fetches the descriptor at ``location``, builds a FieldDataCache for the
    student so the module is bound to their stored state, and renders the
    module through a DummyRequest standing in for a real HTTP request.

    Args:
        student: User whose module state should be loaded.
        course: course object the location belongs to.
        location: module location to load.

    Returns:
        Whatever ``get_module`` returns for this student and location.
    """
    request = DummyRequest()
    request.user = student

    descriptor = modulestore().get_instance(course.id, location, depth=0)
    field_data_cache = FieldDataCache([descriptor], course.id, student)
    return get_module(student, request, location, field_data_cache, course.id)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.