Commit a4dae577 by Lyla Fischer

merge with master

parent 7f92606e
"""
Course settings module. The settings are based on django.conf. All settings in
courseware.global_course_settings are applied first, and then any settings
in settings.DATA_DIR/course_settings.py are applied on top of them. A setting must be
in ALL_CAPS.
Settings are accessed by importing the object:
from courseware import course_settings
Note that courseware.course_settings is not a module -- it's an object. So
importing individual settings is not possible:
from courseware.course_settings import GRADER # This won't work.
"""
import imp
import logging
import sys
import types
from django.conf import settings
from courseware import global_course_settings
from courseware import graders
_log = logging.getLogger("mitx.courseware")
class Settings(object):
def __init__(self):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_course_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_course_settings, setting))
data_dir = settings.DATA_DIR
fp = None
try:
fp, pathname, description = imp.find_module("course_settings", [data_dir])
mod = imp.load_module("course_settings", fp, pathname, description)
except Exception as e:
_log.exception("Unable to import course settings file from " + data_dir + ". Error: " + str(e))
mod = types.ModuleType('course_settings')
finally:
if fp:
fp.close()
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
setattr(self, setting, setting_value)
# Here is where we should parse any configurations, so that we can fail early
self.GRADER = graders.grader_from_conf(self.GRADER)
course_settings = Settings()
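# A minimal usage sketch (mirroring the module docstring): settings live as
# attributes on the course_settings object, so access them through the object
# rather than importing names from it.
#
#     from courseware import course_settings
#     grader = course_settings.GRADER  # parsed into a CourseGrader at import time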
@@ -53,13 +53,12 @@ html_problem_semantics = ["responseparam", "answer", "script"]
html_skip = ["numericalresponse", "customresponse", "schematicresponse", "formularesponse", "text"]
class LoncapaProblem(object):
def __init__(self, filename, id, state=None, seed=None):
def __init__(self, fileobject, id, state=None, seed=None):
## Initialize class variables from state
self.seed = None
self.student_answers = dict()
self.correct_map = dict()
self.done = False
self.filename = filename
self.problem_id = id
if seed != None:
@@ -75,16 +74,12 @@ class LoncapaProblem(object):
if 'done' in state:
self.done = state['done']
# print self.seed
# TODO: Does this deplete the Linux entropy pool? Is this fast enough?
if not self.seed:
self.seed=struct.unpack('i', os.urandom(4))[0]
# print filename, self.seed, seed
## Parse XML file
#log.debug(u"LoncapaProblem() opening file {0}".format(filename))
file_text = open(filename).read()
file_text = fileobject.read()
# Convert startouttext and endouttext to proper <text></text>
# TODO: Do with XML operations
file_text = re.sub("startouttext\s*/","text",file_text)
@@ -177,6 +177,8 @@ def course_xml_process(tree):
propogate_downward_tag(tree, "due")
propogate_downward_tag(tree, "graded")
propogate_downward_tag(tree, "graceperiod")
propogate_downward_tag(tree, "showanswer")
propogate_downward_tag(tree, "rerandomize")
return tree
def course_file(user):
GRADER = [
{
'type' : "Homework",
'min_count' : 12,
'drop_count' : 2,
'short_label' : "HW",
'weight' : 0.15,
},
{
'type' : "Lab",
'min_count' : 12,
'drop_count' : 2,
'category' : "Labs",
'weight' : 0.15
},
{
'type' : "Midterm",
'name' : "Midterm Exam",
'short_label' : "Midterm",
'weight' : 0.3,
},
{
'type' : "Final",
'name' : "Final Exam",
'short_label' : "Final",
'weight' : 0.4,
}
]
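# Note that these weights sum to 1.0 (0.15 + 0.15 + 0.30 + 0.40). The sum is
# not enforced anywhere; weights totalling more than 1 allow extra-credit
# sections (see WeightedSubsectionsGrader below).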
import abc
import logging
from django.conf import settings
from collections import namedtuple
log = logging.getLogger("mitx.courseware")
# This is a tuple for holding scores, either from problems or sections.
# The 'section' field holds either the name of the problem or the name of the section
Score = namedtuple("Score", "earned possible graded section")
def grader_from_conf(conf):
"""
This creates a CourseGrader from a configuration (such as in course_settings.py).
The conf can simply be an instance of CourseGrader, in which case no work is done.
More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
with AssignmentFormatGraders or SingleSectionGraders as subsections will be
generated. Every dictionary should contain the parameters for making either an
AssignmentFormatGrader or a SingleSectionGrader, in addition to a 'weight' key.
"""
if isinstance(conf, CourseGrader):
return conf
subgraders = []
for subgraderconf in conf:
subgraderconf = subgraderconf.copy()
weight = subgraderconf.pop("weight", 0)
try:
if 'min_count' in subgraderconf:
#This is an AssignmentFormatGrader
subgrader = AssignmentFormatGrader(**subgraderconf)
subgraders.append( (subgrader, subgrader.category, weight) )
elif 'name' in subgraderconf:
#This is a SingleSectionGrader
subgrader = SingleSectionGrader(**subgraderconf)
subgraders.append( (subgrader, subgrader.category, weight) )
else:
raise ValueError("Configuration has no appropriate grader class.")
except (TypeError, ValueError) as error:
errorString = "Unable to parse grader configuration:\n " + str(subgraderconf) + "\n Error was:\n " + str(error)
log.critical(errorString)
raise ValueError(errorString)
return WeightedSubsectionsGrader( subgraders )
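# A minimal sketch of grader_from_conf in use (values are illustrative):
#
#     grader = grader_from_conf([
#         # has 'min_count' -> AssignmentFormatGrader
#         {'type': "Homework", 'min_count': 12, 'drop_count': 2,
#          'short_label': "HW", 'weight': 0.15},
#         # has 'name' -> SingleSectionGrader
#         {'type': "Final", 'name': "Final Exam", 'weight': 0.4},
#     ])  # returns a WeightedSubsectionsGrader with two subsections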
class CourseGrader(object):
"""
A course grader takes the totaled scores for each graded section (that a student has
started) in the course. From these scores, the grader calculates an overall percentage
grade. The grader should also generate information about how that score was calculated,
to be displayed in graphs or charts.
A grader has one required method, grade(), which is passed a grade_sheet. The grade_sheet
contains scores for all graded sections that the student has started. If a student has
a score of 0 for that section, it may be missing from the grade_sheet. The grade_sheet
is keyed by section format. Each value is a list of Score namedtuples for each section
that has the matching section format.
The grader outputs a dictionary with the following keys:
- percent: Contains a float value, which is the final percentage score for the student.
- section_breakdown: This is a list of dictionaries which provide details on sections
that were graded. These are used for display in a graph or chart. The format for a
section_breakdown dictionary is explained below.
- grade_breakdown: This is a list of dictionaries which provide details on the contributions
of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
a grade_breakdown is explained below. This section is optional.
A dictionary in the section_breakdown list has the following keys:
percent: A float percentage for the section.
label: A short string identifying the section. Preferably fixed-length. E.g. "HW 3".
detail: A string explanation of the score. E.g. "Homework 1 - Ohms Law - 83% (5/6)"
category: A string identifying the category. Items with the same category are grouped together
in the display (for example, by color).
prominent: A boolean value indicating that this section should be displayed as more prominent
than other items.
A dictionary in the grade_breakdown list has the following keys:
percent: A float percentage in the breakdown. All percents should add up to the final percentage.
detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
category: A string identifying the category. Items with the same category are grouped together
in the display (for example, by color).
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def grade(self, grade_sheet):
raise NotImplementedError
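# A sketch of the shapes involved (the grader name and values here are
# illustrative): the grade_sheet is keyed by section format, and each value
# is a list of Score namedtuples.
#
#     grade_sheet = {'Homework': [Score(earned=5, possible=10,
#                                       graded=True, section='hw1')]}
#     result = some_grader.grade(grade_sheet)
#     result['percent']              # final percentage, e.g. 0.5
#     result['section_breakdown']    # list of dicts as described above
#     result.get('grade_breakdown')  # optional, e.g. from WeightedSubsectionsGrader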
class WeightedSubsectionsGrader(CourseGrader):
"""
This grader takes a list of tuples containing (grader, category_name, weight) and computes
a final grade by totalling the contribution of each sub grader and multiplying it by the
given weight. For example, the sections may be
[ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30), (finalGrader, "Final", 0.40) ]
All items in section_breakdown for each subgrader will be combined. A grade_breakdown will be
composed using the score from each grader.
Note that the sum of the weights is not taken into consideration. If the weights add up to
a value > 1, the student may end up with a percent > 100%. This allows for sections that
are extra credit.
"""
def __init__(self, sections):
self.sections = sections
def grade(self, grade_sheet):
total_percent = 0.0
section_breakdown = []
grade_breakdown = []
for subgrader, category, weight in self.sections:
subgrade_result = subgrader.grade(grade_sheet)
weightedPercent = subgrade_result['percent'] * weight
section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weightedPercent, weight)
total_percent += weightedPercent
section_breakdown += subgrade_result['section_breakdown']
grade_breakdown.append( {'percent' : weightedPercent, 'detail' : section_detail, 'category' : category} )
return {'percent' : total_percent,
'section_breakdown' : section_breakdown,
'grade_breakdown' : grade_breakdown}
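# Worked example (illustrative numbers): with the weights from
# course_settings (0.15, 0.15, 0.30, 0.40) and subgrader percents of
# 0.8, 1.0, 0.5 and 0.75, the final grade is
# 0.8*0.15 + 1.0*0.15 + 0.5*0.30 + 0.75*0.40 = 0.72.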
class SingleSectionGrader(CourseGrader):
"""
This grades a single section with the format 'type' and the name 'name'.
If the name is not appropriate for the short_label or category, each may
be specified individually.
"""
def __init__(self, type, name, short_label = None, category = None):
self.type = type
self.name = name
self.short_label = short_label or name
self.category = category or name
def grade(self, grade_sheet):
foundScore = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.section == self.name:
foundScore = score
break
if foundScore:
percent = foundScore.earned / float(foundScore.possible)
detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format( name = self.name,
percent = percent,
earned = float(foundScore.earned),
possible = float(foundScore.possible))
else:
percent = 0.0
detail = "{name} - 0% (?/?)".format(name = self.name)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(50, 100)
points_earned = random.randrange(40, points_possible)
percent = points_earned / float(points_possible)
detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format( name = self.name,
percent = percent,
earned = float(points_earned),
possible = float(points_possible))
breakdown = [{'percent': percent, 'label': self.short_label, 'detail': detail, 'category': self.category, 'prominent': True}]
return {'percent' : percent,
'section_breakdown' : breakdown,
#No grade_breakdown here
}
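# Worked example (taken from the tests below): with 'Midterm' containing
# Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"),
# SingleSectionGrader("Midterm", "Midterm Exam") yields percent 0.505.
# If no matching section is found, percent is 0.0 and detail reads "(?/?)".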
class AssignmentFormatGrader(CourseGrader):
"""
Grades all sections matching the format 'type' with an equal weight. A specified
number of lowest scores can be dropped from the calculation. The minimum number of
sections in this format must be specified (even if those sections haven't been
written yet).
min_count defines how many assignments are expected throughout the course. Placeholder
scores (of 0) will be inserted if the number of matching sections in the course is < min_count.
If the number of matching sections in the course is > min_count, min_count will be ignored.
category should be presentable to the user, but may not be displayed. When the grade breakdown is
displayed, scores from the same category will be styled similarly (for example, by color).
section_type is a string that is the type of a singular section. For example, for Labs it
would be "Lab". This defaults to be the same as category.
short_label is similar to section_type, but shorter. For example, for Homework it would be
"HW".
"""
def __init__(self, type, min_count, drop_count, category = None, section_type = None, short_label = None):
self.type = type
self.min_count = min_count
self.drop_count = drop_count
self.category = category or self.type
self.section_type = section_type or self.type
self.short_label = short_label or self.type
def grade(self, grade_sheet):
def totalWithDrops(breakdown, drop_count):
#create an array of tuples with (index, mark), sorted by mark['percent'] descending
sorted_breakdown = sorted( enumerate(breakdown), key=lambda x: -x[1]['percent'] )
# A list of the indices of the dropped scores
dropped_indices = []
if drop_count > 0:
dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
aggregate_score = 0
for index, mark in enumerate(breakdown):
if index not in dropped_indices:
aggregate_score += mark['percent']
if (len(breakdown) - drop_count > 0):
aggregate_score /= len(breakdown) - drop_count
return aggregate_score, dropped_indices
#Figure the scores for each assignment of this type
scores = grade_sheet.get(self.type, [])
breakdown = []
for i in range( max(self.min_count, len(scores)) ):
if i < len(scores):
percentage = scores[i].earned / float(scores[i].possible)
summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
section_type = self.section_type,
name = scores[i].section,
percent = percentage,
earned = float(scores[i].earned),
possible = float(scores[i].possible) )
else:
percentage = 0
summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index = i+1, section_type = self.section_type)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(10, 50)
points_earned = random.randrange(5, points_possible)
percentage = points_earned / float(points_possible)
summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
section_type = self.section_type,
name = "Randomly Generated",
percent = percentage,
earned = float(points_earned),
possible = float(points_possible) )
short_label = "{short_label} {index:02d}".format(index = i+1, short_label = self.short_label)
breakdown.append( {'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category} )
total_percent, dropped_indices = totalWithDrops(breakdown, self.drop_count)
for dropped_index in dropped_indices:
breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped.".format(drop_count = self.drop_count, section_type=self.section_type) }
total_detail = "{section_type} Average = {percent:.0%}".format(percent = total_percent, section_type = self.section_type)
total_label = "{short_label} Avg".format(short_label = self.short_label)
breakdown.append( {'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True} )
return {'percent' : total_percent,
'section_breakdown' : breakdown,
#No grade_breakdown here
}
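# Worked example (illustrative numbers): AssignmentFormatGrader("Lab",
# min_count=4, drop_count=1) over three labs scoring 100%, 50% and 75% pads
# the breakdown with one 0% placeholder to reach min_count, drops that
# lowest placeholder, and averages the rest: (1.0 + 0.5 + 0.75) / 3 = 0.75.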
import courseware.content_parser as content_parser
import courseware.modules
import logging
from lxml import etree
import random
import urllib
from collections import namedtuple
from django.conf import settings
from lxml import etree
from models import StudentModule
from student.models import UserProfile
log = logging.getLogger("mitx.courseware")
Score = namedtuple("Score", "earned possible weight graded section")
def get_grade(user, problem, cache):
## HACK: assumes max score is fixed per problem
id = problem.get('id')
correct = 0
# If the ID is not in the cache, add the item
if id not in cache:
module = StudentModule(module_type = 'problem', # TODO: Move into StudentModule.__init__?
module_id = id,
student = user,
state = None,
grade = 0,
max_grade = None,
done = 'i')
cache[id] = module
# Grab the # correct from cache
if id in cache:
response = cache[id]
if response.grade!=None:
correct=response.grade
# Grab max grade from cache, or if it doesn't exist, compute and save to DB
if id in cache and response.max_grade != None:
total = response.max_grade
else:
total=courseware.modules.capa_module.Module(etree.tostring(problem), "id").max_score()
response.max_grade = total
response.save()
return (correct, total)
from courseware import course_settings
import courseware.content_parser as content_parser
from courseware.graders import Score
import courseware.modules
from models import StudentModule
def grade_sheet(student):
"""
@@ -54,9 +17,7 @@ def grade_sheet(student):
each containing an array of sections, each containing an array of scores. This contains information for graded and ungraded
problems, and is good for displaying a course summary with due dates, etc.
- grade_summary is a summary of how the final grade breaks down. It is an array of "sections". Each section can either be
a conglomerate of scores (like labs or homeworks) which has subscores and a totalscore, or a section can be all from one assignment
(such as a midterm or final) and only has a totalscore. Each section has a weight that shows how it contributes to the total grade.
- grade_summary is the output from the course grader. More information on the format is in the docstring for CourseGrader.
"""
dom=content_parser.course_file(student)
course = dom.xpath('//course/@name')[0]
@@ -68,7 +29,6 @@ def grade_sheet(student):
response_by_id[response.module_id] = response
totaled_scores = {}
chapters=[]
for c in xmlChapters:
@@ -85,33 +45,29 @@ def grade_sheet(student):
scores=[]
if len(problems)>0:
for p in problems:
(correct,total) = get_grade(student, p, response_by_id)
# id = p.get('id')
# correct = 0
# if id in response_by_id:
# response = response_by_id[id]
# if response.grade!=None:
# correct=response.grade
# total=courseware.modules.capa_module.Module(etree.tostring(p), "id").max_score() # TODO: Add state. Not useful now, but maybe someday problems will have randomized max scores?
# print correct, total
(correct,total) = get_score(student, p, response_by_id)
if settings.GENERATE_PROFILE_SCORES:
if total > 1:
correct = random.randrange( max(total-2, 1) , total + 1 )
else:
correct = total
scores.append( Score(int(correct),total, float(p.get("weight", total)), graded, p.get("name")) )
if not total > 0:
#We simply cannot grade a problem with 0 points possible (e.g. 12/0), because we might need it as a percentage
graded = False
scores.append( Score(correct,total, graded, p.get("name")) )
section_total, graded_total = aggregate_scores(scores)
section_total, graded_total = aggregate_scores(scores, s.get("name"))
#Add the graded total to totaled_scores
format = s.get('format') if s.get('format') else ""
subtitle = s.get('subtitle') if s.get('subtitle') else format
format = s.get('format', "")
subtitle = s.get('subtitle', format)
if format and graded_total[1] > 0:
format_scores = totaled_scores.get(format, [])
format_scores.append( graded_total )
totaled_scores[ format ] = format_scores
score={'section':s.get("name"),
section_score={'section':s.get("name"),
'scores':scores,
'section_total' : section_total,
'format' : format,
@@ -119,154 +75,75 @@ def grade_sheet(student):
'due' : s.get("due") or "",
'graded' : graded,
}
sections.append(score)
sections.append(section_score)
chapters.append({'course':course,
'chapter' : c.get("name"),
'sections' : sections,})
grade_summary = grade_summary_6002x(totaled_scores)
return {'courseware_summary' : chapters, #all assessments as they appear in the course definition
'grade_summary' : grade_summary, #graded assessments only
}
grader = course_settings.GRADER
grade_summary = grader.grade(totaled_scores)
return {'courseware_summary' : chapters,
'grade_summary' : grade_summary}
def aggregate_scores(scores):
scores = filter( lambda score: score.possible > 0, scores )
def aggregate_scores(scores, section_name = "summary"):
total_correct_graded = sum(score.earned for score in scores if score.graded)
total_possible_graded = sum(score.possible for score in scores if score.graded)
total_correct_graded = sum((score.earned*1.0/score.possible)*score.weight for score in scores if score.graded)
total_possible_graded = sum(score.weight for score in scores if score.graded)
total_correct = sum((score.earned*1.0/score.possible)*score.weight for score in scores)
total_possible = sum(score.weight for score in scores)
total_correct = sum(score.earned for score in scores)
total_possible = sum(score.possible for score in scores)
#regardless of whether or not it is graded
all_total = Score(total_correct,
total_possible,
1,
False,
"summary")
section_name)
#selecting only graded things
graded_total = Score(total_correct_graded,
total_possible_graded,
1,
True,
"summary")
section_name)
return all_total, graded_total
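# A sketch of the new (unweighted) aggregate_scores, matching the tests
# later in this commit (values are illustrative):
#
#     scores = [Score(earned=3, possible=5, graded=True, section='p1'),
#               Score(earned=2, possible=5, graded=False, section='p2')]
#     all_total, graded_total = aggregate_scores(scores, 'sec')
#     # all_total    == Score(5, 10, False, 'sec')
#     # graded_total == Score(3, 5, True, 'sec')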
def grade_summary_6002x(totaled_scores):
"""
This function takes a dictionary of (graded) section scores, and applies the course grading rules to create
the grade_summary. For 6.002x this means homeworks and labs all have equal weight, with the lowest 2 of each
being dropped. There is one midterm and one final.
"""
def totalWithDrops(scores, drop_count):
#Note that this key will sort the list descending
sorted_scores = sorted( enumerate(scores), key=lambda x: -x[1]['percentage'] )
# A list of the indices of the dropped scores
dropped_indices = [score[0] for score in sorted_scores[-drop_count:]]
aggregate_score = 0
for index, score in enumerate(scores):
if index not in dropped_indices:
aggregate_score += score['percentage']
aggregate_score /= len(scores) - drop_count
return aggregate_score, dropped_indices
#Figure the homework scores
homework_scores = totaled_scores['Homework'] if 'Homework' in totaled_scores else []
homework_percentages = []
for i in range(12):
if i < len(homework_scores):
percentage = homework_scores[i].earned / float(homework_scores[i].possible)
summary = "Homework {0} - {1} - {2:.0%} ({3:g}/{4:g})".format( i + 1, homework_scores[i].section , percentage, homework_scores[i].earned, homework_scores[i].possible )
else:
percentage = 0
summary = "Unreleased Homework {0} - 0% (?/?)".format(i + 1)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(10, 50)
points_earned = random.randrange(5, points_possible)
percentage = points_earned / float(points_possible)
summary = "Random Homework - {0:.0%} ({1:g}/{2:g})".format( percentage, points_earned, points_possible )
label = "HW {0:02d}".format(i + 1)
homework_percentages.append( {'percentage': percentage, 'summary': summary, 'label' : label} )
homework_total, homework_dropped_indices = totalWithDrops(homework_percentages, 2)
#Figure the lab scores
lab_scores = totaled_scores['Lab'] if 'Lab' in totaled_scores else []
lab_percentages = []
for i in range(12):
if i < len(lab_scores):
percentage = lab_scores[i].earned / float(lab_scores[i].possible)
summary = "Lab {0} - {1} - {2:.0%} ({3:g}/{4:g})".format( i + 1, lab_scores[i].section , percentage, lab_scores[i].earned, lab_scores[i].possible )
else:
percentage = 0
summary = "Unreleased Lab {0} - 0% (?/?)".format(i + 1)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(10, 50)
points_earned = random.randrange(5, points_possible)
percentage = points_earned / float(points_possible)
summary = "Random Lab - {0:.0%} ({1:g}/{2:g})".format( percentage, points_earned, points_possible )
label = "Lab {0:02d}".format(i + 1)
lab_percentages.append( {'percentage': percentage, 'summary': summary, 'label' : label} )
lab_total, lab_dropped_indices = totalWithDrops(lab_percentages, 2)
#TODO: Pull this data about the midterm and final from the database. It should work exactly like the above, but we aren't sure how exams will be done yet.
#This is a hack, but I have no intention of having this function be useful for anything but 6.002x anyway, so I don't want to make it pretty.
midterm_score = totaled_scores['Midterm'][0] if 'Midterm' in totaled_scores else Score('?', '?', '?', True, "?")
midterm_percentage = midterm_score.earned * 1.0 / midterm_score.possible if 'Midterm' in totaled_scores else 0
final_score = totaled_scores['Final'][0] if 'Final' in totaled_scores else Score('?', '?', '?', True, "?")
final_percentage = final_score.earned * 1.0 / final_score.possible if 'Final' in totaled_scores else 0
def get_score(user, problem, cache):
## HACK: assumes max score is fixed per problem
id = problem.get('id')
correct = 0.0
if settings.GENERATE_PROFILE_SCORES:
midterm_score = Score(random.randrange(50, 150), 150, 150, True, "?")
midterm_percentage = midterm_score.earned / float(midterm_score.possible)
# If the ID is not in the cache, add the item
if id not in cache:
module = StudentModule(module_type = 'problem', # TODO: Move into StudentModule.__init__?
module_id = id,
student = user,
state = None,
grade = 0,
max_grade = None,
done = 'i')
cache[id] = module
# Grab the # correct from cache
if id in cache:
response = cache[id]
if response.grade!=None:
correct=float(response.grade)
final_score = Score(random.randrange(100, 300), 300, 300, True, "?")
final_percentage = final_score.earned / float(final_score.possible)
# Grab max grade from cache, or if it doesn't exist, compute and save to DB
if id in cache and response.max_grade != None:
total = response.max_grade
else:
total=float(courseware.modules.capa_module.Module(etree.tostring(problem), "id").max_score())
response.max_grade = total
response.save()
grade_summary = [
{
'category': 'Homework',
'subscores' : homework_percentages,
'dropped_indices' : homework_dropped_indices,
'totalscore' : homework_total,
'totalscore_summary' : "Homework Average - {0:.0%}".format(homework_total),
'totallabel' : 'HW Avg',
'weight' : 0.15,
},
{
'category': 'Labs',
'subscores' : lab_percentages,
'dropped_indices' : lab_dropped_indices,
'totalscore' : lab_total,
'totalscore_summary' : "Lab Average - {0:.0%}".format(lab_total),
'totallabel' : 'Lab Avg',
'weight' : 0.15,
},
{
'category': 'Midterm',
'totalscore' : midterm_percentage,
'totalscore_summary' : "Midterm - {0:.0%} ({1}/{2})".format(midterm_percentage, midterm_score.earned, midterm_score.possible),
'totallabel' : 'Midterm',
'weight' : 0.30,
},
{
'category': 'Final',
'totalscore' : final_percentage,
'totalscore_summary' : "Final - {0:.0%} ({1}/{2})".format(final_percentage, final_score.earned, final_score.possible),
'totallabel' : 'Final',
'weight' : 0.40,
}
]
return grade_summary
#Now we re-weight the problem, if specified
weight = problem.get("weight", None)
if weight:
weight = float(weight)
correct = correct * weight / total
total = weight
return (correct, total)
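# Worked example of the re-weighting above (illustrative numbers): a problem
# graded 3/5 with weight="10" is rescaled to correct = 3 * 10 / 5 = 6.0 and
# total = 10, so it contributes 6 out of 10 points instead of 3 out of 5.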
@@ -18,8 +18,12 @@ from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import Context
from django.template import Context, loader
from fs.osfs import OSFS
from mitxmako.shortcuts import render_to_response, render_to_string
from models import StudentModule
from student.models import UserProfile
import track.views
@@ -30,6 +34,14 @@ import courseware.modules
log = logging.getLogger("mitx.courseware")
class I4xSystem(object):
def __init__(self, ajax_url, track_function, render_function, filestore=None):
self.ajax_url = ajax_url
self.track_function = track_function
self.filestore = OSFS(settings.DATA_DIR)
self.render_function = render_function
self.exception404 = Http404
def object_cache(cache, user, module_type, module_id):
# We don't look up on user -- all queries include user
# Additional lookup would require a DB hit the way Django
@@ -76,12 +88,15 @@ def modx_dispatch(request, module=None, dispatch=None, id=None):
xml = content_parser.module_xml(request.user, module, 'id', id)
# Create the module
instance=courseware.modules.get_module_class(module)(xml,
system = I4xSystem(track_function = make_track_function(request),
render_function = None,
ajax_url = ajax_url,
filestore = None
)
instance=courseware.modules.get_module_class(module)(system,
xml,
id,
ajax_url=ajax_url,
state=oldstate,
track_function = make_track_function(request),
render_function = None)
state=oldstate)
# Let the module handle the AJAX
post_data=""
if request.raw_post_data:
@@ -131,12 +146,15 @@ def render_x_module(user, request, xml_module, module_object_preload):
# Create a new instance
ajax_url = '/modx/'+module_type+'/'+module_id+'/'
instance=module_class(etree.tostring(xml_module),
system = I4xSystem(track_function = make_track_function(request),
render_function = lambda x: render_module(user, request, x, module_object_preload),
ajax_url = ajax_url,
filestore = None
)
instance=module_class(system,
etree.tostring(xml_module),
module_id,
ajax_url=ajax_url,
state=state,
track_function = make_track_function(request),
render_function = lambda x: render_module(user, request, x, module_object_preload))
state=state)
# If instance wasn't already in the database, create it
if not smod:
@@ -16,9 +16,7 @@ import traceback
from lxml import etree
## TODO: Abstract out from Django
from django.conf import settings
from mitxmako.shortcuts import render_to_response, render_to_string
from django.http import Http404
from mitxmako.shortcuts import render_to_string
from x_module import XModule
from courseware.capa.capa_problem import LoncapaProblem, StudentInputError
@@ -92,7 +90,6 @@ class Module(XModule):
# User submitted a problem, and hasn't reset. We don't want
# more submissions.
if self.lcp.done and self.rerandomize == "always":
#print "!"
check_button = False
save_button = False
@@ -131,8 +128,8 @@
return html
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None, meta = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
self.attempts = 0
self.max_attempts = None
@@ -185,15 +182,14 @@
if state!=None and 'attempts' in state:
self.attempts=state['attempts']
self.filename=content_parser.item(dom2.xpath('/problem/@filename'))
filename=settings.DATA_DIR+"/problems/"+self.filename+".xml"
self.filename="problems/"+content_parser.item(dom2.xpath('/problem/@filename'))+".xml"
self.name=content_parser.item(dom2.xpath('/problem/@name'))
self.weight=content_parser.item(dom2.xpath('/problem/@weight'))
if self.rerandomize == 'never':
seed = 1
else:
seed = None
self.lcp=LoncapaProblem(filename, self.item_id, state, seed = seed)
self.lcp=LoncapaProblem(self.filestore.open(self.filename), self.item_id, state, seed = seed)
def handle_ajax(self, dispatch, get):
if dispatch=='problem_get':
@@ -242,16 +238,15 @@
if self.show_answer == 'closed' and not self.closed():
return False
print "aa", self.show_answer
raise Http404
raise self.system.exception404 #TODO: Not 404
def get_answer(self, get):
if not self.answer_available():
raise Http404
raise self.system.exception404
else:
return json.dumps(self.lcp.get_question_answers(),
cls=ComplexEncoder)
# Figure out if we should move these to capa_problem?
def get_problem(self, get):
''' Same as get_problem_html -- if we want to reconfirm we
@@ -276,33 +271,27 @@
if self.closed():
event_info['failure']='closed'
self.tracker('save_problem_check_fail', event_info)
print "cp"
raise Http404
raise self.system.exception404
# Problem submitted. Student should reset before checking
# again.
if self.lcp.done and self.rerandomize == "always":
event_info['failure']='unreset'
self.tracker('save_problem_check_fail', event_info)
print "cpdr"
raise Http404
raise self.system.exception404
try:
old_state = self.lcp.get_state()
lcp_id = self.lcp.problem_id
filename = self.lcp.filename
correct_map = self.lcp.grade_answers(answers)
except StudentInputError as inst:
self.lcp = LoncapaProblem(filename, id=lcp_id, state=old_state)
self.lcp = LoncapaProblem(self.filestore.open(self.filename), id=lcp_id, state=old_state)
traceback.print_exc()
# print {'error':sys.exc_info(),
# 'answers':answers,
# 'seed':self.lcp.seed,
# 'filename':self.lcp.filename}
return json.dumps({'success':inst.message})
except:
self.lcp = LoncapaProblem(filename, id=lcp_id, state=old_state)
self.lcp = LoncapaProblem(self.filestore.open(self.filename), id=lcp_id, state=old_state)
traceback.print_exc()
raise
return json.dumps({'success':'Unknown Error'})
@@ -380,8 +369,8 @@
self.lcp.questions=dict() # Detailed info about questions in problem instance. TODO: Should be by id and not lid.
self.lcp.seed=None
filename=settings.DATA_DIR+"problems/"+self.filename+".xml"
self.lcp=LoncapaProblem(filename, self.item_id, self.lcp.get_state())
filename="problems/"+self.filename+".xml"
self.lcp=LoncapaProblem(self.filestore.open(filename), self.item_id, self.lcp.get_state())
event_info['new_state']=self.lcp.get_state()
self.tracker('reset_problem', event_info)
import json
## TODO: Abstract out from Django
from django.conf import settings
from mitxmako.shortcuts import render_to_response, render_to_string
from x_module import XModule
@@ -24,13 +22,13 @@ class Module(XModule):
textlist=[i for i in textlist if type(i)==str]
return "".join(textlist)
try:
filename=settings.DATA_DIR+"html/"+self.filename
return open(filename).read()
filename="html/"+self.filename
return self.filestore.open(filename).read()
except: # For backwards compatibility. TODO: Remove
return render_to_string(self.filename, {'id': self.item_id})
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
xmltree=etree.fromstring(xml)
self.filename = None
filename_l=xmltree.xpath("/html/@filename")
@@ -19,6 +19,6 @@ class Module(XModule):
def get_html(self):
return '<input type="hidden" class="schematic" name="{item_id}" height="480" width="640">'.format(item_id=self.item_id)
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
@@ -2,9 +2,6 @@ import json
from lxml import etree
## TODO: Abstract out from Django
from django.http import Http404
from django.conf import settings
from mitxmako.shortcuts import render_to_response, render_to_string
from x_module import XModule
@@ -38,12 +35,10 @@ class Module(XModule):
return self.destroy_js
def handle_ajax(self, dispatch, get):
print "GET", get
print "DISPATCH", dispatch
if dispatch=='goto_position':
self.position = int(get['position'])
return json.dumps({'success':True})
raise Http404()
raise self.system.exception404
def render(self):
if self.rendered:
@@ -107,9 +102,8 @@
self.rendered = True
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
self.xmltree=etree.fromstring(xml)
self.position = 1
@@ -14,16 +14,16 @@ class Module(XModule):
@classmethod
def get_xml_tags(c):
## TODO: Abstract out from filesystem
tags = os.listdir(settings.DATA_DIR+'/custom_tags')
return tags
def get_html(self):
return self.html
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
xmltree = etree.fromstring(xml)
filename = xmltree.tag
params = dict(xmltree.items())
# print params
self.html = render_to_string(filename, params, namespace = 'custom_tags')
import json
## TODO: Abstract out from Django
from django.conf import settings
from mitxmako.shortcuts import render_to_response, render_to_string
from x_module import XModule
@@ -26,8 +24,9 @@ class Module(XModule):
def get_destroy_js(self):
return self.destroy_js_text
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
xmltree=etree.fromstring(xml)
self.contents=[(e.get("name"),self.render_function(e)) \
for e in xmltree]
@@ -3,8 +3,6 @@ import logging
from lxml import etree
## TODO: Abstract out from Django
from django.conf import settings
from mitxmako.shortcuts import render_to_response, render_to_string
from x_module import XModule
@@ -58,8 +56,8 @@ class Module(XModule):
def get_destroy_js(self):
return "videoDestroy(\"{0}\");".format(self.item_id)+self.annotations_destroy
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
XModule.__init__(self, xml, item_id, ajax_url, track_url, state, track_function, render_function)
def __init__(self, system, xml, item_id, state=None):
XModule.__init__(self, system, xml, item_id, state)
xmltree=etree.fromstring(xml)
self.youtube = xmltree.get('youtube')
self.name = xmltree.get('name')
@@ -45,13 +45,14 @@ class XModule(object):
get is a dictionary-like object '''
return ""
def __init__(self, xml, item_id, ajax_url=None, track_url=None, state=None, track_function=None, render_function = None):
def __init__(self, system, xml, item_id, track_url=None, state=None):
''' In most cases, you must pass state or xml'''
self.xml = xml
self.item_id = item_id
self.ajax_url = ajax_url
self.track_url = track_url
self.state = state
self.tracker = track_function
self.render_function = render_function
self.ajax_url = system.ajax_url
self.tracker = system.track_function
self.filestore = system.filestore
self.render_function = system.render_function
self.system = system
@@ -6,7 +6,9 @@ import numpy
import courseware.modules
import courseware.capa.calc as calc
import courseware.capa.capa_problem as lcp
from grades import Score, aggregate_scores
import courseware.graders as graders
from courseware.graders import Score, CourseGrader, WeightedSubsectionsGrader, SingleSectionGrader, AssignmentFormatGrader
from courseware.grades import aggregate_scores
class ModelsTest(unittest.TestCase):
def setUp(self):
@@ -64,7 +66,7 @@ class ModelsTest(unittest.TestCase):
class MultiChoiceTest(unittest.TestCase):
def test_MC_grade(self):
multichoice_file = os.path.dirname(__file__)+"/test_files/multichoice.xml"
test_lcp = lcp.LoncapaProblem(multichoice_file, '1')
test_lcp = lcp.LoncapaProblem(open(multichoice_file), '1')
correct_answers = {'1_2_1':'foil3'}
self.assertEquals(test_lcp.grade_answers(correct_answers)['1_2_1'], 'correct')
false_answers = {'1_2_1':'foil2'}
@@ -72,7 +74,7 @@ class MultiChoiceTest(unittest.TestCase):
def test_TF_grade(self):
truefalse_file = os.getcwd()+"/djangoapps/courseware/test_files/truefalse.xml"
test_lcp = lcp.LoncapaProblem(truefalse_file, '1')
test_lcp = lcp.LoncapaProblem(open(truefalse_file), '1')
correct_answers = {'1_2_1':['foil2', 'foil1']}
self.assertEquals(test_lcp.grade_answers(correct_answers)['1_2_1'], 'correct')
false_answers = {'1_2_1':['foil1']}
@@ -84,42 +86,218 @@ class MultiChoiceTest(unittest.TestCase):
false_answers = {'1_2_1':['foil1', 'foil2', 'foil3']}
self.assertEquals(test_lcp.grade_answers(false_answers)['1_2_1'], 'incorrect')
class GraderTest(unittest.TestCase):
class GradesheetTest(unittest.TestCase):
def test_weighted_grading(self):
scores = []
Score.__sub__=lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
all, graded = aggregate_scores(scores)
self.assertEqual(all, Score(earned=0, possible=0, weight=1, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=0, possible=5, weight=1, graded=False, section="summary"))
scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
all, graded = aggregate_scores(scores)
self.assertEqual(all, Score(earned=0, possible=1, weight=1, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=3, possible=5, weight=1, graded=True, section="summary"))
scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=3.0/5, possible=2, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=3.0/5, possible=1, weight=1, graded=True, section="summary"))
self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))
scores.append(Score(earned=2, possible=5, weight=2, graded=True, section="summary"))
scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
scores.append(Score(earned=2, possible=5, weight=0, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
empty_gradesheet = {
}
incomplete_gradesheet = {
'Homework': [],
'Lab': [],
'Midterm' : [],
}
test_gradesheet = {
'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
Score(earned=16, possible=16.0, graded=True, section='hw2')],
#The dropped scores should be from the assignments that don't exist yet
'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), #Dropped
Score(earned=1, possible=1.0, graded=True, section='lab2'),
Score(earned=1, possible=1.0, graded=True, section='lab3'),
Score(earned=5, possible=25.0, graded=True, section='lab4'), #Dropped
Score(earned=3, possible=4.0, graded=True, section='lab5'), #Dropped
Score(earned=6, possible=7.0, graded=True, section='lab6'),
Score(earned=5, possible=6.0, graded=True, section='lab7')],
'Midterm' : [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"),],
}
def test_SingleSectionGrader(self):
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
for graded in [midtermGrader.grade(self.empty_gradesheet),
midtermGrader.grade(self.incomplete_gradesheet),
badLabGrader.grade(self.test_gradesheet)]:
self.assertEqual( len(graded['section_breakdown']), 1 )
self.assertEqual( graded['percent'], 0.0 )
graded = midtermGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.505 )
self.assertEqual( len(graded['section_breakdown']), 1 )
graded = lab4Grader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.2 )
self.assertEqual( len(graded['section_breakdown']), 1 )
def test_AssignmentFormatGrader(self):
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
#Even though the minimum number is 3, this should grade correctly when 7 assignments are found
overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
#Test the grading of an empty gradesheet
for graded in [ homeworkGrader.grade(self.empty_gradesheet),
noDropGrader.grade(self.empty_gradesheet),
homeworkGrader.grade(self.incomplete_gradesheet),
noDropGrader.grade(self.incomplete_gradesheet) ]:
self.assertAlmostEqual( graded['percent'], 0.0 )
#Make sure the breakdown includes 12 sections, plus one summary
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = homeworkGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.11 ) # (100% + 10%) / 10 assignments
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = noDropGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0916666666666666 ) # (100% + 10%) / 12 assignments
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = overflowGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.8880952380952382 ) # average of the 5 highest lab scores (2 dropped)
self.assertEqual( len(graded['section_breakdown']), 7 + 1 )
graded = labGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.9226190476190477 )
self.assertEqual( len(graded['section_breakdown']), 7 + 1 )
def test_WeightedSubsectionsGrader(self):
#First, a few sub graders
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
weightedGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25),
(midtermGrader, midtermGrader.category, 0.5)] )
overOneWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5),
(midtermGrader, midtermGrader.category, 0.5)] )
#The midterm should have all weight on this one
zeroWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
(midtermGrader, midtermGrader.category, 0.5)] )
#This should always have a final percent of zero
allZeroWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
(midtermGrader, midtermGrader.category, 0.0)] )
emptyGrader = graders.WeightedSubsectionsGrader( [] )
graded = weightedGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.5106547619047619 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = overOneWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.7688095238095238 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = zeroWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.2525 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = allZeroWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
for graded in [ weightedGrader.grade(self.empty_gradesheet),
weightedGrader.grade(self.incomplete_gradesheet),
zeroWeightsGrader.grade(self.empty_gradesheet),
allZeroWeightsGrader.grade(self.empty_gradesheet)]:
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = emptyGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), 0 )
self.assertEqual( len(graded['grade_breakdown']), 0 )
scores.append(Score(earned=2, possible=5, weight=3, graded=False, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=13.0/5, possible=7, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
def test_graderFromConf(self):
#Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
#from test_WeightedSubsectionsGrader, but generating the graders from confs.
weightedGrader = graders.grader_from_conf([
{
'type' : "Homework",
'min_count' : 12,
'drop_count' : 2,
'short_label' : "HW",
'weight' : 0.25,
},
{
'type' : "Lab",
'min_count' : 7,
'drop_count' : 3,
'category' : "Labs",
'weight' : 0.25
},
{
'type' : "Midterm",
'name' : "Midterm Exam",
'short_label' : "Midterm",
'weight' : 0.5,
},
])
emptyGrader = graders.grader_from_conf([])
graded = weightedGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.5106547619047619 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = emptyGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), 0 )
self.assertEqual( len(graded['grade_breakdown']), 0 )
#Test that graders can also be used instead of lists of dictionaries
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
graded = homeworkGrader2.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.11 )
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
#TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions?
scores.append(Score(earned=2, possible=5, weight=.5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=14.0/5, possible=7.5, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=8.0/5, possible=3.5, weight=1, graded=True, section="summary"))
@@ -84,7 +84,7 @@ def render_accordion(request,course,chapter,section):
parameter. Returns (initialization_javascript, content)'''
if not course:
course = "6.002 Spring 2012"
toc=content_parser.toc_from_xml(content_parser.course_file(request.user), chapter, section)
active_chapter=1
for i in range(len(toc)):
@@ -96,8 +96,7 @@ def render_accordion(request,course,chapter,section):
['format_url_params',content_parser.format_url_params],
['csrf',csrf(request)['csrf_token']]] + \
template_imports.items())
return {'init_js':render_to_string('accordion_init.js',context),
'content':render_to_string('accordion.html',context)}
return render_to_string('accordion.html',context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def render_section(request, section):
@@ -124,8 +123,8 @@ def render_section(request, section):
if 'init_js' not in module:
module['init_js']=''
context={'init':accordion['init_js']+module['init_js'],
'accordion':accordion['content'],
context={'init':module['init_js'],
'accordion':accordion,
'content':module['content'],
'csrf':csrf(request)['csrf_token']}
@@ -179,8 +178,8 @@ def index(request, course="6.002 Spring 2012", chapter="Using the System", secti
if 'init_js' not in module:
module['init_js']=''
context={'init':accordion['init_js']+module['init_js'],
'accordion':accordion['content'],
context={'init':module['init_js'],
'accordion':accordion,
'content':module['content'],
'csrf':csrf(request)['csrf_token']}
#! /usr/bin/env python
import sys
import json
import random
import copy
from collections import defaultdict
from argparse import ArgumentParser, FileType
from datetime import datetime
def generate_user(user_number):
return {
@@ -51,7 +54,6 @@ generate_user(user_number):
}
def parse_args(args=sys.argv[1:]):
parser = ArgumentParser()
parser.add_argument('-d', '--data', type=FileType('r'), default=sys.stdin)
@@ -59,6 +61,7 @@ def parse_args(args=sys.argv[1:]):
parser.add_argument('count', type=int)
return parser.parse_args(args)
def main(args=sys.argv[1:]):
args = parse_args(args)
@@ -79,6 +82,8 @@ def main(args=sys.argv[1:]):
sample = random.choice(answers)
data = copy.deepcopy(sample)
data["fields"]["student"] = student_id + 1
data["fields"]["created"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data["fields"]["modified"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data["pk"] = out_pk
out_pk += 1
out_data.append(data)
@@ -11,7 +11,7 @@ PACKAGE_NAME = "mitx"
LINK_PATH = "/opt/wwc/mitx"
VERSION = "0.1"
COMMIT = (ENV["GIT_COMMIT"] || `git rev-parse HEAD`).chomp()[0, 10]
BRANCH = (ENV["GIT_BRANCH"] || `git symbolic-ref -q HEAD`).chomp().gsub('refs/heads/', '').gsub('origin/', '').gsub('/', '_').downcase()
BRANCH = (ENV["GIT_BRANCH"] || `git symbolic-ref -q HEAD`).chomp().gsub('refs/heads/', '').gsub('origin/', '')
BUILD_NUMBER = (ENV["BUILD_NUMBER"] || "dev").chomp()
if BRANCH == "master"
@@ -19,9 +19,10 @@ if BRANCH == "master"
else
DEPLOY_NAME = "#{PACKAGE_NAME}-#{BRANCH}-#{BUILD_NUMBER}-#{COMMIT}"
end
INSTALL_DIR_PATH = File.join(DEPLOY_DIR, DEPLOY_NAME)
PACKAGE_REPO = "packages@gp.mitx.mit.edu:/opt/pkgrepo.incoming"
NORMALIZED_DEPLOY_NAME = DEPLOY_NAME.downcase().gsub(/[_\/]/, '-')
INSTALL_DIR_PATH = File.join(DEPLOY_DIR, NORMALIZED_DEPLOY_NAME)
# Set up the clean and clobber tasks
CLOBBER.include('build')
@@ -58,7 +59,7 @@ task :package do
"--prefix=#{INSTALL_DIR_PATH}",
"-C", "#{REPO_ROOT}",
"--provides=#{PACKAGE_NAME}",
"--name=#{DEPLOY_NAME}",
"--name=#{NORMALIZED_DEPLOY_NAME}",
"--version=#{VERSION}",
"-a", "all",
"."]
@@ -67,5 +68,5 @@ task :package do
end
task :publish => :package do
sh("scp #{BUILD_DIR}/#{DEPLOY_NAME}_#{VERSION}-1_all.deb #{PACKAGE_REPO}")
sh("scp #{BUILD_DIR}/#{NORMALIZED_DEPLOY_NAME}_#{VERSION}*.deb #{PACKAGE_REPO}")
end
@@ -147,6 +147,7 @@ INSTALLED_APPS = (
'perfstats',
'util',
'masquerade',
'django_jasmine',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
@@ -347,6 +348,7 @@ PROJECT_ROOT = os.path.dirname(__file__)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'askbot.context.application_settings',
#'django.core.context_processors.i18n',
'askbot.user_messages.context_processors.user_messages',#must be before auth
@@ -683,3 +685,5 @@ if MAKO_MODULE_DIR == None:
djcelery.setup_loader()
# Jasmine Settings
JASMINE_TEST_DIRECTORY = PROJECT_DIR+'/templates/coffee'
// Generated by CoffeeScript 1.3.2-pre
(function() {
window.Calculator = (function() {
function Calculator() {}
Calculator.bind = function() {
var calculator;
calculator = new Calculator;
$('.calc').click(calculator.toggle);
$('form#calculator').submit(calculator.calculate).submit(function(e) {
return e.preventDefault();
});
return $('div.help-wrapper a').hover(calculator.helpToggle).click(function(e) {
return e.preventDefault();
});
};
Calculator.prototype.toggle = function() {
$('li.calc-main').toggleClass('open');
$('#calculator_wrapper #calculator_input').focus();
return $('.calc').toggleClass('closed');
};
Calculator.prototype.helpToggle = function() {
return $('.help').toggleClass('shown');
};
Calculator.prototype.calculate = function() {
return $.getJSON('/calculate', {
equation: $('#calculator_input').val()
}, function(data) {
return $('#calculator_output').val(data.result);
});
};
return Calculator;
})();
window.Courseware = (function() {
function Courseware() {}
Courseware.bind = function() {
return this.Navigation.bind();
};
Courseware.Navigation = (function() {
function Navigation() {}
Navigation.bind = function() {
var active, navigation;
if ($('#accordion').length) {
navigation = new Navigation;
active = $('#accordion ul:has(li.active)').index('#accordion ul');
$('#accordion').bind('accordionchange', navigation.log).accordion({
active: active >= 0 ? active : 1,
header: 'h3',
autoHeight: false
});
return $('#open_close_accordion a').click(navigation.toggle);
}
};
Navigation.prototype.log = function(event, ui) {
return log_event('accordion', {
newheader: ui.newHeader.text(),
oldheader: ui.oldHeader.text()
});
};
Navigation.prototype.toggle = function() {
return $('.course-wrapper').toggleClass('closed');
};
return Navigation;
})();
return Courseware;
}).call(this);
window.FeedbackForm = (function() {
function FeedbackForm() {}
FeedbackForm.bind = function() {
return $('#feedback_button').click(function() {
var data;
data = {
subject: $('#feedback_subject').val(),
message: $('#feedback_message').val(),
url: window.location.href
};
return $.post('/send_feedback', data, function() {
return $('#feedback_div').html('Feedback submitted. Thank you');
}, 'json');
});
};
return FeedbackForm;
})();
$(function() {
$.ajaxSetup({
headers: {
'X-CSRFToken': $.cookie('csrftoken')
}
});
Calculator.bind();
Courseware.bind();
FeedbackForm.bind();
return $("a[rel*=leanModal]").leanModal();
});
}).call(this);
@@ -18,6 +18,14 @@ This set of questions and answers accompanies MIT&rsquo;s February 13,
6.002x: Circuits and Electronics.
</p>
<h2> How do I register? </h2>
<p> We will shortly post a link to a form where you can sign up for our database and mailing list. Please check back on this website in the next two weeks for further instructions. </p>
<h2> Where can I find a list of courses available? When do the next classes begin? </h2>
<p> Courses will begin again in the Fall Semester (September). We anticipate offering 4-5 courses this Fall, one of which will be 6.002x again. The additional classes will be announced in early summer. </p>
<h2> I tried to register for the course, but it says the username
is already taken.</h2>
$("#accordion").accordion({
active: ${ active_chapter },
header: 'h3',
autoHeight: false,
});
$("#open_close_accordion a").click(function(){
if ($(".course-wrapper").hasClass("closed")){
$(".course-wrapper").removeClass("closed");
} else {
$(".course-wrapper").addClass("closed");
}
});
$('.ui-accordion').bind('accordionchange', function(event, ui) {
var event_data = {'newheader':ui.newHeader.text(),
'oldheader':ui.oldHeader.text()};
log_event('accordion', event_data);
});
CoffeeScript
============
This folder contains the CoffeeScript files that will be compiled to the static
directory. By default, we compile and merge all the files ending in `.coffee`
into `static/js/application.js`.
Install the Compiler
--------------------
The CoffeeScript compiler is written in JavaScript. You'll need to install Node
and npm (Node Package Manager) before you can install the CoffeeScript compiler.
### Mac OS X
Install Node via Homebrew, then use npm:
brew install node
curl http://npmjs.org/install.sh | sh
npm install -g git://github.com/jashkenas/coffee-script.git
(Note that we're using the edge version of CoffeeScript for now, as there was
an issue with directory watching in 1.3.1.)
Run `coffee` and make sure you get a coffee prompt.
### Debian/Ubuntu
Conveniently, you can install Node via `apt-get`, then use npm:
sudo apt-get install nodejs npm &&
sudo npm install -g git://github.com/jashkenas/coffee-script.git
Compiling
---------
Run this command in the `mitx` directory to make the compiler watch for
changes in your files and join the result into `application.js`:
coffee -j static/js/application.js -cw templates/coffee/src
Note that the compiler will not detect files added after you've run the
command, so you'll need to restart it whenever you add a new CoffeeScript file.
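If you only want a one-off build rather than a watcher, the same flags should
work without `-w` (a sketch, assuming the stock CoffeeScript 1.x CLI):

    coffee -j static/js/application.js -c templates/coffee/src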
Testing
=======
We're also using Jasmine to unit-test the JavaScript files. All the specs are
written in CoffeeScript for consistency. Because of a limitation of the
`django-jasmine` plugin, we also need to run a second compiler to compile the
test files.
Use this command to compile the test files:
coffee -cw templates/coffee/spec/*.coffee
Then start the server in debug mode and navigate to
http://127.0.0.1:8000/_jasmine to see the test results.
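For orientation, a spec in this style looks roughly like the sketch below
(`Adder` is a made-up example class, not part of this codebase):

    describe 'Adder', ->
      it 'adds two numbers', ->
        expect(new Adder().add(1, 2)).toEqual 3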
{
"js_files": [
"/static/js/jquery-1.6.2.min.js",
"/static/js/jquery-ui-1.8.16.custom.min.js",
"/static/js/jquery.leanModal.js"
],
"static_files": [
"js/application.js"
]
}
<div class="course-wrapper">
<header id="open_close_accordion">
<a href="#">close</a>
</header>
<div id="accordion"></div>
</div>
<ul>
<li class="calc-main">
<a href="#" class="calc">Calculator</a>
<div id="calculator_wrapper">
<form id="calculator">
<div class="input-wrapper">
<input type="text" id="calculator_input" />
<div class="help-wrapper">
<a href="#">Hints</a>
<dl class="help"></dl>
</div>
</div>
<input id="calculator_button" type="submit" value="="/>
<input type="text" id="calculator_output" readonly />
</form>
</div>
</li>
</ul>
<div id="feedback_div">
<form>
<label>Subject:</label> <input type="text" id="feedback_subject">
<label>Feedback: </label><textarea id="feedback_message"></textarea>
<input id="feedback_button" type="button" value="Submit">
</form>
</div>
describe 'Calculator', ->
beforeEach ->
loadFixtures 'calculator.html'
@calculator = new Calculator
describe 'bind', ->
beforeEach ->
Calculator.bind()
it 'bind the calculator button', ->
expect($('.calc')).toHandleWith 'click', @calculator.toggle
it 'bind the help button', ->
# These events are bound by $.hover()
expect($('div.help-wrapper a')).toHandleWith 'mouseenter', @calculator.helpToggle
expect($('div.help-wrapper a')).toHandleWith 'mouseleave', @calculator.helpToggle
it 'prevent default behavior on help button', ->
$('div.help-wrapper a').click (e) ->
expect(e.isDefaultPrevented()).toBeTruthy()
$('div.help-wrapper a').click()
it 'bind the calculator submit', ->
expect($('form#calculator')).toHandleWith 'submit', @calculator.calculate
it 'prevent default behavior on form submit', ->
$('form#calculator').submit (e) ->
expect(e.isDefaultPrevented()).toBeTruthy()
e.preventDefault()
$('form#calculator').submit()
describe 'toggle', ->
it 'toggle the calculator and focus the input', ->
spyOn $.fn, 'focus'
@calculator.toggle()
expect($('li.calc-main')).toHaveClass('open')
expect($('#calculator_wrapper #calculator_input').focus).toHaveBeenCalled()
it 'toggle the close button on the calculator button', ->
@calculator.toggle()
expect($('.calc')).toHaveClass('closed')
@calculator.toggle()
expect($('.calc')).not.toHaveClass('closed')
describe 'helpToggle', ->
it 'toggle the help overlay', ->
@calculator.helpToggle()
expect($('.help')).toHaveClass('shown')
@calculator.helpToggle()
expect($('.help')).not.toHaveClass('shown')
describe 'calculate', ->
beforeEach ->
$('#calculator_input').val '1+2'
spyOn($, 'getJSON').andCallFake (url, data, callback) ->
callback({ result: 3 })
@calculator.calculate()
it 'send data to /calculate', ->
expect($.getJSON).toHaveBeenCalledWith '/calculate',
equation: '1+2'
, jasmine.any(Function)
it 'update the calculator output', ->
expect($('#calculator_output').val()).toEqual('3')
// Generated by CoffeeScript 1.3.2-pre
(function() {
describe('Calculator', function() {
beforeEach(function() {
loadFixtures('calculator.html');
return this.calculator = new Calculator;
});
describe('bind', function() {
beforeEach(function() {
return Calculator.bind();
});
it('bind the calculator button', function() {
return expect($('.calc')).toHandleWith('click', this.calculator.toggle);
});
it('bind the help button', function() {
expect($('div.help-wrapper a')).toHandleWith('mouseenter', this.calculator.helpToggle);
return expect($('div.help-wrapper a')).toHandleWith('mouseleave', this.calculator.helpToggle);
});
it('prevent default behavior on help button', function() {
$('div.help-wrapper a').click(function(e) {
return expect(e.isDefaultPrevented()).toBeTruthy();
});
return $('div.help-wrapper a').click();
});
it('bind the calculator submit', function() {
return expect($('form#calculator')).toHandleWith('submit', this.calculator.calculate);
});
return it('prevent default behavior on form submit', function() {
$('form#calculator').submit(function(e) {
expect(e.isDefaultPrevented()).toBeTruthy();
return e.preventDefault();
});
return $('form#calculator').submit();
});
});
describe('toggle', function() {
it('toggle the calculator and focus the input', function() {
spyOn($.fn, 'focus');
this.calculator.toggle();
expect($('li.calc-main')).toHaveClass('open');
return expect($('#calculator_wrapper #calculator_input').focus).toHaveBeenCalled();
});
return it('toggle the close button on the calculator button', function() {
this.calculator.toggle();
expect($('.calc')).toHaveClass('closed');
this.calculator.toggle();
return expect($('.calc')).not.toHaveClass('closed');
});
});
describe('helpToggle', function() {
return it('toggle the help overlay', function() {
this.calculator.helpToggle();
expect($('.help')).toHaveClass('shown');
this.calculator.helpToggle();
return expect($('.help')).not.toHaveClass('shown');
});
});
return describe('calculate', function() {
beforeEach(function() {
$('#calculator_input').val('1+2');
spyOn($, 'getJSON').andCallFake(function(url, data, callback) {
return callback({
result: 3
});
});
return this.calculator.calculate();
});
it('send data to /calculate', function() {
return expect($.getJSON).toHaveBeenCalledWith('/calculate', {
equation: '1+2'
}, jasmine.any(Function));
});
return it('update the calculator output', function() {
return expect($('#calculator_output').val()).toEqual('3');
});
});
});
}).call(this);
describe 'Courseware', ->
describe 'bind', ->
it 'bind the navigation', ->
spyOn Courseware.Navigation, 'bind'
Courseware.bind()
expect(Courseware.Navigation.bind).toHaveBeenCalled()
describe 'Navigation', ->
beforeEach ->
loadFixtures 'accordion.html'
@navigation = new Courseware.Navigation
describe 'bind', ->
describe 'when the #accordion exists', ->
describe 'when there is an active section', ->
it 'activate the accordion with correct active section', ->
spyOn $.fn, 'accordion'
$('#accordion').append('<ul><li></li></ul><ul><li class="active"></li></ul>')
Courseware.Navigation.bind()
expect($('#accordion').accordion).toHaveBeenCalledWith
active: 1
header: 'h3'
autoHeight: false
describe 'when there is no active section', ->
it 'activate the accordion with section 1 as active', ->
spyOn $.fn, 'accordion'
$('#accordion').append('<ul><li></li></ul><ul><li></li></ul>')
Courseware.Navigation.bind()
expect($('#accordion').accordion).toHaveBeenCalledWith
active: 1
header: 'h3'
autoHeight: false
it 'binds the accordionchange event', ->
Courseware.Navigation.bind()
expect($('#accordion')).toHandleWith 'accordionchange', @navigation.log
it 'bind the navigation toggle', ->
Courseware.Navigation.bind()
expect($('#open_close_accordion a')).toHandleWith 'click', @navigation.toggle
describe 'when the #accordion does not exists', ->
beforeEach ->
$('#accordion').remove()
it 'does not activate the accordion', ->
spyOn $.fn, 'accordion'
Courseware.Navigation.bind()
expect($('#accordion').accordion).wasNotCalled()
describe 'toggle', ->
it 'toggle closed class on the wrapper', ->
$('.course-wrapper').removeClass('closed')
@navigation.toggle()
expect($('.course-wrapper')).toHaveClass('closed')
@navigation.toggle()
expect($('.course-wrapper')).not.toHaveClass('closed')
describe 'log', ->
beforeEach ->
window.log_event = ->
spyOn window, 'log_event'
it 'submit event log', ->
@navigation.log {}, {
newHeader:
text: -> "new"
oldHeader:
text: -> "old"
}
expect(window.log_event).toHaveBeenCalledWith 'accordion',
newheader: 'new'
oldheader: 'old'
// Generated by CoffeeScript 1.3.2-pre
(function() {
describe('Courseware', function() {
describe('bind', function() {
return it('bind the navigation', function() {
spyOn(Courseware.Navigation, 'bind');
Courseware.bind();
return expect(Courseware.Navigation.bind).toHaveBeenCalled();
});
});
return describe('Navigation', function() {
beforeEach(function() {
loadFixtures('accordion.html');
return this.navigation = new Courseware.Navigation;
});
describe('bind', function() {
describe('when the #accordion exists', function() {
describe('when there is an active section', function() {
return it('activate the accordion with correct active section', function() {
spyOn($.fn, 'accordion');
$('#accordion').append('<ul><li></li></ul><ul><li class="active"></li></ul>');
Courseware.Navigation.bind();
return expect($('#accordion').accordion).toHaveBeenCalledWith({
active: 1,
header: 'h3',
autoHeight: false
});
});
});
describe('when there is no active section', function() {
return it('activate the accordion with section 1 as active', function() {
spyOn($.fn, 'accordion');
$('#accordion').append('<ul><li></li></ul><ul><li></li></ul>');
Courseware.Navigation.bind();
return expect($('#accordion').accordion).toHaveBeenCalledWith({
active: 1,
header: 'h3',
autoHeight: false
});
});
});
it('binds the accordionchange event', function() {
Courseware.Navigation.bind();
return expect($('#accordion')).toHandleWith('accordionchange', this.navigation.log);
});
return it('bind the navigation toggle', function() {
Courseware.Navigation.bind();
return expect($('#open_close_accordion a')).toHandleWith('click', this.navigation.toggle);
});
});
return describe('when the #accordion does not exists', function() {
beforeEach(function() {
return $('#accordion').remove();
});
return it('does not activate the accordion', function() {
spyOn($.fn, 'accordion');
Courseware.Navigation.bind();
return expect($('#accordion').accordion).wasNotCalled();
});
});
});
describe('toggle', function() {
return it('toggle closed class on the wrapper', function() {
$('.course-wrapper').removeClass('closed');
this.navigation.toggle();
expect($('.course-wrapper')).toHaveClass('closed');
this.navigation.toggle();
return expect($('.course-wrapper')).not.toHaveClass('closed');
});
});
return describe('log', function() {
beforeEach(function() {
window.log_event = function() {};
return spyOn(window, 'log_event');
});
return it('submit event log', function() {
this.navigation.log({}, {
newHeader: {
text: function() {
return "new";
}
},
oldHeader: {
text: function() {
return "old";
}
}
});
return expect(window.log_event).toHaveBeenCalledWith('accordion', {
newheader: 'new',
oldheader: 'old'
});
});
});
});
});
}).call(this);
describe 'FeedbackForm', ->
beforeEach ->
loadFixtures 'feedback_form.html'
describe 'bind', ->
beforeEach ->
FeedbackForm.bind()
spyOn($, 'post').andCallFake (url, data, callback, format) ->
callback()
it 'binds to the #feedback_button', ->
expect($('#feedback_button')).toHandle 'click'
it 'post data to /send_feedback on click', ->
$('#feedback_subject').val 'Awesome!'
$('#feedback_message').val 'This site is really good.'
$('#feedback_button').click()
expect($.post).toHaveBeenCalledWith '/send_feedback', {
subject: 'Awesome!'
message: 'This site is really good.'
url: window.location.href
}, jasmine.any(Function), 'json'
it 'replace the form with a thank you message', ->
$('#feedback_button').click()
expect($('#feedback_div').html()).toEqual 'Feedback submitted. Thank you'
// Generated by CoffeeScript 1.3.2-pre
(function() {
describe('FeedbackForm', function() {
beforeEach(function() {
return loadFixtures('feedback_form.html');
});
return describe('bind', function() {
beforeEach(function() {
FeedbackForm.bind();
return spyOn($, 'post').andCallFake(function(url, data, callback, format) {
return callback();
});
});
it('binds to the #feedback_button', function() {
return expect($('#feedback_button')).toHandle('click');
});
it('post data to /send_feedback on click', function() {
$('#feedback_subject').val('Awesome!');
$('#feedback_message').val('This site is really good.');
$('#feedback_button').click();
return expect($.post).toHaveBeenCalledWith('/send_feedback', {
subject: 'Awesome!',
message: 'This site is really good.',
url: window.location.href
}, jasmine.any(Function), 'json');
});
return it('replace the form with a thank you message', function() {
$('#feedback_button').click();
return expect($('#feedback_div').html()).toEqual('Feedback submitted. Thank you');
});
});
});
}).call(this);
jasmine.getFixtures().fixturesPath = "/_jasmine/fixtures/"
// Generated by CoffeeScript 1.3.2-pre
(function() {
jasmine.getFixtures().fixturesPath = "/_jasmine/fixtures/";
}).call(this);
class window.Calculator
@bind: ->
calculator = new Calculator
$('.calc').click calculator.toggle
$('form#calculator').submit(calculator.calculate).submit (e) ->
e.preventDefault()
$('div.help-wrapper a').hover(calculator.helpToggle).click (e) ->
e.preventDefault()
toggle: ->
$('li.calc-main').toggleClass 'open'
$('#calculator_wrapper #calculator_input').focus()
$('.calc').toggleClass 'closed'
helpToggle: ->
$('.help').toggleClass 'shown'
calculate: ->
$.getJSON '/calculate', { equation: $('#calculator_input').val() }, (data) ->
$('#calculator_output').val(data.result)
class window.Courseware
@bind: ->
@Navigation.bind()
class @Navigation
@bind: ->
if $('#accordion').length
navigation = new Navigation
active = $('#accordion ul:has(li.active)').index('#accordion ul')
$('#accordion').bind('accordionchange', navigation.log).accordion
active: if active >= 0 then active else 1
header: 'h3'
autoHeight: false
$('#open_close_accordion a').click navigation.toggle
log: (event, ui) ->
log_event 'accordion',
newheader: ui.newHeader.text()
oldheader: ui.oldHeader.text()
toggle: ->
$('.course-wrapper').toggleClass('closed')
class window.FeedbackForm
@bind: ->
$('#feedback_button').click ->
data =
subject: $('#feedback_subject').val()
message: $('#feedback_message').val()
url: window.location.href
$.post '/send_feedback', data, ->
$('#feedback_div').html 'Feedback submitted. Thank you'
,'json'
$ ->
$.ajaxSetup
headers : { 'X-CSRFToken': $.cookie 'csrftoken' }
Calculator.bind()
Courseware.bind()
FeedbackForm.bind()
$("a[rel*=leanModal]").leanModal()
@@ -8,7 +8,8 @@
<style type="text/css">
.grade_a {color:green;}
.grade_b {color:Chocolate;}
.grade_c {color:DimGray;}
.grade_c {color:DarkSlateGray;}
.grade_f {color:DimGray;}
.grade_none {color:LightGray;}
</style>
@@ -29,16 +30,10 @@
<tr> <!-- Header Row -->
<th>Student</th>
%for section in templateSummary:
%if 'subscores' in section:
%for subsection in section['subscores']:
<th>${subsection['label']}</th>
%endfor
<th>${section['totallabel']}</th>
%else:
<th>${section['category']}</th>
%endif
%for section in templateSummary['section_breakdown']:
<th>${section['label']}</th>
%endfor
<th>Total</th>
</tr>
<%def name="percent_data(percentage)">
@@ -50,6 +45,8 @@
data_class = "grade_b"
elif percentage > .6:
data_class = "grade_c"
elif percentage > 0:
data_class = "grade_f"
%>
<td class="${data_class}">${ "{0:.0%}".format( percentage ) }</td>
</%def>
@@ -57,16 +54,10 @@
%for student in students:
<tr>
<td><a href="/profile/${student['id']}/">${student['username']}</a></td>
%for section in student['grade_info']['grade_summary']:
%if 'subscores' in section:
%for subsection in section['subscores']:
${percent_data( subsection['percentage'] )}
%endfor
${percent_data( section['totalscore'] )}
%else:
${percent_data( section['totalscore'] )}
%endif
%for section in student['grade_info']['grade_summary']['section_breakdown']:
${percent_data( section['percent'] )}
%endfor
<th>${percent_data( student['grade_info']['grade_summary']['percent'])}</th>
</tr>
%endfor
</table>
@@ -9,6 +9,7 @@
<script type="text/javascript" src="${ settings.LIB_URL }jquery-1.6.2.min.js"></script>
<script type="text/javascript" src="${ settings.LIB_URL }jquery-ui-1.8.16.custom.min.js"></script>
<script type="text/javascript" src="${ settings.LIB_URL }swfobject/swfobject.js"></script>
<script type="text/javascript" src="/static/js/application.js"></script>
<!--[if lt IE 9]>
<script src="/static/js/html5shiv.js"></script>
@@ -90,20 +91,18 @@
</nav>
</footer>
<div id="feedback_div" class="leanModal_box">
<h1>Feedback for MITx</h1>
<p>Found a bug? Got an idea for improving our system? Let us know.</p>
<form>
<ol>
<li><label>Subject:</label> <input type="text" id="feedback_subject"></li>
<li><label>Feedback: </label><textarea id="feedback_message"></textarea></li>
<li><input id="feedback_button" type="button" value="Submit"></li>
</ol>
</form>
</div>
<div id="feedback_div" class="leanModal_box">
<h1>Feedback for MITx</h1>
<p>Found a bug? Got an idea for improving our system? Let us know.</p>
<form>
<ol>
<li><label>Subject:</label> <input type="text" id="feedback_subject"></li>
<li><label>Feedback: </label><textarea id="feedback_message"></textarea></li>
<li><input id="feedback_button" type="button" value="Submit"></li>
</ol>
</form>
</div>
<script type="text/javascript" src="${ settings.LIB_URL }jquery.treeview.js"></script>
<script type="text/javascript" src="/static/js/jquery.leanModal.min.js"></script>
@@ -112,52 +111,7 @@
<script type="text/javascript" src="/static/js/video_player.js"></script>
<script type="text/javascript" src="/static/js/schematic.js"></script>
<script type="text/javascript" src="/static/js/cktsim.js"></script>
<script>
// Feedback form
$(function() {
$("#feedback_button").click(function(){
postJSON("/send_feedback", {"subject":$("#feedback_subject").attr("value"),
"url":document.URL,
"message":$("#feedback_message").attr("value")},
function(data){
$("#feedback_subject").attr("value","");
$("#feedback_message").attr("value","");
$("#feedback_div").html("Feedback submitted. Thank you");
});
});
// Calculator
$(".calc").click(function(){
$("li.calc-main").toggleClass('open');
$("#calculator_wrapper #calculator_input").focus();
$(this).toggleClass("closed");
return false;
});
$("div.help-wrapper a").hover(function(){
$(".help").toggleClass("shown");
});
$("div.help-wrapper a").click(function(){
return false;
});
$("form#calculator").submit(function(e){
e.preventDefault();
$.getJSON("/calculate", {"equation":$("#calculator_input").attr("value")},
function(data){
$("#calculator_output").attr("value",data.result);
});
});
$("a[rel*=leanModal]").leanModal();
});
</script>
<%block name="js_extra"/>
</body>
</html>
@@ -150,11 +150,11 @@ $(function() {
<%
earned = section['section_total'].earned
total = section['section_total'].possible
percentageString = "{0:.0%}".format( float(earned)/total) if earned > 0 else ""
percentageString = "{0:.0%}".format( float(earned)/total) if earned > 0 and total > 0 else ""
%>
<h3><a href="${reverse('courseware_section', args=format_url_params([chapter['course'], chapter['chapter'], section['section']])) }">
${ section['section'] }</a> ${"({0:g}/{1:g}) {2}".format( earned, total, percentageString )}</h3>
${ section['section'] }</a> ${"({0:.3n}/{1:.3n}) {2}".format( float(earned), float(total), percentageString )}</h3>
${section['subtitle']}
%if 'due' in section and section['due']!="":
due ${section['due']}
@@ -164,7 +164,7 @@ $(function() {
<ol class="scores">
${ "Problem Scores: " if section['graded'] else "Practice Scores: "}
%for score in section['scores']:
<li class="score">${"{0:g}/{1:g}".format(score.earned,score.possible)}</li>
<li class="score">${"{0:.3n}/{1:.3n}".format(float(score.earned),float(score.possible))}</li>
%endfor
</ol>
%endif
@@ -9,7 +9,7 @@ $(function () {
position: 'absolute',
display: 'none',
top: y + 5,
left: x + 5,
left: x + 15,
border: '1px solid #000',
padding: '4px 6px',
color: '#fff',
@@ -19,96 +19,81 @@
}
/* -------------------------------- Grade detail bars -------------------------------- */
<%
colors = ["#b72121", "#600101", "#666666", "#333333"]
categories = {}
tickIndex = 1
sectionSpacer = 0.5
sectionSpacer = 0.25
sectionIndex = 0
series = []
ticks = [] #These are the indices and x-axis labels for the data
bottomTicks = [] #Labels on the bottom
detail_tooltips = {} #This is a dictionary mapping from 'section' -> array of detail_tooltips
droppedScores = [] #These are the datapoints to indicate assignments which aren't factored into the total score
droppedScores = [] #These are the datapoints to indicate assignments which are not factored into the total score
dropped_score_tooltips = []
for section in grade_summary:
if 'subscores' in section: ##This is for sections like labs or homeworks, with several smaller components and a total
series.append({
'label' : section['category'],
'data' : [[i + tickIndex, score['percentage']] for i,score in enumerate(section['subscores'])],
'color' : colors[sectionIndex]
})
ticks += [[i + tickIndex, score['label'] ] for i,score in enumerate(section['subscores'])]
bottomTicks.append( [tickIndex + len(section['subscores'])/2, section['category']] )
detail_tooltips[ section['category'] ] = [score['summary'] for score in section['subscores']]
droppedScores += [[tickIndex + index, 0.05] for index in section['dropped_indices']]
dropExplanation = "The lowest {0} {1} scores are dropped".format( len(section['dropped_indices']), section['category'] )
dropped_score_tooltips += [dropExplanation] * len(section['dropped_indices'])
tickIndex += len(section['subscores']) + sectionSpacer
category_total_label = section['category'] + " Total"
series.append({
'label' : category_total_label,
'data' : [ [tickIndex, section['totalscore']] ],
'color' : colors[sectionIndex]
})
ticks.append( [tickIndex, section['totallabel']] )
detail_tooltips[category_total_label] = [section['totalscore_summary']]
else:
series.append({
'label' : section['category'],
'data' : [ [tickIndex, section['totalscore']] ],
'color' : colors[sectionIndex]
})
ticks.append( [tickIndex, section['totallabel']] )
detail_tooltips[section['category']] = [section['totalscore_summary']]
for section in grade_summary['section_breakdown']:
if section.get('prominent', False):
tickIndex += sectionSpacer
if section['category'] not in categories:
colorIndex = len(categories) % len(colors)
categories[ section['category'] ] = {'label' : section['category'],
'data' : [],
'color' : colors[colorIndex]}
categoryData = categories[ section['category'] ]
categoryData['data'].append( [tickIndex, section['percent']] )
ticks.append( [tickIndex, section['label'] ] )
if section['category'] in detail_tooltips:
detail_tooltips[ section['category'] ].append( section['detail'] )
else:
detail_tooltips[ section['category'] ] = [ section['detail'], ]
if 'mark' in section:
droppedScores.append( [tickIndex, 0.05] )
dropped_score_tooltips.append( section['mark']['detail'] )
tickIndex += 1 + sectionSpacer
sectionIndex += 1
detail_tooltips['Dropped Scores'] = dropped_score_tooltips
tickIndex += 1
if section.get('prominent', False):
tickIndex += sectionSpacer
## ----------------------------- Grade overview bar ------------------------- ##
totalWeight = 0.0
sectionIndex = 0
totalScore = 0.0
tickIndex += sectionSpacer
series = categories.values()
overviewBarX = tickIndex
for section in grade_summary:
weighted_score = section['totalscore'] * section['weight']
summary_text = "{0} - {1:.1%} of a possible {2:.0%}".format(section['category'], weighted_score, section['weight'])
weighted_category_label = section['category'] + " - Weighted"
if section['totalscore'] > 0:
extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[]
for section in grade_summary['grade_breakdown']:
if section['percent'] > 0:
if section['category'] in categories:
color = categories[ section['category'] ]['color']
else:
color = colors[ extraColorIndex % len(colors) ]
extraColorIndex += 1
series.append({
'label' : weighted_category_label,
'data' : [ [overviewBarX, weighted_score] ],
'color' : colors[sectionIndex]
'label' : section['category'] + "-grade_breakdown",
'data' : [ [overviewBarX, section['percent']] ],
'color' : color
})
detail_tooltips[weighted_category_label] = [ summary_text ]
sectionIndex += 1
totalWeight += section['weight']
totalScore += section['totalscore'] * section['weight']
detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ]
ticks += [ [overviewBarX, "Total"] ]
tickIndex += 1 + sectionSpacer
totalScore = grade_summary['percent']
detail_tooltips['Dropped Scores'] = dropped_score_tooltips
%>
var series = ${ json.dumps(series) };
var series = ${ json.dumps( series ) };
var ticks = ${ json.dumps(ticks) };
var bottomTicks = ${ json.dumps(bottomTicks) };
var detail_tooltips = ${ json.dumps(detail_tooltips) };
@@ -132,7 +117,7 @@ $(function () {
var $grade_detail_graph = $("#${graph_div_id}");
if ($grade_detail_graph.length > 0) {
var plot = $.plot($grade_detail_graph, series, options);
//We need to put back the plotting of the percent here
var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}});
$grade_detail_graph.append('<div style="position:absolute;left:' + (o.left - 12) + 'px;top:' + (o.top - 20) + 'px">${"{totalscore:.0%}".format(totalscore=totalScore)}</div>');
}
@@ -84,84 +84,83 @@ section.tool-wrapper {
width: flex-grid(4.5, 9);
div.graph-controls {
padding: 0 0 lh();
margin-bottom: lh();
border-bottom: 1px solid darken(#073642, 5%);
@include box-shadow(0 1px 0 lighten(#073642, 2%));
@extend .clearfix;
div.music-wrapper {
margin-right: flex-gutter(4.5);
width: flex-grid(1.5, 4.5);
float: left;
padding: 0 0 lh();
margin-bottom: lh();
border-bottom: 1px solid darken(#073642, 10%);
@include box-shadow(0 1px 0 lighten(#073642, 2%));
@extend .clearfix;
input#playButton {
display: block;
@include button(simple, lighten( #586e75, 5% ));
font: bold 14px $body-font-family;
border-color: darken(#002b36, 6%);
float: right;
&:active {
@include box-shadow(none);
}
&[value="Stop"] {
@include button(simple, darken(#268bd2, 30%));
font: bold 14px $body-font-family;
&:active {
@include box-shadow(none);
}
}
}
}
div.inputs-wrapper {
padding-top: lh(.5);
width: flex-grid(3, 4.5);
float: left;
@include clearfix;
margin-bottom: lh();
padding: 0 0 lh();
margin-bottom: lh();
border-bottom: 1px solid darken(#073642, 10%);
@include box-shadow(0 1px 0 lighten(#073642, 2%));
@extend .clearfix;
}
select#musicTypeSelect {
display: block;
margin-bottom: lh(.5);
font: 16px $body-font-family;
width: 100%;
p {
@include inline-block();
margin: 0;
-webkit-font-smoothing: antialiased;
font-weight: bold;
text-shadow: 0 -1px 0 darken(#073642, 10%);
}
div#graph-output, div#graph-listen {
display: block;
margin-bottom: lh(.5);
text-align: right;
p {
@include inline-block();
margin: 0;
}
ul {
@include inline-block();
margin-bottom: 0;
ul {
li {
@include inline-block();
margin-bottom: 0;
li {
@include inline-block();
margin-bottom: 0;
input {
margin-right: 5px;
}
input {
margin-right: 5px;
}
}
}
input#playButton {
div#graph-listen {
margin-top: 8px;
margin-right: 20px;
display: block;
@include button(simple, #dc322f);
font: bold 14px $body-font-family;
color: #47221a;
text-shadow: 0 1px 0 lighten(#dc322f, 5%);
@include box-shadow(inset 0 1px 0 lighten(#dc322f, 10%));
&:active {
@include box-shadow(none);
}
&[value="Stop"] {
@include button(simple, darken(#268bd2, 30%));
font: bold 14px $body-font-family;
&:active {
@include box-shadow(none);
}
}
text-align: right;
float: left;
margin-bottom: 0;
}
}
label {
@include border-radius(2px);
font-weight: bold;
padding: 3px;
color: #fff;
padding: 3px;
-webkit-font-smoothing: antialiased;
}
@@ -190,6 +189,29 @@ section.tool-wrapper {
div.schematic-sliders {
div.top-sliders {
padding: 0 0 lh();
margin-bottom: lh();
border-bottom: 1px solid darken(#073642, 10%);
@include box-shadow(0 1px 0 lighten(#073642, 2%));
@extend .clearfix;
select#musicTypeSelect {
@include inline-block();
font: 16px $body-font-family;
margin-bottom: 0;
}
p {
@include inline-block();
-webkit-font-smoothing: antialiased;
text-shadow: 0 -1px 0 darken(#073642, 10%);
margin: 0 lh(.5) lh() 0;
font-weight: bold;
}
}
div.slider-label {
margin-bottom: lh(0.5);
font-weight: bold;
@@ -208,11 +230,13 @@
}
.ui-slider-handle {
background-color: #dc322f;
background: lighten( #586e75, 5% ) url('/static/images/amplifier-slider-handle.png') center no-repeat;
border: 1px solid darken(#002b36, 8%);
@include box-shadow(inset 0 1px 0 lighten( #586e75, 20% ));
margin-top: -.3em;
&:hover, &:active {
background-color: lighten(#dc322f, 5%);
background-color: lighten( #586e75, 10% );
}
}
}
@@ -172,7 +172,7 @@ div.course-wrapper {
header {
@extend h1.top-header;
margin-bottom: 0;
margin-bottom: -16px;
h1 {
margin: 0;
@@ -193,10 +193,18 @@ div.course-wrapper {
padding-bottom: 0;
}
ul {
list-style: disc outside none;
padding-left: 1em;
}
nav.sequence-bottom {
ul {
list-style: none;
padding: 0;
}
}
}
}
@@ -42,16 +42,6 @@ div.answer-block {
padding-top: 20px;
width: 100%;
div.official-stamp {
background: $mit-red;
color: #fff;
font-size: 12px;
margin-top: 10px;
padding: 2px 5px;
text-align: center;
margin-left: -1px;
}
img.answer-img-accept {
margin: 10px 0px 10px 16px;
}
div.question-header {
div.official-stamp {
background: $mit-red;
color: #fff;
font-size: 12px;
margin-top: 10px;
padding: 2px 5px;
text-align: center;
margin-left: -1px;
}
div.vote-buttons {
display: inline-block;
float: left;
@@ -77,6 +77,10 @@ if settings.ASKBOT_ENABLED:
# url(r'^robots.txt$', include('robots.urls')),
)
if settings.DEBUG:
## Jasmine
urlpatterns=urlpatterns + (url(r'^_jasmine/', include('django_jasmine.urls')),)
urlpatterns = patterns(*urlpatterns)
if settings.DEBUG: