edx / edx-platform

Commit 03ea89f6
Authored 12 years ago by Bridger Maxwell
Added some tests to the grading refactor. Fixed some bugs found during testing.
Parent: 9ced4685
Showing 2 changed files with 129 additions and 38 deletions:

djangoapps/courseware/grades.py   +33  -36
djangoapps/courseware/tests.py    +96  -2
djangoapps/courseware/grades.py
@@ -75,10 +75,10 @@ class SingleSectionGrader(CourseGrader):
         if foundScore:
             percent = foundScore.earned / float(foundScore.possible)
-            detail = "{name} - {percent:.0%} ({earned:g}/{possible:g})".format(name = self.section_name,
+            detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(name = self.section_name,
                                                                                 percent = percent,
-                                                                                earned = foundScore.earned,
-                                                                                possible = foundScore.possible)
+                                                                                earned = float(foundScore.earned),
+                                                                                possible = float(foundScore.possible))
         else:
             percent = 0.0
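Context for the hunk above: the format spec changes from "{earned:g}/{possible:g}" to "{earned:.3n}/{possible:.3n}" and the values are wrapped in float(). A likely reason (my reading, not stated in the commit) is that Python rejects a precision on the locale-aware 'n' presentation type when the argument is an int, so integer earned/possible values would raise at render time. A small illustrative snippet, not part of the commit:

    # Illustration only, not from the repository: why the values are wrapped in float()
    # once the presentation type changes from 'g' to '.3n'.
    print("{0:.3n}".format(7.5))          # 7.5
    print("{0:.3n}".format(float(20)))    # 20
    try:
        print("{0:.3n}".format(20))       # precision plus 'n' on an int raises
    except ValueError as err:
        print(err)                        # Precision not allowed in integer format specifier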
@@ -99,6 +99,10 @@ class AssignmentFormatGrader(CourseGrader):
     sections in this format must be specified (even if those sections haven't been
     written yet).
 
+    min_number defines how many assignments are expected throughout the course. Placeholder
+    scores (of 0) will be inserted if the number of matching sections in the course is < min_number.
+    If there number of matching sections in the course is > min_number, min_number will be ignored.
+
     category should be presentable to the user, but may not appear. When the grade breakdown is
     displayed, scores from the same category will be similar (for example, by color).
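The new docstring above describes the min_number padding behaviour that the loop change in the next hunk implements. A rough usage sketch, based on the calls and test data added elsewhere in this commit (not code from the repository):

    # Sketch only; Score fields follow the tests added in this commit.
    from grades import Score, AssignmentFormatGrader

    sheet = {'Homework': [Score(earned=2, possible=20.0, weight=1, graded=True, section='hw1'),
                          Score(earned=16, possible=16.0, weight=1, graded=True, section='hw2')]}
    hw_grader = AssignmentFormatGrader("Homework", 12, 2)   # expect 12 homeworks, drop the 2 lowest
    graded = hw_grader.grade(sheet)
    # The breakdown is padded to 12 entries (plus a summary row); the 10 missing
    # homeworks appear as "Unreleased - 0% (?/?)" placeholders scored at 0.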
@@ -113,37 +117,40 @@ class AssignmentFormatGrader(CourseGrader):
         self.course_format = course_format
         self.min_number = min_number
         self.drop_count = drop_count
-        self.category = category or course_format
-        self.section_type = section_type or course_format
-        self.short_label = short_label or section_type
+        self.category = category or self.course_format
+        self.section_type = section_type or self.course_format
+        self.short_label = short_label or self.course_format
 
     def grade(self, grade_sheet):
         def totalWithDrops(breakdown, drop_count):
             #create an array of tuples with (index, mark), sorted by mark['percent'] descending
             sorted_breakdown = sorted( enumerate(breakdown), key=lambda x: -x[1]['percent'] )
             # A list of the indices of the dropped scores
-            dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
+            dropped_indices = []
+            if drop_count > 0:
+                dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
             aggregate_score = 0
             for index, mark in enumerate(breakdown):
                 if index not in dropped_indices:
                     aggregate_score += mark['percent']
 
-            aggregate_score /= len(scores) - drop_count
+            if (len(breakdown) - drop_count > 0):
+                aggregate_score /= len(breakdown) - drop_count
 
             return aggregate_score, dropped_indices
 
         #Figure the homework scores
         scores = grade_sheet.get(self.course_format, [])
         breakdown = []
-        for i in range(12):
+        for i in range( max(self.min_number, len(scores)) ):
            if i < len(scores):
                 percentage = scores[i].earned / float(scores[i].possible)
-                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:g}/{possible:g})".format(index = i+1,
+                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
                                                                                 section_type = self.section_type,
                                                                                 name = scores[i].section,
                                                                                 percent = percentage,
-                                                                                earned = scores[i].earned,
-                                                                                possible = scores[i].possible)
+                                                                                earned = float(scores[i].earned),
+                                                                                possible = float(scores[i].possible))
             else:
                 percentage = 0
                 summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index = i+1, section_type = self.section_type)
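Two of the "bugs found during testing" from the commit message are visible in totalWithDrops above. First, when drop_count is 0 the old one-liner sliced sorted_breakdown[-0:], and a slice starting at -0 is just a slice starting at 0, so every entry would have been treated as dropped. Second, the average was divided by len(scores) - drop_count, using the enclosing scores list rather than the breakdown actually being totalled, so an empty grade sheet could divide by zero or by a negative count; the new guard addresses both. A minimal illustration of the slicing edge case, not from the repository:

    # Illustration only: a -0 slice start keeps everything instead of nothing.
    marks = [0.3, 0.9, 0.1]
    print(marks[-2:])    # [0.9, 0.1]  (the last two entries)
    print(marks[-0:])    # [0.3, 0.9, 0.1]  (-0 == 0, so the whole list)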
@@ -152,7 +159,7 @@ class AssignmentFormatGrader(CourseGrader):
                 points_possible = random.randrange(10, 50)
                 points_earned = random.randrange(5, points_possible)
                 percentage = points_earned / float(points_possible)
-                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:g}/{possible:g})".format(index = i+1,
+                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
                                                                                 section_type = self.section_type,
                                                                                 name = "Randomly Generated",
                                                                                 percent = percentage,
@@ -162,7 +169,7 @@ class AssignmentFormatGrader(CourseGrader):
             short_label = "{short_label} {index:02d}".format(index = i+1, short_label = self.short_label)
 
             breakdown.append( {'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category} )
 
         total_percent, dropped_indices = totalWithDrops(breakdown, self.drop_count)
 
         for dropped_index in dropped_indices:
@@ -251,23 +258,15 @@ def grade_sheet(student):
             if len(problems) > 0:
                 for p in problems:
                     (correct, total) = get_score(student, p, response_by_id)
-                    # id = p.get('id')
-                    # correct = 0
-                    # if id in response_by_id:
-                    #     response = response_by_id[id]
-                    #     if response.grade!=None:
-                    #         correct=response.grade
-                    # total=courseware.modules.capa_module.Module(etree.tostring(p), "id").max_score() # TODO: Add state. Not useful now, but maybe someday problems will have randomized max scores?
-                    # print correct, total
                     if settings.GENERATE_PROFILE_SCORES:
                         if total > 1:
                             correct = random.randrange( max(total-2, 1), total+1 )
                         else:
                             correct = total
 
-                    scores.append( Score(int(correct), total, float(p.get("weight", 1)), graded, p.get("name")) )
+                    scores.append( Score(int(correct), total, float(p.get("weight", total)), graded, p.get("name")) )
 
-            section_total, graded_total = aggregate_scores(scores, s)
+            section_total, graded_total = aggregate_scores(scores, s.get("name"), s.get("weight", 1))
             #Add the graded total to totaled_scores
             format = s.get('format') if s.get('format') else ""
             subtitle = s.get('subtitle') if s.get('subtitle') else format
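The default weight change above (p.get("weight", 1) becomes p.get("weight", total)) plays into the aggregation formula that appears, unchanged, later in this diff: total_correct sums (earned / possible) * weight and total_possible sums the weights. With weight defaulting to the problem's own point total, an unweighted problem contributes its raw points, so point-based section totals come out as before. A rough check of that arithmetic (mine, not from the commit):

    # Rough check only: when weight equals the point total, the weighted aggregate
    # reduces to plain earned/possible point sums.
    problems = [(2, 5.0), (3, 3.0)]                 # (earned, possible) pairs, no explicit weight
    weights = [possible for _, possible in problems]
    total_correct = sum((earned / possible) * w for (earned, possible), w in zip(problems, weights))
    total_possible = sum(weights)
    print(total_correct, total_possible)            # 5.0 8.0, i.e. the raw point totals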
@@ -291,10 +290,10 @@ def grade_sheet(student):
                                 'sections': sections,})
 
     #TODO: This grader declaration should live in the data repository. It is only here now to get it working
-    hwGrader = AssignmentFormatGrader("Homework", 12, 2, "Homework", "Homework", "HW")
-    labGrader = AssignmentFormatGrader("Lab", 12, 2, "Labs", "Lab", "Lab")
-    midtermGrader = SingleSectionGrader("Examination", "Midterm Exam", "Midterm")
-    finalGrader = SingleSectionGrader("Examination", "Final Exam", "Final")
+    hwGrader = AssignmentFormatGrader("Homework", 12, 2, short_label = "HW")
+    labGrader = AssignmentFormatGrader("Lab", 12, 2, category = "Labs")
+    midtermGrader = SingleSectionGrader("Midterm", "Midterm Exam", short_label = "Midterm")
+    finalGrader = SingleSectionGrader("Examination", "Final Exam", short_label = "Final")
 
     grader = WeightedSubsectionsGrader( [(hwGrader, hwGrader.category, 0.15), (labGrader, labGrader.category, 0.15),
                                         (midtermGrader, midtermGrader.category, 0.30), (finalGrader, finalGrader.category, 0.40)] )
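The rewritten grader declarations above lean on the __init__ defaults added earlier in this diff (category, section_type and short_label all fall back to course_format), so only the labels that actually differ are passed, now as keywords. Judging by the new tests, the midterm grader's first argument also changes from "Examination" to "Midterm" so that it looks up the 'Midterm' entry of the grade sheet. A small equivalence sketch under that reading, not code from the repository:

    # Sketch only: with the new defaults these two declarations should behave identically.
    hwGrader = AssignmentFormatGrader("Homework", 12, 2, "Homework", "Homework", "HW")
    hwGrader = AssignmentFormatGrader("Homework", 12, 2, short_label = "HW")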
@@ -304,7 +303,7 @@ def grade_sheet(student):
     return {'courseware_summary': chapters,
             'grade_summary': grade_summary}
 
 
-def aggregate_scores(scores, section):
+def aggregate_scores(scores, section_name = "summary", section_weight = 1):
     #TODO: What does a possible score of zero mean? We need to think what extra credit is
     scores = filter(lambda score: score.possible > 0, scores)
@@ -313,20 +312,18 @@ def aggregate_scores(scores, section):
     total_correct = sum((score.earned * 1.0 / score.possible) * score.weight for score in scores)
     total_possible = sum(score.weight for score in scores)
 
-    section_weight = section.get("weight", 1)
-
     #regardless of whether or not it is graded
     all_total = Score(total_correct,
                       total_possible,
                       section_weight,
                       False,
-                      section.get("name"))
+                      section_name)
 
     #selecting only graded things
     graded_total = Score(total_correct_graded,
                          total_possible_graded,
                          section_weight,
                          True,
-                         section.get("name"))
+                         section_name)
 
     return all_total, graded_total
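With the new signature above, aggregate_scores can be called with only the score list; the defaults label the returned totals as the "summary" section with weight 1, which is what the existing assertions in tests.py below rely on, while grade_sheet() now passes the section name and weight explicitly instead of the whole section dict. A short usage sketch; the "Week 1" name and the weight of 2 are made-up values:

    # Sketch only (names are hypothetical); Score fields follow the tests in this commit.
    from grades import Score, aggregate_scores

    scores = [Score(earned=3, possible=4.0, weight=1, graded=True, section='p1')]
    all_total, graded_total = aggregate_scores(scores)               # section_name="summary", section_weight=1
    all_total, graded_total = aggregate_scores(scores, "Week 1", 2)  # explicit name and weight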
djangoapps/courseware/tests.py
@@ -4,7 +4,7 @@ import numpy
 import courseware.modules
 import courseware.capa.calc as calc
-from grades import Score, aggregate_scores
+from grades import Score, aggregate_scores, WeightedSubsectionsGrader, SingleSectionGrader, AssignmentFormatGrader
 
 
 class ModelsTest(unittest.TestCase):
     def setUp(self):
@@ -54,7 +54,7 @@ class ModelsTest(unittest.TestCase):
             exception_happened = True
         self.assertTrue(exception_happened)
 
 
-class GraderTest(unittest.TestCase):
+class GradesheetTest(unittest.TestCase):
     def test_weighted_grading(self):
         scores = []
@@ -93,3 +93,97 @@ class GraderTest(unittest.TestCase):
         all, graded = aggregate_scores(scores)
         self.assertAlmostEqual(all, Score(earned=14.0/5, possible=7.5, weight=1, graded=False, section="summary"))
         self.assertAlmostEqual(graded, Score(earned=8.0/5, possible=3.5, weight=1, graded=True, section="summary"))
+
+
+class GraderTest(unittest.TestCase):
+
+    empty_gradesheet = {
+    }
+
+    incomplete_gradesheet = {
+        'Homework': [],
+        'Lab': [],
+        'Midterm': [],
+    }
+
+    test_gradesheet = {
+        'Homework': [Score(earned=2, possible=20.0, weight=1, graded=True, section='hw1'),
+                     Score(earned=16, possible=16.0, weight=1, graded=True, section='hw2')],
+        #The dropped scores should be from the assignments that don't exist yet
+
+        'Lab': [Score(earned=1, possible=2.0, weight=1, graded=True, section='lab1'),  #Dropped
+                Score(earned=1, possible=1.0, weight=1, graded=True, section='lab2'),
+                Score(earned=1, possible=1.0, weight=1, graded=True, section='lab3'),
+                Score(earned=5, possible=25.0, weight=1, graded=True, section='lab4'),  #Dropped
+                Score(earned=3, possible=4.0, weight=1, graded=True, section='lab5'),  #Dropped
+                Score(earned=6, possible=7.0, weight=1, graded=True, section='lab6'),
+                Score(earned=5, possible=6.0, weight=1, graded=True, section='lab7')],
+
+        'Midterm': [Score(earned=50.5, possible=100, weight=1, graded=True, section="Midterm Exam"),],
+    }
+
+    def test_SingleSectionGrader(self):
+        midtermGrader = SingleSectionGrader("Midterm", "Midterm Exam")
+        lab4Grader = SingleSectionGrader("Lab", "lab4")
+        badLabGrader = SingleSectionGrader("Lab", "lab42")
+
+        for graded in [midtermGrader.grade(self.empty_gradesheet),
+                       midtermGrader.grade(self.incomplete_gradesheet),
+                       badLabGrader.grade(self.test_gradesheet)]:
+            self.assertEqual(len(graded['section_breakdown']), 1)
+            self.assertEqual(graded['percent'], 0.0)
+
+        graded = midtermGrader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.505)
+        self.assertEqual(len(graded['section_breakdown']), 1)
+
+        graded = lab4Grader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.2)
+        self.assertEqual(len(graded['section_breakdown']), 1)
+
+    def test_assignmentFormatGrader(self):
+        homeworkGrader = AssignmentFormatGrader("Homework", 12, 2)
+        noDropGrader = AssignmentFormatGrader("Homework", 12, 0)
+        #Even though the minimum number is 3, this should grade correctly when 7 assignments are found
+        overflowGrader = AssignmentFormatGrader("Lab", 3, 2)
+        labGrader = AssignmentFormatGrader("Lab", 7, 3)
+
+        #Test the grading of an empty gradesheet
+        for graded in [homeworkGrader.grade(self.empty_gradesheet),
+                       noDropGrader.grade(self.empty_gradesheet),
+                       homeworkGrader.grade(self.incomplete_gradesheet),
+                       noDropGrader.grade(self.incomplete_gradesheet)]:
+            self.assertAlmostEqual(graded['percent'], 0.0)
+            #Make sure the breakdown includes 12 sections, plus one summary
+            self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+        graded = homeworkGrader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.11)  # 100% + 10% / 10 assignments
+        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+        graded = noDropGrader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.0916666666666666)  # 100% + 10% / 12 assignments
+        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+        graded = overflowGrader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.8880952380952382)  # 100% + 10% / 5 assignments
+        self.assertEqual(len(graded['section_breakdown']), 7 + 1)
+
+        graded = labGrader.grade(self.test_gradesheet)
+        self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
+        self.assertEqual(len(graded['section_breakdown']), 7 + 1)
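The expected values in test_assignmentFormatGrader follow directly from the placeholder and drop logic in grades.py above. Against test_gradesheet, homeworkGrader (12 expected, drop 2) sees hw1 at 2/20 = 10% and hw2 at 16/16 = 100%, pads the remaining ten slots with 0% placeholders, and drops the two lowest; noDropGrader keeps all twelve. A quick reproduction of the 0.11 and 0.0916... figures (my arithmetic, matching the in-line comments in the test):

    # Reproducing the expected homework percentages by hand; illustration only.
    marks = [2 / 20.0, 16 / 16.0] + [0.0] * 10               # hw1, hw2, ten unreleased placeholders
    with_drops = sum(sorted(marks)[2:]) / (len(marks) - 2)   # drop the 2 lowest -> 0.11
    no_drops = sum(marks) / len(marks)                       # keep all 12 -> 0.0916666...
    print(with_drops, no_drops)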