edx / edx-ora2 · Commits

Commit d6012733, authored Jul 28, 2014 by gradyward
Addressed code review comments
parent 0a573a17
Showing 6 changed files with 131 additions and 44 deletions.
openassessment/assessment/api/self.py  +4 -4
openassessment/assessment/models/base.py  +2 -1
openassessment/assessment/test/data/models_check_criteria_assessed.json  +58 -0
openassessment/assessment/test/test_assessment_models.py  +63 -36
openassessment/assessment/test/test_self.py  +4 -2
openassessment/xblock/grade_mixin.py  +0 -1
openassessment/assessment/api/self.py
@@ -168,12 +168,12 @@ def create_assessment(
         # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
         AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
         _log_assessment(assessment, submission)
-    except InvalidRubric:
-        msg = "Invalid rubric definition"
+    except InvalidRubric as ex:
+        msg = "Invalid rubric definition: " + str(ex)
         logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
-    except InvalidRubricSelection:
-        msg = "Selected options do not match the rubric"
+    except InvalidRubricSelection as ex:
+        msg = "Selected options do not match the rubric: " + str(ex)
         logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
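A minimal sketch (not code from this repository) of the pattern the two rewritten except blocks apply: the caught exception's own message is appended before re-raising the API-level error, so logs and callers see why the rubric was rejected rather than only that it was. The exception classes and helper below are stand-ins defined locally for the sketch.

class InvalidRubric(Exception):
    """Stand-in for the model-layer validation error."""


class SelfAssessmentRequestError(Exception):
    """Stand-in for the API-level error exposed to callers."""


def _save_parts():
    try:
        raise InvalidRubric("criterion 'Quality' has no options")
    except InvalidRubric as ex:
        # Appending str(ex) keeps the underlying cause in the message that is
        # logged and re-raised, instead of a bare "Invalid rubric definition".
        msg = "Invalid rubric definition: " + str(ex)
        raise SelfAssessmentRequestError(msg)


try:
    _save_parts()
except SelfAssessmentRequestError as err:
    assert "no options" in str(err)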
openassessment/assessment/models/base.py
@@ -809,5 +809,5 @@ class AssessmentPart(models.Model):
         missing_criteria = zero_option_criteria_missing_feedback | optioned_criteria_missing_selection
         if len(missing_criteria) > 0:
-            msg = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)
+            msg = u"Missing selections for criteria: {missing}".format(missing=', '.join(missing_criteria))
             raise InvalidRubricSelection(msg)
\ No newline at end of file
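A small illustrative sketch (values invented) of what the ', '.join() change buys: interpolating the set object directly leaks Python's repr syntax into the user-facing error, while joining the criterion names yields a readable list.

missing_criteria = {'Clarity', 'Accuracy'}

# Before: the whole set object is interpolated, repr syntax and all.
before = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)
# After: only the names, separated by commas (set order is arbitrary).
after = u"Missing selections for criteria: {missing}".format(missing=', '.join(missing_criteria))

print(before)  # e.g. Missing selections for criteria: set(['Accuracy', 'Clarity']) on Python 2
print(after)   # e.g. Missing selections for criteria: Accuracy, Clarity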
openassessment/assessment/test/data/models_check_criteria_assessed.json
new file mode 100644
{
    "No Option Selected, Has Options, No Feedback": {
        "has_option_selected": false,
        "has_zero_options": false,
        "has_feedback": false,
        "expected_error": true
    },
    "No Option Selected, Has Options, Has Feedback": {
        "has_option_selected": false,
        "has_zero_options": false,
        "has_feedback": true,
        "expected_error": true
    },
    "No Option Selected, No Options, No Feedback": {
        "has_option_selected": false,
        "has_zero_options": true,
        "has_feedback": false,
        "expected_error": true
    },
    "No Option Selected, No Options, Has Feedback": {
        "has_option_selected": false,
        "has_zero_options": true,
        "has_feedback": true,
        "expected_error": false
    },
    "Has Option Selected, Has Options, No Feedback": {
        "has_option_selected": true,
        "has_zero_options": false,
        "has_feedback": false,
        "expected_error": false
    },
    "Has Option Selected, No Options, Has Feedback": {
        "has_option_selected": true,
        "has_zero_options": true,
        "has_feedback": true,
        "expected_error": true
    },
    "Has Option Selected, No Options, No Feedback": {
        "has_option_selected": true,
        "has_zero_options": true,
        "has_feedback": false,
        "expected_error": true
    },
    "Has Option Selected, Has Options, Has Feedback": {
        "has_option_selected": true,
        "has_zero_options": false,
        "has_feedback": true,
        "expected_error": false
    }
}
\ No newline at end of file
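For context, a minimal sketch (not part of this commit) of how a ddt-driven test consumes a fixture like the one above: each top-level key becomes a separately named generated test case. The class and method names are hypothetical, and the sketch assumes a ddt version that passes each JSON value to the test as a single dict argument, which is how the test added in this commit reads it.

import unittest

import ddt


@ddt.ddt
class CriteriaFixtureSmokeTest(unittest.TestCase):
    """Hypothetical test showing the fixture being loaded via ddt.file_data."""

    @ddt.file_data('data/models_check_criteria_assessed.json')
    def test_fixture_shape(self, data):
        # One generated test per top-level key; every scenario carries the same flags.
        for flag in ('has_option_selected', 'has_zero_options', 'has_feedback', 'expected_error'):
            self.assertIn(flag, data)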
openassessment/assessment/test/test_assessment_models.py
@@ -2,13 +2,16 @@
"""
Tests for the assessment Django models.
"""
import
copy
import
copy
,
ddt
from
openassessment.test_utils
import
CacheResetTest
from
openassessment.assessment.serializers
import
rubric_from_dict
from
openassessment.assessment.models
import
Assessment
,
AssessmentPart
,
InvalidRubricSelection
from
.constants
import
RUBRIC
import
mock
from
openassessment.assessment.api.self
import
create_assessment
from
submissions.api
import
create_submission
from
openassessment.assessment.errors
import
SelfAssessmentRequestError
@ddt.ddt
class
AssessmentTest
(
CacheResetTest
):
"""
Tests for the `Assessment` and `AssessmentPart` models.
@@ -148,41 +151,65 @@ class AssessmentTest(CacheResetTest):
             criterion['options'] = []
         return rubric_from_dict(rubric_dict)

-    def test_check_all_criteria_assessed(self):
-        """
-        Runs a problem with 8 criterion (representing the 8 permutations of possible needs)
-        through this validator. Represented as:
-            A -- Has an option selected for it.
-            B -- Has Zero Options
-            C -- Has Feedback given
-        """
-        all_criteria = ['---', '--C', '-B-', '-BC', 'A--', 'A-C', 'AB-', 'ABC']
-        selected_criteria = [crit for crit in all_criteria if ('A' in crit)]
-        zero_option_criteria_names = [crit for crit in all_criteria if ('B' in crit)]
-        feedback_given_criteria = [crit for crit in all_criteria if ('C' in crit)]
-        zero_option_criteria = []
-        for zoc in zero_option_criteria_names:
-            a = mock.Mock()
-            a.name = zoc
-            zero_option_criteria.append(a)
-        fake_rubric_index = mock.Mock()
-        fake_rubric_index.find_criteria_without_options = mock.Mock(return_value=zero_option_criteria)
-        fake_rubric_index.find_missing_criteria = mock.Mock(return_value=(set(all_criteria) - set(selected_criteria)))
-        expected_not_assessed = {'---', '--C', '-B-', 'AB-'}
-        expected_assessed = set(all_criteria) - expected_not_assessed
+    @ddt.file_data('data/models_check_criteria_assessed.json')
+    def test_check_all_criteria_assessed(self, data):
+        student_item = {
+            'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
+            'item_id': 'test_item',
+            'course_id': 'test_course',
+            'item_type': 'test_type'
+        }
+        submission = create_submission(student_item, "Test answer")
+        rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
+            has_option_selected=data['has_option_selected'],
+            has_zero_options=data['has_zero_options'],
+            has_feedback=data['has_feedback']
+        )
         error = False
         try:
-            AssessmentPart._check_all_criteria_assessed(fake_rubric_index, selected_criteria, feedback_given_criteria)
-        except InvalidRubricSelection as ex:
-            for criterion in expected_not_assessed:
-                self.assertTrue(criterion in str(ex))
-            for criterion in expected_assessed:
-                self.assertFalse(criterion in str(ex))
+            create_assessment(
+                submission['uuid'],
+                student_item['student_id'],
+                options_selected,
+                criterion_feedback,
+                "overall feedback",
+                rubric
+            )
+        except SelfAssessmentRequestError:
+            error = True
+        self.assertTrue(data['expected_error'] == error)
+
+    def _create_data_structures_with_criterion_properties(self, has_option_selected=True, has_zero_options=True, has_feedback=True):
+        """
+        Generates a dummy set of criterion definition structures that will allow us to specificy a specific combination
+        of criterion attributes for a test case.
+        """
+        options = []
+        if not has_zero_options:
+            options = [{"name": "Okay", "points": 1, "description": "It was okay I guess."}]
+        rubric = {
+            'criteria': [
+                {
+                    "name": "Quality",
+                    "prompt": "How 'good' was it?",
+                    "options": options
+                }
+            ]
+        }
+        options_selected = {}
+        if has_option_selected:
+            options_selected['Quality'] = 'Okay'
+        criterion_feedback = {}
+        if has_feedback:
+            criterion_feedback['Quality'] = "This was an assignment of average quality."
-        self.assertTrue(error)
\ No newline at end of file
+        return rubric, options_selected, criterion_feedback
\ No newline at end of file
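Read together, the eight fixture cases and the data-driven test encode a single rule; the sketch below (an inference from the fixture, not code in the repository) states it directly: a criterion is validly assessed when it has options and one of them is selected, or when it has no options, no selection, and written feedback, and every other combination should make create_assessment raise.

def expect_error(has_option_selected, has_zero_options, has_feedback):
    """Return True when the fixture marks this combination as an error case."""
    valid = (
        (has_option_selected and not has_zero_options) or
        (has_zero_options and not has_option_selected and has_feedback)
    )
    return not valid


# Spot-checks against models_check_criteria_assessed.json:
assert expect_error(False, True, True) is False   # feedback-only criterion is acceptable
assert expect_error(True, True, False) is True    # selecting an option that does not exist is not
assert expect_error(True, False, True) is False   # normal selection, with feedback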
openassessment/assessment/test/test_self.py
@@ -56,8 +56,10 @@ class TestSelfApi(CacheResetTest):
"accuracy"
:
"Like my sister's cutting comments about my weight, I may not have enjoyed the piece, but I cannot fault it for its factual nature."
}
OVERALL_FEEDBACK
=
(
u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another."
)
OVERALL_FEEDBACK
=
(
u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another."
)
def
test_create_assessment
(
self
):
# Initially, there should be no submission or self assessment
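One detail of the reformatted constant, shown as a sketch rather than a claim about intent: the adjacent unicode literals inside the parentheses are concatenated by the compiler into a single string, and because neither literal supplies a separating space, "one" and "arbitrary" run together in the resulting feedback text.

OVERALL_FEEDBACK = (
    u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
    u"arbitrary set of things over another."
)

# Implicit literal concatenation joins the two pieces with no separator.
assert u"onearbitrary" in OVERALL_FEEDBACK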
openassessment/xblock/grade_mixin.py
@@ -264,7 +264,6 @@ class GradeMixin(object):
         for criterion in criteria:
             criterion_name = criterion['name']
             criterion['peer_feedback'] = peer_criteria_feedback[criterion_name]
             if self_criteria_feedback.get(criterion_name, False):
                 criterion['self_feedback'] = self_criteria_feedback.get(criterion_name)

         return criteria
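A runnable sketch of the lookup pattern in the loop above, with sample data invented for illustration: peer feedback is indexed directly because it is expected for every criterion, while self feedback is attached only when .get() finds a non-empty entry, so criteria without self feedback never receive a 'self_feedback' key.

criteria = [{'name': 'Clarity'}, {'name': 'Accuracy'}]
peer_criteria_feedback = {'Clarity': 'Readable throughout.', 'Accuracy': 'Well sourced.'}
self_criteria_feedback = {'Clarity': 'I tried to keep it short.'}

for criterion in criteria:
    criterion_name = criterion['name']
    criterion['peer_feedback'] = peer_criteria_feedback[criterion_name]
    if self_criteria_feedback.get(criterion_name, False):
        criterion['self_feedback'] = self_criteria_feedback.get(criterion_name)

assert 'self_feedback' in criteria[0]       # present for 'Clarity'
assert 'self_feedback' not in criteria[1]   # omitted entirely for 'Accuracy'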