edx / edx-ora2, commit 08a5800a
Authored Aug 04, 2014 by Stephen Sanchez
Merge pull request #564 from edx/sanchez/use-i18n-xblock-service
Using the XBlock i18n Service for ORA2
Parents: c21b3fd0, 4b866df3
Showing 14 changed files with 178 additions and 158 deletions (+178, -158)
openassessment/xblock/grade_mixin.py                 +5   -6
openassessment/xblock/openassessmentblock.py         +9   -3
openassessment/xblock/peer_assessment_mixin.py       +12  -15
openassessment/xblock/resolve_dates.py               +12  -9
openassessment/xblock/self_assessment_mixin.py       +8   -9
openassessment/xblock/staff_info_mixin.py            +25  -17
openassessment/xblock/student_training_mixin.py      +7   -8
openassessment/xblock/studio_mixin.py                +9   -9
openassessment/xblock/submission_mixin.py            +18  -23
openassessment/xblock/test/test_resolve_dates.py     +14  -6
openassessment/xblock/test/test_submission.py        +3   -3
openassessment/xblock/test/test_validation.py        +18  -15
openassessment/xblock/validation.py                  +17  -13
openassessment/xblock/xml.py                         +21  -22
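The hunks below all apply one pattern: drop the module-level "from django.utils.translation import ugettext as _" imports and route user-facing strings through the XBlock runtime's i18n service, exposed on the block as a `_` property. A minimal sketch of that pattern, pulled together from the openassessmentblock.py hunks (the block and handler names here are illustrative, not part of the commit):

    from xblock.core import XBlock

    @XBlock.needs("i18n")
    class ExampleBlock(XBlock):

        @property
        def _(self):
            # Mirror of the `_` property added to OpenAssessmentBlock below:
            # fetch the runtime's i18n service and return its ugettext.
            i18nService = self.runtime.service(self, 'i18n')
            return i18nService.ugettext

        @XBlock.json_handler
        def save_feedback(self, data, suffix=''):
            # User-facing text is translated at request time, per user.
            return {'success': True, 'msg': self._(u"Feedback saved.")}

Utility functions that are not methods on the block (resolve_dates, the validators, the staff decorators) cannot reach self._, so the diffs also thread the translation function through as an explicit `_` argument.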
openassessment/xblock/grade_mixin.py  (view file @ 08a5800a)

@@ -5,7 +5,6 @@ import copy
 from collections import defaultdict
 from lazy import lazy
-from django.utils.translation import ugettext as _
 from xblock.core import XBlock
 from openassessment.assessment.api import peer as peer_api

@@ -59,7 +58,7 @@ class GradeMixin(object):
             else:
                 # status is 'self' or 'peer', which implies that the workflow is incomplete
                 path, context = self.render_grade_incomplete(workflow)
         except (sub_api.SubmissionError, PeerAssessmentError, SelfAssessmentError):
-            return self.render_error(_(u"An unexpected error occurred."))
+            return self.render_error(self._(u"An unexpected error occurred."))
         else:
             return self.render_assessment(path, context)

@@ -178,9 +177,9 @@ class GradeMixin(object):
         incomplete_steps = []
         if _is_incomplete("peer"):
-            incomplete_steps.append(_("Peer Assessment"))
+            incomplete_steps.append(self._("Peer Assessment"))
         if _is_incomplete("self"):
-            incomplete_steps.append(_("Self Assessment"))
+            incomplete_steps.append(self._("Self Assessment"))

         return (
             'openassessmentblock/grade/oa_grade_incomplete.html',

@@ -213,7 +212,7 @@ class GradeMixin(object):
                     'options': feedback_options,
                 })
         except (peer_api.PeerAssessmentInternalError, peer_api.PeerAssessmentRequestError):
-            return {'success': False, 'msg': _(u"Assessment feedback could not be saved.")}
+            return {'success': False, 'msg': self._(u"Assessment feedback could not be saved.")}
         else:
             self.runtime.publish(
                 self,

@@ -224,7 +223,7 @@ class GradeMixin(object):
                     'options': feedback_options,
                 }
             )
-            return {'success': True, 'msg': _(u"Feedback saved.")}
+            return {'success': True, 'msg': self._(u"Feedback saved.")}

     def _rubric_criteria_grade_context(self, peer_assessments, self_assessment):
         """
openassessment/xblock/openassessmentblock.py  (view file @ 08a5800a)

@@ -83,7 +83,7 @@ def load(path):
     data = pkg_resources.resource_string(__name__, path)
     return data.decode("utf8")

+@XBlock.needs("i18n")
 class OpenAssessmentBlock(
     XBlock,
     MessageMixin,

@@ -247,6 +247,7 @@ class OpenAssessmentBlock(
         frag.initialize_js('OpenAssessmentBlock')
         return frag

     @property
     def is_admin(self):
         """

@@ -353,7 +354,7 @@ class OpenAssessmentBlock(
         config = parse_from_xml(node)
         block = runtime.construct_xblock_from_class(cls, keys)

-        xblock_validator = validator(block, strict_post_release=False)
+        xblock_validator = validator(block, block._, strict_post_release=False)
         xblock_validator(
             create_rubric_dict(config['prompt'], config['rubric_criteria']),
             config['rubric_assessments'],

@@ -373,6 +374,11 @@ class OpenAssessmentBlock(
         return block

+    @property
+    def _(self):
+        i18nService = self.runtime.service(self, 'i18n')
+        return i18nService.ugettext
+
     @property
     def valid_assessments(self):
         """
         Return a list of assessment dictionaries that we recognize.

@@ -509,7 +515,7 @@ class OpenAssessmentBlock(
         # Resolve unspecified dates and date strings to datetimes
         start, due, date_ranges = resolve_dates(
-            self.start, self.due, [submission_range] + assessment_ranges
+            self.start, self.due, [submission_range] + assessment_ranges, self._
         )
         open_range = (start, due)
openassessment/xblock/peer_assessment_mixin.py  (view file @ 08a5800a)

 import logging

-from django.utils.translation import ugettext as _
 from webob import Response
 from xblock.core import XBlock

@@ -9,11 +8,9 @@ from openassessment.assessment.errors import (
     PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
 )
 from openassessment.workflow.errors import AssessmentWorkflowError
 from openassessment.fileupload import api as file_upload_api
 from openassessment.fileupload.api import FileUploadError
-from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE
-from .data_conversion import create_rubric_dict, clean_criterion_feedback
+from .data_conversion import clean_criterion_feedback

 logger = logging.getLogger(__name__)

@@ -52,16 +49,16 @@ class PeerAssessmentMixin(object):
         """
         # Validate the request
         if 'options_selected' not in data:
-            return {'success': False, 'msg': _('Must provide options selected in the assessment')}
+            return {'success': False, 'msg': self._('Must provide options selected in the assessment')}

         if 'overall_feedback' not in data:
-            return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
+            return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}

         if 'criterion_feedback' not in data:
-            return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
+            return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}

         if self.submission_uuid is None:
-            return {'success': False, 'msg': _('You must submit a response before you can peer-assess.')}
+            return {'success': False, 'msg': self._('You must submit a response before you can peer-assess.')}

         assessment_ui_model = self.get_assessment_module('peer-assessment')
         if assessment_ui_model:

@@ -85,12 +82,12 @@ class PeerAssessmentMixin(object):
                     u"Peer API error for submission UUID {}".format(self.submission_uuid),
                     exc_info=True
                 )
-                return {'success': False, 'msg': _(u"Your peer assessment could not be submitted.")}
+                return {'success': False, 'msg': self._(u"Your peer assessment could not be submitted.")}
             except PeerAssessmentInternalError:
                 logger.exception(
                     u"Peer API internal error for submission UUID: {}".format(self.submission_uuid)
                 )
-                msg = _("Your peer assessment could not be submitted.")
+                msg = self._("Your peer assessment could not be submitted.")
                 return {'success': False, 'msg': msg}

             # Update both the workflow that the submission we're assessing

@@ -104,7 +101,7 @@ class PeerAssessmentMixin(object):
                     u"Workflow error occurred when submitting peer assessment "
                     u"for submission {}".format(self.submission_uuid)
                 )
-                msg = _('Could not update workflow status.')
+                msg = self._('Could not update workflow status.')
                 return {'success': False, 'msg': msg}

             # Temp kludge until we fix JSON serialization for datetime

@@ -113,7 +110,7 @@ class PeerAssessmentMixin(object):
             return {'success': True, 'msg': u''}
         else:
-            return {'success': False, 'msg': _('Could not load peer assessment.')}
+            return {'success': False, 'msg': self._('Could not load peer assessment.')}

     @XBlock.handler
     def render_peer_assessment(self, data, suffix=''):

@@ -180,15 +177,15 @@ class PeerAssessmentMixin(object):
             context_dict["review_num"] = count + 1

             if continue_grading:
-                context_dict["submit_button_text"] = _(
+                context_dict["submit_button_text"] = self._(
                     "Submit your assessment & review another response"
                 )
             elif assessment["must_grade"] - count == 1:
-                context_dict["submit_button_text"] = _(
+                context_dict["submit_button_text"] = self._(
                     "Submit your assessment & move onto next step"
                 )
             else:
-                context_dict["submit_button_text"] = _(
+                context_dict["submit_button_text"] = self._(
                     "Submit your assessment & move to response #{response_number}"
                 ).format(response_number=(count + 2))
openassessment/xblock/resolve_dates.py  (view file @ 08a5800a)

@@ -4,7 +4,6 @@ Resolve unspecified dates and date strings to datetimes.
 import datetime as dt
 import pytz
 from dateutil.parser import parse as parse_date
-from django.utils.translation import ugettext as _


 class InvalidDateFormat(Exception):

@@ -25,12 +24,14 @@ DISTANT_PAST = dt.datetime(dt.MINYEAR, 1, 1, tzinfo=pytz.utc)
 DISTANT_FUTURE = dt.datetime(dt.MAXYEAR, 1, 1, tzinfo=pytz.utc)


-def _parse_date(value):
+def _parse_date(value, _):
     """
     Parse an ISO formatted datestring into a datetime object with timezone set to UTC.

     Args:
         value (str or datetime): The ISO formatted date string or datetime object.
+        _ (function): The i18n service function used to get the appropriate
+            text for a message.

     Returns:
         datetime.datetime

@@ -51,7 +52,7 @@ def _parse_date(value):
         raise InvalidDateFormat(_("'{date}' must be a date string or datetime").format(date=value))


-def resolve_dates(start, end, date_ranges):
+def resolve_dates(start, end, date_ranges, _):
     """
     Resolve date strings (including "default" dates) to datetimes.

     The basic rules are:

@@ -124,6 +125,8 @@ def resolve_dates(start, end, date_ranges):
         end (str, ISO date format, or datetime): When the problem closes. A value of None indicates that the problem never closes.
         date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
             the start/end timestamps (date string or datetime) of each submission/assessment.
+        _ (function): An i18n service function to use for retrieving the
+            proper text.

     Returns:
         start (datetime): The resolved start date

@@ -135,8 +138,8 @@ def resolve_dates(start, end, date_ranges):
         InvalidDateFormat
     """
     # Resolve problem start and end dates to minimum and maximum dates
-    start = _parse_date(start) if start is not None else DISTANT_PAST
-    end = _parse_date(end) if end is not None else DISTANT_FUTURE
+    start = _parse_date(start, _) if start is not None else DISTANT_PAST
+    end = _parse_date(end, _) if end is not None else DISTANT_FUTURE
     resolved_starts = []
     resolved_ends = []

@@ -162,11 +165,11 @@ def resolve_dates(start, end, date_ranges):
     # defaults.  See the docstring above for a more detailed justification.
     for step_start, step_end in date_ranges:
         if step_start is not None:
-            parsed_start = _parse_date(step_start)
+            parsed_start = _parse_date(step_start, _)
             start = min(start, parsed_start)
             end = max(end, parsed_start + dt.timedelta(milliseconds=1))
         if step_end is not None:
-            parsed_end = _parse_date(step_end)
+            parsed_end = _parse_date(step_end, _)
             end = max(end, parsed_end)
             start = min(start, parsed_end - dt.timedelta(milliseconds=1))

@@ -182,13 +185,13 @@ def resolve_dates(start, end, date_ranges):
         # If I set a start date for peer-assessment, but don't set a start date for the following self-assessment,
         # then the self-assessment should default to the same start date as the peer-assessment.
         step_start, __ = date_ranges[index]
-        step_start = _parse_date(step_start) if step_start is not None else prev_start
+        step_start = _parse_date(step_start, _) if step_start is not None else prev_start

         # Resolve "default" end dates to the following end date.
         # If I set a due date for self-assessment, but don't set a due date for the previous peer-assessment,
         # then the peer-assessment should default to the same due date as the self-assessment.
         __, step_end = date_ranges[reverse_index]
-        step_end = _parse_date(step_end) if step_end is not None else prev_end
+        step_end = _parse_date(step_end, _) if step_end is not None else prev_end

         if step_start < prev_start:
             msg = _(
                 u"This step's start date '{start}' cannot be earlier than the previous step's start date '{prev}'."
             ).format(
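With the new signature, every caller of resolve_dates supplies the translation function explicitly: the block passes self._ (see the openassessmentblock.py and studio_mixin.py hunks), while the tests pass an identity stub. A small usage sketch, assuming the module path shown in this diff (the date values are illustrative):

    from openassessment.xblock.resolve_dates import resolve_dates

    STUB_I18N = lambda x: x   # pass-through "translation", as in the test files

    start, due, date_ranges = resolve_dates(
        "2014-08-01",                    # problem start (ISO string or datetime)
        None,                            # no due date: resolved to DISTANT_FUTURE
        [("2014-08-02", "2014-08-10")],  # one (start, end) pair per step
        STUB_I18N,                       # the new i18n argument, used only for error messages
    )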
openassessment/xblock/self_assessment_mixin.py  (view file @ 08a5800a)

 import logging

-from django.utils.translation import ugettext as _
 from xblock.core import XBlock
 from webob import Response

@@ -9,7 +8,7 @@ from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api
 from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE
-from .data_conversion import create_rubric_dict, clean_criterion_feedback
+from .data_conversion import clean_criterion_feedback

 logger = logging.getLogger(__name__)

@@ -36,7 +35,7 @@ class SelfAssessmentMixin(object):
         except:
             msg = u"Could not retrieve self assessment for submission {}".format(self.submission_uuid)
             logger.exception(msg)
-            return self.render_error(_(u"An unexpected error occurred."))
+            return self.render_error(self._(u"An unexpected error occurred."))
         else:
             return self.render_assessment(path, context)

@@ -112,16 +111,16 @@ class SelfAssessmentMixin(object):
             and "msg" (unicode) containing additional information if an error occurs.
         """
         if 'options_selected' not in data:
-            return {'success': False, 'msg': _(u"Missing options_selected key in request")}
+            return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
         if 'overall_feedback' not in data:
-            return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
+            return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}
         if 'criterion_feedback' not in data:
-            return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
+            return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}

         if self.submission_uuid is None:
-            return {'success': False, 'msg': _(u"You must submit a response before you can perform a self-assessment.")}
+            return {'success': False, 'msg': self._(u"You must submit a response before you can perform a self-assessment.")}

         try:
             assessment = self_api.create_assessment(

@@ -142,14 +141,14 @@ class SelfAssessmentMixin(object):
                 u"for the submission {}".format(self.submission_uuid),
                 exc_info=True
             )
-            msg = _(u"Your self assessment could not be submitted.")
+            msg = self._(u"Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
         except (self_api.SelfAssessmentInternalError, workflow_api.AssessmentWorkflowInternalError):
             logger.exception(
                 u"An error occurred while submitting a self assessment "
                 u"for the submission {}".format(self.submission_uuid),
             )
-            msg = _(u"Your self assessment could not be submitted.")
+            msg = self._(u"Your self assessment could not be submitted.")
             return {'success': False, 'msg': msg}
         else:
             return {'success': True, 'msg': u""}
openassessment/xblock/staff_info_mixin.py  (view file @ 08a5800a)

@@ -4,7 +4,6 @@ determine the flow of the problem.
 """
 import copy
 from functools import wraps
-from django.utils.translation import ugettext as _
 from django.utils.translation import ugettext_lazy
 from xblock.core import XBlock

@@ -19,13 +18,13 @@ from openassessment.assessment.api import self as self_api
 from openassessment.assessment.api import ai as ai_api


-def require_global_admin(error_msg):
+def require_global_admin(error_key):
     """
     Method decorator to restrict access to an XBlock handler
     to only global staff.

     Args:
-        error_msg (unicode): The error message to display to the user
+        error_key (str): The key to the error message to display to the user
             if they do not have sufficient permissions.

     Returns:

@@ -35,22 +34,26 @@ def require_global_admin(error_msg):
     def _decorator(func):   # pylint: disable=C0111
         @wraps(func)
         def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            permission_errors = {
+                "SCHEDULE_TRAINING": xblock._(u"You do not have permission to schedule training"),
+                "RESCHEDULE_TASKS": xblock._(u"You do not have permission to reschedule tasks."),
+            }
             if not xblock.is_admin or xblock.in_studio_preview:
-                return {'success': False, 'msg': unicode(error_msg)}
+                return {'success': False, 'msg': permission_errors[error_key]}
             else:
                 return func(xblock, *args, **kwargs)
         return _wrapped
     return _decorator


-def require_course_staff(error_msg):
+def require_course_staff(error_key):
     """
     Method decorator to restrict access to an XBlock render
     method to only course staff.

     Args:
-        error_msg (unicode): The error message to display to the user
-            if they do not have sufficient permissions.
+        error_key (str): The key for the error message to display to the user
+            if they do not have sufficient permissions.

     Returns:
         decorated function

@@ -59,8 +62,13 @@ def require_course_staff(error_msg):
     def _decorator(func):  # pylint: disable=C0111
         @wraps(func)
         def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            permission_errors = {
+                "STAFF_INFO": xblock._(u"You do not have permission to access staff information"),
+                "STUDENT_INFO": xblock._(u"You do not have permission to access student information."),
+            }
             if not xblock.is_course_staff or xblock.in_studio_preview:
-                return xblock.render_error(unicode(error_msg))
+                return xblock.render_error(permission_errors[error_key])
             else:
                 return func(xblock, *args, **kwargs)
         return _wrapped

@@ -73,7 +81,7 @@ class StaffInfoMixin(object):
     """
     @XBlock.handler
-    @require_course_staff(ugettext_lazy(u"You do not have permission to access staff information"))
+    @require_course_staff("STAFF_INFO")
     def render_staff_info(self, data, suffix=''):   # pylint: disable=W0613
         """
         Template context dictionary for course staff debug panel.

@@ -142,7 +150,7 @@ class StaffInfoMixin(object):
         return path, context

     @XBlock.json_handler
-    @require_global_admin(ugettext_lazy(u"You do not have permission to schedule training"))
+    @require_global_admin("SCHEDULE_TRAINING")
     def schedule_training(self, data, suffix=''):   # pylint: disable=W0613
         """
         Schedule a new training task for example-based grading.

@@ -163,22 +171,22 @@ class StaffInfoMixin(object):
                 return {
                     'success': True,
                     'workflow_uuid': workflow_uuid,
-                    'msg': _(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
+                    'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
                 }
             except AIError as err:
                 return {
                     'success': False,
-                    'msg': _(u"An error occurred scheduling classifier training: {error}".format(error=err))
+                    'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err))
                 }
         else:
             return {
                 'success': False,
-                'msg': _(u"Example Based Assessment is not configured for this location.")
+                'msg': self._(u"Example Based Assessment is not configured for this location.")
             }

     @XBlock.handler
-    @require_course_staff(ugettext_lazy(u"You do not have permission to access student information."))
+    @require_course_staff("STUDENT_INFO")
     def render_student_info(self, data, suffix=''):  # pylint: disable=W0613
         """
         Renders all relative information for a specific student's workflow.

@@ -248,7 +256,7 @@ class StaffInfoMixin(object):
         return path, context

     @XBlock.json_handler
-    @require_global_admin(ugettext_lazy(u"You do not have permission to reschedule tasks."))
+    @require_global_admin("RESCHEDULE_TASKS")
     def reschedule_unfinished_tasks(self, data, suffix=''):  # pylint: disable=W0613
         """
         Wrapper which invokes the API call for rescheduling grading tasks.

@@ -278,10 +286,10 @@ class StaffInfoMixin(object):
             ai_api.reschedule_unfinished_tasks(course_id=course_id, item_id=item_id, task_type=u"grade")
             return {
                 'success': True,
-                'msg': _(u"All AI tasks associated with this item have been rescheduled successfully.")
+                'msg': self._(u"All AI tasks associated with this item have been rescheduled successfully.")
             }
         except AIError as ex:
             return {
                 'success': False,
-                'msg': _(u"An error occurred while rescheduling tasks: {}".format(ex))
+                'msg': self._(u"An error occurred while rescheduling tasks: {}".format(ex))
             }
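The decorator change above trades a ugettext_lazy message fixed at decoration time for a short key that the wrapper resolves through xblock._ on each request, since the runtime i18n service is only reachable from the block instance. Condensed from the hunks above into one standalone sketch (simplified, not the exact file contents):

    from functools import wraps

    def require_course_staff(error_key):
        """Simplified version of the rewritten decorator."""
        def _decorator(func):
            @wraps(func)
            def _wrapped(xblock, *args, **kwargs):
                # The message is looked up through the block's `_` property at
                # call time, so it is translated for the requesting user.
                permission_errors = {
                    "STAFF_INFO": xblock._(u"You do not have permission to access staff information"),
                    "STUDENT_INFO": xblock._(u"You do not have permission to access student information."),
                }
                if not xblock.is_course_staff or xblock.in_studio_preview:
                    return xblock.render_error(permission_errors[error_key])
                return func(xblock, *args, **kwargs)
            return _wrapped
        return _decorator

    # Usage, as in the hunks above: pass the key instead of a pre-built message.
    #   @XBlock.handler
    #   @require_course_staff("STAFF_INFO")
    #   def render_staff_info(self, data, suffix=''):

Adding a new protected handler means adding its key and message to the corresponding permission_errors dict inside the decorator.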
openassessment/xblock/student_training_mixin.py  (view file @ 08a5800a)

@@ -2,7 +2,6 @@
 Student training step in the OpenAssessment XBlock.
 """
 import logging
-from django.utils.translation import ugettext as _
 from webob import Response
 from xblock.core import XBlock
 from openassessment.assessment.api import student_training

@@ -52,7 +51,7 @@ class StudentTrainingMixin(object):
         except:  # pylint:disable=W0702
             msg = u"Could not render student training step for submission {}".format(self.submission_uuid)
             logger.exception(msg)
-            return self.render_error(_(u"An unexpected error occurred."))
+            return self.render_error(self._(u"An unexpected error occurred."))
         else:
             return self.render_assessment(path, context)

@@ -158,9 +157,9 @@ class StudentTrainingMixin(object):
         """
         if 'options_selected' not in data:
-            return {'success': False, 'msg': _(u"Missing options_selected key in request")}
+            return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
         if not isinstance(data['options_selected'], dict):
-            return {'success': False, 'msg': _(u"options_selected must be a dictionary")}
+            return {'success': False, 'msg': self._(u"options_selected must be a dictionary")}

         # Check the student's scores against the course author's scores.
         # This implicitly updates the student training workflow (which example essay is shown)

@@ -186,23 +185,23 @@ class StudentTrainingMixin(object):
             logger.warning(msg, exc_info=True)
             return {
                 'success': False,
-                'msg': _(u"Your scores could not be checked.")
+                'msg': self._(u"Your scores could not be checked.")
             }
         except student_training.StudentTrainingInternalError:
             return {
                 'success': False,
-                'msg': _(u"Your scores could not be checked.")
+                'msg': self._(u"Your scores could not be checked.")
             }
         except:
             return {
                 'success': False,
-                'msg': _(u"An unexpected error occurred.")
+                'msg': self._(u"An unexpected error occurred.")
             }
         else:
             try:
                 self.update_workflow_status()
             except AssessmentWorkflowError:
-                msg = _('Could not update workflow status.')
+                msg = self._('Could not update workflow status.')
                 logger.exception(msg)
                 return {'success': False, 'msg': msg}
             return {
openassessment/xblock/studio_mixin.py  (view file @ 08a5800a)

@@ -6,7 +6,6 @@ import copy
 import logging
 from django.template import Context
 from django.template.loader import get_template
-from django.utils.translation import ugettext as _
 from voluptuous import MultipleInvalid
 from xblock.core import XBlock
 from xblock.fields import List, Scope

@@ -91,7 +90,8 @@ class StudioMixin(object):
         __, __, date_ranges = resolve_dates(
             self.start, self.due,
             [(self.submission_start, self.submission_due)] +
-            [(asmnt.get('start'), asmnt.get('due')) for asmnt in self.valid_assessments]
+            [(asmnt.get('start'), asmnt.get('due')) for asmnt in self.valid_assessments],
+            self._
         )

         submission_start, submission_due = date_ranges[0]

@@ -143,12 +143,12 @@ class StudioMixin(object):
             data = EDITOR_UPDATE_SCHEMA(data)
         except MultipleInvalid:
             logger.exception('Editor context is invalid')
-            return {'success': False, 'msg': _('Error updating XBlock configuration')}
+            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

         # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
         if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (set(data['editor_assessments_order']) - {'example-based-assessment'}):
             logger.exception('editor_assessments_order does not contain all expected assessment types')
-            return {'success': False, 'msg': _('Error updating XBlock configuration')}
+            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

         # Backwards compatibility: We used to treat "name" as both a user-facing label
         # and a unique identifier for criteria and options.

@@ -170,18 +170,18 @@ class StudioMixin(object):
                 try:
                     assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
                 except UpdateFromXmlError:
-                    return {'success': False, 'msg': _(
+                    return {'success': False, 'msg': self._(
                         u'Validation error: There was an error in the XML definition of the '
                         u'examples provided by the user. Please correct the XML definition before saving.')
                     }
                 except KeyError:
-                    return {'success': False, 'msg': _(
+                    return {'success': False, 'msg': self._(
                         u'Validation error: No examples were provided for example based assessment.'
                     )}
                 # This is where we default to EASE for problems which are edited in the GUI
                 assessment['algorithm_id'] = 'ease'

-        xblock_validator = validator(self)
+        xblock_validator = validator(self, self._)
         success, msg = xblock_validator(
             create_rubric_dict(data['prompt'], data['criteria']),
             data['assessments'],

@@ -189,7 +189,7 @@ class StudioMixin(object):
             submission_due=data['submission_due'],
         )
         if not success:
-            return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
+            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

         # At this point, all the input data has been validated,
         # so we can safely modify the XBlock fields.

@@ -204,7 +204,7 @@ class StudioMixin(object):
         self.submission_due = data['submission_due']
         self.allow_file_upload = bool(data['allow_file_upload'])

-        return {'success': True, 'msg': _(u'Successfully updated OpenAssessment XBlock')}
+        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}

     @XBlock.json_handler
     def check_released(self, data, suffix=''):
openassessment/xblock/submission_mixin.py  (view file @ 08a5800a)

 import logging

-from django.utils.translation import ugettext as _
 from xblock.core import XBlock
 from submissions import api

@@ -26,16 +25,6 @@ class SubmissionMixin(object):
     """

-    submit_errors = {
-        # Reported to user sometimes, and useful in tests
-        'ENODATA':  _(u'API returned an empty response.'),
-        'EBADFORM': _(u'API Submission Request Error.'),
-        'EUNKNOWN': _(u'API returned unclassified exception.'),
-        'ENOMULTI': _(u'Multiple submissions are not allowed.'),
-        'ENOPREVIEW': _(u'To submit a response, view this component in Preview or Live mode.'),
-        'EBADARGS': _(u'"submission" required to submit answer.')
-    }
-
     @XBlock.json_handler
     def submit(self, data, suffix=''):
         """Place the submission text into Openassessment system

@@ -57,23 +46,30 @@ class SubmissionMixin(object):
         """
         if 'submission' not in data:
-            return False, 'EBADARGS', self.submit_errors['EBADARGS']
+            return (
+                False,
+                'EBADARGS',
+                self._(u'"submission" required to submit answer.')
+            )

         status = False
         status_text = None
         student_sub = data['submission']
         student_item_dict = self.get_student_item_dict()

         # Short-circuit if no user is defined (as in Studio Preview mode)
         # Since students can't submit, they will never be able to progress in the workflow
         if self.in_studio_preview:
-            return False, 'ENOPREVIEW', self.submit_errors['ENOPREVIEW']
+            return (
+                False,
+                'ENOPREVIEW',
+                self._(u'To submit a response, view this component in Preview or Live mode.')
+            )

         workflow = self.get_workflow_info()

         status_tag = 'ENOMULTI'  # It is an error to submit multiple times for the same item
+        status_text = self._(u'Multiple submissions are not allowed.')
         if not workflow:
             status_tag = 'ENODATA'
             try:
                 submission = self.create_submission(
                     student_item_dict,

@@ -85,13 +81,12 @@ class SubmissionMixin(object):
             except (api.SubmissionError, AssessmentWorkflowError):
                 logger.exception("This response was not submitted.")
                 status_tag = 'EUNKNOWN'
+                status_text = self._(u'API returned unclassified exception.')
             else:
                 status = True
                 status_tag = submission.get('student_item')
                 status_text = submission.get('attempt_number')

-        # relies on success being orthogonal to errors
-        status_text = status_text if status_text else self.submit_errors[status_tag]
         return status, status_tag, status_text

     @XBlock.json_handler

@@ -122,11 +117,11 @@ class SubmissionMixin(object):
                     {"saved_response": self.saved_response}
                 )
             except:
-                return {'success': False, 'msg': _(u"This response could not be saved.")}
+                return {'success': False, 'msg': self._(u"This response could not be saved.")}
             else:
                 return {'success': True, 'msg': u''}
         else:
-            return {'success': False, 'msg': _(u"This response was not submitted.")}
+            return {'success': False, 'msg': self._(u"This response was not submitted.")}

     def create_submission(self, student_item_dict, student_sub):

@@ -166,11 +161,11 @@ class SubmissionMixin(object):
         """
         if "contentType" not in data:
-            return {'success': False, 'msg': _(u"Must specify contentType.")}
+            return {'success': False, 'msg': self._(u"Must specify contentType.")}

         content_type = data['contentType']
         if not content_type.startswith('image/'):
-            return {'success': False, 'msg': _(u"contentType must be an image.")}
+            return {'success': False, 'msg': self._(u"contentType must be an image.")}

         try:
             key = self._get_student_item_key()

@@ -178,7 +173,7 @@ class SubmissionMixin(object):
             return {'success': True, 'url': url}
         except FileUploadError:
             logger.exception("Error retrieving upload URL.")
-            return {'success': False, 'msg': _(u"Error retrieving upload URL.")}
+            return {'success': False, 'msg': self._(u"Error retrieving upload URL.")}

     @XBlock.json_handler
     def download_url(self, data, suffix=''):

@@ -270,7 +265,7 @@ class SubmissionMixin(object):
         Returns:
             unicode
         """
-        return _(u'This response has been saved but not submitted.') if self.has_saved else _(u'This response has not been saved.')
+        return self._(u'This response has been saved but not submitted.') if self.has_saved else self._(u'This response has not been saved.')

     @XBlock.handler
     def render_submission(self, data, suffix=''):
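The class-level submit_errors dict, which translated its messages with ugettext at import time, is removed; each branch of the submit handler now builds its message through self._ at the moment it responds. The handler still returns the same three-element shape, for example (values taken from the hunks above):

    # Shape of a failed submit response after this change.
    status, status_tag, status_text = (
        False,                                          # success flag
        'EBADARGS',                                     # machine-readable tag, still stable for tests
        u'"submission" required to submit answer.',     # produced by self._(...) at request time
    )

Because the canned dict is gone, the tests below stop comparing resp[2] against submit_errors and simply assert that a message is present.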
openassessment/xblock/test/test_resolve_dates.py  (view file @ 08a5800a)

@@ -9,6 +9,8 @@ import ddt
 from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE


+STUB_I18N = lambda x: x
+

 @ddt.ddt
 class ResolveDatesTest(TestCase):

@@ -35,7 +37,8 @@ class ResolveDatesTest(TestCase):
             [
                 (self.DATE_STRINGS[start], self.DATE_STRINGS[end])
                 for start, end in tuple(data['date_ranges'])
-            ]
+            ],
+            STUB_I18N
         )
         self.assertEqual(resolved_start, self.DATES[data['resolved_start']])
         self.assertEqual(resolved_end, self.DATES[data['resolved_end']])

@@ -57,7 +60,8 @@ class ResolveDatesTest(TestCase):
                 ("1999-01-01", "1999-02-03"),
                 ("2003-01-01", "2003-02-03"),
                 ("3234-01-01", "3234-02-03"),
-            ]
+            ],
+            STUB_I18N
         )

         # Should default to the min of all specified start dates

@@ -76,7 +80,8 @@ class ResolveDatesTest(TestCase):
                 ("1999-01-01", "1999-02-03"),
                 ("2003-01-01", "2003-02-03"),
                 ("3234-01-01", "3234-02-03"),
-            ]
+            ],
+            STUB_I18N
         )

         # Should default to the max of all specified end dates

@@ -95,7 +100,8 @@ class ResolveDatesTest(TestCase):
                 (None, "2014-08-01"),
                 (None, None),
                 (None, None)
-            ]
+            ],
+            STUB_I18N
         )

     def test_start_after_step_due(self):

@@ -106,7 +112,8 @@ class ResolveDatesTest(TestCase):
                 (None, "2014-08-01"),
                 (None, None),
                 (None, None)
-            ]
+            ],
+            STUB_I18N
         )

     def test_due_before_step_start(self):

@@ -117,5 +124,6 @@ class ResolveDatesTest(TestCase):
                 (None, None),
                 ("2014-02-03", None),
                 (None, None)
-            ]
+            ],
+            STUB_I18N
         )
openassessment/xblock/test/test_submission.py  (view file @ 08a5800a)

@@ -9,7 +9,6 @@ import pytz
 from mock import patch, Mock
 from submissions import api as sub_api
 from submissions.api import SubmissionRequestError, SubmissionInternalError
-from openassessment.xblock.submission_mixin import SubmissionMixin
 from .base import XBlockHandlerTestCase, scenario

@@ -31,7 +30,7 @@ class SubmissionTest(XBlockHandlerTestCase):
         resp = self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
         self.assertFalse(resp[0])
         self.assertEqual(resp[1], "ENOMULTI")
-        self.assertEqual(resp[2], xblock.submit_errors["ENOMULTI"])
+        self.assertIsNotNone(resp[2])

     @scenario('data/basic_scenario.xml', user_id='Bob')
     @patch.object(sub_api, 'create_submission')

@@ -40,7 +39,7 @@ class SubmissionTest(XBlockHandlerTestCase):
         resp = self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
         self.assertFalse(resp[0])
         self.assertEqual(resp[1], "EUNKNOWN")
-        self.assertEqual(resp[2], SubmissionMixin().submit_errors["EUNKNOWN"])
+        self.assertIsNotNone(resp[2])

     @scenario('data/basic_scenario.xml', user_id='Bob')
     @patch.object(sub_api, 'create_submission')

@@ -49,6 +48,7 @@ class SubmissionTest(XBlockHandlerTestCase):
         resp = self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
         self.assertFalse(resp[0])
         self.assertEqual(resp[1], "EBADFORM")
+        self.assertIsNotNone(resp[2])

     # In Studio preview mode, the runtime sets the user ID to None
     @scenario('data/basic_scenario.xml', user_id=None)
openassessment/xblock/test/test_validation.py  (view file @ 08a5800a)

@@ -15,24 +15,25 @@ from openassessment.xblock.validation import (
     validate_dates, validate_assessment_examples
 )

+STUB_I18N = lambda x: x
+

 @ddt.ddt
 class AssessmentValidationTest(TestCase):

     @ddt.file_data('data/valid_assessments.json')
     def test_valid_assessment(self, data):
-        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
         self.assertTrue(success)
         self.assertEqual(msg, u'')

     @ddt.file_data('data/invalid_assessments.json')
     def test_invalid_assessment(self, data):
-        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
+        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

     def test_no_assessments(self):
-        success, msg = validate_assessments([], [], False)
+        success, msg = validate_assessments([], [], False, STUB_I18N)
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

@@ -69,7 +70,7 @@ class AssessmentValidationTest(TestCase):
             AssertionError
         """
-        success, msg = validate_assessments(assessments, current_assessments, is_released)
+        success, msg = validate_assessments(assessments, current_assessments, is_released, STUB_I18N)
         self.assertEqual(success, expected_is_valid, msg=msg)
         if not success:

@@ -85,7 +86,7 @@ class RubricValidationTest(TestCase):
         is_released = data.get('is_released', False)
         is_example_based = data.get('is_example_based', False)
         success, msg = validate_rubric(
-            data['rubric'], current_rubric, is_released, is_example_based
+            data['rubric'], current_rubric, is_released, is_example_based, STUB_I18N
         )
         self.assertTrue(success)
         self.assertEqual(msg, u'')

@@ -96,7 +97,7 @@ class RubricValidationTest(TestCase):
         is_released = data.get('is_released', False)
         is_example_based = data.get('is_example_based', False)
         success, msg = validate_rubric(
-            data['rubric'], current_rubric, is_released, is_example_based
+            data['rubric'], current_rubric, is_released, is_example_based, STUB_I18N
         )
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

@@ -107,13 +108,13 @@ class AssessmentExamplesValidationTest(TestCase):
     @ddt.file_data('data/valid_assessment_examples.json')
     def test_valid_assessment_examples(self, data):
-        success, msg = validate_assessment_examples(data['rubric'], data['assessments'])
+        success, msg = validate_assessment_examples(data['rubric'], data['assessments'], STUB_I18N)
         self.assertTrue(success)
         self.assertEqual(msg, u'')

     @ddt.file_data('data/invalid_assessment_examples.json')
     def test_invalid_assessment_examples(self, data):
-        success, msg = validate_assessment_examples(data['rubric'], data['assessments'])
+        success, msg = validate_assessment_examples(data['rubric'], data['assessments'], STUB_I18N)
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)

@@ -152,7 +153,8 @@ class DateValidationTest(TestCase):
                 date_range('submission_start', 'submission_due'),
                 date_range('peer_start', 'peer_due'),
                 date_range('self_start', 'self_due'),
-            ]
+            ],
+            STUB_I18N
         )
         self.assertTrue(success, msg=msg)

@@ -172,7 +174,8 @@ class DateValidationTest(TestCase):
                 date_range('submission_start', 'submission_due'),
                 date_range('peer_start', 'peer_due'),
                 date_range('self_start', 'self_due'),
-            ]
+            ],
+            STUB_I18N
         )
         self.assertFalse(success)

@@ -181,16 +184,16 @@ class DateValidationTest(TestCase):
     def test_invalid_date_format(self):
         valid = dt(2014, 1, 1).replace(tzinfo=pytz.UTC).isoformat()

-        success, _ = validate_dates("invalid", valid, [(valid, valid)])
+        success, _ = validate_dates("invalid", valid, [(valid, valid)], STUB_I18N)
         self.assertFalse(success)

-        success, _ = validate_dates(valid, "invalid", [(valid, valid)])
+        success, _ = validate_dates(valid, "invalid", [(valid, valid)], STUB_I18N)
         self.assertFalse(success)

-        success, _ = validate_dates(valid, valid, [("invalid", valid)])
+        success, _ = validate_dates(valid, valid, [("invalid", valid)], STUB_I18N)
         self.assertFalse(success)

-        success, _ = validate_dates(valid, valid, [(valid, "invalid")])
+        success, _ = validate_dates(valid, valid, [(valid, "invalid")], STUB_I18N)
         self.assertFalse(success)

@@ -285,7 +288,7 @@ class ValidationIntegrationTest(TestCase):
         self.oa_block.rubric_criteria = []
         self.oa_block.start = None
         self.oa_block.due = None
-        self.validator = validator(self.oa_block)
+        self.validator = validator(self.oa_block, STUB_I18N)

     def test_validates_successfully(self):
         is_valid, msg = self.validator(self.RUBRIC, self.ASSESSMENTS)
openassessment/xblock/validation.py  (view file @ 08a5800a)

@@ -2,7 +2,6 @@
 Validate changes to an XBlock before it is updated.
 """
 from collections import Counter
-from django.utils.translation import ugettext as _
 from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric
 from openassessment.assessment.api.student_training import validate_training_examples
 from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat

@@ -82,7 +81,7 @@ def _is_valid_assessment_sequence(assessments):
     return sequence in valid_sequences


-def validate_assessments(assessments, current_assessments, is_released):
+def validate_assessments(assessments, current_assessments, is_released, _):
     """
     Check that the assessment dict is semantically valid.

@@ -99,6 +98,7 @@ def validate_assessments(assessments, current_assessments, is_released):
             assessment models.  Used to determine if the assessment configuration
             has changed since the question had been released.
         is_released (boolean) : True if the question has been released.
+        _ (function): The service function used to get the appropriate i18n text

     Returns:
         tuple (is_valid, msg) where

@@ -158,7 +158,7 @@ def validate_assessments(assessments, current_assessments, is_released):
     return (True, u'')


-def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
+def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based, _):
     """
     Check that the rubric is semantically valid.

@@ -167,6 +167,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
         current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
         is_released (bool): True if and only if the problem has been released.
         is_example_based (bool): True if and only if this is an example-based assessment.
+        _ (function): The service function used to get the appropriate i18n text

     Returns:
         tuple (is_valid, msg) where

@@ -176,7 +177,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
     try:
         rubric_from_dict(rubric_dict)
     except InvalidRubric:
-        return (False, u'This rubric definition is not valid.')
+        return (False, _(u'This rubric definition is not valid.'))

     # No duplicate criteria names
     duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])

@@ -229,7 +230,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
         current_criterion_names = set(criterion.get('name') for criterion in current_rubric['criteria'])
         new_criterion_names = set(criterion.get('name') for criterion in rubric_dict['criteria'])
         if current_criterion_names != new_criterion_names:
-            return (False, u'Criteria names cannot be changed after a problem is released')
+            return (False, _(u'Criteria names cannot be changed after a problem is released'))

     # Number of options for each criterion must be the same
     for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):

@@ -244,7 +245,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
     return (True, u'')


-def validate_dates(start, end, date_ranges):
+def validate_dates(start, end, date_ranges, _):
     """
     Check that start and due dates are valid.

@@ -252,6 +253,7 @@ def validate_dates(start, end, date_ranges):
         start (str): ISO-formatted date string indicating when the problem opens.
         end (str): ISO-formatted date string indicating when the problem closes.
         date_ranges (list of tuples): List of (start, end) pair for each submission / assessment.
+        _ (function): The service function used to get the appropriate i18n text

     Returns:
         tuple (is_valid, msg) where

@@ -259,20 +261,21 @@ def validate_dates(start, end, date_ranges):
         and msg describes any validation errors found.
     """
     try:
-        resolve_dates(start, end, date_ranges)
+        resolve_dates(start, end, date_ranges, _)
     except (DateValidationError, InvalidDateFormat) as ex:
         return (False, unicode(ex))
     else:
         return (True, u'')


-def validate_assessment_examples(rubric_dict, assessments):
+def validate_assessment_examples(rubric_dict, assessments, _):
     """
     Validate assessment training examples.

     Args:
         rubric_dict (dict): The serialized rubric model.
         assessments (list of dict): List of assessment dictionaries.
+        _ (function): The service function used to get the appropriate i18n text

     Returns:
         tuple (is_valid, msg) where

@@ -298,13 +301,14 @@ def validate_assessment_examples(rubric_dict, assessments):
     return True, u''


-def validator(oa_block, strict_post_release=True):
+def validator(oa_block, _, strict_post_release=True):
     """
     Return a validator function configured for the XBlock.
     This will validate assessments, rubrics, and dates.

     Args:
         oa_block (OpenAssessmentBlock): The XBlock being updated.
+        _ (function): The service function used to get the appropriate i18n text

     Keyword Arguments:
         strict_post_release (bool): If true, restrict what authors can update once

@@ -320,7 +324,7 @@ def validator(oa_block, strict_post_release=True):
         # Assessments
         current_assessments = oa_block.rubric_assessments
-        success, msg = validate_assessments(assessments, current_assessments, is_released)
+        success, msg = validate_assessments(assessments, current_assessments, is_released, _)
         if not success:
             return (False, msg)

@@ -330,19 +334,19 @@ def validator(oa_block, strict_post_release=True):
             'prompt': oa_block.prompt,
             'criteria': oa_block.rubric_criteria
         }
-        success, msg = validate_rubric(rubric_dict, current_rubric, is_released, is_example_based)
+        success, msg = validate_rubric(rubric_dict, current_rubric, is_released, is_example_based, _)
         if not success:
             return (False, msg)

         # Training examples
-        success, msg = validate_assessment_examples(rubric_dict, assessments)
+        success, msg = validate_assessment_examples(rubric_dict, assessments, _)
         if not success:
             return (False, msg)

         # Dates
         submission_dates = [(submission_start, submission_due)]
         assessment_dates = [(asmnt.get('start'), asmnt.get('due')) for asmnt in assessments]
-        success, msg = validate_dates(oa_block.start, oa_block.due, submission_dates + assessment_dates)
+        success, msg = validate_dates(oa_block.start, oa_block.due, submission_dates + assessment_dates, _)
         if not success:
             return (False, msg)
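All of the validation helpers now take the translation function as their last positional argument, and validator() itself takes it right after the block, so the block can hand over self._ once and have it threaded through every check. A usage sketch, assuming the signatures shown in this diff (STUB_I18N stands in for a real i18n service outside the XBlock, as in the tests):

    from openassessment.xblock.validation import validator, validate_dates

    STUB_I18N = lambda x: x

    # Outside a block (tests, scripts): pass a stub.
    success, msg = validate_dates(
        "2014-01-01", "2014-02-01",
        [("2014-01-05", "2014-01-20")],
        STUB_I18N,
    )

    # Inside the block, the i18n-backed `_` property is passed instead, as the
    # studio_mixin.py and openassessmentblock.py hunks show:
    #   xblock_validator = validator(self, self._)
    #   xblock_validator = validator(block, block._, strict_post_release=False)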
openassessment/xblock/xml.py  (view file @ 08a5800a)

@@ -6,7 +6,6 @@ import lxml.etree as etree
 import pytz
 import dateutil.parser
 import defusedxml.ElementTree as safe_etree
-from django.utils.translation import ugettext as _


 class UpdateFromXmlError(Exception):

@@ -201,7 +200,7 @@ def parse_date(date_str, name=""):
         formatted_date = parsed_date.strftime("%Y-%m-%dT%H:%M:%S")
         return unicode(formatted_date)
     except (ValueError, TypeError):
-        msg = _(
+        msg = (
             'The format of the given date ({date}) for the {name} is invalid. '
             'Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'
         ).format(date=date_str, name=name)

@@ -251,16 +250,16 @@ def _parse_options_xml(options_root):
         try:
             option_dict['points'] = int(option.get('points'))
         except ValueError:
-            raise UpdateFromXmlError(_('The value for "points" must be an integer.'))
+            raise UpdateFromXmlError('The value for "points" must be an integer.')
     else:
-        raise UpdateFromXmlError(_('Every "option" element must contain a "points" attribute.'))
+        raise UpdateFromXmlError('Every "option" element must contain a "points" attribute.')

     # Option name
     option_name = option.find('name')
     if option_name is not None:
         option_dict['name'] = _safe_get_text(option_name)
     else:
-        raise UpdateFromXmlError(_('Every "option" element must contain a "name" element.'))
+        raise UpdateFromXmlError('Every "option" element must contain a "name" element.')

     # Option label
     # Backwards compatibility: Older problem definitions won't have this.

@@ -277,7 +276,7 @@ def _parse_options_xml(options_root):
     if option_explanation is not None:
         option_dict['explanation'] = _safe_get_text(option_explanation)
     else:
-        raise UpdateFromXmlError(_('Every "option" element must contain an "explanation" element.'))
+        raise UpdateFromXmlError('Every "option" element must contain an "explanation" element.')

     # Add the options dictionary to the list
     options_list.append(option_dict)

@@ -313,7 +312,7 @@ def _parse_criteria_xml(criteria_root):
     if criterion_name is not None:
         criterion_dict['name'] = _safe_get_text(criterion_name)
     else:
-        raise UpdateFromXmlError(_('Every "criterion" element must contain a "name" element.'))
+        raise UpdateFromXmlError('Every "criterion" element must contain a "name" element.')

     # Criterion label
     # Backwards compatibility: Older problem definitions won't have this,

@@ -330,14 +329,14 @@ def _parse_criteria_xml(criteria_root):
     if criterion_prompt is not None:
         criterion_dict['prompt'] = _safe_get_text(criterion_prompt)
     else:
-        raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
+        raise UpdateFromXmlError('Every "criterion" element must contain a "prompt" element.')

     # Criterion feedback (disabled, optional, or required)
     criterion_feedback = criterion.get('feedback', 'disabled')
     if criterion_feedback in ['optional', 'disabled', 'required']:
         criterion_dict['feedback'] = criterion_feedback
     else:
-        raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set set to "optional" or "required".'))
+        raise UpdateFromXmlError('Invalid value for "feedback" attribute: if specified, it must be set set to "optional" or "required".')

     # Criterion options
     criterion_dict['options'] = _parse_options_xml(criterion)

@@ -404,16 +403,16 @@ def parse_examples_xml(examples):
         # Retrieve the answer from the training example
         answer_elements = example_el.findall('answer')
         if len(answer_elements) != 1:
-            raise UpdateFromXmlError(_(u'Each "example" element must contain exactly one "answer" element'))
+            raise UpdateFromXmlError(u'Each "example" element must contain exactly one "answer" element')
         example_dict['answer'] = _safe_get_text(answer_elements[0])

         # Retrieve the options selected from the training example
         example_dict['options_selected'] = []
         for select_el in example_el.findall('select'):
             if 'criterion' not in select_el.attrib:
-                raise UpdateFromXmlError(_(u'Each "select" element must have a "criterion" attribute'))
+                raise UpdateFromXmlError(u'Each "select" element must have a "criterion" attribute')
             if 'option' not in select_el.attrib:
-                raise UpdateFromXmlError(_(u'Each "select" element must have an "option" attribute'))
+                raise UpdateFromXmlError(u'Each "select" element must have an "option" attribute')
             example_dict['options_selected'].append({
                 'criterion': unicode(select_el.get('criterion')),

@@ -449,14 +448,14 @@ def parse_assessments_xml(assessments_root):
         if 'name' in assessment.attrib:
             assessment_dict['name'] = unicode(assessment.get('name'))
         else:
-            raise UpdateFromXmlError(_('All "assessment" elements must contain a "name" element.'))
+            raise UpdateFromXmlError('All "assessment" elements must contain a "name" element.')

         # Assessment start
         if 'start' in assessment.attrib:
             # Example-based assessment is NOT allowed to have a start date
             if assessment_dict['name'] == 'example-based-assessment':
-                raise UpdateFromXmlError(_('Example-based assessment cannot have a start date'))
+                raise UpdateFromXmlError('Example-based assessment cannot have a start date')

             # Other assessment types CAN have a start date
             parsed_start = parse_date(assessment.get('start'), name="{} start date".format(assessment_dict['name']))

@@ -471,7 +470,7 @@ def parse_assessments_xml(assessments_root):
             # Example-based assessment is NOT allowed to have a due date
             if assessment_dict['name'] == 'example-based-assessment':
-                raise UpdateFromXmlError(_('Example-based assessment cannot have a due date'))
+                raise UpdateFromXmlError('Example-based assessment cannot have a due date')

             # Other assessment types CAN have a due date
             parsed_due = parse_date(assessment.get('due'), name="{} due date".format(assessment_dict['name']))

@@ -486,14 +485,14 @@ def parse_assessments_xml(assessments_root):
             try:
                 assessment_dict['must_grade'] = int(assessment.get('must_grade'))
             except ValueError:
-                raise UpdateFromXmlError(_('The "must_grade" value must be a positive integer.'))
+                raise UpdateFromXmlError('The "must_grade" value must be a positive integer.')

         # Assessment must_be_graded_by
         if 'must_be_graded_by' in assessment.attrib:
             try:
                 assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
             except ValueError:
-                raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
+                raise UpdateFromXmlError('The "must_be_graded_by" value must be a positive integer.')

         # Training examples
         examples = assessment.findall('example')

@@ -714,7 +713,7 @@ def parse_from_xml(root):
     # Check that the root has the correct tag
     if root.tag != 'openassessment':
-        raise UpdateFromXmlError(_('Every open assessment problem must contain an "openassessment" element.'))
+        raise UpdateFromXmlError('Every open assessment problem must contain an "openassessment" element.')

     # Retrieve the start date for the submission
     # Set it to None by default; we will update it to the latest start date later on

@@ -735,21 +734,21 @@ def parse_from_xml(root):
     # Retrieve the title
     title_el = root.find('title')
     if title_el is None:
-        raise UpdateFromXmlError(_('Every assessment must contain a "title" element.'))
+        raise UpdateFromXmlError('Every assessment must contain a "title" element.')
     else:
         title = _safe_get_text(title_el)

     # Retrieve the rubric
     rubric_el = root.find('rubric')
     if rubric_el is None:
-        raise UpdateFromXmlError(_('Every assessment must contain a "rubric" element.'))
+        raise UpdateFromXmlError('Every assessment must contain a "rubric" element.')
     else:
         rubric = parse_rubric_xml(rubric_el)

     # Retrieve the assessments
     assessments_el = root.find('assessments')
     if assessments_el is None:
-        raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
+        raise UpdateFromXmlError('Every assessment must contain an "assessments" element.')
     else:
         assessments = parse_assessments_xml(assessments_el)

@@ -802,7 +801,7 @@ def _unicode_to_xml(xml):
     try:
         return safe_etree.fromstring(xml.encode('utf-8'))
     except (ValueError, safe_etree.ParseError):
-        raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
+        raise UpdateFromXmlError("An error occurred while parsing the XML content.")


 def parse_examples_from_xml_str(xml):