Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
E
edx-platform
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
edx
edx-platform
Commits
38a81b46
Commit
38a81b46
authored
Jan 08, 2013
by
Vik Paruchuri
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Remove open ended grading stuff, fix JS variable
parent
bc97a507
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
3 additions
and
512 deletions
+3
-512
common/lib/capa/capa/inputtypes.py
+0
-48
common/lib/capa/capa/responsetypes.py
+1
-432
common/lib/xmodule/xmodule/js/src/capa/display.coffee
+0
-30
common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
+1
-1
common/lib/xmodule/xmodule/open_ended_module.py
+1
-1
No files found.
common/lib/capa/capa/inputtypes.py
View file @
38a81b46
...
@@ -735,51 +735,3 @@ class ChemicalEquationInput(InputTypeBase):
...
@@ -735,51 +735,3 @@ class ChemicalEquationInput(InputTypeBase):
registry
.
register
(
ChemicalEquationInput
)
registry
.
register
(
ChemicalEquationInput
)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
class OpenEndedInput(InputTypeBase):
    """
    A text area input for code--uses codemirror, does syntax highlighting, special tab handling,
    etc.
    """

    template = "openendedinput.html"
    tags = ['openendedinput']

    # pulled out for testing
    submitted_msg = ("Feedback not yet available. Reload to check again. "
                     "Once the problem is graded, this message will be "
                     "replaced with the grader's feedback.")

    @classmethod
    def get_attributes(cls):
        """
        Convert options to a convenient format.
        """
        attrs = [
            Attribute('rows', '30'),
            Attribute('cols', '80'),
            Attribute('hidden', ''),
        ]
        return attrs

    def setup(self):
        """
        Implement special logic: handle queueing state, and default input.
        """
        # if no student input yet, then use the default input given by the problem
        if not self.value:
            self.value = self.xml.text

        # Check if problem has been queued
        self.queue_len = 0
        # Flag indicating that the problem has been queued, 'msg' is length of queue
        if self.status == 'incomplete':
            self.status = 'queued'
            self.queue_len = self.msg
            self.msg = self.submitted_msg

    def _extra_context(self):
        """Defined queue_len, add it """
        extra = {'queue_len': self.queue_len, }
        return extra


registry.register(OpenEndedInput)
#-----------------------------------------------------------------------------
common/lib/capa/capa/responsetypes.py
View file @
38a81b46
...
@@ -1815,436 +1815,6 @@ class ImageResponse(LoncapaResponse):
...
@@ -1815,436 +1815,6 @@ class ImageResponse(LoncapaResponse):
return
(
dict
([(
ie
.
get
(
'id'
),
ie
.
get
(
'rectangle'
))
for
ie
in
self
.
ielements
]),
return
(
dict
([(
ie
.
get
(
'id'
),
ie
.
get
(
'rectangle'
))
for
ie
in
self
.
ielements
]),
dict
([(
ie
.
get
(
'id'
),
ie
.
get
(
'regions'
))
for
ie
in
self
.
ielements
]))
dict
([(
ie
.
get
(
'id'
),
ie
.
get
(
'regions'
))
for
ie
in
self
.
ielements
]))
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
class OpenEndedResponse(LoncapaResponse):
    """
    Grade student open ended responses using an external grading system,
    accessed through the xqueue system.

    Expects 'xqueue' dict in ModuleSystem with the following keys that are
    needed by OpenEndedResponse:

        system.xqueue = { 'interface': XqueueInterface object,
                          'callback_url': Per-StudentModule callback URL
                                          where results are posted (string),
                        }

    External requests are only submitted for student submission grading
    (i.e. and not for getting reference answers)

    By default, uses the OpenEndedResponse.DEFAULT_QUEUE queue.
    """

    DEFAULT_QUEUE = 'open-ended'
    DEFAULT_MESSAGE_QUEUE = 'open-ended-message'
    response_tag = 'openendedresponse'
    allowed_inputfields = ['openendedinput']
    max_inputfields = 1

    def setup_response(self):
        '''
        Configure OpenEndedResponse from XML.

        Raises ValueError if any of the required <openendedparam>, <prompt>,
        or <openendedrubric> children are missing from the problem XML.
        '''
        xml = self.xml
        self.url = xml.get('url', None)
        self.queue_name = xml.get('queuename', self.DEFAULT_QUEUE)
        self.message_queue_name = xml.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)

        # The openendedparam tag encapsulates all grader settings
        oeparam = self.xml.find('openendedparam')
        prompt = self.xml.find('prompt')
        rubric = self.xml.find('openendedrubric')

        # This is needed to attach feedback to specific responses later
        self.submission_id = None
        self.grader_id = None

        if oeparam is None:
            raise ValueError("No oeparam found in problem xml.")
        if prompt is None:
            raise ValueError("No prompt found in problem xml.")
        if rubric is None:
            raise ValueError("No rubric found in problem xml.")

        self._parse(oeparam, prompt, rubric)

    @staticmethod
    def stringify_children(node):
        """
        Modify code from stringify_children in xmodule. Didn't import directly
        in order to avoid capa depending on xmodule (seems to be avoided in
        code)
        """
        parts = [node.text if node.text is not None else '']
        for p in node.getchildren():
            parts.append(etree.tostring(p, with_tail=True, encoding='unicode'))
        return ' '.join(parts)

    def _parse(self, oeparam, prompt, rubric):
        '''
        Parse OpenEndedResponse XML:
            self.initial_display
            self.payload - dict containing keys --
            'grader' : path to grader settings file, 'problem_id' : id of the problem

            self.answer - What to display when show answer is clicked

        Raises ValueError if the <grader_payload> text is not valid JSON.
        '''
        # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
        prompt_string = self.stringify_children(prompt)
        rubric_string = self.stringify_children(rubric)

        grader_payload = oeparam.find('grader_payload')
        grader_payload = grader_payload.text if grader_payload is not None else ''

        # Update grader payload with student id. If grader payload not json, error.
        try:
            parsed_grader_payload = json.loads(grader_payload)
            # NOTE: self.system.location is valid because the capa_module
            # __init__ adds it (easiest way to get problem location into
            # response types)
        except (TypeError, ValueError):
            # BUGFIX: the original used py2 `except TypeError, ValueError:`,
            # which caught only TypeError and bound the exception to the name
            # ValueError; a bad payload then crashed later with NameError on
            # the never-assigned parsed_grader_payload. Fail loudly instead,
            # matching the ValueError style of setup_response above.
            log.exception("Grader payload %r is not a json object!", grader_payload)
            raise ValueError("Grader payload is not a json object: {0!r}".format(grader_payload))

        self.initial_display = find_with_default(oeparam, 'initial_display', '')
        self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')

        parsed_grader_payload.update({
            'location': self.system.location,
            'course_id': self.system.course_id,
            'prompt': prompt_string,
            'rubric': rubric_string,
            'initial_display': self.initial_display,
            'answer': self.answer,
        })
        updated_grader_payload = json.dumps(parsed_grader_payload)

        self.payload = {'grader_payload': updated_grader_payload}

        try:
            self.max_score = int(find_with_default(oeparam, 'max_score', 1))
        except ValueError:
            self.max_score = 1

    def handle_message_post(self, event_info):
        """
        Handles a student message post (a reaction to the grade they received from an open ended grader type)
        Returns a boolean success/fail and an error message
        """
        survey_responses = event_info['survey_responses']
        for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
            if tag not in survey_responses:
                return False, "Could not find needed tag {0}".format(tag)
        try:
            submission_id = int(survey_responses['submission_id'])
            grader_id = int(survey_responses['grader_id'])
            feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
            score = int(survey_responses['score'])
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Still deliberately broad:
            # this is a best-effort parse of browser-supplied data.
            error_message = ("Could not parse submission id, grader id, "
                             "or feedback from message_post ajax call. Here is the message data: {0}".format(survey_responses))
            log.exception(error_message)
            return False, "There was an error saving your feedback. Please contact course staff."

        qinterface = self.system.xqueue['interface']
        qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
        anonymous_student_id = self.system.anonymous_student_id
        queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
                                                 anonymous_student_id +
                                                 self.answer_id)
        xheader = xqueue_interface.make_xheader(
            lms_callback_url=self.system.xqueue['callback_url'],
            lms_key=queuekey,
            queue_name=self.message_queue_name)

        student_info = {
            'anonymous_student_id': anonymous_student_id,
            'submission_time': qtime,
        }
        contents = {
            'feedback': feedback,
            'submission_id': submission_id,
            'grader_id': grader_id,
            'score': score,
            'student_info': json.dumps(student_info),
        }

        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))
        # Convert error to a success value
        success = True
        if error:
            success = False

        return success, "Successfully submitted your feedback."

    def get_score(self, student_answers):
        try:
            submission = student_answers[self.answer_id]
        except KeyError:
            msg = ('Cannot get student answer for answer_id: {0}. student_answers {1}'.format(self.answer_id, student_answers))
            log.exception(msg)
            raise LoncapaProblemError(msg)

        # Prepare xqueue request
        # ------------------------------------------------------------
        qinterface = self.system.xqueue['interface']
        qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
        anonymous_student_id = self.system.anonymous_student_id

        # Generate header
        queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
                                                 anonymous_student_id +
                                                 self.answer_id)
        xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
                                                lms_key=queuekey,
                                                queue_name=self.queue_name)

        self.context.update({'submission': submission})
        contents = self.payload.copy()

        # Metadata related to the student submission revealed to the external grader
        student_info = {
            'anonymous_student_id': anonymous_student_id,
            'submission_time': qtime,
        }

        # Update contents with student response and student info
        contents.update({
            'student_info': json.dumps(student_info),
            'student_response': submission,
            'max_score': self.max_score,
        })

        # Submit request. When successful, 'msg' is the prior length of the queue
        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))

        # State associated with the queueing request
        queuestate = {'key': queuekey,
                      'time': qtime, }

        cmap = CorrectMap()
        if error:
            cmap.set(self.answer_id, queuestate=None,
                     msg='Unable to deliver your submission to grader. (Reason: {0}.)'
                         ' Please try again later.'.format(msg))
        else:
            # Queueing mechanism flags:
            #   1) Backend: Non-null CorrectMap['queuestate'] indicates that
            #      the problem has been queued
            #   2) Frontend: correctness='incomplete' eventually trickles down
            #      through inputtypes.textbox and .filesubmission to inform the
            #      browser that the submission is queued (and it could e.g. poll)
            cmap.set(self.answer_id, queuestate=queuestate,
                     correctness='incomplete', msg=msg)

        return cmap

    def update_score(self, score_msg, oldcmap, queuekey):
        log.debug(score_msg)
        score_msg = self._parse_score_msg(score_msg)
        if not score_msg.valid:
            oldcmap.set(self.answer_id,
                        msg='Invalid grader reply. Please contact the course staff.')
            return oldcmap

        correctness = 'correct' if score_msg.correct else 'incorrect'

        # TODO: Find out how this is used elsewhere, if any
        self.context['correct'] = correctness

        # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
        # does not match, we keep waiting for the score_msg whose key actually matches
        if oldcmap.is_right_queuekey(self.answer_id, queuekey):
            # Sanity check on returned points
            points = score_msg.points
            if points < 0:
                points = 0
            # Queuestate is consumed, so reset it to None
            # NOTE(review): the replace() arguments render identically in the
            # scraped source; the first was likely an HTML entity such as
            # '&nbsp;' — confirm against upstream before relying on this.
            oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
                        msg=score_msg.msg.replace(' ', ' '),
                        queuestate=None)
        else:
            log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format(queuekey, self.answer_id))

        return oldcmap

    def get_answers(self):
        anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
        return {self.answer_id: anshtml}

    def get_initial_display(self):
        return {self.answer_id: self.initial_display}

    def _convert_longform_feedback_to_html(self, response_items):
        """
        Take in a dictionary, and return html strings for display to student.

        Input:
            response_items: Dictionary with keys success, feedback.
                if success is True, feedback should be a dictionary, with keys for
                   types of feedback, and the corresponding feedback values.
                if success is False, feedback is actually an error string.

                NOTE: this will need to change when we integrate peer grading, because
                that will have more complex feedback.

        Output:
            String -- html that can be displayed to the student.
        """

        # We want to display available feedback in a particular order.
        # This dictionary specifies which goes first--lower first.
        priorities = {  # These go at the start of the feedback
                      'spelling': 0,
                      'grammar': 1,
                      # needs to be after all the other feedback
                      'markup_text': 3}
        default_priority = 2

        def get_priority(elt):
            """
            Args:
                elt: a tuple of feedback-type, feedback
            Returns:
                the priority for this feedback type
            """
            return priorities.get(elt[0], default_priority)

        def encode_values(feedback_type, value):
            # Coerce both halves of a feedback item to ascii-safe strings.
            feedback_type = str(feedback_type).encode('ascii', 'ignore')
            if not isinstance(value, basestring):
                value = str(value)
            value = value.encode('ascii', 'ignore')
            return feedback_type, value

        def format_feedback(feedback_type, value):
            # Visible feedback block, classed by feedback type.
            feedback_type, value = encode_values(feedback_type, value)
            feedback = """
            <div class="{feedback_type}">
            {value}
            </div>
            """.format(feedback_type=feedback_type, value=value)
            return feedback

        def format_feedback_hidden(feedback_type, value):
            # Hidden block: carried in the page for later use, not shown.
            feedback_type, value = encode_values(feedback_type, value)
            feedback = """
            <div class="{feedback_type}" style="display: none;">
            {value}
            </div>
            """.format(feedback_type=feedback_type, value=value)
            return feedback

        # TODO (vshnayder): design and document the details of this format so
        # that we can do proper escaping here (e.g. are the graders allowed to
        # include HTML?)

        for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
            if tag not in response_items:
                return format_feedback('errors', 'Error getting feedback')

        feedback_items = response_items['feedback']
        try:
            feedback = json.loads(feedback_items)
        except (TypeError, ValueError):
            log.exception("feedback_items have invalid json %r", feedback_items)
            return format_feedback('errors', 'Could not parse feedback')

        if response_items['success']:
            if len(feedback) == 0:
                return format_feedback('errors', 'No feedback available')
            feedback_lst = sorted(feedback.items(), key=get_priority)
            feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
        else:
            feedback_list_part1 = format_feedback('errors', response_items['feedback'])

        feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
                                           for feedback_type, value in response_items.items()
                                           if feedback_type in ['submission_id', 'grader_id']]))

        return u"\n".join([feedback_list_part1, feedback_list_part2])

    def _format_feedback(self, response_items):
        """
        Input:
            Dictionary called feedback. Must contain keys seen below.
        Output:
            Return error message or feedback template
        """
        feedback = self._convert_longform_feedback_to_html(response_items)

        if not response_items['success']:
            return self.system.render_template("open_ended_error.html",
                                               {'errors': feedback})

        feedback_template = self.system.render_template("open_ended_feedback.html", {
            'grader_type': response_items['grader_type'],
            'score': "{0} / {1}".format(response_items['score'], self.max_score),
            'feedback': feedback,
        })

        return feedback_template

    def _parse_score_msg(self, score_msg):
        """
        Grader reply is a JSON-dump of the following dict
          { 'correct': True/False,
            'score': Numeric value (floating point is okay) to assign to answer
            'msg': grader_msg
            'feedback' : feedback from grader
            }

        Returns (valid_score_msg, correct, score, msg):
            valid_score_msg: Flag indicating valid score_msg format (Boolean)
            correct:         Correctness of submission (Boolean)
            score:           Points to be assigned (numeric, can be float)
        """
        fail = ScoreMessage(valid=False, correct=False, points=0, msg='')
        try:
            score_result = json.loads(score_msg)
        except (TypeError, ValueError):
            log.error("External grader message should be a JSON-serialized dict."
                      " Received score_msg = {0}".format(score_msg))
            return fail
        if not isinstance(score_result, dict):
            log.error("External grader message should be a JSON-serialized dict."
                      " Received score_result = {0}".format(score_result))
            return fail
        for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
            if tag not in score_result:
                log.error("External grader message is missing required tag: {0}".format(tag))
                return fail

        feedback = self._format_feedback(score_result)
        self.submission_id = score_result['submission_id']
        self.grader_id = score_result['grader_id']

        # HACK: for now, just assume it's correct if you got more than 2/3.
        # Also assumes that score_result['score'] is an integer.
        score_ratio = int(score_result['score']) / float(self.max_score)
        correct = (score_ratio >= 0.66)

        # Currently ignore msg and only return feedback (which takes the place of msg)
        return ScoreMessage(valid=True, correct=correct,
                            points=score_result['score'], msg=feedback)
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
# TEMPORARY: List of all response subclasses
# FIXME: To be replaced by auto-registration
# FIXME: To be replaced by auto-registration
...
@@ -2261,5 +1831,4 @@ __all__ = [CodeResponse,
...
@@ -2261,5 +1831,4 @@ __all__ = [CodeResponse,
ChoiceResponse
,
ChoiceResponse
,
MultipleChoiceResponse
,
MultipleChoiceResponse
,
TrueFalseResponse
,
TrueFalseResponse
,
JavascriptResponse
,
JavascriptResponse
]
OpenEndedResponse
]
common/lib/xmodule/xmodule/js/src/capa/display.coffee
View file @
38a81b46
...
@@ -25,7 +25,6 @@ class @Problem
...
@@ -25,7 +25,6 @@ class @Problem
@
$
(
'section.action input.reset'
).
click
@
reset
@
$
(
'section.action input.reset'
).
click
@
reset
@
$
(
'section.action input.show'
).
click
@
show
@
$
(
'section.action input.show'
).
click
@
show
@
$
(
'section.action input.save'
).
click
@
save
@
$
(
'section.action input.save'
).
click
@
save
@
$
(
'section.evaluation input.submit-message'
).
click
@
message_post
# Collapsibles
# Collapsibles
Collapsible
.
setCollapsibles
(
@
el
)
Collapsible
.
setCollapsibles
(
@
el
)
...
@@ -198,35 +197,6 @@ class @Problem
...
@@ -198,35 +197,6 @@ class @Problem
else
else
@
gentle_alert
response
.
success
@
gentle_alert
response
.
success
# Submit the student's reaction to grader feedback back to the server.
# Validates that a rating was picked, then POSTs a FormData payload
# (feedback text, submission id, grader id, score) to the message_post
# endpoint and collapses the evaluation section on success.
message_post: =>
  Logger.log 'message_post', @answers
  fd = new FormData()
  feedback = @$('section.evaluation textarea.feedback-on-feedback')[0].value
  submission_id = $('div.external-grader-message div.submission_id')[0].innerHTML
  grader_id = $('div.external-grader-message div.grader_id')[0].innerHTML
  score = $(".evaluation-scoring input:radio[name='evaluation-score']:checked").val()

  fd.append('feedback', feedback)
  fd.append('submission_id', submission_id)
  fd.append('grader_id', grader_id)
  if(!score)
    @gentle_alert "You need to pick a rating before you can submit."
    return
  else
    fd.append('score', score)

  settings =
    type: "POST"
    data: fd
    processData: false
    contentType: false
    success: (response) =>
      @gentle_alert response.message
      @$('section.evaluation').slideToggle()

  $.ajaxWithPrefix("#{@url}/message_post", settings)
reset
:
=>
reset
:
=>
Logger
.
log
'problem_reset'
,
@
answers
Logger
.
log
'problem_reset'
,
@
answers
$
.
postWithPrefix
"
#{
@
url
}
/problem_reset"
,
id
:
@
id
,
(
response
)
=>
$
.
postWithPrefix
"
#{
@
url
}
/problem_reset"
,
id
:
@
id
,
(
response
)
=>
...
...
common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
View file @
38a81b46
...
@@ -218,7 +218,7 @@ class @CombinedOpenEnded
...
@@ -218,7 +218,7 @@ class @CombinedOpenEnded
@
reinitialize
(
@
element
)
@
reinitialize
(
@
element
)
@
rebind
()
@
rebind
()
@
next_problem_button
.
hide
()
@
next_problem_button
.
hide
()
if
response
.
allow_reset
==
"False"
if
!
response
.
allow_reset
@
gentle_alert
"Moved to next step."
@
gentle_alert
"Moved to next step."
else
else
@
gentle_alert
"Your score did not meet the criteria to move to the next step."
@
gentle_alert
"Your score did not meet the criteria to move to the next step."
...
...
common/lib/xmodule/xmodule/open_ended_module.py
View file @
38a81b46
...
@@ -245,7 +245,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
...
@@ -245,7 +245,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
that will have more complex feedback.
that will have more complex feedback.
Output:
Output:
String -- html that can be displayed to the student.
String -- html that can be displayed to the student.
"""
"""
# We want to display available feedback in a particular order.
# We want to display available feedback in a particular order.
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment