edx / edx-ora2

Commit 6606af28
Authored Oct 03, 2016 by Eric Fischer; committed by GitHub on Oct 03, 2016
Merge pull request #931 from edx/efischer/tnl-4696
Override submitter requirements on staff override assessment
Parents: 76acce95, 120716dd
Showing 9 changed files with 166 additions and 160 deletions:

  openassessment/assessment/api/peer.py             +2    -0
  openassessment/workflow/api.py                     +5    -2
  openassessment/workflow/models.py                  +32   -11
  openassessment/workflow/test/test_api.py           +35   -2
  openassessment/xblock/grade_mixin.py               +11   -10
  openassessment/xblock/staff_assessment_mixin.py    +5    -1
  scripts/run-pep8.sh                                +2    -2
  scripts/run-pylint.sh                              +2    -2
  test/acceptance/tests.py                           +72   -130

openassessment/assessment/api/peer.py

@@ -436,6 +436,8 @@ def get_assessment_median_scores(submission_uuid):
         assessments = [item.assessment for item in items]
         scores = Assessment.scores_by_criterion(assessments)
         return Assessment.get_median_score_dict(scores)
+    except PeerWorkflow.DoesNotExist:
+        return {}
     except DatabaseError:
         error_message = (
             u"Error getting assessment median scores for submission {uuid}"
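
The practical effect of the two added lines: a submission with no peer workflow (for example, one that only received a staff override) now yields an empty dict instead of letting PeerWorkflow.DoesNotExist escape to the caller. A minimal sketch of the calling pattern, with a hypothetical summarize_peer_medians helper standing in for real display code:

    # Sketch only: summarize_peer_medians is a hypothetical caller, not part of edx-ora2.
    from openassessment.assessment.api import peer as peer_api

    def summarize_peer_medians(submission_uuid):
        # After this change, a submission with no peer workflow returns {} here
        # rather than raising PeerWorkflow.DoesNotExist.
        median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        if not median_scores:
            return "No peer assessments yet"
        return ", ".join(
            "{}: {}".format(criterion, score)
            for criterion, score in sorted(median_scores.items())
        )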

openassessment/workflow/api.py

@@ -177,7 +177,7 @@ def get_workflow_for_submission(submission_uuid, assessment_requirements):
     return update_from_assessments(submission_uuid, assessment_requirements)


-def update_from_assessments(submission_uuid, assessment_requirements):
+def update_from_assessments(submission_uuid, assessment_requirements, override_submitter_requirements=False):
     """
     Update our workflow status based on the status of the underlying assessments.

@@ -204,6 +204,9 @@ def update_from_assessments(submission_uuid, assessment_requirements):
             `must_be_graded_by` to ensure that everyone will get scored.
             The intention is to eventually pass in more assessment sequence
             specific requirements in this dict.
+        override_submitter_requirements (bool): If True, the presence of a new
+            staff score will cause all of the submitter's requirements to be
+            fulfilled, moving the workflow to DONE and exposing their grade.

     Returns:
         dict: Assessment workflow information with the following

@@ -259,7 +262,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
     workflow = _get_workflow_model(submission_uuid)

     try:
-        workflow.update_from_assessments(assessment_requirements)
+        workflow.update_from_assessments(assessment_requirements, override_submitter_requirements)
         logger.info((
             u"Updated workflow for submission UUID {uuid} "
             u"with requirements {reqs}"
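
Taken together, the new keyword argument and docstring mean a caller can opt in to the override behavior without changing the default. A minimal sketch of both call styles, assuming a placeholder submission UUID and the same requirements shape used in the tests below:

    # Sketch: placeholder values, illustrating the new keyword argument only.
    from openassessment.workflow import api as workflow_api

    submission_uuid = "00000000-0000-0000-0000-000000000000"  # placeholder
    requirements = {"peer": {"must_grade": 5, "must_be_graded_by": 3}}

    # Default behavior is unchanged.
    workflow_api.update_from_assessments(submission_uuid, requirements)

    # With the flag set, a new staff score also fulfills the submitter's own
    # requirements, so the workflow can move to DONE and expose the grade.
    workflow_api.update_from_assessments(
        submission_uuid,
        requirements,
        override_submitter_requirements=True,
    )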

openassessment/workflow/models.py

@@ -171,7 +171,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             # If we auto-added a staff step, it is optional and should be marked complete immediately
             if step.name == "staff" and staff_auto_added:
                 step.assessment_completed_at = now()
                 step.save()

             # For the first valid step, update the workflow status

@@ -209,6 +209,12 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return score

     def status_details(self):
+        """
+        Returns workflow status in the form of a dictionary. Each step in the
+        workflow is a key, and each key maps to a dictionary defining whether
+        the step is complete (submitter requirements fulfilled) and graded (the
+        submission has been assessed).
+        """
         status_dict = {}
         steps = self._get_steps()
         for step in steps:

@@ -259,7 +265,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return score

-    def update_from_assessments(self, assessment_requirements):
+    def update_from_assessments(self, assessment_requirements, override_submitter_requirements=False):
         """Query assessment APIs and change our status if appropriate.

         If the status is done, we do nothing. Once something is done, we never

@@ -291,6 +297,9 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
                 can refer to this to decide whether the requirements have been
                 met. Note that the requirements could change if the author
                 updates the problem definition.
+            override_submitter_requirements (bool): If True, the presence of a new
+                staff score will cause all of the submitter's requirements to be
+                fulfilled, moving the workflow to DONE and exposing their grade.
         """
         if self.status == self.STATUS.cancelled:

@@ -320,7 +329,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             # Update the assessment_completed_at field for all steps
             # All steps are considered "assessment complete", as the staff score will override all
             for step in steps:
-                step.assessment_completed_at = now()
+                common_now = now()
+                step.assessment_completed_at = common_now
+                if override_submitter_requirements:
+                    step.submitter_completed_at = common_now
                 step.save()

         if self.status == self.STATUS.done:

@@ -346,8 +358,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         # If the submitter has done all they need to do, let's check to see if
         # all steps have been fully assessed (i.e. we can score it).
-        if (new_status == self.STATUS.waiting and
-                all(step.assessment_completed_at for step in steps)):
+        if (
+                new_status == self.STATUS.waiting and
+                all(step.assessment_completed_at for step in steps)
+        ):
             score = self.get_score(assessment_requirements, step_for_name)

             # If we found a score, then we're done

@@ -398,7 +412,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return steps

-    def set_staff_score(self, score, is_override=False, reason=None):
+    def set_staff_score(self, score, reason=None):
         """
         Set a staff score for the workflow.

@@ -425,9 +439,9 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             self.submission_uuid,
             score["points_earned"],
             score["points_possible"],
             annotation_creator=score["staff_id"],
             annotation_type=annotation_type,
             annotation_reason=reason
         )

     def set_score(self, score):

@@ -602,9 +616,16 @@ class AssessmentWorkflowStep(models.Model):
         ordering = ["workflow", "order_num"]

     def is_submitter_complete(self):
+        """
+        Used to determine whether the submitter of the response has completed
+        their required actions.
+        """
         return self.submitter_completed_at is not None

     def is_assessment_complete(self):
+        """
+        Used to determine whether the response has been assessed at this step.
+        """
         return self.assessment_completed_at is not None

     def api(self):

@@ -660,12 +681,12 @@ class AssessmentWorkflowStep(models.Model):
         assessment_finished = getattr(self.api(), 'assessment_is_finished', default_finished)

         # Has the user completed their obligations for this step?
-        if (not self.is_submitter_complete() and submitter_finished(submission_uuid, step_reqs)):
+        if not self.is_submitter_complete() and submitter_finished(submission_uuid, step_reqs):
             self.submitter_completed_at = now()
             step_changed = True

         # Has the step received a score?
-        if (not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs)):
+        if not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs):
             self.assessment_completed_at = now()
             step_changed = True
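
The heart of the models change is the loop above: when a staff score arrives, every step is stamped as assessment-complete with one shared timestamp, and, if the override flag is set, as submitter-complete too, which is what lets the later "new_status == waiting and all steps assessed" check pass. A standalone sketch of that timestamp logic, with plain objects standing in for the Django models:

    # Standalone sketch; Step is a stand-in for AssessmentWorkflowStep, not the real model.
    from datetime import datetime

    class Step(object):
        def __init__(self, name):
            self.name = name
            self.submitter_completed_at = None
            self.assessment_completed_at = None

    def apply_staff_score(steps, override_submitter_requirements=False):
        # Mirror of the changed loop: one shared "now" marks every step as
        # assessed, and optionally as submitter-complete as well.
        common_now = datetime.utcnow()
        for step in steps:
            step.assessment_completed_at = common_now
            if override_submitter_requirements:
                step.submitter_completed_at = common_now

    steps = [Step("peer"), Step("self")]
    apply_staff_score(steps, override_submitter_requirements=True)
    assert all(step.assessment_completed_at for step in steps)
    assert all(step.submitter_completed_at for step in steps)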

openassessment/workflow/test/test_api.py

@@ -3,7 +3,6 @@ from django.test.utils import override_settings
 import ddt
 from mock import patch
 from nose.tools import raises
-from openassessment.assessment.models import PeerWorkflow
 from openassessment.test_utils import CacheResetTest

@@ -11,10 +10,11 @@ from submissions.models import Submission
 import openassessment.workflow.api as workflow_api
 from openassessment.assessment.api import ai as ai_api
 from openassessment.assessment.errors import AIError
-from openassessment.assessment.models import StudentTrainingWorkflow
+from openassessment.assessment.models import PeerWorkflow, StudentTrainingWorkflow
 import submissions.api as sub_api
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
+from openassessment.assessment.api import staff as staff_api
 from openassessment.workflow.models import AssessmentWorkflow, AssessmentApiLoadError
 from openassessment.workflow.errors import AssessmentWorkflowInternalError

@@ -254,6 +254,39 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)

+    @patch.object(staff_api, 'get_score')
+    @patch.object(PeerWorkflow.objects, 'get')
+    def test_no_peer_assessment_error_handled(self, mock_get_workflow, mock_get_staff_score):
+        """
+        Tests to verify that, given a problem that requires the peer step and a submission associated with a workflow
+        that has no assessments, an overriding staff score will push the workflow into the done state and not crash
+        when there are no assessments in the "completed" peer step.
+        """
+        mock_get_workflow.raises = PeerWorkflow.DoesNotExist
+        mock_get_staff_score.return_value = {
+            "points_earned": 10,
+            "points_possible": 10,
+            "contributing_assessments": 123,
+            "staff_id": "staff 1",
+        }
+        _, submission = self._create_workflow_with_status(
+            "user 1", "test/1/1", "peer-problem", "peer", steps=["peer"]
+        )
+        workflow_api.update_from_assessments(
+            submission["uuid"],
+            {"peer": {"must_grade": 5, "must_be_graded_by": 3}},
+            override_submitter_requirements=True
+        )
+
     @patch.object(AssessmentWorkflow.objects, 'get')
     @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)

openassessment/xblock/grade_mixin.py

@@ -567,15 +567,16 @@ class GradeMixin(object):
         # If criteria/options in the problem definition do NOT have a "label" field
         # (because they were created before this change),
         # we create a new label that has the same value as "name".
-        for part in assessment['parts']:
-            criterion_label_key = part['criterion']['name']
-            part['criterion']['label'] = criterion_labels.get(criterion_label_key, part['criterion']['name'])
-
-            # We need to be a little bit careful here: some assessment parts
-            # have only written feedback, so they're not associated with any options.
-            # If that's the case, we don't need to add the label field.
-            if part.get('option') is not None:
-                option_label_key = (part['criterion']['name'], part['option']['name'])
-                part['option']['label'] = option_labels.get(option_label_key, part['option']['name'])
+        if assessment is not None:
+            for part in assessment['parts']:
+                criterion_label_key = part['criterion']['name']
+                part['criterion']['label'] = criterion_labels.get(criterion_label_key, part['criterion']['name'])
+
+                # We need to be a little bit careful here: some assessment parts
+                # have only written feedback, so they're not associated with any options.
+                # If that's the case, we don't need to add the label field.
+                if part.get('option') is not None:
+                    option_label_key = (part['criterion']['name'], part['option']['name'])
+                    part['option']['label'] = option_labels.get(option_label_key, part['option']['name'])

         return assessment
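
This hunk is purely defensive: the label back-fill loop is now skipped when assessment is None (for example, when no staff assessment exists for the submission) instead of failing on a None subscript. A condensed sketch of the guard with hypothetical data, not the real GradeMixin method:

    # Condensed sketch of the guard; names and data are illustrative.
    def add_labels(assessment, criterion_labels, option_labels):
        if assessment is not None:  # new guard: a missing assessment passes straight through
            for part in assessment['parts']:
                name = part['criterion']['name']
                part['criterion']['label'] = criterion_labels.get(name, name)
                if part.get('option') is not None:
                    key = (name, part['option']['name'])
                    part['option']['label'] = option_labels.get(key, part['option']['name'])
        return assessment

    assert add_labels(None, {}, {}) is None  # without the guard, None['parts'] raises TypeError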

openassessment/xblock/staff_assessment_mixin.py

@@ -53,7 +53,11 @@ class StaffAssessmentMixin(object):
             )
             assess_type = data.get('assess_type', 'regrade')
             self.publish_assessment_event("openassessmentblock.staff_assess", assessment, type=assess_type)
-            workflow_api.update_from_assessments(assessment["submission_uuid"], None)
+            workflow_api.update_from_assessments(
+                assessment["submission_uuid"],
+                None,
+                override_submitter_requirements=(assess_type == 'regrade')
+            )

         except StaffAssessmentRequestError:
             logger.warning(
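
The flag is derived from the assess_type sent by the staff grading UI: only an override regrade unlocks the learner's grade immediately, while a staff assessment that is a required step leaves the submitter's own requirements in place. A small sketch of that decision, where the payload shapes and the 'full-grade' value are illustrative assumptions rather than a documented contract:

    # Sketch: payload shapes and the 'full-grade' value are assumptions for illustration.
    payloads = [
        {"assess_type": "regrade"},     # staff override of an existing learner grade
        {"assess_type": "full-grade"},  # staff assessment performed as a required step
    ]
    for data in payloads:
        assess_type = data.get('assess_type', 'regrade')
        override = (assess_type == 'regrade')
        print("{}: override_submitter_requirements={}".format(assess_type, override))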

scripts/run-pep8.sh

 #!/usr/bin/env bash

-MAX_PEP8_VIOLATIONS=110
+MAX_PEP8_VIOLATIONS=111

 mkdir -p test/logs
 PEP8_VIOLATIONS=test/logs/pep8.txt
 touch $PEP8_VIOLATIONS

-pep8 --config=.pep8 openassessment > $PEP8_VIOLATIONS
+pep8 --config=.pep8 openassessment test > $PEP8_VIOLATIONS
 NUM_PEP8_VIOLATIONS=$(cat $PEP8_VIOLATIONS | wc -l)
 echo "Found" $NUM_PEP8_VIOLATIONS "pep8 violations, threshold is" $MAX_PEP8_VIOLATIONS
 ...

scripts/run-pylint.sh

 #!/usr/bin/env bash

-MAX_PYLINT_VIOLATIONS=519
+MAX_PYLINT_VIOLATIONS=504

 mkdir -p test/logs
 PYLINT_VIOLATIONS=test/logs/pylint.txt
 touch $PYLINT_VIOLATIONS

-pylint --rcfile=pylintrc openassessment --msg-template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"' > $PYLINT_VIOLATIONS
+pylint --rcfile=pylintrc openassessment test --msg-template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"' > $PYLINT_VIOLATIONS

 ./scripts/run-pylint.py $PYLINT_VIOLATIONS $MAX_PYLINT_VIOLATIONS

test/acceptance/tests.py

 """
 UI-level acceptance tests for OpenAssessment.
 """
+from __future__ import absolute_import
+
 import ddt
 import os
 import unittest

@@ -8,13 +10,13 @@ import time
 from functools import wraps
 from pyinstrument import Profiler
-from nose.plugins.attrib import attr
-from bok_choy.web_app_test import WebAppTest
-from bok_choy.promise import BrokenPromise, EmptyPromise
-from auto_auth import AutoAuthPage
-from pages import (
+from acceptance.auto_auth import AutoAuthPage
+from acceptance.pages import (
     SubmissionPage, AssessmentPage, GradePage, StaffAreaPage
 )
+from bok_choy.web_app_test import WebAppTest
+from bok_choy.promise import BrokenPromise, EmptyPromise
+from nose.plugins.attrib import attr

 # This value is generally used in jenkins, but not locally
 PROFILING_ENABLED = os.environ.get('ORA_PROFILING_ENABLED', False)

@@ -89,7 +91,6 @@ class OpenAssessmentTest(WebAppTest):
     STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
     STAFF_OVERRIDE_SCORE = 1
     STAFF_GRADE_EXISTS = "COMPLETE"
-    STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE = "You Must Complete the Steps Above to View Your Grade"
     STAFF_AREA_SCORE = "Final grade: {} out of 8"
     STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE = "The problem has not been completed."
     EXPECTED_SCORE = 6

@@ -179,7 +180,7 @@ class OpenAssessmentTest(WebAppTest):
         self.self_asmnt_page.assess(options).wait_for_complete()
         self.assertTrue(self.self_asmnt_page.is_complete)

-    def _verify_staff_grade_section(self, expected_status, expected_message_title):
+    def _verify_staff_grade_section(self, expected_status):
         """
         Verifies the expected status and message text in the Staff Grade section
         (as shown to the learner).

@@ -187,8 +188,6 @@ class OpenAssessmentTest(WebAppTest):
         self.staff_asmnt_page.wait_for_page()
         self.assertEqual("Staff Grade", self.staff_asmnt_page.label)
         self.staff_asmnt_page.verify_status_value(expected_status)
-        message_title = self.staff_asmnt_page.open_step().message_title
-        self.assertEqual(expected_message_title, message_title)

     def do_training(self):
         """

@@ -273,7 +272,7 @@ class OpenAssessmentTest(WebAppTest):
                 self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out - 1))
                 break
             else:
                 ungraded -= 1
                 self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))

     def refresh_page(self):

@@ -337,14 +336,16 @@ class StaffAssessmentTest(OpenAssessmentTest):
         self.submission_page.visit()

         # Verify that staff grade step is shown initially
-        self._verify_staff_grade_section("NOT AVAILABLE", None)
+        self._verify_staff_grade_section("NOT AVAILABLE")

         # User submits a response
         self.submission_page.submit_response(self.SUBMISSION)
         self.assertTrue(self.submission_page.has_submitted)

         # Verify staff grade section appears as expected
-        self._verify_staff_grade_section("NOT AVAILABLE", "Waiting for a Staff Grade")
+        self._verify_staff_grade_section("NOT AVAILABLE")
+        message_title = self.staff_asmnt_page.open_step().message_title
+        self.assertEqual("Waiting for a Staff Grade", message_title)

         # Perform staff assessment
         self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

@@ -352,7 +353,7 @@ class StaffAssessmentTest(OpenAssessmentTest):
         # Verify staff grade section appears as expected
         self.staff_asmnt_page.visit()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
+        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
         self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)

         # Verify that staff scores can be overriden

@@ -389,18 +390,25 @@ class PeerAssessmentTest(OpenAssessmentTest):
         self.do_peer_assessment()


-class PeerAssessmentTestStaffOverride(OpenAssessmentTest):
+class StaffOverrideTest(OpenAssessmentTest):
     """
-    Test setting a staff override on a problem which requires peer assessment.
+    Test setting a staff override on a problem which requires peer or self assessment.
+
+    This is used as a base class, as the problem type defined by subclasses must be known in setUp().
     """
+    def __init__(self, *args, **kwargs):
+        super(StaffOverrideTest, self).__init__(*args, **kwargs)
+        self.problem_type = None
+
     def setUp(self):
-        super(PeerAssessmentTestStaffOverride, self).setUp('peer_only', staff=True)
+        if self.problem_type is None:
+            self.fail("Please define self.problem_type in a sub-class")
+        super(StaffOverrideTest, self).setUp(self.problem_type, staff=True)
         self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

     @retry()
     @attr('acceptance')
-    def test_staff_override(self):
+    def _test_staff_override(self):
         """
         Scenario: staff can override a learner's grade

@@ -408,21 +416,13 @@ class PeerAssessmentTestStaffOverride(OpenAssessmentTest):
         And if I create a response to the problem
         Then there is no Staff Grade section present
         And if a staff member creates a grade override
-        Then when I refresh the page, I see that a staff override exists
-        And the message says that I must complete my steps to view the grade
-        And if I submit required peer assessments
-        Then the Staff Grade section is marked complete with no message
-        And I can see my final grade, even though no peers have assessed me
+        Then I can see my final grade, even though no peers have assessed me
         """
-        # Create two students with a submission each so that there are 2 submissions to assess.
-        for _ in range(0, 2):
-            self.auto_auth_page.visit()
-            self.submission_page.visit().submit_response(self.SUBMISSION)
-
-        # Create a submission for the third student (used for the remainder of the test).
+        # Create a submission
         self.auto_auth_page.visit()
         username, _ = self.auto_auth_page.get_username_and_email()
         self.submission_page.visit().submit_response(self.SUBMISSION)

         # Staff Grade field should not be visible yet.
         self.assertFalse(self.staff_asmnt_page.is_browser_on_page())

@@ -431,20 +431,38 @@ class PeerAssessmentTestStaffOverride(OpenAssessmentTest):
         # Refresh the page so the learner sees the Staff Grade section.
         self.refresh_page()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
-
-        # Verify no final grade yet.
-        self.assertIsNone(self.grade_page.wait_for_page().score)
-
-        # Assess two submissions
-        self.do_peer_assessment(count=2)
-
-        # Staff grade section is now marked complete, even though no students have submitted
-        # assessments for this particular student (no longer required since staff grade exists).
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
+        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)

         # Verify the staff override grade
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)


+class StaffOverrideSelfTest(StaffOverrideTest):
+    """
+    Subclass of StaffOverrideTest for a 'self_only' problem.
+    """
+    def __init__(self, *args, **kwargs):
+        super(StaffOverrideSelfTest, self).__init__(*args, **kwargs)
+        self.problem_type = 'self_only'
+
+    @retry()
+    @attr('acceptance')
+    def test_staff_override(self):
+        super(StaffOverrideSelfTest, self)._test_staff_override()
+
+
+class StaffOverridePeerTest(StaffOverrideTest):
+    """
+    Subclass of StaffOverrideTest for a 'peer_only' problem.
+    """
+    def __init__(self, *args, **kwargs):
+        super(StaffOverridePeerTest, self).__init__(*args, **kwargs)
+        self.problem_type = 'peer_only'
+
+    @retry()
+    @attr('acceptance')
+    def test_staff_override(self):
+        super(StaffOverridePeerTest, self)._test_staff_override()
+

 class StudentTrainingTest(OpenAssessmentTest):

@@ -522,8 +540,8 @@ class StaffAreaTest(OpenAssessmentTest):
         self.assertEqual(self.staff_area_page.visible_staff_panels, [])

         for panel_name, button_label in [
             ("staff-tools", "MANAGE INDIVIDUAL LEARNERS"),
             ("staff-info", "VIEW ASSIGNMENT STATISTICS"),
         ]:
             # Click on the button and verify that the panel has opened
             self.staff_area_page.click_staff_toolbar_button(panel_name)

@@ -678,58 +696,6 @@ class StaffAreaTest(OpenAssessmentTest):
     @retry()
     @attr('acceptance')
-    def test_staff_grade_override(self):
-        """
-        Scenario: the staff grade section displays correctly
-
-        Given I am viewing a new self assessment problem as a learner
-        Then there is no Staff Grade section present
-        And if I create a response to the problem
-        Then there is no Staff Grade section present
-        And if a staff member creates a grade override
-        Then when I refresh the page, I see that a staff override exists
-        And the message says that I must complete my steps to view the grade
-        And if I submit my self-assessment
-        Then the Staff Grade section is marked complete with no message
-        And I can see my final grade
-        """
-        # View the problem-- no Staff Grade area.
-        self.auto_auth_page.visit()
-        username, _ = self.auto_auth_page.get_username_and_email()
-        self.submission_page.visit()
-        self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
-
-        self.submission_page.submit_response(self.SUBMISSION)
-        self.assertTrue(self.submission_page.has_submitted)
-        self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
-
-        # Submit a staff override
-        self.do_staff_override(username, self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
-
-        # Refresh the page so the learner sees the Staff Grade section.
-        self.refresh_page()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
-
-        # Verify no final grade yet.
-        self.assertIsNone(self.grade_page.wait_for_page().score)
-
-        # Verify required staff grading section not available
-        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
-        self.assertFalse(self.staff_area_page.is_button_visible('staff-grading'))
-
-        # Learner does required self-assessment
-        self.self_asmnt_page.wait_for_page().wait_for_response()
-        self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
-        self.self_asmnt_page.assess(self.OPTIONS_SELECTED).wait_for_complete()
-        self.assertTrue(self.self_asmnt_page.is_complete)
-
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
-
-        # Verify the staff override grade
-        self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
-
-    @retry()
-    @attr('acceptance')
     def test_staff_grade_override_cancelled(self):
         """
         Scenario: the staff grade section displays cancelled when the submission is cancelled

@@ -750,7 +716,7 @@ class StaffAreaTest(OpenAssessmentTest):
         # Refresh the page so the learner sees the Staff Grade section shows the submission has been cancelled.
         self.refresh_page()
-        self._verify_staff_grade_section("CANCELLED", None)
+        self._verify_staff_grade_section("CANCELLED")
         self.assertIsNone(self.grade_page.wait_for_page().score)

@@ -881,7 +847,7 @@ class FullWorkflowMixin(object):
         # At this point, the learner sees the score (1).
         self.refresh_page()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
+        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)

         if peer_grades_me:

@@ -1035,7 +1001,7 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
         self.do_staff_override(learner)
         self.refresh_page()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
+        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)

         self.verify_staff_area_fields(
             learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT

@@ -1066,10 +1032,7 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
         Given that I have created a submission
         Then I see no score yet
         And when a staff member creates a grade override
-        Then I see that an override exists, but I cannot see the score
-        And when a second learner creates a submission
-        Then I can complete my required steps (training, self assessment, peer assesssment)
-        And I see my staff override score
+        Then I see my staff override score
         And all fields in the staff area tool are correct
         """
         # Create only the initial submission before doing the staff override.

@@ -1081,52 +1044,30 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
         self.verify_staff_area_fields(learner, [], [], [])
         self.staff_area_page.verify_learner_final_score(self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)

-        # Do staff override -- score still not shown due to steps not being complete.
+        # Do staff override
         self.do_staff_override(learner, self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)

         # Refresh the page so the learner sees the Staff Grade section.
         self.refresh_page()
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
-
-        # Now create a second learner so that "learner" has someone to assess.
-        self.do_submission()
-
-        # Go back to the original learner to complete her workflow and view score.
-        self.login_user(learner, learner_email)
-
-        # Do training exercise and self assessment
-        self.student_training_page.visit()
-        self.do_training()
-        self.submit_self_assessment(self.SELF_ASSESSMENT)
-
-        # Verify staff grade still not available, as learner has not done peer assessment.
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
-        self.assertIsNone(self.grade_page.wait_for_page().score)
-        self.verify_staff_area_fields(learner, [], [], self.STAFF_AREA_SELF_ASSESSMENT)
-        self.staff_area_page.verify_learner_final_score(self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
-
-        # Now do the final required step-- peer grading.
-        self.do_peer_assessment(options=self.SUBMITTED_ASSESSMENT)
-
-        # Grade is now visible to the learner (even though no student has graded the learner).
-        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
+        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
+
+        # Grade is now visible to the learner despite not having made any assessments
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
-        self.verify_staff_area_fields(learner, [], self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT)
+        self.verify_staff_area_fields(learner, [], [], [])
         self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
         self.assertEquals(
-            ['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
+            ['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE'],
             self.staff_area_page.learner_final_score_table_headers
         )
         self.assertEquals(
-            ['Poor - 0 points', 'Waiting for peer reviews', 'Good',
-             'Fair - 1 point', 'Waiting for peer reviews', 'Excellent'],
+            ['Poor - 0 points', 'Waiting for peer reviews',
+             'Fair - 1 point', 'Waiting for peer reviews'],
             self.staff_area_page.learner_final_score_table_values
         )

         self.verify_grade_entries([
             [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
             [(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'), (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
-            [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
         ])

@@ -1163,7 +1104,8 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
 @ddt.ddt
 class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
     """
-    Test for a problem that containing a criterion that only accepts feedback. Will make and verify self and staff assessments.
+    Test for a problem that containing a criterion that only accepts feedback. Will make and verify self and staff
+    assessments.
     """
     def setUp(self):
         super(FeedbackOnlyTest, self).setUp("feedback_only", staff=True)

@@ -1196,7 +1138,7 @@ class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
         # Staff assess all available submissions
         self.do_staff_assessment(
             options_selected=[0],  # Select the 0-th option (Yes) on the single scored criterion
             feedback=lambda feedback_type: self.generate_feedback("staff", feedback_type)
         )