Commit d2638ae5
authored Feb 07, 2014 by Stephen Sanchez

Adding some initial score logic into the APIs.

parent 4a9a153c
Showing 3 changed files with 135 additions and 21 deletions:

    apps/openassessment/peer/api.py                       +111  -16
    apps/openassessment/xblock/openassessmentblock.py      +14   -2
    apps/submissions/api.py                                 +10   -3
apps/openassessment/peer/api.py
@@ -11,8 +11,8 @@ from django.db import DatabaseError
 from openassessment.peer.models import PeerEvaluation
 from openassessment.peer.serializers import PeerEvaluationSerializer
-from submissions.models import Submission, StudentItem
+from submissions.models import Submission, StudentItem, Score
-from submissions.serializers import SubmissionSerializer
+from submissions.serializers import SubmissionSerializer, ScoreSerializer

 logger = logging.getLogger(__name__)
@@ -62,7 +62,12 @@ class PeerEvaluationInternalError(PeerEvaluationError):
     pass


-def create_evaluation(submission_uuid, scorer_id, assessment_dict,
-                      scored_at=None):
+def create_evaluation(
+        submission_uuid,
+        scorer_id,
+        required_evaluations_for_student,
+        required_evaluations_for_submission,
+        assessment_dict,
+        scored_at=None):
     """Creates an evaluation on the given submission.
@@ -75,6 +80,10 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
             Submission model.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
+        required_evaluations_for_student (int): The number of evaluations
+            required for the student to receive a score for their submission.
+        required_evaluations_for_submission (int): The number of evaluations
+            required on the submission for it to be scored.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
         scored_at (datetime): Optional argument to override the time in which
@@ -126,6 +135,35 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         if not peer_serializer.is_valid():
             raise PeerEvaluationRequestError(peer_serializer.errors)
         peer_serializer.save()
+
+        # Check if the submission is finished and its Author has graded enough.
+        student_item = submission.student_item
+        _check_if_finished_and_create_score(
+            student_item,
+            submission,
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
+        # Check if the grader is finished and has enough evaluations
+        scorer_item = StudentItem.objects.get(
+            student_id=scorer_id,
+            item_id=student_item.item_id,
+            course_id=student_item.course_id,
+            item_type=student_item.item_type
+        )
+
+        scorer_submissions = Submission.objects.filter(
+            student_item=scorer_item
+        ).order_by("-attempt_number")
+
+        _check_if_finished_and_create_score(
+            scorer_item,
+            scorer_submissions[0],
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
         return peer_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
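With this change a caller supplies the two evaluation thresholds positionally, between scorer_id and assessment_dict. A minimal sketch of a call site, assuming illustrative threshold values and assessment contents (none of these literals come from this commit, and the exact shape of assessment_dict beyond the three documented keys is an assumption):

    from openassessment.peer import api as peer_api

    evaluation = peer_api.create_evaluation(
        "a-submission-uuid",       # submission_uuid of the submission being assessed
        "scorer_student_id",       # scorer_id of the student giving the assessment
        3,                         # required_evaluations_for_student (hypothetical)
        3,                         # required_evaluations_for_submission (hypothetical)
        {                          # assessment_dict, per the docstring above
            "points_earned": [2, 3, 1],   # per-criterion points (illustrative)
            "points_possible": 12,
            "feedback": "Clear argument; could use more evidence.",
        },
    )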
@@ -137,6 +175,58 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         raise PeerEvaluationInternalError(error_message)


+def _check_if_finished_and_create_score(student_item,
+                                        submission,
+                                        required_evaluations_for_student,
+                                        required_evaluations_for_submission):
+    """Basic function for checking if a student is finished with peer workflow.
+
+    Checks if the student is finished with the peer evaluation workflow. If the
+    student already has a final grade calculated, there is no need to proceed.
+    If they do not have a grade, the student has a final grade calculated.
+    """
+    if Score.objects.filter(student_item=student_item):
+        return
+
+    finished_evaluating = has_finished_required_evaluating(
+        student_item.student_id,
+        int(required_evaluations_for_student)
+    )
+    evaluations = PeerEvaluation.objects.filter(submission=submission).order_by("-points_earned")
+    submission_finished = evaluations.count() >= int(required_evaluations_for_submission)
+    scores = []
+    for evaluation in evaluations:
+        scores.append(evaluation.points_earned)
+    if finished_evaluating and submission_finished:
+        # Create a score for the submission author
+        score = ScoreSerializer(data={
+            "student_item": student_item.pk,
+            "submission": submission.pk,
+            "points_earned": _calculate_final_score(scores),
+            "points_possible": evaluations[0].points_possible,
+        })
+        if not score.is_valid():
+            raise PeerEvaluationInternalError("Could not create a score")
+        return score.save()
+
+
+def _calculate_final_score(scores):
+    """Final grade is calculated using integer values, rounding up.
+
+    If there is a true median score, it is returned. If there are two median
+    values, the average of those two values is returned, rounded up to the
+    greatest integer value.
+    """
+    total_scores = len(scores)
+    if total_scores % 2:
+        return scores[total_scores / 2]
+    else:
+        return (scores[total_scores / 2] + scores[total_scores / 2 + 1]) / 2
+
+
 def has_finished_required_evaluating(student_id, required_evaluations):
     """Check if a student still needs to evaluate more submissions
@@ -228,7 +318,7 @@ def get_evaluations(submission_id):
         raise PeerEvaluationInternalError(error_message)


-def get_submission_to_evaluate(student_item_dict):
+def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
     """Get a submission to peer evaluate.

     Retrieves a submission for evaluation for the given student_item. This will
@@ -243,6 +333,8 @@ def get_submission_to_evaluate(student_item_dict):
             item_id, course_id, and item_type, used to identify the unique
             question for the review, while the student_id is used to explicitly
             avoid giving the student their own submission.
+        required_num_evaluations (int): The number of evaluations a submission
+            requires before it has completed the peer evaluation process.

     Returns:
         dict: A peer submission for evaluation. This contains a 'student_item',
@@ -279,16 +371,11 @@ def get_submission_to_evaluate(student_item_dict):
         item_id=student_item_dict["item_id"],
     ).exclude(student_id=student_item_dict["student_id"])

-    student_evaluations = PeerEvaluation.objects.filter(
-        scorer_id=student_item_dict["student_id"]
-    )
-
-    # TODO: We need a priority queue.
-    submissions = Submission.objects.filter(
-        student_item__in=student_items
-    ).order_by("submitted_at", "-attempt_number")
-
-    submission = _get_first_submission_not_evaluated(submissions, student_evaluations)
+    submission = _get_first_submission_not_evaluated(
+        student_items,
+        student_item_dict["student_id"],
+        required_num_evaluations
+    )
     if not submission:
         raise PeerEvaluationWorkflowError(
             "There are no submissions available for evaluation."
@@ -296,10 +383,17 @@ def get_submission_to_evaluate(student_item_dict):
     return SubmissionSerializer(submission).data


-def _get_first_submission_not_evaluated(submissions, student_evaluations):
+def _get_first_submission_not_evaluated(student_items, student_id,
+                                        required_num_evaluations):
+    # TODO: We need a priority queue.
+    submissions = Submission.objects.filter(
+        student_item__in=student_items
+    ).order_by("submitted_at", "-attempt_number")
     for submission in submissions:
-        already_evaluated = False
-        for evaluation in student_evaluations:
-            already_evaluated = already_evaluated or submission == evaluation.submission
-        if not already_evaluated:
-            return submission
+        evaluations = PeerEvaluation.objects.filter(submission=submission)
+        if evaluations.count() < int(required_num_evaluations):
+            already_evaluated = False
+            for evaluation in evaluations:
+                already_evaluated = already_evaluated or evaluation.scorer_id == student_id
+            if not already_evaluated:
+                return submission
\ No newline at end of file
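On the fetch side, get_submission_to_evaluate now needs the per-submission threshold as well. A minimal sketch of requesting a submission to review, assuming a hypothetical student_item dict and an illustrative must_be_graded_by value of 3:

    from openassessment.peer import api as peer_api

    student_item_dict = {
        "student_id": "Bob",
        "course_id": "demo/course/2014",
        "item_id": "peer_question_1",
        "item_type": "openassessment",
    }

    try:
        submission = peer_api.get_submission_to_evaluate(student_item_dict, 3)
    except peer_api.PeerEvaluationWorkflowError:
        submission = False  # mirrors the xblock's handling in the next file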
apps/openassessment/xblock/openassessmentblock.py
@@ -72,7 +72,9 @@ class OpenAssessmentBlock(XBlock):
         student_item_dict = self._get_student_item_dict()
         previous_submissions = api.get_submissions(student_item_dict)
         try:
-            peer_submission = peer_api.get_submission_to_evaluate(student_item_dict)
+            # HACK: Replace with proper workflow.
+            peer_eval = self._hack_get_peer_eval()
+            peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
         except PeerEvaluationWorkflowError:
             peer_submission = False
@@ -103,8 +105,16 @@ class OpenAssessmentBlock(XBlock):
         frag.initialize_js('OpenAssessmentBlock')
         return frag

+    def _hack_get_peer_eval(self):
+        # HACK: Forcing Peer Eval, we'll get the Eval config.
+        for next_eval in self.rubric_evals:
+            if next_eval["type"] == "peereval":
+                return next_eval
+
     @XBlock.json_handler
     def assess(self, data, suffix=''):
+        # HACK: Replace with proper workflow.
+        peer_eval = self._hack_get_peer_eval()
         """Place an assessment into Openassessment system"""
         # TODO: We're not doing points possible in a good way, need to refactor
         # the rubric criteria type, Joe has thoughts on this.
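_hack_get_peer_eval assumes each entry of self.rubric_evals is a dict carrying the evaluation type plus the two grading thresholds used in this file. A sketch of the shape this commit reads from that config (the numbers are illustrative; only the keys "type", "must_grade", and "must_be_graded_by" are referenced here):

    peer_eval = {
        "type": "peereval",
        "must_grade": 3,          # evaluations this student must complete before being scored
        "must_be_graded_by": 3,   # evaluations a submission needs before it can be scored
    }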
@@ -122,6 +132,8 @@ class OpenAssessmentBlock(XBlock):
             evaluation = peer_api.create_evaluation(
                 data["submission_uuid"],
                 student_item_dict["student_id"],
+                peer_eval["must_grade"],
+                peer_eval["must_be_graded_by"],
                 assessment_dict
             )
@@ -215,7 +227,7 @@ class OpenAssessmentBlock(XBlock):
                     </criterion>
                     <criterion name="clearheaded">
                         How clear is the thinking?
-                        <option val="0">The Unabomber</option>
+                        <option val="0">Yogi Berra</option>
                         <option val="1">Hunter S. Thompson</option>
                         <option val="2">Robert Heinlein</option>
                         <option val="3">Isaac Asimov</option>
apps/submissions/api.py
@@ -8,8 +8,8 @@ import logging
 from django.db import DatabaseError
 from django.utils.encoding import force_unicode

-from submissions.serializers import SubmissionSerializer, StudentItemSerializer
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer
-from submissions.models import Submission, StudentItem
+from submissions.models import Submission, StudentItem, Score

 logger = logging.getLogger(__name__)
@@ -217,7 +217,14 @@ def get_submissions(student_item_dict, limit=None):


 def get_score(student_item):
-    pass
+    student_item_model = StudentItem.objects.get(
+        student_id=student_item["student_id"],
+        course_id=student_item["course_id"],
+        item_id=student_item["item_id"],
+        item_type=student_item["item_type"]
+    )
+    scores = Score.objects.filter(student_item=student_item_model)
+    return ScoreSerializer(scores, many=True).data


 def get_scores(course_id, student_id, types=None):
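get_score now resolves the StudentItem from the dict it is handed and returns the serialized Score records for it. A minimal usage sketch, assuming a hypothetical student_item dict:

    from submissions import api as sub_api

    scores = sub_api.get_score({
        "student_id": "Bob",
        "course_id": "demo/course/2014",
        "item_id": "peer_question_1",
        "item_type": "openassessment",
    })
    # returns a list of serialized scores, e.g. something like
    # [{"points_earned": 8, "points_possible": 12, ...}] once a Score exists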