Commit 6cf2742e by Vik Paruchuri

Trim unneeded files

parent 81b86bb8
"""
This module provides an interface on the grading-service backend
for peer grading
Use peer_grading_service() to get the version specified
in settings.PEER_GRADING_INTERFACE
"""
import json
import logging

from django.conf import settings
from django.http import HttpResponse, Http404
from lxml import etree

from grading_service import GradingService
from grading_service import GradingServiceError
from student.models import unique_id_for_user

log = logging.getLogger(__name__)
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
"""
class MockPeerGradingService(object):
def get_next_submission(self, problem_location, grader_id):
return json.dumps({'success': True,
'submission_id':1,
'submission_key': "",
'student_response': 'fake student response',
'prompt': 'fake submission prompt',
'rubric': 'fake rubric',
'max_score': 4})
def save_grade(self, location, grader_id, submission_id,
score, feedback, submission_key):
return json.dumps({'success': True})
def is_student_calibrated(self, problem_location, grader_id):
return json.dumps({'success': True, 'calibrated': True})
def show_calibration_essay(self, problem_location, grader_id):
return json.dumps({'success': True,
'submission_id':1,
'submission_key': '',
'student_response': 'fake student response',
'prompt': 'fake submission prompt',
'rubric': 'fake rubric',
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
calibration_essay_id, submission_key, score, feedback):
return {'success': True, 'actual_score': 2}
def get_problem_list(self, course_id, grader_id):
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5})
]})
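
# A minimal usage sketch (hypothetical test code, not part of this module);
# the location and grader values are illustrative only:
#
#     service = MockPeerGradingService()
#     data = json.loads(service.get_next_submission('i4x://org/course/problem/demo',
#                                                   'grader-1'))
#     assert data['success'] and data['max_score'] == 4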

class PeerGradingService(GradingService):
    """
    Interface with the grading controller for peer grading.
    """
    def __init__(self, config):
        super(PeerGradingService, self).__init__(config)
        self.get_next_submission_url = self.url + '/get_next_submission/'
        self.save_grade_url = self.url + '/save_grade/'
        self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
        self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
        self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
        self.get_problem_list_url = self.url + '/get_problem_list/'
        self.get_notifications_url = self.url + '/get_notifications/'

    def get_next_submission(self, problem_location, grader_id):
        response = self.get(self.get_next_submission_url,
                            {'location': problem_location, 'grader_id': grader_id})
        return json.dumps(self._render_rubric(response))

    def save_grade(self, location, grader_id, submission_id, score, feedback,
                   submission_key, rubric_scores, submission_flagged):
        data = {'grader_id': grader_id,
                'submission_id': submission_id,
                'score': score,
                'feedback': feedback,
                'submission_key': submission_key,
                'location': location,
                'rubric_scores': rubric_scores,
                'rubric_scores_complete': True,
                'submission_flagged': submission_flagged}
        return self.post(self.save_grade_url, data)

    def is_student_calibrated(self, problem_location, grader_id):
        params = {'problem_id': problem_location, 'student_id': grader_id}
        return self.get(self.is_student_calibrated_url, params)

    def show_calibration_essay(self, problem_location, grader_id):
        params = {'problem_id': problem_location, 'student_id': grader_id}
        response = self.get(self.show_calibration_essay_url, params)
        return json.dumps(self._render_rubric(response))

    def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id,
                               submission_key, score, feedback, rubric_scores):
        data = {'location': problem_location,
                'student_id': grader_id,
                'calibration_essay_id': calibration_essay_id,
                'submission_key': submission_key,
                'score': score,
                'feedback': feedback,
                'rubric_scores[]': rubric_scores,
                'rubric_scores_complete': True}
        return self.post(self.save_calibration_essay_url, data)

    def get_problem_list(self, course_id, grader_id):
        params = {'course_id': course_id, 'student_id': grader_id}
        return self.get(self.get_problem_list_url, params)

    def get_notifications(self, course_id, grader_id):
        params = {'course_id': course_id, 'student_id': grader_id}
        return self.get(self.get_notifications_url, params)
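
# A minimal sketch of a save_grade call (all argument values are
# illustrative only):
#
#     service = PeerGradingService(settings.PEER_GRADING_INTERFACE)
#     service.save_grade('i4x://org/course/problem/demo', 'grader-1', '17',
#                        3, 'Clear thesis; weak evidence.', 'abcd',
#                        [1, 2], False)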

_service = None


def peer_grading_service():
    """
    Return a peer grading service instance: a mock one if
    settings.MOCK_PEER_GRADING is True, otherwise a real one.

    Caches the result, so changing the setting after the first call to this
    function will have no effect.
    """
    global _service
    if _service is not None:
        return _service

    if settings.MOCK_PEER_GRADING:
        _service = MockPeerGradingService()
    else:
        _service = PeerGradingService(settings.PEER_GRADING_INTERFACE)
    return _service
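
# A minimal sketch of how a caller uses the factory (the names above are
# real; the location and grader values here are illustrative only):
#
#     service = peer_grading_service()
#     raw_json = service.is_student_calibrated('i4x://org/course/problem/demo',
#                                              'grader-1')
#     calibrated = json.loads(raw_json).get('calibrated')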

def _err_response(msg):
    """
    Return an HttpResponse with a JSON dump: success=False, plus the given
    error message.
    """
    return HttpResponse(json.dumps({'success': False, 'error': msg}),
                        mimetype="application/json")


def _check_required(request, required):
    """
    Verify that every key in `required` is present in request.POST.
    Returns (success, error_message).
    """
    actual = set(request.POST.keys())
    missing = required - actual
    if len(missing) > 0:
        return False, "Missing required keys: {0}".format(', '.join(missing))
    else:
        return True, ""


def _check_post(request):
    if request.method != 'POST':
        raise Http404

def get_next_submission(request, course_id):
    """
    Make a call to the grading controller for the next essay that should
    be graded.

    Returns a JSON dict with the following keys:
        'success': bool
        'submission_id': a unique identifier for the submission, to be passed
            back with the grade
        'submission': the submission, rendered as read-only HTML for grading
        'rubric': the rubric, also rendered as HTML
        'submission_key': a key associated with the submission for validation
        'error': if success is False, an error message with more info
    """
    _check_post(request)
    required = set(['location'])
    success, message = _check_required(request, required)
    if not success:
        return _err_response(message)
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']

    try:
        response = peer_grading_service().get_next_submission(location, grader_id)
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        log.exception("Error getting next submission. server url: {0}, location: {1}, grader_id: {2}"
                      .format(peer_grading_service().url, location, grader_id))
        # Return an HttpResponse, not a raw string, so Django sends a valid response.
        return _err_response('Could not connect to grading service')
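
# A minimal sketch of the request contract (hypothetical Django test-client
# call; the URL path is an assumption and depends on the project's urlconf):
#
#     from django.test.client import Client
#     resp = Client().post('/peer_grading/get_next_submission',
#                          {'location': 'i4x://org/course/problem/demo'})
#     # resp.content is JSON with success / submission_id / submission_key / ...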

def save_grade(request, course_id):
    """
    Save the grade of a given submission.

    The request should have the following keys:
        location - problem location
        submission_id - id associated with this submission
        submission_key - submission key given for validation purposes
        score - the grade that was given to the submission
        feedback - the feedback from the grader

    Returns a JSON object with the following keys:
        success: bool indicating whether the save was a success
        error: if there was an error in the submission, this is the error message
    """
    _check_post(request)
    required = set(['location', 'submission_id', 'submission_key', 'score',
                    'feedback', 'rubric_scores[]', 'submission_flagged'])
    success, message = _check_required(request, required)
    if not success:
        return _err_response(message)
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']
    submission_id = p['submission_id']
    score = p['score']
    feedback = p['feedback']
    submission_key = p['submission_key']
    rubric_scores = p.getlist('rubric_scores[]')
    submission_flagged = p['submission_flagged']

    try:
        response = peer_grading_service().save_grade(location, grader_id, submission_id,
                                                     score, feedback, submission_key,
                                                     rubric_scores, submission_flagged)
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        log.exception("Error saving grade. server url: {0}, location: {1}, "
                      "submission_id: {2}, submission_key: {3}, score: {4}"
                      .format(peer_grading_service().url, location,
                              submission_id, submission_key, score))
        return _err_response('Could not connect to grading service')

def is_student_calibrated(request, course_id):
    """
    Call the grading controller to see if the given student is calibrated
    on the given problem.

    The request must include:
        location - problem location

    Returns a JSON object with the following keys:
        success - bool indicating whether or not the call was successful
        calibrated - true if the grader has fully calibrated and can now move
            on to grading; false if the grader is still working on calibration
            problems
        total_calibrated_on_so_far - the number of calibration essays for this
            problem that this grader has graded
    """
    _check_post(request)
    required = set(['location'])
    success, message = _check_required(request, required)
    if not success:
        return _err_response(message)
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']

    try:
        response = peer_grading_service().is_student_calibrated(location, grader_id)
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        log.exception("Error from grading service. server url: {0}, grader_id: {1}, location: {2}"
                      .format(peer_grading_service().url, grader_id, location))
        return _err_response('Could not connect to grading service')

def show_calibration_essay(request, course_id):
    """
    Fetch the next calibration essay from the grading controller and return it.

    The request must include:
        location - problem location

    Returns a JSON dict with the following keys:
        'success': bool
        'submission_id': a unique identifier for the submission, to be passed
            back with the grade
        'submission': the submission, rendered as read-only HTML for grading
        'rubric': the rubric, also rendered as HTML
        'submission_key': a key associated with the submission for validation
        'error': if success is False, an error message with more info
    """
    _check_post(request)
    required = set(['location'])
    success, message = _check_required(request, required)
    if not success:
        return _err_response(message)
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']

    try:
        response = peer_grading_service().show_calibration_essay(location, grader_id)
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        log.exception("Error from grading service. server url: {0}, location: {1}"
                      .format(peer_grading_service().url, location))
        return _err_response('Could not connect to grading service')
    # raised if we can't parse the rubric string into HTML
    except etree.XMLSyntaxError:
        log.exception("Cannot parse rubric string for location: {0}"
                      .format(location))
        return _err_response('Error displaying submission')

def save_calibration_essay(request, course_id):
    """
    Save the grader's score for a given calibration essay.

    The request should have the following keys:
        location - problem location
        submission_id - id associated with this submission
        submission_key - submission key given for validation purposes
        score - the grade that was given to the submission
        feedback - the feedback from the grader

    Returns a JSON object with the following keys:
        success: bool indicating whether the save was a success
        error: if there was an error in the submission, this is the error message
        actual_score: the score that the instructor gave to this calibration essay
    """
    _check_post(request)
    required = set(['location', 'submission_id', 'submission_key', 'score',
                    'feedback', 'rubric_scores[]'])
    success, message = _check_required(request, required)
    if not success:
        return _err_response(message)
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']
    calibration_essay_id = p['submission_id']
    submission_key = p['submission_key']
    score = p['score']
    feedback = p['feedback']
    rubric_scores = p.getlist('rubric_scores[]')

    try:
        response = peer_grading_service().save_calibration_essay(location, grader_id,
                                                                 calibration_essay_id,
                                                                 submission_key, score,
                                                                 feedback, rubric_scores)
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        # Log calibration_essay_id here; `submission_id` is not defined in this scope.
        log.exception("Error saving calibration grade, location: {0}, calibration_essay_id: {1}, "
                      "submission_key: {2}, grader_id: {3}".format(location, calibration_essay_id,
                                                                   submission_key, grader_id))
        return _err_response('Could not connect to grading service')
@@ -12,8 +12,6 @@ from django.core.urlresolvers import reverse
from student.models import unique_id_for_user
from courseware.courses import get_course_with_access
from peer_grading_service import PeerGradingService
from peer_grading_service import MockPeerGradingService
from controller_query_service import ControllerQueryService
from grading_service import GradingServiceError
import json
@@ -39,10 +37,6 @@ from django.http import HttpResponse, Http404, HttpResponseRedirect
log = logging.getLogger(__name__)
template_imports = {'urllib': urllib}
if settings.MOCK_PEER_GRADING:
    peer_gs = MockPeerGradingService()
else:
    peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE)
controller_url = open_ended_util.get_controller_url()
controller_qs = ControllerQueryService(controller_url)
# This is a simple class that just hides the error container
# and message container when they are empty.
# Can (and should) be expanded upon when our problem list
# becomes more sophisticated.
class PeerGrading
  constructor: () ->
    @error_container = $('.error-container')
    @error_container.toggle(not @error_container.is(':empty'))

    @message_container = $('.message-container')
    @message_container.toggle(not @message_container.is(':empty'))

    @problem_list = $('.problem-list')
    @construct_progress_bar()

  construct_progress_bar: () =>
    # skip the header row, then build a jQuery UI progress bar for each problem row
    problems = @problem_list.find('tr').next()
    problems.each((index, element) =>
      problem = $(element)
      progress_bar = problem.find('.progress-bar')
      bar_value = parseInt(problem.data('graded'))
      bar_max = parseInt(problem.data('required')) + bar_value
      progress_bar.progressbar({value: bar_value, max: bar_max})
    )
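
# A sketch of the row markup this expects (hypothetical: the real rows are
# rendered by a server-side template elsewhere). Each problem row carries
# data-graded / data-required attributes and a .progress-bar element:
#   <tr data-graded="3" data-required="5"><td class="progress-bar"></td></tr>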
$(document).ready(() -> new PeerGrading())
##################################
#
# This is the JS that renders the peer grading problem page.
# Fetches the correct problem and/or calibration essay
# and sends back the grades
#
# Should not be run when we don't have a location to send back
# to the server
#
# PeerGradingProblemBackend -
# makes all the ajax requests and provides a mock interface
# for testing purposes
#
# PeerGradingProblem -
# handles the rendering and user interactions with the interface
#
##################################
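
# A minimal sketch of wiring these classes together in mock mode, useful for
# exercising the UI without a running grading service. (The URL value is a
# placeholder; the mock backend never issues a request.)
#
#   backend = new PeerGradingProblemBackend('/unused/', true)
#   $(document).ready(() -> new PeerGradingProblem(backend))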
class PeerGradingProblemBackend
  constructor: (ajax_url, mock_backend) ->
    @mock_backend = mock_backend
    @ajax_url = ajax_url
    @mock_cnt = 0

  post: (cmd, data, callback) ->
    if @mock_backend
      callback(@mock(cmd, data))
    else
      # if this post request fails, the error callback will catch it
      $.post(@ajax_url + cmd, data, callback)
        .error => callback({success: false, error: "Error occurred while performing this operation"})
  mock: (cmd, data) ->
    if cmd == 'is_student_calibrated'
      # change to test each version
      response =
        success: true
        calibrated: @mock_cnt >= 2
    else if cmd == 'show_calibration_essay'
      # uncomment to test the error path:
      #response =
      #  success: false
      #  error: "There was an error"
      @mock_cnt++
      response =
        success: true
        submission_id: 1
        submission_key: 'abcd'
        student_response: '''
Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
        '''
        prompt: '''
<h2>S11E3: Metal Bands</h2>
<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
        '''
        rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
        '''
        max_score: 4
    else if cmd == 'get_next_submission'
      response =
        success: true
        submission_id: 1
        submission_key: 'abcd'
        student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
        prompt: '''
<h2>S11E3: Metal Bands</h2>
<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
        '''
        rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
        '''
        max_score: 4
    else if cmd == 'save_calibration_essay'
      response =
        success: true
        actual_score: 2
    else if cmd == 'save_grade'
      response =
        success: true
    return response

class PeerGradingProblem
  constructor: (backend) ->
    @prompt_wrapper = $('.prompt-wrapper')
    @backend = backend

    # get the location of the problem
    @location = $('.peer-grading').data('location')
    # prevent this code from trying to run when we don't have a location
    if(!@location)
      return

    # get the other elements we want to fill in
    @submission_container = $('.submission-container')
    @prompt_container = $('.prompt-container')
    @rubric_container = $('.rubric-container')
    @flag_student_container = $('.flag-student-container')
    @calibration_panel = $('.calibration-panel')
    @grading_panel = $('.grading-panel')
    @content_panel = $('.content-panel')
    @grading_message = $('.grading-message')
    @grading_message.hide()
    @grading_wrapper = $('.grading-wrapper')
    @calibration_feedback_panel = $('.calibration-feedback')
    @interstitial_page = $('.interstitial-page')
    @interstitial_page.hide()
    @error_container = $('.error-container')

    @submission_key_input = $("input[name='submission-key']")
    @essay_id_input = $("input[name='essay-id']")
    @feedback_area = $('.feedback-area')
    @score_selection_container = $('.score-selection-container')
    @rubric_selection_container = $('.rubric-selection-container')
    @grade = null
    @calibration = null

    @submit_button = $('.submit-button')
    @action_button = $('.action-button')
    @calibration_feedback_button = $('.calibration-feedback-button')
    @interstitial_page_button = $('.interstitial-page-button')
    @flag_student_checkbox = $('.flag-checkbox')

    Collapsible.setCollapsibles(@content_panel)

    # Set up the click event handlers
    @action_button.click -> history.back()
    @calibration_feedback_button.click =>
      @calibration_feedback_panel.hide()
      @grading_wrapper.show()
      @is_calibrated_check()
    @interstitial_page_button.click =>
      @interstitial_page.hide()
      @is_calibrated_check()

    @is_calibrated_check()

  ##########
  #
  # Ajax calls to the backend
  #
  ##########
  is_calibrated_check: () =>
    @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback)

  fetch_calibration_essay: () =>
    @backend.post('show_calibration_essay', {location: @location}, @render_calibration)

  fetch_submission_essay: () =>
    @backend.post('get_next_submission', {location: @location}, @render_submission)
  # finds the scores for each rubric category
  get_score_list: () =>
    # find the number of categories
    num_categories = $('table.rubric tr').length
    score_lst = []
    # get the score for each one
    for i in [0..(num_categories - 1)]
      score = $("input[name='score-selection-#{i}']:checked").val()
      score_lst.push(score)
    return score_lst

  construct_data: () ->
    data =
      rubric_scores: @get_score_list()
      score: @grade
      location: @location
      submission_id: @essay_id_input.val()
      submission_key: @submission_key_input.val()
      feedback: @feedback_area.val()
      submission_flagged: @flag_student_checkbox.is(':checked')
    return data

  submit_calibration_essay: () =>
    data = @construct_data()
    @backend.post('save_calibration_essay', data, @calibration_callback)

  submit_grade: () =>
    data = @construct_data()
    @backend.post('save_grade', data, @submission_callback)
  ##########
  #
  # Callbacks for various events
  #
  ##########
  # called after we perform an is_student_calibrated check
  calibration_check_callback: (response) =>
    if response.success
      # if we haven't been calibrating before
      if response.calibrated and (@calibration == null or @calibration == false)
        @calibration = false
        @fetch_submission_essay()
      # If we were calibrating before and no longer need to,
      # show the interstitial page
      else if response.calibrated and @calibration == true
        @calibration = false
        @render_interstitial_page()
      else
        @calibration = true
        @fetch_calibration_essay()
    else if response.error
      @render_error(response.error)
    else
      @render_error("Error contacting the grading service")

  # called after we submit a calibration score
  calibration_callback: (response) =>
    if response.success
      @render_calibration_feedback(response)
    else if response.error
      @render_error(response.error)
    else
      @render_error("Error saving calibration score")

  # called after we submit a submission score
  submission_callback: (response) =>
    if response.success
      @is_calibrated_check()
      @grading_message.fadeIn()
      @grading_message.html("<p>Grade sent successfully.</p>")
    else
      if response.error
        @render_error(response.error)
      else
        @render_error("Error occurred while submitting grade")

  # called after a grade is selected on the interface
  graded_callback: (event) =>
    @grade = $("input[name='grade-selection']:checked").val()
    if @grade == undefined
      return
    # check whether any rubric categories have not been scored yet
    num_categories = $('table.rubric tr').length
    for i in [0..(num_categories - 1)]
      score = $("input[name='score-selection-#{i}']:checked").val()
      if score == undefined
        return
    # show the submit button once we have scores for all categories
    @show_submit_button()
  ##########
  #
  # Rendering methods and helpers
  #
  ##########
  # renders a calibration essay
  render_calibration: (response) =>
    if response.success
      # load in all the data
      @submission_container.html("<h3>Training Essay</h3>")
      @render_submission_data(response)

      # indicate that we're in calibration mode
      @calibration_panel.addClass('current-state')
      @grading_panel.removeClass('current-state')

      # Display the right text.
      # Both versions of the text are written into the template itself;
      # we only need to show/hide the correct ones at the correct time.
      @calibration_panel.find('.calibration-text').show()
      @grading_panel.find('.calibration-text').show()
      @calibration_panel.find('.grading-text').hide()
      @grading_panel.find('.grading-text').hide()
      @flag_student_container.hide()

      @submit_button.unbind('click')
      @submit_button.click @submit_calibration_essay
    else if response.error
      @render_error(response.error)
    else
      @render_error("An error occurred while retrieving the next calibration essay")

  # renders a student submission to be graded
  render_submission: (response) =>
    if response.success
      @submit_button.hide()
      @submission_container.html("<h3>Submitted Essay</h3>")
      @render_submission_data(response)

      @calibration_panel.removeClass('current-state')
      @grading_panel.addClass('current-state')

      # Display the correct text.
      # Both versions of the text are written into the template itself;
      # we only need to show/hide the correct ones at the correct time.
      @calibration_panel.find('.calibration-text').hide()
      @grading_panel.find('.calibration-text').hide()
      @calibration_panel.find('.grading-text').show()
      @grading_panel.find('.grading-text').show()
      @flag_student_container.show()

      @submit_button.unbind('click')
      @submit_button.click @submit_grade
    else if response.error
      @render_error(response.error)
    else
      @render_error("An error occurred while retrieving the next submission.")

  # wraps each blank-line-separated chunk of text in <p> tags
  make_paragraphs: (text) ->
    paragraph_split = text.split(/\n\s*\n/)
    new_text = ''
    for paragraph in paragraph_split
      new_text += "<p>#{paragraph}</p>"
    return new_text
  # render common information between calibration and grading
  render_submission_data: (response) =>
    @content_panel.show()
    @submission_container.append(@make_paragraphs(response.student_response))
    @prompt_container.html(response.prompt)
    @rubric_selection_container.html(response.rubric)
    @submission_key_input.val(response.submission_key)
    @essay_id_input.val(response.submission_id)
    @setup_score_selection(response.max_score)
    @submit_button.hide()
    @action_button.hide()
    @calibration_feedback_panel.hide()

  render_calibration_feedback: (response) =>
    # display the correct grade
    @calibration_feedback_panel.slideDown()
    calibration_wrapper = $('.calibration-feedback-wrapper')
    calibration_wrapper.html("<p>The score you gave was: #{@grade}. The actual score is: #{response.actual_score}</p>")

    score = parseInt(@grade)
    actual_score = parseInt(response.actual_score)
    if score == actual_score
      calibration_wrapper.append("<p>Congratulations! Your score matches the actual score!</p>")
    else
      calibration_wrapper.append("<p>Please try to understand the grading criteria better to be more accurate next time.</p>")

    # disable score selection and submission from the grading interface
    $("input[name='score-selection']").attr('disabled', true)
    @submit_button.hide()

  render_interstitial_page: () =>
    @content_panel.hide()
    @interstitial_page.show()

  render_error: (error_message) =>
    @error_container.show()
    @calibration_feedback_panel.hide()
    @error_container.html(error_message)
    @content_panel.hide()
    @action_button.show()

  show_submit_button: () =>
    @submit_button.show()

  setup_score_selection: (max_score) =>
    # first, get rid of all the old inputs, if any
    @score_selection_container.html("""
      <h3>Overall Score</h3>
      <p>Choose an overall score for this submission.</p>
      """)

    # now create new labels and inputs for each possible score
    for score in [0..max_score]
      id = 'score-' + score
      label = """<label for="#{id}">#{score}</label>"""
      input = """
        <input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
        """ # " fix broken parsing in emacs
      @score_selection_container.append(input + label)

    # and now hook up the change handlers again
    $("input[name='score-selection']").change @graded_callback
    $("input[name='grade-selection']").change @graded_callback

# set mock_backend to true to exercise the UI without a grading service
mock_backend = false
ajax_url = $('.peer-grading').data('ajax_url')
backend = new PeerGradingProblemBackend(ajax_url, mock_backend)
$(document).ready(() -> new PeerGradingProblem(backend))
<%inherit file="/main.html" />
<%block name="bodyclass">${course.css_class}</%block>
<%namespace name='static' file='/static_content.html'/>
<%block name="headextra">
<%static:css group='course'/>
</%block>
<%block name="title"><title>${course.number} Peer Grading</title></%block>
<%include file="/courseware/course_navigation.html" args="active_page='peer_grading'" />
<%block name="js_extra">
<%static:js group='peer_grading'/>
</%block>
${peer_grading_html|n}