Commit 9918b7a2 by Ibrahim Awwal

Client library updates; grading functionality should all be working. Needs unit tests.

parent 559e4710
@@ -7,18 +7,18 @@ from grading_client.api import *
# Create 30 local students, 100 remote students, 2 instructors, and 5 graders.
-num_local, num_remote, num_instructors, num_graders = (30, 100, 2, 5)
+num_local, num_remote, num_instructors, num_graders = (30, 10, 2, 5)
local_students = [User(name="Student %d"%x, external_id="calx:%d"%(x+2000)).save() for x in xrange(num_local)]
remote_students = [User(name="Student %d"%x, external_id="edx:%d"%(x+1000)).save() for x in xrange(num_remote)]
instructors = [User(name="Instructor %d"%x, external_id="edx:%d"%x).save() for x in xrange(num_instructors)]
graders = [User(name="Grader %d"%x, external_id="edx:%d"%(x+100)).save() for x in xrange(num_graders)]
# Create 5 questions
-num_questions = 5
+num_questions = 3
questions = {}
group_names = ['local', 'remote1']
for variant in group_names:
-questions[variant] = [Question(external_id="calx_q:%d"%x, total_points=2, due_date=datetime.datetime.now()).save() for x in xrange(num_questions)]
+questions[variant] = [Question(external_id="calx_q:%d"%x, total_points=2, due_date=datetime.datetime.now()).save() for x in xrange(num_questions)]
# Submit submissions for all users
# Keep track of a "ground-truth" value for the scoring somehow
@@ -28,13 +28,13 @@ local_submissions = {}
# local_submissions_true_scores = np.ndarray((num_local, num_questions, 3), dtype=np.bool)
local_true_scores = {}
for question in questions['local']:
-local_submissions[question] = [(Submission(question_id=question.id, user_id=user.id, external_id="calx_s:%d"%(user.id+1000*question.id))) for user in local_students]
-for submission in local_submissions[question]:
-submission.save()
-m1 = (random() > 0.8)
-m2 = (random() > 0.7)
-correct = not (m1 or m2)
-local_true_scores[submission.id] = (m1, m2, correct)
+local_submissions[question] = [(Submission(question_id=question.id, user_id=user.id, external_id="calx_s:%d"%(user.id+1000*question.id))) for user in local_students]
+for submission in local_submissions[question]:
+submission.save()
+m1 = (random() > 0.8)
+m2 = (random() > 0.7)
+correct = not (m1 or m2)
+local_true_scores[submission.id] = (m1, m2, correct)
# for user_index in xrange(num_local):
# for question_index in xrange(num_questions):
@@ -48,9 +48,9 @@ for question in questions['local']:
remote_submissions = {}
#remote_submissions_true_scores = np.ndarray((num_remote, num_questions, 3), dtype=np.bool)
for question in questions['remote1']:
-remote_submissions[question] = [Submission(question_id=question.id, user_id=user.id, external_id="edx_s:%d"%(user.id+1000*question.id)) for user in remote_students]
-for submission in remote_submissions[question]:
-submission.save()
+remote_submissions[question] = [Submission(question_id=question.id, user_id=user.id, external_id="edx_s:%d"%(user.id+1000*question.id)) for user in remote_students]
+for submission in remote_submissions[question]:
+submission.save()
# Instructor creates rubric
@@ -64,41 +64,50 @@ rubric.save() # Saves all the entries
# This doesn't quite get the interleaving of rubric creation and evaluation, but
# it shouldn't matter in practice
-inst1 = instructors[0]
-instructor_evals = []
-for question in questions['local']:
-for submission in local_submissions[question][:5]:
-entries_dict = { entry.id:value for entry, value in zip(rubric.entries, local_true_scores[submission.id]) }
-evaluation = rubric.create_evaluation(user_id=inst1.id, question_id=question.id, submission_id=submission.id, entry_values=entries_dict)
-#evaluation.save()
-instructor_evals.append(evaluation)
local_configurations = [question.grading_configuration for question in questions['local']]
# Create group for instructors
instructor_group = Group(title='Local Instructors').save()
for user in instructors:
instructor_group.add_user(user)
# Create group for graders
grader_group = Group(title='Local Graders').save()
for user in graders:
-grader_group.add_user(user)
+grader_group.add_user(user)
-# Configure grading for readers
+# Configure grading for readers and instructors
for config in local_configurations:
-config.evaluations_per_submission = 1
-config.evaluations_per_grader = num_local / num_graders
-config.training_exercises_required = 0
-config.open_date = datetime.datetime.now()
-config.due_date = datetime.datetime.now() # TODO FIX
-config.save()
+config.evaluations_per_submission = 1
+config.evaluations_per_grader = num_local / num_graders
+config.training_exercises_required = 0
+config.open_date = datetime.datetime.now()
+config.due_date = datetime.datetime.now() # TODO FIX
+config.save()
+admin_role = GroupRole(group_id=instructor_group.id, question_id=config.question_id, role=GroupRole.ADMIN)
+admin_role.save()
+grader_role = GroupRole(group_id=grader_group.id, question_id=config.question_id,role=GroupRole.GRADER)
+grader_role.save()
+inst1 = instructors[0]
+instructor_evals = []
+for question in questions['local']:
+for submission in local_submissions[question][:5]:
+entries_dict = {entry.id:value for entry, value in zip(rubric.entries, local_true_scores[submission.id])}
+evaluation = rubric.create_evaluation(user_id=inst1.id, submission_id=submission.id, entry_values=entries_dict)
+#evaluation.save()
+instructor_evals.append(evaluation)
-role = GroupRole(group_id=grader_group.id,grading_configuration_id=config.id,role=1)
-role.save()
# Now readers sign in and get work. Readers are also accurate in grading.
-queue = question.grading_queue
for user in graders:
-for question, config in zip(questions['local'], local_configurations):
-tasks = queue.request_work_for_user(user)
-for task in tasks:
-submission = Submission.get_by_question_id_and_id(question.id, task.submission_id)
-entries_dict = { entry.id:value for entry, value in zip(rubric.entries, local_true_scores[submission.id]) }
-evaluation = rubric.create_evaluation(user_id=user.id, submission_id=submission.id, entry_values=entries_dict)
+for question, config in zip(questions['local'], local_configurations):
+queue = question.grading_queue
+tasks = queue.request_work_for_user(user)
+for task in tasks:
+submission = Submission.get_by_question_id_and_id(question.id, task.submission_id)
+entries_dict = { entry.id:value for entry, value in zip(rubric.entries, local_true_scores[submission.id]) }
+evaluation = rubric.create_evaluation(user_id=user.id, submission_id=submission.id, entry_values=entries_dict)
#evaluation.save()
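
The commit message notes that the grading flow still needs unit tests. Below is a minimal sketch of one such test, assuming the grading_client.api classes behave as they are used in the diff above (User/Question/Group save() returning the saved object, question.grading_configuration, GroupRole.GRADER, and grading_queue.request_work_for_user). The test class and fixture names are hypothetical, and the expectation that exactly one task comes back rests on the evaluations_per_submission = 1 setting; it is a sketch, not the repository's actual test suite.

import datetime
import unittest

from grading_client.api import *


class GradingQueueTest(unittest.TestCase):
    # Hypothetical test: a grader who requests work should receive the one
    # pending submission for a question they are allowed to grade.

    def setUp(self):
        # Fixture mirroring the seed script above; all ids and names are made up.
        self.grader = User(name="Test Grader", external_id="edx:9000").save()
        student = User(name="Test Student", external_id="calx:9001").save()
        self.question = Question(external_id="calx_q:test", total_points=2,
                                 due_date=datetime.datetime.now()).save()
        self.submission = Submission(question_id=self.question.id,
                                     user_id=student.id,
                                     external_id="calx_s:test")
        self.submission.save()

        # Configure grading as the seed script does: one evaluation per
        # submission, grading open immediately.
        config = self.question.grading_configuration
        config.evaluations_per_submission = 1
        config.evaluations_per_grader = 1
        config.training_exercises_required = 0
        config.open_date = datetime.datetime.now()
        config.due_date = datetime.datetime.now()
        config.save()

        grader_group = Group(title="Test Graders").save()
        grader_group.add_user(self.grader)
        GroupRole(group_id=grader_group.id, question_id=self.question.id,
                  role=GroupRole.GRADER).save()

    def test_grader_receives_pending_submission(self):
        tasks = self.question.grading_queue.request_work_for_user(self.grader)
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].submission_id, self.submission.id)


if __name__ == '__main__':
    unittest.main()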