Commit 19eeaa08 by Usman Khalid

Updated calls to create_rubric_dict

TNL-708
parent 3993cbb3
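
For context, a minimal sketch of the helper whose call sites change in the hunks below. It assumes `create_rubric_dict` simply packages the prompts and criteria into the rubric dictionary handed to the assessment APIs, and that `self.prompts` is now a list of prompt dictionaries rather than the single `self.prompt` string; the key names and prompt structure here are assumptions for illustration, not taken verbatim from the repository.

```python
# Hedged sketch only: the assumed shape of the helper after this change.
# Key names ("prompts", "criteria") and the prompts structure are assumptions.

def create_rubric_dict(prompts, criteria):
    """
    Package the prompts and rubric criteria into the serializable rubric
    dictionary that the peer, self, and example-based assessment APIs expect.

    Args:
        prompts (list): Prompt dictionaries shown to the learner, e.g.
            [{"description": "Summarize the article in your own words."}]
        criteria (list): Rubric criteria (with labels) for this block.

    Returns:
        dict: The rubric dictionary passed to the assessment APIs.
    """
    return {
        "prompts": prompts,
        "criteria": criteria,
    }
```
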
@@ -86,7 +86,7 @@ class PeerAssessmentMixin(object):
     data['options_selected'],
     clean_criterion_feedback(self.rubric_criteria_with_labels, data['criterion_feedback']),
     data['overall_feedback'],
-    create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
+    create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
     assessment_ui_model['must_be_graded_by']
 )
@@ -133,7 +133,7 @@ class SelfAssessmentMixin(object):
     data['options_selected'],
     clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
     data['overall_feedback'],
-    create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
+    create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
 )
 self.publish_assessment_event("openassessmentblock.self_assess", assessment)
@@ -138,7 +138,7 @@ class StaffInfoMixin(object):
 context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
 if display_ai_staff_info:
     context['classifierset'] = ai_api.get_classifier_set_info(
-        create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
+        create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
         example_based_assessment['algorithm_id'],
         student_item['course_id'],
         student_item['item_id']
@@ -179,7 +179,7 @@ class StaffInfoMixin(object):
 examples = assessment["examples"]
 try:
     workflow_uuid = ai_api.train_classifiers(
-        create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
+        create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
         convert_training_examples_list_to_dict(examples),
         student_item_dict.get('course_id'),
         student_item_dict.get('item_id'),
@@ -286,7 +286,7 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
     xblock.get_student_item_dict()['student_id'],
     {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
     {}, "Good job!",
-    create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
+    create_rubric_dict(xblock.prompts, xblock.rubric_criteria)
 )
 self._assert_path_and_context(
     xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
@@ -322,7 +322,7 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
     xblock.get_student_item_dict()['student_id'],
     {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
     {}, "Good job!",
-    create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
+    create_rubric_dict(xblock.prompts, xblock.rubric_criteria)
 )
 # This case probably isn't possible, because presumably when we create
@@ -54,7 +54,7 @@ class WorkflowMixin(object):
 ai_module = self.get_assessment_module('example-based-assessment')
 on_init_params = {
     'ai': {
-        'rubric': create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
+        'rubric': create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
         'algorithm_id': ai_module["algorithm_id"] if ai_module else None
     }
 }
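
A hedged before/after view of the rename applied at every call site above; the exact structure of `self.prompts` and of the criteria list is an assumption based on this commit, not confirmed from the repository.

```python
# Illustrative only: assumed difference between the old and new attributes.
old_prompt = "Summarize the article in your own words."                      # self.prompt: single string
new_prompts = [{"description": "Summarize the article in your own words."}]  # self.prompts: list of dicts

# Hypothetical criteria list, shaped loosely like the block's rubric criteria.
rubric_criteria_with_labels = [
    {"name": "concise", "label": "Concise", "prompt": "How concise is the response?", "options": []},
]

# Every mixin touched by this commit now passes the list instead of the string.
rubric = create_rubric_dict(new_prompts, rubric_criteria_with_labels)
```
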