Commit 9a892d0e by Usman Khalid

Updated the leaderboard to display submissions in the new answer format.

TNL-708
parent 4017f1de
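For context: this commit moves the leaderboard from a single free-text answer to the multi-part format produced by the helpers in openassessment/xblock/data_conversion.py. A minimal sketch of the two shapes, inferred from how the diff below uses those helpers (the exact dict layout is an assumption, not part of this commit):

    # Old format: the leaderboard read a raw answer dict with a "text" key.
    old_answer = {"text": "test answer 1"}

    # New format: one part per prompt, as returned by
    # prepare_submission_for_serialization(("part 1", "part 2")).
    new_answer = {"parts": [{"text": "part 1"}, {"text": "part 2"}]}

    # create_submission_dict({"answer": new_answer}, prompts) then pairs each
    # part with its prompt so templates can render prompt/response pairs:
    paired = {"answer": {"parts": [
        {"prompt": {"description": "first prompt"}, "text": "part 1"},
        {"prompt": {"description": "second prompt"}, "text": "part 2"},
    ]}}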
@@ -20,7 +20,7 @@
             {% if topscore.file %}
             <img class="leaderboard__score__image" alt="{% trans "The image associated with your peer's submission." %}" src="{{ topscore.file }}" />
             {% endif %}
-            {{ topscore.content|linebreaks }}
+            {% include "openassessmentblock/oa_submission_answer.html" with answer=topscore.submission.answer answer_text_label="Your peer's response to the question above:" %}
         </div>
     </li>
 {% endfor %}
...
@@ -4,9 +4,12 @@ Leaderboard step in the OpenAssessment XBlock.
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
-from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from submissions import api as sub_api
+from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from openassessment.fileupload import api as file_upload_api
+from openassessment.xblock.data_conversion import create_submission_dict


 class LeaderboardMixin(object):
     """Leaderboard Mixin introduces all handlers for displaying the leaderboard
@@ -72,13 +75,16 @@ class LeaderboardMixin(object):
         for score in scores:
             if 'file_key' in score['content']:
                 score['file'] = file_upload_api.get_download_url(score['content']['file_key'])
-            if 'text' in score['content']:
-                score['content'] = score['content']['text']
+            if 'text' in score['content'] or 'parts' in score['content']:
+                submission = {'answer': score.pop('content')}
+                score['submission'] = create_submission_dict(submission, self.prompts)
             elif isinstance(score['content'], basestring):
                 pass
                 # Currently, we do not handle non-text submissions.
             else:
-                score['content'] = ""
+                score['submission'] = ""
+                score.pop('content', None)

         context = { 'topscores': scores,
                     'allow_latex': self.allow_latex,
...
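A minimal sketch of what the new branch does to each leaderboard entry, assuming the data_conversion helpers behave as sketched above (placeholder values, not code from this commit):

    # Entry as returned by the submissions scoring API:
    score = {"content": {"parts": [{"text": "part 1"}, {"text": "part 2"}]},
             "score": 10}

    # The new branch pops "content" and replaces it with a template-ready
    # "submission" dict whose parts are paired with the block's prompts:
    submission = {"answer": score.pop("content")}
    score["submission"] = create_submission_dict(submission, self.prompts)
    # score == {"score": 10, "submission": {"answer": {"parts": [...]}}}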
<openassessment leaderboard_show="3"> <openassessment leaderboard_show="3">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompts>
<prompt> <prompt>
Given the state of the world today, what do you think should be done to <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
combat poverty? Please answer in a short essay of 200-300 words.
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt> <prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
......
<openassessment leaderboard_show="3" allow_file_upload="True"> <openassessment leaderboard_show="3" allow_file_upload="True">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompts>
<prompt> <prompt>
Given the state of the world today, what do you think should be done to <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
combat poverty? Please answer in a short essay of 200-300 words.
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment leaderboard_show="10"> <openassessment leaderboard_show="10">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompts>
<prompt> <prompt>
Given the state of the world today, what do you think should be done to <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
combat poverty? Please answer in a short essay of 200-300 words.
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
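All three scenario files above swap the old single free-text <prompt> for a structured <prompts> list with one <description> per question, so every test submission below carries two answer parts. The parsed prompts are assumed to reach the block as a list of dicts (an inference from how xblock.prompts is passed to create_submission_dict in the tests):

    # Assumed shape of xblock.prompts for the two-prompt scenarios above:
    prompts = [
        {"description": "Given the state of the world today, what do you think should be done to combat poverty?"},
        {"description": "Given the state of the world today, what do you think should be done to combat pollution?"},
    ]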
@@ -14,6 +14,8 @@ import boto
 from boto.s3.key import Key
 from openassessment.fileupload import api
+from openassessment.xblock.data_conversion import create_submission_dict, prepare_submission_for_serialization


 class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
     @scenario('data/basic_scenario.xml')
@@ -40,15 +42,20 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
     @scenario('data/leaderboard_show.xml')
     def test_show_submissions(self, xblock):
         # Create some submissions (but fewer than the max that can be shown)
         self._create_submissions_and_scores(xblock, [
-            ("test answer 1", 1),
-            ("test answer 2", 2)
+            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1),
+            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 2)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 2", "score": 2},
-            {"content": "test answer 1", "score": 1}
+            {"score": 2, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)
@@ -58,27 +65,38 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Create more submissions than the max
         self._create_submissions_and_scores(xblock, [
-            ("test answer 3", 0),
-            ("test answer 4", 10),
-            ("test answer 5", 3)
+            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0),
+            (prepare_submission_for_serialization(("test answer 4 part 1", "test answer 4 part 2")), 10),
+            (prepare_submission_for_serialization(("test answer 5 part 1", "test answer 5 part 2")), 3),
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 4", "score": 10},
-            {"content": "test answer 5", "score": 3},
-            {"content": "test answer 2", "score": 2}
+            {"score": 10, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 4 part 1", u"test answer 4 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 3, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 5 part 1", u"test answer 5 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 2, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)
     @scenario('data/leaderboard_show.xml')
     def test_show_submissions_that_have_greater_than_0_score(self, xblock):
         # Create some submissions (but fewer than the max that can be shown)
         self._create_submissions_and_scores(xblock, [
-            ("test answer 0", 0),
-            ("test answer 1", 1),
+            (prepare_submission_for_serialization(("test answer 0 part 1", "test answer 0 part 2")), 0),
+            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 1", "score": 1}
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )},
         ])
         self._assert_leaderboard_visible(xblock, True)
@@ -88,24 +106,31 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Create more submissions than the max
         self._create_submissions_and_scores(xblock, [
-            ("test answer 2", 10),
-            ("test answer 3", 0)
+            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 10),
+            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 2", "score": 10},
-            {"content": "test answer 1", "score": 1}
+            {"score": 10, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)
     @scenario('data/leaderboard_show.xml')
     def test_no_text_key_submission(self, xblock):
+        self.maxDiff = None
         # Instead of using the default submission as a dict with "text",
         # make the submission a string.
         self._create_submissions_and_scores(xblock, [("test answer", 1)], submission_key=None)
         # It should still work
         self._assert_scores(xblock, [
-            {"content": "test answer", "score": 1}
+            {"score": 1}
         ])
     @mock_s3
@@ -124,7 +149,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Expect that we default to an empty string for content
         self._assert_scores(xblock, [
-            {"content": "", "score": 1, "file": ""}
+            {"submission": "", "score": 1, "file": ""}
         ])
     @mock_s3
@@ -143,16 +168,23 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         key.set_contents_from_string("How d'ya do?")
         downloadUrl = api.get_download_url("foo")
         # Create an image and text submission
-        self._create_submissions_and_scores(xblock, [({"text": "test answer", "file_key": "foo"}, 1)], submission_key=None)
+        submission = prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2"))
+        submission[u"file_key"] = "foo"
+        self._create_submissions_and_scores(xblock, [
+            (submission, 1)
+        ])
+        self.maxDiff = None
         # Expect that we retrieve both the text and the download URL for the file
         self._assert_scores(xblock, [
-            {"content": "test answer", "score": 1, "file": downloadUrl}
+            {"file": downloadUrl, "score": 1, "submission": create_submission_dict(
+                {"answer": submission},
+                xblock.prompts
+            )}
         ])
     def _create_submissions_and_scores(
             self, xblock, submissions_and_scores,
-            submission_key="text", points_possible=10
+            submission_key=None, points_possible=10
     ):
         """
         Create submissions and scores that should be displayed by the leaderboard.
...
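Flipping the helper's default to submission_key=None means callers now pass fully serialized answers rather than bare strings wrapped under "text". A hedged usage sketch mirroring the updated tests (the helper's internals are not shown in this diff):

    # Each tuple is (answer, points_earned); the helper is assumed to create
    # a submission for each answer and then record the given score.
    self._create_submissions_and_scores(xblock, [
        (prepare_submission_for_serialization(("part 1", "part 2")), 5),
    ])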