Commit 03d2e51f by Awais Jibran

Fix inability to view ORA responses (files) in the leaderboard

EDUCATOR-560
parent 93e0ca47
...@@ -28,3 +28,4 @@ Mushtaq Ali <mushtaak@gmail.com> ...@@ -28,3 +28,4 @@ Mushtaq Ali <mushtaak@gmail.com>
Dmitry Viskov <dmitry.viskov@webenterprise.ru> Dmitry Viskov <dmitry.viskov@webenterprise.ru>
Jeff LaJoie <jlajoie@edx.org> Jeff LaJoie <jlajoie@edx.org>
Albert St. Aubin Jr. <astaubin@edx.org> Albert St. Aubin Jr. <astaubin@edx.org>
Awais Jibran <awaisdar001@gmail.com>
...@@ -76,16 +76,18 @@ class LeaderboardMixin(object): ...@@ -76,16 +76,18 @@ class LeaderboardMixin(object):
for score in scores: for score in scores:
score['files'] = [] score['files'] = []
if 'file_keys' in score['content']: if 'file_keys' in score['content']:
for key in score['content']['file_keys']: file_keys = score['content'].get('file_keys', [])
url = '' descriptions = score['content'].get('files_descriptions', [])
try: for idx, key in enumerate(file_keys):
url = file_upload_api.get_download_url(key) file_download_url = self._get_file_download_url(key)
except FileUploadError: if file_download_url:
pass file_description = descriptions[idx] if idx < len(descriptions) else ''
if url: score['files'].append((file_download_url, file_description))
score['files'].append(url)
elif 'file_key' in score['content']: elif 'file_key' in score['content']:
score['files'].append(file_upload_api.get_download_url(score['content']['file_key'])) file_download_url = self._get_file_download_url(score['content']['file_key'])
if file_download_url:
score['files'].append((file_download_url, ''))
if 'text' in score['content'] or 'parts' in score['content']: if 'text' in score['content'] or 'parts' in score['content']:
submission = {'answer': score.pop('content')} submission = {'answer': score.pop('content')}
score['submission'] = create_submission_dict(submission, self.prompts) score['submission'] = create_submission_dict(submission, self.prompts)
...@@ -112,3 +114,19 @@ class LeaderboardMixin(object): ...@@ -112,3 +114,19 @@ class LeaderboardMixin(object):
template_path (string), tuple of context (dict) template_path (string), tuple of context (dict)
""" """
return 'openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {'xblock_id': self.get_xblock_id()} return 'openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {'xblock_id': self.get_xblock_id()}
def _get_file_download_url(self, file_key):
    """
    Look up the URL at which the file stored under ``file_key`` can be downloaded.

    Arguments:
        file_key (string): Key identifying the uploaded file.

    Returns:
        string: The file's download URL, or the empty string if the
        upload backend reported an error.
    """
    try:
        return file_upload_api.get_download_url(file_key)
    except FileUploadError:
        # Swallow backend failures: a missing URL simply hides the file link.
        return ''
...@@ -47,16 +47,16 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -47,16 +47,16 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
def test_show_submissions(self, xblock): def test_show_submissions(self, xblock):
# Create some submissions (but fewer than the max that can be shown) # Create some submissions (but fewer than the max that can be shown)
self._create_submissions_and_scores(xblock, [ self._create_submissions_and_scores(xblock, [
(prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1), (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1),
(prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 2) (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 2)
]) ])
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"score": 2, "submission": create_submission_dict( {'score': 2, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
xblock.prompts xblock.prompts
)}, )},
{"score": 1, "submission": create_submission_dict( {'score': 1, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
xblock.prompts xblock.prompts
)} )}
]) ])
...@@ -68,21 +68,21 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -68,21 +68,21 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
# Create more submissions than the max # Create more submissions than the max
self._create_submissions_and_scores(xblock, [ self._create_submissions_and_scores(xblock, [
(prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0), (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0),
(prepare_submission_for_serialization(("test answer 4 part 1", "test answer 4 part 2")), 10), (prepare_submission_for_serialization(('test answer 4 part 1', 'test answer 4 part 2')), 10),
(prepare_submission_for_serialization(("test answer 5 part 1", "test answer 5 part 2")), 3), (prepare_submission_for_serialization(('test answer 5 part 1', 'test answer 5 part 2')), 3),
]) ])
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"score": 10, "submission": create_submission_dict( {'score': 10, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 4 part 1", u"test answer 4 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 4 part 1', u'test answer 4 part 2'))},
xblock.prompts xblock.prompts
)}, )},
{"score": 3, "submission": create_submission_dict( {'score': 3, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 5 part 1", u"test answer 5 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 5 part 1', u'test answer 5 part 2'))},
xblock.prompts xblock.prompts
)}, )},
{"score": 2, "submission": create_submission_dict( {'score': 2, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
xblock.prompts xblock.prompts
)} )}
]) ])
...@@ -92,12 +92,12 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -92,12 +92,12 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
def test_show_submissions_that_have_greater_than_0_score(self, xblock): def test_show_submissions_that_have_greater_than_0_score(self, xblock):
# Create some submissions (but fewer than the max that can be shown) # Create some submissions (but fewer than the max that can be shown)
self._create_submissions_and_scores(xblock, [ self._create_submissions_and_scores(xblock, [
(prepare_submission_for_serialization(("test answer 0 part 1", "test answer 0 part 2")), 0), (prepare_submission_for_serialization(('test answer 0 part 1', 'test answer 0 part 2')), 0),
(prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1) (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1)
]) ])
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"score": 1, "submission": create_submission_dict( {'score': 1, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
xblock.prompts xblock.prompts
)}, )},
]) ])
...@@ -109,16 +109,16 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -109,16 +109,16 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
# Create more submissions than the max # Create more submissions than the max
self._create_submissions_and_scores(xblock, [ self._create_submissions_and_scores(xblock, [
(prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 10), (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 10),
(prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0) (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0)
]) ])
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"score": 10, "submission": create_submission_dict( {'score': 10, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
xblock.prompts xblock.prompts
)}, )},
{"score": 1, "submission": create_submission_dict( {'score': 1, 'files': [], 'submission': create_submission_dict(
{"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))}, {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
xblock.prompts xblock.prompts
)} )}
]) ])
...@@ -127,60 +127,105 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -127,60 +127,105 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
@scenario('data/leaderboard_show.xml') @scenario('data/leaderboard_show.xml')
def test_no_text_key_submission(self, xblock): def test_no_text_key_submission(self, xblock):
self.maxDiff = None self.maxDiff = None
# Instead of using the default submission as a dict with "text", # Instead of using the default submission as a dict with 'text',
# make the submission a string. # make the submission a string.
self._create_submissions_and_scores(xblock, [("test answer", 1)], submission_key=None) self._create_submissions_and_scores(xblock, [('test answer', 1)], submission_key=None)
# It should still work # It should still work
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"score": 1} {'score': 1, 'files': []}
]) ])
@mock_s3 @mock_s3
@override_settings( @override_settings(
AWS_ACCESS_KEY_ID='foobar', AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz', AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME='mybucket'
) )
@scenario('data/leaderboard_show.xml') @scenario('data/leaderboard_show.xml')
def test_non_text_submission(self, xblock): def test_non_text_submission(self, xblock):
# Create a mock bucket # Create a mock bucket
conn = boto.connect_s3() conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket') bucket = conn.create_bucket('mybucket')
# Create a non-text submission (the submission dict doesn't contain "text") # Create a non-text submission (the submission dict doesn't contain 'text')
self._create_submissions_and_scores(xblock, [("s3key", 1)], submission_key="file_key") self._create_submissions_and_scores(xblock, [('s3key', 1)], submission_key='file_key')
# Expect that we default to an empty string for content # Expect that we default to an empty string for content
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"submission": "", "score": 1, "file": ""} {'score': 1, 'files': [], 'submission': ''}
]) ])
@mock_s3 @mock_s3
@override_settings( @override_settings(
AWS_ACCESS_KEY_ID='foobar', AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz', AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME='mybucket'
)
@scenario('data/leaderboard_show_allowfiles.xml')
def test_image_and_text_submission_multiple_files(self, xblock):
"""
Tests that leaderboard works as expected when multiple files are uploaded.

Creates a mock S3 bucket with two uploaded files, attaches their keys and
descriptions to a text submission, and asserts that the leaderboard scores
expose each file as a (download_url, description) tuple.
"""
# Two file keys and a parallel list of human-readable descriptions.
file_keys = ['foo', 'bar']
file_descriptions = ['{}-description'.format(file_key) for file_key in file_keys]
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
# Store a small payload for each key under the submissions_attachments/ prefix,
# which is where the file-upload API expects attachments to live.
for file_key in file_keys:
key = Key(bucket, 'submissions_attachments/{}'.format(file_key))
key.set_contents_from_string("How d'ya do?")
# Expected leaderboard output: one (url, description) pair per file,
# in the same order as file_keys.
files_url_and_description = [
(api.get_download_url(file_key), file_descriptions[idx])
for idx, file_key in enumerate(file_keys)
]
# Create a image and text submission
submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
submission[u'file_keys'] = file_keys
submission[u'files_descriptions'] = file_descriptions
self._create_submissions_and_scores(xblock, [
(submission, 1)
])
# Show the full diff on assertion failure for easier debugging.
self.maxDiff = None
# Expect that we retrieve both the text and the download URL for the file
self._assert_scores(xblock, [
{'score': 1, 'files': files_url_and_description, 'submission': create_submission_dict(
{'answer': submission},
xblock.prompts
)}
])
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME='mybucket'
) )
@scenario('data/leaderboard_show_allowfiles.xml') @scenario('data/leaderboard_show_allowfiles.xml')
def test_image_and_text_submission(self, xblock): def test_image_and_text_submission(self, xblock):
"""
Tests that text and image submission works as expected
"""
# Create a file and get the download URL # Create a file and get the download URL
conn = boto.connect_s3() conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket') bucket = conn.create_bucket('mybucket')
key = Key(bucket) key = Key(bucket, 'submissions_attachments/foo')
key.key = "submissions_attachments/foo"
key.set_contents_from_string("How d'ya do?") key.set_contents_from_string("How d'ya do?")
downloadUrl = api.get_download_url("foo")
file_download_url = [(api.get_download_url('foo'), '')]
# Create a image and text submission # Create a image and text submission
submission = prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")) submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
submission[u"file_key"] = "foo" submission[u'file_key'] = 'foo'
self._create_submissions_and_scores(xblock, [ self._create_submissions_and_scores(xblock, [
(submission, 1) (submission, 1)
]) ])
self.maxDiff = None self.maxDiff = None
# Expect that we retrieve both the text and the download URL for the file # Expect that we retrieve both the text and the download URL for the file
self._assert_scores(xblock, [ self._assert_scores(xblock, [
{"file": downloadUrl, "score": 1, "submission": create_submission_dict( {'score': 1, 'files': file_download_url, 'submission': create_submission_dict(
{"answer": submission}, {'answer': submission},
xblock.prompts xblock.prompts
)} )}
]) ])
...@@ -209,7 +254,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -209,7 +254,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
# to anything without affecting the test. # to anything without affecting the test.
student_item = xblock.get_student_item_dict() student_item = xblock.get_student_item_dict()
# adding rand number to the student_id to make it unique. # adding rand number to the student_id to make it unique.
student_item['student_id'] = "student {num} {num2}".format(num=num, num2=randint(2, 1000)) student_item['student_id'] = 'student {num} {num2}'.format(num=num, num2=randint(2, 1000))
if submission_key is not None: if submission_key is not None:
answer = {submission_key: submission} answer = {submission_key: submission}
else: else:
...@@ -278,9 +323,9 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -278,9 +323,9 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
# Strip query string parameters from the file URLs, since these are time-dependent # Strip query string parameters from the file URLs, since these are time-dependent
# (expiration and signature) # (expiration and signature)
if "topscores" in expected_context: if 'topscores' in expected_context:
context["topscores"] = self._clean_score_filenames(context["topscores"]) context['topscores'] = self._clean_score_filenames(context['topscores'])
expected_context["topscores"] = self._clean_score_filenames(context["topscores"]) expected_context['topscores'] = self._clean_score_filenames(expected_context['topscores'])
self.assertEqual(path, expected_path) self.assertEqual(path, expected_path)
self.assertEqual(context, expected_context) self.assertEqual(context, expected_context)
...@@ -293,7 +338,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -293,7 +338,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
""" """
Check that the leaderboard is displayed in the student view. Check that the leaderboard is displayed in the student view.
""" """
fragment = self.runtime.render(xblock, "student_view") fragment = self.runtime.render(xblock, 'student_view')
has_leaderboard = 'step--leaderboard' in fragment.body_html() has_leaderboard = 'step--leaderboard' in fragment.body_html()
self.assertEqual(has_leaderboard, is_visible) self.assertEqual(has_leaderboard, is_visible)
...@@ -301,9 +346,14 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase): ...@@ -301,9 +346,14 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
""" """
Remove querystring parameters from the file name of the score. Remove querystring parameters from the file name of the score.
""" """
for score in scores: def _clean_query_string(_file_url):
if score.get("file"): url = urlparse(_file_url)
url = urlparse(score["file"]) return url.scheme + '://' + url.netloc + url.path
score["file"] = url.scheme + "://" + url.netloc + url.path
for score in scores:
if score.get('files'):
score['files'] = [
(_clean_query_string(file_info[0]), file_info[1]) for file_info in score['files']
]
return scores return scores
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment