Commit 06b7460b authored by Braden MacDonald, committed by Jonathan Piacenti

Submissions API demonstration - completes the proof of concept

parent 0d101d8e
@@ -9,6 +9,7 @@ from opaque_keys.edx.keys import UsageKey
 from xmodule.modulestore.django import modulestore
 from .mcq import MCQBlock, RatingBlock
+from .sub_api import sub_api
 
 logger = get_task_logger(__name__)
@@ -24,7 +25,8 @@ def export_data(source_block_id_str, user_id):
     block_key = UsageKey.from_string(source_block_id_str)
     src_block = modulestore().get_item(block_key)
-    course_key = src_block.scope_ids.usage_id.course_key
+    course_key = src_block.scope_ids.usage_id.course_key.replace(branch=None, version_guid=None)
+    course_key_str = unicode(course_key)
 
     # Get the root block:
     root = src_block
@@ -46,12 +48,26 @@ def export_data(source_block_id_str, user_id):
     # Define the header rows of our CSV:
     rows = []
-    rows.append(block.display_name_with_default for block in blocks_to_include)
-    rows.append(block.scope_ids.block_type for block in blocks_to_include)
-    rows.append(block.scope_ids.usage_id for block in blocks_to_include)
+    rows.append(["Student"] + [block.display_name_with_default for block in blocks_to_include])
+    rows.append([""] + [block.scope_ids.block_type for block in blocks_to_include])
+    rows.append([""] + [block.scope_ids.usage_id for block in blocks_to_include])
 
     # Load the actual student submissions for each block in blocks_to_include.
     # Note this requires one giant query per block (all student submissions for each block, one block at a time)
+    student_submissions = {}  # Key is student ID, value is a list with same length as blocks_to_include
+    for idx, block in enumerate(blocks_to_include, start=1):  # start=1 since first column is student ID
+        # Get all of the most recent student submissions for this block:
+        block_id = unicode(block.scope_ids.usage_id.replace(branch=None, version_guid=None))
+        block_type = block.scope_ids.block_type
+        for submission in sub_api.get_all_submissions(course_key_str, block_id, block_type):
+            if submission.student_id not in student_submissions:
+                student_submissions[submission.student_id] = [submission.student_id] + [""] * len(blocks_to_include)
+            student_submissions[submission.student_id][idx] = submission.answer
+
+    # Now change from a dict to an array ordered by student ID as we generate the remaining rows:
+    for student_id in sorted(student_submissions.iterkeys()):
+        rows.append(student_submissions[student_id])
+        del student_submissions[student_id]
 
     # Generate the CSV:
     filename = u"pb-data-export-{}.csv".format(report_date.strftime("%Y-%m-%d-%H%M%S"))
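
For context, the following is a minimal, self-contained Python 3 sketch of the row-assembly pattern this commit introduces: one header row, one column per block, and one row per student, keyed by student ID and emitted in sorted order. It is not part of the commit; the block list, sample submissions, and output filename are made-up stand-ins, whereas the real task fills the table from sub_api.get_all_submissions(course_key_str, block_id, block_type) one block at a time, as shown in the diff above.

    # Standalone illustration of the CSV layout built by export_data.
    # The blocks and submissions below are invented sample data, not
    # output from the edX submissions API.
    import csv

    blocks_to_include = ["mcq_1", "mcq_2", "rating_1"]   # stand-ins for block usage IDs
    sample_submissions = [                                # (student_id, block_id, answer)
        ("student_a", "mcq_1", "yes"),
        ("student_a", "rating_1", "4"),
        ("student_b", "mcq_2", "no"),
    ]

    # Header row: first column is the student ID, then one column per block.
    rows = [["Student"] + blocks_to_include]

    # Key is student ID, value is a row with the same length as the header.
    student_rows = {}
    block_index = {block_id: idx for idx, block_id in enumerate(blocks_to_include, start=1)}

    for student_id, block_id, answer in sample_submissions:
        if student_id not in student_rows:
            student_rows[student_id] = [student_id] + [""] * len(blocks_to_include)
        student_rows[student_id][block_index[block_id]] = answer

    # Emit one row per student, ordered by student ID, mirroring the commit's second loop.
    for student_id in sorted(student_rows):
        rows.append(student_rows[student_id])

    with open("pb-data-export-sample.csv", "w", newline="") as f:
        csv.writer(f).writerows(rows)

The intermediate dict is what lets the task tolerate sparse data: a student who answered only some of the blocks still gets a full-width row, with empty cells for the blocks they skipped.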