Commit 231c199a by Diana Huang

Remove ora2 documentation.

This documentation now lives primarily in
another repo.
parent 177731a6
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
Q_FLAG =
ifeq ($(quiet), true)
Q_FLAG = -Q
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = $(Q_FLAG) -d $(BUILDDIR)/doctrees -c source $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/getting_started.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/getting_started.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/getting_started"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/getting_started"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
.. _PA Accessing Assignment Information:
##########################################
Accessing Assignment and Learner Metrics
##########################################
After your open response assessment assignment has been released, you can access information about the number of learners in each step of the assignment or the performance of individual learners. This information is available in the **Course Staff Information** section at the end of each assignment. To access it, open the assignment in the courseware, scroll to the bottom of the assignment, and then click the black **Course Staff Information** banner.
.. image:: /Images/PA_CourseStaffInfo_Collapsed.png
:alt: The Course Staff Information banner at the bottom of the peer assessment
.. _PA View Metrics for Individual Steps:
************************************************
View Metrics for Individual Steps
************************************************
You can check the number of learners who have completed, or are currently working through, the following steps:
* Submitted responses.
* Completed peer assessments.
* Waiting to assess responses or receive grades.
* Completed self assessments.
* Completed the entire assignment.
To find this information, open the assignment in the courseware, scroll to the bottom of the assignment, and then click **Course Staff Information**.
The **Course Staff Information** section expands, and you can see the number of learners who are currently working through (but have not completed) each step of the problem.
.. image:: /Images/PA_CourseStaffInfo_Expanded.png
:alt: The Course Staff Information box expanded, showing problem status
.. _Access Information for a Specific Learner:
***********************************************
Access Information for a Specific Learner
***********************************************
You can access information about an individual learner's performance on a peer assessment assignment, including:
* The learner's response.
* The peer assessments that other learners performed on the learner's response, including feedback on individual criteria and on the overall response.
* The peer assessments that the learner performed on other learners' responses, including feedback on individual criteria and on the overall responses.
* The learner's self assessment.
In the following example, you can see the learner's response. The response received one peer assessment, and the learner completed a peer assessment on one other learner's response. The learner also completed a self assessment.
.. image:: /Images/PA_SpecificStudent.png
:width: 500
:alt: Report showing information about a learner's response
For an example that shows a learner's response with more assessments, see :ref:`Access Learner Information`.
Accessing information about a specific learner has two steps:
#. Determine the learner's course-specific anonymized ID.
#. Access information for that learner.
=====================================================
Determine the Learner's Course-Specific Anonymized ID
=====================================================
To determine a learner's course-specific anonymized ID, you'll need two .csv spreadsheets from the Instructor Dashboard: the grade report (**<course name>_grade_report_<datetime>.csv**) and the list of course-specific anonymized learner IDs (**<course name>-anon-ids.csv**).
#. In the LMS, click the **Instructor** tab.
#. On the Instructor Dashboard, click **Data Download**.
#. On the **Data Download** page, locate the **Data Download** section, and then click **Get Student Anonymized IDs CSV**. A spreadsheet named **<course name>-anon-ids.csv** automatically downloads. Click to open the spreadsheet.
#. Scroll down to the **Reports** section, and then click **Generate Grade Report**.
The system automatically begins to generate the grade report. When it's finished, a link to the grade report appears in the list below **Reports Available for Download**.
.. note:: Generating a grade report for a large class may take several hours.
5. When the link to the grade report appears in the **Reports Available for Download** list, click the link to open the spreadsheet.
#. When you have both spreadsheets open, view the **<course name>_grade_report_<datetime>.csv** spreadsheet. Locate the learner that you want by username or e-mail address. Make a note of the number in the ID column (column A) for that learner. In the following example, the learner ID for e-mail address ``amydorrit@example.com`` (username ``lildorrit``) is ``18557``.
.. image:: /Images/PA_grade_report.png
:width: 500
:alt: Spreadsheet listing enrolled learners and grades
7. Go to the **<course name>-anon-ids.csv** spreadsheet, locate the user ID that you noted in step 6, and then copy the value in the "Course Specific Anonymized user ID" column (**column C**) for the user. The value in column C is the learner's anonymized user ID for the course. In the following example, the anonymized user ID for learner ID ``18557`` is ``ofouw6265242gedud8w82g16qshsid87``.
.. image:: /Images/PA_anon_ids.png
:width: 500
:alt: Spreadsheet listing learners' anonymous user IDs
.. note:: Make sure that you don't copy the value in column B. You need the *course-specific* anonymized user ID from **column C**.
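If you prefer to do this lookup with a short script instead of by eye, the two spreadsheets can be joined on the learner ID. The following sketch is illustrative only: the file names are placeholders, and it assumes, as described above, that column A of the grade report holds the learner ID and that columns A and C of the anonymized IDs file hold the learner ID and the course-specific anonymized user ID.

.. code-block:: python

   import csv

   def find_anonymized_id(grade_report_path, anon_ids_path, email):
       """Look up a learner's course-specific anonymized ID.

       Column positions are assumptions based on the description above:
       column A of the grade report holds the learner ID, and columns A
       and C of the anon-ids file hold the learner ID and the
       course-specific anonymized ID.
       """
       # Step 1: find the learner's numeric ID in the grade report.
       learner_id = None
       with open(grade_report_path, newline='') as report:
           for row in csv.reader(report):
               if row and email in row:    # match by e-mail address (or username)
                   learner_id = row[0]     # column A: learner ID
                   break
       if learner_id is None:
           raise LookupError('Learner not found in the grade report')

       # Step 2: map that ID to the course-specific anonymized ID.
       with open(anon_ids_path, newline='') as anon_ids:
           for row in csv.reader(anon_ids):
               if row and row[0] == learner_id:
                   return row[2]           # column C: course-specific anonymized ID
       raise LookupError('Learner ID not found in the anonymized IDs file')

   # Hypothetical usage, following the example above:
   # find_anonymized_id('course_grade_report.csv', 'course-anon-ids.csv',
   #                    'amydorrit@example.com')
   # -> 'ofouw6265242gedud8w82g16qshsid87'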
.. _Access Learner Information:
=======================================
Access the Learner's Information
=======================================
#. In the LMS, go to the peer assessment assignment that you want to see.
#. Scroll to the bottom of the problem, and then click the black **Course Staff Information** banner.
#. Scroll down to the **Get Learner Info** box, paste the learner's course-specific anonymized user ID in the box, and then click **Submit**.
The learner's information appears below the **Get Learner Info** box.
The following example shows:
* The learner's response.
* The two peer assessments for the response.
* The two peer assessments the learner completed.
* The learner's self assessment.
For a larger view, click the image so that it opens by itself in the browser window, and then click anywhere on the image that opens.
.. image:: /Images/PA_SpecificStudent_long.png
:width: 250
:alt: Report showing information about a learner's response
.. _PA Create a PA Assignment:
#############################################
Create an Open Response Assessment Assignment
#############################################
Creating an open response assessment is a multi-step process:
* :ref:`PA Create Component`
* :ref:`PA Add Prompt`
* :ref:`PA Add Rubric`
* :ref:`PA Specify Name and Dates`
* :ref:`PA Select Assignment Steps`
* :ref:`PA Specify Step Settings`
* :ref:`PA Show Top Responses`
* :ref:`PA Test Assignment`
Each of these steps is covered in detail below.
For more information about the components of an open response assessment, see :ref:`Peer Assessments`.
.. _PA Create Component:
******************************
Step 1. Create the Component
******************************
To create the component for your open response assessment:
#. In Studio, open the unit where you want to create the open response assessment.
#. Under **Add New Component**, click **Problem**, click the **Advanced** tab, and then click **Peer Assessment**.
#. In the Problem component that appears, click **Edit**.
.. _PA Add Prompt:
******************************
Step 2. Add the Prompt
******************************
To add the prompt, or question:
#. In the open response assessment component editor, click the **Prompt** tab.
#. Add the text of your question in the text field. Replace any default text, if necessary.
========================================
Add Formatting or Images to the Prompt
========================================
Currently, you cannot add text formatting or images inside the Peer Assessment component. If you want to include formatting or images in the text of your prompt, you can add an HTML component that contains your text above the Peer Assessment component and leave the text field in the **Prompt** tab blank. The instructions for the peer assessment still appear above the **Your Response** field.
.. image:: /Images/PA_HTMLComponent.png
:alt: A peer assessment that has an image in an HTML component
:width: 500
.. _PA Allow Images:
============================================
Allow Learners to Submit Images (optional)
============================================
To allow learners to submit an image with a response:
#. In the component editor, click the **Settings** tab.
#. Next to **Allow Image Responses**, select **True**.
.. note::
* The image file must be a .jpg or .png file, and it must be smaller than 5 MB in size.
* Currently, course teams cannot see any of the images that learners submit. Images are not visible in the body of the assignment in the courseware, and they are not included in the course data package.
* You can allow learners to upload an image, but you cannot require it.
* Learners can only submit one image with a response.
* All responses must contain text. Learners cannot submit a response that contains only an image.
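Taken together, these constraints amount to a simple validity check on a learner's submission. The following sketch is purely illustrative (the platform performs its own validation); the function and constant names are invented for this example.

.. code-block:: python

   import os

   # Assuming "5 MB" means 5 * 1024 * 1024 bytes.
   MAX_IMAGE_BYTES = 5 * 1024 * 1024
   ALLOWED_EXTENSIONS = {'.jpg', '.png'}    # only .jpg and .png files are accepted

   def response_is_acceptable(text, image_path=None):
       """Illustrative check mirroring the constraints listed above."""
       if not text.strip():
           return False                     # every response must contain text
       if image_path is not None:           # at most one image, and it is optional
           extension = os.path.splitext(image_path)[1].lower()
           if extension not in ALLOWED_EXTENSIONS:
               return False
           if os.path.getsize(image_path) >= MAX_IMAGE_BYTES:
               return False
       return True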
.. _PA Add Rubric:
******************************
Step 3. Add the Rubric
******************************
In this step, you'll add your rubric and provide your learners with feedback options.
For each step below, replace any default text with your own text.
.. note:: All open response assessments include a feedback field below the rubric so that learners can provide written feedback on a peer's overall response. You can also allow or require learners to provide feedback for individual criteria. See step 6 below for instructions. For more information, see :ref:`Feedback Options`.
To add the rubric:
#. In the open response assessment component editor, click the **Rubric** tab.
#. In the first **Criterion** section, enter the name and prompt text of your first criterion.
#. In the first **Option** section, enter the name, explanation, and point value for the first option.
#. In the next **Option** section, enter the name, explanation, and point value for the next option.
#. Repeat step 4 for each option. If you need to add more options, click **Add Option**.
#. Next to **Feedback for This Criterion**, select a value in the drop-down list.
* If you don't want learners to provide feedback for this individual criterion, select **None**.
* If you want to require learners to provide feedback, select **Required**.
* If you want to allow learners to provide feedback, but not require it, select **Optional**.
7. Follow the instructions in steps 2-6 to add your remaining criteria. If you need to add more criteria, click **Add Criterion** at the end of the list of criteria.
#. Include instructions for learners to provide overall written feedback on their peers' responses. You can leave the default text in the **Feedback Instructions** field or replace it with your own text.
.. _PA Criteria Comment Field Only:
==========================================================
Provide Only Comment Fields for Individual Criteria
==========================================================
When you add a comment field to a criterion, the comment field appears below the options for the criterion. You can also provide a comment field, but no options.
In the following image, the first criterion has a comment field but no options. The second includes options, but does not have a comment field.
.. image:: /Images/PA_0_Option_Criteria.png
To provide a comment field without options:
#. In the criterion, click **Remove** to remove, or delete, all the options.
#. Next to **Feedback for This Criterion**, select **Required** in the drop-down list.
.. _PA Specify Name and Dates:
************************************************************
Step 4. Specify the Assignment Name and Response Dates
************************************************************
To specify a name for the assignment as well as start and due dates for all learner responses:
#. In the component editor, click the **Settings** tab.
#. Next to **Display Name**, type the name you want to give the assignment.
#. Next to **Response Start Date** and **Response Start Time**, enter the date and time when you want learners to be able to begin submitting responses. Note that all times are in Coordinated Universal Time (UTC).
#. Next to **Response Due Date** and **Response Due Time**, enter the date and time by which all learner responses must be submitted. Note that all times are in Coordinated Universal Time (UTC).
.. note:: We recommend that you set the response due date and time at least two days before the peer assessment due date and time. If the response due time and peer assessment due time are close together, and a learner submits a response just before responses are due, other learners may not have time to perform peer assessments before peer assessments are due.
.. _PA Select Assignment Steps:
****************************************
Step 5. Select Assignment Steps
****************************************
Open response assessment assignments can include learner training, peer assessment, and self assessment steps. You can include a peer assessment step before a self assessment step and vice versa.
If you include a learner training step, you **must** include a peer assessment step. You can also include a self assessment step. The learner training step must come before both the peer assessment and the self assessment step.
To add steps to the assignment:
#. In the component editor, click the **Settings** tab.
#. Scroll down past the **Allow Image Responses** field.
#. Locate the following headings:
* **Step: Learner Training**
* **Step: Peer Assessment**
* **Step: Self Assessment**
Select the check boxes for the steps that you want the assignment to include.
#. (optional) If you want to change the order of the steps, drag the steps into the order that you want. If you include a learner training step, make sure it is the first step in the assignment.
.. _PA Specify Step Settings:
******************************
Step 6. Specify Step Settings
******************************
After you select the steps that you want, you'll specify settings for those steps.
.. note:: If you make changes to a step, but then you clear the check box for that step, the step will no longer be part of the assignment and your changes will not be saved.
.. _PA Learner Training Step:
========================
Learner Training
========================
For the learner training step, you'll enter one or more responses that you have created, then select an option for each criterion in your rubric.
.. note:: You must enter your complete rubric on the **Rubric** tab before you can select options for the learner training responses. If you later change one of your criteria or any of its options, you'll also have to update the learner training step.
To add and score learner training responses:
#. Under **Step: Learner Training**, locate the first **Scored Response** section.
#. In the **Response** field, enter the text of your example response.
#. Under **Response Score**, select the option that you want for each criterion.
For more information, see :ref:`PA Learner Training Assessments`.
============================
Peer Assessment
============================
For the peer assessment step, you'll specify the number of responses that each learner must grade, the number of learners that must grade each response, and start and due dates. All fields are required.
To specify peer assessment settings:
#. Locate the **Step: Peer Assessment** heading.
#. Next to **Must Grade**, enter the number of responses that each learner must grade.
#. Next to **Graded By**, enter the number of learners that must grade each response.
#. Next to **Start Date** and **Start Time**, enter the date and time when learners can begin assessing their peers' responses. All times are in Coordinated Universal Time (UTC).
#. Next to **Due Date** and **Due Time**, enter the date and time by which all peer assessments must be complete. All times are in UTC.
============================
Self Assessment
============================
For the self assessment step, you'll specify when the step starts and ends.
#. Locate the **Step: Self Assessment** heading.
#. Next to **Start Date** and **Start Time**, enter the date and time when learners can begin assessing their own responses. All times are in Coordinated Universal Time (UTC).
#. Next to **Due Date** and **Due Time**, enter the date and time by which all self assessments must be complete. All times are in UTC.
.. _PA Show Top Responses:
******************************
Step 7. Show Top Responses
******************************
To allow learners to see the top-scoring responses for the assignment, you'll specify a number on the **Settings** tab.
#. In the component editor, click the **Settings** tab.
#. In the **Top Responses** field, specify the number of responses that you want to appear in the **Top Responses** section below the learner's final score. If you don't want this section to appear, set the number to 0. The maximum number is 100.
.. note:: Because each response can be up to 300 pixels in height, we recommend that you set this number to 20 or lower to prevent the page from becoming too long.
For more information, see :ref:`PA Top Responses`.
.. _PA Test Assignment:
******************************
Step 8. Test the Assignment
******************************
To test your assignment, set up the assignment in your course, set the section or subsection date in the future, and ask a group of beta users to submit responses and grade each other. The beta testers can then let you know if they found the question and the rubric easy to understand or if they had any problems with the assignment.
For more information about beta testing, see :ref:`Beta_Testing`.
.. _Peer Assessments:
#########################
Open Response Assessments
#########################
*****************************************
Introduction to Open Response Assessments
*****************************************
Open response assessments allow instructors to assign questions that may not have definite answers. Learners submit a response to the question, and then that learner and the learner's peers compare the response to a rubric that you create. Usually learners will submit text responses. You can also allow your learners to upload an image to accompany the text.
Open response assessments include peer assessments and self assessments. In peer assessments, learners compare their peers' responses to a rubric that you create. In self assessments, learners compare their own responses to the rubric.
In open response assessments, learners usually only see their own responses and any peer responses they assess. You can also allow learners to see the top-scoring responses that their peers have submitted. For more information, see :ref:`PA Top Responses`.
For more information about creating open response assessments, including step-by-step instructions, see the following sections:
* :ref:`PA Elements`
* :ref:`PA Scoring`
* :ref:`PA Create a PA Assignment`
* :ref:`PA Accessing Assignment Information`
.. _PA Elements:
==========================================
Elements of an Open Response Assessment
==========================================
When you create an open response assessment assignment, you include several elements:
* The prompt, or question.
* The rubric.
* One or more assessment steps. Assignments can include a learner training step, a peer assessment step, and a self assessment step.
.. note:: If you include a learner training step, you must also add a peer assessment step. The learner training step must be the first step.
For step-by-step instructions for creating an open response assessment, see :ref:`PA Create a PA Assignment`.
************************
Prompt
************************
The **prompt**, or question that you want your learners to answer, appears near the top of the page, followed by a field where the learner enters a response. You can require your learners to enter text as a response, or you can allow your learners to both enter text and upload an image.
.. image:: /Images/PA_QandRField.png
:width: 500
:alt: ORA question and blank response field
.. note:: If learners upload an image, the image file must be a .jpg or .png file, and it must be smaller than 5 MB in size.
When you write your question, you can include helpful information for your learners, such as what learners can expect after they submit responses and the approximate number of words or sentences that a learner's response should have. (A response cannot have more than 10,000 words.)
For more information, see :ref:`PA Add Prompt`.
==========================================
Asking Learners to Upload Images
==========================================
You can ask your learners to upload an image as part of their response. If you do this, however, keep the following in mind:
* Currently, you cannot require your learners to upload an image. You can only allow it.
* All responses must include some text. Learners cannot submit a response that only contains an image.
* Learners can only submit one image with their response.
.. note:: Currently, course teams cannot see any of the images that learners submit. Images are not visible in the body of the assignment in the courseware, and they are not included in the course data package.
.. _PA Rubric:
************************
Rubric
************************
Your assignment must include a **rubric** that you design. The same rubric is used for peer and self assessments, and the rubric appears when learners begin grading. Learners compare their peers' responses to the rubric.
Rubrics are made of *criteria* and *options*.
* Each criterion has a *name*, a *prompt*, and one or more *options*.
* The name is a very short summary of the criterion, such as "Ideas" or "Content". Criterion names generally have just one word. Because the system uses criterion names for identification, **the name for each criterion must be unique.** Criterion names do not appear in the rubric that learners see when they are completing peer assessments, but they do appear on the page that shows the learner's final grade.
.. image:: /Images/PA_CriterionName.png
:alt: A final score page with call-outs for the criterion names
* The prompt is a description of the criterion.
* Options describe how well the response satisfies the criterion.
* Each option has a *name*, an *explanation*, and a *point value*.
.. image:: /Images/PA_Rubric_LMS.png
:alt: Image of a rubric in the LMS with call-outs for the criterion prompt and option names, explanations, and points
Different criteria in the same assignment can have different numbers of options. For example, in the image above, the first criterion has three options and the second criterion has four options.
.. note:: You can also include criteria that do not have options, but that do include a field where learners can enter feedback. For more information, see :ref:`PA Criteria Comment Field Only`.
You can see both criterion and option names when you access assignment information for an individual learner. For more information, see :ref:`PA Accessing Assignment Information`.
.. image:: /Images/PA_Crit_Option_Names.png
:width: 600
:alt: Learner-specific assignment information with call-outs for criterion and option names
When you create your rubric, decide how many points each option will receive, and make sure that the explanation for each option is as specific as possible. For example, one criterion and set of options may resemble the following.
**Criterion**
Name: Origins
Prompt: Does this response explain the origins of the Hundred Years' War? (5 points possible)
**Options**
.. list-table::
:widths: 8 20 50
:stub-columns: 1
:header-rows: 1
* - Points
- Name
- Explanation
* - 0
- Not at all
- This response does not address the origins of the Hundred Years' War.
* - 1
- Dynastic disagreement
- This response alludes to a dynastic disagreement between England and France, but doesn't reference Edward III of England and Philip VI of France.
* - 3
- Edward and Philip
- This response mentions the dynastic disagreement between Edward III and Philip VI, but doesn't address the role of Salic law.
* - 5
- Salic law
- This response explains the way that Salic law contributed to the dynastic disagreement between Edward III and Philip VI, leading to the Hundred Years' War.
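To make the relationship between criteria and options concrete, here is the same example criterion written out as plain data. This Python sketch is illustrative only; it is not the format that Studio or the assessment component uses to store rubrics.

.. code-block:: python

   # Illustrative only: a plain-data picture of the example criterion above.
   origins_criterion = {
       "name": "Origins",
       "prompt": "Does this response explain the origins of the Hundred Years' War?",
       "options": [
           {"points": 0, "name": "Not at all",
            "explanation": "Does not address the origins of the Hundred Years' War."},
           {"points": 1, "name": "Dynastic disagreement",
            "explanation": "Alludes to a dynastic disagreement between England and France, "
                           "but doesn't reference Edward III and Philip VI."},
           {"points": 3, "name": "Edward and Philip",
            "explanation": "Mentions the dynastic disagreement between Edward III and "
                           "Philip VI, but doesn't address the role of Salic law."},
           {"points": 5, "name": "Salic law",
            "explanation": "Explains how Salic law contributed to the dynastic disagreement, "
                           "leading to the Hundred Years' War."},
       ],
   }

   # The most points this criterion can contribute is its highest option value.
   max_points = max(option["points"] for option in origins_criterion["options"])  # 5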
For more information about writing effective rubrics, see Heidi Goodrich Andrade's `Understanding Rubrics <http://learnweb.harvard.edu/alps/thinking/docs/rubricar.htm>`_.
For more information, see :ref:`PA Add Rubric`.
************************
Assessment Steps
************************
In your assignment, you'll also specify the **assessment steps**. You can set the assignment to include a learner training step, a peer assessment step, and a self assessment step.
You can see the type and order of the assessments when you look at the assignment. In the following example, after learners submit a response, they complete a learner training step ("Learn to Assess Responses"), complete peer assessments on other learners' responses ("Assess Peers"), and then complete self assessments ("Assess Your Response").
.. image:: /Images/PA_AsmtWithResponse.png
:alt: Image of peer assessment with assessment steps and status labeled
:width: 600
.. note:: If you include a learner training step, you must also include a peer assessment step. The learner training step must come before peer and self assessment steps.
.. _PA Learner Training Assessments:
========================
Learner Training Step
========================
When you create a peer assessment assignment, you can include one or more learner training assessments to help learners learn to perform their own assessments. A learner training assessment contains one or more sample responses that you write, together with the scores that you would give the sample responses. Learners review these responses and try to score them the way that you scored them.
.. note:: If you include a learner training step, you must also include a peer assessment step. The learner training step must come before peer and self assessment steps.
In a learner training assessment, the **Learn to Assess Responses** step opens immediately after a learner submits a response. The learner sees one of the sample responses that you created, along with the rubric. The scores that you gave the response do not appear. The learner also sees the number of sample responses that he or she will assess.
.. image:: /Images/PA_TrainingAssessment.png
:alt: Sample training response, unscored
:width: 500
The learner selects an option for each of the assignment's criteria, and then clicks **Compare your selections with the instructor's selections**. If all of the learner's selections match the instructor's selections, the next sample response opens automatically.
If any of the learner's selections differs from the instructor's selections, the learner sees the response again, and the following message appears above the response:
.. code-block:: xml
Learning to Assess Responses
Your assessment differs from the instructor's assessment of this response. Review the
response and consider why the instructor may have assessed it differently. Then, try
the assessment again.
For each of the criteria, the learner sees one of the following two messages, depending on whether the learner's selections matched those of the instructor:
.. code-block:: xml
Selected Options Differ
The option you selected is not the option that the instructor selected.
.. code-block:: xml
Selected Options Agree
The option you selected is the option that the instructor selected.
For example, the following learner chose one correct option and one incorrect option.
.. image:: /Images/PA_TrainingAssessment_Scored.png
:alt: Sample training response, scored
:width: 500
The learner continues to try scoring the sample response until the learner's scoring for all criteria matches the instructor's scoring.
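The comparison that drives this step can be summarized in a few lines. The sketch below is illustrative, not the platform's implementation; it assumes each assessment is recorded as a mapping from criterion name to the name of the selected option.

.. code-block:: python

   # Illustrative sketch of the comparison described above.
   def compare_with_instructor(learner_selections, instructor_selections):
       """Return whether all selections match, plus a per-criterion message."""
       results = {}
       for criterion, instructor_option in instructor_selections.items():
           if learner_selections.get(criterion) == instructor_option:
               results[criterion] = "Selected Options Agree"
           else:
               results[criterion] = "Selected Options Differ"
       all_match = all(message == "Selected Options Agree"
                       for message in results.values())
       return all_match, results

   # Example: the learner matched "Ideas" but not "Content", so the sample
   # response is shown again and the learner tries the assessment once more.
   matched, per_criterion = compare_with_instructor(
       {"Ideas": "Good", "Content": "Fair"},
       {"Ideas": "Good", "Content": "Excellent"},
   )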
For more information, see :ref:`PA Learner Training Step`.
=====================
Peer Assessment Step
=====================
In the peer assessment step, learners review other learners' responses and select an option for each criterion in your rubric based on the response. Learners can also provide text feedback, or comments, on the response.
Number of Responses and Assessments
************************************
When you specify a peer assessment step, you'll specify the **number of responses** each learner has to assess and the **number of peer assessments** each response has to receive.
.. note:: Because some learners may submit a response but not complete peer assessments, some responses may not receive the required number of assessments. To increase the chance that all responses will receive enough assessments, you must set the number of responses that learners have to assess to be higher than the number of assessments that each response must undergo. For example, if you require each response to receive three assessments, you could require each learner to assess five responses.
If all responses have received assessments, but some learners haven't completed the required number of peer assessments, those learners can assess responses that other learners have already assessed. The learner who submitted the response sees the additional peer assessments when he sees his score. However, the additional peer assessments do not count toward the score that the response receives.
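A quick back-of-the-envelope calculation shows why **Must Grade** should be higher than **Graded By**. The numbers below are hypothetical; the point is that requiring each learner to assess more responses than each response needs builds in slack for learners who submit a response but never assess their peers.

.. code-block:: python

   # Hypothetical numbers: 100 learners, each response needs 3 assessments,
   # and each learner is asked to assess 5 responses.
   learners = 100
   graded_by = 3      # peer assessments each response must receive
   must_grade = 5     # responses each learner must assess

   assessments_needed = learners * graded_by      # 300 assessments required in total
   assessments_supplied = learners * must_grade   # 500 if every learner participates fully

   # The surplus (here, 200 assessments) is the cushion that covers learners
   # who submit a response but do not complete their peer assessments.
   surplus = assessments_supplied - assessments_needed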
.. _Feedback Options:
Feedback Options
****************
By default, learners see a single comment field below the entire rubric. You can also add a comment field to an individual criterion or to several individual criteria. This comment field can contain up to 300 characters.
The comment field appears below the options for the criterion. In the following image, both criteria have a comment field. There is also a field for overall comments on the response.
.. image:: /Images/PA_CriterionAndOverallComments.png
:alt: Rubric with comment fields under each criterion and under overall response
:width: 600
For more information, see :ref:`PA Add Rubric` and :ref:`PA Criteria Comment Field Only`.
.. _PA Scoring:
Peer Assessment Scoring
***********************
Peer assessments are scored by criteria. An individual criterion's score is the median of the scores that each peer assessor gave that criterion. For example, if the Ideas criterion in a peer assessment receives a 10 from one learner, a 7 from a second learner, and an 8 from a third learner, the Ideas criterion's score is 8.
A learner's final score for a peer assessment is the sum of the median scores for each individual criterion.
For example, a response may receive the following scores from peer assessors:
.. list-table::
:widths: 25 10 10 10 10
:stub-columns: 1
:header-rows: 1
* - Criterion Name
- Peer 1
- Peer 2
- Peer 3
- Median
* - Ideas (out of 10)
- 10
- 7
- 8
- **8**
* - Content (out of 10)
- 7
- 9
- 8
- **8**
* - Grammar (out of 5)
- 4
- 4
- 5
- **4**
To calculate the final score, add the median scores for each criterion:
**Ideas median (8/10) + Content median (8/10) + Grammar median (4/5) = final score (20/25)**
Note, again, that final scores are calculated by criteria, not by individual assessor. Thus the response's score is not the median of the scores that each individual peer assessor gave the response.
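The calculation above can be reproduced in a few lines of code. This is an illustrative sketch rather than the platform's implementation; note that with an even number of assessments, Python's ``statistics.median`` averages the two middle values, and the platform's exact handling of that case may differ.

.. code-block:: python

   from statistics import median

   # The per-peer scores from the table above: criterion -> list of peer scores.
   scores = {
       "Ideas":   [10, 7, 8],   # out of 10
       "Content": [7, 9, 8],    # out of 10
       "Grammar": [4, 4, 5],    # out of 5
   }

   # Each criterion's score is the median of the peer scores for that criterion.
   criterion_scores = {name: median(values) for name, values in scores.items()}
   # {'Ideas': 8, 'Content': 8, 'Grammar': 4}

   # The final score is the sum of the per-criterion medians: 8 + 8 + 4 = 20 (out of 25).
   final_score = sum(criterion_scores.values())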
Assessing Additional Responses
********************************
Learners can assess more than the required number of responses. After a learner completes the peer assessment step, the step "collapses" so that just the **Assess Peers** heading is visible.
.. image:: /Images/PA_PAHeadingCollapsed.png
:width: 500
:alt: The peer assessment step with just the heading visible
If the learner clicks the **Assess Peers** heading, the step expands. The learner can then click **Continue Assessing Peers**.
.. image:: /Images/PA_ContinueGrading.png
:width: 500
:alt: The peer assessment step expanded so that "Continue Assessing Peers" is visible
=====================
Self Assessment Step
=====================
In self assessments, the learner sees his response followed by your rubric. As with peer assessments, the learner compares the rubric to his response and selects an option for each of the criteria.
If you include both peer and self assessments, we recommend that you include the peer assessment before the self assessment.
.. _PA Top Responses:
*****************************
Top Responses
*****************************
You can include a **Top Responses** section that shows the top-scoring responses that learners have submitted for the assignment, along with the scores for those responses. The **Top Responses** section appears below the learner's score information after the learner finishes every step in the assignment.
.. image:: /Images/PA_TopResponses.png
:alt: Section that shows the text and scores of the top three responses for the assignment
:width: 500
You can allow the **Top Responses** section to show between 1 and 100 responses. Keep in mind, however, that each response may be up to 300 pixels in height in the list. (For longer responses, learners can scroll to see the entire response.) We recommend that you specify 20 or fewer responses to prevent the page from becoming too long.
.. note:: It may take up to an hour for a high-scoring response to appear in the **Top Responses** list.
For more information, see :ref:`PA Show Top Responses`.
.. _PA for Learners:
###########################################
Open Response Assessments for Learners
###########################################
You may want to let your learners know what to expect when they complete open response assessments. This guide walks learners through each step of the process.
**************************************************
Learner Introduction to Open Response Assessments
**************************************************
In an open response assessment, you'll provide a response to a question that may not have a simple or definitive answer. Some open response assessments have asked learners to submit written responses, videos of speeches, and computer code.
Open response assessments may include a peer assessment, a self assessment, or both. With a peer assessment, you'll assess, or grade, responses that several of your peers have submitted, and several of your peers will assess your response. With a self assessment, you'll assess your own response. To assess a response, you'll compare the response to a rubric that the instructor provides.
A *rubric* is a list of expectations that a response should meet. Rubrics are made of *criteria* and *options*. *Criteria* describe characteristics that the response should have, such as topics the response should cover. The *options* for each of the criteria describe how well the response satisfies the criteria. In the following image, you can see a rubric with two criteria. Each of the criteria has several options.
.. image:: /Images/PA_S_Rubric.png
:alt: Rubric showing criteria and options
:width: 500
When you assess a response, you'll select the option that best describes the response for each of the criteria.
Some instructors create a **Top Responses** section that shows the top-scoring responses for the assignment and the scores that these responses received. If an instructor creates this section, you can see it below your score after you've completed each step of the assignment.
************************
Learner Instructions
************************
When you come to an open response assessment in the course, you'll see the question and a response field. After you submit your response, you'll assess some of your peers' responses, your own response, or both, depending on the assignment. You can see the steps that your assignment includes below the response field.
.. image:: /Images/PA_S_AsmtWithResponse.png
:alt: Open response assessment example with question, response field, and assessment types and status labeled
:width: 550
Here, we'll walk you through the process of completing an open response assessment that includes a learner training step, a peer assessment, and a self assessment:
#. Submit your response to a question.
#. Learn to assess responses.
#. Assess responses that other learners have submitted.
#. Assess your own response to the question.
#. Receive your score and provide feedback on the peer assessment.
At any time during the assessment, you can see your status at the bottom of the page under **Your Grade**. A message tells you the steps that you still have to perform before you can receive your grade. For example, you may see the following message:
.. code-block:: xml
Not Completed
You have not completed the peer assessment step and self assessment step of this problem.
=====================
Submit Your Response
=====================
Read the question carefully. Some instructors include important information in the question, such as how long your response must be or specific topics your response must cover.
.. note:: Your response must contain fewer than 10,000 words (approximately the equivalent of 20 pages of 8.5x11 paper, with text single-spaced).
After you compose a response, type it into the response field under **Your Response**, and then click **Submit your response and move to the next step**. If you can't finish your response all at once, you can click **Save Your Progress** to save a draft of your response, and then come back and submit it later.
After you submit your response, if other learners have already submitted responses, the peer assessment step starts immediately. However, you don't have to start grading right away. If you want to stop working and come back later, just refresh or reopen your browser when you come back. New peer responses will be available for you to grade.
If no other learners have submitted responses yet, you'll see the following message:
.. code-block:: xml
Waiting for Peer Responses
All submitted peer responses have been assessed. Check back later to see if more learners
have submitted responses. You'll receive your grade after you complete the peer assessment
and self assessment steps, and after your peers have assessed your response.
Note that you can view your response at any time after you submit it. To do this, click the **Your Response** heading to expand the response field.
.. image:: /Images/PA_S_ReviewResponse.png
:alt: Image of the Response field collapsed and then expanded
:width: 550
Submit an Image with Your Response
***********************************
Some assignments allow you to submit an image with your text response. If you can submit an image, you'll see buttons that you'll use to upload your image.
.. image:: /Images/PA_Upload_ChooseFile.png
:alt: Open response assessment example with Choose File and Upload Your Image buttons circled
:width: 500
To upload your image:
#. Click **Choose File**.
#. In the dialog box that opens, select the file that you want, and then click **Open**.
#. When the dialog box closes, click **Upload Your Image**.
.. note:: The image file must be a .jpg or .png file, and it must be smaller than 5 MB in size.
Your image appears below the response field, and the name of the image file appears next to the **Choose File** button. If you want to change the image, follow steps 1-3 again. You can only upload one image.
.. image:: /Images/PA_Upload_WithImage.png
:alt: Example response with an image of Paris
:width: 500
.. note:: You must submit text as well as your image in your response. You can't submit a response that doesn't contain text.
============================
Learn to Assess Responses
============================
In this step, you'll learn to assess responses effectively by reviewing and assessing sample responses that the instructor has provided. You'll try to select the same options for the response that the instructor selected.
.. note:: Not all instructors provide sample responses for training. If the instructor doesn't provide sample responses, this step won't appear in the assignment.
After you submit your response, one of the sample responses opens, together with the rubric for the assignment. Read the sample response and the rubric carefully, select the options that you think best reflect the response, and then click **Compare your selections with the instructor's selections**.
If all of your selections match the instructor's selections, the next sample response opens automatically.
If any of your selections doesn't match the instructor's selections, you'll see the response again, and the following message appears above the response:
.. code-block:: xml
Learning to Assess Responses
Your assessment differs from the instructor's assessment of this response. Review the
response and consider why the instructor may have assessed it differently. Then, try
the assessment again.
For each of the criteria, you'll see one of the following two messages, depending on whether your selections matched those of the instructor:
.. code-block:: xml
Selected Options Differ
The option you selected is not the option that the instructor selected.
.. code-block:: xml
Selected Options Agree
The option you selected is the option that the instructor selected.
In the following example, the learner chose one correct option and one incorrect option.
.. image:: /Images/PA_TrainingAssessment_Scored.png
:alt: Sample training response, scored
:width: 500
You'll continue to assess the sample response until the options you select for all criteria match the options the instructor selected.
When you've successfully assessed all of the sample responses, you'll move to the next step in the assignment.
=====================
Assess Peer Responses
=====================
When peer assessment starts, you'll see the original question, another learner's response, and the rubric for the assignment. Above the response you can see how many responses you'll assess and how many you've already assessed.
.. image:: /Images/PA_S_PeerAssmt.png
:alt: In-progress peer assessment
:width: 500
You'll assess these responses by selecting options in the rubric, the same way you assessed the sample responses in the "learn to assess responses" step. Additionally, this step has a field below the rubric where you can provide comments about the learner's response.
.. note:: Some assessments may have an additional **Comments** field for one or more of the assessment's individual criteria. You can enter up to 300 characters in these fields. In the following image, both criteria have a **Comments** field. There is also a field for overall comments on the response.
.. image:: /Images/PA_CriterionAndOverallComments.png
:alt: Rubric with comment fields under each criterion and under overall response
:width: 600
After you've selected options in the rubric and provided additional comments about the response in this field, click **Submit your assessment and move to response #<number>**.
When you submit your assessment of the first learner's response, another response opens for you. Assess this response in the same way that you assessed the first response, and then submit your assessment. You'll repeat these steps until you've assessed the required number of responses. The number in the upper-right corner of the step is updated as you assess each response.
Assess Additional Peer Responses
********************************
You can assess more peer responses if you want to. After you assess the required number of responses, the step "collapses" so that just the **Assess Peers** heading is visible.
.. image:: /Images/PA_PAHeadingCollapsed.png
:width: 500
:alt: The peer assessment step with just the heading visible
To assess more responses, click the **Assess Peers** heading to expand the step. Then, click **Continue Assessing Peers**.
.. image:: /Images/PA_ContinueGrading.png
:width: 500
:alt: The peer assessment step expanded so that "Continue Assessing Peers" is visible
=====================
Assess Your Response
=====================
When you've completed enough peer assessments, your self assessment opens. You'll see your response along with the same rubric that you used in the peer assessment step. Assess your response, and then click **Submit Your Assessment**.
==========================================
Receive Your Score and Provide Feedback
==========================================
After you submit your self assessment, if other learners are still assessing your response, you'll see the following message under the **Assess Your Response** step.
.. code-block:: xml
Your Grade: Waiting for Peer Assessment
Your response is still undergoing peer assessment. After your peers have assessed your
response, you'll see their feedback and receive your final grade.
If you see this message, keep checking back periodically until peer assessment is complete.
When peer assessment is complete, you can see the scores you received from all of your peers, as well as your self assessment. You can also see any additional comments that your peers have provided.
.. image:: /Images/PA_AllScores.png
:alt: A learner's response with peer and self assessment scores
:width: 550
If you want to, you can provide feedback on the scores that you received under **Provide Feedback on Peer Assessments**.
=================================================
Assess Additional Peer Responses (optional)
=================================================
If you've assessed the required number of peer responses and completed your self assessment, you can assess additional peer responses. To do this, click the **Assess Peers** heading. If any responses remain to be assessed, a new response opens.
***********************
Peer Assessment Scoring
***********************
Peer assessments are scored by criteria. An individual criterion's score is the *median*, not average, of the scores that each peer assessor gave that criterion. For example, if the Ideas criterion in a peer assessment receives a 10 from one learner, a 7 from a second learner, and an 8 from a third learner, the Ideas criterion's score is 8.
Your final score for a peer assessment is the sum of the median scores for each individual criterion.
For example, a response may receive the following scores from peer assessors:
.. list-table::
:widths: 25 10 10 10 10
:stub-columns: 1
:header-rows: 1
* - Criterion Name
- Peer 1
- Peer 2
- Peer 3
- Median
* - Ideas (out of 10)
- 10
- 7
- 8
- **8**
* - Content (out of 10)
- 7
- 9
- 8
- **8**
* - Grammar (out of 5)
- 4
- 4
- 5
- **4**
To calculate the final score, the system adds the median scores for each criterion:
**Ideas median (8/10) + Content median (8/10) + Grammar median (4/5) = final score (20/25)**
Note, again, that final scores are calculated by criteria, not by assessor. Thus your score is not the median of the scores that each individual peer assessor gave the response.
==================================
View Top Responses (optional)
==================================
If the instructor has included a **Top Responses** section, you can see the highest-scoring responses that your peers have submitted. This section only appears after you've completed all the steps of the assignment.
.. image:: /Images/PA_TopResponses.png
:alt: Section that shows the text and scores of the top three responses for the assignment
:width: 500
############
Change Log
############
***********
July 2014
***********
.. list-table::
:widths: 10 70
:header-rows: 1
* - Date
- Change
* - 07/15/14
- Added information about uploading an image file in a response to both :ref:`Peer Assessments` and :ref:`PA for Learners`.
* -
- Added information about providing a criterion that includes a comment field only to :ref:`Peer Assessments`.
# -*- coding: utf-8 -*-
#
import sys, os
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
sys.path.append(os.path.abspath('../../../'))
sys.path.append(os.path.abspath('../../'))
#from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
# General information about the project.
project = u'Creating a Peer Assessment'
copyright = u'2014, edX'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
.. Getting_Started documentation master file, created by
sphinx-quickstart on Tue Apr 16 11:19:12 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Creating Peer Assessments
========================================
.. toctree::
:numbered:
:maxdepth: 2
PeerAssessment
CreatePeerAssessment
Access_PA_Info
PeerAssessment_Students
*******
Read Me
*******
The edX *Building a Course with edX Studio* documentation is created
using RST_ files and Sphinx_. You, the user community, can help update and revise
this documentation project on GitHub::
https://github.com/edx/edx-platform/tree/master/docs/course_authors/source
To suggest a revision, fork the project, make changes in your fork, and submit
a pull request back to the original project: this is known as the `GitHub Flow`_.
All pull requests need approval from edX. For more information, contact edX at docs@edx.org.
.. _Sphinx: http://sphinx-doc.org/
.. _LaTeX: http://www.latex-project.org/
.. _`GitHub Flow`: https://github.com/blog/1557-github-flow-in-the-browser
.. _RST: http://docutils.sourceforge.net/rst.html
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
Q_FLAG =
ifeq ($(quiet), true)
Q_FLAG = -Q
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = $(Q_FLAG) -d $(BUILDDIR)/doctrees -c source $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/getting_started.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/getting_started.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/getting_started"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/getting_started"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
# -*- coding: utf-8 -*-
# Copyright 2013 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Sphinx napoleon extension."""
import sys
from sphinxcontrib.napoleon.docstring import GoogleDocstring, NumpyDocstring
class Config(object):
"""Sphinx napoleon extension settings in `conf.py`.
Listed below are all the settings used by napoleon and their default
values. These settings can be changed in the Sphinx `conf.py` file. Make
sure that both "sphinx.ext.autodoc" and "sphinxcontrib.napoleon" are
enabled in `conf.py`::
# conf.py
# Add any Sphinx extension module names here, as strings
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
.. _Google style:
http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
.. _NumPy style:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
Attributes
----------
napoleon_google_docstring : bool, defaults to True
True to parse `Google style`_ docstrings. False to disable support
for Google style docstrings.
napoleon_numpy_docstring : bool, defaults to True
True to parse `NumPy style`_ docstrings. False to disable support
for NumPy style docstrings.
napoleon_include_private_with_doc : bool, defaults to False
True to include private members (like ``_membername``) with docstrings
in the documentation. False to fall back to Sphinx's default behavior.
**If True**::
def _included(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
pass
def _skipped(self):
# This will NOT be included in the docs
pass
napoleon_include_special_with_doc : bool, defaults to True
True to include special members (like ``__membername__``) with
docstrings in the documentation. False to fall back to Sphinx's
default behavior.
**If True**::
def __str__(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
return unicode(self).encode('utf-8')
def __unicode__(self):
# This will NOT be included in the docs
return unicode(self.__class__.__name__)
napoleon_use_admonition_for_examples : bool, defaults to False
True to use the ``.. admonition::`` directive for the **Example** and
**Examples** sections. False to use the ``.. rubric::`` directive
instead. One may look better than the other depending on what HTML
theme is used.
This `NumPy style`_ snippet will be converted as follows::
Example
-------
This is just a quick example
**If True**::
.. admonition:: Example
This is just a quick example
**If False**::
.. rubric:: Example
This is just a quick example
napoleon_use_admonition_for_notes : bool, defaults to False
True to use the ``.. admonition::`` directive for **Notes** sections.
False to use the ``.. rubric::`` directive instead.
Note
----
The singular **Note** section will always be converted to a
``.. note::`` directive.
See Also
--------
:attr:`napoleon_use_admonition_for_examples`
napoleon_use_admonition_for_references : bool, defaults to False
True to use the ``.. admonition::`` directive for **References**
sections. False to use the ``.. rubric::`` directive instead.
See Also
--------
:attr:`napoleon_use_admonition_for_examples`
napoleon_use_ivar : bool, defaults to False
True to use the ``:ivar:`` role for instance variables. False to use
the ``.. attribute::`` directive instead.
This `NumPy style`_ snippet will be converted as follows::
Attributes
----------
attr1 : int
Description of `attr1`
**If True**::
:ivar attr1: Description of `attr1`
:vartype attr1: int
**If False**::
.. attribute:: attr1
:annotation: int
Description of `attr1`
napoleon_use_param : bool, defaults to False
True to use a ``:param:`` role for each function parameter. False to
use a single ``:parameters:`` role for all the parameters.
This `NumPy style`_ snippet will be converted as follows::
Parameters
----------
arg1 : str
Description of `arg1`
arg2 : int, optional
Description of `arg2`, defaults to 0
**If True**::
:param arg1: Description of `arg1`
:type arg1: str
:param arg2: Description of `arg2`, defaults to 0
:type arg2: int, optional
**If False**::
:parameters: * **arg1** (*str*) --
Description of `arg1`
* **arg2** (*int, optional*) --
Description of `arg2`, defaults to 0
napoleon_use_rtype : bool, defaults to False
True to use the ``:rtype:`` role for the return type. False to output
the return type inline with the description.
This `NumPy style`_ snippet will be converted as follows::
Returns
-------
bool
True if successful, False otherwise
**If True**::
:returns: True if successful, False otherwise
:rtype: bool
**If False**::
:returns: *bool* -- True if successful, False otherwise
"""
_config_values = {
'napoleon_google_docstring': (True, 'env'),
'napoleon_numpy_docstring': (True, 'env'),
'napoleon_include_private_with_doc': (False, 'env'),
'napoleon_include_special_with_doc': (True, 'env'),
'napoleon_use_admonition_for_examples': (False, 'env'),
'napoleon_use_admonition_for_notes': (False, 'env'),
'napoleon_use_admonition_for_references': (False, 'env'),
'napoleon_use_ivar': (False, 'env'),
'napoleon_use_param': (False, 'env'),
'napoleon_use_rtype': (False, 'env'),
}
def __init__(self, **settings):
for name, (default, rebuild) in self._config_values.iteritems():
setattr(self, name, default)
for name, value in settings.iteritems():
setattr(self, name, value)
def setup(app):
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
the ``setup()`` function, which in turn notifies Sphinx of everything
the extension offers.
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
See Also
--------
The Sphinx documentation on `Extensions`_, the `Extension Tutorial`_, and
the `Extension API`_.
.. _Extensions: http://sphinx-doc.org/extensions.html
.. _Extension Tutorial: http://sphinx-doc.org/ext/tutorial.html
.. _Extension API: http://sphinx-doc.org/ext/appapi.html
"""
from sphinx.application import Sphinx
if not isinstance(app, Sphinx):
return # probably called by tests
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
for name, (default, rebuild) in Config._config_values.iteritems():
app.add_config_value(name, default, rebuild)
def _process_docstring(app, what, name, obj, options, lines):
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
of docstring lines that `_process_docstring` modifies in place to change
what Sphinx outputs.
The following settings in conf.py control what styles of docstrings will
be parsed:
* ``napoleon_google_docstring`` -- parse Google style docstrings
* ``napoleon_numpy_docstring`` -- parse NumPy style docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process.
what : str
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
lines : list of str
The lines of the docstring, see above.
.. note:: `lines` is modified *in place*
"""
result_lines = lines
if app.config.napoleon_numpy_docstring:
docstring = NumpyDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
if app.config.napoleon_google_docstring:
docstring = GoogleDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
lines[:] = result_lines[:]
def _skip_member(app, what, name, obj, skip, options):
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
members are included in the generated documentation:
* ``napoleon_include_private_with_doc`` --
include private members if they have docstrings
* ``napoleon_include_special_with_doc`` --
include special members if they have docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
what : str
A string specifying the type of the object to which the member
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The name of the member.
obj : module, class, exception, function, method, or attribute.
For example, if the member is the __init__ method of class A, then
`obj` will be `A.__init__`.
skip : bool
A boolean indicating if autodoc will skip this member if `_skip_member`
does not override the decision
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Returns
-------
bool
True if the member should be skipped during creation of the docs,
False if it should be included in the docs.
"""
has_doc = getattr(obj, '__doc__', False)
is_member = (what == 'class' or what == 'exception' or what == 'module')
if name != '__weakref__' and name != '__init__' and has_doc and is_member:
if what == 'class' or what == 'exception':
if sys.version_info[0] < 3:
cls = getattr(obj, 'im_class', getattr(obj, '__objclass__',
None))
cls_is_owner = (cls and hasattr(cls, name) and
name in cls.__dict__)
elif sys.version_info[1] >= 3 and hasattr(obj, '__qualname__'):
cls_path, _, _ = obj.__qualname__.rpartition('.')
if cls_path:
import importlib
import functools
mod = importlib.import_module(obj.__module__)
cls = functools.reduce(getattr, cls_path.split('.'), mod)
cls_is_owner = (cls and hasattr(cls, name) and
name in cls.__dict__)
else:
cls_is_owner = False
else:
cls_is_owner = True
if what == 'module' or cls_is_owner:
is_special = name.startswith('__') and name.endswith('__')
is_private = not is_special and name.startswith('_')
inc_special = app.config.napoleon_include_special_with_doc
inc_private = app.config.napoleon_include_private_with_doc
if (is_special and inc_special) or (is_private and inc_private):
return False
return skip
# Packages we need in order to build the docs, separated out so that rtfd.org
# can install them.
path.py
sphinx
sphinxcontrib-napoleon
markupsafe
# need to install requirements for application, in order to import Python code
-r ../../../requirements/base.txt
.. _api:
Public API
----------
Every Django application in edx-ora2 has an `api.py` that is its public
interface. If you are using one of these applications from the outside, you
should only import things from that module. The ground rules for api modules
are:
1. All inputs and outputs must be trivially serializable to JSON. This means
`None`, `int`, `float`, `unicode`, `list`, `tuple`, `dict`, and `datetime`.
2. Returned objects should not have methods or business logic attached to them.
3. Callers should assume that these calls can be moderately expensive, as they
may one day move out of process and become network calls, so calling
something a hundred times in a loop should be avoided. (A hypothetical sketch
of such a module follows this list.)
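The sketch below is hypothetical (the function name and fields are invented, not
taken from the real modules); it only shows the shape an ``api.py`` entry point
takes under these rules, returning plain JSON-serializable values and never
leaking model objects:

.. code-block:: python

    # Hypothetical example of the ground rules above -- not the actual peer API.
    import datetime

    def get_assessment_summary(submission_uuid):
        """Return a plain dict describing the latest assessment for a submission.

        Callers receive only serializable types (str, int, dict, datetime),
        never ORM objects with methods or business logic attached.
        """
        # ... database queries would happen here ...
        return {
            "submission_uuid": submission_uuid,
            "points_earned": 20,
            "points_possible": 25,
            "scored_at": datetime.datetime(2014, 7, 15, 12, 0, 0),
        }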
Peer Assessment
***************
.. automodule:: openassessment.assessment.api.peer
:members:
Self Assessment
***************
.. automodule:: openassessment.assessment.api.self
:members:
Example-Based Assessment (AI)
*****************************
.. automodule:: openassessment.assessment.api.ai
:members:
Learner Training
****************
.. automodule:: openassessment.assessment.api.student_training
:members:
File Upload
***********
.. automodule:: openassessment.fileupload.api
:members:
Workflow
********
.. automodule:: openassessment.workflow
:members:
Django Apps
-----------
Assessment
**********
Models
++++++
.. automodule:: openassessment.assessment.models.base
:members:
.. automodule:: openassessment.assessment.models.peer
:members:
.. automodule:: openassessment.assessment.models.training
:members:
.. automodule:: openassessment.assessment.models.student_training
:members:
Workflow
********
Models
++++++
.. automodule:: openassessment.workflow.models
:members:
.. _ai_grading:
##########
AI Grading
##########
Overview
--------
In this document, we describe the architecture for:
* Training a classifier using a supervised machine learning algorithm.
* Grading learner essays using a trained classifier.
Both training and grading require more time than is acceptable within the
request-response cycle of a web application. Therefore, both
training and grading must occur asynchronously.
The architecture should not constrain the ML algorithm (or algorithms)
used by a particular implementation. It should be possible to replace
the ML algorithm with any supervised learning algorithm that produces
a text classifier.
We also avoid constraining the particular task queue implementation.
In principle, any task queue that provides basic reliability guarantees
and a retry mechanism will work (see :ref:`entities`).
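To make that pluggability concrete, here is a hedged sketch of the kind of
interface a supervised learning backend would need to satisfy; the class and
method names are hypothetical, not part of any actual implementation:

.. code-block:: python

    # Hypothetical interface for a pluggable ML backend -- names are illustrative.
    import abc

    class CriterionClassifier(abc.ABC):
        """Scores essays for a single rubric criterion."""

        @abc.abstractmethod
        def score(self, essay_text):
            """Return the number of points assigned for this criterion."""

    class TrainingAlgorithm(abc.ABC):
        """Trains one CriterionClassifier per rubric criterion."""

        @abc.abstractmethod
        def train(self, examples):
            """examples: list of (essay_text, {criterion: points}) pairs.

            Returns a dict mapping criterion name -> CriterionClassifier.
            """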
Requirements
------------
* Grading tasks *must* be completed within hours after being scheduled.
Ideally, the delay would be within several minutes, but learners could
tolerate longer delays during periods of high usage or failure recovery.
The AI Grading API does not implement deadlines, so as long as a response
is submitted for grading (which is allowed while the problem is open),
the learner will eventually receive a grade for the AI assessment step.
* Grading task queues must tolerate periods of high usage,
as the number of submissions will likely increase when
problem deadlines approach.
* Training tasks must also be completed within hours. Tasks will
likely be scheduled infrequently.
* Students must be able to submit even if classifiers have not yet been trained.
Course authors will need to grade submissions to create
the training examples used by the ML algorithm [#]_.
* Grading tasks must be fault tolerant. If failures occur during grading,
it must be possible to reschedule failed tasks.
.. [#] It may be possible for course authors to re-use submissions from a
previous run of a course (perhaps run with only peer- and/or self-assessment).
However, it's unclear whether this option will be acceptable to course teams.
.. _entities:
Entities
--------
* **AI Grading API**: An API that encapsulates all interactions with AI-grading database models and the task queue. All inputs and outputs are JSON-serializable, so the API calls can be made in-process (likely the initial implementation) or through the network.
* **Submission**: An essay submitted by a learner to a problem in a course.
* **Assessment**: Specifies the scores a submission received for each criterion in a rubric.
* **TrainingExample**: An example essay and associated scores (options selected in a rubric).
* **Classifier**: A function mapping submissions to scores.
* **Task** and **Task queue**: A unit of asynchronous work (training or grading) and the queue that delivers it to a worker.
* **Worker**: A process that picks up tasks from the queue and executes them.
Tasks
-----
Assumptions about the task queue implementation:
* **Reliability**: Each task placed on the queue will be picked up by at least one worker,
although the task may not complete successfully. Tasks should not get "lost"
before reaching a worker.
* **Retry**: A worker can reschedule a task to handle recoverable errors.
* **Parameters**: We can parametrize tasks with JSON-serializable inputs.
We do **NOT** require:
* **Task Status**: We don't need to be able to query the status of tasks in-flight.
* **Notifications**: We don't need to be notified about task status changes.
* **Worker Pull or Push**: We assume that workers will pick up tasks, but we don't care how they
find out about tasks.
* **Result storage**: Workers write results to the database (via the **AI Grading API**),
so the task queue does not need to handle storage of results.
* **Periodic tasks**: All tasks are triggered by user actions.
* **Synchronization**: Tasks are independent and idempotent
(at least from the perspective of clients of the **AI Grading API**), so we
do not require synchronization mechanisms.
Tasks should be idempotent from the perspective of clients using the **AI Grading API**:
* With the exception of "workflow" models, the database models created by tasks should be immutable and timestamped.
* The **AI Grading API** should return the *most recent* assessment/classifier.
The above assumptions ensure that running a task twice (with equivalent workflow parameters) will
not change the assessment/classifiers returned by the API, even if additional records are created
in the database.
Grading Task
============
.. image:: grading_task_diagram.png
Parameter: AI Grading Workflow ID
Procedure (a toy sketch of step 3, the worker side, appears after this list):
1. A learner submits an essay, creating a **submission** in the database.
2. The learner updates the workflow, and the **Workflow API** uses the **AI Grading API** to:
a. Retrieve the most recent **ClassifierSet** for the current rubric definition (possibly none if training hasn't yet finished).
b. Create an **AI Grading Workflow** record in the database, associated with a Submission ID and **ClassifierSet**.
c. Schedule a **Grading Task** parametrized by the workflow ID.
3. A worker picks up the **Grading Task** and uses the **AI Grading API** to:
a. Retrieve the submission and classifiers from persistent storage or a cache.
i. If the **ClassifierSet** is null, then the classifier wasn't available when the learner created the submission.
ii. Since we cannot grade the learner without a classifier, we create the **AI Grading Workflow** record but do not schedule the **Grading Task**. This means that the workflow will not be marked complete.
iii. When a **Training Task** completes, update incomplete grading workflows whose **ClassifierSet** is null to use the newly created **ClassifierSet**, then schedule their **Grading Tasks**.
b. **Optimization**: Check whether a completed **AI Grading Workflow** exists for this submission using the same **ClassifierSet**.
i. If so, set the current workflow's **Assessment** to the other workflow's **Assessment** and exit with success.
ii. This reduces the cost (in time) for rescheduling tasks that are in-flight but not yet completed (see :ref:`recovery_from_failure`).
iii. Even without this optimization, the task is idempotent. If a race condition occurs such that two **Assessments** are created, the **AI Grading API** will simply return the latest one.
c. Evaluate the submission using each classifier.
d. Create an **Assessment** with a type indicating that it is an AI assessment rather than self- or peer-assessment.
e. Create an **AssessmentPart** for each rubric criterion, containing the score assigned by the classifier for that criterion.
f. Mark the **AI Grading Workflow** as complete by associating the **Assessment** with the workflow.
4. When a learner checks the status of the submission, the **AI Grading API**:
a. Queries the database for the latest **AI Grading Workflow** matching the submission.
b. Reports whether the workflow is started or complete.
c. If the workflow is complete, the **AI Grading API** can retrieve the associated **Assessment**.
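The following toy, self-contained sketch condenses step 3 above into code; every
name and data structure is a stand-in for the real models and the **AI Grading
API**, and an in-memory dict takes the place of the database:

.. code-block:: python

    # Toy illustration of steps 3a-3f above; names and storage are stand-ins.
    completed = {}  # (submission_uuid, classifier_set_id) -> assessment dict

    def grading_task(submission_uuid, classifier_set_id, classifiers, essay_text):
        if classifiers is None:
            # 3a.i-ii: no classifier set yet; leave the workflow incomplete.
            return None
        key = (submission_uuid, classifier_set_id)
        if key in completed:
            # 3b: reuse the assessment from an equivalent completed workflow.
            return completed[key]
        assessment = {
            "type": "AI",  # 3d: distinguishes AI from peer/self assessments
            "parts": {criterion: fn(essay_text)  # 3c, 3e: one score per criterion
                      for criterion, fn in classifiers.items()},
        }
        completed[key] = assessment  # 3f: mark the workflow complete
        return assessment

    # Running the task twice with the same parameters is idempotent.
    toy_classifiers = {"Ideas": lambda text: 8, "Content": lambda text: 7}
    first = grading_task("sub-1", "cls-1", toy_classifiers, "An example essay.")
    second = grading_task("sub-1", "cls-1", toy_classifiers, "An example essay.")
    assert first is second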
Training Task
=============
.. image:: training_task_diagram.png
Parameter: AI Training Workflow ID
Procedure:
1. Course staff create **TrainingExamples** (using the same infrastructure, although not necessarily the same UI, as "Student Training" for peer assessment).
2. Course staff request that a classifier be trained based on staff assessments. Using the **AI Grading API**, the request handler:
a. Creates an **AI Training Workflow** record in the database, associated with **TrainingExamples** and an **ML Algorithm ID**.
b. Schedules a **Training Task** parametrized by the workflow ID.
3. A worker picks up the **Training Task** and uses the **AI Grading API** to:
a. Retrieve the *TrainingExamples* from persistent storage or a cache.
b. Verify that all *TrainingExamples* use the same rubric.
c. Train a classifier for each rubric criterion. Choose the training algorithm based on the **ML Algorithm ID**.
d. Commit the trained classifiers to persistent storage.
e. Mark the **AI Training Workflow** as complete.
4. The worker uses the **AI Grading API** to schedule **AI Grading Tasks** for submissions made before the model was trained. This is the same procedure used to manually reschedule grading tasks after a non-recoverable error (see :ref:`recovery_from_failure`).
5. Course staff can query the status of the training task using the **AI Grading API**, which in turn checks whether the latest **AI Training Workflow** is marked as started or complete.
Queues
------
In the simplest implementation, we could create separate queues dedicated to each task type. It makes sense to separate the queues because the two task types have very different usage patterns and performance requirements (a sketch of one possible queue configuration follows the list):
* **Training Tasks**: Infrequent; approximately 25-60 minutes per task, depending on the number of training examples and criteria.
* **Grading Tasks**: Frequent; less than 2 minutes per task (varies with the size of the submission and the number of criteria).
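Assuming a Celery-based task queue (one possible implementation; Celery is not
required by this design, and the task and queue names below are invented), the
routing might look like this:

.. code-block:: python

    # settings.py -- a hedged sketch assuming Celery; task and queue names are illustrative.
    CELERY_ROUTES = {
        "openassessment.tasks.train_classifiers": {"queue": "ai_training"},
        "openassessment.tasks.grade_essay": {"queue": "ai_grading"},
    }

    # Workers for each queue can then be provisioned and scaled independently:
    #   celery worker -Q ai_training --concurrency=1
    #   celery worker -Q ai_grading  --concurrency=8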
.. _recovery_from_failure:
Recovery from Failure
---------------------
1. A scheduled task is not completed:
a. We assume that the task queue is (for the most part) *reliable*: If a task is scheduled, then a worker will pick it up and execute it (although it might not complete the task successfully). Even if tasks occasionally are dropped, however, we can rely on the error recovery procedure below.
b. If an error occurs, first retry the task. This allows the worker to gracefully handle recoverable errors such as temporary network connectivity issues.
c. If a task fails repeatedly, the worker should log the failure as a non-recoverable error and stop retrying the task. Once the issue has been fixed, failed tasks should be rescheduled manually (e.g. by a command that queries for incomplete workflows and reschedules tasks).
2. A course author publishes a problem without training classifiers.
a. All grading tasks scheduled without a classifier available will fail.
b. We consider this a non-recoverable error and manually reschedule the tasks once the classifiers have been trained.
3. A course author modifies rubric criteria after training classifiers.
a. If the problem has **NOT** been published: Warn the author that they will need to retrain the classifier before letting them save.
b. If the problem **HAS** been published: Display a validation error and prevent the save (this is the current behavior).
4. Workers are not processing tasks quickly enough, causing queues to back up.
a. Monitor queue length and alert if queue length is too long.
b. Configure workers to time out if a task is taking too long to complete.
c. Horizontally scale workers to handle additional load.
Notes:
* The storage backend is pluggable. In production, we use Amazon S3, but in principle we could use other backends (including the local filesystem in local dev).
* Unfortunately, the ML algorithm we will use for initial release (EASE) requires that we
persist the trained classifiers using Python's ``pickle`` module. This has security implications
(if the persisted classifiers are compromised, then someone could run arbitrary code on the workers);
it also creates dependencies on external libraries used to create the pickled object (e.g. ``scikit-learn``).
The proposed design accommodates the requirement that we use ``pickle``,
but would also work with classifiers serialized to other formats -- we'd simply use a different
algorithm ID and store the classifier in a non-pickle format.
.. _fileupload:
##########
FileUpload
##########
Overview
--------
In this document, we describe the use of the File Upload API.
By design, this is a simple API for requesting an Upload URL or Download URL
for a piece of content. How the content is actually stored is determined by
the implementation of the File Upload Service.
This project initially ships with one File Upload Service implementation,
which retrieves Upload and Download URLs for Amazon S3.
The URLs provided by the File Upload API are intended to be used to upload and
download content from the client to the content store directly.
To provide a seamless interaction on the client, this may require an AJAX
request to first retrieve the URL and then upload the content. Such requests
are restricted by the browser's same-origin policy, but the restriction can be
lifted through CORS configuration on the content store.
Configuration
-------------
The Amazon S3 File Upload Service requires the following settings to be
configured (a sample settings fragment appears below):
* AWS_ACCESS_KEY_ID - The AWS Access Key ID.
* AWS_SECRET_ACCESS_KEY - The associated AWS Secret Access Key.
* FILE_UPLOAD_STORAGE_BUCKET_NAME - The name of the S3 Bucket configured for uploading and downloading content.
* FILE_UPLOAD_STORAGE_PREFIX (optional) - The file prefix within the bucket for storing all content. Defaults to 'submissions_attachments'
Note that your S3 bucket must have a DNS compliant name, which will be used by
the File Upload Service to generate the upload and download URLs.
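A minimal Django settings fragment using these keys might look like the
following; the credentials and bucket name are placeholders:

.. code-block:: python

    # settings.py -- placeholder values; substitute your own credentials and bucket.
    AWS_ACCESS_KEY_ID = "AKIAXXXXXXXXXXXXXXXX"
    AWS_SECRET_ACCESS_KEY = "replace-with-your-secret-key"
    FILE_UPLOAD_STORAGE_BUCKET_NAME = "my-ora2-uploads"      # must be DNS compliant
    FILE_UPLOAD_STORAGE_PREFIX = "submissions_attachments"   # optional; this is the default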
In addition, your S3 bucket must have a CORS configuration that allows PUT
and GET requests to be performed across request origins. To set this up, you must:
1. Log into Amazon AWS
2. Select S3 from the available applications
3. Expand the "Permissions" section
4. Click "Edit CORS configuration"
5. Your CORS configuration must have the following values:
.. code-block:: xml
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedHeader>*</AllowedHeader>
<AllowedMethod>PUT</AllowedMethod>
<AllowedMethod>GET</AllowedMethod>
</CORSRule>
</CORSConfiguration>
Note that you must configure an IAM user and role for access to your S3 bucket.
1. From Amazon AWS, select services, IAM.
2. Select Groups
3. Create a new 'upload' group.
4. This new group will require a policy. The following is a lenient upload
policy for S3:
.. code-block:: json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1403207543000",
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"*"
]
}
]
}
5. Create a new user and add this user to the new 'upload' group. Choose to
generate a new access key for this user.
6. This new access key must be used in the settings described above:
AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY.
.. _Architecture index:
############
Architecture
############
.. toctree::
:maxdepth: 2
workflow
ai_grading
fileupload
.. _workflow:
##########################
Understanding the Workflow
##########################
The `openassessment.workflow` application is tasked with managing the overall
life-cycle of a learner's submission as it goes through various evaluation steps
(e.g. peer assessment, self assessment). A new workflow entry is created as soon
as the learner submits their response to a question, and the steps (and their
order) are initialized at that time.
Canonical Status
Except in the case of `done`, the `status` value stored in the
`AssessmentWorkflow` model is not the canonical status. This is because the
determination of what we need to do in order to be "done" is specified by the
OpenAssessmentBlock problem definition and can change. So every time we are
asked where the learner is, we have to query the assessment APIs (peer, self,
AI, etc.) with the latest requirements (e.g. "number of submissions you have
to assess = 5"). The "status" field on this model is an after-the-fact
recording of the last known state of that information so we can search
easily.
However, once a workflow has transitioned to `done`, it means that a score
has been created for this workflow and it should not be possible to pull it
back into an "in progress" state. Once you're finished, you're finished.
Isolation of Assessment types
The various assessment types a workflow invokes are not aware of where they
are in the grander scheme of things. So for instance, the peer assessment API
is unaware that self assessment exists and vice versa. The overall workflow
is responsible for querying these sub-APIs through a small set of pre-defined
calls (a toy stub implementing these calls is sketched below):
`bool is_submitter_done(submission_uuid)`
Has the person submitting the problem (the owner of `submission_uuid`) done
everything they need to do in order to advance to the next step?
`bool is_assessment_done(submission_uuid)`
Is there enough information to score this assessment step? In the case of
peer grading, this would mean that a sufficient number of people have
assessed it.
`dict get_score(submission_uuid)`
Returns a dict with keys `points_earned` and `points_possible` where both
values are integers. This represents the recorded score for this
assessment step. If no score is possible at this time, return `None`. Once
a non `None` value has been returned by this function for a given
`submission_uuid`, repeated calls to this function should return the same
thing.
`on_init(submission_uuid)`
Notification to the API that the learner has submitted a response.
`on_start(submission_uuid)`
Notification to the API that the learner has started the assessment step.
In the long run, it could be that `OpenAssessmentBlock` becomes a wrapper
that talks to child XBlocks via this kind of API, and that each child would
be responsible for answering these questions and proxy to relevant backend
APIs as appropriate. For now though, the workflow just calls these things
directly.
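To ground this, here is a toy in-memory stub that exposes the calls listed
above; the function names come from the list, while the internals are invented
purely for illustration:

.. code-block:: python

    # Toy in-memory stub of the per-assessment-type interface described above.
    _scores = {}      # submission_uuid -> {"points_earned": int, "points_possible": int}
    _started = set()  # submission_uuids whose assessment step has started

    def on_init(submission_uuid):
        """Notification that the learner has submitted a response."""
        _scores.setdefault(submission_uuid, None)

    def on_start(submission_uuid):
        """Notification that the learner has started this assessment step."""
        _started.add(submission_uuid)

    def is_submitter_done(submission_uuid):
        """Has the submitter done everything needed to advance past this step?"""
        return submission_uuid in _started  # a real API would check actual work done

    def is_assessment_done(submission_uuid):
        """Is there enough information to score this assessment step?"""
        return _scores.get(submission_uuid) is not None

    def get_score(submission_uuid):
        """Return {"points_earned": ..., "points_possible": ...}, or None if unscored."""
        return _scores.get(submission_uuid)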
Determining Completion
We start with the first assessment step as a status (unsubmitted items have
no workflow, so there is no step for that). As each step completes, we move
to the next one specified in the XML. If we have completed all steps, then
we check whether `is_assessment_done()` is `True` for all steps. If
the assessments are complete, we give the submission a `Score` in the
submissions API. If there are steps for which the assessment is incomplete,
we move the status to `waiting`.
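Read as code, the completion check is a small loop over the configured steps.
The following is a hedged sketch, not the actual workflow implementation:

.. code-block:: python

    # Hedged sketch of the completion logic described above.
    def update_status(steps, submission_uuid):
        """steps: ordered (name, api_module) pairs for the configured assessment steps."""
        for name, api in steps:
            if not api.is_submitter_done(submission_uuid):
                return name        # status is the first step the submitter hasn't finished
        if all(api.is_assessment_done(submission_uuid) for _, api in steps):
            return "done"          # at this point a Score is recorded via the submissions API
        return "waiting"           # submitter finished, but some step is not yet assessed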
Simple Order/Dependency Assumptions
We assume that the user is never blocked from going to the next step because
a previous step needs assessment. They may be unable to proceed because they
can't complete a step for reasons beyond their control (e.g. there are no
other submissions to assess in peer), but the gating check is always
`is_submitter_done()`, never `is_assessment_done()`. There are no gating
dependencies where, for instance, you might be restricted from starting peer
grading until AI grading has assessed you and given you a score.
This assumption will not be true forever, but it's a simplifying assumption
for the next six months or so.
Steps Stay Completed
In the interests of not surprising/enraging learners, once a step is complete,
it stays complete. So if peer grading requires two assessors and a particular
submission meets that threshold, it will be considered complete at that point
in time. Raising the threshold to three required assessors in the future will
not cause that step for that submission workflow to be considered incomplete
again.
Handling Problem Definition Change
Not all of this is going to be implemented in the first cut, but a longer
term plan for how conflict resolution should happen in the case of the
overall submission workflow:
1. Any completed steps stay completed. If a completed step is no longer part
of the workflow (e.g. we removed self-assessment), then we keep around
the model information for that step anyway, but just don't reference it.
2. If the sequence of steps changes, we look at the new steps and advance to
the first step that the user has not completed (`is_submitter_done()`
returns `False`).
Django settings
Assessments in the workflow are configurable using Django settings.
This decouples the workflow API from the individual assessment modules.
The two settings are:
* `ORA2_ASSESSMENTS`: a `dict` mapping assessment names to the Python module path
of the corresponding assessment API.
* `ORA2_ASSESSMENT_SCORE_PRIORITY`: a `list` of assessment names that determines
which assessment type is used to generate a learner's score (see the sketch below).
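For example, a settings fragment along these lines would wire up peer and self
assessment; the module paths match the public API modules documented in the API
section, but treat the exact keys and their ordering as illustrative:

.. code-block:: python

    # Illustrative Django settings; module paths follow the public API modules.
    ORA2_ASSESSMENTS = {
        'peer': 'openassessment.assessment.api.peer',
        'self': 'openassessment.assessment.api.self',
    }

    # If more than one assessment produces a score, 'peer' takes priority here.
    ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']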
# -*- coding: utf-8 -*-
#
# edx-ora2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 17 08:32:59 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from path import path
#import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# on_rtd indicates whether we are building on readthedocs.org; this line was adapted from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "openassessment/assessment")
sys.path.append(root / "openassessment/management")
sys.path.append(root / "openassessment/templates")
sys.path.append(root / "openassessment/workflow")
sys.path.append(root / "openassessment/xblock")
sys.path.append(root / "openassessment/templates")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.base")
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'edx-ora2'
copyright = u'2014, edX.org'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'edx-ora2-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'edx-ora2.tex',
u'edX ORA 2 Documentation',
u'edX.org',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'edx-ora2', u'edX ORA 2 Documentation',
[u'edX.org'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'edx-ora2',
u'edX ORA 2 Documentation',
u'edX.org',
'edx-ora2',
'Grading Modules',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
.. edx-ora2 documentation master file, created by
sphinx-quickstart on Fri Jan 17 08:32:59 2014.
Open Response Assessment (v2)
======================================
This is an initial prototype for redesigning Peer Grading and general Open Ended
Submission Evaluation. This project is in the early stages of development and is
not ready for general use.
Setup
-----
::
See the `README <https://github.com/edx/edx-ora2/blob/master/README.rst>`_
Developer Documentation
-----------------------
.. toctree::
:maxdepth: 2
architecture/index
Migrating AI Problems to ORA2
-----------------------------
.. toctree::
:maxdepth: 2
migrate_ai
API Documentation
-----------------
.. toctree::
:maxdepth: 2
api
.. _migrate_ai:
Migrating AI Problems
---------------------
ORA2 supports AI assessment for learner responses, but currently does not support authoring of AI problems. In order to migrate an existing AI assessment problem into ORA2, you will need to:
1. Create a problem with example-based assessment enabled.
a. Create an ORA2 problem in a course. See `the user documentation <http://edx.readthedocs.org/projects/edx-open-response-assessments>`__ for directions.
b. `Export the course using Studio <http://ca.readthedocs.org/en/latest/building_course/export_import_course.html>`__
c. Untar the exported course and find the problem XML. You can search for the ``openassessment`` XML tag.
d. Add the AI ("example-based") assessment to the XML, including the example essays and scores. The selected criteria and options **must** match the rubric in the XML definition.
.. code:: xml
<assessment name="example-based-assessment" algorithm_id="ease">
<example>
<answer>First essay</answer>
<select criterion="Ideas" option="Bad" />
<select criterion="Content" option="Bad" />
</example>
<example>
<answer>Second essay</answer>
<select criterion="Ideas" option="Good" />
<select criterion="Content" option="Bad" />
</example>
<example>
<answer>Third essay</answer>
<select criterion="Ideas" option="Bad" />
<select criterion="Content" option="Good" />
</example>
</assessment>
..
e. Archive the course in "tar.gz" format.
f. `Import the course into Studio <http://ca.readthedocs.org/en/latest/building_course/export_import_course.html>`__
2. Train classifiers.
a. Log in to the LMS as global staff. (If your account does not have global staff permissions, you will need to run a Django management command).
b. Navigate to the ORA2 problem you created.
c. In the "Course Staff Information" section (at the bottom of the problem), click the "Schedule Example-Based Training" button.
d. When training completes (this should take roughly an hour), the "Course Staff Information" section will show that a classifier has been trained.
.. image:: course_staff_ai.png
3. At this point, learners can submit essays and receive grades.
#!/usr/bin/env bash
# Need to exit with an error code to fail the Travis build
set -e
pip install -q Sphinx sphinx_rtd_theme
# go into docs directory
cd docs/en_us
# build course authors docs
cd course_authors
if [ -f requirements.txt ]; then
pip install -q -r requirements.txt
fi
make html
cd ..
# build developer docs
cd developers
if [ -f requirements.txt ]; then
pip install -q -r requirements.txt
fi
make html
cd ..
# go back where we started
cd ../..
@@ -7,4 +7,3 @@ cd `dirname $BASH_SOURCE` && cd ..
export DJANGO_SETTINGS_MODULE=${DJANGO_SETTINGS_MODULE:-"settings.test_with_coverage"}
./scripts/test-python.sh $1
./scripts/test-js.sh
./scripts/build-docs.sh