Commit 36c445ba by Mark Hoeber

Notice on Developer's Guide move

parent 6be35e0f
......@@ -6,6 +6,7 @@ The following documentation projects have been moved to the `edx-documentation`_
* course_authors
* data
* developers
* install_operations
* mobile
* OLX
......
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
PAPER ?=
BUILDDIR ?= build
# User-friendly check for sphinx-build
# ($$? reaches the shell as $?, the exit status of `which`; status 1 means
# the sphinx-build executable was not found on PATH.)
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Run `make <target> quiet=true` to pass sphinx-build's -Q (quiet) flag.
Q_FLAG =
ifeq ($(quiet), true)
Q_FLAG = -Q
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# -d: doctree cache directory; -c: directory containing conf.py; the final
# `source` argument is the documentation source directory.
ALLSPHINXOPTS = $(Q_FLAG) -d $(BUILDDIR)/doctrees -c source $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
# Conventional self-documentation target: lists every supported builder.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html to make standalone HTML files"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
# Remove everything under $(BUILDDIR). $(RM) expands to `rm -f`, so a missing
# or empty build directory is not an error and the previous `-` (ignore-errors)
# recipe prefix is no longer needed. $(BUILDDIR) defaults to `build` above and
# is never empty, so the glob cannot degenerate to `/*`.
clean:
	$(RM) -r $(BUILDDIR)/*
# Build standalone HTML pages into $(BUILDDIR)/html.
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
# Build HTML with one index.html per directory into $(BUILDDIR)/dirhtml.
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
# Build one single large HTML page into $(BUILDDIR)/singlehtml.
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
# Serialize doctrees as pickle files into $(BUILDDIR)/pickle.
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."
# Serialize doctrees as JSON files into $(BUILDDIR)/json.
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."
# HTML plus an .hhp project file for HTML Help Workshop.
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."
# HTML plus a Qt help project; the follow-up commands are echoed for the user.
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/edX.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/edX.qhc"
# GNOME Devhelp output. Note $$HOME: the doubled $ passes a literal $HOME
# through to the shell.
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/edX"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/edX"
	@echo "# devhelp"
# Build an epub into $(BUILDDIR)/epub.
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
# Generate LaTeX sources only; use `make latexpdf` to also run pdflatex.
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."
# Generate LaTeX, then drive the generated Makefile's all-pdf target.
# $(MAKE) (rather than bare `make`) propagates -j/-n and the jobserver.
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
# Plain-text output into $(BUILDDIR)/text.
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."
# Unix manual pages into $(BUILDDIR)/man.
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
# Texinfo sources only; use `make info` to also run makeinfo.
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	"(use \`make info' here to do that automatically)."
# Generate Texinfo sources, then run them through makeinfo via the generated
# Makefile. Fix: use $(MAKE) instead of a bare `make` so that -j/-n and the
# jobserver are propagated to the sub-make (consistent with the latexpdf rule).
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
# PO message catalogs; uses I18NSPHINXOPTS because the i18n builder cannot
# share doctrees with the other builders (see the variable definition above).
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
# Overview of changed/added/deprecated items.
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
# Check all external links for integrity; results land in linkcheck/output.txt.
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."
# Run doctests embedded in the documentation.
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."
######################
EdX Developer's Guide
######################
We have moved the edX Developer's Guide to the `edx-documentation`_ repository
as of January 13, 2015.
By moving documentation to its own repository, we will be better able to
develop workflows, manage versioning, create translations, and automate
testing, without complicating ongoing development of the edX Platform.
.. _edx-documentation: https://github.com/edx/edx-documentation
******************************
EdX Platform Docstrings
******************************
We are in the process of creating a new project to publish docstrings for the
edX Platform. Docstrings will be included for CMS, LMS, and Common modules.
This project will remain in the edX Platform repository, in the docs/en-us
directory.
******************************
View Published Documentation
******************************
EdX documentation is published through Read the Docs. Links to all published
documentation are available through `docs.edx.org`_.
.. _docs.edx.org: http://docs.edx.org
******************************
Submit Documentation Issues
******************************
We welcome input from the community on any documentation issues. You can
submit issues to the Documentation project in the `Open edX JIRA board`_.
You will need to `create a free JIRA account`_ before you can submit your first
ticket.
.. _create a free JIRA account: https://openedx.atlassian.net/admin/users/sign-up
.. _Open edX JIRA board: https://openedx.atlassian.net
You can also email docs@edx.org.
path.py
Django >=1.4,<1.5
pytz
-e git+https://github.com/edx/XBlock.git#egg=XBlock
lxml
sphinxcontrib-napoleon==0.2.6
stevedore==0.14.1
{% extends "!layout.html" %}
{# Override the theme's header block to show the edX banner image.
   Jinja comments are stripped at render time, so output is unchanged. #}
{% block header %}
<img src="{{ pathto("_static/homepage-bg.jpg", 1) }}" alt="Edx logo" width="100%;"/>
{% endblock %}
\ No newline at end of file
.. _analytics:
##############
Analytics
##############
The edX LMS and Studio are instrumented to enable tracking of metrics and events of interest. These data can be used for educational research, decision support, and operational monitoring.
The primary mechanism for tracking events is the `Event Tracking`_ API. It should be used for the vast majority of cases.
=================
Event Tracking
=================
The `event-tracking`_ library aims to provide a simple API for tracking point-in-time events. The `event-tracking documentation`_ summarizes the features and primary use cases of the library as well as the current and future design intent.
Emitting Events
*****************
Emitting from server-side python code::
from eventtracking import tracker
tracker.emit('some.event.name', {'foo': 'bar'})
Emitting from client-side coffee script::
Logger.log 'some.event.name', 'foo': 'bar'
.. note::
The client-side API currently uses a deprecated library (the ``track`` djangoapp) instead of the event-tracking library. Eventually, event-tracking will publish a client-side API of its own: when available, that API should be used instead of the ``track``-based solution. See :ref:`deprecated_api`.
Naming Events
==============
Event names are intended to be formatted as `.` separated strings and help processing of related events. The structure is intended to be `namespace.object.action`. The namespace is intended to be a `.` separated string that helps identify the source of events and prevent events with similar or identical objects and actions from being confused with one another. The object is intended to be a noun describing the object that was acted on. The action is intended to be a past tense verb describing what happened.
Examples:
* ``edx.course.enrollment.activated``
* Namespace: ``edx``
* Object: ``course.enrollment``
* Action: ``activated``
Choosing Events to Emit
========================
Consider emitting events to capture user intent. These will typically be emitted on the client side when a user
interacts with the user interface in some way.
Consider also emitting events when models change. Most models are not append-only and it is frequently the case that an
analyst would want to see the value of a particular field at a particular moment in time. Given that many fields are
overwritten, that information is lost unless an event is emitted when the model is changed.
Sensitive Information
=====================
By default, event information is written to an unencrypted file on disk. Therefore, do not include clear text passwords, credit card numbers, or other similarly sensitive information.
Size
======
A cursory effort to regulate the size of the event is appreciated. If an event is too large, it may be omitted from the event stream. However, do not sacrifice the clarity of an event in an attempt to minimize size. It is much more important that the event is clear.
Debugging Events
=================
On devstack, emitted events are stored in the ``/edx/var/log/tracking.log`` log
file. This file can be useful for validation and debugging.
Documenting Events
*******************
The *edX Platform Developer's Guide* provides guidelines for `Contributing to
Open edX <http://edx.readthedocs.org/projects/edx-developer-
guide/en/latest/process/index.html>`_. As part of your effort to add events to
the platform, consider including comments that identify the purpose of the
events and the fields emitted for them. A description can assure that
researchers and other members of the open edX community understand your intent
and use the event correctly.
The `edX Research Guide
<http://edx.readthedocs.org/projects/devdata/en/latest/>`_ includes reference
information for emitted events that are included in tracking logs.
Request Context Middleware
**********************************
The platform includes a middleware class that enriches all events emitted
during the processing of a given request with details about the request that
greatly simplify downstream processing. This is called the ``TrackMiddleware``
and can be found in ``edx-platform/common/djangoapps/track/middleware.py``.
Legacy Application Event Processor
**********************************
In order to support legacy analysis applications, the platform emits standard events using ``eventtracking.tracker.emit()``. However, it uses a custom event processor which modifies the event before saving it to ensure that the event can be parsed by legacy systems. Specifically, it replicates some information so that it is accessible in exactly the same way as it was before. This state is intended to be temporary until all existing legacy systems can be altered to use the new field locations.
=======================
Other Tracking Systems
=======================
The following tracking systems are currently used for specialized analytics. There is some redundancy with event-tracking that is undesirable. The event-tracking library could be extended to support some of these systems, allowing for a single API to be used while still transmitting data to each of these service providers. This would reduce discrepancies between the measurements made by the various systems and significantly clarify the instrumentation.
Data Dog
*****************
Data dog is used primarily for real-time operational monitoring of a running edX platform server. It supports rapid display and monitoring of various metrics within the platform such as enrollments, user creation and answers to problems.
edX platform is instrumented to send data to `data dog`_ using the standard `dogapi`_ python package. If ``lms.auth.json`` contains a ``DATADOG_API`` key whose value is a valid data dog API key, then the edX platform will transmit a variety of metrics to data dog. Running ``git grep dog_stats_api`` will give a pretty good overview of the usage of data dog to track operational metrics.
Segment.IO
*****************
A selection of events can be transmitted to segment.io in order to take advantage of a wide variety of analytics-related third party services such as Mixpanel and Chartbeat. It is enabled in the LMS if the ``SEGMENT_IO_LMS`` feature flag is enabled and the ``SEGMENT_IO_LMS_KEY`` key is set to a valid segment.io API key in the ``lms.auth.json`` file.
Google Analytics
*****************
Google analytics tracks all LMS page views. It provides several useful metrics such as common referrers and search terms that users used to find the edX web site.
.. _deprecated_api:
Deprecated APIs
*****************
The ``track`` djangoapp contains a deprecated mechanism for emitting events. Direct usage of ``server_track`` is deprecated and should be avoided in new code. Old calls to ``server_track`` should be replaced with calls to ``tracker.emit()``. The celery task-based event emission and client-side event handling do not currently have a suitable alternative approach, so they continue to be supported.
.. _event-tracking: https://github.com/edx/event-tracking
.. _event-tracking documentation: http://event-tracking.readthedocs.org/en/latest/overview.html#event-tracking
.. _data dog: http://www.datadoghq.com/
.. _dogapi: http://pydoc.datadoghq.com/en/latest/
.. include:: ../../shared/browsers.rst
\ No newline at end of file
*******************************************
Calc
*******************************************
.. automodule:: calc
:members:
:show-inheritance:
*******************************************
Capa module
*******************************************
Contents:
.. module:: capa
.. toctree::
:maxdepth: 2
Capa_problem
============
.. automodule:: capa.capa_problem
:members:
:show-inheritance:
Checker
=======
.. automodule:: capa.checker
:members:
:show-inheritance:
Correctmap
==========
.. automodule:: capa.correctmap
:members:
:show-inheritance:
Customrender
============
.. automodule:: capa.customrender
:members:
:show-inheritance:
Inputtypes
==========
.. automodule:: capa.inputtypes
:members:
:show-inheritance:
Responsetypes
=============
.. automodule:: capa.responsetypes
:members:
:show-inheritance:
**********
Change Log
**********
.. list-table::
:widths: 15 75
:header-rows: 1
* - Date
- Change
* - 11/07/14
- Created the :ref:`edX Modulestores` section and added the :ref:`Split
Mongo Modulestore` chapter.
* - 06/23/14
- Added a :ref:`Preface` with resources for course teams, developers, researchers, and students.
* - 05/20/14
- Added the :ref:`Analytics` chapter.
* - 04/25/2014
- Added the :ref:`Contributing to Open edX` chapter.
* - 03/28/2014
- Added the :ref:`Custom JavaScript Applications` chapter.
*******************************************
Chemistry modules
*******************************************
.. module:: chem
Miller
======
.. automodule:: chem.miller
:members:
:show-inheritance:
UI part and inputtypes
----------------------
Miller module is used in the system in crystallography problems.
Crystallography is a class in :mod:`capa` inputtypes module.
It uses *crystallography.html* for rendering and **crystallography.js**
for UI part.
Documentation from **crystallography.js**::
For a crystallographic problem of the type
Given a plane definition via miller indexes, specify it by plotting points on the edges
of a 3D cube. Additionally, select the correct Bravais cubic lattice type depending on the
physical crystal mentioned in the problem.
we create a graph which contains a cube, and a 3D Cartesian coordinate system. The interface
will allow to plot 3 points anywhere along the edges of the cube, and select which type of
Bravais lattice should be displayed along with the basic cube outline.
When 3 points are successfully plotted, an intersection of the resulting plane (defined by
the 3 plotted points), and the cube, will be automatically displayed for clarity.
After plotting the three points, it is possible to continue plotting additional points. By
doing so, the point that was plotted first (from the three that already exist), will be
removed, and the new point will be added. The intersection of the resulting new plane and
the cube will be redrawn.
The UI has been designed in such a way, that the user is able to determine which point will
be removed next (if adding a new point). This is achieved via filling the to-be-removed point
with a different color.
Chemcalc
========
.. automodule:: chem.chemcalc
:members:
:show-inheritance:
Chemtools
=========
.. automodule:: chem.chemtools
:members:
:show-inheritance:
Tests
=====
.. automodule:: chem.tests
:members:
:show-inheritance:
*******************************************
CMS module
*******************************************
.. module:: cms
.. toctree::
transcripts.rst
*******************
Code Considerations
*******************
This is a checklist of all of the things that we expect a developer to consider
as they are building new or modifying existing functionality.
Operational Impact
==================
* Are there new points in the system that require operational monitoring?
* External system that you now depend on (Mathworks, SoftwareSecure,
CyberSource, etc...)
* New reliance on disk space?
* New standalone processes (workers? elastic search?) that need to always be available?
* A new queue that needs to be monitored for dequeueing
* Bulk Email --> Amazon SES, Inbound queues, etc...
* Are important feature metrics sent to datadog and is there a
dashboard to monitor them?
* Am I building a feature that will have impact to the performance of the system?
Keep in mind that Open edX needs to support hundreds of thousands if not
millions of students, so be careful that your code will work well when the
numbers get large.
* Deep Search
* Grade Downloads
* Are reasonable log messages being written out for debugging purposes?
* Will this new feature easily start up in the Vagrant image?
* Do we have documentation for how to start up this feature if it has any
new startup requirements?
* Are there any special directories/file system permissions that need to be set?
* Will this have any impact to the CDN related technologies?
* Are we pushing any extra manual burden on the Operations team to have to
provision anything new when new courses launch? when new schools start? etc....
* Has the feature been tested using a production configuration with vagrant?
See also: :doc:`deploy-new-service`
Documentation/Training/Support
==============================
* Is there appropriate documentation in the context of the product for
this feature? If not, how can we get it to folks?
* For Studio much of the documentation is in the product.
* Is this feature big enough that we need to have a session with stakeholders
to introduce this feature BEFORE we release it? (PMs, Support, etc...)
* Paid Certificates
* Do I have to give some more information to the Escalation Team
so that this can be supported?
* Did you add an entry to CHANGELOG?
* Did you write/edit docstrings for all of your modules, classes, and functions?
Development
===========
* Did you consider a reasonable upgrade path?
* Is this a feature that we need to slowly roll out to different audiences?
* Bulk Email
* Have you considered exposing an appropriate amount of configuration options
in case something happens?
* Have you considered a simple way to "disable" this feature if something is broken?
* Centralized Logging
* Will this feature require any security provisioning?
* Which roles use this feature? Does it make sense to ensure that only those
roles can see this feature?
* Assets in the Studio Library
* Did you ensure that any new libraries are added to appropriate provisioning
scripts and have been checked by OSCM for license appropriateness?
* Is there an open source alternative?
* Are we locked down to any proprietary technologies? (AWS, ...)
* Did you consider making APIs so that others can change the implementation if applicable?
* Did you consider Internationalization (I18N) and Localization (L10N)?
* Did you consider Accessibility (A11y)?
* Will your code work properly in workers?
* Have you considered the large-scale modularity of the code? For example,
xmodule and xblock should not use Django features directly.
Testing
=======
* Did you make sure that you tried boundary conditions?
* Did you try unicode input/data?
* The name of the person in paid certificates
* The name of the person in bulk email
* The body of the text in bulk email
* etc
* Did you try funny characters in the input/data? (~!@#$%^&*()';/.,<>, etc...)
* Have you done performance testing on this feature? Do you know how much
performance is good enough?
* Did you ensure that your functionality works across all supported browsers?
* Do you have the right hooks in your HTML to ensure that the views are automatable?
* Are you ready if this feature has 10x the expected usage?
* What happens if an external service does not respond or responds with
a significant delay?
* What are possible failure modes? Do your unit tests exercise these code paths?
* Does this change affect templates and/or JavaScript? If so, are there
Selenium tests for the affected page(s)? Have you tested the affected
page(s) in a sandbox?
Analytics
=========
* Are learning analytics events being recorded in an appropriate way?
* Do your events use a descriptive and uniquely enough event type and
namespace?
* Did you ensure that you capture enough information for the researchers
to benefit from this event information?
* Is it possible to reconstruct the state of your module from the history
of its events?
* Has this new event been documented so that folks downstream know how
to interpret it?
* Are you increasing the amount of logging in any major way?
* Are you sending appropriate/enough information to MixPanel,
Google Analytics, Segment IO?
Collaboration
=============
* Are there other teams that would benefit from knowing about this feature?
* Forums/LMS - email
* Does this feature require a special broadcast to external teams as well?
Open Source
===========
* Can we get help from the community on this feature?
* Does the community know enough about this?
UX/Design/Front End Development
===============================
* Did you make sure that the feature is going to pass
Accessibility requirements (still TBD)?
* Did you make sure any system/instructional text is I18N ready?
* Did you ensure that basic functionality works across all supported browsers?
* Did you plan for the feature's UI to degrade gracefully (or be
progressively enhanced) based on browser capability?
* Did you review the page/view under all browser/agent conditions -
viewport sizes, images off, css off?
* Did you write any HTML with ideal page/view semantics in mind?
* When writing HTML, did you adhere to standards/conventions around class/id names?
* When writing Sass, did you follow OOCSS/SMACSS philosophy ([1]_, [2]_, [3]_),
variable/extend organization and naming conventions, and UI abstraction conventions?
* When writing Sass, did you document any new variables,
extend-based classes, or mixins?
* When writing/adding JavaScript, did you consider the asset pipeline
and page load timeline?
* When writing JavaScript, did you note what code is for prototyping vs. production?
* When adding new templates, views, assets (Sass, images, plugins/libraries),
did you follow existing naming and file architecture conventions?
* When adding new templates, views, assets (Sass, images, plugins/libraries),
did you add any needed documentation?
* Did you use templates and good Sass architecture to keep DRY?
* Did we document any aspects about the feature (flow, purpose, intent)
that we or other teams will need to know going forward?
.. [1] http://smacss.com/
.. [2] http://thesassway.com/intermediate/avoid-nested-selectors-for-more-modular-css
.. [3] http://ianstormtaylor.com/oocss-plus-sass-is-the-best-way-to-css/
edX.org Specific
================
* Ensure that you have not broken import/export?
* Ensure that you have not broken video player? (Lyla video)
Common / lib
===============================
Contents:
.. toctree::
:maxdepth: 2
xmodule.rst
modulestore.rst
capa.rst
chem.rst
sandbox-packages.rst
symmath.rst
calc.rst
opaque-keys.rst
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=redefined-builtin
# pylint: disable=protected-access
# pylint: disable=unused-argument

# Sphinx configuration for the edX platform developer documentation.
# Layers repo-specific settings on top of the shared configuration
# star-imported from docs.shared.conf below.
import os
from path import path  # third-party path.py package (see requirements), not os.path
import sys

# Read the Docs sets READTHEDOCS=True in its build environment.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# Make the repository root importable so `docs.shared.conf` resolves.
sys.path.append('../../../../')
from docs.shared.conf import *

# Add any paths that contain templates here, relative to this directory.
# (templates_path and html_static_path come from the shared conf star import.)
templates_path.append('source/_templates')

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
# Make the platform's djangoapps and bundled libraries importable for autodoc.
sys.path.append(root / "common/djangoapps")
sys.path.append(root / "common/lib")
sys.path.append(root / "common/lib/capa")
sys.path.append(root / "common/lib/chem")
sys.path.append(root / "common/lib/sandbox-packages")
sys.path.append(root / "common/lib/xmodule")
sys.path.append(root / "common/lib/opaque_keys")
sys.path.append(root / "lms/djangoapps")
sys.path.append(root / "lms/lib")
sys.path.append(root / "cms/djangoapps")
sys.path.append(root / "cms/lib")
sys.path.insert(
    0,
    os.path.abspath(
        os.path.normpath(
            os.path.dirname(__file__) + '/../../../'
        )
    )
)
sys.path.append('.')

# django configuration - careful here
# NOTE(review): RTD gets 'lms' while local builds get 'lms.envs.test' —
# presumably deliberate; confirm before changing either value.
if on_rtd:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
    'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
    'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']

# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'

project = u'edX Platform Developer Documentation'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
    """Stand-in for modules that the Read the Docs build cannot import.

    Attribute access never fails: ``__file__``/``__path__`` resolve to
    ``'/dev/null'``, names starting with an uppercase (or non-lowercasable)
    character yield freshly minted classes so they can be subclassed, and
    every other attribute yields another ``Mock``. Instances are callable
    and calling one also returns a new ``Mock``.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        if name[0] != name[0].upper():
            # Ordinary (lowercase-leading) attribute: just another mock.
            return Mock()
        # Capitalised attributes are treated as classes so that code doing
        # ``class Foo(mocked.Bar)`` keeps working under autodoc.
        generated = type(name, (), {})
        generated.__module__ = __name__
        return generated
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
    'bson',
    'bson.errors',
    'bson.objectid',
    'dateutil',
    'dateutil.parser',
    'fs',
    'fs.errors',
    'fs.osfs',
    'lazy',
    'mako',
    'mako.template',
    'matplotlib',
    'matplotlib.pyplot',
    'mock',
    'numpy',
    'oauthlib',
    'oauthlib.oauth1',
    'oauthlib.oauth1.rfc5849',
    'PIL',
    'pymongo',
    'pyparsing',
    'pysrt',
    'requests',
    'scipy.interpolate',
    'scipy.constants',
    'scipy.optimize',
    'yaml',
    'webob',
    'webob.multidict',
]

if on_rtd:
    # On Read the Docs only, stub each troublesome module out with a Mock
    # instance so autodoc can import code that depends on it.
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # NOTE(review): Python 2 only -- relies on types.NoneType, basestring
    # and unicode, none of which exist on Python 3.
    if strings_only and isinstance(s, (types.NoneType, int)):
        return s
    if not isinstance(s, basestring,):
        if hasattr(s, '__unicode__'):
            # The object provides its own unicode conversion; use it.
            s = unicode(s)
        else:
            # Fall back to str(), then decode with the requested codec.
            s = unicode(str(s), encoding, errors)
    elif not isinstance(s, unicode):
        # Byte string: decode it with the requested codec.
        s = unicode(s, encoding, errors)
    return s
class MLStripper(HTMLParser):
    """HTML parser that discards all markup and accumulates only text data."""
    def __init__(self):
        # NOTE(review): calls reset() instead of HTMLParser.__init__();
        # on the Python 2 HTMLParser, __init__ just calls reset() anyway.
        self.reset()
        # Text fragments collected by handle_data, joined by get_data().
        self.fed = []
    def handle_data(self, d):
        self.fed.append(d)
    def get_data(self):
        return ''.join(self.fed)
def strip_tags(html):
    """Return ``html`` with all markup removed, keeping only its text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
def process_docstring(app, what, name, obj, options, lines):
    """Autodoc hook: append Django model field docs to a model's docstring.

    For every class inheriting from ``django.db.models.Model``, each model
    field is appended as a ``:param:``/``:type:`` pair. The field's help
    text (with HTML stripped) is preferred as the description; if empty,
    the capitalized verbose name is used instead. Returns ``lines``.
    """
    # Imported lazily: importing Django at module scope causes import
    # errors (see the original comment), so keep it inside the function.
    from django.db import models

    # Ignore everything that is not a Django model class.
    if not (inspect.isclass(obj) and issubclass(obj, models.Model)):
        return lines

    # Grab the field list from the meta class.
    for field in obj._meta._fields():
        # Decode the help text and strip any HTML out of it.
        cleaned_help = strip_tags(force_unicode(field.help_text))
        # Prefer the help text; fall back to the capitalized verbose name.
        description = cleaned_help or force_unicode(field.verbose_name).capitalize()
        lines.append(u':param %s: %s' % (field.attname, description))
        # Record the field's concrete class name as its type.
        lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
    return lines
def setup(app):
    """Sphinx extension entry point: register the docstring processor."""
    # Hook into autodoc so Django model fields are documented automatically.
    app.connect('autodoc-process-docstring', process_docstring)
***********************************
So You Want to Deploy a New Service
***********************************
Intro
=====
This page is a work-in-progress aimed at capturing all the details needed to
deploy a new service in the edX environment.
Considerations
==============
What Does Your Service Do
-------------------------
Understanding how your service works and what it does helps Ops support
the service in production.
Sizing and Resource Profile
---------------------------
What class of machine does your service require? What resources are most
likely to be bottlenecks for your service: CPU, memory, bandwidth, or something else?
Customers
---------
Who will be consuming your service? What is the anticipated initial usage?
What factors will cause usage to grow? How many users can your service support?
Code
----
What repository or repositories does your service require?
Will your service be deployed from a non-public repo?
Ideally your service should follow the same release management process as the LMS.
This is documented in the wiki, so please ensure you understand that process in depth.
Was the service code reviewed?
Settings
--------
How does your service read in environment specific settings? Were all
hard-coded references to values that should be settings, e.g., database URLs
and credentials, message queue endpoints, etc., found and resolved during
code review?
License
-------
Is the license included in the repo?
How does your service run
-------------------------
Is it HTTP based? Does it run periodically? Both?
Persistence
-----------
Ops will need to know the following things:
* What persistence needs does your service have?
* Will it connect to an existing database?
* Will it connect to Mongo?
* What are the least permissive permissions your service needs to do its job?
Logging
-------
It's important that your application logging is built out to provide sufficient
feedback for problem determination, as well as for ensuring that the service is
operating as desired. It's also important that your service log using our deployment
standards, i.e., logs vs syslog in deployment environments and utilizes the
standard log format for syslog. Can the logs be consumed by Splunk? They
should not be if they contain data discussed in the Data Security section below.
Metrics
-------
What are the key metrics for your application? Concurrent users?
Transactions per second? Ideally you should create a DataDog view that
captures the key metrics for your service and provides an instant gauge of
overall service health.
Messaging
---------
Does your service need to access a message queue?
Email
-----
Does your service need to send email?
Access to Other Services
------------------------
Does your service need access to other services, either within or
outside of the edX environment? Some examples might be the comment service,
the LMS, YouTube, S3 buckets, etc.
Service Monitoring
------------------
Your service should have a facility for remote monitoring that has the
following characteristics:
* It should exercise all the components that your service requires to run successfully.
* It should be necessary and sufficient for ensuring your service is healthy.
* It should be secure.
* It should not open your service to DDOS attacks.
Fault Tolerance and Scalability
-------------------------------
How can your application be deployed to ensure that it is fault tolerant
and scalable?
Network Access
--------------
From where should your service be accessible?
Data Security
-------------
Will your application be storing or handling data in any of the
following categories:
* Personally Identifiable Information in General, e.g., user's email addresses.
* Tracking log data
* edX confidential data
Testing
-------
Has your service been load tested? What were the details of the test?
What determinations can we make regarding when we will need to scale if usage
trends upward? How can Ops exercise your service in order to test end-to-end
integration? We love no-op-able tasks.
Additional Requirements
-----------------------
Anything else we should know about.
*******************************************
Common
*******************************************
.. module:: common.djangoapps
Student
=======
.. automodule:: student
:members:
:show-inheritance:
Models
------
.. automodule:: student.models
:members:
:show-inheritance:
Views
-----
.. automodule:: student.views
:members:
:show-inheritance:
Admin
-----
.. automodule:: student.admin
:members:
:show-inheritance:
Tests
-----
.. automodule:: student.tests
:members:
:show-inheritance:
Management
----------
.. automodule:: student.management
:members:
:show-inheritance:
Migrations
----------
.. automodule:: student.migrations
:members:
:show-inheritance:
\ No newline at end of file
Django applications
===============================
Contents:
.. toctree::
:maxdepth: 2
lms.rst
cms.rst
djangoapps-common.rst
\ No newline at end of file
*******************************************
Content experiments
*******************************************
This is a brief overview of the support for content experiments in the platform.
For now, there is only one type of experiment: content split testing. This lets course authors define an experiment with several *experimental conditions*, add xblocks that reference that experiment in various places in the course, and specify what content students in each experimental condition should see. The LMS provides a way to randomly assign students to experimental conditions for each experiment, so that they see the right content at runtime.
Experimental conditions are essentially just a set of groups to partition users into. This may be useful to other non-experiment uses, so the implementation is done via a generic UserPartition interface. Copying the doc string, a UserPartition is:
A named way to partition users into groups, primarily intended for running
experiments. It is expected that each user will be in at most one group in a
partition.
A Partition has an id, name, description, and a list of groups.
The id is intended to be unique within the context where these are used. (e.g. for
partitions of users within a course, the ids should be unique per-course)
There is an XModule helper library ``partitions_service`` that helps manage user partitions from XBlocks (at the moment just from the split_test module). It provides an interface to store and retrieve the groups a user is in for particular partitions.
User assignments to particular groups within a partition must be persisted. This is done via a User Info service provided by the XBlock runtime, which exposes a generic user tagging interface, allowing storing key-value pairs for the user scoped to a particular course.
UserPartitions are configured at the course level (makes sense in Studio, for author context, and there's no XBlock scope to store per-course configuration state), and currently exposed via the LMS XBlock runtime as ``runtime.user_partitions``.
More details on the components below.
User metadata service
---------------------
Goals: provide a standard way to store information about users, to be used e.g. by XBlocks, and make that information easily accessible when looking at analytics.
When the course context is added to the analytics events, it should add the user's course-specific tags as well.
When the users global context is added to analytics events, it should add the user's global tags.
We have a ``user_api`` app, which has REST interface to "User Preferences" for global preferences, and now a ``user_service.py`` interface that exposes per-course tags, with string keys (<=255 chars) and arbitrary string values. The intention is that the values are fairly short, as they will be included in all analytics events about this user.
The XBlock runtime includes a ``UserServiceInterface`` mixin that provides access to this interface, automatically filling in the current user and course context. This means that with the current design, an XBlock can't access tags for other users or from other courses.
To avoid name collisions in the keys, we rely on convention. e.g. the XBlock partition service uses ``'xblock.partition_service.partition_{0}'.format(user_partition.id)``.
Where the code is:
------------------
common:
- partitions library--defines UserPartitions, provides partitions_service API.
- split_test_module -- a block that has one child per experimental condition (could be a vertical or other container with more blocks inside), and config specifying which child corresponds to which condition.
- course_module -- a course has a list of UserPartitions, each of which specifies the set of groups to divide users into.
LMS:
- runtime--LmsUserPartitions, UserServiceMixin mixins. Provides a way for the partition_service to get the list of UserPartitions defined in a course, and get/set per-user tags within a course scope.
- user_api app -- provides persistence for the user tags.
Things to watch out for (some not implemented yet):
---------------------------------------------------
- grade export needs to be smarter, because different students can see different graded things
- grading needs to only grade the children that a particular student sees (so if there are problems in both conditions in a split_test, any student would see only one set)
- ui -- icons in sequences need to be passed through
- tooltips need to be passed through
- author changes post-release: conditions can be added or deleted after an experiment is live. This is usually a bad idea, but can be useful, so it's allowed. Need to handle all the cases.
- analytics logging needs to log all the user tags (if we really think it's a good idea). We'll probably want to cache the tags in memory for the duration of the request, being careful that they may change as the request is processed.
- need to add a "hiding" interface to XBlocks that verticals, sequentials, and courses understand, to hide children that set it. Then give the split test module a way to say that particular condition should be empty and hidden, and pass that up.
- staff view should show all the conditions, clearly marked
Things to test:
- randomization
- persistence
- correlation between test that use the same groups
- non-correlation between tests that use different groups
.. _Options for Extending the edX Platform:
##########################################
Options for Extending the edX Platform
##########################################
There are several options for extending the Open edX Platform to provide useful
and innovative educational content in your courses.
This section of the developers' documentation lists and explains the different ways to extend the platform, starting with the following table. Click the name of the extension type in the column header for more information.
.. list-table::
:widths: 10 10 10 10 10 10
:header-rows: 1
* -
- :ref:`Custom JavaScript Applications`
- LTI
- External Graders
- XBlocks
- Platform Customization
* - Development Cost
- Low
- Low
- Medium
- Medium
- High
* - Language
- JavaScript
- Any
- Any
- Python
- Python
* - Development Environment Needed
- No
- No
- Yes
- Yes
- Yes
* - Self-hosting Needed
- No
- Yes
- Yes
- No
- No
* - Need edX Involvement
- No
- No
- Yes
- Yes
- Yes
* - Clean UI Integration
- Yes
- No (see LTI)
- Yes
- Yes
- Yes
* - Mobile enabled
- Possibly
- Possibly
- Yes
- Yes
- Yes
* - Server Side Grading
- Possibly (See JavaScript)
- Yes
- Yes
- Yes
- Yes
* - Usage Data
- No (See JavaScript)
- No
- Limited
- Yes
- Yes
* - Provision in Studio
- No
- No
- No
- Yes
- No
* - Privacy Loss Compared to Hosting Open edX
- No
- Possibly
- Possibly
- No
- No
###########################
Extending the edX Platform
###########################
.. toctree::
:maxdepth: 2
extending.rst
javascript
js_template_example
\ No newline at end of file
.. _The Custom JavaScript Display and Grading Example Template:
###########################################################
The Custom JavaScript Display and Grading Example Template
###########################################################
As referred to in `course staff documentation <http://edx.readthedocs.org/projects/ca/en/latest/problems_tools/advanced_problems.html#custom-javascript-display-and-grading>`_, there is a built-in template in edX Studio that uses a sample JavaScript application.
This sample application has students select two different shapes, a cone and a
cube. The correct state is when the cone is selected and the cube is not
selected:
.. image:: ../images/JavaScriptInputExample.png
:alt: Image of the sample JavaScript application, with the cone selected
You can `download files for that application <http://files.edx.org/JSInput.zip>`_.
You must upload these files in Studio to use them in a problem.
The following information steps through this example to demonstrate how to apply
the guidelines in `Custom JavaScript Display and Grading`.
****************************
Example getState() Function
****************************
In the example, the ``state`` variable is initialized for the cylinder and cube
in the ``WebGLDemo.js`` file:
.. code-block:: javascript
var state = {
'selectedObjects': {
'cylinder': false,
'cube': false
}
}
User interactions toggle the ``state`` values of the cylinder and cube between
``true`` and ``false``.
The ``getState()`` function in the sample application returns the state as a
JSON string:
.. code-block:: javascript
function getState() {
return JSON.stringify(state);
}
******************************
Example setState() Function
******************************
In the example, when a student clicks **Check**, the ``state`` variable is saved
so that the student can later return to the application and find it in the same
state:
.. code-block:: javascript
function setState() {
stateStr = arguments.length === 1 ? arguments[0] : arguments[1];
state = JSON.parse(stateStr);
updateMaterials();
}
The ``updateMaterials()`` function called by ``setState()`` updates the state of
the cylinder and cone with the user's current selections:
.. code-block:: javascript
function updateMaterials() {
if (state.selectedObjects.cylinder) {
cylinder.material = selectedMaterial;
}
else {
cylinder.material = unselectedMaterial;
}
if (state.selectedObjects.cube) {
cube.material = selectedMaterial;
}
else {
cube.material = unselectedMaterial;
}
}
******************************
Example getGrade() function
******************************
In the example, when a student clicks **Check**, the ``getGrade()`` function in
returns the selected objects:
.. code-block:: javascript
function getGrade() {
return JSON.stringify(state['selectedObjects']);
}
The returned JSON string is then used by the Python code defined in the problem
to determine if correct objects were selected or not, and to return a result.
*******************************
Grade the Student Response
*******************************
The following is the Python function ``vglcfn`` in the sample application:
.. code-block:: python
<script type="loncapa/python">
import json
def vglcfn(e, ans):
'''
par is a dictionary containing two keys, "answer" and "state"
The value of answer is the JSON string returned by getGrade
The value of state is the JSON string returned by getState
'''
par = json.loads(ans)
# We can use either the value of the answer key to grade
answer = json.loads(par["answer"])
return answer["cylinder"] and not answer["cube"]
'''
# Or we could use the value of the state key
state = json.loads(par["state"])
selectedObjects = state["selectedObjects"]
return selectedObjects["cylinder"] and not selectedObjects["cube"]
'''
</script>
The ``ans`` parameter contains the JSON string returned by ``getGrade()``. The
value is converted to a Python Unicode structure in the variable ``par``.
In the function's first option, object(s) the student selected are stored in the
``answer`` variable. If the student selected the cylinder and not the cube, the
``answer`` variable contains only ``cylinder``, and the function returns
``True``, which signifies a correct answer. Otherwise, it returns ``False`` and
the answer is incorrect.
In the function's second option, the objects' states are retrieved. If the
cylinder is selected and not the cube, the function returns ``True``, which
signifies a correct answer. Otherwise, it returns ``False`` and the answer is
incorrect.
*******************************
XML Problem Structure
*******************************
The XML problem for the sample template is:
.. code-block:: xml
<problem display_name="webGLDemo">
<script type="loncapa/python">
import json
def vglcfn(e, ans):
'''
par is a dictionary containing two keys, "answer" and "state"
The value of answer is the JSON string returned by getGrade
The value of state is the JSON string returned by getState
'''
par = json.loads(ans)
# We can use either the value of the answer key to grade
answer = json.loads(par["answer"])
return answer["cylinder"] and not answer["cube"]
'''
# Or we could use the value of the state key
state = json.loads(par["state"])
selectedObjects = state["selectedObjects"]
return selectedObjects["cylinder"] and not selectedObjects["cube"]
'''
</script>
<p>
The shapes below can be selected (yellow) or unselected (cyan).
Clicking on them repeatedly will cycle through these two states.
</p>
<p>
If the cone is selected (and not the cube), a correct answer will be
generated after pressing "Check". Clicking on either "Check" or "Save"
will register the current state.
</p>
<customresponse cfn="vglcfn">
<jsinput gradefn="WebGLDemo.getGrade"
get_statefn="WebGLDemo.getState"
set_statefn="WebGLDemo.setState"
width="400"
height="400"
html_file="https://studio.edx.org/c4x/edX/DemoX/asset/webGLDemo.html"
sop="false"/>
</customresponse>
</problem>
\ No newline at end of file
.. EdX Dev documentation master file, created by
sphinx-quickstart on Fri Nov 2 15:43:00 2012.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
edX Developer's Guide
===================================
Contents:
.. this is wildly disorganized, and is basically just a dumping ground for
.rst files at the moment.
.. toctree::
:maxdepth: 2
change_log
preface.rst
browsers.rst
overview.rst
extending_platform/index
modulestores/index
xblocks.rst
pavelib.rst
public_sandboxes.rst
analytics.rst
process/index
testing/index
code-considerations
deploy-new-service
APIs
-----
.. toctree::
:maxdepth: 2
djangoapps.rst
common-lib.rst
experiments.rst
Internationalization
---------------------
.. toctree::
:maxdepth: 2
i18n.rst
i18n_translators_guide.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
*******************************************
LMS module
*******************************************
.. module:: lms
Branding
========
.. automodule:: branding
:members:
:show-inheritance:
Views
-----
.. automodule:: branding.views
:members:
:show-inheritance:
Certificates
============
.. automodule:: certificates
:members:
:show-inheritance:
Models
------
.. automodule:: certificates.models
:members:
:show-inheritance:
Views
-----
.. automodule:: certificates.views
:members:
:show-inheritance:
Circuit
=======
.. automodule:: circuit
:members:
:show-inheritance:
Models
------
.. automodule:: circuit.models
:members:
:show-inheritance:
Views
-----
.. automodule:: circuit.views
:members:
:show-inheritance:
Course_wiki
===========
.. automodule:: course_wiki
:members:
:show-inheritance:
Course Nav
----------
.. .. automodule:: course_wiki.course_nav
.. :members:
.. :show-inheritance:
Views
-----
.. automodule:: course_wiki.views
:members:
:show-inheritance:
Editors
-------
.. automodule:: course_wiki.editors
:members:
:show-inheritance:
Courseware
==========
.. automodule:: courseware
:members:
:show-inheritance:
Access
------
.. automodule:: courseware.access
:members:
:show-inheritance:
Admin
-----
.. automodule:: courseware.admin
:members:
:show-inheritance:
Courses
-------
.. automodule:: courseware.courses
:members:
:show-inheritance:
Grades
------
.. automodule:: courseware.grades
:members:
:show-inheritance:
Models
------
.. automodule:: courseware.models
:members:
:show-inheritance:
Progress
--------
.. automodule:: courseware.progress
:members:
:show-inheritance:
Tabs
----
.. automodule:: courseware.tabs
:members:
:show-inheritance:
Dashboard
=========
.. automodule:: dashboard
:members:
:show-inheritance:
Models
------
.. automodule:: dashboard.models
:members:
:show-inheritance:
Views
-----
.. automodule:: dashboard.views
:members:
:show-inheritance:
Django comment client
=====================
.. automodule:: django_comment_client
:members:
:show-inheritance:
Models
------
.. automodule:: django_comment_client.models
:members:
:show-inheritance:
Heartbeat
=========
.. automodule:: heartbeat
:members:
:show-inheritance:
Instructor
==========
.. automodule:: instructor
:members:
:show-inheritance:
Views
-----
.. automodule:: instructor.views
:members:
:show-inheritance:
Licenses
========
.. automodule:: licenses
:members:
:show-inheritance:
Models
------
.. automodule:: licenses.models
:members:
:show-inheritance:
Views
-----
.. automodule:: licenses.views
:members:
:show-inheritance:
LMS migration
=============
.. automodule:: lms_migration
:members:
:show-inheritance:
Migration
---------
.. automodule:: lms_migration.migrate
:members:
:show-inheritance:
Multicourse
===========
.. automodule:: multicourse
:members:
:show-inheritance:
Psychometrics
=============
.. automodule:: psychometrics
:members:
:show-inheritance:
Models
------
.. automodule:: psychometrics.models
:members:
:show-inheritance:
Admin
-----
.. automodule:: psychometrics.admin
:members:
:show-inheritance:
Psychoanalyze
-------------
.. automodule:: psychometrics.psychoanalyze
:members:
:show-inheritance:
Static template view
====================
.. automodule:: static_template_view
:members:
:show-inheritance:
Views
-----
.. automodule:: static_template_view.views
:members:
:show-inheritance:
Static book
===========
.. automodule:: staticbook
:members:
:show-inheritance:
Models
------
.. automodule:: staticbook.models
:members:
:show-inheritance:
Views
-----
.. automodule:: staticbook.views
:members:
:show-inheritance:
*******************************************
Xmodule.Modulestore
*******************************************
.. contents:: Table of Contents
.. automodule:: xmodule.modulestore
:members:
:show-inheritance:
Modulestores
============
These implement the :class:`.ModuleStoreRead` and :class:`.ModuleStoreWrite`
to provide access to XBlock content.
.. automodule:: xmodule.modulestore.xml
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.mongo
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.split_mongo
:members:
:show-inheritance:
Modulestore Helpers
-------------------
These packages provide utilities for easier use of modulestores,
and migrating data between modulestores.
.. automodule:: xmodule.modulestore.search
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.mongoengine_fields
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.split_migrator
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.store_utilities
:members:
:show-inheritance:
Xml Import/Export
=================
These packages focus on importing and exporting xml serialized
course content to/from modulestores.
.. automodule:: xmodule.modulestore.xml_exporter
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.xml_importer
:members:
:show-inheritance:
Miscellaneous
=============
.. automodule:: xmodule.modulestore.django
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.exceptions
:members:
:show-inheritance:
.. automodule:: xmodule.modulestore.inheritance
:members:
:show-inheritance:
.. _edX Modulestores:
###########################
edX Modulestores
###########################
.. toctree::
:maxdepth: 2
overview
mixedmodulestore
split-mongo
\ No newline at end of file
#################
MixedModuleStore
#################
MixedModuleStore provides a common API for all modulestore functions.
In addition, MixedModuleStore allows you to select which modulestore a
specific course is stored in (XMLModuleStore, DraftModuleStore, Split Mongo)
and routes requests for that course to the correct modulestore.
MixedModuleStore can also handle some conversions from one modulestore to
another.
\ No newline at end of file
#################################
Overview of the edX Modulestores
#################################
The edX Platform uses several different modulestores to store course data. Each
of these modulestores is in use on edx.org.
See:
* `XMLModuleStore`_
* `DraftModuleStore`_
* :ref:`Split Mongo Modulestore`
***************
XMLModuleStore
***************
The XMLModuleStore was the first modulestore used for the edX Platform.
XMLModuleStore uses a file system that stores XML-based courses. When the LMS
server starts, XMLModuleStore loads every block for every course into memory.
XMLModuleStore is read-only and does not enable users to change a course
without restarting the server.
*****************
DraftModuleStore
*****************
DraftModuleStore was the next generation modulestore and provides greater
scalability by allowing random access to course blocks and loading blocks on
requests.
DraftModuleStore allows editing of courses without restarting the server.
In addition, DraftModuleStore stores a draft version of some types of blocks.
*****************
Split Mongo
*****************
Split Mongo is the newest modulestore. See the :ref:`Split Mongo Modulestore`
chapter for more information.
\ No newline at end of file
.. _Split Mongo Modulestore:
############################
Split Mongo Modulestore
############################
See:
* `Overview`_
* `Split Mongo Data Model`_
* `Split Mongo Capabilities`_
************************
Overview
************************
*Split Mongo* is the term used for the new edX modulestore. Split Mongo is
built on mongoDB. For information about mongoDB, see the `mongoDB website`_.
The "split" in Split Mongo refers to how a course is split into three types of
information:
* The course identity, referred to as the course index
* The course structure
* The course content, referred to as XBlock definitions.
This separation of identity, structure, and content enables course authors to
use more advanced capabilities when developing and managing courses.
.. _mongoDB website: http://www.mongodb.org
************************
Split Mongo Data Model
************************
In the Split Mongo data model, edX courses are split into three collections:
* `Course Index`_
* `Course Structures`_
* `XBlock Definitions`_
=============
Course Index
=============
The course index is a dictionary that stores course IDs. Each course ID points
to a course structure.
The course index supports multiple branches of a course. The index can store
multiple entries for a course ID, with each entry pointing to a different
course structure that corresponds to a different branch.
As currently implemented, for each course, there is a branch for both the
published and draft versions of the course. The published and draft branches of
the course point to different structures.
In the edX Platform:
* Students using the LMS see and interact with the published version of the
course.
* Course staff using edX Studio make changes to the draft version of the
course.
* When the user changes the course outline, display names, the course
about page, course updates, other course pages, sections or subsections,
the draft branch is automatically published; that is, it becomes the
published branch.
* For units and components, changes are saved in the draft branch. The user
must publish the unit to change the draft branch to the published branch.
When the user begins another set of changes, the draft branch is updated.
Course Reruns
**************
The edX Platform enables you to rerun a course. When you rerun a course, a new
course index is created. The new course index points to the same course
structure as the original course index.
==========================
Course Structures
==========================
The course structure defines, or outlines, the content of a course.
A course structure is made up of blocks in a tree data structure. Blocks are
objects in a course, such as the course itself, sections, subsections, and
units. A block can reference other blocks; for example, a section references
one or more subsections. Each block has a unique ID that is generated by the
edX Platform.
Each block in the course structure points to an XBlock definition. Different
blocks, in the same or in different structures, can point to the same
definition.
Course structures, and each block within a structure, are versioned. That is,
when a course author changes a course, or a block in the course, a new course
structure is saved; the previous course structure, and previous versions of
blocks within the structure, remain in the database and are not modified.
==========================
XBlock Definitions
==========================
XBlock definitions contain the content of each block. For some blocks, such as
sections and subsections, the definition consists of the block's display name.
For components, such as HTML or problem components, the definition also
contains the content of the object. A definition can be referenced by multiple
blocks.
XBlock definitions are versioned. That is, when a course author changes
content, a new XBlock definition for that object is saved; the previous
definition remains in the database and is not modified.
************************
Split Mongo Capabilities
************************
The Split Mongo data model enables the edX Platform to implement advanced
content management functionality. Specifically, Split Mongo is designed to
enable:
* `Multiple Course Branches`_
* `Versioning`_
* `Content Reuse`_
While these capabilities are not fully implemented in the edX Platform, Split
Mongo is designed to allow future enhancements that enable these content
management capabilities.
========================
Multiple Course Branches
========================
Split Mongo enables multiple branches of a course. The `course index <Course
Index>`_ can have multiple entries for a course ID, each of which points to a
different structure.
The edX Platform currently uses a draft and a published branch for a course.
Future enhancements may use other branches.
============
Versioning
============
In Split Mongo, every change to a course or a block within the course is saved,
with the time and user recorded.
Versioning enables future enhancements such as allowing course authors to
revert a course or block to a previous version.
==============
Content Reuse
==============
By using pointers to reference XBlock definitions from `course structures
<Course Structures>`_, Split Mongo enables content reuse. A single `XBlock
definition <XBlock Definitions>`_ can be referenced from multiple course
structures.
Future enhancements to the edX Platform can allow course authors to reuse an
XBlock in multiple contexts, streamlining course development and maintenance.
\ No newline at end of file
*******************************************
OpaqueKeys
*******************************************
.. module:: opaque_keys
OpaqueKeys
==========
.. automodule:: opaque_keys
:members:
:show-inheritance:
\ No newline at end of file
*******************************************
Overview
*******************************************
This is EdX Dev documentation, mainly extracted from docstrings.
Autogenerated by Sphinx from python code.
Soon support for JS will be implemented.
*******************************************
Paver
*******************************************
Paver provides a standardised way of managing development and operational tasks in edX.
To run individual commands, use the following syntax:
paver <command_name> --option=<option value>
Paver Commands
*******************************************
Paver commands are grouped as follows:
- Prereqs_ Install all of the prerequisite environments for Python, Node and Ruby
- Docs_ Docs is used to build and then optionally display the EdX docs relating to development, authoring and data management
- Assets_ Assets will compile Sass (CSS), Coffeescript (Javascript) and XModule assets. Optionally it can call Django’s collectstatic method
- `Run Servers`_ Run servers
.. _Prereqs:
Prereqs
=============
Install all of the prerequisite for Python, Node and Ruby
**install_prereqs** : installs Ruby, Node and Python requirements
::
paver install_prereqs
..
.. _Docs:
Docs
=============
Docs is used to build and then optionally display the EdX docs relating to development, authoring and data management
**build_docs**: Invoke sphinx 'make build' to generate docs.
*--type=* <dev, author, data> Type of docs to compile
*--verbose* Display verbose output
::
paver build_docs --type=dev --verbose
..
.. _Assets:
Assets
=============
Assets will compile Sass (CSS), CoffeeScript (Javascript) and XModule assets. Optionally it can call Django's collectstatic command.
**update_assets**: Compiles Coffeescript, Sass, Xmodule and runs collectstatic
*system* lms or studio
*--settings=* Django settings e.g. aws, dev, devstack (the default)
*--debug* Disable Sass compression
*--skip-collect* Skip collection of static assets
::
paver update_assets lms
..
.. _Run Servers:
Run Servers
=============
**lms**: runs LMS server
*--settings=* Django settings e.g. aws, dev, devstack (the default)
*--fast* Skip updating assets
::
paver lms --settings=dev
..
**studio**: runs Studio
*--settings=* Django settings e.g. aws, dev, devstack (the default)
*--fast* Skip updating assets
::
paver studio --settings=dev
..
**run_all_servers**: runs lms, cms and celery workers
*--settings=* Django settings e.g. aws, dev, devstack (the default)
*--worker_settings=* Django settings for celery workers
::
paver run_all_servers --settings=dev --worker_settings=celery
..
**run_celery**: runs celery for specified system
*--settings=* Environment settings e.g. aws, dev both for LMS and Studio
*--settings_lms=* Override django settings for LMS e.g. lms.dev
*--settings_cms=* Override django settings for Studio
::
paver celery --settings=dev
..
**update_db**: runs syncdb and then migrate
*--settings=* Django settings e.g. aws, dev, devstack (the default)
::
paver update_db --settings=dev
..
**check_settings**: checks settings files
*system*: System to check (lms or studio)
*settings*: Django settings to check.
::
paver check_settings lms aws
..
.. include:: ../../shared/preface.rst
\ No newline at end of file
*****************
Community Manager
*****************
Community managers handle the first part of the process of responding to pull
requests, before they are reviewed by core committers. Community managers are
responsible for monitoring the Github project so that they are aware of incoming
pull requests. For each pull request, a community manager should:
#. Read the description of the pull request to understand the idea behind it
and what parts of the code it impacts. If the description is absent or
unclear, inform the author that the pull request cannot be reviewed until
the description is clearer. Guide them to the :doc:`pull request cover letter <cover-letter>`
guidelines.
#. Help the product team evaluate the idea behind the pull request.
Is this something that Open edX wants? If you and the
product owner(s) all believe that Open edX does not want this pull request,
add a comment to the pull request explaining the reasoning behind that
decision. Be polite, and remind them that they are welcome to fork the code
and run their own fork on their own servers, without needing permission
from edX. Try to suggest ways that they can build something that Open edX
*does* want: for example, perhaps an API that would allow the contributor
to build their own component separately. Then close the pull request.
#. Check that the author of the pull request has submitted a
`contributor's agreement`_, added their name to the AUTHORS file, and completed any other
necessary administrivia (our bot will make an automated comment if this is not
properly in place). If not, inform the author of the problems and wait for them to fix it.
#. Once you’ve verified that the code change is not malicious,
run a Jenkins job on the pull request and check the result.
If there are failing tests (and they are real failures, not flaky tests),
inform the author that the pull request cannot be reviewed until the tests
are passing.
#. When all the tests pass, check the diff coverage and diff quality.
If they are too low, inform the author of how to check these metrics,
and ask the author to write unit tests to increase coverage and quality.
Diff quality should be 100%, and diff coverage should be at least 95% unless
there are exceptional circumstances.
#. Skim the contents of the pull request and suggest obvious fixes/improvements
to the pull request. Note that this is *not* a thorough code review --
this is simply to catch obvious issues and low-hanging fruit.
The point is to avoid interrupting core committers for trivial issues.
#. Ask the author of the pull request for a test plan:
once this code is merged, how can we test that it’s working properly?
Whichever core committer merges this pull request will need to test it
on a staging server before the code is deployed to production, so be sure
that the test plan is clear enough for a core committer to follow.
#. If the PR includes any visual changes, or changes in user interaction,
ask the author of the pull request to provide some screenshots.
(For interaction changes, GIFs are awesome!) When a core committer starts
reviewing the changes, it is often helpful to deploy the pull request to a
sandbox server, so that the reviewer can click around and verify that the
changes look good.
#. The core committers will put together a style guide.
Pull requests that have visual/UX changes will be expected to respect this
style guide -- if they don’t, point the author to the style guide and tell
them to resubmit the pull request when it does.
.. _contributor's agreement: http://code.edx.org/individual-contributor-agreement.pdf
At this point, the pull request is ready for code review. There are two
different options: small PR review and large PR review. A PR is “small” if it
can be read and understood in less than 15 minutes, including time spent
context-switching, reading the description of the pull request, reading any
necessary code context, etc. Typically, “small” PRs consist of fixing typos,
improving documentation, adding comments, changing strings to unicode, marking
strings that need to be translated, adding tests, and other chores. A “small”
pull request doesn’t modify the code that will be run in production in any
meaningful way.
If the pull request is small, it can be reviewed immediately. If the community
manager that is handling this pull request feels comfortable doing the code
review, then he or she should do so rather than handing it off to a core
committer. If not, he or she should move the JIRA ticket for the PR review
into the "Awaiting Prioritization" state and add enough detail on the ticket for
the product team to understand the size and scope of the changes.
Inform the author that it might take a few days for the engineering team to review the PR.
If the pull request is not small, it will be handled by the full pull request process:
.. image:: pr-process.png
:align: center
:alt: A visualization of the pull request process
The community manager should:
* Make sure the pull request is ready for Product Review, if that has not yet happened.
That means getting enough detail out of the contributor for the product owner
to properly do a product review. Once this is done, move the JIRA ticket to the
"Product Review" state.
* If questions arise from product owners during review, work with the contributor to
get those questions answered before the next round of review.
* Once a PR has passed product review, do a first-round review of the PR with the
contributor. That is, make sure quality and test coverage is up to par, and that
the code generally meets our style guidelines. Once this has happened, move the
ticket to the "Awaiting Prioritization" state.
* At each of these junctures, try to update the author with an estimate of how long
the next steps will take. The product team will meet biweekly to review new
proposals and prioritize PRs for team review. Direct the contributor to the JIRA ticket
as well; the state of the JIRA ticket reflects the above diagram and can give a good
sense of where in the process the pull request is.
* Once a PR has been prioritized for team review, ask the product owner for an estimate
of how many sprints it will take for the pull request to be reviewed:
if it's more than one, try to push back and advocate for the contributor.
However, the estimate is ultimately up to the product owner, and if he/she
says it will really be more than one sprint, respect that.
* Add a comment to the pull request and inform the author that the pull request
is queued to be reviewed. Give them an estimate of when the pull request
will be reviewed: if you’re not sure what to say, tell them it will be in
two weeks. If the product owner has estimated that it will take more than
one sprint before the pull request can be reviewed, direct the contributor to
JIRA to monitor progress.
To determine which teams the pull request impacts, use common sense --
but in addition, there are a few guidelines:
* If any SASS files are modified, or any HTML in templates,
include the UX (user experience) team.
* If any settings files or requirements files are modified,
include the devops team.
* If any XModules are modified,
include the blades team.
* If any logging events are modified,
include the analytics team.
* Include the doc team on every contributor pull request that has a user-facing change.
Once the code review process has started, the community managers are also
responsible for keeping the pull request unblocked during the review process. If
a pull request has been waiting on a core committer for a few days, a community
manager should remind the core committer to re-review the pull request. If a
pull request has been waiting on a contributor for a few days, a community
manager should add a comment to the pull request, informing the contributor that
if they want the pull request merged, they need to address the review comments.
If the contributor still has not responded after a few more days, a community
manager should close the pull request. Note that if a contributor adds a comment
saying something along the lines of “I can’t do this right now, but I’ll come
back to it in X amount of time”, that’s fine, and the PR can remain open -- but
a community manager should come back after X amount of time, and if the PR still
hasn’t been addressed, he or she should warn the contributor again.
***********
Contributor
***********
Before you make a pull request, it’s a good idea to reach out to the edX
developers and the rest of the Open edX community to discuss your ideas. There
might well be someone else already working on the same change you want to make,
and it’s much better to collaborate than to submit incompatible pull requests.
You can `send an email to the mailing list`_, `chat on the IRC channel`_, or
`open an issue in our JIRA issue tracker`_. The earlier you start the
conversation, the easier it will be to make sure that everyone’s on the right
track -- before you spend a lot of time and effort making a pull request.
.. _send an email to the mailing list: https://groups.google.com/forum/#!forum/edx-code
.. _chat on the IRC channel: http://webchat.freenode.net?channels=edx-code
.. _open an issue in our JIRA issue tracker: https://openedx.atlassian.net
If you've got an idea for a new feature or new functionality for an existing feature,
and wish to contribute your code upstream, please `start a discussion on JIRA`_
(you may first need to `create a free JIRA account`_).
Do this by visiting the JIRA website and clicking the "Create" button at the top.
Choose the project "Open Source Pull Requests" and the issue type "Feature Proposal";
in the description give us as much detail as you can for the feature or functionality
you are thinking about implementing. We encourage you to do this before
you begin implementing your feature, in order to get valuable feedback from the edX
product team early on in your journey and increase the likelihood of a successful
pull request.
.. _start a discussion on JIRA: https://openedx.atlassian.net/secure/Dashboard.jspa
.. _create a free JIRA account: https://openedx.atlassian.net/admin/users/sign-up
It’s also sometimes useful to submit a pull request even before the code is
working properly, to make it easier to collect early feedback. To indicate to
others that your pull request is not yet in a functional state, just prefix the
pull request title with "(WIP)" (which stands for Work In Progress). Please do
include a link to a WIP pull request in your JIRA ticket, if you have one.
Once you’re ready to submit your changes in a pull request, check the following
list of requirements to be sure that your pull request is ready to be reviewed:
#. Prepare a :doc:`pull request cover letter <cover-letter>`. When you open
up your pull request, put your cover letter into the "Description" field on Github.
#. The code should be clear and understandable.
Comments in code, detailed docstrings, and good variable naming conventions
are expected. The `edx-platform Github wiki`_ contains many great links to
style guides for Python, Javascript, and internationalization (i18n) conventions.
#. The pull request should be as small as possible.
Each pull request should encompass only one idea: one bugfix, one feature,
etc. Multiple features (or multiple bugfixes) should not be bundled into
one pull request. A handful of small pull requests is much better than
one large pull request.
#. Structure your pull request into logical commits.
"Fixup" commits should be squashed together. The best pull requests contain
only a single, logical change -- which means only a single, logical commit.
#. All code in the pull request must be compatible with edX's AGPL license.
This means that the author of the pull request must sign a `contributor's
agreement with edX`_, and all libraries included or referenced in
the pull request must have `compatible licenses`_.
#. All of the tests must pass.
If a pull request contains a new feature, it should also contain
new tests for that feature. If the pull request fixes a bug, it should
also contain a test for that bug to be sure that it stays fixed.
(edX’s continuous integration server will verify this for your pull request,
and point out any failing tests.)
#. The author of the pull request should provide a test plan for manually verifying
the change in this pull request. The test plan should include details
of what should be checked, how to check it, and what the correct behavior
should be. When it makes sense to do so, a good test plan includes a tarball
of a small edX test course that has a unit which triggers the bug or illustrates
the new feature.
#. For pull requests that make changes to the user interface,
please include screenshots of what you changed. Github will allow
you to upload images directly from your computer.
In the future, the core committers will produce a style guide that
contains more requirements around how pages should appear and how
front-end code should be structured.
#. The pull request should contain some documentation for the feature or bugfix,
either in a README file or in a comment on the pull request.
A well-written description for the pull request may be sufficient.
#. The pull request should integrate with existing infrastructure as much
as possible, rather than reinventing the wheel. In a project as large as
Open edX, there are many foundational components that might be hard to find,
but it is important not to duplicate functionality, even if small,
that already exists.
#. The author of the pull request should be receptive to feedback and
constructive criticism.
The pull request will not be accepted until all feedback from reviewers
is addressed. Once a core committer has reviewed a pull request from a
contributor, no further review is required from the core committer until
the contributor has addressed all of the core committer’s feedback:
either making changes to the pull request, or adding another comment
explaining why the contributor has chosen not to make any change
based on that feedback.
It’s also important to realize that you and the core committers may have
different ideas of what is important in the codebase. The power and freedom of
open source software comes from the fact that you can fork our software and make
any modifications that you like, without permission from us; however, the core
committers are similarly empowered and free to decide what modifications to pull
in from other contributors, and what not to pull in. While your code might work
great for you on a small installation, it might not work as well on a large
installation, have problems with performance or security, not be compatible with
internationalization or accessibility guidelines, and so on. There are many,
many reasons why the core committers may decide not to accept your pull request,
even for reasons that are unrelated to the quality of your code change. However,
if we do reject your pull request, we will explain why we aren’t taking it, and
try to suggest other ways that you can accomplish the same result in a way that
we will accept.
Once a PR is Open
-----------------
Once a pull request is open, our faithful robot "Botbro" will open up a JIRA ticket
in our system to track review of your pull request. The JIRA ticket is a way for
non-engineers (particularly, product owners) to understand your change and prioritize
your pull request for team review.
If you open up your pull request with a solid description, following the
:doc:`pull request cover letter <cover-letter>` guidelines, the product owners will be able
to quickly understand your change and prioritize it for review. However, they may have
some questions about your intention, need, and/or approach that they will ask about
on the JIRA ticket. A community manager will ping you on Github to clarify these questions if
they arise; you are not required to monitor the JIRA discussion.
Once the product team has sent your pull request to the engineering teams for review, all
technical discussion regarding your change will occur on Github, inline with your code.
Further Information
-------------------
For further information on the pull request requirements, please see the following
links:
* :doc:`../code-considerations`
* :doc:`../testing/jenkins`
* :doc:`../testing/code-coverage`
* :doc:`../testing/code-quality`
* `Python Guidelines <https://github.com/edx/edx-platform/wiki/Python-Guidelines>`_
* `Javascript Guidelines <https://github.com/edx/edx-platform/wiki/Javascript-Guidelines>`_
.. _edx-platform Github wiki: https://github.com/edx/edx-platform/wiki#development
.. _contributor's agreement with edX: http://code.edx.org/individual-contributor-agreement.pdf
.. _compatible licenses: https://github.com/edx/edx-platform/wiki/Licensing
**************
Core Committer
**************
Core committers are responsible for doing code review on pull requests from
contributors, once the pull request has passed through a community manager and
been prioritized by a product owner. As much as possible, the code review
process should be treated identically to the process of reviewing a pull request
from another core committer: we’re all part of the same community. However,
there are a few ways that the process is different:
* The contributor cannot see when conflicts occur in the branch.
These conflicts prevent the pull request from being merged,
so you should ask the contributor to rebase their pull request,
and point them to `the documentation for doing so`_.
* Jenkins may not run on the contributor’s pull request automatically.
Be sure to start new Jenkins jobs for the PR as necessary -- do not approve
a pull request unless Jenkins has run, and passed, on the last commit
in the pull request. If this contributor has already contributed a few
good pull requests, that contributor can be added to the Jenkins whitelist,
so that jobs are run automatically.
* The contributor may not respond to comments in a timely manner.
This is not your concern: you can move on to other things while waiting.
If there is no response after a few days, a community manager will warn the
contributor that if the comments are not addressed, the pull request will
be closed. (You can also warn the contributor yourself, if you wish.)
Do not close the pull request merely because the contributor hasn’t responded
-- if you think the pull request should be closed, inform the
community managers, and they will handle it.
.. _the documentation for doing so: https://github.com/edx/edx-platform/wiki/How-to-Rebase-a-Pull-Request
Each Scrum team should decide for themselves how to estimate stories related to
reviewing external pull requests, and how to claim points for those stories,
keeping in mind that an unresponsive contributor may block the story in ways
that the team can’t control. When deciding how many contributor pull request
reviews to commit to in the upcoming iteration, teams should plan to spend about
two hours per week per developer on the team -- larger teams can plan to spend
more time than smaller teams. For example, a team with two developers should plan
to spend about four hours per week on pull request review, while a team with
four developers should plan to spend about eight hours per week on pull request
review -- these hours can be spread out among multiple developers, or one
developer can do all the review for the whole team in that iteration.
However, this is just a guideline: the teams can decide for themselves how
many contributor pull request reviews they want to commit to.
Once a pull request from a contributor passes all required code reviews, a core
committer will need to merge the pull request into the project. The core
committer who merges the pull request will be responsible for verifying those
changes on the staging server prior to release, using the manual test plan provided
by the author of the pull request.
In addition to reviewing contributor requests as part of sprint work, core
committers should expect to spend about one hour per week doing other tasks
related to the open source community: reading/responding to questions on the
mailing list and/or IRC channel, disseminating information about what edX is
working on, and so on.
Review Comments Terminology
---------------------------
In order to expedite the review process and to have a clear and mutual understanding
between reviewers and contributors, the following terminology is strongly suggested
when submitting comments on a PR:
* **Must** - A comment of type "Must" indicates the reviewer feels strongly about
their requested change to the code and feels the PR should not be merged unless
their concern is satisfactorily addressed.
* **Opt(ional)** - A comment of type "Optional" indicates the reviewer strongly
favors their suggestion, but may be agreeable to the current behavior, especially
with a persuasive response.
* **Nit(pick)** - A comment of type "Nitpick" indicates the reviewer has a minor
criticism that *may* not be critical to address, but considers important to share
in the given context. Contributors should still seriously consider and weigh these
nits and address them in the spirit of maintaining high quality code.
* **FYI** - A comment of type "FYI" is a related side comment that is
informative, but with the intention of having no required immediate action.
As an example, the following PR comment is clearly categorized as Optional:
``"Optional: Consider reducing the high degree of connascense in this code by using
keyword arguments."``
**Note:** Unless stated or implied otherwise, all comments are assumed to be of type
"Must".
**Note 2:** It is possible that after further discussion and review, the reviewer
chooses to amend their comment, thereby changing its severity to be higher or
lower than what was originally set.
*************************
Pull Request Cover Letter
*************************
When opening up a pull request, please prepare a "cover letter" to place into
the "Description" field on Github. A good cover letter concisely answers as
many of the following questions as possible. Not all pull requests will have
answers to every one of these questions, which is okay!
* What JIRA ticket does this address (if any)? Please provide a link to the JIRA ticket
representing the bug you are fixing or the feature discussion you've already
had with the edX product owners.
* Who have you talked to at edX about this work? Design, architecture, previous PRs,
course project manager, IRC, mailing list, etc. Please include links to relevant
discussions.
* Why do you need this change? It's important for us to understand what problem your
change is trying to solve, so please describe fully why you feel this change is needed.
* What components are affected? (LMS, Studio, a specific app in the system, etc)
* What users are affected? For example, is this a new component intended for use
in just one course, or is this a system wide change affecting all edX students?
* Test instructions for manual testing. When it makes sense to do so, a good test
plan includes a tarball of a small test course that has a unit which triggers
the bug or illustrates the new feature. Another option would be to provide
explicit, numbered steps (ideally with screenshots!) to walk the reviewer
through your feature or fix.
* Please provide screenshots for all user-facing changes.
* Indicate the urgency of your request. If this is a pull request for a course
running or about to run on edx.org, we need to understand your time constraints.
Good pieces of information to provide are the course(s) that need this feature
and the date that the feature is needed by.
* What are your concerns (the author’s) about the PR? Is there a corner case you
don't know how to address or some tests you aren't sure how to add? Please bring
these concerns up in your cover letter so we can help!
Example Of A Good PR Cover Letter
---------------------------------
`Pull Request 4675`_ is one of the first edX pull requests to include a cover
letter, and it is great! It clearly explains what the bug is, what system is
affected (just the LMS), includes a tarball of a course that demonstrates the
issue, and provides clear manual testing instructions.
`Pull Request 4983`_ is another great example. This pull request's cover letter
includes before and after screenshots, so the UX team can quickly understand
what changes were made and make suggestions. Further, the pull request indicates
how to manually test the feature and what date it is needed by.
.. _Pull Request 4675: https://github.com/edx/edx-platform/pull/4675
.. _Pull Request 4983: https://github.com/edx/edx-platform/pull/4983
.. _Contributing to Open edX:
###########################
Contributing to Open edX
###########################
.. toctree::
:maxdepth: 2
overview
contributor
cover-letter
community-manager
product-owner
core-committer
*****************************
Process for Contributing Code
*****************************
Open edX is a massive project, and we would love you to help us build
the best online education system in the world -- we can’t do it alone!
However, the core committers on the project are also developing features
and creating pull requests, so we need to balance reviewing time with
development time. To help manage our time and keep everyone as happy as
possible, we’ve developed this document that explains what core committers
and other contributors can expect from each other. The goals are:
* Keep pull requests unblocked and flowing as much as possible,
while respecting developer time and product owner prioritization.
* Maintain a high standard for code quality, while avoiding hurt feelings
as much as possible.
Roles
-----
People play different roles in the pull-request review process. Each role has
different jobs and responsibilities:
:doc:`core-committer`
Can commit changes to an Open edX repository. Core committers are
responsible for the quality of the code, and for supporting the code in the
future. Core committers are also developers in their own right.
:doc:`product-owner`
Prioritizes the work of core committers.
:doc:`community-manager`
Helps keep the community healthy and working smoothly.
:doc:`contributor`
Submits pull requests for eventual committing to an Open edX repository.
.. note::
At the moment, developers who work for edX are core committers, and other
developers are contributors. This may change in the future.
Overview
--------
.. image:: pr-process.png
:align: center
:alt: A visualization of the pull request process
:target: ../_images/pr-process.png
If you are a :doc:`contributor <contributor>` submitting a pull request, expect that it will
take a few weeks before it can be merged. The earlier you can start talking
with the rest of the Open edX community about the changes you want to make,
before you even start changing code, the better the whole process
will go.
Follow the guidelines in this document for a high-quality pull request: include a detailed
description of your pull request when you open it on Github (we recommend using a
:doc:`pull request cover letter <cover-letter>` to guide your description),
keep the code clear and readable, make sure the tests pass, be responsive to code review comments.
Small pull requests are easier to review than large pull requests, so
split up your changes into several small pull requests when possible --
it will make everything go faster. See the full :doc:`contributor guidelines <contributor>`
for details of what to do and what to expect.
If you are a :doc:`product owner <product-owner>`, treat pull requests
from contributors like feature requests from a customer.
Keep the lines of communication open -- if there are delays or unexpected
problems, add a comment to the pull request informing the author of the
pull request of what’s going on. No one likes to feel like they’re being ignored!
More details are in the :doc:`product owner guidelines <product-owner>`.
If you are a :doc:`core committer <core-committer>`, allocate some time
in every two-week sprint to review pull requests from other contributors.
The community managers will make sure that these pull requests meet a
basic standard for quality before asking you to spend time reviewing them.
More details are in the :doc:`core committer guidelines <core-committer>`.
Feel free to read the other documentation specific to each individual role in the
process, but you don’t need to read everything to get started! If you're not
sure where to start, check out the :doc:`contributor <contributor>` documentation. Thanks
for helping us grow the project smoothly! :)
*************
Product Owner
*************
The product owner has two main responsibilities: approving user-facing features
and improvements from a product point of view, and prioritizing pull request
reviews.
When a contributor is interested in developing a new feature, or enhancing
an existing one, they can engage in a dialogue with the product team about
the feature: why it is needed, what does it do, etc. Product owners are expected
to fully engage in this process and treat contributors like customers. If
the idea is good but the implementation idea is poor, direct them to a better
solution. If the feature is not something we can support at this time, provide
a detailed explanation of why that is.
A product owner is responsible for prioritizing pull requests from
contributors, and keeping them informed when prioritization slips. Pull
requests that are ready to be prioritized in the next sprint will have an
"Awaiting Prioritization" label on their JIRA review tickets. At every
product review meeting (which should happen each sprint), pull requests awaiting
prioritization should either be included in the sprint for the appropriate team
as a commitment to get the pull request reviewed, or the
product owner must inform the author of the pull request that the pull request
is still queued and is not being ignored. Contributors should be treated as
customers, and if their pull requests are delayed then they should be informed
of that, just as a product owner would inform any customer when that customer’s
requests are delayed.
####################
edX Public Sandboxes
####################
EdX maintains a set of publicly-available sandboxes that allow contributors
to interact with the software without having to set up a local development
environment.
* `edx.org Sandbox`_ for those looking to try out the software powering edx.org.
* `Language Sandboxes`_ for contributors helping to translate Open edX into
various languages, who have a need to see translations "in context" - that is,
in use on an actual website.
edx.org Sandbox
***************
This sandbox is intended for those looking to try out the software powering
`edx.org <https://www.edx.org>`_.
The sandbox provides staff- and student-level access to a copy of the current
version of the edx.org website. This sandbox does not allow access to Studio, the
course-authoring system.
Log in by visiting the following URL:
* `https://www.sandbox.edx.org/ <https://www.sandbox.edx.org/>`_
You can log in to a staff account using the following credentials:
* username: staff@example.com
* password: edx
You can log in to a student account using one of the following credentials.
These user accounts represent students enrolled in the demo course with an
audit, honor code, or verified certificate, respectively:
* username: audit@example.com / honor@example.com / verified@example.com
* password: edx
Language Sandboxes
******************
These sandboxes are intended for translators who have a need to see
translations "in context" - that is, in use on an actual website.
On edx.org, we only pull down reviewed translations from Transifex. See the
`translation guidelines <https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst#joining-a-review-team>`_
for more details.
To help you review and test, these sandboxes present *all* translations, not
just reviewed translations. This means that you may encounter broken pages as
you navigate the website. If this happens, it is probably because some of the
translated strings in your language have errors such as missing HTML tags or
altered {placeholders}. Go through your translations to find and correct these
types of translation errors. Use
`this guide <https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst#guidelines-for-translators>`_
to review how to produce non-broken translations.
Visiting the Sandboxes
======================
There are two language sandboxes, one for right-to-left, aka "RTL", languages
(Arabic, Farsi, Hebrew, and Urdu) and a second one for left-to-right, aka "LTR",
languages. Right now, RTL and LTR cannot be supported on the same installation,
because the CSS needs to be compiled separately (fixing this issue is a task on our
backlog!).
Note: This is our first deployment of our alpha version of RTL language support! If
you have any comments or find any visual bugs, please let us know by posting on the
`openedx-translation <https://groups.google.com/forum/#!forum/openedx-translation>`_
mailing list.
LTR and RTL sandboxes are available for both the LMS, or learning management system (the part
of the website that students see) and Studio, the course authoring platform.
You can access the LMS at:
* LTR Sandbox `http://translation-ltr.m.sandbox.edx.org/ <http://translation-ltr.m.sandbox.edx.org/>`_
* RTL Sandbox `http://translation-rtl.m.sandbox.edx.org/ <http://translation-rtl.m.sandbox.edx.org/>`_
And you can access Studio at:
* LTR Sandbox `http://studio.translation-ltr.m.sandbox.edx.org/ <http://studio.translation-ltr.m.sandbox.edx.org/>`_
* RTL Sandbox `http://studio.translation-rtl.m.sandbox.edx.org/ <http://studio.translation-rtl.m.sandbox.edx.org/>`_
To access the sandbox servers, you must supply the following username and password:
* username: edx
* password: translation
Logging In To Sandbox Accounts
==============================
To log in to the sandbox for a language, you supply the language code in the
username as follows:
* username: LANGUAGE_CODE@example.com
* password: edx
So if you are working on Chinese (China), you'll log in with these credentials:
* username: zh_CN@example.com
* password: edx
This user account has Course Staff privileges so that you can test Studio and
instructor-specific pages in the LMS.
You can also make new student-level user accounts, which is useful for verifying
translations within the registration flow.
Feel free to test in any way that you want in these sandboxes. Particularly, you are
encouraged to make new courses, as well as add and delete course content. The sandboxes
can be reset if anything breaks, and they are completely disconnected from the
production version of the edx.org website.
Caveats and Warnings
====================
#. These sandboxes will be updated with new translations and the newest version
of the edx-platform code about once per week.
#. We recommend users utilize Chrome or Firefox when using the edX courseware.
#. When you test, make sure that your browser preference is set to the language
you want to test. When you are logged in to the LMS, you can use the
language preference widget on the student dashboard page to set or change
your language. However, when you are viewing Studio, or if you are not yet
logged in to the LMS, the site uses your browser preference to determine
what language to display. See `this page on changing your browser's language
<http://www.wikihow.com/Change-Your-Browser's-Language>`_ if you need help.
#. To see an untranslated edX instance in English, which can be helpful to
compare to the translated instance, switch your language to English, or
visit the `edx.org Sandbox`_.
#. At the moment, the site does not properly work for languages with an ``@``
symbol in the language code, so for now, those languages cannot use the
sandbox.
#. If you have a copy of the edx-platform code, you can generate a list of broken
translations in your language by first pulling down the latest translation files::
tx pull -l LANGUAGE_CODE
Replace ``LANGUAGE_CODE`` with your code, for example ``zh_CN``.
See `this page for instructions on how to configure Transifex <https://github.com/edx/edx-platform/wiki/Internationalization-and-localization>`_.
Next, run the commands::
paver i18n_generate
i18n_tool validate
This will generate reports of broken translations in your language. This will not, however,
catch HTML tags that are out of order (ex. ``</b> <b>`` instead of ``<b> </b>``).
We hope you find these sandboxes helpful. If you have any questions, comments, or
concerns, please give us feedback by posting on the
`openedx-translation <https://groups.google.com/forum/#!forum/openedx-translation>`_
mailing list. We'd be happy to hear about any improvements you think we could make!
*******************************************
Sandbox-packages
*******************************************
.. module:: sandbox-packages
Loncapa
=======
.. automodule:: loncapa.loncapa_check
:members:
:show-inheritance:
\ No newline at end of file
*******************************************
Symmath
*******************************************
.. module:: symmath
Formula
=======
.. automodule:: symmath.formula
:members:
:show-inheritance:
Symmath check
=============
.. automodule:: symmath.symmath_check
:members:
:show-inheritance:
Symmath tests
=============
.. automodule:: symmath.test_formula
:members:
:show-inheritance:
.. automodule:: symmath.test_symmath_check
:members:
:show-inheritance:
\ No newline at end of file
*************
Code Coverage
*************
We measure which lines of our codebase are covered by unit tests using
`coverage.py`_ for Python and `JSCover`_ for Javascript.
Our codebase is far from perfect, but the goal is to steadily improve our coverage
over time. To do this, we wrote a tool called `diff-cover`_ that will
report which lines in your branch are not covered by tests, while ignoring
other lines in the project that may not be covered. Using this tool,
we can ensure that pull requests have a very high percentage of test coverage
-- and ideally, they increase the test coverage of existing code, as well.
To check the coverage of your pull request, just go to the top level of the
edx-platform codebase and run::
$ paver coverage
This will print a coverage report for your branch. We aim for
a coverage report score of 95% or higher. We also encourage you to write
acceptance tests as your changes require.
.. _coverage.py: https://pypi.python.org/pypi/coverage
.. _JSCover: http://tntim96.github.io/JSCover/
.. _diff-cover: https://github.com/edx/diff-cover
************
Code Quality
************
In order to keep our code as clear and readable as possible, we use various
tools to assess the quality of pull requests:
* We use the `pep8`_ tool to follow `PEP-8`_ guidelines
* We use `pylint`_ for static analysis and uncovering trouble spots in our code
Our codebase is far from perfect, but the goal is to steadily improve our quality
over time. To do this, we wrote a tool called `diff-quality`_ that will
only report on the quality violations on lines that have changed in a
pull request. Using this tool, we can ensure that pull requests do not introduce
any new quality violations -- and ideally, they clean up existing violations
in the process of introducing other changes.
To check the quality of your pull request, just go to the top level of the
edx-platform codebase and run::
$ paver run_quality
You can also use the ``paver run_pep8`` and ``paver run_pylint`` commands to run just pep8 or
pylint.
This will print a report of the quality violations that your branch has made.
Although we try to be vigilant and resolve all quality violations, some Pylint
violations are just too challenging to resolve, so we opt to ignore them via
use of a pragma. A pragma tells Pylint to ignore the violation in the given
line. An example is::
self.assertEquals(msg, form._errors['course_id'][0]) # pylint: disable=protected-access
The pragma starts with a ``#`` two spaces after the end of the line. We prefer
that you use the full name of the error (``pylint: disable=unused-argument`` as
opposed to ``pylint: disable=W0613``), so it's more clear what you're disabling
in the line.
.. _PEP-8: http://legacy.python.org/dev/peps/pep-0008/
.. _pep8: https://pypi.python.org/pypi/pep8
.. _coverage.py: https://pypi.python.org/pypi/coverage
.. _pylint: http://pylint.org/
.. _diff-quality: https://github.com/edx/diff-cover
*******
Testing
*******
Testing is something that we take very seriously at edX: we even have a
"test engineering" team at edX devoted purely to making our testing
infrastructure even more awesome.
This file is currently a stub: to find out more about our testing infrastructure,
check out the `testing.md`_ file on Github.
.. toctree::
:maxdepth: 2
jenkins
code-coverage
code-quality
.. _testing.md: https://github.com/edx/edx-platform/blob/master/docs/en_us/internal/testing.md
*******
Jenkins
*******
`Jenkins`_ is an open source continuous integration server. edX has a Jenkins
installation specifically for testing pull requests to our open source software
project, including edx-platform. Before a pull request can be merged, Jenkins
must run all the tests for that pull request: this is known as a "build".
If even one test in the build fails, then the entire build is considered a
failure. Pull requests cannot be merged until they have a passing build.
Kicking Off Builds
==================
Jenkins has the ability to automatically detect new pull requests and changed
pull requests on Github, and it can automatically run builds in response to
these events. We have Jenkins configured to automatically run builds for all
pull requests from core committers; however, Jenkins will *not* automatically
run builds for new contributors, so a community manager will need to manually
kick off a build for a pull request from a new contributor.
The reason for this distinction is a matter of trust. Running a build means that
Jenkins will execute all the code in the pull request. A pull request can
contain any code whatsoever: if we allowed Jenkins to automatically build every
pull request, then a malicious developer could make our Jenkins server do whatever
he or she wanted. Before kicking off a build, community managers look at the
code changes to verify that they are not malicious; this protects us from nasty
people.
Once a contributor has submitted a few pull requests, they can request to be
added to the Jenkins whitelist: this is a special list of people that Jenkins
*will* kick off builds for automatically. If the community managers feel that
the contributor is trustworthy, then they will grant the request, which will
make future development faster and easier for both the contributor and edX. If
a contributor shows that they cannot be trusted for some reason, they will be
removed from this whitelist.
Failed Builds
=============
Click on the build to be brought to the build page. You'll see a matrix of blue
and red dots; the red dots indicate which sections contained failing tests.
You can click on the test name to be brought to an error trace that explains
why the tests fail. Please address the failing tests before requesting a new
build on your branch. If the failures appear to not have anything to do with
your code, it may be the case that the master branch is failing. You can ask
your reviewers for advice in this scenario.
If the build says "Unstable" but passes all tests, you have introduced too many
pep8 and pylint violations. Please refer to the documentation for :doc:`code-quality`
and clean up the code.
Successful Builds
=================
If all the tests pass, the "Diff Coverage" and "Diff Quality" reports are
generated. Click on the "View Reports" link on your pull request to be brought
to the Jenkins report page. In a column on the left side of the page are a few
links, including "Diff Coverage Report" and "Diff Quality Report". View each of
these reports (making note that the Diff Quality report has two tabs - one for
pep8, and one for Pylint).
Make sure your quality coverage is 100% and your test coverage is at least 95%.
Adjust your code appropriately if these metrics are not high enough. Be sure to
ask your reviewers for advice if you need it.
.. _Jenkins: http://jenkins-ci.org/
The video player persists some user preferences between videos, and these preferences are stored on the server.
Content for sequential positions is loaded just once on page load and is not updated when the user navigates between sequential positions. As a result, we do not have current data from the server.
To resolve this issue, cookies are used as temporary storage and are removed on page unload.
How it works:
1) On page load: cookies are empty and the player gets current data from the server.
2) When the user changes a preference, the new value is stored in a cookie.
3) If the user navigates to another sequential position, the video player gets current data from the cookies.
4) When the page is closed, the `unload` event fires; we clear our cookies and send the user preferences to the server.
Integrating XBlocks with edx-platform
=====================================
The edX LMS and Studio have several features that are extensions of the core XBlock
libraries (https://xblock.readthedocs.org). These features are listed below.
* `LMS`_
* `Studio`_
* `Testing`_
* `Deploying your XBlock`_
LMS
---
Runtime Features
~~~~~~~~~~~~~~~~
These are properties and methods available on ``self.runtime`` when a view or handler is executed by the LMS.
* anonymous_student_id: An identifier unique to the student in the particular course
that the block is being executed in. The same student in two different courses
will have two different ids.
* publish(block, event_type, event): Emit events to the surrounding system. Events are dictionaries that can contain arbitrary data.
XBlocks can publish events by calling ``self.runtime.publish(self, event_type, event)``. The ``event_type`` parameter
enables downstream processing of the event since it uniquely identifies the schema. This call will cause the runtime
to save the event data in the application event stream. XBlocks should publish events whenever a significant state
change occurs. Post-hoc analysis of the event stream can yield insight about how the XBlock is used in the context of
the application. Ideally interesting state of the XBlock could be reconstructed at any point in history through
careful analysis of the event stream.
TODO: Link to the authoritative list of event types.
In the future, these are likely to become more formal XBlock services (one related to users,
and the other to event publishing).
Class Features
~~~~~~~~~~~~~~
These are class attributes or functions that can be provided by an XBlock to customize behaviour
in the LMS.
* student_view (XBlock view): This is the view that will be rendered to display the XBlock
in the LMS. It will also be used to render the block in "preview" mode in Studio, unless
the XBlock also implements author_view.
* has_score (class property): True if this block should appear in the LMS progress page.
* get_progress (method): See documentation in x_module.py:XModuleMixin.get_progress.
* icon_class (class property): This can be one of (``other``, ``video``, or ``problem``), and
determines which icon appears in edx sequence headers. There is currently no way to provide
a different icon.
Grading
~~~~~~~
To participate in the course grade, an XBlock should set ``has_score`` to ``True``, and
should ``publish`` a ``grade`` event whenever the grade changes. The ``grade`` event is a
dictionary of the following form::
{
'value': <number>,
'max_value': <number>,
'user_id': <number>,
}
The grade event represents a grade of ``value/max_value`` for the current user. The
``user_id`` field is optional, the currently logged in user's ID will be used if it is
omitted.
Restrictions
~~~~~~~~~~~~
* A block can't modify the value of any field with a scope where the ``user`` property
is ``UserScope.NONE``.
Studio
------
Class Features
~~~~~~~~~~~~~~
* studio_view (XBlock.view): The view used to render an editor in Studio. The editor rendering can be completely different from the LMS student_view, and it is only shown when the author selects "Edit".
* author_view (XBlock.view): An optional view of the XBlock similar to student_view, but with possible inline editing capabilities. This view differs from studio_view in that it should be as similar to student_view as possible. When previewing XBlocks within Studio, Studio will prefer author_view to student_view.
* non_editable_metadata_fields (property): A list of ``xblock.fields.Field`` objects that shouldn't be displayed in the default editing view for Studio.
Restrictions
~~~~~~~~~~~~
* A block can't modify the value of any field with a scope where the ``user`` property
is not ``UserScope.NONE``.
Testing
-------
These instructions are temporary. Once XBlocks are fully supported by edx-platform
(both the LMS and Studio), installation and testing will be much more straightforward.
To enable an XBlock for testing in your devstack (https://github.com/edx/configuration/wiki/edX-Developer-Stack):
#. Install your block::
$ vagrant ssh
vagrant@precise64:~$ sudo -u edxapp /edx/bin/pip.edxapp install /path/to/your/block
#. Enable the block
#. In ``edx-platform/lms/envs/common.py``, uncomment::
# from xmodule.x_module import prefer_xmodules
# XBLOCK_SELECT_FUNCTION = prefer_xmodules
#. In ``edx-platform/cms/envs/common.py``, uncomment::
# from xmodule.x_module import prefer_xmodules
# XBLOCK_SELECT_FUNCTION = prefer_xmodules
#. In ``edx-platform/cms/envs/common.py``, change::
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
to::
'ALLOW_ALL_ADVANCED_COMPONENTS': True,
#. Add the block to your courses' advanced settings in Studio
#. Log in to Studio, and open your course
#. Settings -> Advanced Settings
#. Change the value for the key ``"advanced_modules"`` to ``["your-block"]``
#. Add your block into your course
#. Edit a unit
#. Advanced -> your-block
Note the name ``your-block`` used in Studio must exactly match the key you used to add your
block to your ``setup.py`` ``entry_points`` list (if you are still discovering XBlocks and simply used the ``startnew.py`` script as described at https://github.com/edx/XBlock/blob/master/doc/getting_started.rst , look in the ``setup.py`` file that was created)
Deploying your XBlock
---------------------
To deploy your block to your own hosted version of edx-platform, you need to install it
into the virtualenv that the platform is running out of, and add to the list of ``ADVANCED_COMPONENT_TYPES``
in ``edx-platform/cms/djangoapps/contentstore/views/component.py``.
*******************************************
Xmodule
*******************************************
.. module:: xmodule
Abtest
======
.. automodule:: xmodule.abtest_module
:members:
:show-inheritance:
Back compatibility
==================
.. automodule:: xmodule.backcompat_module
:members:
:show-inheritance:
Capa
====
.. automodule:: xmodule.capa_module
:members:
:show-inheritance:
Course
======
.. automodule:: xmodule.course_module
:members:
:show-inheritance:
Discussion
==========
.. automodule:: xmodule.discussion_module
:members:
:show-inheritance:
Editing
=======
.. automodule:: xmodule.editing_module
:members:
:show-inheritance:
Error
=====
.. automodule:: xmodule.error_module
:members:
:show-inheritance:
Error tracker
=============
.. automodule:: xmodule.errortracker
:members:
:show-inheritance:
Exceptions
==========
.. automodule:: xmodule.exceptions
:members:
:show-inheritance:
Graders
=======
.. automodule:: xmodule.graders
:members:
:show-inheritance:
Hidden
======
.. automodule:: xmodule.hidden_module
:members:
:show-inheritance:
Html checker
============
.. automodule:: xmodule.html_checker
:members:
:show-inheritance:
Html
====
.. automodule:: xmodule.html_module
:members:
:show-inheritance:
LTI
===
.. automodule:: xmodule.lti_module
:members:
:show-inheritance:
Mako
====
.. automodule:: xmodule.mako_module
:members:
:show-inheritance:
Progress
========
.. automodule:: xmodule.progress
:members:
:show-inheritance:
Sequence
========
.. automodule:: xmodule.seq_module
:members:
:show-inheritance:
Stringify
=========
.. automodule:: xmodule.stringify
:members:
:show-inheritance:
Template
========
.. automodule:: xmodule.template_module
:members:
:show-inheritance:
Templates
=========
.. automodule:: xmodule.templates
:members:
:show-inheritance:
Vertical
========
.. automodule:: xmodule.vertical_module
:members:
:show-inheritance:
Video
=====
.. include:: video_player.rst
.. automodule:: xmodule.video_module
:members:
:show-inheritance:
Word Cloud
==========
.. automodule:: xmodule.word_cloud_module
:members:
:show-inheritance:
X
=
.. automodule:: xmodule.x_module
:members:
:show-inheritance:
Xml
===
.. automodule:: xmodule.xml_module
:members:
:show-inheritance:
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment