edx / ease

Commit 5378393b authored Feb 26, 2013 by Vik Paruchuri

More documentation for ML grading algo

parent b70221db

Showing 1 changed file with 22 additions and 16 deletions

grade.py +22 -16
#Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs the trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
"""
Functions to score specified data using specified ML models
"""
import sys
import pickle
...
...
@@ -12,9 +9,11 @@ import numpy
import logging
from statsd import statsd
#Append base_path to sys.path so the modules below can be imported
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
#These imports depend on base_path being on sys.path
from essay_set import EssaySet
import predictor_extractor
import predictor_set
...
...
@@ -29,17 +28,26 @@ log = logging.getLogger(__name__)
@statsd.timed('open_ended_assessment.machine_learning.grader.time')
def grade(grader_data,grader_config,submission):
    """
    Grades a specified submission using specified models
    grader_data - A dictionary:
        {
            'model' : trained model,
            'extractor' : trained feature extractor,
            'prompt' : prompt for the question,
        }
    grader_config - Legacy, kept for compatibility with old code. Need to remove.
    submission - The student submission (string)
    """
    #Initialize result dictionary
    results = {'errors': [], 'tests': [], 'score': 0, 'feedback': "", 'success': False, 'confidence': 0}
    has_error = False
    #Create an essay set object to hold the student submission
    grader_set = EssaySet(type="test")
    #Try to add the submission and prompt to the essay set object
    try:
        grader_set.add_essay(str(submission), 0)
        grader_set.update_prompt(str(grader_data['prompt']))
    except:
...
...
@@ -68,6 +76,7 @@ def grade(grader_data,grader_config,submission):
    if not has_error:
        #If the essay is just a copy of the prompt, return a 0 as the score
        if feedback['too_similar_to_prompt']:
            results['score'] = 0
            results['correct'] = False
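            #For intuition only: a "too similar to prompt" flag could come from
            #a token-overlap heuristic along these lines (a hypothetical sketch,
            #not the actual EssaySet implementation):
            #    prompt_tokens = set(prompt.lower().split())
            #    sub_tokens = set(submission.lower().split())
            #    overlap = len(sub_tokens & prompt_tokens)
            #    too_similar = overlap / float(max(len(sub_tokens), 1)) > .9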
...
...
@@ -75,12 +84,8 @@ def grade(grader_data,grader_config,submission):
        results['success'] = True
        #Generate short form output--number of problem areas identified in feedback
        problem_areas = 0
        for tag in feedback:
            if tag in ['topicality', 'prompt-overlap', 'spelling', 'grammar']:
                problem_areas += len(feedback[tag]) > 5
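        #Note: len(feedback[tag]) > 5 evaluates to a bool, so each qualifying
        #tag contributes 0 or 1 to problem_areas (1 when that tag's feedback,
        #assumed to be a string, is longer than 5 characters).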
        #Add feedback to results if available
        results['feedback'] = {}
        if 'topicality' in feedback and 'prompt_overlap' in feedback:
            results['feedback'].update({
...
...
@@ -88,6 +93,7 @@ def grade(grader_data,grader_config,submission):
                'prompt-overlap' : feedback['prompt_overlap'],
            })
        #Only return spelling and grammar feedback for low scoring responses
        if results['score'] / float(max_score) < .33:
            results['feedback'].update(
                {'spelling' : feedback['spelling'],
...
...
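A minimal sketch of how a caller such as pyxserver_wsgi.py might drive this module, assuming the grader-file layout described in the header comments. The file path, the empty grader_config, max_score, and the correctness cutoff are illustrative assumptions, not taken from this commit:

import pickle

from grade import grade

#Hypothetical path; the grader file is a pickled dict with 'model',
#'extractor', and 'prompt' keys, per the header comments in grade.py.
with open("models/grader.p", "rb") as f:
    grader_data = pickle.load(f)

#grader_config is legacy (see the docstring), so an empty dict is passed.
results = grade(grader_data, {}, "The water cycle moves water between ...")

#Per the header comment, correctness is determined by the ratio of score
#to max possible score; both values below are assumptions.
max_score = 4
print(results['score'], results['score'] / float(max_score) >= .5)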