Commit f66fcf25 by Dan Garrette

fixed all semantics-related doctests

parent 513ef630
......@@ -15,7 +15,7 @@ from nltk.sem.logic import (APP, AbstractVariableExpression, AllExpression,
FunctionVariableExpression, ImpExpression,
IndividualVariableExpression, LambdaExpression, Tokens,
LogicParser, NegatedExpression, OrExpression, Variable,
is_eventvar, is_funcvar, is_indvar)
is_eventvar, is_funcvar, is_indvar, unique_variable)
# Import Tkinter-based modules if they are available
try:
......
......@@ -511,11 +511,11 @@ class Glue(object):
if add_reading:
reading_list.append(glueformula.meaning)
def parse_to_compiled(self, sentence='a man sees Mary'):
def parse_to_compiled(self, sentence='a man sees Mary'.split()):
gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)]
return [self.gfl_to_compiled(gfl) for gfl in gfls]
def dep_parse(self, sentence='every cat leaves'):
def dep_parse(self, sentence='every cat leaves'.split()):
#Lazy-initialize the depparser
if self.depparser is None:
from nltk.parse import MaltParser
......@@ -649,7 +649,7 @@ def demo(show_example=-1):
for (i, sentence) in enumerate(examples):
if i==show_example or show_example==-1:
print '[[[Example %s]]] %s' % (i, sentence)
for reading in glue.parse_to_meaning(sentence):
for reading in glue.parse_to_meaning(sentence.split()):
print reading.simplify()
print ''
......
......@@ -509,35 +509,10 @@ Regression Testing from book
>>> logic._counter._value = 0
>>> from nltk.tag import RegexpTagger
>>> tagger = RegexpTagger(
... [('^(chases|runs)$', 'VB'),
... ('^(a)$', 'ex_quant'),
... ('^(every)$', 'univ_quant'),
... ('^(dog|boy)$', 'NN'),
... ('^(He)$', 'PRP')
... ])
>>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger))
>>> dt = DiscourseTester(['Every dog chases a boy', 'He runs'], rc)
>>> dt.readings()
<BLANKLINE>
s0 readings:
<BLANKLINE>
s0-r0: ([],[(([x],[dog(x)]) -> ([z1],[boy(z1), chases(x,z1)]))])
s0-r1: ([z2],[boy(z2), (([x],[dog(x)]) -> ([],[chases(x,z2)]))])
<BLANKLINE>
s1 readings:
<BLANKLINE>
s1-r0: ([x],[PRO(x), runs(x)])
>>> dt.readings(show_thread_readings=True)
d0: ['s0-r0', 's1-r0'] : INVALID: AnaphoraResolutionException
d1: ['s0-r1', 's1-r0'] : ([z6,z10],[boy(z6), (([x],[dog(x)]) -> ([],[chases(x,z6)])), (z10 = z6), runs(z10)])
>>> dt.readings(filter=True, show_thread_readings=True)
d1: ['s0-r1', 's1-r0'] : ([z12,z15],[boy(z12), (([x],[dog(x)]) -> ([],[chases(x,z12)])), (z15 = z12), runs(z15)])
>>> from nltk.parse import load_earley
>>> from nltk.parse import FeatureEarleyChartParser
>>> from nltk.sem.drt import DrtParser
>>> parser = load_earley('grammars/book_grammars/drt.fcfg', trace=0, logic_parser=DrtParser())
>>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser())
>>> parser = FeatureEarleyChartParser(grammar, trace=0)
>>> trees = parser.nbest_parse('Angus owns a dog'.split())
>>> print trees[0].node['SEM'].simplify()
([x,z17],[Angus(x), dog(z17), own(x,z17)])
([x,z2],[Angus(x), dog(z2), own(x,z2)])
......@@ -410,57 +410,57 @@ Parse errors
============
>>> try: dp.parse(r'')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expression expected.
<BLANKLINE>
^
>>> try: dp.parse(r'(')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expression expected.
(
^
>>> try: dp.parse(r'()')
... except ParseException, e: print e
... except logic.ParseException, e: print e
Unexpected token: ')'. Expression expected.
()
^
>>> try: dp.parse(r'([')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expected token ']'.
([
^
>>> try: dp.parse(r'([,')
... except ParseException, e: print e
... except logic.ParseException, e: print e
',' is an illegal variable name. Constants may not be quantified.
([,
^
>>> try: dp.parse(r'([x,')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Variable expected.
([x,
^
>>> try: dp.parse(r'([]')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expected token '['.
([]
^
>>> try: dp.parse(r'([][')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expected token ']'.
([][
^
>>> try: dp.parse(r'([][,')
... except ParseException, e: print e
... except logic.ParseException, e: print e
Unexpected token: ','. Expression expected.
([][,
^
>>> try: dp.parse(r'([][]')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expected token ')'.
([][]
^
>>> try: dp.parse(r'([x][man(x)]) |')
... except ParseException, e: print e
... except logic.ParseException, e: print e
End of input found. Expression expected.
([x][man(x)]) |
^
......
......@@ -12,13 +12,13 @@
Linear logic
======================
>>> from nltk.sem import logic
>>> from nltk.sem.linearlogic import *
>>> from nltk.sem import linearlogic as linlog
>>> from nltk.sem.glue import *
>>> llp = LinearLogicParser()
>>> from nltk.sem import logic
Parser
>>> print llp.parse(r'f')
......@@ -55,13 +55,13 @@ Simplify
Test BindingDict
>>> h = ConstantExpression('h')
>>> g = ConstantExpression('g')
>>> f = ConstantExpression('f')
>>> h = linlog.ConstantExpression('h')
>>> g = linlog.ConstantExpression('g')
>>> f = linlog.ConstantExpression('f')
>>> H = VariableExpression('H')
>>> G = VariableExpression('G')
>>> F = VariableExpression('F')
>>> H = linlog.VariableExpression('H')
>>> G = linlog.VariableExpression('G')
>>> F = linlog.VariableExpression('F')
>>> d1 = BindingDict([(H,h)])
>>> d2 = BindingDict([(F,f),(G,F)])
......@@ -332,6 +332,8 @@ Derivation:
---------------------------------
Dependency Graph to Glue Formulas
---------------------------------
>>> from nltk.corpus.reader.dependency import DependencyGraph
>>> depgraph = DependencyGraph("""1 John _ NNP NNP _ 2 SUBJ _ _
... 2 sees _ VB VB _ 0 ROOT _ _
... 3 a _ ex_quant ex_quant _ 4 SPEC _ _
......@@ -347,8 +349,8 @@ Dependency Graph to Glue Formulas
>>> glue = Glue()
>>> for r in glue.get_readings(glue.gfl_to_compiled(gfl)):
... print r.simplify().normalize()
exists x.(dog(x) & exists z1.(John(z1) & sees(z1,x)))
exists x.(John(x) & exists z1.(dog(z1) & sees(x,z1)))
exists z1.(dog(z1) & exists z2.(John(z2) & sees(z2,z1)))
exists z1.(John(z1) & exists z2.(dog(z2) & sees(z1,z2)))
-----------------------------------
Dependency Graph to LFG f-structure
......@@ -375,6 +377,8 @@ LFG f-structure to Glue
--------------------------------
Initialize the Dependency Parser
--------------------------------
>>> from nltk.parse.malt import MaltParser
>>> tagger = RegexpTagger(
... [('^(John|Mary)$', 'NNP'),
... ('^(sees|chases)$', 'VB'),
......@@ -388,16 +392,16 @@ Initialize the Dependency Parser
Automated Derivation
--------------------
>>> glue = Glue(depparser=depparser)
>>> for reading in glue.parse_to_meaning('every girl chases a dog'):
>>> for reading in glue.parse_to_meaning('every girl chases a dog'.split()):
... print reading.simplify().normalize()
all x.(girl(x) -> exists z1.(dog(z1) & chases(x,z1)))
exists x.(dog(x) & all z1.(girl(z1) -> chases(z1,x)))
all z1.(girl(z1) -> exists z2.(dog(z2) & chases(z1,z2)))
exists z1.(dog(z1) & all z2.(girl(z2) -> chases(z2,z1)))
>>> drtglue = DrtGlue(depparser=depparser)
>>> for reading in drtglue.parse_to_meaning('every girl chases a dog'):
>>> for reading in drtglue.parse_to_meaning('every girl chases a dog'.split()):
... print reading.simplify().normalize()
([],[(([x],[girl(x)]) -> ([z1],[dog(z1), chases(x,z1)]))])
([z1],[dog(z1), (([x],[girl(x)]) -> ([],[chases(x,z1)]))])
([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chases(z1,z2)]))])
([z1],[dog(z1), (([z2],[girl(z2)]) -> ([],[chases(z2,z1)]))])
--------------
With inference
......@@ -408,11 +412,11 @@ For example, the ``glue`` module generates two readings for the sentence
*John sees Mary*:
>>> from nltk.sem.glue import DrtGlue
>>> readings = drtglue.parse_to_meaning('John sees Mary')
>>> readings = drtglue.parse_to_meaning('John sees Mary'.split())
>>> for drs in readings:
... print drs.simplify().normalize()
([x,z1],[John(x), Mary(z1), sees(x,z1)])
([x,z1],[Mary(x), John(z1), sees(z1,x)])
([z1,z2],[John(z1), Mary(z2), sees(z1,z2)])
([z1,z2],[Mary(z1), John(z2), sees(z2,z1)])
However, it is easy to tell that these two readings are logically the
same, and therefore one of them is superfluous. We can use the theorem prover
......
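A hedged sketch of that equivalence check with Prover9 (assuming Prover9 is installed and that ``readings`` still refers to the two DRSs above; this illustration is not part of the original doctest):

    from nltk.inference import Prover9
    # Convert the two simplified DRS readings to first-order logic.
    a, b = [r.simplify().fol() for r in readings]
    prover = Prover9()
    # The readings are logically equivalent iff each entails the other;
    # if so, one of the two readings is redundant and can be dropped.
    print prover.prove(b, [a]) and prover.prove(a, [b])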
......@@ -529,44 +529,3 @@ Mace can also be used with propositional logic.
True
>>> mb.valuation
{'Q': False, 'P': True}
--------------------------------------------------------
Running a Theorem Prover and a Model Builder in Parallel
--------------------------------------------------------
The ParallelProverBuilder makes it possible to run a Theorem Prover
and a Model Builder in parallel, using the result of whichever finishes
first. This is beneficial because if the Theorem Prover finds a proof,
then we can be certain that the Model Builder will not find a model.
Conversely, if the Model Builder finds a model, we know that there is no
proof.
>>> p = lp.parse('P')
>>> q = lp.parse('Q')
>>> prover = Prover9()
>>> builder = Mace(end_size=50)
>>> ppb = ParallelProverBuilder(prover, builder)
>>> ppb.prove(q, [p, p>q])
True
>>> ppb.build_model(q, [p, p>q])
False
>>> ppb = ParallelProverBuilder(prover, builder)
>>> ppb.prove(-q, [p, p>q])
False
>>> ppb.build_model(-q, [p, p>q])
True
The ParallelProverBuilderCommand offers the same functionality in a
stateful command format.
>>> ppbc = ParallelProverBuilderCommand(prover, builder, q, [p, p>q])
>>> ppbc.prove()
True
>>> ppbc.build_model()
False
>>> ppbc = ParallelProverBuilderCommand(prover, builder, -q, [p, p>q])
>>> ppbc.prove()
False
>>> ppbc.build_model()
True