Commit ba171e83 by Steven Bird

Merge pull request #676 from stevenbird/issue/657

Names for grammar classes
parents ba6e408c af68c53d
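
This commit renames NLTK's grammar classes: ContextFreeGrammar becomes CFG, WeightedGrammar becomes PCFG, and StatisticalDependencyGrammar becomes ProbabilisticDependencyGrammar. The diff below applies the rename across the demo apps, parsers, data loading, and doctests. As a rough before/after sketch of caller code (the toy productions here are illustrative only, not taken from the patch):

    # Before this commit:
    #     from nltk.grammar import ContextFreeGrammar, WeightedGrammar
    #     grammar = ContextFreeGrammar.fromstring(...)
    # After this commit, the same grammars are built with the shorter names:
    from nltk.grammar import CFG, PCFG

    cfg = CFG.fromstring("""
        S -> NP VP
        NP -> 'the' N
        VP -> V NP
        N -> 'dog' | 'cat'
        V -> 'saw'
    """)

    pcfg = PCFG.fromstring("""
        S -> 'a' S [0.4] | 'b' [0.6]
    """)

    print(cfg.start(), len(cfg.productions()))
    print(pcfg.start(), len(pcfg.productions()))
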
@@ -51,7 +51,7 @@ from nltk.parse.chart import (BottomUpPredictCombineRule, BottomUpPredictRule,
                               SteppingChartParser, TopDownInitRule, TopDownPredictRule,
                               TreeEdge)
 from nltk.tree import Tree
-from nltk.grammar import Nonterminal, ContextFreeGrammar
+from nltk.grammar import Nonterminal, CFG
 from nltk.util import in_idle
 from nltk.draw.util import (CanvasFrame, ColorizedList,
                             EntryDialog, MutableOptionMenu,
@@ -2038,7 +2038,7 @@ class ChartParserApp(object):
                 grammar = pickle.load(infile)
             else:
                 with open(filename, 'r') as infile:
-                    grammar = ContextFreeGrammar.fromstring(infile.read())
+                    grammar = CFG.fromstring(infile.read())
             self.set_grammar(grammar)
         except Exception as e:
             tkinter.messagebox.showerror('Error Loading Grammar',
@@ -2230,7 +2230,7 @@ class ChartParserApp(object):
         self.apply_strategy(self._TD_STRATEGY, TopDownPredictEdgeRule)
 def app():
-    grammar = ContextFreeGrammar.fromstring("""
+    grammar = CFG.fromstring("""
     # Grammatical productions.
        S -> NP VP
        VP -> VP PP | V NP | V
...
@@ -867,8 +867,8 @@ def app():
     Create a recursive descent parser demo, using a simple grammar and
     text.
     """
-    from nltk.grammar import ContextFreeGrammar
-    grammar = ContextFreeGrammar.fromstring("""
+    from nltk.grammar import CFG
+    grammar = CFG.fromstring("""
     # Grammatical productions.
        S -> NP VP
        NP -> Det N PP | Det N
...
@@ -772,7 +772,7 @@ def app():
     text.
     """
-    from nltk.grammar import Nonterminal, Production, ContextFreeGrammar
+    from nltk.grammar import Nonterminal, Production, CFG
     nonterminals = 'S VP NP PP P N Name V Det'
     (S, VP, NP, PP, P, N, Name, V, Det) = [Nonterminal(s)
                                            for s in nonterminals.split()]
@@ -796,7 +796,7 @@ def app():
         Production(Det, ['my']),
         )
-    grammar = ContextFreeGrammar(S, productions)
+    grammar = CFG(S, productions)
     # tokenize the sentence
     sent = 'my dog saw a man in the park with a statue'.split()
...
@@ -803,10 +803,10 @@ def load(resource_url, format='auto', cache=True, verbose=False,
     if format == 'text':
         resource_val = string_data
     elif format == 'cfg':
-        resource_val = nltk.grammar.ContextFreeGrammar.fromstring(
+        resource_val = nltk.grammar.CFG.fromstring(
             string_data, encoding=encoding)
     elif format == 'pcfg':
-        resource_val = nltk.grammar.WeightedGrammar.fromstring(
+        resource_val = nltk.grammar.PCFG.fromstring(
             string_data, encoding=encoding)
     elif format == 'fcfg':
         resource_val = nltk.grammar.FeatureGrammar.fromstring(
...
@@ -54,7 +54,7 @@ import re
 from tkinter import (Button, Canvas, Entry, Frame, IntVar, Label,
                      Scrollbar, Text, Tk, Toplevel)
-from nltk.grammar import (ContextFreeGrammar, _read_cfg_production,
+from nltk.grammar import (CFG, _read_cfg_production,
                           Nonterminal, nonterminals)
 from nltk.tree import Tree
 from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment
@@ -157,7 +157,7 @@ class CFGEditor(object):
     def __init__(self, parent, cfg=None, set_cfg_callback=None):
         self._parent = parent
         if cfg is not None: self._cfg = cfg
-        else: self._cfg = ContextFreeGrammar(Nonterminal('S'), [])
+        else: self._cfg = CFG(Nonterminal('S'), [])
         self._set_cfg_callback = set_cfg_callback
         self._highlight_matching_nonterminals = 1
@@ -482,7 +482,7 @@ class CFGEditor(object):
     def _apply(self, *e):
         productions = self._parse_productions()
         start = Nonterminal(self._start.get())
-        cfg = ContextFreeGrammar(start, productions)
+        cfg = CFG(start, productions)
         if self._set_cfg_callback is not None:
             self._set_cfg_callback(cfg)
@@ -666,7 +666,7 @@ class CFGDemo(object):
         self._top.mainloop(*args, **kwargs)
 def demo2():
-    from nltk import Nonterminal, Production, ContextFreeGrammar
+    from nltk import Nonterminal, Production, CFG
     nonterminals = 'S VP NP PP P N Name V Det'
     (S, VP, NP, PP, P, N, Name, V, Det) = [Nonterminal(s)
                                            for s in nonterminals.split()]
@@ -691,7 +691,7 @@ def demo2():
         Production(N, ['dog']), Production(N, ['statue']),
         Production(Det, ['my']),
         )
-    grammar = ContextFreeGrammar(S, productions)
+    grammar = CFG(S, productions)
     text = 'I saw a man in the park'.split()
     d=CFGDemo(grammar, text)
@@ -702,12 +702,12 @@ def demo2():
 ######################################################################
 def demo():
-    from nltk import Nonterminal, ContextFreeGrammar
+    from nltk import Nonterminal, CFG
     nonterminals = 'S VP NP PP P N Name V Det'
     (S, VP, NP, PP, P, N, Name, V, Det) = [Nonterminal(s)
                                            for s in nonterminals.split()]
-    grammar = ContextFreeGrammar.fromstring("""
+    grammar = CFG.fromstring("""
       S -> NP VP
       PP -> P NP
       NP -> Det N
...
@@ -43,7 +43,7 @@ import warnings
 from nltk import compat
 from nltk.tree import Tree
-from nltk.grammar import WeightedGrammar, is_nonterminal, is_terminal
+from nltk.grammar import PCFG, is_nonterminal, is_terminal
 from nltk.util import OrderedDict
 from nltk.internals import raise_unorderable_types
 from nltk.compat import (total_ordering, python_2_unicode_compatible,
@@ -1242,7 +1242,7 @@ class ChartParser(ParserI):
     Create a new chart parser, that uses ``grammar`` to parse
     texts.
-    :type grammar: ContextFreeGrammar
+    :type grammar: CFG
     :param grammar: The grammar used to parse texts.
     :type strategy: list(ChartRuleI)
     :param strategy: A list of rules that should be used to decide
@@ -1364,8 +1364,8 @@ class BottomUpChartParser(ChartParser):
     See ``ChartParser`` for more information.
     """
     def __init__(self, grammar, **parser_args):
-        if isinstance(grammar, WeightedGrammar):
-            warnings.warn("BottomUpChartParser only works for ContextFreeGrammar, "
+        if isinstance(grammar, PCFG):
+            warnings.warn("BottomUpChartParser only works for CFG, "
                           "use BottomUpProbabilisticChartParser instead",
                           category=DeprecationWarning)
         ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
@@ -1548,8 +1548,8 @@ class SteppingChartParser(ChartParser):
 ########################################################################
 def demo_grammar():
-    from nltk.grammar import ContextFreeGrammar
-    return ContextFreeGrammar.fromstring("""
+    from nltk.grammar import CFG
+    return CFG.fromstring("""
     S -> NP VP
     PP -> "with" NP
     NP -> NP PP
@@ -1578,7 +1578,7 @@ def demo(choice=None,
     A demonstration of the chart parsers.
     """
     import sys, time
-    from nltk import nonterminals, Production, ContextFreeGrammar
+    from nltk import nonterminals, Production, CFG
     # The grammar for ChartParser and SteppingChartParser:
     grammar = demo_grammar()
...
@@ -268,7 +268,7 @@ class IncrementalChartParser(ChartParser):
     Create a new Earley chart parser, that uses ``grammar`` to
     parse texts.
-    :type grammar: ContextFreeGrammar
+    :type grammar: CFG
     :param grammar: The grammar used to parse texts.
     :type trace: int
     :param trace: The level of tracing that should be used when
...
@@ -17,7 +17,7 @@ from nltk.compat import xrange, python_2_unicode_compatible
 from nltk.featstruct import FeatStruct, unify, TYPE, find_variables
 from nltk.sem import logic
 from nltk.tree import Tree
-from nltk.grammar import (Nonterminal, Production, ContextFreeGrammar,
+from nltk.grammar import (Nonterminal, Production, CFG,
                           FeatStructNonterminal, is_nonterminal,
                           is_terminal)
 from nltk.parse.chart import (TreeEdge, Chart, ChartParser, EdgeI,
...
@@ -64,11 +64,11 @@ demo_grammar = """
 """
 def demo(N=23):
-    from nltk.grammar import ContextFreeGrammar
+    from nltk.grammar import CFG
     print('Generating the first %d sentences for demo grammar:' % (N,))
     print(demo_grammar)
-    grammar = ContextFreeGrammar.fromstring(demo_grammar)
+    grammar = CFG.fromstring(demo_grammar)
     for n, sent in enumerate(generate(grammar, n=N), 1):
         print('%3d. %s' % (n, ' '.join(sent)))
...
@@ -40,7 +40,7 @@ from __future__ import print_function, unicode_literals
 from functools import reduce
 from nltk.tree import Tree, ProbabilisticTree
-from nltk.grammar import Nonterminal, WeightedGrammar
+from nltk.grammar import Nonterminal, PCFG
 from nltk.parse.api import ParserI
 from nltk.parse.chart import Chart, LeafEdge, TreeEdge, AbstractChartRule
@@ -173,8 +173,8 @@ class BottomUpProbabilisticChartParser(ParserI):
             and higher numbers will produce more verbose tracing
             output.
         """
-        if not isinstance(grammar, WeightedGrammar):
-            raise ValueError("The grammar must be probabilistic WeightedGrammar")
+        if not isinstance(grammar, PCFG):
+            raise ValueError("The grammar must be probabilistic PCFG")
         self._grammar = grammar
         self.beam_size = beam_size
         self._trace = trace
...
@@ -11,7 +11,7 @@ from __future__ import print_function, unicode_literals
 from collections import defaultdict
 from nltk.grammar import (DependencyProduction, DependencyGrammar,
-                          StatisticalDependencyGrammar)
+                          ProbabilisticDependencyGrammar)
 from nltk.parse.dependencygraph import DependencyGraph, conll_data2
 from nltk.internals import raise_unorderable_types
 from nltk.compat import total_ordering, python_2_unicode_compatible
@@ -334,7 +334,7 @@ class ProbabilisticProjectiveDependencyParser(object):
     def train(self, graphs):
         """
-        Trains a StatisticalDependencyGrammar based on the list of input
+        Trains a ProbabilisticDependencyGrammar based on the list of input
         DependencyGraphs. This model is an implementation of Eisner's (1996)
         Model C, which derives its statistics from head-word, head-tag,
         child-word, and child-tag relationships.
@@ -390,7 +390,7 @@ class ProbabilisticProjectiveDependencyParser(object):
                     mod_event = '(mods (%s, %s, %s) right))' % (prev_tag, head_word, head_tag)
                     events[head_event] += 1
                     events[mod_event] += 1
-        self._grammar = StatisticalDependencyGrammar(productions, events, tags)
+        self._grammar = ProbabilisticDependencyGrammar(productions, events, tags)
         # print self._grammar
     def compute_prob(self, dg):
...
@@ -56,7 +56,7 @@ class RecursiveDescentParser(ParserI):
     Create a new ``RecursiveDescentParser``, that uses ``grammar``
     to parse texts.
-    :type grammar: ContextFreeGrammar
+    :type grammar: CFG
     :param grammar: The grammar used to parse texts.
     :type trace: int
     :param trace: The level of tracing that should be used when
@@ -630,9 +630,9 @@ def demo():
     A demonstration of the recursive descent parser.
     """
-    from nltk import parse, ContextFreeGrammar
-    grammar = ContextFreeGrammar.fromstring("""
+    from nltk import parse, CFG
+    grammar = CFG.fromstring("""
     S -> NP VP
     NP -> Det N | Det N PP
     VP -> V NP | V NP PP
...
@@ -435,9 +435,9 @@ def demo():
     A demonstration of the shift-reduce parser.
     """
-    from nltk import parse, ContextFreeGrammar
-    grammar = ContextFreeGrammar.fromstring("""
+    from nltk import parse, CFG
+    grammar = CFG.fromstring("""
    S -> NP VP
    NP -> Det N | Det N PP
    VP -> V NP | V NP PP
...
@@ -12,7 +12,7 @@ Utility functions for parsers.
 """
 from __future__ import print_function
-from nltk.grammar import ContextFreeGrammar, FeatureGrammar, WeightedGrammar
+from nltk.grammar import CFG, FeatureGrammar, PCFG
 from nltk.data import load
 from nltk.parse.chart import Chart, ChartParser
@@ -28,9 +28,9 @@ def load_parser(grammar_url, trace=0,
     on properties of the grammar itself.
     The following grammar formats are currently supported:
-      - ``'cfg'`` (CFGs: ``ContextFreeGrammar``)
-      - ``'pcfg'`` (probabilistic CFGs: ``WeightedGrammar``)
-      - ``'fcfg'`` (feature-based CFGs: ``ContextFreeGrammar``)
+      - ``'cfg'`` (CFGs: ``CFG``)
+      - ``'pcfg'`` (probabilistic CFGs: ``PCFG``)
+      - ``'fcfg'`` (feature-based CFGs: ``CFG``)
     :type grammar_url: str
     :param grammar_url: A URL specifying where the grammar is located.
@@ -54,10 +54,10 @@ def load_parser(grammar_url, trace=0,
     See ``data.load`` for more information.
     """
     grammar = load(grammar_url, **load_args)
-    if not isinstance(grammar, ContextFreeGrammar):
-        raise ValueError("The grammar must be a ContextFreeGrammar, "
+    if not isinstance(grammar, CFG):
+        raise ValueError("The grammar must be a CFG, "
                          "or a subclass thereof.")
-    if isinstance(grammar, WeightedGrammar):
+    if isinstance(grammar, PCFG):
         if parser is None:
             parser = InsideChartParser
         return parser(grammar, trace=trace, beam_size=beam_size)
@@ -69,7 +69,7 @@ def load_parser(grammar_url, trace=0,
             chart_class = FeatureChart
         return parser(grammar, trace=trace, chart_class=chart_class)
-    else: # Plain ContextFreeGrammar.
+    else: # Plain CFG.
         if parser is None:
             parser = ChartParser
         if chart_class is None:
...
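
The load_parser hunks above are purely the type-name swap; the dispatch logic (InsideChartParser for a PCFG, the FeatureChart machinery for feature grammars, a plain ChartParser otherwise) is unchanged. A minimal usage sketch, hedged: the resource path below is only a placeholder for whatever CFG file is reachable through nltk.data, and the sample sentence assumes that grammar's lexicon covers it.

    from nltk.parse.util import load_parser

    # Placeholder grammar resource -- substitute a real .cfg/.pcfg path from nltk_data.
    parser = load_parser('grammars/sample_grammars/toy.cfg', trace=0)

    # Tokens must be covered by the loaded grammar's lexicon.
    for tree in parser.parse('the dog saw a man'.split()):
        print(tree)
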
@@ -67,7 +67,7 @@ class ViterbiParser(ParserI):
     |           MLC[start, start+width, prod.lhs] = new_tree
     | Return MLC[0, len(text), start_symbol]
-    :type _grammar: WeightedGrammar
+    :type _grammar: PCFG
     :ivar _grammar: The grammar used to parse sentences.
     :type _trace: int
     :ivar _trace: The level of tracing output that should be generated
@@ -78,7 +78,7 @@ class ViterbiParser(ParserI):
     Create a new ``ViterbiParser`` parser, that uses ``grammar`` to
     parse texts.
-    :type grammar: WeightedGrammar
+    :type grammar: PCFG
    :param grammar: The grammar used to parse texts.
    :type trace: int
    :param trace: The level of tracing that should be used when
...
@@ -309,7 +309,7 @@ internal use by NLTK's corpus readers.
     >>> # Show that it's now been loaded:
     >>> object.__repr__(ll) # doctest: +ELLIPSIS
-    '<nltk.grammar.ContextFreeGrammar object at ...>'
+    '<nltk.grammar.CFG object at ...>'
     >>> # Test that accessing an attribute also loads it:
@@ -317,7 +317,7 @@ internal use by NLTK's corpus readers.
     >>> ll.start()
     S
     >>> object.__repr__(ll) # doctest: +ELLIPSIS
-    '<nltk.grammar.ContextFreeGrammar object at ...>'
+    '<nltk.grammar.CFG object at ...>'
 Buffered Gzip Reading and Writing
 ---------------------------------
...
@@ -8,8 +8,8 @@ Generating sentences from context-free grammars
 An example grammar:
     >>> from nltk.parse.generate import generate, demo_grammar
-    >>> from nltk import ContextFreeGrammar
-    >>> grammar = ContextFreeGrammar.fromstring(demo_grammar)
+    >>> from nltk import CFG
+    >>> grammar = CFG.fromstring(demo_grammar)
     >>> print(grammar)
     Grammar with 13 productions (start state = S)
         S -> NP VP
...
@@ -7,8 +7,8 @@ Grammar Parsing
 Grammars can be parsed from strings:
-    >>> from nltk import ContextFreeGrammar
-    >>> grammar = ContextFreeGrammar.fromstring("""
+    >>> from nltk import CFG
+    >>> grammar = CFG.fromstring("""
     ... S -> NP VP
     ... PP -> P NP
     ... NP -> Det N | NP PP
@@ -29,8 +29,8 @@ Grammars can be parsed from strings:
 Probabilistic CFGs:
-    >>> from nltk import WeightedGrammar
-    >>> toy_pcfg1 = WeightedGrammar.fromstring("""
+    >>> from nltk import PCFG
+    >>> toy_pcfg1 = PCFG.fromstring("""
     ... S -> NP VP [1.0]
     ... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
     ... Det -> 'the' [0.8] | 'my' [0.2]
@@ -43,6 +43,6 @@ Probabilistic CFGs:
 Chomsky Normal Form grammar (Test for bug 474)
-    >>> g = ContextFreeGrammar.fromstring("VP^<TOP> -> VBP NP^<VP-TOP>")
+    >>> g = CFG.fromstring("VP^<TOP> -> VBP NP^<VP-TOP>")
     >>> g.productions()[0].lhs()
     VP^<TOP>
@@ -8,7 +8,7 @@
 Unit tests for the Context Free Grammar class
 ---------------------------------------------
-    >>> from nltk import Nonterminal, nonterminals, Production, ContextFreeGrammar
+    >>> from nltk import Nonterminal, nonterminals, Production, CFG
     >>> nt1 = Nonterminal('NP')
     >>> nt2 = Nonterminal('VP')
@@ -40,7 +40,7 @@ Unit tests for the Context Free Grammar class
     >>> prod1 == prod2
     False
-    >>> grammar = ContextFreeGrammar.fromstring("""
+    >>> grammar = CFG.fromstring("""
     ... S -> NP VP
     ... PP -> P NP
     ... NP -> 'the' N | N PP | 'the' N PP
@@ -529,11 +529,11 @@ Unit tests for the Probabilistic CFG class
     >>> from nltk.corpus import treebank
     >>> from itertools import islice
-    >>> from nltk.grammar import WeightedGrammar, induce_pcfg, toy_pcfg1, toy_pcfg2
+    >>> from nltk.grammar import PCFG, induce_pcfg, toy_pcfg1, toy_pcfg2
-Create a set of probabilistic CFG productions.
+Create a set of PCFG productions.
-    >>> grammar = WeightedGrammar.fromstring("""
+    >>> grammar = PCFG.fromstring("""
     ... A -> B B [.3] | C B C [.7]
     ... B -> B D [.5] | C [.5]
     ... C -> 'a' [.1] | 'b' [0.9]
...
@@ -62,8 +62,8 @@ Parsing
 -------
     >>> from nltk.parse.recursivedescent import RecursiveDescentParser
-    >>> from nltk.grammar import ContextFreeGrammar
-    >>> grammar = ContextFreeGrammar.fromstring("""
+    >>> from nltk.grammar import CFG
+    >>> grammar = CFG.fromstring("""
     ... S -> NP VP
     ... PP -> P NP
     ... NP -> 'the' N | N PP | 'the' N PP
...
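
The doctest hunks above already exercise the renamed classes; as an end-to-end sanity check after this change, a sketch along the following lines should run (grammar and sentence are illustrative, assuming an NLTK version that includes this commit, where parse() yields trees):

    from nltk import CFG
    from nltk.parse.chart import ChartParser

    grammar = CFG.fromstring("""
        S -> NP VP
        PP -> P NP
        NP -> Det N | NP PP
        VP -> V NP | VP PP
        Det -> 'the' | 'a'
        N -> 'dog' | 'man' | 'park'
        V -> 'saw'
        P -> 'in' | 'with'
    """)

    parser = ChartParser(grammar)
    for tree in parser.parse('the dog saw a man in the park'.split()):
        print(tree)
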