Dataset columns (types and value ranges as reported by the dataset viewer):

| Column | Type | Range / values |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | lengths 7 – 55 |
| path | string | lengths 4 – 127 |
| func_name | string | lengths 1 – 88 |
| original_string | string | lengths 75 – 19.8k |
| language | string | 1 class (single value) |
| code | string | lengths 75 – 19.8k |
| code_tokens | list | – |
| docstring | string | lengths 3 – 17.3k |
| docstring_tokens | list | – |
| sha | string | length 40 |
| url | string | lengths 87 – 242 |

The entries below are individual records from this dataset.
21,200
|
explosion/spaCy
|
examples/pipeline/custom_attr_methods.py
|
overlap_tokens
|
def overlap_tokens(doc, other_doc):
"""Get the tokens from the original Doc that are also in the comparison Doc.
"""
overlap = []
other_tokens = [token.text for token in other_doc]
for token in doc:
if token.text in other_tokens:
overlap.append(token)
return overlap
|
python
|
def overlap_tokens(doc, other_doc):
"""Get the tokens from the original Doc that are also in the comparison Doc.
"""
overlap = []
other_tokens = [token.text for token in other_doc]
for token in doc:
if token.text in other_tokens:
overlap.append(token)
return overlap
|
[
"def",
"overlap_tokens",
"(",
"doc",
",",
"other_doc",
")",
":",
"overlap",
"=",
"[",
"]",
"other_tokens",
"=",
"[",
"token",
".",
"text",
"for",
"token",
"in",
"other_doc",
"]",
"for",
"token",
"in",
"doc",
":",
"if",
"token",
".",
"text",
"in",
"other_tokens",
":",
"overlap",
".",
"append",
"(",
"token",
")",
"return",
"overlap"
] |
Get the tokens from the original Doc that are also in the comparison Doc.
|
[
"Get",
"the",
"tokens",
"from",
"the",
"original",
"Doc",
"that",
"are",
"also",
"in",
"the",
"comparison",
"Doc",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/pipeline/custom_attr_methods.py#L61-L69
|
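As a quick illustration of the helper recorded above, a minimal sketch that assumes `overlap_tokens` is defined exactly as shown; only tokenization is needed, so a blank pipeline is enough:

```python
import spacy

# Assumes overlap_tokens() from the record above is defined in this module
nlp = spacy.blank("en")            # tokenizer-only pipeline, no model download
doc = nlp("She lives in New York")
other_doc = nlp("He lives in Boston")
print(overlap_tokens(doc, other_doc))   # -> [lives, in]
```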
21,201
|
explosion/spaCy
|
spacy/cli/converters/iob2json.py
|
iob2json
|
def iob2json(input_data, n_sents=10, *args, **kwargs):
"""
Convert IOB files into JSON format for use with train cli.
"""
docs = []
for group in minibatch(docs, n_sents):
group = list(group)
first = group.pop(0)
to_extend = first["paragraphs"][0]["sentences"]
for sent in group[1:]:
to_extend.extend(sent["paragraphs"][0]["sentences"])
docs.append(first)
return docs
|
python
|
def iob2json(input_data, n_sents=10, *args, **kwargs):
"""
Convert IOB files into JSON format for use with train cli.
"""
docs = []
for group in minibatch(docs, n_sents):
group = list(group)
first = group.pop(0)
to_extend = first["paragraphs"][0]["sentences"]
for sent in group[1:]:
to_extend.extend(sent["paragraphs"][0]["sentences"])
docs.append(first)
return docs
|
[
"def",
"iob2json",
"(",
"input_data",
",",
"n_sents",
"=",
"10",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"docs",
"=",
"[",
"]",
"for",
"group",
"in",
"minibatch",
"(",
"docs",
",",
"n_sents",
")",
":",
"group",
"=",
"list",
"(",
"group",
")",
"first",
"=",
"group",
".",
"pop",
"(",
"0",
")",
"to_extend",
"=",
"first",
"[",
"\"paragraphs\"",
"]",
"[",
"0",
"]",
"[",
"\"sentences\"",
"]",
"for",
"sent",
"in",
"group",
"[",
"1",
":",
"]",
":",
"to_extend",
".",
"extend",
"(",
"sent",
"[",
"\"paragraphs\"",
"]",
"[",
"0",
"]",
"[",
"\"sentences\"",
"]",
")",
"docs",
".",
"append",
"(",
"first",
")",
"return",
"docs"
] |
Convert IOB files into JSON format for use with train cli.
|
[
"Convert",
"IOB",
"files",
"into",
"JSON",
"format",
"for",
"use",
"with",
"train",
"cli",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/iob2json.py#L10-L22
|
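The grouping in this record relies on spaCy's `minibatch` helper (documented in a later row). A self-contained sketch of that merge pattern, using hypothetical one-sentence documents in the training-JSON layout:

```python
from spacy.util import minibatch

# Hypothetical one-sentence docs in spaCy's training JSON layout
docs = [{"paragraphs": [{"sentences": ["sent-%d" % i]}]} for i in range(5)]

merged = []
for group in minibatch(docs, 2):          # take n_sents docs at a time
    group = list(group)
    first = group.pop(0)
    sents = first["paragraphs"][0]["sentences"]
    for other in group:                   # fold remaining sentences into the first doc
        sents.extend(other["paragraphs"][0]["sentences"])
    merged.append(first)

print(len(merged))   # 3 merged docs for 5 sentences with a group size of 2
```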
21,202
|
explosion/spaCy
|
spacy/displacy/__init__.py
|
render
|
def render(
docs, style="dep", page=False, minify=False, jupyter=None, options={}, manual=False
):
"""Render displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
jupyter (bool): Override Jupyter auto-detection.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
RETURNS (unicode): Rendered HTML markup.
DOCS: https://spacy.io/api/top-level#displacy.render
USAGE: https://spacy.io/usage/visualizers
"""
factories = {
"dep": (DependencyRenderer, parse_deps),
"ent": (EntityRenderer, parse_ents),
}
if style not in factories:
raise ValueError(Errors.E087.format(style=style))
if isinstance(docs, (Doc, Span, dict)):
docs = [docs]
docs = [obj if not isinstance(obj, Span) else obj.as_doc() for obj in docs]
if not all(isinstance(obj, (Doc, Span, dict)) for obj in docs):
raise ValueError(Errors.E096)
renderer, converter = factories[style]
renderer = renderer(options=options)
parsed = [converter(doc, options) for doc in docs] if not manual else docs
_html["parsed"] = renderer.render(parsed, page=page, minify=minify).strip()
html = _html["parsed"]
if RENDER_WRAPPER is not None:
html = RENDER_WRAPPER(html)
if jupyter or (jupyter is None and is_in_jupyter()):
# return HTML rendered by IPython display()
from IPython.core.display import display, HTML
return display(HTML(html))
return html
|
python
|
def render(
docs, style="dep", page=False, minify=False, jupyter=None, options={}, manual=False
):
"""Render displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
jupyter (bool): Override Jupyter auto-detection.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
RETURNS (unicode): Rendered HTML markup.
DOCS: https://spacy.io/api/top-level#displacy.render
USAGE: https://spacy.io/usage/visualizers
"""
factories = {
"dep": (DependencyRenderer, parse_deps),
"ent": (EntityRenderer, parse_ents),
}
if style not in factories:
raise ValueError(Errors.E087.format(style=style))
if isinstance(docs, (Doc, Span, dict)):
docs = [docs]
docs = [obj if not isinstance(obj, Span) else obj.as_doc() for obj in docs]
if not all(isinstance(obj, (Doc, Span, dict)) for obj in docs):
raise ValueError(Errors.E096)
renderer, converter = factories[style]
renderer = renderer(options=options)
parsed = [converter(doc, options) for doc in docs] if not manual else docs
_html["parsed"] = renderer.render(parsed, page=page, minify=minify).strip()
html = _html["parsed"]
if RENDER_WRAPPER is not None:
html = RENDER_WRAPPER(html)
if jupyter or (jupyter is None and is_in_jupyter()):
# return HTML rendered by IPython display()
from IPython.core.display import display, HTML
return display(HTML(html))
return html
|
[
"def",
"render",
"(",
"docs",
",",
"style",
"=",
"\"dep\"",
",",
"page",
"=",
"False",
",",
"minify",
"=",
"False",
",",
"jupyter",
"=",
"None",
",",
"options",
"=",
"{",
"}",
",",
"manual",
"=",
"False",
")",
":",
"factories",
"=",
"{",
"\"dep\"",
":",
"(",
"DependencyRenderer",
",",
"parse_deps",
")",
",",
"\"ent\"",
":",
"(",
"EntityRenderer",
",",
"parse_ents",
")",
",",
"}",
"if",
"style",
"not",
"in",
"factories",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E087",
".",
"format",
"(",
"style",
"=",
"style",
")",
")",
"if",
"isinstance",
"(",
"docs",
",",
"(",
"Doc",
",",
"Span",
",",
"dict",
")",
")",
":",
"docs",
"=",
"[",
"docs",
"]",
"docs",
"=",
"[",
"obj",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"Span",
")",
"else",
"obj",
".",
"as_doc",
"(",
")",
"for",
"obj",
"in",
"docs",
"]",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"obj",
",",
"(",
"Doc",
",",
"Span",
",",
"dict",
")",
")",
"for",
"obj",
"in",
"docs",
")",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E096",
")",
"renderer",
",",
"converter",
"=",
"factories",
"[",
"style",
"]",
"renderer",
"=",
"renderer",
"(",
"options",
"=",
"options",
")",
"parsed",
"=",
"[",
"converter",
"(",
"doc",
",",
"options",
")",
"for",
"doc",
"in",
"docs",
"]",
"if",
"not",
"manual",
"else",
"docs",
"_html",
"[",
"\"parsed\"",
"]",
"=",
"renderer",
".",
"render",
"(",
"parsed",
",",
"page",
"=",
"page",
",",
"minify",
"=",
"minify",
")",
".",
"strip",
"(",
")",
"html",
"=",
"_html",
"[",
"\"parsed\"",
"]",
"if",
"RENDER_WRAPPER",
"is",
"not",
"None",
":",
"html",
"=",
"RENDER_WRAPPER",
"(",
"html",
")",
"if",
"jupyter",
"or",
"(",
"jupyter",
"is",
"None",
"and",
"is_in_jupyter",
"(",
")",
")",
":",
"# return HTML rendered by IPython display()",
"from",
"IPython",
".",
"core",
".",
"display",
"import",
"display",
",",
"HTML",
"return",
"display",
"(",
"HTML",
"(",
"html",
")",
")",
"return",
"html"
] |
Render displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
jupyter (bool): Override Jupyter auto-detection.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
RETURNS (unicode): Rendered HTML markup.
DOCS: https://spacy.io/api/top-level#displacy.render
USAGE: https://spacy.io/usage/visualizers
|
[
"Render",
"displaCy",
"visualisation",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/__init__.py#L21-L61
|
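The public entry point for this record is `spacy.displacy.render`. A short usage sketch, assuming the `en_core_web_sm` package is installed:

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")     # assumes this model package is installed
doc = nlp("Apple is looking at buying a U.K. startup for $1 billion.")

# Outside Jupyter, render() returns the markup instead of displaying it
html = displacy.render(doc, style="ent", page=True)
with open("ents.html", "w", encoding="utf8") as f:
    f.write(html)
```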
21,203
|
explosion/spaCy
|
spacy/displacy/__init__.py
|
serve
|
def serve(
docs,
style="dep",
page=True,
minify=False,
options={},
manual=False,
port=5000,
host="0.0.0.0",
):
"""Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
host (unicode): Host to serve visualisation.
DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers
"""
from wsgiref import simple_server
if is_in_jupyter():
user_warning(Warnings.W011)
render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
httpd = simple_server.make_server(host, port, app)
print("\nUsing the '{}' visualizer".format(style))
print("Serving on http://{}:{} ...\n".format(host, port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("Shutting down server on port {}.".format(port))
finally:
httpd.server_close()
|
python
|
def serve(
docs,
style="dep",
page=True,
minify=False,
options={},
manual=False,
port=5000,
host="0.0.0.0",
):
"""Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
host (unicode): Host to serve visualisation.
DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers
"""
from wsgiref import simple_server
if is_in_jupyter():
user_warning(Warnings.W011)
render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
httpd = simple_server.make_server(host, port, app)
print("\nUsing the '{}' visualizer".format(style))
print("Serving on http://{}:{} ...\n".format(host, port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("Shutting down server on port {}.".format(port))
finally:
httpd.server_close()
|
[
"def",
"serve",
"(",
"docs",
",",
"style",
"=",
"\"dep\"",
",",
"page",
"=",
"True",
",",
"minify",
"=",
"False",
",",
"options",
"=",
"{",
"}",
",",
"manual",
"=",
"False",
",",
"port",
"=",
"5000",
",",
"host",
"=",
"\"0.0.0.0\"",
",",
")",
":",
"from",
"wsgiref",
"import",
"simple_server",
"if",
"is_in_jupyter",
"(",
")",
":",
"user_warning",
"(",
"Warnings",
".",
"W011",
")",
"render",
"(",
"docs",
",",
"style",
"=",
"style",
",",
"page",
"=",
"page",
",",
"minify",
"=",
"minify",
",",
"options",
"=",
"options",
",",
"manual",
"=",
"manual",
")",
"httpd",
"=",
"simple_server",
".",
"make_server",
"(",
"host",
",",
"port",
",",
"app",
")",
"print",
"(",
"\"\\nUsing the '{}' visualizer\"",
".",
"format",
"(",
"style",
")",
")",
"print",
"(",
"\"Serving on http://{}:{} ...\\n\"",
".",
"format",
"(",
"host",
",",
"port",
")",
")",
"try",
":",
"httpd",
".",
"serve_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"Shutting down server on port {}.\"",
".",
"format",
"(",
"port",
")",
")",
"finally",
":",
"httpd",
".",
"server_close",
"(",
")"
] |
Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
host (unicode): Host to serve visualisation.
DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers
|
[
"Serve",
"displaCy",
"visualisation",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/__init__.py#L64-L102
|
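Correspondingly, a minimal sketch for `spacy.displacy.serve`, again assuming `en_core_web_sm` is installed:

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")     # assumes this model package is installed
doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")

# Blocks and serves the visualisation on http://0.0.0.0:5000 until interrupted
displacy.serve(doc, style="dep", port=5000)
```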
21,204
|
explosion/spaCy
|
spacy/displacy/__init__.py
|
set_render_wrapper
|
def set_render_wrapper(func):
"""Set an optional wrapper function that is called around the generated
HTML markup on displacy.render. This can be used to allow integration into
other platforms, similar to Jupyter Notebooks that require functions to be
called around the HTML. It can also be used to implement custom callbacks
on render, or to embed the visualization in a custom page.
func (callable): Function to call around markup before rendering it. Needs
to take one argument, the HTML markup, and should return the desired
output of displacy.render.
"""
global RENDER_WRAPPER
if not hasattr(func, "__call__"):
raise ValueError(Errors.E110.format(obj=type(func)))
RENDER_WRAPPER = func
|
python
|
def set_render_wrapper(func):
"""Set an optional wrapper function that is called around the generated
HTML markup on displacy.render. This can be used to allow integration into
other platforms, similar to Jupyter Notebooks that require functions to be
called around the HTML. It can also be used to implement custom callbacks
on render, or to embed the visualization in a custom page.
func (callable): Function to call around markup before rendering it. Needs
to take one argument, the HTML markup, and should return the desired
output of displacy.render.
"""
global RENDER_WRAPPER
if not hasattr(func, "__call__"):
raise ValueError(Errors.E110.format(obj=type(func)))
RENDER_WRAPPER = func
|
[
"def",
"set_render_wrapper",
"(",
"func",
")",
":",
"global",
"RENDER_WRAPPER",
"if",
"not",
"hasattr",
"(",
"func",
",",
"\"__call__\"",
")",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E110",
".",
"format",
"(",
"obj",
"=",
"type",
"(",
"func",
")",
")",
")",
"RENDER_WRAPPER",
"=",
"func"
] |
Set an optional wrapper function that is called around the generated
HTML markup on displacy.render. This can be used to allow integration into
other platforms, similar to Jupyter Notebooks that require functions to be
called around the HTML. It can also be used to implement custom callbacks
on render, or to embed the visualization in a custom page.
func (callable): Function to call around markup before rendering it. Needs
to take one argument, the HTML markup, and should return the desired
output of displacy.render.
|
[
"Set",
"an",
"optional",
"wrapper",
"function",
"that",
"is",
"called",
"around",
"the",
"generated",
"HTML",
"markup",
"on",
"displacy",
".",
"render",
".",
"This",
"can",
"be",
"used",
"to",
"allow",
"integration",
"into",
"other",
"platforms",
"similar",
"to",
"Jupyter",
"Notebooks",
"that",
"require",
"functions",
"to",
"be",
"called",
"around",
"the",
"HTML",
".",
"It",
"can",
"also",
"be",
"used",
"to",
"implement",
"custom",
"callbacks",
"on",
"render",
"or",
"to",
"embed",
"the",
"visualization",
"in",
"a",
"custom",
"page",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/__init__.py#L185-L199
|
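A small sketch of the kind of wrapper this hook expects; the container markup here is made up for illustration:

```python
from spacy import displacy

def wrap_in_container(html):
    # Called with the rendered markup; whatever is returned becomes
    # the output of displacy.render
    return '<div class="my-visualisation">' + html + "</div>"

displacy.set_render_wrapper(wrap_in_container)
```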
21,205
|
explosion/spaCy
|
spacy/cli/evaluate.py
|
evaluate
|
def evaluate(
model,
data_path,
gpu_id=-1,
gold_preproc=False,
displacy_path=None,
displacy_limit=25,
return_scores=False,
):
"""
Evaluate a model. To render a sample of parses in an HTML file, set an
output directory as the displacy_path argument.
"""
msg = Printer()
util.fix_random_seed()
if gpu_id >= 0:
util.use_gpu(gpu_id)
util.set_env_log(False)
data_path = util.ensure_path(data_path)
displacy_path = util.ensure_path(displacy_path)
if not data_path.exists():
msg.fail("Evaluation data not found", data_path, exits=1)
if displacy_path and not displacy_path.exists():
msg.fail("Visualization output directory not found", displacy_path, exits=1)
corpus = GoldCorpus(data_path, data_path)
nlp = util.load_model(model)
dev_docs = list(corpus.dev_docs(nlp, gold_preproc=gold_preproc))
begin = timer()
scorer = nlp.evaluate(dev_docs, verbose=False)
end = timer()
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
results = {
"Time": "%.2f s" % (end - begin),
"Words": nwords,
"Words/s": "%.0f" % (nwords / (end - begin)),
"TOK": "%.2f" % scorer.token_acc,
"POS": "%.2f" % scorer.tags_acc,
"UAS": "%.2f" % scorer.uas,
"LAS": "%.2f" % scorer.las,
"NER P": "%.2f" % scorer.ents_p,
"NER R": "%.2f" % scorer.ents_r,
"NER F": "%.2f" % scorer.ents_f,
}
msg.table(results, title="Results")
if displacy_path:
docs, golds = zip(*dev_docs)
render_deps = "parser" in nlp.meta.get("pipeline", [])
render_ents = "ner" in nlp.meta.get("pipeline", [])
render_parses(
docs,
displacy_path,
model_name=model,
limit=displacy_limit,
deps=render_deps,
ents=render_ents,
)
msg.good("Generated {} parses as HTML".format(displacy_limit), displacy_path)
if return_scores:
return scorer.scores
|
python
|
def evaluate(
model,
data_path,
gpu_id=-1,
gold_preproc=False,
displacy_path=None,
displacy_limit=25,
return_scores=False,
):
"""
Evaluate a model. To render a sample of parses in an HTML file, set an
output directory as the displacy_path argument.
"""
msg = Printer()
util.fix_random_seed()
if gpu_id >= 0:
util.use_gpu(gpu_id)
util.set_env_log(False)
data_path = util.ensure_path(data_path)
displacy_path = util.ensure_path(displacy_path)
if not data_path.exists():
msg.fail("Evaluation data not found", data_path, exits=1)
if displacy_path and not displacy_path.exists():
msg.fail("Visualization output directory not found", displacy_path, exits=1)
corpus = GoldCorpus(data_path, data_path)
nlp = util.load_model(model)
dev_docs = list(corpus.dev_docs(nlp, gold_preproc=gold_preproc))
begin = timer()
scorer = nlp.evaluate(dev_docs, verbose=False)
end = timer()
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
results = {
"Time": "%.2f s" % (end - begin),
"Words": nwords,
"Words/s": "%.0f" % (nwords / (end - begin)),
"TOK": "%.2f" % scorer.token_acc,
"POS": "%.2f" % scorer.tags_acc,
"UAS": "%.2f" % scorer.uas,
"LAS": "%.2f" % scorer.las,
"NER P": "%.2f" % scorer.ents_p,
"NER R": "%.2f" % scorer.ents_r,
"NER F": "%.2f" % scorer.ents_f,
}
msg.table(results, title="Results")
if displacy_path:
docs, golds = zip(*dev_docs)
render_deps = "parser" in nlp.meta.get("pipeline", [])
render_ents = "ner" in nlp.meta.get("pipeline", [])
render_parses(
docs,
displacy_path,
model_name=model,
limit=displacy_limit,
deps=render_deps,
ents=render_ents,
)
msg.good("Generated {} parses as HTML".format(displacy_limit), displacy_path)
if return_scores:
return scorer.scores
|
[
"def",
"evaluate",
"(",
"model",
",",
"data_path",
",",
"gpu_id",
"=",
"-",
"1",
",",
"gold_preproc",
"=",
"False",
",",
"displacy_path",
"=",
"None",
",",
"displacy_limit",
"=",
"25",
",",
"return_scores",
"=",
"False",
",",
")",
":",
"msg",
"=",
"Printer",
"(",
")",
"util",
".",
"fix_random_seed",
"(",
")",
"if",
"gpu_id",
">=",
"0",
":",
"util",
".",
"use_gpu",
"(",
"gpu_id",
")",
"util",
".",
"set_env_log",
"(",
"False",
")",
"data_path",
"=",
"util",
".",
"ensure_path",
"(",
"data_path",
")",
"displacy_path",
"=",
"util",
".",
"ensure_path",
"(",
"displacy_path",
")",
"if",
"not",
"data_path",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Evaluation data not found\"",
",",
"data_path",
",",
"exits",
"=",
"1",
")",
"if",
"displacy_path",
"and",
"not",
"displacy_path",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Visualization output directory not found\"",
",",
"displacy_path",
",",
"exits",
"=",
"1",
")",
"corpus",
"=",
"GoldCorpus",
"(",
"data_path",
",",
"data_path",
")",
"nlp",
"=",
"util",
".",
"load_model",
"(",
"model",
")",
"dev_docs",
"=",
"list",
"(",
"corpus",
".",
"dev_docs",
"(",
"nlp",
",",
"gold_preproc",
"=",
"gold_preproc",
")",
")",
"begin",
"=",
"timer",
"(",
")",
"scorer",
"=",
"nlp",
".",
"evaluate",
"(",
"dev_docs",
",",
"verbose",
"=",
"False",
")",
"end",
"=",
"timer",
"(",
")",
"nwords",
"=",
"sum",
"(",
"len",
"(",
"doc_gold",
"[",
"0",
"]",
")",
"for",
"doc_gold",
"in",
"dev_docs",
")",
"results",
"=",
"{",
"\"Time\"",
":",
"\"%.2f s\"",
"%",
"(",
"end",
"-",
"begin",
")",
",",
"\"Words\"",
":",
"nwords",
",",
"\"Words/s\"",
":",
"\"%.0f\"",
"%",
"(",
"nwords",
"/",
"(",
"end",
"-",
"begin",
")",
")",
",",
"\"TOK\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"token_acc",
",",
"\"POS\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"tags_acc",
",",
"\"UAS\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"uas",
",",
"\"LAS\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"las",
",",
"\"NER P\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"ents_p",
",",
"\"NER R\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"ents_r",
",",
"\"NER F\"",
":",
"\"%.2f\"",
"%",
"scorer",
".",
"ents_f",
",",
"}",
"msg",
".",
"table",
"(",
"results",
",",
"title",
"=",
"\"Results\"",
")",
"if",
"displacy_path",
":",
"docs",
",",
"golds",
"=",
"zip",
"(",
"*",
"dev_docs",
")",
"render_deps",
"=",
"\"parser\"",
"in",
"nlp",
".",
"meta",
".",
"get",
"(",
"\"pipeline\"",
",",
"[",
"]",
")",
"render_ents",
"=",
"\"ner\"",
"in",
"nlp",
".",
"meta",
".",
"get",
"(",
"\"pipeline\"",
",",
"[",
"]",
")",
"render_parses",
"(",
"docs",
",",
"displacy_path",
",",
"model_name",
"=",
"model",
",",
"limit",
"=",
"displacy_limit",
",",
"deps",
"=",
"render_deps",
",",
"ents",
"=",
"render_ents",
",",
")",
"msg",
".",
"good",
"(",
"\"Generated {} parses as HTML\"",
".",
"format",
"(",
"displacy_limit",
")",
",",
"displacy_path",
")",
"if",
"return_scores",
":",
"return",
"scorer",
".",
"scores"
] |
Evaluate a model. To render a sample of parses in an HTML file, set an
output directory as the displacy_path argument.
|
[
"Evaluate",
"a",
"model",
".",
"To",
"render",
"a",
"sample",
"of",
"parses",
"in",
"a",
"HTML",
"file",
"set",
"an",
"output",
"directory",
"as",
"the",
"displacy_path",
"argument",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/evaluate.py#L22-L81
|
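A hedged usage sketch: the same function is exposed via `spacy.cli`, assuming an installed model and a dev set in spaCy's JSON training format (the file name is hypothetical):

```python
from spacy.cli import evaluate

# Assumes en_core_web_sm is installed and dev.json is a spaCy-format dev set
scores = evaluate("en_core_web_sm", "dev.json", return_scores=True)
print(scores)   # dict of accuracy figures from the Scorer
```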
21,206
|
explosion/spaCy
|
spacy/cli/profile.py
|
profile
|
def profile(model, inputs=None, n_texts=10000):
"""
Profile a spaCy pipeline, to find out which functions take the most time.
Input should be formatted as one JSON object per line with a key "text".
It can either be provided as a JSONL file, or be read from sys.stdin.
If no input file is specified, the IMDB dataset is loaded via Thinc.
"""
msg = Printer()
if inputs is not None:
inputs = _read_inputs(inputs, msg)
if inputs is None:
n_inputs = 25000
with msg.loading("Loading IMDB dataset via Thinc..."):
imdb_train, _ = thinc.extra.datasets.imdb()
inputs, _ = zip(*imdb_train)
msg.info("Loaded IMDB dataset and using {} examples".format(n_inputs))
inputs = inputs[:n_inputs]
with msg.loading("Loading model '{}'...".format(model)):
nlp = load_model(model)
msg.good("Loaded model '{}'".format(model))
texts = list(itertools.islice(inputs, n_texts))
cProfile.runctx("parse_texts(nlp, texts)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
msg.divider("Profile stats")
s.strip_dirs().sort_stats("time").print_stats()
|
python
|
def profile(model, inputs=None, n_texts=10000):
"""
Profile a spaCy pipeline, to find out which functions take the most time.
Input should be formatted as one JSON object per line with a key "text".
It can either be provided as a JSONL file, or be read from sys.stdin.
If no input file is specified, the IMDB dataset is loaded via Thinc.
"""
msg = Printer()
if inputs is not None:
inputs = _read_inputs(inputs, msg)
if inputs is None:
n_inputs = 25000
with msg.loading("Loading IMDB dataset via Thinc..."):
imdb_train, _ = thinc.extra.datasets.imdb()
inputs, _ = zip(*imdb_train)
msg.info("Loaded IMDB dataset and using {} examples".format(n_inputs))
inputs = inputs[:n_inputs]
with msg.loading("Loading model '{}'...".format(model)):
nlp = load_model(model)
msg.good("Loaded model '{}'".format(model))
texts = list(itertools.islice(inputs, n_texts))
cProfile.runctx("parse_texts(nlp, texts)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
msg.divider("Profile stats")
s.strip_dirs().sort_stats("time").print_stats()
|
[
"def",
"profile",
"(",
"model",
",",
"inputs",
"=",
"None",
",",
"n_texts",
"=",
"10000",
")",
":",
"msg",
"=",
"Printer",
"(",
")",
"if",
"inputs",
"is",
"not",
"None",
":",
"inputs",
"=",
"_read_inputs",
"(",
"inputs",
",",
"msg",
")",
"if",
"inputs",
"is",
"None",
":",
"n_inputs",
"=",
"25000",
"with",
"msg",
".",
"loading",
"(",
"\"Loading IMDB dataset via Thinc...\"",
")",
":",
"imdb_train",
",",
"_",
"=",
"thinc",
".",
"extra",
".",
"datasets",
".",
"imdb",
"(",
")",
"inputs",
",",
"_",
"=",
"zip",
"(",
"*",
"imdb_train",
")",
"msg",
".",
"info",
"(",
"\"Loaded IMDB dataset and using {} examples\"",
".",
"format",
"(",
"n_inputs",
")",
")",
"inputs",
"=",
"inputs",
"[",
":",
"n_inputs",
"]",
"with",
"msg",
".",
"loading",
"(",
"\"Loading model '{}'...\"",
".",
"format",
"(",
"model",
")",
")",
":",
"nlp",
"=",
"load_model",
"(",
"model",
")",
"msg",
".",
"good",
"(",
"\"Loaded model '{}'\"",
".",
"format",
"(",
"model",
")",
")",
"texts",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"inputs",
",",
"n_texts",
")",
")",
"cProfile",
".",
"runctx",
"(",
"\"parse_texts(nlp, texts)\"",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"\"Profile.prof\"",
")",
"s",
"=",
"pstats",
".",
"Stats",
"(",
"\"Profile.prof\"",
")",
"msg",
".",
"divider",
"(",
"\"Profile stats\"",
")",
"s",
".",
"strip_dirs",
"(",
")",
".",
"sort_stats",
"(",
"\"time\"",
")",
".",
"print_stats",
"(",
")"
] |
Profile a spaCy pipeline, to find out which functions take the most time.
Input should be formatted as one JSON object per line with a key "text".
It can either be provided as a JSONL file, or be read from sys.stdin.
If no input file is specified, the IMDB dataset is loaded via Thinc.
|
[
"Profile",
"a",
"spaCy",
"pipeline",
"to",
"find",
"out",
"which",
"functions",
"take",
"the",
"most",
"time",
".",
"Input",
"should",
"be",
"formatted",
"as",
"one",
"JSON",
"object",
"per",
"line",
"with",
"a",
"key",
"text",
".",
"It",
"can",
"either",
"be",
"provided",
"as",
"a",
"JSONL",
"file",
"or",
"be",
"read",
"from",
"sys",
".",
"sytdin",
".",
"If",
"no",
"input",
"file",
"is",
"specified",
"the",
"IMDB",
"dataset",
"is",
"loaded",
"via",
"Thinc",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/profile.py#L23-L47
|
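A usage sketch via `spacy.cli`, assuming an installed model; note that leaving `inputs=None` triggers the IMDB download through Thinc:

```python
from spacy.cli import profile

# With inputs=None the IMDB corpus is fetched via Thinc, which downloads data;
# pass a JSONL file of {"text": ...} records to profile your own inputs instead.
profile("en_core_web_sm", inputs=None, n_texts=100)
```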
21,207
|
explosion/spaCy
|
spacy/lang/ja/__init__.py
|
detailed_tokens
|
def detailed_tokens(tokenizer, text):
"""Format Mecab output into a nice data structure, based on Janome."""
node = tokenizer.parseToNode(text)
node = node.next # first node is beginning of sentence and empty, skip it
words = []
while node.posid != 0:
surface = node.surface
base = surface # a default value. Updated if available later.
parts = node.feature.split(",")
pos = ",".join(parts[0:4])
if len(parts) > 7:
# this information is only available for words in the tokenizer
# dictionary
base = parts[7]
words.append(ShortUnitWord(surface, base, pos))
node = node.next
return words
|
python
|
def detailed_tokens(tokenizer, text):
"""Format Mecab output into a nice data structure, based on Janome."""
node = tokenizer.parseToNode(text)
node = node.next # first node is beginning of sentence and empty, skip it
words = []
while node.posid != 0:
surface = node.surface
base = surface # a default value. Updated if available later.
parts = node.feature.split(",")
pos = ",".join(parts[0:4])
if len(parts) > 7:
# this information is only available for words in the tokenizer
# dictionary
base = parts[7]
words.append(ShortUnitWord(surface, base, pos))
node = node.next
return words
|
[
"def",
"detailed_tokens",
"(",
"tokenizer",
",",
"text",
")",
":",
"node",
"=",
"tokenizer",
".",
"parseToNode",
"(",
"text",
")",
"node",
"=",
"node",
".",
"next",
"# first node is beginning of sentence and empty, skip it",
"words",
"=",
"[",
"]",
"while",
"node",
".",
"posid",
"!=",
"0",
":",
"surface",
"=",
"node",
".",
"surface",
"base",
"=",
"surface",
"# a default value. Updated if available later.",
"parts",
"=",
"node",
".",
"feature",
".",
"split",
"(",
"\",\"",
")",
"pos",
"=",
"\",\"",
".",
"join",
"(",
"parts",
"[",
"0",
":",
"4",
"]",
")",
"if",
"len",
"(",
"parts",
")",
">",
"7",
":",
"# this information is only available for words in the tokenizer",
"# dictionary",
"base",
"=",
"parts",
"[",
"7",
"]",
"words",
".",
"append",
"(",
"ShortUnitWord",
"(",
"surface",
",",
"base",
",",
"pos",
")",
")",
"node",
"=",
"node",
".",
"next",
"return",
"words"
] |
Format Mecab output into a nice data structure, based on Janome.
|
[
"Format",
"Mecab",
"output",
"into",
"a",
"nice",
"data",
"structure",
"based",
"on",
"Janome",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/ja/__init__.py#L52-L68
|
21,208
|
explosion/spaCy
|
spacy/compat.py
|
symlink_to
|
def symlink_to(orig, dest):
"""Create a symlink. Used for model shortcut links.
orig (unicode / Path): The origin path.
dest (unicode / Path): The destination path of the symlink.
"""
if is_windows:
import subprocess
subprocess.check_call(
["mklink", "/d", path2str(orig), path2str(dest)], shell=True
)
else:
orig.symlink_to(dest)
|
python
|
def symlink_to(orig, dest):
"""Create a symlink. Used for model shortcut links.
orig (unicode / Path): The origin path.
dest (unicode / Path): The destination path of the symlink.
"""
if is_windows:
import subprocess
subprocess.check_call(
["mklink", "/d", path2str(orig), path2str(dest)], shell=True
)
else:
orig.symlink_to(dest)
|
[
"def",
"symlink_to",
"(",
"orig",
",",
"dest",
")",
":",
"if",
"is_windows",
":",
"import",
"subprocess",
"subprocess",
".",
"check_call",
"(",
"[",
"\"mklink\"",
",",
"\"/d\"",
",",
"path2str",
"(",
"orig",
")",
",",
"path2str",
"(",
"dest",
")",
"]",
",",
"shell",
"=",
"True",
")",
"else",
":",
"orig",
".",
"symlink_to",
"(",
"dest",
")"
] |
Create a symlink. Used for model shortcut links.
orig (unicode / Path): The origin path.
dest (unicode / Path): The destination path of the symlink.
|
[
"Create",
"a",
"symlink",
".",
"Used",
"for",
"model",
"shortcut",
"links",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/compat.py#L86-L99
|
21,209
|
explosion/spaCy
|
spacy/compat.py
|
symlink_remove
|
def symlink_remove(link):
"""Remove a symlink. Used for model shortcut links.
link (unicode / Path): The path to the symlink.
"""
# https://stackoverflow.com/q/26554135/6400719
if os.path.isdir(path2str(link)) and is_windows:
# this should only be on Py2.7 and windows
os.rmdir(path2str(link))
else:
os.unlink(path2str(link))
|
python
|
def symlink_remove(link):
"""Remove a symlink. Used for model shortcut links.
link (unicode / Path): The path to the symlink.
"""
# https://stackoverflow.com/q/26554135/6400719
if os.path.isdir(path2str(link)) and is_windows:
# this should only be on Py2.7 and windows
os.rmdir(path2str(link))
else:
os.unlink(path2str(link))
|
[
"def",
"symlink_remove",
"(",
"link",
")",
":",
"# https://stackoverflow.com/q/26554135/6400719",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path2str",
"(",
"link",
")",
")",
"and",
"is_windows",
":",
"# this should only be on Py2.7 and windows",
"os",
".",
"rmdir",
"(",
"path2str",
"(",
"link",
")",
")",
"else",
":",
"os",
".",
"unlink",
"(",
"path2str",
"(",
"link",
")",
")"
] |
Remove a symlink. Used for model shortcut links.
link (unicode / Path): The path to the symlink.
|
[
"Remove",
"a",
"symlink",
".",
"Used",
"for",
"model",
"shortcut",
"links",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/compat.py#L102-L112
|
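For illustration, how this helper pairs with `symlink_to` from the previous record; the paths are hypothetical, and creating the link on Windows may require elevated permissions:

```python
from pathlib import Path
from spacy.compat import symlink_to, symlink_remove

link = Path("spacy/data/my_model")          # hypothetical shortcut link location
target = Path("/models/en_core_web_sm")     # hypothetical model directory

symlink_to(link, target)    # create the link (uses mklink on Windows)
symlink_remove(link)        # remove it again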
21,210
|
explosion/spaCy
|
spacy/compat.py
|
is_config
|
def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
"""Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
"""
return (
python2 in (None, is_python2)
and python3 in (None, is_python3)
and windows in (None, is_windows)
and linux in (None, is_linux)
and osx in (None, is_osx)
)
|
python
|
def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
"""Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
"""
return (
python2 in (None, is_python2)
and python3 in (None, is_python3)
and windows in (None, is_windows)
and linux in (None, is_linux)
and osx in (None, is_osx)
)
|
[
"def",
"is_config",
"(",
"python2",
"=",
"None",
",",
"python3",
"=",
"None",
",",
"windows",
"=",
"None",
",",
"linux",
"=",
"None",
",",
"osx",
"=",
"None",
")",
":",
"return",
"(",
"python2",
"in",
"(",
"None",
",",
"is_python2",
")",
"and",
"python3",
"in",
"(",
"None",
",",
"is_python3",
")",
"and",
"windows",
"in",
"(",
"None",
",",
"is_windows",
")",
"and",
"linux",
"in",
"(",
"None",
",",
"is_linux",
")",
"and",
"osx",
"in",
"(",
"None",
",",
"is_osx",
")",
")"
] |
Check if a specific configuration of Python version and operating system
matches the user's setup. Mostly used to display targeted error messages.
python2 (bool): spaCy is executed with Python 2.x.
python3 (bool): spaCy is executed with Python 3.x.
windows (bool): spaCy is executed on Windows.
linux (bool): spaCy is executed on Linux.
osx (bool): spaCy is executed on OS X or macOS.
RETURNS (bool): Whether the configuration matches the user's platform.
DOCS: https://spacy.io/api/top-level#compat.is_config
|
[
"Check",
"if",
"a",
"specific",
"configuration",
"of",
"Python",
"version",
"and",
"operating",
"system",
"matches",
"the",
"user",
"s",
"setup",
".",
"Mostly",
"used",
"to",
"display",
"targeted",
"error",
"messages",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/compat.py#L115-L134
|
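A short sketch of how the flag combinations are typically queried:

```python
from spacy.compat import is_config

if is_config(python2=True, windows=True):
    print("Python 2 on Windows")
elif is_config(python3=True, linux=True):
    print("Python 3 on Linux")
```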
21,211
|
explosion/spaCy
|
spacy/compat.py
|
import_file
|
def import_file(name, loc):
"""Import module from a file. Used to load models from a directory.
name (unicode): Name of module to load.
loc (unicode / Path): Path to the file.
RETURNS: The loaded module.
"""
loc = path2str(loc)
if is_python_pre_3_5:
import imp
return imp.load_source(name, loc)
else:
import importlib.util
spec = importlib.util.spec_from_file_location(name, str(loc))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
|
python
|
def import_file(name, loc):
"""Import module from a file. Used to load models from a directory.
name (unicode): Name of module to load.
loc (unicode / Path): Path to the file.
RETURNS: The loaded module.
"""
loc = path2str(loc)
if is_python_pre_3_5:
import imp
return imp.load_source(name, loc)
else:
import importlib.util
spec = importlib.util.spec_from_file_location(name, str(loc))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
|
[
"def",
"import_file",
"(",
"name",
",",
"loc",
")",
":",
"loc",
"=",
"path2str",
"(",
"loc",
")",
"if",
"is_python_pre_3_5",
":",
"import",
"imp",
"return",
"imp",
".",
"load_source",
"(",
"name",
",",
"loc",
")",
"else",
":",
"import",
"importlib",
".",
"util",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"name",
",",
"str",
"(",
"loc",
")",
")",
"module",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"module",
")",
"return",
"module"
] |
Import module from a file. Used to load models from a directory.
name (unicode): Name of module to load.
loc (unicode / Path): Path to the file.
RETURNS: The loaded module.
|
[
"Import",
"module",
"from",
"a",
"file",
".",
"Used",
"to",
"load",
"models",
"from",
"a",
"directory",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/compat.py#L137-L155
|
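A usage sketch with a hypothetical on-disk module path:

```python
from spacy.compat import import_file

# Load a package's __init__.py straight from disk (hypothetical path)
module = import_file("my_model", "/models/my_model/__init__.py")
print(module.__name__)
```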
21,212
|
explosion/spaCy
|
spacy/util.py
|
get_lang_class
|
def get_lang_class(lang):
"""Import and load a Language class.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (Language): Language class.
"""
global LANGUAGES
# Check if an entry point is exposed for the language code
entry_point = get_entry_point("spacy_languages", lang)
if entry_point is not None:
LANGUAGES[lang] = entry_point
return entry_point
if lang not in LANGUAGES:
try:
module = importlib.import_module(".lang.%s" % lang, "spacy")
except ImportError as err:
raise ImportError(Errors.E048.format(lang=lang, err=err))
LANGUAGES[lang] = getattr(module, module.__all__[0])
return LANGUAGES[lang]
|
python
|
def get_lang_class(lang):
"""Import and load a Language class.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (Language): Language class.
"""
global LANGUAGES
# Check if an entry point is exposed for the language code
entry_point = get_entry_point("spacy_languages", lang)
if entry_point is not None:
LANGUAGES[lang] = entry_point
return entry_point
if lang not in LANGUAGES:
try:
module = importlib.import_module(".lang.%s" % lang, "spacy")
except ImportError as err:
raise ImportError(Errors.E048.format(lang=lang, err=err))
LANGUAGES[lang] = getattr(module, module.__all__[0])
return LANGUAGES[lang]
|
[
"def",
"get_lang_class",
"(",
"lang",
")",
":",
"global",
"LANGUAGES",
"# Check if an entry point is exposed for the language code",
"entry_point",
"=",
"get_entry_point",
"(",
"\"spacy_languages\"",
",",
"lang",
")",
"if",
"entry_point",
"is",
"not",
"None",
":",
"LANGUAGES",
"[",
"lang",
"]",
"=",
"entry_point",
"return",
"entry_point",
"if",
"lang",
"not",
"in",
"LANGUAGES",
":",
"try",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"\".lang.%s\"",
"%",
"lang",
",",
"\"spacy\"",
")",
"except",
"ImportError",
"as",
"err",
":",
"raise",
"ImportError",
"(",
"Errors",
".",
"E048",
".",
"format",
"(",
"lang",
"=",
"lang",
",",
"err",
"=",
"err",
")",
")",
"LANGUAGES",
"[",
"lang",
"]",
"=",
"getattr",
"(",
"module",
",",
"module",
".",
"__all__",
"[",
"0",
"]",
")",
"return",
"LANGUAGES",
"[",
"lang",
"]"
] |
Import and load a Language class.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (Language): Language class.
|
[
"Import",
"and",
"load",
"a",
"Language",
"class",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L53-L71
|
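A minimal usage sketch; no model package is required, since the language class alone gives a blank pipeline:

```python
from spacy.util import get_lang_class

German = get_lang_class("de")   # imports spacy.lang.de on first use
nlp = German()                  # blank German pipeline, no model required
print(nlp.lang)                 # 'de'
```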
21,213
|
explosion/spaCy
|
spacy/util.py
|
load_model
|
def load_model(name, **overrides):
"""Load a model from a shortcut link, package or data path.
name (unicode): Package name, shortcut link or model path.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with the loaded model.
"""
data_path = get_data_path()
if not data_path or not data_path.exists():
raise IOError(Errors.E049.format(path=path2str(data_path)))
if isinstance(name, basestring_): # in data dir / shortcut
if name in set([d.name for d in data_path.iterdir()]):
return load_model_from_link(name, **overrides)
if is_package(name): # installed as package
return load_model_from_package(name, **overrides)
if Path(name).exists(): # path to model data directory
return load_model_from_path(Path(name), **overrides)
elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **overrides)
raise IOError(Errors.E050.format(name=name))
|
python
|
def load_model(name, **overrides):
"""Load a model from a shortcut link, package or data path.
name (unicode): Package name, shortcut link or model path.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with the loaded model.
"""
data_path = get_data_path()
if not data_path or not data_path.exists():
raise IOError(Errors.E049.format(path=path2str(data_path)))
if isinstance(name, basestring_): # in data dir / shortcut
if name in set([d.name for d in data_path.iterdir()]):
return load_model_from_link(name, **overrides)
if is_package(name): # installed as package
return load_model_from_package(name, **overrides)
if Path(name).exists(): # path to model data directory
return load_model_from_path(Path(name), **overrides)
elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **overrides)
raise IOError(Errors.E050.format(name=name))
|
[
"def",
"load_model",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
":",
"data_path",
"=",
"get_data_path",
"(",
")",
"if",
"not",
"data_path",
"or",
"not",
"data_path",
".",
"exists",
"(",
")",
":",
"raise",
"IOError",
"(",
"Errors",
".",
"E049",
".",
"format",
"(",
"path",
"=",
"path2str",
"(",
"data_path",
")",
")",
")",
"if",
"isinstance",
"(",
"name",
",",
"basestring_",
")",
":",
"# in data dir / shortcut",
"if",
"name",
"in",
"set",
"(",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"data_path",
".",
"iterdir",
"(",
")",
"]",
")",
":",
"return",
"load_model_from_link",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
"if",
"is_package",
"(",
"name",
")",
":",
"# installed as package",
"return",
"load_model_from_package",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
"if",
"Path",
"(",
"name",
")",
".",
"exists",
"(",
")",
":",
"# path to model data directory",
"return",
"load_model_from_path",
"(",
"Path",
"(",
"name",
")",
",",
"*",
"*",
"overrides",
")",
"elif",
"hasattr",
"(",
"name",
",",
"\"exists\"",
")",
":",
"# Path or Path-like to model data",
"return",
"load_model_from_path",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
"raise",
"IOError",
"(",
"Errors",
".",
"E050",
".",
"format",
"(",
"name",
"=",
"name",
")",
")"
] |
Load a model from a shortcut link, package or data path.
name (unicode): Package name, shortcut link or model path.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with the loaded model.
|
[
"Load",
"a",
"model",
"from",
"a",
"shortcut",
"link",
"package",
"or",
"data",
"path",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L117-L136
|
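A usage sketch, assuming `en_core_web_sm` is installed; keyword overrides such as `disable` are passed through to the model's `load()`:

```python
from spacy.util import load_model

# Accepts a shortcut link name, an installed package name, or a path to model data
nlp = load_model("en_core_web_sm", disable=["parser", "ner"])
print(nlp.pipe_names)
```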
21,214
|
explosion/spaCy
|
spacy/util.py
|
load_model_from_link
|
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides)
|
python
|
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides)
|
[
"def",
"load_model_from_link",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
":",
"path",
"=",
"get_data_path",
"(",
")",
"/",
"name",
"/",
"\"__init__.py\"",
"try",
":",
"cls",
"=",
"import_file",
"(",
"name",
",",
"path",
")",
"except",
"AttributeError",
":",
"raise",
"IOError",
"(",
"Errors",
".",
"E051",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"return",
"cls",
".",
"load",
"(",
"*",
"*",
"overrides",
")"
] |
Load a model from a shortcut link, or directory in spaCy data path.
|
[
"Load",
"a",
"model",
"from",
"a",
"shortcut",
"link",
"or",
"directory",
"in",
"spaCy",
"data",
"path",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L139-L146
|
21,215
|
explosion/spaCy
|
spacy/util.py
|
load_model_from_package
|
def load_model_from_package(name, **overrides):
"""Load a model from an installed package."""
cls = importlib.import_module(name)
return cls.load(**overrides)
|
python
|
def load_model_from_package(name, **overrides):
"""Load a model from an installed package."""
cls = importlib.import_module(name)
return cls.load(**overrides)
|
[
"def",
"load_model_from_package",
"(",
"name",
",",
"*",
"*",
"overrides",
")",
":",
"cls",
"=",
"importlib",
".",
"import_module",
"(",
"name",
")",
"return",
"cls",
".",
"load",
"(",
"*",
"*",
"overrides",
")"
] |
Load a model from an installed package.
|
[
"Load",
"a",
"model",
"from",
"an",
"installed",
"package",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L149-L152
|
21,216
|
explosion/spaCy
|
spacy/util.py
|
get_model_meta
|
def get_model_meta(path):
"""Get model meta.json from a directory path and validate its contents.
path (unicode or Path): Path to model directory.
RETURNS (dict): The model's meta data.
"""
model_path = ensure_path(path)
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(model_path)))
meta_path = model_path / "meta.json"
if not meta_path.is_file():
raise IOError(Errors.E053.format(path=meta_path))
meta = srsly.read_json(meta_path)
for setting in ["lang", "name", "version"]:
if setting not in meta or not meta[setting]:
raise ValueError(Errors.E054.format(setting=setting))
return meta
|
python
|
def get_model_meta(path):
"""Get model meta.json from a directory path and validate its contents.
path (unicode or Path): Path to model directory.
RETURNS (dict): The model's meta data.
"""
model_path = ensure_path(path)
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(model_path)))
meta_path = model_path / "meta.json"
if not meta_path.is_file():
raise IOError(Errors.E053.format(path=meta_path))
meta = srsly.read_json(meta_path)
for setting in ["lang", "name", "version"]:
if setting not in meta or not meta[setting]:
raise ValueError(Errors.E054.format(setting=setting))
return meta
|
[
"def",
"get_model_meta",
"(",
"path",
")",
":",
"model_path",
"=",
"ensure_path",
"(",
"path",
")",
"if",
"not",
"model_path",
".",
"exists",
"(",
")",
":",
"raise",
"IOError",
"(",
"Errors",
".",
"E052",
".",
"format",
"(",
"path",
"=",
"path2str",
"(",
"model_path",
")",
")",
")",
"meta_path",
"=",
"model_path",
"/",
"\"meta.json\"",
"if",
"not",
"meta_path",
".",
"is_file",
"(",
")",
":",
"raise",
"IOError",
"(",
"Errors",
".",
"E053",
".",
"format",
"(",
"path",
"=",
"meta_path",
")",
")",
"meta",
"=",
"srsly",
".",
"read_json",
"(",
"meta_path",
")",
"for",
"setting",
"in",
"[",
"\"lang\"",
",",
"\"name\"",
",",
"\"version\"",
"]",
":",
"if",
"setting",
"not",
"in",
"meta",
"or",
"not",
"meta",
"[",
"setting",
"]",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E054",
".",
"format",
"(",
"setting",
"=",
"setting",
")",
")",
"return",
"meta"
] |
Get model meta.json from a directory path and validate its contents.
path (unicode or Path): Path to model directory.
RETURNS (dict): The model's meta data.
|
[
"Get",
"model",
"meta",
".",
"json",
"from",
"a",
"directory",
"path",
"and",
"validate",
"its",
"contents",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L193-L209
|
21,217
|
explosion/spaCy
|
spacy/util.py
|
get_package_path
|
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent
|
python
|
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent
|
[
"def",
"get_package_path",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"# use lowercase version to be safe",
"# Here we're importing the module just to find it. This is worryingly",
"# indirect, but it's otherwise very difficult to find the package.",
"pkg",
"=",
"importlib",
".",
"import_module",
"(",
"name",
")",
"return",
"Path",
"(",
"pkg",
".",
"__file__",
")",
".",
"parent"
] |
Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
|
[
"Get",
"the",
"path",
"to",
"an",
"installed",
"package",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L226-L236
|
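Combining this record with `get_model_meta` from the previous row (assumes `en_core_web_sm` is installed):

```python
from spacy.util import get_model_meta, get_package_path

pkg_path = get_package_path("en_core_web_sm")   # directory of the installed package
meta = get_model_meta(pkg_path)                 # reads and validates meta.json
print(meta["lang"], meta["name"], meta["version"])
```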
21,218
|
explosion/spaCy
|
spacy/util.py
|
get_entry_point
|
def get_entry_point(key, value):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
"""
for entry_point in pkg_resources.iter_entry_points(key):
if entry_point.name == value:
return entry_point.load()
|
python
|
def get_entry_point(key, value):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
"""
for entry_point in pkg_resources.iter_entry_points(key):
if entry_point.name == value:
return entry_point.load()
|
[
"def",
"get_entry_point",
"(",
"key",
",",
"value",
")",
":",
"for",
"entry_point",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"key",
")",
":",
"if",
"entry_point",
".",
"name",
"==",
"value",
":",
"return",
"entry_point",
".",
"load",
"(",
")"
] |
Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
|
[
"Check",
"if",
"registered",
"entry",
"point",
"is",
"available",
"for",
"a",
"given",
"name",
"and",
"load",
"it",
".",
"Otherwise",
"return",
"None",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L252-L262
|
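A sketch with a hypothetical entry point name, showing the `None` fallback:

```python
from spacy.util import get_entry_point

# "blah_lang" is a made-up name; returns None if nothing is registered
# under the "spacy_languages" entry point group with that name.
lang_cls = get_entry_point("spacy_languages", "blah_lang")
print(lang_cls)
```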
21,219
|
explosion/spaCy
|
spacy/util.py
|
compile_suffix_regex
|
def compile_suffix_regex(entries):
"""Compile a sequence of suffix rules into a regex object.
entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.suffix_search.
"""
expression = "|".join([piece + "$" for piece in entries if piece.strip()])
return re.compile(expression)
|
python
|
def compile_suffix_regex(entries):
"""Compile a sequence of suffix rules into a regex object.
entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.suffix_search.
"""
expression = "|".join([piece + "$" for piece in entries if piece.strip()])
return re.compile(expression)
|
[
"def",
"compile_suffix_regex",
"(",
"entries",
")",
":",
"expression",
"=",
"\"|\"",
".",
"join",
"(",
"[",
"piece",
"+",
"\"$\"",
"for",
"piece",
"in",
"entries",
"if",
"piece",
".",
"strip",
"(",
")",
"]",
")",
"return",
"re",
".",
"compile",
"(",
"expression",
")"
] |
Compile a sequence of suffix rules into a regex object.
entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.suffix_search.
|
[
"Compile",
"a",
"sequence",
"of",
"suffix",
"rules",
"into",
"a",
"regex",
"object",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L346-L353
|
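A usage sketch with the default punctuation rules this docstring refers to:

```python
from spacy.lang.punctuation import TOKENIZER_SUFFIXES
from spacy.util import compile_suffix_regex

suffix_re = compile_suffix_regex(TOKENIZER_SUFFIXES)
# The compiled pattern is what Tokenizer.suffix_search expects
print(suffix_re.search("card."))   # matches the trailing period
```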
21,220
|
explosion/spaCy
|
spacy/util.py
|
compile_infix_regex
|
def compile_infix_regex(entries):
"""Compile a sequence of infix rules into a regex object.
entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.infix_finditer.
"""
expression = "|".join([piece for piece in entries if piece.strip()])
return re.compile(expression)
|
python
|
def compile_infix_regex(entries):
"""Compile a sequence of infix rules into a regex object.
entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.infix_finditer.
"""
expression = "|".join([piece for piece in entries if piece.strip()])
return re.compile(expression)
|
[
"def",
"compile_infix_regex",
"(",
"entries",
")",
":",
"expression",
"=",
"\"|\"",
".",
"join",
"(",
"[",
"piece",
"for",
"piece",
"in",
"entries",
"if",
"piece",
".",
"strip",
"(",
")",
"]",
")",
"return",
"re",
".",
"compile",
"(",
"expression",
")"
] |
Compile a sequence of infix rules into a regex object.
entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.infix_finditer.
|
[
"Compile",
"a",
"sequence",
"of",
"infix",
"rules",
"into",
"a",
"regex",
"object",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L356-L363
|
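And the analogous sketch for the infix rules:

```python
from spacy.lang.punctuation import TOKENIZER_INFIXES
from spacy.util import compile_infix_regex

infix_re = compile_infix_regex(TOKENIZER_INFIXES)
# finditer over the text is how Tokenizer.infix_finditer uses the pattern
print([m.group() for m in infix_re.finditer("mother-in-law")])
```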
21,221
|
explosion/spaCy
|
spacy/util.py
|
expand_exc
|
def expand_exc(excs, search, replace):
"""Find string in tokenizer exceptions, duplicate entry and replace string.
For example, to add additional versions with typographic apostrophes.
excs (dict): Tokenizer exceptions.
search (unicode): String to find and replace.
replace (unicode): Replacement.
RETURNS (dict): Combined tokenizer exceptions.
"""
def _fix_token(token, search, replace):
fixed = dict(token)
fixed[ORTH] = fixed[ORTH].replace(search, replace)
return fixed
new_excs = dict(excs)
for token_string, tokens in excs.items():
if search in token_string:
new_key = token_string.replace(search, replace)
new_value = [_fix_token(t, search, replace) for t in tokens]
new_excs[new_key] = new_value
return new_excs
|
python
|
def expand_exc(excs, search, replace):
"""Find string in tokenizer exceptions, duplicate entry and replace string.
For example, to add additional versions with typographic apostrophes.
excs (dict): Tokenizer exceptions.
search (unicode): String to find and replace.
replace (unicode): Replacement.
RETURNS (dict): Combined tokenizer exceptions.
"""
def _fix_token(token, search, replace):
fixed = dict(token)
fixed[ORTH] = fixed[ORTH].replace(search, replace)
return fixed
new_excs = dict(excs)
for token_string, tokens in excs.items():
if search in token_string:
new_key = token_string.replace(search, replace)
new_value = [_fix_token(t, search, replace) for t in tokens]
new_excs[new_key] = new_value
return new_excs
|
[
"def",
"expand_exc",
"(",
"excs",
",",
"search",
",",
"replace",
")",
":",
"def",
"_fix_token",
"(",
"token",
",",
"search",
",",
"replace",
")",
":",
"fixed",
"=",
"dict",
"(",
"token",
")",
"fixed",
"[",
"ORTH",
"]",
"=",
"fixed",
"[",
"ORTH",
"]",
".",
"replace",
"(",
"search",
",",
"replace",
")",
"return",
"fixed",
"new_excs",
"=",
"dict",
"(",
"excs",
")",
"for",
"token_string",
",",
"tokens",
"in",
"excs",
".",
"items",
"(",
")",
":",
"if",
"search",
"in",
"token_string",
":",
"new_key",
"=",
"token_string",
".",
"replace",
"(",
"search",
",",
"replace",
")",
"new_value",
"=",
"[",
"_fix_token",
"(",
"t",
",",
"search",
",",
"replace",
")",
"for",
"t",
"in",
"tokens",
"]",
"new_excs",
"[",
"new_key",
"]",
"=",
"new_value",
"return",
"new_excs"
] |
Find string in tokenizer exceptions, duplicate entry and replace string.
For example, to add additional versions with typographic apostrophes.
excs (dict): Tokenizer exceptions.
search (unicode): String to find and replace.
replace (unicode): Replacement.
RETURNS (dict): Combined tokenizer exceptions.
|
[
"Find",
"string",
"in",
"tokenizer",
"exceptions",
"duplicate",
"entry",
"and",
"replace",
"string",
".",
"For",
"example",
"to",
"add",
"additional",
"versions",
"with",
"typographic",
"apostrophes",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L406-L427
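A short usage sketch: duplicating a tokenizer exception so that a variant with a typographic apostrophe is matched as well. The example exception dict is hypothetical; only ORTH is used because that is the only attribute the helper rewrites.

from spacy.attrs import ORTH
from spacy.util import expand_exc

excs = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
expanded = expand_exc(excs, "'", "’")
# the result keeps the original entry and adds a "don’t" variant
print(sorted(expanded.keys()))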
|
21,222
|
explosion/spaCy
|
spacy/util.py
|
minibatch
|
def minibatch(items, size=8):
"""Iterate over batches of items. `size` may be an iterator,
so that batch-size can vary on each step.
"""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0:
break
yield list(batch)
|
python
|
def minibatch(items, size=8):
"""Iterate over batches of items. `size` may be an iterator,
so that batch-size can vary on each step.
"""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0:
break
yield list(batch)
|
[
"def",
"minibatch",
"(",
"items",
",",
"size",
"=",
"8",
")",
":",
"if",
"isinstance",
"(",
"size",
",",
"int",
")",
":",
"size_",
"=",
"itertools",
".",
"repeat",
"(",
"size",
")",
"else",
":",
"size_",
"=",
"size",
"items",
"=",
"iter",
"(",
"items",
")",
"while",
"True",
":",
"batch_size",
"=",
"next",
"(",
"size_",
")",
"batch",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"items",
",",
"int",
"(",
"batch_size",
")",
")",
")",
"if",
"len",
"(",
"batch",
")",
"==",
"0",
":",
"break",
"yield",
"list",
"(",
"batch",
")"
] |
Iterate over batches of items. `size` may be an iterator,
so that batch-size can vary on each step.
|
[
"Iterate",
"over",
"batches",
"of",
"items",
".",
"size",
"may",
"be",
"an",
"iterator",
"so",
"that",
"batch",
"-",
"size",
"can",
"vary",
"on",
"each",
"step",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L446-L460
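A small usage sketch: batching a list of items with a fixed size, and with a compounding schedule as the training examples further down do. compounding is assumed to come from spacy.util alongside minibatch.

from spacy.util import minibatch, compounding

items = list(range(10))
print([batch for batch in minibatch(items, size=3)])
# with an iterator as `size`, each step can use a different batch size
print([len(batch) for batch in minibatch(items, size=compounding(1.0, 4.0, 1.5))])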
|
21,223
|
explosion/spaCy
|
spacy/util.py
|
minibatch_by_words
|
def minibatch_by_words(items, size, tuples=True, count_words=len):
"""Create minibatches of a given number of words."""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = []
while batch_size >= 0:
try:
if tuples:
doc, gold = next(items)
else:
doc = next(items)
except StopIteration:
if batch:
yield batch
return
batch_size -= count_words(doc)
if tuples:
batch.append((doc, gold))
else:
batch.append(doc)
if batch:
yield batch
|
python
|
def minibatch_by_words(items, size, tuples=True, count_words=len):
"""Create minibatches of a given number of words."""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = []
while batch_size >= 0:
try:
if tuples:
doc, gold = next(items)
else:
doc = next(items)
except StopIteration:
if batch:
yield batch
return
batch_size -= count_words(doc)
if tuples:
batch.append((doc, gold))
else:
batch.append(doc)
if batch:
yield batch
|
[
"def",
"minibatch_by_words",
"(",
"items",
",",
"size",
",",
"tuples",
"=",
"True",
",",
"count_words",
"=",
"len",
")",
":",
"if",
"isinstance",
"(",
"size",
",",
"int",
")",
":",
"size_",
"=",
"itertools",
".",
"repeat",
"(",
"size",
")",
"else",
":",
"size_",
"=",
"size",
"items",
"=",
"iter",
"(",
"items",
")",
"while",
"True",
":",
"batch_size",
"=",
"next",
"(",
"size_",
")",
"batch",
"=",
"[",
"]",
"while",
"batch_size",
">=",
"0",
":",
"try",
":",
"if",
"tuples",
":",
"doc",
",",
"gold",
"=",
"next",
"(",
"items",
")",
"else",
":",
"doc",
"=",
"next",
"(",
"items",
")",
"except",
"StopIteration",
":",
"if",
"batch",
":",
"yield",
"batch",
"return",
"batch_size",
"-=",
"count_words",
"(",
"doc",
")",
"if",
"tuples",
":",
"batch",
".",
"append",
"(",
"(",
"doc",
",",
"gold",
")",
")",
"else",
":",
"batch",
".",
"append",
"(",
"doc",
")",
"if",
"batch",
":",
"yield",
"batch"
] |
Create minibatches of a given number of words.
|
[
"Create",
"minibatches",
"of",
"a",
"given",
"number",
"of",
"words",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L516-L542
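A usage sketch under the assumption that plain Doc objects are passed with tuples=False; each batch is filled until roughly `size` words have been consumed, counted with the default count_words=len.

import spacy
from spacy.util import minibatch_by_words

nlp = spacy.blank("en")
texts = ["a short doc", "another somewhat longer document here", "tiny"]
docs = [nlp(text) for text in texts]
for batch in minibatch_by_words(docs, size=5, tuples=False):
    print([len(doc) for doc in batch])  # token counts per batch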
|
21,224
|
explosion/spaCy
|
spacy/pipeline/entityruler.py
|
EntityRuler.labels
|
def labels(self):
"""All labels present in the match patterns.
RETURNS (set): The string labels.
DOCS: https://spacy.io/api/entityruler#labels
"""
all_labels = set(self.token_patterns.keys())
all_labels.update(self.phrase_patterns.keys())
return tuple(all_labels)
|
python
|
def labels(self):
"""All labels present in the match patterns.
RETURNS (set): The string labels.
DOCS: https://spacy.io/api/entityruler#labels
"""
all_labels = set(self.token_patterns.keys())
all_labels.update(self.phrase_patterns.keys())
return tuple(all_labels)
|
[
"def",
"labels",
"(",
"self",
")",
":",
"all_labels",
"=",
"set",
"(",
"self",
".",
"token_patterns",
".",
"keys",
"(",
")",
")",
"all_labels",
".",
"update",
"(",
"self",
".",
"phrase_patterns",
".",
"keys",
"(",
")",
")",
"return",
"tuple",
"(",
"all_labels",
")"
] |
All labels present in the match patterns.
RETURNS (set): The string labels.
DOCS: https://spacy.io/api/entityruler#labels
|
[
"All",
"labels",
"present",
"in",
"the",
"match",
"patterns",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L96-L105
|
21,225
|
explosion/spaCy
|
spacy/pipeline/entityruler.py
|
EntityRuler.patterns
|
def patterns(self):
"""Get all patterns that were added to the entity ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/entityruler#patterns
"""
all_patterns = []
for label, patterns in self.token_patterns.items():
for pattern in patterns:
all_patterns.append({"label": label, "pattern": pattern})
for label, patterns in self.phrase_patterns.items():
for pattern in patterns:
all_patterns.append({"label": label, "pattern": pattern.text})
return all_patterns
|
python
|
def patterns(self):
"""Get all patterns that were added to the entity ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/entityruler#patterns
"""
all_patterns = []
for label, patterns in self.token_patterns.items():
for pattern in patterns:
all_patterns.append({"label": label, "pattern": pattern})
for label, patterns in self.phrase_patterns.items():
for pattern in patterns:
all_patterns.append({"label": label, "pattern": pattern.text})
return all_patterns
|
[
"def",
"patterns",
"(",
"self",
")",
":",
"all_patterns",
"=",
"[",
"]",
"for",
"label",
",",
"patterns",
"in",
"self",
".",
"token_patterns",
".",
"items",
"(",
")",
":",
"for",
"pattern",
"in",
"patterns",
":",
"all_patterns",
".",
"append",
"(",
"{",
"\"label\"",
":",
"label",
",",
"\"pattern\"",
":",
"pattern",
"}",
")",
"for",
"label",
",",
"patterns",
"in",
"self",
".",
"phrase_patterns",
".",
"items",
"(",
")",
":",
"for",
"pattern",
"in",
"patterns",
":",
"all_patterns",
".",
"append",
"(",
"{",
"\"label\"",
":",
"label",
",",
"\"pattern\"",
":",
"pattern",
".",
"text",
"}",
")",
"return",
"all_patterns"
] |
Get all patterns that were added to the entity ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/entityruler#patterns
|
[
"Get",
"all",
"patterns",
"that",
"were",
"added",
"to",
"the",
"entity",
"ruler",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L108-L122
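A minimal sketch covering both the labels and patterns properties above: an EntityRuler built on a blank pipeline, with one phrase pattern and one token pattern. The pattern contents are illustrative.

import spacy
from spacy.pipeline import EntityRuler

nlp = spacy.blank("en")
ruler = EntityRuler(nlp)
ruler.add_patterns([
    {"label": "ORG", "pattern": "Apple"},  # phrase pattern
    {"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]},  # token pattern
])
print(ruler.labels)    # tuple of label strings, e.g. ("ORG", "GPE") in some order
print(ruler.patterns)  # the original pattern dicts, reconstructed from both matchers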
|
21,226
|
explosion/spaCy
|
spacy/pipeline/entityruler.py
|
EntityRuler.from_bytes
|
def from_bytes(self, patterns_bytes, **kwargs):
"""Load the entity ruler from a bytestring.
patterns_bytes (bytes): The bytestring to load.
**kwargs: Other config paramters, mostly for consistency.
RETURNS (EntityRuler): The loaded entity ruler.
DOCS: https://spacy.io/api/entityruler#from_bytes
"""
patterns = srsly.msgpack_loads(patterns_bytes)
self.add_patterns(patterns)
return self
|
python
|
def from_bytes(self, patterns_bytes, **kwargs):
"""Load the entity ruler from a bytestring.
patterns_bytes (bytes): The bytestring to load.
**kwargs: Other config paramters, mostly for consistency.
RETURNS (EntityRuler): The loaded entity ruler.
DOCS: https://spacy.io/api/entityruler#from_bytes
"""
patterns = srsly.msgpack_loads(patterns_bytes)
self.add_patterns(patterns)
return self
|
[
"def",
"from_bytes",
"(",
"self",
",",
"patterns_bytes",
",",
"*",
"*",
"kwargs",
")",
":",
"patterns",
"=",
"srsly",
".",
"msgpack_loads",
"(",
"patterns_bytes",
")",
"self",
".",
"add_patterns",
"(",
"patterns",
")",
"return",
"self"
] |
Load the entity ruler from a bytestring.
patterns_bytes (bytes): The bytestring to load.
**kwargs: Other config parameters, mostly for consistency.
RETURNS (EntityRuler): The loaded entity ruler.
DOCS: https://spacy.io/api/entityruler#from_bytes
|
[
"Load",
"the",
"entity",
"ruler",
"from",
"a",
"bytestring",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L148-L159
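A round-trip sketch, assuming the matching to_bytes method in the same class: the serialized patterns survive a from_bytes reload into a fresh ruler.

import spacy
from spacy.pipeline import EntityRuler

nlp = spacy.blank("en")
ruler = EntityRuler(nlp)
ruler.add_patterns([{"label": "ORG", "pattern": "Acme Corp"}])
restored = EntityRuler(nlp).from_bytes(ruler.to_bytes())
assert restored.patterns == ruler.patterns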
|
21,227
|
explosion/spaCy
|
bin/ud/ud_train.py
|
golds_to_gold_tuples
|
def golds_to_gold_tuples(docs, golds):
"""Get out the annoying 'tuples' format used by begin_training, given the
GoldParse objects."""
tuples = []
for doc, gold in zip(docs, golds):
text = doc.text
ids, words, tags, heads, labels, iob = zip(*gold.orig_annot)
sents = [((ids, words, tags, heads, labels, iob), [])]
tuples.append((text, sents))
return tuples
|
python
|
def golds_to_gold_tuples(docs, golds):
"""Get out the annoying 'tuples' format used by begin_training, given the
GoldParse objects."""
tuples = []
for doc, gold in zip(docs, golds):
text = doc.text
ids, words, tags, heads, labels, iob = zip(*gold.orig_annot)
sents = [((ids, words, tags, heads, labels, iob), [])]
tuples.append((text, sents))
return tuples
|
[
"def",
"golds_to_gold_tuples",
"(",
"docs",
",",
"golds",
")",
":",
"tuples",
"=",
"[",
"]",
"for",
"doc",
",",
"gold",
"in",
"zip",
"(",
"docs",
",",
"golds",
")",
":",
"text",
"=",
"doc",
".",
"text",
"ids",
",",
"words",
",",
"tags",
",",
"heads",
",",
"labels",
",",
"iob",
"=",
"zip",
"(",
"*",
"gold",
".",
"orig_annot",
")",
"sents",
"=",
"[",
"(",
"(",
"ids",
",",
"words",
",",
"tags",
",",
"heads",
",",
"labels",
",",
"iob",
")",
",",
"[",
"]",
")",
"]",
"tuples",
".",
"append",
"(",
"(",
"text",
",",
"sents",
")",
")",
"return",
"tuples"
] |
Get out the annoying 'tuples' format used by begin_training, given the
GoldParse objects.
|
[
"Get",
"out",
"the",
"annoying",
"tuples",
"format",
"used",
"by",
"begin_training",
"given",
"the",
"GoldParse",
"objects",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/ud_train.py#L173-L182
|
21,228
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
merge_bytes
|
def merge_bytes(binder_strings):
"""Concatenate multiple serialized binders into one byte string."""
output = None
for byte_string in binder_strings:
binder = Binder().from_bytes(byte_string)
if output is None:
output = binder
else:
output.merge(binder)
return output.to_bytes()
|
python
|
def merge_bytes(binder_strings):
"""Concatenate multiple serialized binders into one byte string."""
output = None
for byte_string in binder_strings:
binder = Binder().from_bytes(byte_string)
if output is None:
output = binder
else:
output.merge(binder)
return output.to_bytes()
|
[
"def",
"merge_bytes",
"(",
"binder_strings",
")",
":",
"output",
"=",
"None",
"for",
"byte_string",
"in",
"binder_strings",
":",
"binder",
"=",
"Binder",
"(",
")",
".",
"from_bytes",
"(",
"byte_string",
")",
"if",
"output",
"is",
"None",
":",
"output",
"=",
"binder",
"else",
":",
"output",
".",
"merge",
"(",
"binder",
")",
"return",
"output",
".",
"to_bytes",
"(",
")"
] |
Concatenate multiple serialized binders into one byte string.
|
[
"Concatenate",
"multiple",
"serialized",
"binders",
"into",
"one",
"byte",
"string",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L97-L106
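A hedged usage sketch: two binders are serialized separately and merged into one byte string. It assumes Binder() can be constructed with its default attributes and that the private module spacy.tokens._serialize is imported directly, matching the record's path.

import spacy
from spacy.tokens._serialize import Binder, merge_bytes

nlp = spacy.blank("en")
binder_a, binder_b = Binder(), Binder()
binder_a.add(nlp("First document."))
binder_b.add(nlp("Second document."))
merged = merge_bytes([binder_a.to_bytes(), binder_b.to_bytes()])
docs = Binder().from_bytes(merged).get_docs(nlp.vocab)
print([doc.text for doc in docs])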
|
21,229
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
Binder.add
|
def add(self, doc):
"""Add a doc's annotations to the binder for serialization."""
array = doc.to_array(self.attrs)
if len(array.shape) == 1:
array = array.reshape((array.shape[0], 1))
self.tokens.append(array)
spaces = doc.to_array(SPACY)
assert array.shape[0] == spaces.shape[0]
spaces = spaces.reshape((spaces.shape[0], 1))
self.spaces.append(numpy.asarray(spaces, dtype=bool))
self.strings.update(w.text for w in doc)
|
python
|
def add(self, doc):
"""Add a doc's annotations to the binder for serialization."""
array = doc.to_array(self.attrs)
if len(array.shape) == 1:
array = array.reshape((array.shape[0], 1))
self.tokens.append(array)
spaces = doc.to_array(SPACY)
assert array.shape[0] == spaces.shape[0]
spaces = spaces.reshape((spaces.shape[0], 1))
self.spaces.append(numpy.asarray(spaces, dtype=bool))
self.strings.update(w.text for w in doc)
|
[
"def",
"add",
"(",
"self",
",",
"doc",
")",
":",
"array",
"=",
"doc",
".",
"to_array",
"(",
"self",
".",
"attrs",
")",
"if",
"len",
"(",
"array",
".",
"shape",
")",
"==",
"1",
":",
"array",
"=",
"array",
".",
"reshape",
"(",
"(",
"array",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"self",
".",
"tokens",
".",
"append",
"(",
"array",
")",
"spaces",
"=",
"doc",
".",
"to_array",
"(",
"SPACY",
")",
"assert",
"array",
".",
"shape",
"[",
"0",
"]",
"==",
"spaces",
".",
"shape",
"[",
"0",
"]",
"spaces",
"=",
"spaces",
".",
"reshape",
"(",
"(",
"spaces",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"self",
".",
"spaces",
".",
"append",
"(",
"numpy",
".",
"asarray",
"(",
"spaces",
",",
"dtype",
"=",
"bool",
")",
")",
"self",
".",
"strings",
".",
"update",
"(",
"w",
".",
"text",
"for",
"w",
"in",
"doc",
")"
] |
Add a doc's annotations to the binder for serialization.
|
[
"Add",
"a",
"doc",
"s",
"annotations",
"to",
"the",
"binder",
"for",
"serialization",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L35-L45
|
21,230
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
Binder.get_docs
|
def get_docs(self, vocab):
"""Recover Doc objects from the annotations, using the given vocab."""
for string in self.strings:
vocab[string]
orth_col = self.attrs.index(ORTH)
for tokens, spaces in zip(self.tokens, self.spaces):
words = [vocab.strings[orth] for orth in tokens[:, orth_col]]
doc = Doc(vocab, words=words, spaces=spaces)
doc = doc.from_array(self.attrs, tokens)
yield doc
|
python
|
def get_docs(self, vocab):
"""Recover Doc objects from the annotations, using the given vocab."""
for string in self.strings:
vocab[string]
orth_col = self.attrs.index(ORTH)
for tokens, spaces in zip(self.tokens, self.spaces):
words = [vocab.strings[orth] for orth in tokens[:, orth_col]]
doc = Doc(vocab, words=words, spaces=spaces)
doc = doc.from_array(self.attrs, tokens)
yield doc
|
[
"def",
"get_docs",
"(",
"self",
",",
"vocab",
")",
":",
"for",
"string",
"in",
"self",
".",
"strings",
":",
"vocab",
"[",
"string",
"]",
"orth_col",
"=",
"self",
".",
"attrs",
".",
"index",
"(",
"ORTH",
")",
"for",
"tokens",
",",
"spaces",
"in",
"zip",
"(",
"self",
".",
"tokens",
",",
"self",
".",
"spaces",
")",
":",
"words",
"=",
"[",
"vocab",
".",
"strings",
"[",
"orth",
"]",
"for",
"orth",
"in",
"tokens",
"[",
":",
",",
"orth_col",
"]",
"]",
"doc",
"=",
"Doc",
"(",
"vocab",
",",
"words",
"=",
"words",
",",
"spaces",
"=",
"spaces",
")",
"doc",
"=",
"doc",
".",
"from_array",
"(",
"self",
".",
"attrs",
",",
"tokens",
")",
"yield",
"doc"
] |
Recover Doc objects from the annotations, using the given vocab.
|
[
"Recover",
"Doc",
"objects",
"from",
"the",
"annotations",
"using",
"the",
"given",
"vocab",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L47-L56
|
21,231
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
Binder.merge
|
def merge(self, other):
"""Extend the annotations of this binder with the annotations from another."""
assert self.attrs == other.attrs
self.tokens.extend(other.tokens)
self.spaces.extend(other.spaces)
self.strings.update(other.strings)
|
python
|
def merge(self, other):
"""Extend the annotations of this binder with the annotations from another."""
assert self.attrs == other.attrs
self.tokens.extend(other.tokens)
self.spaces.extend(other.spaces)
self.strings.update(other.strings)
|
[
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"assert",
"self",
".",
"attrs",
"==",
"other",
".",
"attrs",
"self",
".",
"tokens",
".",
"extend",
"(",
"other",
".",
"tokens",
")",
"self",
".",
"spaces",
".",
"extend",
"(",
"other",
".",
"spaces",
")",
"self",
".",
"strings",
".",
"update",
"(",
"other",
".",
"strings",
")"
] |
Extend the annotations of this binder with the annotations from another.
|
[
"Extend",
"the",
"annotations",
"of",
"this",
"binder",
"with",
"the",
"annotations",
"from",
"another",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L58-L63
|
21,232
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
Binder.to_bytes
|
def to_bytes(self):
"""Serialize the binder's annotations into a byte string."""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
lengths = [len(tokens) for tokens in self.tokens]
msg = {
"attrs": self.attrs,
"tokens": numpy.vstack(self.tokens).tobytes("C"),
"spaces": numpy.vstack(self.spaces).tobytes("C"),
"lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(self.strings),
}
return gzip.compress(srsly.msgpack_dumps(msg))
|
python
|
def to_bytes(self):
"""Serialize the binder's annotations into a byte string."""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
lengths = [len(tokens) for tokens in self.tokens]
msg = {
"attrs": self.attrs,
"tokens": numpy.vstack(self.tokens).tobytes("C"),
"spaces": numpy.vstack(self.spaces).tobytes("C"),
"lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(self.strings),
}
return gzip.compress(srsly.msgpack_dumps(msg))
|
[
"def",
"to_bytes",
"(",
"self",
")",
":",
"for",
"tokens",
"in",
"self",
".",
"tokens",
":",
"assert",
"len",
"(",
"tokens",
".",
"shape",
")",
"==",
"2",
",",
"tokens",
".",
"shape",
"lengths",
"=",
"[",
"len",
"(",
"tokens",
")",
"for",
"tokens",
"in",
"self",
".",
"tokens",
"]",
"msg",
"=",
"{",
"\"attrs\"",
":",
"self",
".",
"attrs",
",",
"\"tokens\"",
":",
"numpy",
".",
"vstack",
"(",
"self",
".",
"tokens",
")",
".",
"tobytes",
"(",
"\"C\"",
")",
",",
"\"spaces\"",
":",
"numpy",
".",
"vstack",
"(",
"self",
".",
"spaces",
")",
".",
"tobytes",
"(",
"\"C\"",
")",
",",
"\"lengths\"",
":",
"numpy",
".",
"asarray",
"(",
"lengths",
",",
"dtype",
"=",
"\"int32\"",
")",
".",
"tobytes",
"(",
"\"C\"",
")",
",",
"\"strings\"",
":",
"list",
"(",
"self",
".",
"strings",
")",
",",
"}",
"return",
"gzip",
".",
"compress",
"(",
"srsly",
".",
"msgpack_dumps",
"(",
"msg",
")",
")"
] |
Serialize the binder's annotations into a byte string.
|
[
"Serialize",
"the",
"binder",
"s",
"annotations",
"into",
"a",
"byte",
"string",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L65-L77
|
21,233
|
explosion/spaCy
|
spacy/tokens/_serialize.py
|
Binder.from_bytes
|
def from_bytes(self, string):
"""Deserialize the binder's annotations from a byte string."""
msg = srsly.msgpack_loads(gzip.decompress(string))
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.fromstring(msg["lengths"], dtype="int32")
flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool)
flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64")
shape = (flat_tokens.size // len(self.attrs), len(self.attrs))
flat_tokens = flat_tokens.reshape(shape)
flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))
self.tokens = NumpyOps().unflatten(flat_tokens, lengths)
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
return self
|
python
|
def from_bytes(self, string):
"""Deserialize the binder's annotations from a byte string."""
msg = srsly.msgpack_loads(gzip.decompress(string))
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.fromstring(msg["lengths"], dtype="int32")
flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool)
flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64")
shape = (flat_tokens.size // len(self.attrs), len(self.attrs))
flat_tokens = flat_tokens.reshape(shape)
flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))
self.tokens = NumpyOps().unflatten(flat_tokens, lengths)
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
return self
|
[
"def",
"from_bytes",
"(",
"self",
",",
"string",
")",
":",
"msg",
"=",
"srsly",
".",
"msgpack_loads",
"(",
"gzip",
".",
"decompress",
"(",
"string",
")",
")",
"self",
".",
"attrs",
"=",
"msg",
"[",
"\"attrs\"",
"]",
"self",
".",
"strings",
"=",
"set",
"(",
"msg",
"[",
"\"strings\"",
"]",
")",
"lengths",
"=",
"numpy",
".",
"fromstring",
"(",
"msg",
"[",
"\"lengths\"",
"]",
",",
"dtype",
"=",
"\"int32\"",
")",
"flat_spaces",
"=",
"numpy",
".",
"fromstring",
"(",
"msg",
"[",
"\"spaces\"",
"]",
",",
"dtype",
"=",
"bool",
")",
"flat_tokens",
"=",
"numpy",
".",
"fromstring",
"(",
"msg",
"[",
"\"tokens\"",
"]",
",",
"dtype",
"=",
"\"uint64\"",
")",
"shape",
"=",
"(",
"flat_tokens",
".",
"size",
"//",
"len",
"(",
"self",
".",
"attrs",
")",
",",
"len",
"(",
"self",
".",
"attrs",
")",
")",
"flat_tokens",
"=",
"flat_tokens",
".",
"reshape",
"(",
"shape",
")",
"flat_spaces",
"=",
"flat_spaces",
".",
"reshape",
"(",
"(",
"flat_spaces",
".",
"size",
",",
"1",
")",
")",
"self",
".",
"tokens",
"=",
"NumpyOps",
"(",
")",
".",
"unflatten",
"(",
"flat_tokens",
",",
"lengths",
")",
"self",
".",
"spaces",
"=",
"NumpyOps",
"(",
")",
".",
"unflatten",
"(",
"flat_spaces",
",",
"lengths",
")",
"for",
"tokens",
"in",
"self",
".",
"tokens",
":",
"assert",
"len",
"(",
"tokens",
".",
"shape",
")",
"==",
"2",
",",
"tokens",
".",
"shape",
"return",
"self"
] |
Deserialize the binder's annotations from a byte string.
|
[
"Deserialize",
"the",
"binder",
"s",
"annotations",
"from",
"a",
"byte",
"string",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L79-L94
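A single-binder round-trip sketch tying the Binder methods above together: add docs, serialize with to_bytes, reload with from_bytes and recover the Doc objects with get_docs. Same assumptions as the merge_bytes sketch earlier.

import spacy
from spacy.tokens._serialize import Binder

nlp = spacy.blank("en")
binder = Binder()
for text in ["Serialize me.", "Me too, please."]:
    binder.add(nlp(text))
data = binder.to_bytes()               # gzip-compressed msgpack payload
restored = Binder().from_bytes(data)
print([doc.text for doc in restored.get_docs(nlp.vocab)])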
|
21,234
|
explosion/spaCy
|
spacy/lang/fr/lemmatizer/lemmatizer.py
|
FrenchLemmatizer.is_base_form
|
def is_base_form(self, univ_pos, morphology=None):
"""
Check whether we're dealing with an uninflected paradigm, so we can
avoid lemmatization entirely.
"""
morphology = {} if morphology is None else morphology
others = [key for key in morphology
if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')]
if univ_pos == 'noun' and morphology.get('Number') == 'sing':
return True
elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':
return True
# This maps 'VBP' to base form -- probably just need 'IS_BASE'
# morphology
elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and
morphology.get('Tense') == 'pres' and
morphology.get('Number') is None and
not others):
return True
elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':
return True
elif VerbForm_inf in morphology:
return True
elif VerbForm_none in morphology:
return True
elif Number_sing in morphology:
return True
elif Degree_pos in morphology:
return True
else:
return False
|
python
|
def is_base_form(self, univ_pos, morphology=None):
"""
Check whether we're dealing with an uninflected paradigm, so we can
avoid lemmatization entirely.
"""
morphology = {} if morphology is None else morphology
others = [key for key in morphology
if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')]
if univ_pos == 'noun' and morphology.get('Number') == 'sing':
return True
elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':
return True
# This maps 'VBP' to base form -- probably just need 'IS_BASE'
# morphology
elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and
morphology.get('Tense') == 'pres' and
morphology.get('Number') is None and
not others):
return True
elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':
return True
elif VerbForm_inf in morphology:
return True
elif VerbForm_none in morphology:
return True
elif Number_sing in morphology:
return True
elif Degree_pos in morphology:
return True
else:
return False
|
[
"def",
"is_base_form",
"(",
"self",
",",
"univ_pos",
",",
"morphology",
"=",
"None",
")",
":",
"morphology",
"=",
"{",
"}",
"if",
"morphology",
"is",
"None",
"else",
"morphology",
"others",
"=",
"[",
"key",
"for",
"key",
"in",
"morphology",
"if",
"key",
"not",
"in",
"(",
"POS",
",",
"'Number'",
",",
"'POS'",
",",
"'VerbForm'",
",",
"'Tense'",
")",
"]",
"if",
"univ_pos",
"==",
"'noun'",
"and",
"morphology",
".",
"get",
"(",
"'Number'",
")",
"==",
"'sing'",
":",
"return",
"True",
"elif",
"univ_pos",
"==",
"'verb'",
"and",
"morphology",
".",
"get",
"(",
"'VerbForm'",
")",
"==",
"'inf'",
":",
"return",
"True",
"# This maps 'VBP' to base form -- probably just need 'IS_BASE'",
"# morphology",
"elif",
"univ_pos",
"==",
"'verb'",
"and",
"(",
"morphology",
".",
"get",
"(",
"'VerbForm'",
")",
"==",
"'fin'",
"and",
"morphology",
".",
"get",
"(",
"'Tense'",
")",
"==",
"'pres'",
"and",
"morphology",
".",
"get",
"(",
"'Number'",
")",
"is",
"None",
"and",
"not",
"others",
")",
":",
"return",
"True",
"elif",
"univ_pos",
"==",
"'adj'",
"and",
"morphology",
".",
"get",
"(",
"'Degree'",
")",
"==",
"'pos'",
":",
"return",
"True",
"elif",
"VerbForm_inf",
"in",
"morphology",
":",
"return",
"True",
"elif",
"VerbForm_none",
"in",
"morphology",
":",
"return",
"True",
"elif",
"Number_sing",
"in",
"morphology",
":",
"return",
"True",
"elif",
"Degree_pos",
"in",
"morphology",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check whether we're dealing with an uninflected paradigm, so we can
avoid lemmatization entirely.
|
[
"Check",
"whether",
"we",
"re",
"dealing",
"with",
"an",
"uninflected",
"paradigm",
"so",
"we",
"can",
"avoid",
"lemmatization",
"entirely",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/fr/lemmatizer/lemmatizer.py#L63-L93
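A hedged sketch of calling is_base_form directly. The import path follows the record's file path, and the no-argument constructor is an assumption (the base Lemmatizer accepts empty lookup tables); only the morphology logic above is exercised.

from spacy.lang.fr.lemmatizer.lemmatizer import FrenchLemmatizer

lemmatizer = FrenchLemmatizer()  # assumed: empty lookup tables are acceptable
print(lemmatizer.is_base_form("verb", {"VerbForm": "inf"}))   # True: infinitive
print(lemmatizer.is_base_form("noun", {"Number": "plur"}))    # False: plural noun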
|
21,235
|
explosion/spaCy
|
examples/training/train_new_entity_type.py
|
main
|
def main(model=None, new_model_name="animal", output_dir=None, n_iter=30):
"""Set up the pipeline and entity recognizer, and train the new entity."""
random.seed(0)
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# Add entity recognizer to model if it's not in the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner)
# otherwise, get it, so we can add labels to it
else:
ner = nlp.get_pipe("ner")
ner.add_label(LABEL) # add new entity label to entity recognizer
# Adding extraneous labels shouldn't mess anything up
ner.add_label("VEGETABLE")
if model is None:
optimizer = nlp.begin_training()
else:
optimizer = nlp.resume_training()
move_names = list(ner.move_names)
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
sizes = compounding(1.0, 4.0, 1.001)
# batch up the examples using spaCy's minibatch
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
batches = minibatch(TRAIN_DATA, size=sizes)
losses = {}
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)
print("Losses", losses)
# test the trained model
test_text = "Do you like horses?"
doc = nlp(test_text)
print("Entities in '%s'" % test_text)
for ent in doc.ents:
print(ent.label_, ent.text)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.meta["name"] = new_model_name # rename model
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
# Check the classes have loaded back consistently
assert nlp2.get_pipe("ner").move_names == move_names
doc2 = nlp2(test_text)
for ent in doc2.ents:
print(ent.label_, ent.text)
|
python
|
def main(model=None, new_model_name="animal", output_dir=None, n_iter=30):
"""Set up the pipeline and entity recognizer, and train the new entity."""
random.seed(0)
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# Add entity recognizer to model if it's not in the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner)
# otherwise, get it, so we can add labels to it
else:
ner = nlp.get_pipe("ner")
ner.add_label(LABEL) # add new entity label to entity recognizer
# Adding extraneous labels shouldn't mess anything up
ner.add_label("VEGETABLE")
if model is None:
optimizer = nlp.begin_training()
else:
optimizer = nlp.resume_training()
move_names = list(ner.move_names)
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
sizes = compounding(1.0, 4.0, 1.001)
# batch up the examples using spaCy's minibatch
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
batches = minibatch(TRAIN_DATA, size=sizes)
losses = {}
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)
print("Losses", losses)
# test the trained model
test_text = "Do you like horses?"
doc = nlp(test_text)
print("Entities in '%s'" % test_text)
for ent in doc.ents:
print(ent.label_, ent.text)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.meta["name"] = new_model_name # rename model
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
# Check the classes have loaded back consistently
assert nlp2.get_pipe("ner").move_names == move_names
doc2 = nlp2(test_text)
for ent in doc2.ents:
print(ent.label_, ent.text)
|
[
"def",
"main",
"(",
"model",
"=",
"None",
",",
"new_model_name",
"=",
"\"animal\"",
",",
"output_dir",
"=",
"None",
",",
"n_iter",
"=",
"30",
")",
":",
"random",
".",
"seed",
"(",
"0",
")",
"if",
"model",
"is",
"not",
"None",
":",
"nlp",
"=",
"spacy",
".",
"load",
"(",
"model",
")",
"# load existing spaCy model",
"print",
"(",
"\"Loaded model '%s'\"",
"%",
"model",
")",
"else",
":",
"nlp",
"=",
"spacy",
".",
"blank",
"(",
"\"en\"",
")",
"# create blank Language class",
"print",
"(",
"\"Created blank 'en' model\"",
")",
"# Add entity recognizer to model if it's not in the pipeline",
"# nlp.create_pipe works for built-ins that are registered with spaCy",
"if",
"\"ner\"",
"not",
"in",
"nlp",
".",
"pipe_names",
":",
"ner",
"=",
"nlp",
".",
"create_pipe",
"(",
"\"ner\"",
")",
"nlp",
".",
"add_pipe",
"(",
"ner",
")",
"# otherwise, get it, so we can add labels to it",
"else",
":",
"ner",
"=",
"nlp",
".",
"get_pipe",
"(",
"\"ner\"",
")",
"ner",
".",
"add_label",
"(",
"LABEL",
")",
"# add new entity label to entity recognizer",
"# Adding extraneous labels shouldn't mess anything up",
"ner",
".",
"add_label",
"(",
"\"VEGETABLE\"",
")",
"if",
"model",
"is",
"None",
":",
"optimizer",
"=",
"nlp",
".",
"begin_training",
"(",
")",
"else",
":",
"optimizer",
"=",
"nlp",
".",
"resume_training",
"(",
")",
"move_names",
"=",
"list",
"(",
"ner",
".",
"move_names",
")",
"# get names of other pipes to disable them during training",
"other_pipes",
"=",
"[",
"pipe",
"for",
"pipe",
"in",
"nlp",
".",
"pipe_names",
"if",
"pipe",
"!=",
"\"ner\"",
"]",
"with",
"nlp",
".",
"disable_pipes",
"(",
"*",
"other_pipes",
")",
":",
"# only train NER",
"sizes",
"=",
"compounding",
"(",
"1.0",
",",
"4.0",
",",
"1.001",
")",
"# batch up the examples using spaCy's minibatch",
"for",
"itn",
"in",
"range",
"(",
"n_iter",
")",
":",
"random",
".",
"shuffle",
"(",
"TRAIN_DATA",
")",
"batches",
"=",
"minibatch",
"(",
"TRAIN_DATA",
",",
"size",
"=",
"sizes",
")",
"losses",
"=",
"{",
"}",
"for",
"batch",
"in",
"batches",
":",
"texts",
",",
"annotations",
"=",
"zip",
"(",
"*",
"batch",
")",
"nlp",
".",
"update",
"(",
"texts",
",",
"annotations",
",",
"sgd",
"=",
"optimizer",
",",
"drop",
"=",
"0.35",
",",
"losses",
"=",
"losses",
")",
"print",
"(",
"\"Losses\"",
",",
"losses",
")",
"# test the trained model",
"test_text",
"=",
"\"Do you like horses?\"",
"doc",
"=",
"nlp",
"(",
"test_text",
")",
"print",
"(",
"\"Entities in '%s'\"",
"%",
"test_text",
")",
"for",
"ent",
"in",
"doc",
".",
"ents",
":",
"print",
"(",
"ent",
".",
"label_",
",",
"ent",
".",
"text",
")",
"# save model to output directory",
"if",
"output_dir",
"is",
"not",
"None",
":",
"output_dir",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
")",
"nlp",
".",
"meta",
"[",
"\"name\"",
"]",
"=",
"new_model_name",
"# rename model",
"nlp",
".",
"to_disk",
"(",
"output_dir",
")",
"print",
"(",
"\"Saved model to\"",
",",
"output_dir",
")",
"# test the saved model",
"print",
"(",
"\"Loading from\"",
",",
"output_dir",
")",
"nlp2",
"=",
"spacy",
".",
"load",
"(",
"output_dir",
")",
"# Check the classes have loaded back consistently",
"assert",
"nlp2",
".",
"get_pipe",
"(",
"\"ner\"",
")",
".",
"move_names",
"==",
"move_names",
"doc2",
"=",
"nlp2",
"(",
"test_text",
")",
"for",
"ent",
"in",
"doc2",
".",
"ents",
":",
"print",
"(",
"ent",
".",
"label_",
",",
"ent",
".",
"text",
")"
] |
Set up the pipeline and entity recognizer, and train the new entity.
|
[
"Set",
"up",
"the",
"pipeline",
"and",
"entity",
"recognizer",
"and",
"train",
"the",
"new",
"entity",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_new_entity_type.py#L71-L134
|
21,236
|
explosion/spaCy
|
spacy/cli/converters/conll_ner2json.py
|
conll_ner2json
|
def conll_ner2json(input_data, **kwargs):
"""
Convert files in the CoNLL-2003 NER format into JSON format for use with
train cli.
"""
delimit_docs = "-DOCSTART- -X- O O"
output_docs = []
for doc in input_data.strip().split(delimit_docs):
doc = doc.strip()
if not doc:
continue
output_doc = []
for sent in doc.split("\n\n"):
sent = sent.strip()
if not sent:
continue
lines = [line.strip() for line in sent.split("\n") if line.strip()]
words, tags, chunks, iob_ents = zip(*[line.split() for line in lines])
biluo_ents = iob_to_biluo(iob_ents)
output_doc.append(
{
"tokens": [
{"orth": w, "tag": tag, "ner": ent}
for (w, tag, ent) in zip(words, tags, biluo_ents)
]
}
)
output_docs.append(
{"id": len(output_docs), "paragraphs": [{"sentences": output_doc}]}
)
output_doc = []
return output_docs
|
python
|
def conll_ner2json(input_data, **kwargs):
"""
Convert files in the CoNLL-2003 NER format into JSON format for use with
train cli.
"""
delimit_docs = "-DOCSTART- -X- O O"
output_docs = []
for doc in input_data.strip().split(delimit_docs):
doc = doc.strip()
if not doc:
continue
output_doc = []
for sent in doc.split("\n\n"):
sent = sent.strip()
if not sent:
continue
lines = [line.strip() for line in sent.split("\n") if line.strip()]
words, tags, chunks, iob_ents = zip(*[line.split() for line in lines])
biluo_ents = iob_to_biluo(iob_ents)
output_doc.append(
{
"tokens": [
{"orth": w, "tag": tag, "ner": ent}
for (w, tag, ent) in zip(words, tags, biluo_ents)
]
}
)
output_docs.append(
{"id": len(output_docs), "paragraphs": [{"sentences": output_doc}]}
)
output_doc = []
return output_docs
|
[
"def",
"conll_ner2json",
"(",
"input_data",
",",
"*",
"*",
"kwargs",
")",
":",
"delimit_docs",
"=",
"\"-DOCSTART- -X- O O\"",
"output_docs",
"=",
"[",
"]",
"for",
"doc",
"in",
"input_data",
".",
"strip",
"(",
")",
".",
"split",
"(",
"delimit_docs",
")",
":",
"doc",
"=",
"doc",
".",
"strip",
"(",
")",
"if",
"not",
"doc",
":",
"continue",
"output_doc",
"=",
"[",
"]",
"for",
"sent",
"in",
"doc",
".",
"split",
"(",
"\"\\n\\n\"",
")",
":",
"sent",
"=",
"sent",
".",
"strip",
"(",
")",
"if",
"not",
"sent",
":",
"continue",
"lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"sent",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"line",
".",
"strip",
"(",
")",
"]",
"words",
",",
"tags",
",",
"chunks",
",",
"iob_ents",
"=",
"zip",
"(",
"*",
"[",
"line",
".",
"split",
"(",
")",
"for",
"line",
"in",
"lines",
"]",
")",
"biluo_ents",
"=",
"iob_to_biluo",
"(",
"iob_ents",
")",
"output_doc",
".",
"append",
"(",
"{",
"\"tokens\"",
":",
"[",
"{",
"\"orth\"",
":",
"w",
",",
"\"tag\"",
":",
"tag",
",",
"\"ner\"",
":",
"ent",
"}",
"for",
"(",
"w",
",",
"tag",
",",
"ent",
")",
"in",
"zip",
"(",
"words",
",",
"tags",
",",
"biluo_ents",
")",
"]",
"}",
")",
"output_docs",
".",
"append",
"(",
"{",
"\"id\"",
":",
"len",
"(",
"output_docs",
")",
",",
"\"paragraphs\"",
":",
"[",
"{",
"\"sentences\"",
":",
"output_doc",
"}",
"]",
"}",
")",
"output_doc",
"=",
"[",
"]",
"return",
"output_docs"
] |
Convert files in the CoNLL-2003 NER format into JSON format for use with
train cli.
|
[
"Convert",
"files",
"in",
"the",
"CoNLL",
"-",
"2003",
"NER",
"format",
"into",
"JSON",
"format",
"for",
"use",
"with",
"train",
"cli",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conll_ner2json.py#L7-L38
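A tiny illustrative input (four whitespace-separated columns per line: word, POS tag, chunk, IOB entity) and the resulting JSON-style structure. The import assumes the converter is re-exported from spacy.cli.converters.

from spacy.cli.converters import conll_ner2json

conll_sample = (
    "-DOCSTART- -X- O O\n"
    "\n"
    "SOCCER NN B-NP O\n"
    "JAPAN NNP B-NP B-LOC\n"
    "GET VB B-VP O\n"
    "WIN NN B-NP O\n"
)
docs = conll_ner2json(conll_sample)
print(docs[0]["paragraphs"][0]["sentences"][0]["tokens"])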
|
21,237
|
explosion/spaCy
|
examples/training/train_tagger.py
|
main
|
def main(lang="en", output_dir=None, n_iter=25):
"""Create a new model, set up the pipeline and train the tagger. In order to
train the tagger with a custom tag map, we're creating a new Language
instance with a custom vocab.
"""
nlp = spacy.blank(lang)
# add the tagger to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
tagger = nlp.create_pipe("tagger")
# Add the tags. This needs to be done before you start training.
for tag, values in TAG_MAP.items():
tagger.add_label(tag, values)
nlp.add_pipe(tagger)
optimizer = nlp.begin_training()
for i in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
print("Losses", losses)
# test the trained model
test_text = "I like blue eggs"
doc = nlp(test_text)
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the save model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc = nlp2(test_text)
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
|
python
|
def main(lang="en", output_dir=None, n_iter=25):
"""Create a new model, set up the pipeline and train the tagger. In order to
train the tagger with a custom tag map, we're creating a new Language
instance with a custom vocab.
"""
nlp = spacy.blank(lang)
# add the tagger to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
tagger = nlp.create_pipe("tagger")
# Add the tags. This needs to be done before you start training.
for tag, values in TAG_MAP.items():
tagger.add_label(tag, values)
nlp.add_pipe(tagger)
optimizer = nlp.begin_training()
for i in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
print("Losses", losses)
# test the trained model
test_text = "I like blue eggs"
doc = nlp(test_text)
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the save model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc = nlp2(test_text)
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
|
[
"def",
"main",
"(",
"lang",
"=",
"\"en\"",
",",
"output_dir",
"=",
"None",
",",
"n_iter",
"=",
"25",
")",
":",
"nlp",
"=",
"spacy",
".",
"blank",
"(",
"lang",
")",
"# add the tagger to the pipeline",
"# nlp.create_pipe works for built-ins that are registered with spaCy",
"tagger",
"=",
"nlp",
".",
"create_pipe",
"(",
"\"tagger\"",
")",
"# Add the tags. This needs to be done before you start training.",
"for",
"tag",
",",
"values",
"in",
"TAG_MAP",
".",
"items",
"(",
")",
":",
"tagger",
".",
"add_label",
"(",
"tag",
",",
"values",
")",
"nlp",
".",
"add_pipe",
"(",
"tagger",
")",
"optimizer",
"=",
"nlp",
".",
"begin_training",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"n_iter",
")",
":",
"random",
".",
"shuffle",
"(",
"TRAIN_DATA",
")",
"losses",
"=",
"{",
"}",
"# batch up the examples using spaCy's minibatch",
"batches",
"=",
"minibatch",
"(",
"TRAIN_DATA",
",",
"size",
"=",
"compounding",
"(",
"4.0",
",",
"32.0",
",",
"1.001",
")",
")",
"for",
"batch",
"in",
"batches",
":",
"texts",
",",
"annotations",
"=",
"zip",
"(",
"*",
"batch",
")",
"nlp",
".",
"update",
"(",
"texts",
",",
"annotations",
",",
"sgd",
"=",
"optimizer",
",",
"losses",
"=",
"losses",
")",
"print",
"(",
"\"Losses\"",
",",
"losses",
")",
"# test the trained model",
"test_text",
"=",
"\"I like blue eggs\"",
"doc",
"=",
"nlp",
"(",
"test_text",
")",
"print",
"(",
"\"Tags\"",
",",
"[",
"(",
"t",
".",
"text",
",",
"t",
".",
"tag_",
",",
"t",
".",
"pos_",
")",
"for",
"t",
"in",
"doc",
"]",
")",
"# save model to output directory",
"if",
"output_dir",
"is",
"not",
"None",
":",
"output_dir",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
")",
"nlp",
".",
"to_disk",
"(",
"output_dir",
")",
"print",
"(",
"\"Saved model to\"",
",",
"output_dir",
")",
"# test the save model",
"print",
"(",
"\"Loading from\"",
",",
"output_dir",
")",
"nlp2",
"=",
"spacy",
".",
"load",
"(",
"output_dir",
")",
"doc",
"=",
"nlp2",
"(",
"test_text",
")",
"print",
"(",
"\"Tags\"",
",",
"[",
"(",
"t",
".",
"text",
",",
"t",
".",
"tag_",
",",
"t",
".",
"pos_",
")",
"for",
"t",
"in",
"doc",
"]",
")"
] |
Create a new model, set up the pipeline and train the tagger. In order to
train the tagger with a custom tag map, we're creating a new Language
instance with a custom vocab.
|
[
"Create",
"a",
"new",
"model",
"set",
"up",
"the",
"pipeline",
"and",
"train",
"the",
"tagger",
".",
"In",
"order",
"to",
"train",
"the",
"tagger",
"with",
"a",
"custom",
"tag",
"map",
"we",
"re",
"creating",
"a",
"new",
"Language",
"instance",
"with",
"a",
"custom",
"vocab",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_tagger.py#L47-L89
|
21,238
|
explosion/spaCy
|
spacy/cli/init_model.py
|
init_model
|
def init_model(
lang,
output_dir,
freqs_loc=None,
clusters_loc=None,
jsonl_loc=None,
vectors_loc=None,
prune_vectors=-1,
):
"""
Create a new model from raw data, like word frequencies, Brown clusters
and word vectors. If vectors are provided in Word2Vec format, they can
be either a .txt or zipped as a .zip or .tar.gz.
"""
if jsonl_loc is not None:
if freqs_loc is not None or clusters_loc is not None:
settings = ["-j"]
if freqs_loc:
settings.append("-f")
if clusters_loc:
settings.append("-c")
msg.warn(
"Incompatible arguments",
"The -f and -c arguments are deprecated, and not compatible "
"with the -j argument, which should specify the same "
"information. Either merge the frequencies and clusters data "
"into the JSONL-formatted file (recommended), or use only the "
"-f and -c files, without the other lexical attributes.",
)
jsonl_loc = ensure_path(jsonl_loc)
lex_attrs = srsly.read_jsonl(jsonl_loc)
else:
clusters_loc = ensure_path(clusters_loc)
freqs_loc = ensure_path(freqs_loc)
if freqs_loc is not None and not freqs_loc.exists():
msg.fail("Can't find words frequencies file", freqs_loc, exits=1)
lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc)
with msg.loading("Creating model..."):
nlp = create_model(lang, lex_attrs)
msg.good("Successfully created model")
if vectors_loc is not None:
add_vectors(nlp, vectors_loc, prune_vectors)
vec_added = len(nlp.vocab.vectors)
lex_added = len(nlp.vocab)
msg.good(
"Sucessfully compiled vocab",
"{} entries, {} vectors".format(lex_added, vec_added),
)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
return nlp
|
python
|
def init_model(
lang,
output_dir,
freqs_loc=None,
clusters_loc=None,
jsonl_loc=None,
vectors_loc=None,
prune_vectors=-1,
):
"""
Create a new model from raw data, like word frequencies, Brown clusters
and word vectors. If vectors are provided in Word2Vec format, they can
be either a .txt or zipped as a .zip or .tar.gz.
"""
if jsonl_loc is not None:
if freqs_loc is not None or clusters_loc is not None:
settings = ["-j"]
if freqs_loc:
settings.append("-f")
if clusters_loc:
settings.append("-c")
msg.warn(
"Incompatible arguments",
"The -f and -c arguments are deprecated, and not compatible "
"with the -j argument, which should specify the same "
"information. Either merge the frequencies and clusters data "
"into the JSONL-formatted file (recommended), or use only the "
"-f and -c files, without the other lexical attributes.",
)
jsonl_loc = ensure_path(jsonl_loc)
lex_attrs = srsly.read_jsonl(jsonl_loc)
else:
clusters_loc = ensure_path(clusters_loc)
freqs_loc = ensure_path(freqs_loc)
if freqs_loc is not None and not freqs_loc.exists():
msg.fail("Can't find words frequencies file", freqs_loc, exits=1)
lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc)
with msg.loading("Creating model..."):
nlp = create_model(lang, lex_attrs)
msg.good("Successfully created model")
if vectors_loc is not None:
add_vectors(nlp, vectors_loc, prune_vectors)
vec_added = len(nlp.vocab.vectors)
lex_added = len(nlp.vocab)
msg.good(
"Sucessfully compiled vocab",
"{} entries, {} vectors".format(lex_added, vec_added),
)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
return nlp
|
[
"def",
"init_model",
"(",
"lang",
",",
"output_dir",
",",
"freqs_loc",
"=",
"None",
",",
"clusters_loc",
"=",
"None",
",",
"jsonl_loc",
"=",
"None",
",",
"vectors_loc",
"=",
"None",
",",
"prune_vectors",
"=",
"-",
"1",
",",
")",
":",
"if",
"jsonl_loc",
"is",
"not",
"None",
":",
"if",
"freqs_loc",
"is",
"not",
"None",
"or",
"clusters_loc",
"is",
"not",
"None",
":",
"settings",
"=",
"[",
"\"-j\"",
"]",
"if",
"freqs_loc",
":",
"settings",
".",
"append",
"(",
"\"-f\"",
")",
"if",
"clusters_loc",
":",
"settings",
".",
"append",
"(",
"\"-c\"",
")",
"msg",
".",
"warn",
"(",
"\"Incompatible arguments\"",
",",
"\"The -f and -c arguments are deprecated, and not compatible \"",
"\"with the -j argument, which should specify the same \"",
"\"information. Either merge the frequencies and clusters data \"",
"\"into the JSONL-formatted file (recommended), or use only the \"",
"\"-f and -c files, without the other lexical attributes.\"",
",",
")",
"jsonl_loc",
"=",
"ensure_path",
"(",
"jsonl_loc",
")",
"lex_attrs",
"=",
"srsly",
".",
"read_jsonl",
"(",
"jsonl_loc",
")",
"else",
":",
"clusters_loc",
"=",
"ensure_path",
"(",
"clusters_loc",
")",
"freqs_loc",
"=",
"ensure_path",
"(",
"freqs_loc",
")",
"if",
"freqs_loc",
"is",
"not",
"None",
"and",
"not",
"freqs_loc",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Can't find words frequencies file\"",
",",
"freqs_loc",
",",
"exits",
"=",
"1",
")",
"lex_attrs",
"=",
"read_attrs_from_deprecated",
"(",
"freqs_loc",
",",
"clusters_loc",
")",
"with",
"msg",
".",
"loading",
"(",
"\"Creating model...\"",
")",
":",
"nlp",
"=",
"create_model",
"(",
"lang",
",",
"lex_attrs",
")",
"msg",
".",
"good",
"(",
"\"Successfully created model\"",
")",
"if",
"vectors_loc",
"is",
"not",
"None",
":",
"add_vectors",
"(",
"nlp",
",",
"vectors_loc",
",",
"prune_vectors",
")",
"vec_added",
"=",
"len",
"(",
"nlp",
".",
"vocab",
".",
"vectors",
")",
"lex_added",
"=",
"len",
"(",
"nlp",
".",
"vocab",
")",
"msg",
".",
"good",
"(",
"\"Sucessfully compiled vocab\"",
",",
"\"{} entries, {} vectors\"",
".",
"format",
"(",
"lex_added",
",",
"vec_added",
")",
",",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
")",
"nlp",
".",
"to_disk",
"(",
"output_dir",
")",
"return",
"nlp"
] |
Create a new model from raw data, like word frequencies, Brown clusters
and word vectors. If vectors are provided in Word2Vec format, they can
be either a .txt or zipped as a .zip or .tar.gz.
|
[
"Create",
"a",
"new",
"model",
"from",
"raw",
"data",
"like",
"word",
"frequencies",
"Brown",
"clusters",
"and",
"word",
"vectors",
".",
"If",
"vectors",
"are",
"provided",
"in",
"Word2Vec",
"format",
"they",
"can",
"be",
"either",
"a",
".",
"txt",
"or",
"zipped",
"as",
"a",
".",
"zip",
"or",
".",
"tar",
".",
"gz",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/init_model.py#L39-L91
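A hedged sketch of the JSONL route (the -j path in the code above): a hypothetical lexical-attributes file with "orth"/"prob" entries is written and passed as jsonl_loc. The accepted JSONL fields and the direct Python call (rather than the spacy init-model CLI) are assumptions here.

from pathlib import Path
import srsly
from spacy.cli import init_model

lex_attrs = [{"orth": "apple", "prob": -8.5}, {"orth": "banana", "prob": -9.1}]
srsly.write_jsonl("/tmp/lex_attrs.jsonl", lex_attrs)  # hypothetical path
nlp = init_model("en", Path("/tmp/vocab_model"), jsonl_loc=Path("/tmp/lex_attrs.jsonl"))
print(len(nlp.vocab))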
|
21,239
|
explosion/spaCy
|
examples/training/train_ner.py
|
main
|
def main(model=None, output_dir=None, n_iter=100):
"""Load the model, set up the pipeline and train the entity recognizer."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in TRAIN_DATA:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
# reset and initialize the weights randomly – but only if we're
# training a new model
if model is None:
nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.5, # dropout - make it harder to memorise data
losses=losses,
)
print("Losses", losses)
# test the trained model
for text, _ in TRAIN_DATA:
doc = nlp(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
for text, _ in TRAIN_DATA:
doc = nlp2(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
python
|
def main(model=None, output_dir=None, n_iter=100):
"""Load the model, set up the pipeline and train the entity recognizer."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in TRAIN_DATA:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
# reset and initialize the weights randomly – but only if we're
# training a new model
if model is None:
nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.5, # dropout - make it harder to memorise data
losses=losses,
)
print("Losses", losses)
# test the trained model
for text, _ in TRAIN_DATA:
doc = nlp(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
for text, _ in TRAIN_DATA:
doc = nlp2(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
[
"def",
"main",
"(",
"model",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"n_iter",
"=",
"100",
")",
":",
"if",
"model",
"is",
"not",
"None",
":",
"nlp",
"=",
"spacy",
".",
"load",
"(",
"model",
")",
"# load existing spaCy model",
"print",
"(",
"\"Loaded model '%s'\"",
"%",
"model",
")",
"else",
":",
"nlp",
"=",
"spacy",
".",
"blank",
"(",
"\"en\"",
")",
"# create blank Language class",
"print",
"(",
"\"Created blank 'en' model\"",
")",
"# create the built-in pipeline components and add them to the pipeline",
"# nlp.create_pipe works for built-ins that are registered with spaCy",
"if",
"\"ner\"",
"not",
"in",
"nlp",
".",
"pipe_names",
":",
"ner",
"=",
"nlp",
".",
"create_pipe",
"(",
"\"ner\"",
")",
"nlp",
".",
"add_pipe",
"(",
"ner",
",",
"last",
"=",
"True",
")",
"# otherwise, get it so we can add labels",
"else",
":",
"ner",
"=",
"nlp",
".",
"get_pipe",
"(",
"\"ner\"",
")",
"# add labels",
"for",
"_",
",",
"annotations",
"in",
"TRAIN_DATA",
":",
"for",
"ent",
"in",
"annotations",
".",
"get",
"(",
"\"entities\"",
")",
":",
"ner",
".",
"add_label",
"(",
"ent",
"[",
"2",
"]",
")",
"# get names of other pipes to disable them during training",
"other_pipes",
"=",
"[",
"pipe",
"for",
"pipe",
"in",
"nlp",
".",
"pipe_names",
"if",
"pipe",
"!=",
"\"ner\"",
"]",
"with",
"nlp",
".",
"disable_pipes",
"(",
"*",
"other_pipes",
")",
":",
"# only train NER",
"# reset and initialize the weights randomly – but only if we're",
"# training a new model",
"if",
"model",
"is",
"None",
":",
"nlp",
".",
"begin_training",
"(",
")",
"for",
"itn",
"in",
"range",
"(",
"n_iter",
")",
":",
"random",
".",
"shuffle",
"(",
"TRAIN_DATA",
")",
"losses",
"=",
"{",
"}",
"# batch up the examples using spaCy's minibatch",
"batches",
"=",
"minibatch",
"(",
"TRAIN_DATA",
",",
"size",
"=",
"compounding",
"(",
"4.0",
",",
"32.0",
",",
"1.001",
")",
")",
"for",
"batch",
"in",
"batches",
":",
"texts",
",",
"annotations",
"=",
"zip",
"(",
"*",
"batch",
")",
"nlp",
".",
"update",
"(",
"texts",
",",
"# batch of texts",
"annotations",
",",
"# batch of annotations",
"drop",
"=",
"0.5",
",",
"# dropout - make it harder to memorise data",
"losses",
"=",
"losses",
",",
")",
"print",
"(",
"\"Losses\"",
",",
"losses",
")",
"# test the trained model",
"for",
"text",
",",
"_",
"in",
"TRAIN_DATA",
":",
"doc",
"=",
"nlp",
"(",
"text",
")",
"print",
"(",
"\"Entities\"",
",",
"[",
"(",
"ent",
".",
"text",
",",
"ent",
".",
"label_",
")",
"for",
"ent",
"in",
"doc",
".",
"ents",
"]",
")",
"print",
"(",
"\"Tokens\"",
",",
"[",
"(",
"t",
".",
"text",
",",
"t",
".",
"ent_type_",
",",
"t",
".",
"ent_iob",
")",
"for",
"t",
"in",
"doc",
"]",
")",
"# save model to output directory",
"if",
"output_dir",
"is",
"not",
"None",
":",
"output_dir",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
")",
"nlp",
".",
"to_disk",
"(",
"output_dir",
")",
"print",
"(",
"\"Saved model to\"",
",",
"output_dir",
")",
"# test the saved model",
"print",
"(",
"\"Loading from\"",
",",
"output_dir",
")",
"nlp2",
"=",
"spacy",
".",
"load",
"(",
"output_dir",
")",
"for",
"text",
",",
"_",
"in",
"TRAIN_DATA",
":",
"doc",
"=",
"nlp2",
"(",
"text",
")",
"print",
"(",
"\"Entities\"",
",",
"[",
"(",
"ent",
".",
"text",
",",
"ent",
".",
"label_",
")",
"for",
"ent",
"in",
"doc",
".",
"ents",
"]",
")",
"print",
"(",
"\"Tokens\"",
",",
"[",
"(",
"t",
".",
"text",
",",
"t",
".",
"ent_type_",
",",
"t",
".",
"ent_iob",
")",
"for",
"t",
"in",
"doc",
"]",
")"
] |
Load the model, set up the pipeline and train the entity recognizer.
|
[
"Load",
"the",
"model",
"set",
"up",
"the",
"pipeline",
"and",
"train",
"the",
"entity",
"recognizer",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_ner.py#L34-L99
|
21,240
|
explosion/spaCy
|
spacy/cli/pretrain.py
|
make_update
|
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
"""Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
"""
predictions, backprop = model.begin_update(docs, drop=drop)
loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
backprop(gradients, sgd=optimizer)
# Don't want to return a cupy object here
# The gradients are modified in-place by the BERT MLM,
# so we get an accurate loss
return float(loss)
|
python
|
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
"""Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
"""
predictions, backprop = model.begin_update(docs, drop=drop)
loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
backprop(gradients, sgd=optimizer)
# Don't want to return a cupy object here
# The gradients are modified in-place by the BERT MLM,
# so we get an accurate loss
return float(loss)
|
[
"def",
"make_update",
"(",
"model",
",",
"docs",
",",
"optimizer",
",",
"drop",
"=",
"0.0",
",",
"objective",
"=",
"\"L2\"",
")",
":",
"predictions",
",",
"backprop",
"=",
"model",
".",
"begin_update",
"(",
"docs",
",",
"drop",
"=",
"drop",
")",
"loss",
",",
"gradients",
"=",
"get_vectors_loss",
"(",
"model",
".",
"ops",
",",
"docs",
",",
"predictions",
",",
"objective",
")",
"backprop",
"(",
"gradients",
",",
"sgd",
"=",
"optimizer",
")",
"# Don't want to return a cupy object here",
"# The gradients are modified in-place by the BERT MLM,",
"# so we get an accurate loss",
"return",
"float",
"(",
"loss",
")"
] |
Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
|
[
"Perform",
"an",
"update",
"over",
"a",
"single",
"batch",
"of",
"documents",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L164-L178
|
21,241
|
explosion/spaCy
|
spacy/cli/pretrain.py
|
get_vectors_loss
|
def get_vectors_loss(ops, docs, prediction, objective="L2"):
"""Compute a mean-squared error loss between the documents' vectors and
the prediction.
Note that this is ripe for customization! We could compute the vectors
    in some other way, e.g. with an LSTM language model, or use some other
type of objective.
"""
# The simplest way to implement this would be to vstack the
# token.vector values, but that's a bit inefficient, especially on GPU.
# Instead we fetch the index into the vectors table for each of our tokens,
# and look them up all at once. This prevents data copying.
ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
target = docs[0].vocab.vectors.data[ids]
if objective == "L2":
d_target = prediction - target
loss = (d_target ** 2).sum()
elif objective == "cosine":
loss, d_target = get_cossim_loss(prediction, target)
return loss, d_target
|
python
|
def get_vectors_loss(ops, docs, prediction, objective="L2"):
"""Compute a mean-squared error loss between the documents' vectors and
the prediction.
Note that this is ripe for customization! We could compute the vectors
    in some other way, e.g. with an LSTM language model, or use some other
type of objective.
"""
# The simplest way to implement this would be to vstack the
# token.vector values, but that's a bit inefficient, especially on GPU.
# Instead we fetch the index into the vectors table for each of our tokens,
# and look them up all at once. This prevents data copying.
ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
target = docs[0].vocab.vectors.data[ids]
if objective == "L2":
d_target = prediction - target
loss = (d_target ** 2).sum()
elif objective == "cosine":
loss, d_target = get_cossim_loss(prediction, target)
return loss, d_target
|
[
"def",
"get_vectors_loss",
"(",
"ops",
",",
"docs",
",",
"prediction",
",",
"objective",
"=",
"\"L2\"",
")",
":",
"# The simplest way to implement this would be to vstack the",
"# token.vector values, but that's a bit inefficient, especially on GPU.",
"# Instead we fetch the index into the vectors table for each of our tokens,",
"# and look them up all at once. This prevents data copying.",
"ids",
"=",
"ops",
".",
"flatten",
"(",
"[",
"doc",
".",
"to_array",
"(",
"ID",
")",
".",
"ravel",
"(",
")",
"for",
"doc",
"in",
"docs",
"]",
")",
"target",
"=",
"docs",
"[",
"0",
"]",
".",
"vocab",
".",
"vectors",
".",
"data",
"[",
"ids",
"]",
"if",
"objective",
"==",
"\"L2\"",
":",
"d_target",
"=",
"prediction",
"-",
"target",
"loss",
"=",
"(",
"d_target",
"**",
"2",
")",
".",
"sum",
"(",
")",
"elif",
"objective",
"==",
"\"cosine\"",
":",
"loss",
",",
"d_target",
"=",
"get_cossim_loss",
"(",
"prediction",
",",
"target",
")",
"return",
"loss",
",",
"d_target"
] |
Compute a mean-squared error loss between the documents' vectors and
the prediction.
Note that this is ripe for customization! We could compute the vectors
in some other way, e.g. with an LSTM language model, or use some other
type of objective.
|
[
"Compute",
"a",
"mean",
"-",
"squared",
"error",
"loss",
"between",
"the",
"documents",
"vectors",
"and",
"the",
"prediction",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L199-L218
|
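Note: in the "L2" branch above, the backprop signal is the raw difference between prediction and target and the reported loss is the summed squared error. A minimal NumPy sketch of just that arithmetic, independent of spaCy's vectors table (the arrays are invented for illustration):

import numpy as np

# toy stand-ins for the predicted rows and the gold vector rows
prediction = np.array([[0.5, 1.0], [2.0, -1.0]])
target = np.array([[0.0, 1.0], [1.0, -1.0]])

# same arithmetic as the "L2" branch of get_vectors_loss
d_target = prediction - target   # signal passed back through the model
loss = (d_target ** 2).sum()     # summed squared error

print(loss)      # 1.25
print(d_target)  # [[0.5 0. ] [1.  0. ]]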
21,242
|
explosion/spaCy
|
spacy/cli/pretrain.py
|
_smart_round
|
def _smart_round(figure, width=10, max_decimal=4):
"""Round large numbers as integers, smaller numbers as decimals."""
n_digits = len(str(int(figure)))
n_decimal = width - (n_digits + 1)
if n_decimal <= 1:
return str(int(figure))
else:
n_decimal = min(n_decimal, max_decimal)
format_str = "%." + str(n_decimal) + "f"
return format_str % figure
|
python
|
def _smart_round(figure, width=10, max_decimal=4):
"""Round large numbers as integers, smaller numbers as decimals."""
n_digits = len(str(int(figure)))
n_decimal = width - (n_digits + 1)
if n_decimal <= 1:
return str(int(figure))
else:
n_decimal = min(n_decimal, max_decimal)
format_str = "%." + str(n_decimal) + "f"
return format_str % figure
|
[
"def",
"_smart_round",
"(",
"figure",
",",
"width",
"=",
"10",
",",
"max_decimal",
"=",
"4",
")",
":",
"n_digits",
"=",
"len",
"(",
"str",
"(",
"int",
"(",
"figure",
")",
")",
")",
"n_decimal",
"=",
"width",
"-",
"(",
"n_digits",
"+",
"1",
")",
"if",
"n_decimal",
"<=",
"1",
":",
"return",
"str",
"(",
"int",
"(",
"figure",
")",
")",
"else",
":",
"n_decimal",
"=",
"min",
"(",
"n_decimal",
",",
"max_decimal",
")",
"format_str",
"=",
"\"%.\"",
"+",
"str",
"(",
"n_decimal",
")",
"+",
"\"f\"",
"return",
"format_str",
"%",
"figure"
] |
Round large numbers as integers, smaller numbers as decimals.
|
[
"Round",
"large",
"numbers",
"as",
"integers",
"smaller",
"numbers",
"as",
"decimals",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L295-L304
|
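Note: a few direct calls make the width/decimal trade-off concrete; the outputs follow from the arithmetic in the function above (the function is private to spacy.cli.pretrain, so this sketch assumes it has been imported or pasted into scope):

print(_smart_round(123456789.123))  # '123456789'  -> 9 integer digits leave no room for decimals
print(_smart_round(1234.5678))      # '1234.5678'  -> 4 decimals still fit in the default width of 10
print(_smart_round(0.123456))       # '0.1235'     -> capped at max_decimal=4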
21,243
|
explosion/spaCy
|
spacy/lang/el/syntax_iterators.py
|
noun_chunks
|
def noun_chunks(obj):
"""
Detect base noun phrases. Works on both Doc and Span.
"""
# It follows the logic of the noun chunks finder of English language,
# adjusted to some Greek language special characteristics.
# obj tag corrects some DEP tagger mistakes.
# Further improvement of the models will eliminate the need for this tag.
labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"]
doc = obj.doc # Ensure works on both Doc and Span.
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
nmod = doc.vocab.strings.add("nmod")
np_label = doc.vocab.strings.add("NP")
seen = set()
for i, word in enumerate(obj):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.i in seen:
continue
if word.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
flag = False
if word.pos == NOUN:
# check for patterns such as γραμμή παραγωγής
for potential_nmod in word.rights:
if potential_nmod.dep == nmod:
seen.update(
j for j in range(word.left_edge.i, potential_nmod.i + 1)
)
yield word.left_edge.i, potential_nmod.i + 1, np_label
flag = True
break
if flag is False:
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
elif word.dep == conj:
# covers the case: έχει όμορφα και έξυπνα παιδιά
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
|
python
|
def noun_chunks(obj):
"""
Detect base noun phrases. Works on both Doc and Span.
"""
# It follows the logic of the noun chunks finder of English language,
# adjusted to some Greek language special characteristics.
# obj tag corrects some DEP tagger mistakes.
# Further improvement of the models will eliminate the need for this tag.
labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"]
doc = obj.doc # Ensure works on both Doc and Span.
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
nmod = doc.vocab.strings.add("nmod")
np_label = doc.vocab.strings.add("NP")
seen = set()
for i, word in enumerate(obj):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.i in seen:
continue
if word.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
flag = False
if word.pos == NOUN:
# check for patterns such as γραμμή παραγωγής
for potential_nmod in word.rights:
if potential_nmod.dep == nmod:
seen.update(
j for j in range(word.left_edge.i, potential_nmod.i + 1)
)
yield word.left_edge.i, potential_nmod.i + 1, np_label
flag = True
break
if flag is False:
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
elif word.dep == conj:
# covers the case: έχει όμορφα και έξυπνα παιδιά
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
|
[
"def",
"noun_chunks",
"(",
"obj",
")",
":",
"# It follows the logic of the noun chunks finder of English language,",
"# adjusted to some Greek language special characteristics.",
"# obj tag corrects some DEP tagger mistakes.",
"# Further improvement of the models will eliminate the need for this tag.",
"labels",
"=",
"[",
"\"nsubj\"",
",",
"\"obj\"",
",",
"\"iobj\"",
",",
"\"appos\"",
",",
"\"ROOT\"",
",",
"\"obl\"",
"]",
"doc",
"=",
"obj",
".",
"doc",
"# Ensure works on both Doc and Span.",
"np_deps",
"=",
"[",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
"]",
"conj",
"=",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"\"conj\"",
")",
"nmod",
"=",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"\"nmod\"",
")",
"np_label",
"=",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"\"NP\"",
")",
"seen",
"=",
"set",
"(",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"obj",
")",
":",
"if",
"word",
".",
"pos",
"not",
"in",
"(",
"NOUN",
",",
"PROPN",
",",
"PRON",
")",
":",
"continue",
"# Prevent nested chunks from being produced",
"if",
"word",
".",
"i",
"in",
"seen",
":",
"continue",
"if",
"word",
".",
"dep",
"in",
"np_deps",
":",
"if",
"any",
"(",
"w",
".",
"i",
"in",
"seen",
"for",
"w",
"in",
"word",
".",
"subtree",
")",
":",
"continue",
"flag",
"=",
"False",
"if",
"word",
".",
"pos",
"==",
"NOUN",
":",
"# check for patterns such as γραμμή παραγωγής",
"for",
"potential_nmod",
"in",
"word",
".",
"rights",
":",
"if",
"potential_nmod",
".",
"dep",
"==",
"nmod",
":",
"seen",
".",
"update",
"(",
"j",
"for",
"j",
"in",
"range",
"(",
"word",
".",
"left_edge",
".",
"i",
",",
"potential_nmod",
".",
"i",
"+",
"1",
")",
")",
"yield",
"word",
".",
"left_edge",
".",
"i",
",",
"potential_nmod",
".",
"i",
"+",
"1",
",",
"np_label",
"flag",
"=",
"True",
"break",
"if",
"flag",
"is",
"False",
":",
"seen",
".",
"update",
"(",
"j",
"for",
"j",
"in",
"range",
"(",
"word",
".",
"left_edge",
".",
"i",
",",
"word",
".",
"i",
"+",
"1",
")",
")",
"yield",
"word",
".",
"left_edge",
".",
"i",
",",
"word",
".",
"i",
"+",
"1",
",",
"np_label",
"elif",
"word",
".",
"dep",
"==",
"conj",
":",
"# covers the case: έχει όμορφα και έξυπνα παιδιά",
"head",
"=",
"word",
".",
"head",
"while",
"head",
".",
"dep",
"==",
"conj",
"and",
"head",
".",
"head",
".",
"i",
"<",
"head",
".",
"i",
":",
"head",
"=",
"head",
".",
"head",
"# If the head is an NP, and we're coordinated to it, we're an NP",
"if",
"head",
".",
"dep",
"in",
"np_deps",
":",
"if",
"any",
"(",
"w",
".",
"i",
"in",
"seen",
"for",
"w",
"in",
"word",
".",
"subtree",
")",
":",
"continue",
"seen",
".",
"update",
"(",
"j",
"for",
"j",
"in",
"range",
"(",
"word",
".",
"left_edge",
".",
"i",
",",
"word",
".",
"i",
"+",
"1",
")",
")",
"yield",
"word",
".",
"left_edge",
".",
"i",
",",
"word",
".",
"i",
"+",
"1",
",",
"np_label"
] |
Detect base noun phrases. Works on both Doc and Span.
|
[
"Detect",
"base",
"noun",
"phrases",
".",
"Works",
"on",
"both",
"Doc",
"and",
"Span",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/el/syntax_iterators.py#L7-L55
|
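Note: in normal use this iterator is not called directly; it is wired up as the Greek syntax iterator and reached through Doc.noun_chunks. A hedged usage sketch, assuming a Greek model with a parser (here el_core_news_sm) is installed:

import spacy

nlp = spacy.load("el_core_news_sm")  # assumption: Greek model with a parser is installed
doc = nlp("Η γραμμή παραγωγής λειτουργεί κανονικά.")
for chunk in doc.noun_chunks:
    # "γραμμή παραγωγής" should surface as one chunk via the nmod extension above
    print(chunk.text, chunk.root.dep_)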
21,244
|
explosion/spaCy
|
spacy/tokens/underscore.py
|
get_ext_args
|
def get_ext_args(**kwargs):
"""Validate and convert arguments. Reused in Doc, Token and Span."""
default = kwargs.get("default")
getter = kwargs.get("getter")
setter = kwargs.get("setter")
method = kwargs.get("method")
if getter is None and setter is not None:
raise ValueError(Errors.E089)
valid_opts = ("default" in kwargs, method is not None, getter is not None)
nr_defined = sum(t is True for t in valid_opts)
if nr_defined != 1:
raise ValueError(Errors.E083.format(nr_defined=nr_defined))
if setter is not None and not hasattr(setter, "__call__"):
raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
if getter is not None and not hasattr(getter, "__call__"):
raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
if method is not None and not hasattr(method, "__call__"):
raise ValueError(Errors.E091.format(name="method", value=repr(method)))
return (default, method, getter, setter)
|
python
|
def get_ext_args(**kwargs):
"""Validate and convert arguments. Reused in Doc, Token and Span."""
default = kwargs.get("default")
getter = kwargs.get("getter")
setter = kwargs.get("setter")
method = kwargs.get("method")
if getter is None and setter is not None:
raise ValueError(Errors.E089)
valid_opts = ("default" in kwargs, method is not None, getter is not None)
nr_defined = sum(t is True for t in valid_opts)
if nr_defined != 1:
raise ValueError(Errors.E083.format(nr_defined=nr_defined))
if setter is not None and not hasattr(setter, "__call__"):
raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
if getter is not None and not hasattr(getter, "__call__"):
raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
if method is not None and not hasattr(method, "__call__"):
raise ValueError(Errors.E091.format(name="method", value=repr(method)))
return (default, method, getter, setter)
|
[
"def",
"get_ext_args",
"(",
"*",
"*",
"kwargs",
")",
":",
"default",
"=",
"kwargs",
".",
"get",
"(",
"\"default\"",
")",
"getter",
"=",
"kwargs",
".",
"get",
"(",
"\"getter\"",
")",
"setter",
"=",
"kwargs",
".",
"get",
"(",
"\"setter\"",
")",
"method",
"=",
"kwargs",
".",
"get",
"(",
"\"method\"",
")",
"if",
"getter",
"is",
"None",
"and",
"setter",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E089",
")",
"valid_opts",
"=",
"(",
"\"default\"",
"in",
"kwargs",
",",
"method",
"is",
"not",
"None",
",",
"getter",
"is",
"not",
"None",
")",
"nr_defined",
"=",
"sum",
"(",
"t",
"is",
"True",
"for",
"t",
"in",
"valid_opts",
")",
"if",
"nr_defined",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E083",
".",
"format",
"(",
"nr_defined",
"=",
"nr_defined",
")",
")",
"if",
"setter",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"setter",
",",
"\"__call__\"",
")",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E091",
".",
"format",
"(",
"name",
"=",
"\"setter\"",
",",
"value",
"=",
"repr",
"(",
"setter",
")",
")",
")",
"if",
"getter",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"getter",
",",
"\"__call__\"",
")",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E091",
".",
"format",
"(",
"name",
"=",
"\"getter\"",
",",
"value",
"=",
"repr",
"(",
"getter",
")",
")",
")",
"if",
"method",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"method",
",",
"\"__call__\"",
")",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E091",
".",
"format",
"(",
"name",
"=",
"\"method\"",
",",
"value",
"=",
"repr",
"(",
"method",
")",
")",
")",
"return",
"(",
"default",
",",
"method",
",",
"getter",
",",
"setter",
")"
] |
Validate and convert arguments. Reused in Doc, Token and Span.
|
[
"Validate",
"and",
"convert",
"arguments",
".",
"Reused",
"in",
"Doc",
"Token",
"and",
"Span",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/underscore.py#L69-L87
|
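Note: this validation normally runs indirectly whenever Doc.set_extension, Token.set_extension or Span.set_extension is called, but the rules are easiest to see with direct calls. A small sketch using the module path given above:

from spacy.tokens.underscore import get_ext_args

# exactly one of default / method / getter may be defined
default, method, getter, setter = get_ext_args(default=False)
print(default, method, getter, setter)   # False None None None

default, method, getter, setter = get_ext_args(getter=lambda obj: obj.text.lower())
print(getter is not None, setter)        # True None

try:
    get_ext_args()                       # nothing defined -> E083
except ValueError as err:
    print("rejected:", err)

try:
    get_ext_args(default=None, setter=lambda obj, value: None)  # setter without getter -> E089
except ValueError as err:
    print("rejected:", err)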
21,245
|
explosion/spaCy
|
setup.py
|
is_new_osx
|
def is_new_osx():
"""Check whether we're on OSX >= 10.10"""
name = distutils.util.get_platform()
if sys.platform != "darwin":
return False
elif name.startswith("macosx-10"):
minor_version = int(name.split("-")[1].split(".")[1])
if minor_version >= 7:
return True
else:
return False
else:
return False
|
python
|
def is_new_osx():
"""Check whether we're on OSX >= 10.10"""
name = distutils.util.get_platform()
if sys.platform != "darwin":
return False
elif name.startswith("macosx-10"):
minor_version = int(name.split("-")[1].split(".")[1])
if minor_version >= 7:
return True
else:
return False
else:
return False
|
[
"def",
"is_new_osx",
"(",
")",
":",
"name",
"=",
"distutils",
".",
"util",
".",
"get_platform",
"(",
")",
"if",
"sys",
".",
"platform",
"!=",
"\"darwin\"",
":",
"return",
"False",
"elif",
"name",
".",
"startswith",
"(",
"\"macosx-10\"",
")",
":",
"minor_version",
"=",
"int",
"(",
"name",
".",
"split",
"(",
"\"-\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"1",
"]",
")",
"if",
"minor_version",
">=",
"7",
":",
"return",
"True",
"else",
":",
"return",
"False",
"else",
":",
"return",
"False"
] |
Check whether we're on OSX >= 10.10
|
[
"Check",
"whether",
"we",
"re",
"on",
"OSX",
">",
"=",
"10",
".",
"10"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/setup.py#L15-L27
|
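Note: the decision hinges on parsing the platform string from distutils.util.get_platform(); only darwin platforms whose "macosx-10.<minor>" tag has a minor version of 7 or higher return True (the comparison is against 7, not the 10.10 mentioned in the docstring). The parsing step can be checked on canned strings (the example strings are made up):

def minor_from_platform(name):
    # mirrors the parsing inside is_new_osx for "macosx-10.X-arch" strings
    return int(name.split("-")[1].split(".")[1])

print(minor_from_platform("macosx-10.9-x86_64"))   # 9  -> treated as "new" OSX
print(minor_from_platform("macosx-10.14-x86_64"))  # 14 -> treated as "new" OSX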
21,246
|
explosion/spaCy
|
examples/training/ner_multitask_objective.py
|
get_position_label
|
def get_position_label(i, words, tags, heads, labels, ents):
"""Return labels indicating the position of the word in the document.
"""
if len(words) < 20:
return "short-doc"
elif i == 0:
return "first-word"
elif i < 10:
return "early-word"
elif i < 20:
return "mid-word"
elif i == len(words) - 1:
return "last-word"
else:
return "late-word"
|
python
|
def get_position_label(i, words, tags, heads, labels, ents):
"""Return labels indicating the position of the word in the document.
"""
if len(words) < 20:
return "short-doc"
elif i == 0:
return "first-word"
elif i < 10:
return "early-word"
elif i < 20:
return "mid-word"
elif i == len(words) - 1:
return "last-word"
else:
return "late-word"
|
[
"def",
"get_position_label",
"(",
"i",
",",
"words",
",",
"tags",
",",
"heads",
",",
"labels",
",",
"ents",
")",
":",
"if",
"len",
"(",
"words",
")",
"<",
"20",
":",
"return",
"\"short-doc\"",
"elif",
"i",
"==",
"0",
":",
"return",
"\"first-word\"",
"elif",
"i",
"<",
"10",
":",
"return",
"\"early-word\"",
"elif",
"i",
"<",
"20",
":",
"return",
"\"mid-word\"",
"elif",
"i",
"==",
"len",
"(",
"words",
")",
"-",
"1",
":",
"return",
"\"last-word\"",
"else",
":",
"return",
"\"late-word\""
] |
Return labels indicating the position of the word in the document.
|
[
"Return",
"labels",
"indicating",
"the",
"position",
"of",
"the",
"word",
"in",
"the",
"document",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/ner_multitask_objective.py#L36-L50
|
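Note: the branches are easiest to confirm with a word list of at least 20 items; the extra arguments are unused by this label function, so None placeholders suffice (assuming get_position_label from the example script is in scope):

words = ["w%d" % i for i in range(25)]

print(get_position_label(0, words, None, None, None, None))       # 'first-word'
print(get_position_label(5, words, None, None, None, None))       # 'early-word'
print(get_position_label(15, words, None, None, None, None))      # 'mid-word'
print(get_position_label(24, words, None, None, None, None))      # 'last-word'
print(get_position_label(22, words, None, None, None, None))      # 'late-word'
print(get_position_label(3, words[:10], None, None, None, None))  # 'short-doc'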
21,247
|
explosion/spaCy
|
bin/ud/run_eval.py
|
load_model
|
def load_model(modelname, add_sentencizer=False):
""" Load a specific spaCy model """
loading_start = time.time()
nlp = spacy.load(modelname)
if add_sentencizer:
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
if add_sentencizer:
return nlp, loading_time, modelname + '_sentencizer'
return nlp, loading_time, modelname
|
python
|
def load_model(modelname, add_sentencizer=False):
""" Load a specific spaCy model """
loading_start = time.time()
nlp = spacy.load(modelname)
if add_sentencizer:
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
if add_sentencizer:
return nlp, loading_time, modelname + '_sentencizer'
return nlp, loading_time, modelname
|
[
"def",
"load_model",
"(",
"modelname",
",",
"add_sentencizer",
"=",
"False",
")",
":",
"loading_start",
"=",
"time",
".",
"time",
"(",
")",
"nlp",
"=",
"spacy",
".",
"load",
"(",
"modelname",
")",
"if",
"add_sentencizer",
":",
"nlp",
".",
"add_pipe",
"(",
"nlp",
".",
"create_pipe",
"(",
"'sentencizer'",
")",
")",
"loading_end",
"=",
"time",
".",
"time",
"(",
")",
"loading_time",
"=",
"loading_end",
"-",
"loading_start",
"if",
"add_sentencizer",
":",
"return",
"nlp",
",",
"loading_time",
",",
"modelname",
"+",
"'_sentencizer'",
"return",
"nlp",
",",
"loading_time",
",",
"modelname"
] |
Load a specific spaCy model
|
[
"Load",
"a",
"specific",
"spaCy",
"model"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L34-L44
|
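Note: a hedged usage sketch; it assumes the named model package (here en_core_web_sm) is installed, since spacy.load raises otherwise:

nlp, load_time, name = load_model("en_core_web_sm", add_sentencizer=True)
print(name)                        # 'en_core_web_sm_sentencizer'
print("loaded in %.2fs" % load_time)
print(nlp.pipe_names)              # the sentencizer is appended at the end of the pipeline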
21,248
|
explosion/spaCy
|
bin/ud/run_eval.py
|
load_default_model_sentencizer
|
def load_default_model_sentencizer(lang):
""" Load a generic spaCy model and add the sentencizer for sentence tokenization"""
loading_start = time.time()
lang_class = get_lang_class(lang)
nlp = lang_class()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
return nlp, loading_time, lang + "_default_" + 'sentencizer'
|
python
|
def load_default_model_sentencizer(lang):
""" Load a generic spaCy model and add the sentencizer for sentence tokenization"""
loading_start = time.time()
lang_class = get_lang_class(lang)
nlp = lang_class()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
return nlp, loading_time, lang + "_default_" + 'sentencizer'
|
[
"def",
"load_default_model_sentencizer",
"(",
"lang",
")",
":",
"loading_start",
"=",
"time",
".",
"time",
"(",
")",
"lang_class",
"=",
"get_lang_class",
"(",
"lang",
")",
"nlp",
"=",
"lang_class",
"(",
")",
"nlp",
".",
"add_pipe",
"(",
"nlp",
".",
"create_pipe",
"(",
"'sentencizer'",
")",
")",
"loading_end",
"=",
"time",
".",
"time",
"(",
")",
"loading_time",
"=",
"loading_end",
"-",
"loading_start",
"return",
"nlp",
",",
"loading_time",
",",
"lang",
"+",
"\"_default_\"",
"+",
"'sentencizer'"
] |
Load a generic spaCy model and add the sentencizer for sentence tokenization
|
[
"Load",
"a",
"generic",
"spaCy",
"model",
"and",
"add",
"the",
"sentencizer",
"for",
"sentence",
"tokenization"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L47-L55
|
21,249
|
explosion/spaCy
|
bin/ud/run_eval.py
|
get_freq_tuples
|
def get_freq_tuples(my_list, print_total_threshold):
""" Turn a list of errors into frequency-sorted tuples thresholded by a certain total number """
d = {}
for token in my_list:
d.setdefault(token, 0)
d[token] += 1
return sorted(d.items(), key=operator.itemgetter(1), reverse=True)[:print_total_threshold]
|
python
|
def get_freq_tuples(my_list, print_total_threshold):
""" Turn a list of errors into frequency-sorted tuples thresholded by a certain total number """
d = {}
for token in my_list:
d.setdefault(token, 0)
d[token] += 1
return sorted(d.items(), key=operator.itemgetter(1), reverse=True)[:print_total_threshold]
|
[
"def",
"get_freq_tuples",
"(",
"my_list",
",",
"print_total_threshold",
")",
":",
"d",
"=",
"{",
"}",
"for",
"token",
"in",
"my_list",
":",
"d",
".",
"setdefault",
"(",
"token",
",",
"0",
")",
"d",
"[",
"token",
"]",
"+=",
"1",
"return",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"[",
":",
"print_total_threshold",
"]"
] |
Turn a list of errors into frequency-sorted tuples thresholded by a certain total number
|
[
"Turn",
"a",
"list",
"of",
"errors",
"into",
"frequency",
"-",
"sorted",
"tuples",
"thresholded",
"by",
"a",
"certain",
"total",
"number"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L62-L68
|
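Note: for example, counting a small list of error tokens and keeping the two most frequent:

errors = ["its", "don't", "its", "can't", "its", "don't"]
print(get_freq_tuples(errors, 2))  # [('its', 3), ("don't", 2)]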
21,250
|
explosion/spaCy
|
bin/ud/run_eval.py
|
_contains_blinded_text
|
def _contains_blinded_text(stats_xml):
""" Heuristic to determine whether the treebank has blinded texts or not """
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
# assume the corpus is largely blinded when there are less than 1% unique tokens
return (unique_lemmas / total_tokens) < 0.01
|
python
|
def _contains_blinded_text(stats_xml):
""" Heuristic to determine whether the treebank has blinded texts or not """
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
# assume the corpus is largely blinded when there are less than 1% unique tokens
return (unique_lemmas / total_tokens) < 0.01
|
[
"def",
"_contains_blinded_text",
"(",
"stats_xml",
")",
":",
"tree",
"=",
"ET",
".",
"parse",
"(",
"stats_xml",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"total_tokens",
"=",
"int",
"(",
"root",
".",
"find",
"(",
"'size/total/tokens'",
")",
".",
"text",
")",
"unique_lemmas",
"=",
"int",
"(",
"root",
".",
"find",
"(",
"'lemmas'",
")",
".",
"get",
"(",
"'unique'",
")",
")",
"# assume the corpus is largely blinded when there are less than 1% unique tokens",
"return",
"(",
"unique_lemmas",
"/",
"total_tokens",
")",
"<",
"0.01"
] |
Heuristic to determine whether the treebank has blinded texts or not
|
[
"Heuristic",
"to",
"determine",
"whether",
"the",
"treebank",
"has",
"blinded",
"texts",
"or",
"not"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L71-L79
|
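Note: ET.parse accepts a file object as well as a path, so the heuristic can be exercised on an in-memory stats.xml. The XML below is a made-up fragment containing only the two fields the function reads:

import io

fake_stats = io.StringIO(
    '<treebank>'
    '  <size><total><tokens>1000</tokens></total></size>'
    '  <lemmas unique="5"/>'
    '</treebank>'
)
print(_contains_blinded_text(fake_stats))  # True  (5 / 1000 = 0.005 < 0.01)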
21,251
|
explosion/spaCy
|
bin/ud/run_eval.py
|
fetch_all_treebanks
|
def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
"""" Fetch the txt files for all treebanks for a given set of languages """
all_treebanks = dict()
treebank_size = dict()
for l in languages:
all_treebanks[l] = []
treebank_size[l] = 0
for treebank_dir in ud_dir.iterdir():
if treebank_dir.is_dir():
for txt_path in treebank_dir.iterdir():
if txt_path.name.endswith('-ud-' + corpus + '.txt'):
file_lang = txt_path.name.split('_')[0]
if file_lang in languages:
gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu')
stats_xml = treebank_dir / "stats.xml"
# ignore treebanks where the texts are not publicly available
if not _contains_blinded_text(stats_xml):
if not best_per_language:
all_treebanks[file_lang].append(txt_path)
# check the tokens in the gold annotation to keep only the biggest treebank per language
else:
with gold_path.open(mode='r', encoding='utf-8') as gold_file:
gold_ud = conll17_ud_eval.load_conllu(gold_file)
gold_tokens = len(gold_ud.tokens)
if treebank_size[file_lang] < gold_tokens:
all_treebanks[file_lang] = [txt_path]
treebank_size[file_lang] = gold_tokens
return all_treebanks
|
python
|
def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
"""" Fetch the txt files for all treebanks for a given set of languages """
all_treebanks = dict()
treebank_size = dict()
for l in languages:
all_treebanks[l] = []
treebank_size[l] = 0
for treebank_dir in ud_dir.iterdir():
if treebank_dir.is_dir():
for txt_path in treebank_dir.iterdir():
if txt_path.name.endswith('-ud-' + corpus + '.txt'):
file_lang = txt_path.name.split('_')[0]
if file_lang in languages:
gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu')
stats_xml = treebank_dir / "stats.xml"
# ignore treebanks where the texts are not publicly available
if not _contains_blinded_text(stats_xml):
if not best_per_language:
all_treebanks[file_lang].append(txt_path)
# check the tokens in the gold annotation to keep only the biggest treebank per language
else:
with gold_path.open(mode='r', encoding='utf-8') as gold_file:
gold_ud = conll17_ud_eval.load_conllu(gold_file)
gold_tokens = len(gold_ud.tokens)
if treebank_size[file_lang] < gold_tokens:
all_treebanks[file_lang] = [txt_path]
treebank_size[file_lang] = gold_tokens
return all_treebanks
|
[
"def",
"fetch_all_treebanks",
"(",
"ud_dir",
",",
"languages",
",",
"corpus",
",",
"best_per_language",
")",
":",
"all_treebanks",
"=",
"dict",
"(",
")",
"treebank_size",
"=",
"dict",
"(",
")",
"for",
"l",
"in",
"languages",
":",
"all_treebanks",
"[",
"l",
"]",
"=",
"[",
"]",
"treebank_size",
"[",
"l",
"]",
"=",
"0",
"for",
"treebank_dir",
"in",
"ud_dir",
".",
"iterdir",
"(",
")",
":",
"if",
"treebank_dir",
".",
"is_dir",
"(",
")",
":",
"for",
"txt_path",
"in",
"treebank_dir",
".",
"iterdir",
"(",
")",
":",
"if",
"txt_path",
".",
"name",
".",
"endswith",
"(",
"'-ud-'",
"+",
"corpus",
"+",
"'.txt'",
")",
":",
"file_lang",
"=",
"txt_path",
".",
"name",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"if",
"file_lang",
"in",
"languages",
":",
"gold_path",
"=",
"treebank_dir",
"/",
"txt_path",
".",
"name",
".",
"replace",
"(",
"'.txt'",
",",
"'.conllu'",
")",
"stats_xml",
"=",
"treebank_dir",
"/",
"\"stats.xml\"",
"# ignore treebanks where the texts are not publicly available",
"if",
"not",
"_contains_blinded_text",
"(",
"stats_xml",
")",
":",
"if",
"not",
"best_per_language",
":",
"all_treebanks",
"[",
"file_lang",
"]",
".",
"append",
"(",
"txt_path",
")",
"# check the tokens in the gold annotation to keep only the biggest treebank per language",
"else",
":",
"with",
"gold_path",
".",
"open",
"(",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"gold_file",
":",
"gold_ud",
"=",
"conll17_ud_eval",
".",
"load_conllu",
"(",
"gold_file",
")",
"gold_tokens",
"=",
"len",
"(",
"gold_ud",
".",
"tokens",
")",
"if",
"treebank_size",
"[",
"file_lang",
"]",
"<",
"gold_tokens",
":",
"all_treebanks",
"[",
"file_lang",
"]",
"=",
"[",
"txt_path",
"]",
"treebank_size",
"[",
"file_lang",
"]",
"=",
"gold_tokens",
"return",
"all_treebanks"
] |
Fetch the txt files for all treebanks for a given set of languages
|
[
"Fetch",
"the",
"txt",
"files",
"for",
"all",
"treebanks",
"for",
"a",
"given",
"set",
"of",
"languages"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L82-L111
|
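Note: a hedged usage sketch; it assumes a local Universal Dependencies release unpacked with the usual per-treebank directory layout, and reuses the corpus naming convention ('-ud-train.txt' etc.) relied on above:

from pathlib import Path

ud_dir = Path("/data/ud-treebanks-v2.3")   # assumption: local UD release
treebanks = fetch_all_treebanks(ud_dir, ["en", "nl"], "train", best_per_language=True)
for lang, paths in treebanks.items():
    print(lang, [p.name for p in paths])   # at most one .txt file per language in this mode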
21,252
|
explosion/spaCy
|
bin/ud/run_eval.py
|
run_all_evals
|
def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
"""" Run an evaluation for each language with its specified models and treebanks """
print_header = True
for tb_lang, treebank_list in treebanks.items():
print()
print("Language", tb_lang)
for text_path in treebank_list:
print(" Evaluating on", text_path)
gold_path = text_path.parent / (text_path.stem + '.conllu')
print(" Gold data from ", gold_path)
# nested try blocks to ensure the code can continue with the next iteration after a failure
try:
with gold_path.open(mode='r', encoding='utf-8') as gold_file:
gold_ud = conll17_ud_eval.load_conllu(gold_file)
for nlp, nlp_loading_time, nlp_name in models[tb_lang]:
try:
print(" Benchmarking", nlp_name)
tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu')
run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file,
print_header, check_parse, print_freq_tasks)
print_header = False
except Exception as e:
print(" Ran into trouble: ", str(e))
except Exception as e:
print(" Ran into trouble: ", str(e))
|
python
|
def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
"""" Run an evaluation for each language with its specified models and treebanks """
print_header = True
for tb_lang, treebank_list in treebanks.items():
print()
print("Language", tb_lang)
for text_path in treebank_list:
print(" Evaluating on", text_path)
gold_path = text_path.parent / (text_path.stem + '.conllu')
print(" Gold data from ", gold_path)
# nested try blocks to ensure the code can continue with the next iteration after a failure
try:
with gold_path.open(mode='r', encoding='utf-8') as gold_file:
gold_ud = conll17_ud_eval.load_conllu(gold_file)
for nlp, nlp_loading_time, nlp_name in models[tb_lang]:
try:
print(" Benchmarking", nlp_name)
tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu')
run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file,
print_header, check_parse, print_freq_tasks)
print_header = False
except Exception as e:
print(" Ran into trouble: ", str(e))
except Exception as e:
print(" Ran into trouble: ", str(e))
|
[
"def",
"run_all_evals",
"(",
"models",
",",
"treebanks",
",",
"out_file",
",",
"check_parse",
",",
"print_freq_tasks",
")",
":",
"print_header",
"=",
"True",
"for",
"tb_lang",
",",
"treebank_list",
"in",
"treebanks",
".",
"items",
"(",
")",
":",
"print",
"(",
")",
"print",
"(",
"\"Language\"",
",",
"tb_lang",
")",
"for",
"text_path",
"in",
"treebank_list",
":",
"print",
"(",
"\" Evaluating on\"",
",",
"text_path",
")",
"gold_path",
"=",
"text_path",
".",
"parent",
"/",
"(",
"text_path",
".",
"stem",
"+",
"'.conllu'",
")",
"print",
"(",
"\" Gold data from \"",
",",
"gold_path",
")",
"# nested try blocks to ensure the code can continue with the next iteration after a failure",
"try",
":",
"with",
"gold_path",
".",
"open",
"(",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"gold_file",
":",
"gold_ud",
"=",
"conll17_ud_eval",
".",
"load_conllu",
"(",
"gold_file",
")",
"for",
"nlp",
",",
"nlp_loading_time",
",",
"nlp_name",
"in",
"models",
"[",
"tb_lang",
"]",
":",
"try",
":",
"print",
"(",
"\" Benchmarking\"",
",",
"nlp_name",
")",
"tmp_output_path",
"=",
"text_path",
".",
"parent",
"/",
"str",
"(",
"'tmp_'",
"+",
"nlp_name",
"+",
"'.conllu'",
")",
"run_single_eval",
"(",
"nlp",
",",
"nlp_loading_time",
",",
"nlp_name",
",",
"text_path",
",",
"gold_ud",
",",
"tmp_output_path",
",",
"out_file",
",",
"print_header",
",",
"check_parse",
",",
"print_freq_tasks",
")",
"print_header",
"=",
"False",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\" Ran into trouble: \"",
",",
"str",
"(",
"e",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\" Ran into trouble: \"",
",",
"str",
"(",
"e",
")",
")"
] |
Run an evaluation for each language with its specified models and treebanks
|
[
"Run",
"an",
"evaluation",
"for",
"each",
"language",
"with",
"its",
"specified",
"models",
"and",
"treebanks"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L184-L212
|
21,253
|
explosion/spaCy
|
bin/ud/run_eval.py
|
main
|
def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False,
hide_freq=False, corpus='train', best_per_language=False):
""""
Assemble all treebanks and models to run evaluations with.
When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality
"""
languages = [lang.strip() for lang in langs.split(",")]
print_freq_tasks = []
if not hide_freq:
print_freq_tasks = ['Tokens']
# fetching all relevant treebank from the directory
treebanks = fetch_all_treebanks(ud_dir, languages, corpus, best_per_language)
print()
print("Loading all relevant models for", languages)
models = dict()
# multi-lang model
multi = None
if not exclude_multi and not check_parse:
multi = load_model('xx_ent_wiki_sm', add_sentencizer=True)
# initialize all models with the multi-lang model
for lang in languages:
models[lang] = [multi] if multi else []
# add default models if we don't want to evaluate parsing info
if not check_parse:
# Norwegian is 'nb' in spaCy but 'no' in the UD corpora
if lang == 'no':
models['no'].append(load_default_model_sentencizer('nb'))
else:
models[lang].append(load_default_model_sentencizer(lang))
# language-specific trained models
if not exclude_trained_models:
if 'de' in models:
models['de'].append(load_model('de_core_news_sm'))
if 'es' in models:
models['es'].append(load_model('es_core_news_sm'))
models['es'].append(load_model('es_core_news_md'))
if 'pt' in models:
models['pt'].append(load_model('pt_core_news_sm'))
if 'it' in models:
models['it'].append(load_model('it_core_news_sm'))
if 'nl' in models:
models['nl'].append(load_model('nl_core_news_sm'))
if 'en' in models:
models['en'].append(load_model('en_core_web_sm'))
models['en'].append(load_model('en_core_web_md'))
models['en'].append(load_model('en_core_web_lg'))
if 'fr' in models:
models['fr'].append(load_model('fr_core_news_sm'))
models['fr'].append(load_model('fr_core_news_md'))
with out_path.open(mode='w', encoding='utf-8') as out_file:
run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks)
|
python
|
def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False,
hide_freq=False, corpus='train', best_per_language=False):
""""
Assemble all treebanks and models to run evaluations with.
When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality
"""
languages = [lang.strip() for lang in langs.split(",")]
print_freq_tasks = []
if not hide_freq:
print_freq_tasks = ['Tokens']
# fetching all relevant treebank from the directory
treebanks = fetch_all_treebanks(ud_dir, languages, corpus, best_per_language)
print()
print("Loading all relevant models for", languages)
models = dict()
# multi-lang model
multi = None
if not exclude_multi and not check_parse:
multi = load_model('xx_ent_wiki_sm', add_sentencizer=True)
# initialize all models with the multi-lang model
for lang in languages:
models[lang] = [multi] if multi else []
# add default models if we don't want to evaluate parsing info
if not check_parse:
# Norwegian is 'nb' in spaCy but 'no' in the UD corpora
if lang == 'no':
models['no'].append(load_default_model_sentencizer('nb'))
else:
models[lang].append(load_default_model_sentencizer(lang))
# language-specific trained models
if not exclude_trained_models:
if 'de' in models:
models['de'].append(load_model('de_core_news_sm'))
if 'es' in models:
models['es'].append(load_model('es_core_news_sm'))
models['es'].append(load_model('es_core_news_md'))
if 'pt' in models:
models['pt'].append(load_model('pt_core_news_sm'))
if 'it' in models:
models['it'].append(load_model('it_core_news_sm'))
if 'nl' in models:
models['nl'].append(load_model('nl_core_news_sm'))
if 'en' in models:
models['en'].append(load_model('en_core_web_sm'))
models['en'].append(load_model('en_core_web_md'))
models['en'].append(load_model('en_core_web_lg'))
if 'fr' in models:
models['fr'].append(load_model('fr_core_news_sm'))
models['fr'].append(load_model('fr_core_news_md'))
with out_path.open(mode='w', encoding='utf-8') as out_file:
run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks)
|
[
"def",
"main",
"(",
"out_path",
",",
"ud_dir",
",",
"check_parse",
"=",
"False",
",",
"langs",
"=",
"ALL_LANGUAGES",
",",
"exclude_trained_models",
"=",
"False",
",",
"exclude_multi",
"=",
"False",
",",
"hide_freq",
"=",
"False",
",",
"corpus",
"=",
"'train'",
",",
"best_per_language",
"=",
"False",
")",
":",
"languages",
"=",
"[",
"lang",
".",
"strip",
"(",
")",
"for",
"lang",
"in",
"langs",
".",
"split",
"(",
"\",\"",
")",
"]",
"print_freq_tasks",
"=",
"[",
"]",
"if",
"not",
"hide_freq",
":",
"print_freq_tasks",
"=",
"[",
"'Tokens'",
"]",
"# fetching all relevant treebank from the directory",
"treebanks",
"=",
"fetch_all_treebanks",
"(",
"ud_dir",
",",
"languages",
",",
"corpus",
",",
"best_per_language",
")",
"print",
"(",
")",
"print",
"(",
"\"Loading all relevant models for\"",
",",
"languages",
")",
"models",
"=",
"dict",
"(",
")",
"# multi-lang model",
"multi",
"=",
"None",
"if",
"not",
"exclude_multi",
"and",
"not",
"check_parse",
":",
"multi",
"=",
"load_model",
"(",
"'xx_ent_wiki_sm'",
",",
"add_sentencizer",
"=",
"True",
")",
"# initialize all models with the multi-lang model",
"for",
"lang",
"in",
"languages",
":",
"models",
"[",
"lang",
"]",
"=",
"[",
"multi",
"]",
"if",
"multi",
"else",
"[",
"]",
"# add default models if we don't want to evaluate parsing info",
"if",
"not",
"check_parse",
":",
"# Norwegian is 'nb' in spaCy but 'no' in the UD corpora",
"if",
"lang",
"==",
"'no'",
":",
"models",
"[",
"'no'",
"]",
".",
"append",
"(",
"load_default_model_sentencizer",
"(",
"'nb'",
")",
")",
"else",
":",
"models",
"[",
"lang",
"]",
".",
"append",
"(",
"load_default_model_sentencizer",
"(",
"lang",
")",
")",
"# language-specific trained models",
"if",
"not",
"exclude_trained_models",
":",
"if",
"'de'",
"in",
"models",
":",
"models",
"[",
"'de'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'de_core_news_sm'",
")",
")",
"if",
"'es'",
"in",
"models",
":",
"models",
"[",
"'es'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'es_core_news_sm'",
")",
")",
"models",
"[",
"'es'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'es_core_news_md'",
")",
")",
"if",
"'pt'",
"in",
"models",
":",
"models",
"[",
"'pt'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'pt_core_news_sm'",
")",
")",
"if",
"'it'",
"in",
"models",
":",
"models",
"[",
"'it'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'it_core_news_sm'",
")",
")",
"if",
"'nl'",
"in",
"models",
":",
"models",
"[",
"'nl'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'nl_core_news_sm'",
")",
")",
"if",
"'en'",
"in",
"models",
":",
"models",
"[",
"'en'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'en_core_web_sm'",
")",
")",
"models",
"[",
"'en'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'en_core_web_md'",
")",
")",
"models",
"[",
"'en'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'en_core_web_lg'",
")",
")",
"if",
"'fr'",
"in",
"models",
":",
"models",
"[",
"'fr'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'fr_core_news_sm'",
")",
")",
"models",
"[",
"'fr'",
"]",
".",
"append",
"(",
"load_model",
"(",
"'fr_core_news_md'",
")",
")",
"with",
"out_path",
".",
"open",
"(",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"out_file",
":",
"run_all_evals",
"(",
"models",
",",
"treebanks",
",",
"out_file",
",",
"check_parse",
",",
"print_freq_tasks",
")"
] |
Assemble all treebanks and models to run evaluations with.
When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality
|
[
"Assemble",
"all",
"treebanks",
"and",
"models",
"to",
"run",
"evaluations",
"with",
".",
"When",
"setting",
"check_parse",
"to",
"True",
"the",
"default",
"models",
"will",
"not",
"be",
"evaluated",
"as",
"they",
"don",
"t",
"have",
"parsing",
"functionality"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L226-L283
|
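Note: putting the pieces together, a hedged invocation of the whole benchmark; it assumes the UD data directory exists and the referenced spaCy models are installed (a missing model makes load_model raise), and the output file name is arbitrary:

from pathlib import Path

main(
    out_path=Path("benchmark_results.txt"),
    ud_dir=Path("/data/ud-treebanks-v2.3"),  # assumption: local UD release
    langs="en,nl",
    corpus="dev",                            # matches the '-ud-dev.txt' UD file naming
    best_per_language=True,
)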
21,254
|
explosion/spaCy
|
spacy/lang/de/syntax_iterators.py
|
noun_chunks
|
def noun_chunks(obj):
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# this iterator extracts spans headed by NOUNs starting from the left-most
# syntactic dependent until the NOUN itself for close apposition and
# measurement construction, the span is sometimes extended to the right of
# the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee"
# and not just "eine Tasse", same for "das Thema Familie".
labels = [
"sb",
"oa",
"da",
"nk",
"mo",
"ag",
"ROOT",
"root",
"cj",
"pd",
"og",
"app",
]
doc = obj.doc # Ensure works on both Doc and Span.
np_label = doc.vocab.strings.add("NP")
np_deps = set(doc.vocab.strings.add(label) for label in labels)
close_app = doc.vocab.strings.add("nk")
rbracket = 0
for i, word in enumerate(obj):
if i < rbracket:
continue
if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps:
rbracket = word.i + 1
# try to extend the span to the right
# to capture close apposition/measurement constructions
for rdep in doc[word.i].rights:
if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app:
rbracket = rdep.i + 1
yield word.left_edge.i, rbracket, np_label
|
python
|
def noun_chunks(obj):
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# this iterator extracts spans headed by NOUNs starting from the left-most
# syntactic dependent until the NOUN itself for close apposition and
# measurement construction, the span is sometimes extended to the right of
# the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee"
# and not just "eine Tasse", same for "das Thema Familie".
labels = [
"sb",
"oa",
"da",
"nk",
"mo",
"ag",
"ROOT",
"root",
"cj",
"pd",
"og",
"app",
]
doc = obj.doc # Ensure works on both Doc and Span.
np_label = doc.vocab.strings.add("NP")
np_deps = set(doc.vocab.strings.add(label) for label in labels)
close_app = doc.vocab.strings.add("nk")
rbracket = 0
for i, word in enumerate(obj):
if i < rbracket:
continue
if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps:
rbracket = word.i + 1
# try to extend the span to the right
# to capture close apposition/measurement constructions
for rdep in doc[word.i].rights:
if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app:
rbracket = rdep.i + 1
yield word.left_edge.i, rbracket, np_label
|
[
"def",
"noun_chunks",
"(",
"obj",
")",
":",
"# this iterator extracts spans headed by NOUNs starting from the left-most",
"# syntactic dependent until the NOUN itself for close apposition and",
"# measurement construction, the span is sometimes extended to the right of",
"# the NOUN. Example: \"eine Tasse Tee\" (a cup (of) tea) returns \"eine Tasse Tee\"",
"# and not just \"eine Tasse\", same for \"das Thema Familie\".",
"labels",
"=",
"[",
"\"sb\"",
",",
"\"oa\"",
",",
"\"da\"",
",",
"\"nk\"",
",",
"\"mo\"",
",",
"\"ag\"",
",",
"\"ROOT\"",
",",
"\"root\"",
",",
"\"cj\"",
",",
"\"pd\"",
",",
"\"og\"",
",",
"\"app\"",
",",
"]",
"doc",
"=",
"obj",
".",
"doc",
"# Ensure works on both Doc and Span.",
"np_label",
"=",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"\"NP\"",
")",
"np_deps",
"=",
"set",
"(",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
")",
"close_app",
"=",
"doc",
".",
"vocab",
".",
"strings",
".",
"add",
"(",
"\"nk\"",
")",
"rbracket",
"=",
"0",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"obj",
")",
":",
"if",
"i",
"<",
"rbracket",
":",
"continue",
"if",
"word",
".",
"pos",
"in",
"(",
"NOUN",
",",
"PROPN",
",",
"PRON",
")",
"and",
"word",
".",
"dep",
"in",
"np_deps",
":",
"rbracket",
"=",
"word",
".",
"i",
"+",
"1",
"# try to extend the span to the right",
"# to capture close apposition/measurement constructions",
"for",
"rdep",
"in",
"doc",
"[",
"word",
".",
"i",
"]",
".",
"rights",
":",
"if",
"rdep",
".",
"pos",
"in",
"(",
"NOUN",
",",
"PROPN",
")",
"and",
"rdep",
".",
"dep",
"==",
"close_app",
":",
"rbracket",
"=",
"rdep",
".",
"i",
"+",
"1",
"yield",
"word",
".",
"left_edge",
".",
"i",
",",
"rbracket",
",",
"np_label"
] |
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
|
[
"Detect",
"base",
"noun",
"phrases",
"from",
"a",
"dependency",
"parse",
".",
"Works",
"on",
"both",
"Doc",
"and",
"Span",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/de/syntax_iterators.py#L7-L46
|
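Note: as with the Greek iterator, this is the German syntax iterator behind Doc.noun_chunks, so the close-apposition behaviour from the comments can be observed on a parsed sentence. A hedged sketch, assuming de_core_news_sm is installed:

import spacy

nlp = spacy.load("de_core_news_sm")  # assumption: German model with a parser is installed
doc = nlp("Ich trinke eine Tasse Tee.")
# expected to include "eine Tasse Tee" if the parse matches the nk pattern described above
print([chunk.text for chunk in doc.noun_chunks])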
21,255
|
explosion/spaCy
|
spacy/_ml.py
|
with_cpu
|
def with_cpu(ops, model):
"""Wrap a model that should run on CPU, transferring inputs and outputs
as necessary."""
model.to_cpu()
def with_cpu_forward(inputs, drop=0.0):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None):
cpu_d_outputs = _to_cpu(d_outputs)
return backprop(cpu_d_outputs, sgd=sgd)
return gpu_outputs, with_cpu_backprop
return wrap(with_cpu_forward, model)
|
python
|
def with_cpu(ops, model):
"""Wrap a model that should run on CPU, transferring inputs and outputs
as necessary."""
model.to_cpu()
def with_cpu_forward(inputs, drop=0.0):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None):
cpu_d_outputs = _to_cpu(d_outputs)
return backprop(cpu_d_outputs, sgd=sgd)
return gpu_outputs, with_cpu_backprop
return wrap(with_cpu_forward, model)
|
[
"def",
"with_cpu",
"(",
"ops",
",",
"model",
")",
":",
"model",
".",
"to_cpu",
"(",
")",
"def",
"with_cpu_forward",
"(",
"inputs",
",",
"drop",
"=",
"0.0",
")",
":",
"cpu_outputs",
",",
"backprop",
"=",
"model",
".",
"begin_update",
"(",
"_to_cpu",
"(",
"inputs",
")",
",",
"drop",
"=",
"drop",
")",
"gpu_outputs",
"=",
"_to_device",
"(",
"ops",
",",
"cpu_outputs",
")",
"def",
"with_cpu_backprop",
"(",
"d_outputs",
",",
"sgd",
"=",
"None",
")",
":",
"cpu_d_outputs",
"=",
"_to_cpu",
"(",
"d_outputs",
")",
"return",
"backprop",
"(",
"cpu_d_outputs",
",",
"sgd",
"=",
"sgd",
")",
"return",
"gpu_outputs",
",",
"with_cpu_backprop",
"return",
"wrap",
"(",
"with_cpu_forward",
",",
"model",
")"
] |
Wrap a model that should run on CPU, transferring inputs and outputs
as necessary.
|
[
"Wrap",
"a",
"model",
"that",
"should",
"run",
"on",
"CPU",
"transferring",
"inputs",
"and",
"outputs",
"as",
"necessary",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L84-L99
|
21,256
|
explosion/spaCy
|
spacy/_ml.py
|
masked_language_model
|
def masked_language_model(vocab, model, mask_prob=0.15):
"""Convert a model into a BERT-style masked language model"""
random_words = _RandomWords(vocab)
def mlm_forward(docs, drop=0.0):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
output, backprop = model.begin_update(docs, drop=drop)
def mlm_backward(d_output, sgd=None):
d_output *= 1 - mask
return backprop(d_output, sgd=sgd)
return output, mlm_backward
return wrap(mlm_forward, model)
|
python
|
def masked_language_model(vocab, model, mask_prob=0.15):
"""Convert a model into a BERT-style masked language model"""
random_words = _RandomWords(vocab)
def mlm_forward(docs, drop=0.0):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
output, backprop = model.begin_update(docs, drop=drop)
def mlm_backward(d_output, sgd=None):
d_output *= 1 - mask
return backprop(d_output, sgd=sgd)
return output, mlm_backward
return wrap(mlm_forward, model)
|
[
"def",
"masked_language_model",
"(",
"vocab",
",",
"model",
",",
"mask_prob",
"=",
"0.15",
")",
":",
"random_words",
"=",
"_RandomWords",
"(",
"vocab",
")",
"def",
"mlm_forward",
"(",
"docs",
",",
"drop",
"=",
"0.0",
")",
":",
"mask",
",",
"docs",
"=",
"_apply_mask",
"(",
"docs",
",",
"random_words",
",",
"mask_prob",
"=",
"mask_prob",
")",
"mask",
"=",
"model",
".",
"ops",
".",
"asarray",
"(",
"mask",
")",
".",
"reshape",
"(",
"(",
"mask",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"output",
",",
"backprop",
"=",
"model",
".",
"begin_update",
"(",
"docs",
",",
"drop",
"=",
"drop",
")",
"def",
"mlm_backward",
"(",
"d_output",
",",
"sgd",
"=",
"None",
")",
":",
"d_output",
"*=",
"1",
"-",
"mask",
"return",
"backprop",
"(",
"d_output",
",",
"sgd",
"=",
"sgd",
")",
"return",
"output",
",",
"mlm_backward",
"return",
"wrap",
"(",
"mlm_forward",
",",
"model",
")"
] |
Convert a model into a BERT-style masked language model
|
[
"Convert",
"a",
"model",
"into",
"a",
"BERT",
"-",
"style",
"masked",
"language",
"model"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L693-L709
|
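Note: the key step in mlm_backward is plain array arithmetic: multiplying the incoming gradient by (1 - mask) zeroes the gradient rows flagged in mask and passes the rest through unchanged. A standalone NumPy illustration of just that step (the arrays are invented; spaCy builds mask inside _apply_mask):

import numpy as np

d_output = np.array([[0.2, -0.1], [0.4, 0.4], [-0.3, 0.5]])
mask = np.array([[1.0], [0.0], [1.0]])  # one flag per token row, reshaped to (n, 1) as above

d_output = d_output * (1 - mask)
print(d_output)  # rows flagged with 1 are zeroed, the middle row passes through unchanged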
21,257
|
explosion/spaCy
|
spacy/pipeline/hooks.py
|
SimilarityHook.begin_training
|
def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs):
"""Allocate model, using width from tensorizer in pipeline.
gold_tuples (iterable): Gold-standard training data.
pipeline (list): The pipeline the model is part of.
"""
if self.model is True:
self.model = self.Model(pipeline[0].model.nO)
link_vectors_to_models(self.vocab)
if sgd is None:
sgd = self.create_optimizer()
return sgd
|
python
|
def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs):
"""Allocate model, using width from tensorizer in pipeline.
gold_tuples (iterable): Gold-standard training data.
pipeline (list): The pipeline the model is part of.
"""
if self.model is True:
self.model = self.Model(pipeline[0].model.nO)
link_vectors_to_models(self.vocab)
if sgd is None:
sgd = self.create_optimizer()
return sgd
|
[
"def",
"begin_training",
"(",
"self",
",",
"_",
"=",
"tuple",
"(",
")",
",",
"pipeline",
"=",
"None",
",",
"sgd",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"model",
"is",
"True",
":",
"self",
".",
"model",
"=",
"self",
".",
"Model",
"(",
"pipeline",
"[",
"0",
"]",
".",
"model",
".",
"nO",
")",
"link_vectors_to_models",
"(",
"self",
".",
"vocab",
")",
"if",
"sgd",
"is",
"None",
":",
"sgd",
"=",
"self",
".",
"create_optimizer",
"(",
")",
"return",
"sgd"
] |
Allocate model, using width from tensorizer in pipeline.
gold_tuples (iterable): Gold-standard training data.
pipeline (list): The pipeline the model is part of.
|
[
"Allocate",
"model",
"using",
"width",
"from",
"tensorizer",
"in",
"pipeline",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/hooks.py#L89-L100
|
21,258
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.render_svg
|
def render_svg(self, render_id, words, arcs):
"""Render SVG.
render_id (int): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (unicode): Rendered SVG markup.
"""
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)]
arcs = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words) + "".join(arcs)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
)
|
python
|
def render_svg(self, render_id, words, arcs):
"""Render SVG.
render_id (int): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (unicode): Rendered SVG markup.
"""
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)]
arcs = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words) + "".join(arcs)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
)
|
[
"def",
"render_svg",
"(",
"self",
",",
"render_id",
",",
"words",
",",
"arcs",
")",
":",
"self",
".",
"levels",
"=",
"self",
".",
"get_levels",
"(",
"arcs",
")",
"self",
".",
"highest_level",
"=",
"len",
"(",
"self",
".",
"levels",
")",
"self",
".",
"offset_y",
"=",
"self",
".",
"distance",
"/",
"2",
"*",
"self",
".",
"highest_level",
"+",
"self",
".",
"arrow_stroke",
"self",
".",
"width",
"=",
"self",
".",
"offset_x",
"+",
"len",
"(",
"words",
")",
"*",
"self",
".",
"distance",
"self",
".",
"height",
"=",
"self",
".",
"offset_y",
"+",
"3",
"*",
"self",
".",
"word_spacing",
"self",
".",
"id",
"=",
"render_id",
"words",
"=",
"[",
"self",
".",
"render_word",
"(",
"w",
"[",
"\"text\"",
"]",
",",
"w",
"[",
"\"tag\"",
"]",
",",
"i",
")",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"words",
")",
"]",
"arcs",
"=",
"[",
"self",
".",
"render_arrow",
"(",
"a",
"[",
"\"label\"",
"]",
",",
"a",
"[",
"\"start\"",
"]",
",",
"a",
"[",
"\"end\"",
"]",
",",
"a",
"[",
"\"dir\"",
"]",
",",
"i",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"arcs",
")",
"]",
"content",
"=",
"\"\"",
".",
"join",
"(",
"words",
")",
"+",
"\"\"",
".",
"join",
"(",
"arcs",
")",
"return",
"TPL_DEP_SVG",
".",
"format",
"(",
"id",
"=",
"self",
".",
"id",
",",
"width",
"=",
"self",
".",
"width",
",",
"height",
"=",
"self",
".",
"height",
",",
"color",
"=",
"self",
".",
"color",
",",
"bg",
"=",
"self",
".",
"bg",
",",
"font",
"=",
"self",
".",
"font",
",",
"content",
"=",
"content",
",",
"dir",
"=",
"self",
".",
"direction",
",",
"lang",
"=",
"self",
".",
"lang",
",",
")"
] |
Render SVG.
render_id (int): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (unicode): Rendered SVG markup.
|
[
"Render",
"SVG",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L70-L100
|
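DependencyRenderer.render_svg is normally driven through the public displacy API rather than called directly. A minimal usage sketch, assuming spaCy v2.x and that the en_core_web_sm model is installed:
import spacy
from spacy import displacy
nlp = spacy.load("en_core_web_sm")   # assumed model name
doc = nlp("Autonomous cars shift insurance liability toward manufacturers")
# style="dep" builds a DependencyRenderer under the hood and returns its SVG markup as a string
svg = displacy.render(doc, style="dep", options={"compact": True})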
21,259
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.render_word
|
def render_word(self, text, tag, i):
"""Render individual word.
text (unicode): Word text.
tag (unicode): Part-of-speech tag.
i (int): Unique ID, typically word index.
RETURNS (unicode): Rendered SVG markup.
"""
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
|
python
|
def render_word(self, text, tag, i):
"""Render individual word.
text (unicode): Word text.
tag (unicode): Part-of-speech tag.
i (int): Unique ID, typically word index.
RETURNS (unicode): Rendered SVG markup.
"""
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
|
[
"def",
"render_word",
"(",
"self",
",",
"text",
",",
"tag",
",",
"i",
")",
":",
"y",
"=",
"self",
".",
"offset_y",
"+",
"self",
".",
"word_spacing",
"x",
"=",
"self",
".",
"offset_x",
"+",
"i",
"*",
"self",
".",
"distance",
"if",
"self",
".",
"direction",
"==",
"\"rtl\"",
":",
"x",
"=",
"self",
".",
"width",
"-",
"x",
"html_text",
"=",
"escape_html",
"(",
"text",
")",
"return",
"TPL_DEP_WORDS",
".",
"format",
"(",
"text",
"=",
"html_text",
",",
"tag",
"=",
"tag",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
")"
] |
Render individual word.
text (unicode): Word text.
tag (unicode): Part-of-speech tag.
i (int): Unique ID, typically word index.
RETURNS (unicode): Rendered SVG markup.
|
[
"Render",
"individual",
"word",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L102-L115
|
21,260
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.render_arrow
|
def render_arrow(self, label, start, end, direction, i):
"""Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
"""
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
)
|
python
|
def render_arrow(self, label, start, end, direction, i):
"""Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
"""
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
)
|
[
"def",
"render_arrow",
"(",
"self",
",",
"label",
",",
"start",
",",
"end",
",",
"direction",
",",
"i",
")",
":",
"level",
"=",
"self",
".",
"levels",
".",
"index",
"(",
"end",
"-",
"start",
")",
"+",
"1",
"x_start",
"=",
"self",
".",
"offset_x",
"+",
"start",
"*",
"self",
".",
"distance",
"+",
"self",
".",
"arrow_spacing",
"if",
"self",
".",
"direction",
"==",
"\"rtl\"",
":",
"x_start",
"=",
"self",
".",
"width",
"-",
"x_start",
"y",
"=",
"self",
".",
"offset_y",
"x_end",
"=",
"(",
"self",
".",
"offset_x",
"+",
"(",
"end",
"-",
"start",
")",
"*",
"self",
".",
"distance",
"+",
"start",
"*",
"self",
".",
"distance",
"-",
"self",
".",
"arrow_spacing",
"*",
"(",
"self",
".",
"highest_level",
"-",
"level",
")",
"/",
"4",
")",
"if",
"self",
".",
"direction",
"==",
"\"rtl\"",
":",
"x_end",
"=",
"self",
".",
"width",
"-",
"x_end",
"y_curve",
"=",
"self",
".",
"offset_y",
"-",
"level",
"*",
"self",
".",
"distance",
"/",
"2",
"if",
"self",
".",
"compact",
":",
"y_curve",
"=",
"self",
".",
"offset_y",
"-",
"level",
"*",
"self",
".",
"distance",
"/",
"6",
"if",
"y_curve",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"levels",
")",
">",
"5",
":",
"y_curve",
"=",
"-",
"self",
".",
"distance",
"arrowhead",
"=",
"self",
".",
"get_arrowhead",
"(",
"direction",
",",
"x_start",
",",
"y",
",",
"x_end",
")",
"arc",
"=",
"self",
".",
"get_arc",
"(",
"x_start",
",",
"y",
",",
"y_curve",
",",
"x_end",
")",
"label_side",
"=",
"\"right\"",
"if",
"self",
".",
"direction",
"==",
"\"rtl\"",
"else",
"\"left\"",
"return",
"TPL_DEP_ARCS",
".",
"format",
"(",
"id",
"=",
"self",
".",
"id",
",",
"i",
"=",
"i",
",",
"stroke",
"=",
"self",
".",
"arrow_stroke",
",",
"head",
"=",
"arrowhead",
",",
"label",
"=",
"label",
",",
"label_side",
"=",
"label_side",
",",
"arc",
"=",
"arc",
",",
")"
] |
Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
|
[
"Render",
"individual",
"arrow",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L117-L156
|
21,261
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.get_arc
|
def get_arc(self, x_start, y, y_curve, x_end):
"""Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
y_curve (int): Y-coordinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arc path ('d' attribute).
"""
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end)
|
python
|
def get_arc(self, x_start, y, y_curve, x_end):
"""Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
y_curve (int): Y-coordinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arc path ('d' attribute).
"""
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end)
|
[
"def",
"get_arc",
"(",
"self",
",",
"x_start",
",",
"y",
",",
"y_curve",
",",
"x_end",
")",
":",
"template",
"=",
"\"M{x},{y} C{x},{c} {e},{c} {e},{y}\"",
"if",
"self",
".",
"compact",
":",
"template",
"=",
"\"M{x},{y} {x},{c} {e},{c} {e},{y}\"",
"return",
"template",
".",
"format",
"(",
"x",
"=",
"x_start",
",",
"y",
"=",
"y",
",",
"c",
"=",
"y_curve",
",",
"e",
"=",
"x_end",
")"
] |
Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
y_curve (int): Y-coordinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arc path ('d' attribute).
|
[
"Render",
"individual",
"arc",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L158-L170
|
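The two templates are plain SVG path strings: the default draws a cubic Bézier curve (the "C" command), while the compact variant uses straight line segments. Substituting sample coordinates shows the output (pure string formatting, no spaCy objects involved):
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"   # default (curved) template from get_arc
print(template.format(x=50, y=300, c=200, e=250))
# M50,300 C50,200 250,200 250,300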
21,262
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.get_arrowhead
|
def get_arrowhead(self, direction, x, y, end):
"""Render individual arrow head.
direction (unicode): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arrow head path ('d' attribute).
"""
if direction == "left":
pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
pos1, pos2, pos3 = (
end,
end + self.arrow_width - 2,
end - self.arrow_width + 2,
)
arrowhead = (
pos1,
y + 2,
pos2,
y - self.arrow_width,
pos3,
y - self.arrow_width,
)
return "M{},{} L{},{} {},{}".format(*arrowhead)
|
python
|
def get_arrowhead(self, direction, x, y, end):
"""Render individual arrow head.
direction (unicode): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arrow head path ('d' attribute).
"""
if direction == "left":
pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
pos1, pos2, pos3 = (
end,
end + self.arrow_width - 2,
end - self.arrow_width + 2,
)
arrowhead = (
pos1,
y + 2,
pos2,
y - self.arrow_width,
pos3,
y - self.arrow_width,
)
return "M{},{} L{},{} {},{}".format(*arrowhead)
|
[
"def",
"get_arrowhead",
"(",
"self",
",",
"direction",
",",
"x",
",",
"y",
",",
"end",
")",
":",
"if",
"direction",
"==",
"\"left\"",
":",
"pos1",
",",
"pos2",
",",
"pos3",
"=",
"(",
"x",
",",
"x",
"-",
"self",
".",
"arrow_width",
"+",
"2",
",",
"x",
"+",
"self",
".",
"arrow_width",
"-",
"2",
")",
"else",
":",
"pos1",
",",
"pos2",
",",
"pos3",
"=",
"(",
"end",
",",
"end",
"+",
"self",
".",
"arrow_width",
"-",
"2",
",",
"end",
"-",
"self",
".",
"arrow_width",
"+",
"2",
",",
")",
"arrowhead",
"=",
"(",
"pos1",
",",
"y",
"+",
"2",
",",
"pos2",
",",
"y",
"-",
"self",
".",
"arrow_width",
",",
"pos3",
",",
"y",
"-",
"self",
".",
"arrow_width",
",",
")",
"return",
"\"M{},{} L{},{} {},{}\"",
".",
"format",
"(",
"*",
"arrowhead",
")"
] |
Render individual arrow head.
direction (unicode): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arrow head path ('d' attribute).
|
[
"Render",
"individual",
"arrow",
"head",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L172-L197
|
21,263
|
explosion/spaCy
|
spacy/displacy/render.py
|
DependencyRenderer.get_levels
|
def get_levels(self, arcs):
"""Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
args (list): Individual arcs and their start, end, direction and label.
RETURNS (list): Arc levels sorted from lowest to highest.
"""
levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
return sorted(list(levels))
|
python
|
def get_levels(self, arcs):
"""Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
args (list): Individual arcs and their start, end, direction and label.
RETURNS (list): Arc levels sorted from lowest to highest.
"""
levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
return sorted(list(levels))
|
[
"def",
"get_levels",
"(",
"self",
",",
"arcs",
")",
":",
"levels",
"=",
"set",
"(",
"map",
"(",
"lambda",
"arc",
":",
"arc",
"[",
"\"end\"",
"]",
"-",
"arc",
"[",
"\"start\"",
"]",
",",
"arcs",
")",
")",
"return",
"sorted",
"(",
"list",
"(",
"levels",
")",
")"
] |
Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
args (list): Individual arcs and their start, end, direction and label.
RETURNS (list): Arc levels sorted from lowest to highest.
|
[
"Calculate",
"available",
"arc",
"height",
"levels",
".",
"Used",
"to",
"calculate",
"arrow",
"heights",
"dynamically",
"and",
"without",
"wasting",
"space",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L199-L207
|
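In other words, each distinct arc length gets its own height level. A small worked example with hand-written arcs (plain Python, same arithmetic as get_levels):
arcs = [
    {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
    {"start": 2, "end": 3, "label": "det", "dir": "left"},
    {"start": 1, "end": 3, "label": "dobj", "dir": "right"},
]
levels = sorted(set(arc["end"] - arc["start"] for arc in arcs))
print(levels)   # [1, 2] -> only two arc heights are needed for these three arcs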
21,264
|
explosion/spaCy
|
spacy/displacy/render.py
|
EntityRenderer.render_ents
|
def render_ents(self, text, spans, title):
"""Render entities in text.
text (unicode): Original text.
spans (list): Individual entity spans and their start, end and label.
title (unicode or None): Document title set in Doc.user_data['title'].
"""
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
entity = escape_html(text[start:end])
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
if self.ents is None or label.upper() in self.ents:
color = self.colors.get(label.upper(), self.default_color)
ent_settings = {"label": label, "text": entity, "bg": color}
if self.direction == "rtl":
markup += TPL_ENT_RTL.format(**ent_settings)
else:
markup += TPL_ENT.format(**ent_settings)
else:
markup += entity
offset = end
markup += escape_html(text[offset:])
markup = TPL_ENTS.format(content=markup, dir=self.direction)
if title:
markup = TPL_TITLE.format(title=title) + markup
return markup
|
python
|
def render_ents(self, text, spans, title):
"""Render entities in text.
text (unicode): Original text.
spans (list): Individual entity spans and their start, end and label.
title (unicode or None): Document title set in Doc.user_data['title'].
"""
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
entity = escape_html(text[start:end])
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
if self.ents is None or label.upper() in self.ents:
color = self.colors.get(label.upper(), self.default_color)
ent_settings = {"label": label, "text": entity, "bg": color}
if self.direction == "rtl":
markup += TPL_ENT_RTL.format(**ent_settings)
else:
markup += TPL_ENT.format(**ent_settings)
else:
markup += entity
offset = end
markup += escape_html(text[offset:])
markup = TPL_ENTS.format(content=markup, dir=self.direction)
if title:
markup = TPL_TITLE.format(title=title) + markup
return markup
|
[
"def",
"render_ents",
"(",
"self",
",",
"text",
",",
"spans",
",",
"title",
")",
":",
"markup",
"=",
"\"\"",
"offset",
"=",
"0",
"for",
"span",
"in",
"spans",
":",
"label",
"=",
"span",
"[",
"\"label\"",
"]",
"start",
"=",
"span",
"[",
"\"start\"",
"]",
"end",
"=",
"span",
"[",
"\"end\"",
"]",
"entity",
"=",
"escape_html",
"(",
"text",
"[",
"start",
":",
"end",
"]",
")",
"fragments",
"=",
"text",
"[",
"offset",
":",
"start",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"i",
",",
"fragment",
"in",
"enumerate",
"(",
"fragments",
")",
":",
"markup",
"+=",
"escape_html",
"(",
"fragment",
")",
"if",
"len",
"(",
"fragments",
")",
">",
"1",
"and",
"i",
"!=",
"len",
"(",
"fragments",
")",
"-",
"1",
":",
"markup",
"+=",
"\"</br>\"",
"if",
"self",
".",
"ents",
"is",
"None",
"or",
"label",
".",
"upper",
"(",
")",
"in",
"self",
".",
"ents",
":",
"color",
"=",
"self",
".",
"colors",
".",
"get",
"(",
"label",
".",
"upper",
"(",
")",
",",
"self",
".",
"default_color",
")",
"ent_settings",
"=",
"{",
"\"label\"",
":",
"label",
",",
"\"text\"",
":",
"entity",
",",
"\"bg\"",
":",
"color",
"}",
"if",
"self",
".",
"direction",
"==",
"\"rtl\"",
":",
"markup",
"+=",
"TPL_ENT_RTL",
".",
"format",
"(",
"*",
"*",
"ent_settings",
")",
"else",
":",
"markup",
"+=",
"TPL_ENT",
".",
"format",
"(",
"*",
"*",
"ent_settings",
")",
"else",
":",
"markup",
"+=",
"entity",
"offset",
"=",
"end",
"markup",
"+=",
"escape_html",
"(",
"text",
"[",
"offset",
":",
"]",
")",
"markup",
"=",
"TPL_ENTS",
".",
"format",
"(",
"content",
"=",
"markup",
",",
"dir",
"=",
"self",
".",
"direction",
")",
"if",
"title",
":",
"markup",
"=",
"TPL_TITLE",
".",
"format",
"(",
"title",
"=",
"title",
")",
"+",
"markup",
"return",
"markup"
] |
Render entities in text.
text (unicode): Original text.
spans (list): Individual entity spans and their start, end and label.
title (unicode or None): Document title set in Doc.user_data['title'].
|
[
"Render",
"entities",
"in",
"text",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L271-L304
|
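As with the dependency renderer, render_ents is usually reached through displacy rather than called directly. A usage sketch, assuming spaCy v2.x and an installed en_core_web_sm model:
import spacy
from spacy import displacy
nlp = spacy.load("en_core_web_sm")   # assumed model name
doc = nlp("Sebastian Thrun started working on self-driving cars at Google in 2007.")
doc.user_data["title"] = "An example document"   # becomes the title argument handled above
html = displacy.render(doc, style="ent")         # returns the HTML markup built by render_ents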
21,265
|
explosion/spaCy
|
spacy/pipeline/functions.py
|
merge_noun_chunks
|
def merge_noun_chunks(doc):
"""Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
"""
if not doc.is_parsed:
return doc
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
attrs = {"tag": np.root.tag, "dep": np.root.dep}
retokenizer.merge(np, attrs=attrs)
return doc
|
python
|
def merge_noun_chunks(doc):
"""Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
"""
if not doc.is_parsed:
return doc
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
attrs = {"tag": np.root.tag, "dep": np.root.dep}
retokenizer.merge(np, attrs=attrs)
return doc
|
[
"def",
"merge_noun_chunks",
"(",
"doc",
")",
":",
"if",
"not",
"doc",
".",
"is_parsed",
":",
"return",
"doc",
"with",
"doc",
".",
"retokenize",
"(",
")",
"as",
"retokenizer",
":",
"for",
"np",
"in",
"doc",
".",
"noun_chunks",
":",
"attrs",
"=",
"{",
"\"tag\"",
":",
"np",
".",
"root",
".",
"tag",
",",
"\"dep\"",
":",
"np",
".",
"root",
".",
"dep",
"}",
"retokenizer",
".",
"merge",
"(",
"np",
",",
"attrs",
"=",
"attrs",
")",
"return",
"doc"
] |
Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
|
[
"Merge",
"noun",
"chunks",
"into",
"a",
"single",
"token",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L7-L21
|
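A usage sketch for the component above, following the documented pattern of adding it to an existing pipeline (assumes a model with a parser, such as en_core_web_sm, is installed):
import spacy
nlp = spacy.load("en_core_web_sm")                   # assumed model name
print([t.text for t in nlp("I have a blue car")])    # ['I', 'have', 'a', 'blue', 'car']
nlp.add_pipe(nlp.create_pipe("merge_noun_chunks"))
print([t.text for t in nlp("I have a blue car")])    # ['I', 'have', 'a blue car']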
21,266
|
explosion/spaCy
|
spacy/pipeline/functions.py
|
merge_entities
|
def merge_entities(doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc
|
python
|
def merge_entities(doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc
|
[
"def",
"merge_entities",
"(",
"doc",
")",
":",
"with",
"doc",
".",
"retokenize",
"(",
")",
"as",
"retokenizer",
":",
"for",
"ent",
"in",
"doc",
".",
"ents",
":",
"attrs",
"=",
"{",
"\"tag\"",
":",
"ent",
".",
"root",
".",
"tag",
",",
"\"dep\"",
":",
"ent",
".",
"root",
".",
"dep",
",",
"\"ent_type\"",
":",
"ent",
".",
"label",
"}",
"retokenizer",
".",
"merge",
"(",
"ent",
",",
"attrs",
"=",
"attrs",
")",
"return",
"doc"
] |
Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
|
[
"Merge",
"entities",
"into",
"a",
"single",
"token",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L24-L36
|
21,267
|
explosion/spaCy
|
spacy/pipeline/functions.py
|
merge_subtokens
|
def merge_subtokens(doc, label="subtok"):
"""Merge subtokens into a single token.
doc (Doc): The Doc object.
label (unicode): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
"""
merger = Matcher(doc.vocab)
merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}])
matches = merger(doc)
spans = [doc[start : end + 1] for _, start, end in matches]
with doc.retokenize() as retokenizer:
for span in spans:
retokenizer.merge(span)
return doc
|
python
|
def merge_subtokens(doc, label="subtok"):
"""Merge subtokens into a single token.
doc (Doc): The Doc object.
label (unicode): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
"""
merger = Matcher(doc.vocab)
merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}])
matches = merger(doc)
spans = [doc[start : end + 1] for _, start, end in matches]
with doc.retokenize() as retokenizer:
for span in spans:
retokenizer.merge(span)
return doc
|
[
"def",
"merge_subtokens",
"(",
"doc",
",",
"label",
"=",
"\"subtok\"",
")",
":",
"merger",
"=",
"Matcher",
"(",
"doc",
".",
"vocab",
")",
"merger",
".",
"add",
"(",
"\"SUBTOK\"",
",",
"None",
",",
"[",
"{",
"\"DEP\"",
":",
"label",
",",
"\"op\"",
":",
"\"+\"",
"}",
"]",
")",
"matches",
"=",
"merger",
"(",
"doc",
")",
"spans",
"=",
"[",
"doc",
"[",
"start",
":",
"end",
"+",
"1",
"]",
"for",
"_",
",",
"start",
",",
"end",
"in",
"matches",
"]",
"with",
"doc",
".",
"retokenize",
"(",
")",
"as",
"retokenizer",
":",
"for",
"span",
"in",
"spans",
":",
"retokenizer",
".",
"merge",
"(",
"span",
")",
"return",
"doc"
] |
Merge subtokens into a single token.
doc (Doc): The Doc object.
label (unicode): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
|
[
"Merge",
"subtokens",
"into",
"a",
"single",
"token",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L39-L55
|
21,268
|
explosion/spaCy
|
spacy/cli/train.py
|
_score_for_model
|
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc)
|
python
|
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc)
|
[
"def",
"_score_for_model",
"(",
"meta",
")",
":",
"mean_acc",
"=",
"list",
"(",
")",
"pipes",
"=",
"meta",
"[",
"\"pipeline\"",
"]",
"acc",
"=",
"meta",
"[",
"\"accuracy\"",
"]",
"if",
"\"tagger\"",
"in",
"pipes",
":",
"mean_acc",
".",
"append",
"(",
"acc",
"[",
"\"tags_acc\"",
"]",
")",
"if",
"\"parser\"",
"in",
"pipes",
":",
"mean_acc",
".",
"append",
"(",
"(",
"acc",
"[",
"\"uas\"",
"]",
"+",
"acc",
"[",
"\"las\"",
"]",
")",
"/",
"2",
")",
"if",
"\"ner\"",
"in",
"pipes",
":",
"mean_acc",
".",
"append",
"(",
"(",
"acc",
"[",
"\"ents_p\"",
"]",
"+",
"acc",
"[",
"\"ents_r\"",
"]",
"+",
"acc",
"[",
"\"ents_f\"",
"]",
")",
"/",
"3",
")",
"return",
"sum",
"(",
"mean_acc",
")",
"/",
"len",
"(",
"mean_acc",
")"
] |
Returns mean score between tasks in pipeline that can be used for early stopping.
|
[
"Returns",
"mean",
"score",
"between",
"tasks",
"in",
"pipeline",
"that",
"can",
"be",
"used",
"for",
"early",
"stopping",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L371-L382
|
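A worked example of the score with a hand-written meta dict (all numbers invented):
meta = {
    "pipeline": ["tagger", "parser", "ner"],
    "accuracy": {
        "tags_acc": 96.0,
        "uas": 90.0,
        "las": 88.0,
        "ents_p": 84.0,
        "ents_r": 82.0,
        "ents_f": 83.0,
    },
}
# tagger -> 96.0, parser -> (90.0 + 88.0) / 2 = 89.0, ner -> (84.0 + 82.0 + 83.0) / 3 = 83.0
# _score_for_model(meta) == (96.0 + 89.0 + 83.0) / 3 ≈ 89.33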
21,269
|
explosion/spaCy
|
spacy/cli/train.py
|
_load_pretrained_tok2vec
|
def _load_pretrained_tok2vec(nlp, loc):
"""Load pre-trained weights for the 'token-to-vector' part of the component
models, which is typically a CNN. See 'spacy pretrain'. Experimental.
"""
with loc.open("rb") as file_:
weights_data = file_.read()
loaded = []
for name, component in nlp.pipeline:
if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
component.tok2vec.from_bytes(weights_data)
loaded.append(name)
return loaded
|
python
|
def _load_pretrained_tok2vec(nlp, loc):
"""Load pre-trained weights for the 'token-to-vector' part of the component
models, which is typically a CNN. See 'spacy pretrain'. Experimental.
"""
with loc.open("rb") as file_:
weights_data = file_.read()
loaded = []
for name, component in nlp.pipeline:
if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
component.tok2vec.from_bytes(weights_data)
loaded.append(name)
return loaded
|
[
"def",
"_load_pretrained_tok2vec",
"(",
"nlp",
",",
"loc",
")",
":",
"with",
"loc",
".",
"open",
"(",
"\"rb\"",
")",
"as",
"file_",
":",
"weights_data",
"=",
"file_",
".",
"read",
"(",
")",
"loaded",
"=",
"[",
"]",
"for",
"name",
",",
"component",
"in",
"nlp",
".",
"pipeline",
":",
"if",
"hasattr",
"(",
"component",
",",
"\"model\"",
")",
"and",
"hasattr",
"(",
"component",
".",
"model",
",",
"\"tok2vec\"",
")",
":",
"component",
".",
"tok2vec",
".",
"from_bytes",
"(",
"weights_data",
")",
"loaded",
".",
"append",
"(",
"name",
")",
"return",
"loaded"
] |
Load pre-trained weights for the 'token-to-vector' part of the component
models, which is typically a CNN. See 'spacy pretrain'. Experimental.
|
[
"Load",
"pre",
"-",
"trained",
"weights",
"for",
"the",
"token",
"-",
"to",
"-",
"vector",
"part",
"of",
"the",
"component",
"models",
"which",
"is",
"typically",
"a",
"CNN",
".",
"See",
"spacy",
"pretrain",
".",
"Experimental",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L407-L418
|
21,270
|
explosion/spaCy
|
spacy/cli/converters/conllu2json.py
|
conllu2json
|
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None):
"""
Convert conllu files into JSON format for use with train cli.
use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not so rich.
Extract NER tags if available and convert them so that they follow
BILUO and the Wikipedia scheme
"""
# by @dvsrepo, via #11 explosion/spacy-dev-resources
# by @katarkor
docs = []
sentences = []
conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
checked_for_ner = False
has_ner_tags = False
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
if not checked_for_ner:
has_ner_tags = is_ner(sentence[5][0])
checked_for_ner = True
sentences.append(generate_sentence(sentence, has_ner_tags))
# Real-sized documents could be extracted using the comments on the
# conllu document
if len(sentences) % n_sents == 0:
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
return docs
|
python
|
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None):
"""
Convert conllu files into JSON format for use with train cli.
use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not so rich.
Extract NER tags if available and convert them so that they follow
BILUO and the Wikipedia scheme
"""
# by @dvsrepo, via #11 explosion/spacy-dev-resources
# by @katarkor
docs = []
sentences = []
conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
checked_for_ner = False
has_ner_tags = False
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
if not checked_for_ner:
has_ner_tags = is_ner(sentence[5][0])
checked_for_ner = True
sentences.append(generate_sentence(sentence, has_ner_tags))
# Real-sized documents could be extracted using the comments on the
# conllu document
if len(sentences) % n_sents == 0:
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
return docs
|
[
"def",
"conllu2json",
"(",
"input_data",
",",
"n_sents",
"=",
"10",
",",
"use_morphology",
"=",
"False",
",",
"lang",
"=",
"None",
")",
":",
"# by @dvsrepo, via #11 explosion/spacy-dev-resources",
"# by @katarkor",
"docs",
"=",
"[",
"]",
"sentences",
"=",
"[",
"]",
"conll_tuples",
"=",
"read_conllx",
"(",
"input_data",
",",
"use_morphology",
"=",
"use_morphology",
")",
"checked_for_ner",
"=",
"False",
"has_ner_tags",
"=",
"False",
"for",
"i",
",",
"(",
"raw_text",
",",
"tokens",
")",
"in",
"enumerate",
"(",
"conll_tuples",
")",
":",
"sentence",
",",
"brackets",
"=",
"tokens",
"[",
"0",
"]",
"if",
"not",
"checked_for_ner",
":",
"has_ner_tags",
"=",
"is_ner",
"(",
"sentence",
"[",
"5",
"]",
"[",
"0",
"]",
")",
"checked_for_ner",
"=",
"True",
"sentences",
".",
"append",
"(",
"generate_sentence",
"(",
"sentence",
",",
"has_ner_tags",
")",
")",
"# Real-sized documents could be extracted using the comments on the",
"# conluu document",
"if",
"len",
"(",
"sentences",
")",
"%",
"n_sents",
"==",
"0",
":",
"doc",
"=",
"create_doc",
"(",
"sentences",
",",
"i",
")",
"docs",
".",
"append",
"(",
"doc",
")",
"sentences",
"=",
"[",
"]",
"return",
"docs"
] |
Convert conllu files into JSON format for use with train cli.
use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not so rich.
Extract NER tags if available and convert them so that they follow
BILUO and the Wikipedia scheme
|
[
"Convert",
"conllu",
"files",
"into",
"JSON",
"format",
"for",
"use",
"with",
"train",
"cli",
".",
"use_morphology",
"parameter",
"enables",
"appending",
"morphology",
"to",
"tags",
"which",
"is",
"useful",
"for",
"languages",
"such",
"as",
"Spanish",
"where",
"UD",
"tags",
"are",
"not",
"so",
"rich",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L9-L37
|
21,271
|
explosion/spaCy
|
spacy/cli/converters/conllu2json.py
|
is_ner
|
def is_ner(tag):
"""
Check the 10th column of the first token to determine if the file contains
NER tags
"""
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
return True
elif tag == "O":
return True
else:
return False
|
python
|
def is_ner(tag):
"""
Check the 10th column of the first token to determine if the file contains
NER tags
"""
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
return True
elif tag == "O":
return True
else:
return False
|
[
"def",
"is_ner",
"(",
"tag",
")",
":",
"tag_match",
"=",
"re",
".",
"match",
"(",
"\"([A-Z_]+)-([A-Z_]+)\"",
",",
"tag",
")",
"if",
"tag_match",
":",
"return",
"True",
"elif",
"tag",
"==",
"\"O\"",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check the 10th column of the first token to determine if the file contains
NER tags
|
[
"Check",
"the",
"10th",
"column",
"of",
"the",
"first",
"token",
"to",
"determine",
"if",
"the",
"file",
"contains",
"NER",
"tags"
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L40-L51
|
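So a tag such as "B-PER" or "U-LOC" (or a bare "O") marks the column as NER data, while a plain POS tag does not. A quick check of the same regex on sample tags:
import re
for tag in ["B-PER", "U-LOC", "O", "NOUN"]:
    is_ner_tag = bool(re.match("([A-Z_]+)-([A-Z_]+)", tag)) or tag == "O"
    print(tag, is_ner_tag)   # True, True, True, False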
21,272
|
explosion/spaCy
|
examples/training/train_intent_parser.py
|
main
|
def main(model=None, output_dir=None, n_iter=15):
"""Load the model, set up the pipeline and train the parser."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# We'll use the built-in dependency parser class, but we want to create a
# fresh instance – just in case.
if "parser" in nlp.pipe_names:
nlp.remove_pipe("parser")
parser = nlp.create_pipe("parser")
nlp.add_pipe(parser, first=True)
for text, annotations in TRAIN_DATA:
for dep in annotations.get("deps", []):
parser.add_label(dep)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"]
with nlp.disable_pipes(*other_pipes): # only train parser
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
print("Losses", losses)
# test the trained model
test_model(nlp)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
test_model(nlp2)
|
python
|
def main(model=None, output_dir=None, n_iter=15):
"""Load the model, set up the pipeline and train the parser."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# We'll use the built-in dependency parser class, but we want to create a
# fresh instance – just in case.
if "parser" in nlp.pipe_names:
nlp.remove_pipe("parser")
parser = nlp.create_pipe("parser")
nlp.add_pipe(parser, first=True)
for text, annotations in TRAIN_DATA:
for dep in annotations.get("deps", []):
parser.add_label(dep)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"]
with nlp.disable_pipes(*other_pipes): # only train parser
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
print("Losses", losses)
# test the trained model
test_model(nlp)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
test_model(nlp2)
|
[
"def",
"main",
"(",
"model",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"n_iter",
"=",
"15",
")",
":",
"if",
"model",
"is",
"not",
"None",
":",
"nlp",
"=",
"spacy",
".",
"load",
"(",
"model",
")",
"# load existing spaCy model",
"print",
"(",
"\"Loaded model '%s'\"",
"%",
"model",
")",
"else",
":",
"nlp",
"=",
"spacy",
".",
"blank",
"(",
"\"en\"",
")",
"# create blank Language class",
"print",
"(",
"\"Created blank 'en' model\"",
")",
"# We'll use the built-in dependency parser class, but we want to create a",
"# fresh instance – just in case.",
"if",
"\"parser\"",
"in",
"nlp",
".",
"pipe_names",
":",
"nlp",
".",
"remove_pipe",
"(",
"\"parser\"",
")",
"parser",
"=",
"nlp",
".",
"create_pipe",
"(",
"\"parser\"",
")",
"nlp",
".",
"add_pipe",
"(",
"parser",
",",
"first",
"=",
"True",
")",
"for",
"text",
",",
"annotations",
"in",
"TRAIN_DATA",
":",
"for",
"dep",
"in",
"annotations",
".",
"get",
"(",
"\"deps\"",
",",
"[",
"]",
")",
":",
"parser",
".",
"add_label",
"(",
"dep",
")",
"other_pipes",
"=",
"[",
"pipe",
"for",
"pipe",
"in",
"nlp",
".",
"pipe_names",
"if",
"pipe",
"!=",
"\"parser\"",
"]",
"with",
"nlp",
".",
"disable_pipes",
"(",
"*",
"other_pipes",
")",
":",
"# only train parser",
"optimizer",
"=",
"nlp",
".",
"begin_training",
"(",
")",
"for",
"itn",
"in",
"range",
"(",
"n_iter",
")",
":",
"random",
".",
"shuffle",
"(",
"TRAIN_DATA",
")",
"losses",
"=",
"{",
"}",
"# batch up the examples using spaCy's minibatch",
"batches",
"=",
"minibatch",
"(",
"TRAIN_DATA",
",",
"size",
"=",
"compounding",
"(",
"4.0",
",",
"32.0",
",",
"1.001",
")",
")",
"for",
"batch",
"in",
"batches",
":",
"texts",
",",
"annotations",
"=",
"zip",
"(",
"*",
"batch",
")",
"nlp",
".",
"update",
"(",
"texts",
",",
"annotations",
",",
"sgd",
"=",
"optimizer",
",",
"losses",
"=",
"losses",
")",
"print",
"(",
"\"Losses\"",
",",
"losses",
")",
"# test the trained model",
"test_model",
"(",
"nlp",
")",
"# save model to output directory",
"if",
"output_dir",
"is",
"not",
"None",
":",
"output_dir",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
")",
"nlp",
".",
"to_disk",
"(",
"output_dir",
")",
"print",
"(",
"\"Saved model to\"",
",",
"output_dir",
")",
"# test the saved model",
"print",
"(",
"\"Loading from\"",
",",
"output_dir",
")",
"nlp2",
"=",
"spacy",
".",
"load",
"(",
"output_dir",
")",
"test_model",
"(",
"nlp2",
")"
] |
Load the model, set up the pipeline and train the parser.
|
[
"Load",
"the",
"model",
"set",
"up",
"the",
"pipeline",
"and",
"train",
"the",
"parser",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_intent_parser.py#L107-L154
|
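The batching in the training loop relies on two helpers from spacy.util; a short sketch of their behaviour on toy data (exact sizes depend on the compounding schedule):
from spacy.util import minibatch, compounding
sizes = compounding(4.0, 32.0, 1.001)          # generator: 4.0, 4.004, 4.008, ..., capped at 32.0
batches = list(minibatch(range(100), size=sizes))
print([len(batch) for batch in batches[:5]])   # batch sizes start at 4 and grow slowly toward 32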
21,273
|
explosion/spaCy
|
spacy/language.py
|
Language.get_pipe
|
def get_pipe(self, name):
"""Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
"""
for pipe_name, component in self.pipeline:
if pipe_name == name:
return component
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
|
python
|
def get_pipe(self, name):
"""Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
"""
for pipe_name, component in self.pipeline:
if pipe_name == name:
return component
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
|
[
"def",
"get_pipe",
"(",
"self",
",",
"name",
")",
":",
"for",
"pipe_name",
",",
"component",
"in",
"self",
".",
"pipeline",
":",
"if",
"pipe_name",
"==",
"name",
":",
"return",
"component",
"raise",
"KeyError",
"(",
"Errors",
".",
"E001",
".",
"format",
"(",
"name",
"=",
"name",
",",
"opts",
"=",
"self",
".",
"pipe_names",
")",
")"
] |
Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
|
[
"Get",
"a",
"pipeline",
"component",
"for",
"a",
"given",
"component",
"name",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L232-L243
|
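A small usage sketch (assumes an installed en_core_web_sm model, whose default components are tagger, parser and ner):
import spacy
nlp = spacy.load("en_core_web_sm")   # assumed model name
print(nlp.pipe_names)                # ['tagger', 'parser', 'ner']
ner = nlp.get_pipe("ner")            # the EntityRecognizer instance
# nlp.get_pipe("textcat")            # would raise KeyError (Errors.E001): no such component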
21,274
|
explosion/spaCy
|
spacy/language.py
|
Language.replace_pipe
|
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
self.pipeline[self.pipe_names.index(name)] = (name, component)
|
python
|
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
self.pipeline[self.pipe_names.index(name)] = (name, component)
|
[
"def",
"replace_pipe",
"(",
"self",
",",
"name",
",",
"component",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"pipe_names",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E001",
".",
"format",
"(",
"name",
"=",
"name",
",",
"opts",
"=",
"self",
".",
"pipe_names",
")",
")",
"self",
".",
"pipeline",
"[",
"self",
".",
"pipe_names",
".",
"index",
"(",
"name",
")",
"]",
"=",
"(",
"name",
",",
"component",
")"
] |
Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
|
[
"Replace",
"a",
"component",
"in",
"the",
"pipeline",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L326-L336
|
21,275
|
explosion/spaCy
|
spacy/language.py
|
Language.rename_pipe
|
def rename_pipe(self, old_name, new_name):
"""Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
"""
if old_name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
if new_name in self.pipe_names:
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
i = self.pipe_names.index(old_name)
self.pipeline[i] = (new_name, self.pipeline[i][1])
|
python
|
def rename_pipe(self, old_name, new_name):
"""Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
"""
if old_name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
if new_name in self.pipe_names:
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
i = self.pipe_names.index(old_name)
self.pipeline[i] = (new_name, self.pipeline[i][1])
|
[
"def",
"rename_pipe",
"(",
"self",
",",
"old_name",
",",
"new_name",
")",
":",
"if",
"old_name",
"not",
"in",
"self",
".",
"pipe_names",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E001",
".",
"format",
"(",
"name",
"=",
"old_name",
",",
"opts",
"=",
"self",
".",
"pipe_names",
")",
")",
"if",
"new_name",
"in",
"self",
".",
"pipe_names",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E007",
".",
"format",
"(",
"name",
"=",
"new_name",
",",
"opts",
"=",
"self",
".",
"pipe_names",
")",
")",
"i",
"=",
"self",
".",
"pipe_names",
".",
"index",
"(",
"old_name",
")",
"self",
".",
"pipeline",
"[",
"i",
"]",
"=",
"(",
"new_name",
",",
"self",
".",
"pipeline",
"[",
"i",
"]",
"[",
"1",
"]",
")"
] |
Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
|
[
"Rename",
"a",
"pipeline",
"component",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L338-L351
|
21,276
|
explosion/spaCy
|
spacy/language.py
|
Language.remove_pipe
|
def remove_pipe(self, name):
"""Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
return self.pipeline.pop(self.pipe_names.index(name))
|
python
|
def remove_pipe(self, name):
"""Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
return self.pipeline.pop(self.pipe_names.index(name))
|
[
"def",
"remove_pipe",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"pipe_names",
":",
"raise",
"ValueError",
"(",
"Errors",
".",
"E001",
".",
"format",
"(",
"name",
"=",
"name",
",",
"opts",
"=",
"self",
".",
"pipe_names",
")",
")",
"return",
"self",
".",
"pipeline",
".",
"pop",
"(",
"self",
".",
"pipe_names",
".",
"index",
"(",
"name",
")",
")"
] |
Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
|
[
"Remove",
"a",
"component",
"from",
"the",
"pipeline",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L353-L363
|
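replace_pipe, rename_pipe and remove_pipe share one pattern: validate the name against pipe_names (raising E001 or E007 otherwise), then mutate self.pipeline in place. A usage sketch with a hypothetical no-op component (assumes an installed en_core_web_sm model):
import spacy
def my_component(doc):               # hypothetical component, for illustration only
    return doc
nlp = spacy.load("en_core_web_sm")   # assumed model name
nlp.add_pipe(my_component, name="custom", last=True)
nlp.rename_pipe("custom", "custom_v2")
nlp.replace_pipe("custom_v2", my_component)
name, component = nlp.remove_pipe("custom_v2")
print(nlp.pipe_names)                # back to ['tagger', 'parser', 'ner']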
21,277
|
explosion/spaCy
|
spacy/language.py
|
Language.update
|
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
golds = gold_objs
docs = doc_objs
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
|
python
|
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
golds = gold_objs
docs = doc_objs
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
|
[
"def",
"update",
"(",
"self",
",",
"docs",
",",
"golds",
",",
"drop",
"=",
"0.0",
",",
"sgd",
"=",
"None",
",",
"losses",
"=",
"None",
",",
"component_cfg",
"=",
"None",
")",
":",
"if",
"len",
"(",
"docs",
")",
"!=",
"len",
"(",
"golds",
")",
":",
"raise",
"IndexError",
"(",
"Errors",
".",
"E009",
".",
"format",
"(",
"n_docs",
"=",
"len",
"(",
"docs",
")",
",",
"n_golds",
"=",
"len",
"(",
"golds",
")",
")",
")",
"if",
"len",
"(",
"docs",
")",
"==",
"0",
":",
"return",
"if",
"sgd",
"is",
"None",
":",
"if",
"self",
".",
"_optimizer",
"is",
"None",
":",
"self",
".",
"_optimizer",
"=",
"create_default_optimizer",
"(",
"Model",
".",
"ops",
")",
"sgd",
"=",
"self",
".",
"_optimizer",
"# Allow dict of args to GoldParse, instead of GoldParse objects.",
"gold_objs",
"=",
"[",
"]",
"doc_objs",
"=",
"[",
"]",
"for",
"doc",
",",
"gold",
"in",
"zip",
"(",
"docs",
",",
"golds",
")",
":",
"if",
"isinstance",
"(",
"doc",
",",
"basestring_",
")",
":",
"doc",
"=",
"self",
".",
"make_doc",
"(",
"doc",
")",
"if",
"not",
"isinstance",
"(",
"gold",
",",
"GoldParse",
")",
":",
"gold",
"=",
"GoldParse",
"(",
"doc",
",",
"*",
"*",
"gold",
")",
"doc_objs",
".",
"append",
"(",
"doc",
")",
"gold_objs",
".",
"append",
"(",
"gold",
")",
"golds",
"=",
"gold_objs",
"docs",
"=",
"doc_objs",
"grads",
"=",
"{",
"}",
"def",
"get_grads",
"(",
"W",
",",
"dW",
",",
"key",
"=",
"None",
")",
":",
"grads",
"[",
"key",
"]",
"=",
"(",
"W",
",",
"dW",
")",
"get_grads",
".",
"alpha",
"=",
"sgd",
".",
"alpha",
"get_grads",
".",
"b1",
"=",
"sgd",
".",
"b1",
"get_grads",
".",
"b2",
"=",
"sgd",
".",
"b2",
"pipes",
"=",
"list",
"(",
"self",
".",
"pipeline",
")",
"random",
".",
"shuffle",
"(",
"pipes",
")",
"if",
"component_cfg",
"is",
"None",
":",
"component_cfg",
"=",
"{",
"}",
"for",
"name",
",",
"proc",
"in",
"pipes",
":",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"update\"",
")",
":",
"continue",
"grads",
"=",
"{",
"}",
"kwargs",
"=",
"component_cfg",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"kwargs",
".",
"setdefault",
"(",
"\"drop\"",
",",
"drop",
")",
"proc",
".",
"update",
"(",
"docs",
",",
"golds",
",",
"sgd",
"=",
"get_grads",
",",
"losses",
"=",
"losses",
",",
"*",
"*",
"kwargs",
")",
"for",
"key",
",",
"(",
"W",
",",
"dW",
")",
"in",
"grads",
".",
"items",
"(",
")",
":",
"sgd",
"(",
"W",
",",
"dW",
",",
"key",
"=",
"key",
")"
] |
Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
|
[
"Update",
"the",
"models",
"in",
"the",
"pipeline",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L408-L459
|
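A minimal training-loop sketch around nlp.update, passing raw texts and dicts of GoldParse arguments as the method allows (toy data and a blank English model; real training needs far more examples and iterations):
import random
import spacy
TRAIN_DATA = [
    ("Uber blew through $1 million a week", {"entities": [(0, 4, "ORG")]}),
    ("Google rebrands its business apps", {"entities": [(0, 6, "ORG")]}),
]
nlp = spacy.blank("en")
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner)
ner.add_label("ORG")
optimizer = nlp.begin_training()
for itn in range(10):
    random.shuffle(TRAIN_DATA)
    losses = {}
    texts, annotations = zip(*TRAIN_DATA)
    nlp.update(texts, annotations, drop=0.2, sgd=optimizer, losses=losses)
    print(losses)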
21,278
|
explosion/spaCy
|
spacy/language.py
|
Language.rehearse
|
def rehearse(self, docs, sgd=None, losses=None, config=None):
"""Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
"""
# TODO: document
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
docs = list(docs)
for i, doc in enumerate(docs):
if isinstance(doc, basestring_):
docs[i] = self.make_doc(doc)
pipes = list(self.pipeline)
random.shuffle(pipes)
if config is None:
config = {}
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
for name, proc in pipes:
if not hasattr(proc, "rehearse"):
continue
grads = {}
proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
return losses
|
python
|
def rehearse(self, docs, sgd=None, losses=None, config=None):
"""Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
"""
# TODO: document
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
docs = list(docs)
for i, doc in enumerate(docs):
if isinstance(doc, basestring_):
docs[i] = self.make_doc(doc)
pipes = list(self.pipeline)
random.shuffle(pipes)
if config is None:
config = {}
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
for name, proc in pipes:
if not hasattr(proc, "rehearse"):
continue
grads = {}
proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
return losses
|
[
"def",
"rehearse",
"(",
"self",
",",
"docs",
",",
"sgd",
"=",
"None",
",",
"losses",
"=",
"None",
",",
"config",
"=",
"None",
")",
":",
"# TODO: document",
"if",
"len",
"(",
"docs",
")",
"==",
"0",
":",
"return",
"if",
"sgd",
"is",
"None",
":",
"if",
"self",
".",
"_optimizer",
"is",
"None",
":",
"self",
".",
"_optimizer",
"=",
"create_default_optimizer",
"(",
"Model",
".",
"ops",
")",
"sgd",
"=",
"self",
".",
"_optimizer",
"docs",
"=",
"list",
"(",
"docs",
")",
"for",
"i",
",",
"doc",
"in",
"enumerate",
"(",
"docs",
")",
":",
"if",
"isinstance",
"(",
"doc",
",",
"basestring_",
")",
":",
"docs",
"[",
"i",
"]",
"=",
"self",
".",
"make_doc",
"(",
"doc",
")",
"pipes",
"=",
"list",
"(",
"self",
".",
"pipeline",
")",
"random",
".",
"shuffle",
"(",
"pipes",
")",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"{",
"}",
"grads",
"=",
"{",
"}",
"def",
"get_grads",
"(",
"W",
",",
"dW",
",",
"key",
"=",
"None",
")",
":",
"grads",
"[",
"key",
"]",
"=",
"(",
"W",
",",
"dW",
")",
"get_grads",
".",
"alpha",
"=",
"sgd",
".",
"alpha",
"get_grads",
".",
"b1",
"=",
"sgd",
".",
"b1",
"get_grads",
".",
"b2",
"=",
"sgd",
".",
"b2",
"for",
"name",
",",
"proc",
"in",
"pipes",
":",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"rehearse\"",
")",
":",
"continue",
"grads",
"=",
"{",
"}",
"proc",
".",
"rehearse",
"(",
"docs",
",",
"sgd",
"=",
"get_grads",
",",
"losses",
"=",
"losses",
",",
"*",
"*",
"config",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
")",
"for",
"key",
",",
"(",
"W",
",",
"dW",
")",
"in",
"grads",
".",
"items",
"(",
")",
":",
"sgd",
"(",
"W",
",",
"dW",
",",
"key",
"=",
"key",
")",
"return",
"losses"
] |
Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
|
[
"Make",
"a",
"rehearsal",
"update",
"to",
"the",
"models",
"in",
"the",
"pipeline",
"to",
"prevent",
"forgetting",
".",
"Rehearsal",
"updates",
"run",
"an",
"initial",
"copy",
"of",
"the",
"model",
"over",
"some",
"data",
"and",
"update",
"the",
"model",
"so",
"its",
"current",
"predictions",
"are",
"more",
"like",
"the",
"initial",
"ones",
".",
"This",
"is",
"useful",
"for",
"keeping",
"a",
"pre",
"-",
"trained",
"model",
"on",
"-",
"track",
"even",
"if",
"you",
"re",
"updating",
"it",
"with",
"a",
"smaller",
"set",
"of",
"examples",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L461-L511
|
21,279
|
explosion/spaCy
|
spacy/language.py
|
Language.preprocess_gold
|
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold
|
python
|
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold
|
[
"def",
"preprocess_gold",
"(",
"self",
",",
"docs_golds",
")",
":",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"hasattr",
"(",
"proc",
",",
"\"preprocess_gold\"",
")",
":",
"docs_golds",
"=",
"proc",
".",
"preprocess_gold",
"(",
"docs_golds",
")",
"for",
"doc",
",",
"gold",
"in",
"docs_golds",
":",
"yield",
"doc",
",",
"gold"
] |
Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
|
[
"Can",
"be",
"called",
"before",
"training",
"to",
"pre",
"-",
"process",
"gold",
"data",
".",
"By",
"default",
"it",
"handles",
"nonprojectivity",
"and",
"adds",
"missing",
"tags",
"to",
"the",
"tag",
"map",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L513-L524
|
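As a small illustration of the preprocess_gold() hook, the sketch below passes a single (Doc, GoldParse) pair through a blank pipeline; with no components installed it is a pass-through, and the example sentence and entity span are assumptions.

import spacy
from spacy.gold import GoldParse

nlp = spacy.blank("en")
doc = nlp.make_doc("I like London")
gold = GoldParse(doc, entities=[(7, 13, "GPE")])

# Components such as the parser use this hook to projectivise heads and
# extend the tag map before training; an empty pipeline just yields the
# tuples unchanged.
for doc, gold in nlp.preprocess_gold([(doc, gold)]):
    print(doc.text, gold.ner)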
21,280
|
explosion/spaCy
|
spacy/language.py
|
Language.begin_training
|
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer
|
python
|
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer
|
[
"def",
"begin_training",
"(",
"self",
",",
"get_gold_tuples",
"=",
"None",
",",
"sgd",
"=",
"None",
",",
"component_cfg",
"=",
"None",
",",
"*",
"*",
"cfg",
")",
":",
"if",
"get_gold_tuples",
"is",
"None",
":",
"get_gold_tuples",
"=",
"lambda",
":",
"[",
"]",
"# Populate vocab",
"else",
":",
"for",
"_",
",",
"annots_brackets",
"in",
"get_gold_tuples",
"(",
")",
":",
"for",
"annots",
",",
"_",
"in",
"annots_brackets",
":",
"for",
"word",
"in",
"annots",
"[",
"1",
"]",
":",
"_",
"=",
"self",
".",
"vocab",
"[",
"word",
"]",
"# noqa: F841",
"if",
"cfg",
".",
"get",
"(",
"\"device\"",
",",
"-",
"1",
")",
">=",
"0",
":",
"util",
".",
"use_gpu",
"(",
"cfg",
"[",
"\"device\"",
"]",
")",
"if",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
".",
"shape",
"[",
"1",
"]",
">=",
"1",
":",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
"=",
"Model",
".",
"ops",
".",
"asarray",
"(",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
")",
"link_vectors_to_models",
"(",
"self",
".",
"vocab",
")",
"if",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
".",
"shape",
"[",
"1",
"]",
":",
"cfg",
"[",
"\"pretrained_vectors\"",
"]",
"=",
"self",
".",
"vocab",
".",
"vectors",
".",
"name",
"if",
"sgd",
"is",
"None",
":",
"sgd",
"=",
"create_default_optimizer",
"(",
"Model",
".",
"ops",
")",
"self",
".",
"_optimizer",
"=",
"sgd",
"if",
"component_cfg",
"is",
"None",
":",
"component_cfg",
"=",
"{",
"}",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"hasattr",
"(",
"proc",
",",
"\"begin_training\"",
")",
":",
"kwargs",
"=",
"component_cfg",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"kwargs",
".",
"update",
"(",
"cfg",
")",
"proc",
".",
"begin_training",
"(",
"get_gold_tuples",
",",
"pipeline",
"=",
"self",
".",
"pipeline",
",",
"sgd",
"=",
"self",
".",
"_optimizer",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_optimizer"
] |
Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
|
[
"Allocate",
"models",
"pre",
"-",
"process",
"training",
"data",
"and",
"acquire",
"a",
"trainer",
"and",
"optimizer",
".",
"Used",
"as",
"a",
"contextmanager",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L526-L567
|
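A hedged sketch of the intended call order: begin_training() allocates the models and returns the optimizer that the update() loop is then driven with. The toy data, the "GPE" label and the batch size are assumptions made for illustration.

import random
import spacy
from spacy.util import minibatch

TRAIN_DATA = [
    ("San Francisco considers banning sidewalk robots", {"entities": [(0, 13, "GPE")]}),
    ("London is a big city", {"entities": [(0, 6, "GPE")]}),
]

nlp = spacy.blank("en")
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner)
ner.add_label("GPE")

optimizer = nlp.begin_training()   # allocate models, get the optimizer
for epoch in range(5):
    random.shuffle(TRAIN_DATA)
    for batch in minibatch(TRAIN_DATA, size=2):
        texts, annots = zip(*batch)
        nlp.update(list(texts), list(annots), sgd=optimizer, drop=0.2)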
21,281
|
explosion/spaCy
|
spacy/language.py
|
Language.resume_training
|
def resume_training(self, sgd=None, **cfg):
"""Continue training a pre-trained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
"""
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "_rehearsal_model"):
proc._rehearsal_model = deepcopy(proc.model)
return self._optimizer
|
python
|
def resume_training(self, sgd=None, **cfg):
"""Continue training a pre-trained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
"""
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "_rehearsal_model"):
proc._rehearsal_model = deepcopy(proc.model)
return self._optimizer
|
[
"def",
"resume_training",
"(",
"self",
",",
"sgd",
"=",
"None",
",",
"*",
"*",
"cfg",
")",
":",
"if",
"cfg",
".",
"get",
"(",
"\"device\"",
",",
"-",
"1",
")",
">=",
"0",
":",
"util",
".",
"use_gpu",
"(",
"cfg",
"[",
"\"device\"",
"]",
")",
"if",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
".",
"shape",
"[",
"1",
"]",
">=",
"1",
":",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
"=",
"Model",
".",
"ops",
".",
"asarray",
"(",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
")",
"link_vectors_to_models",
"(",
"self",
".",
"vocab",
")",
"if",
"self",
".",
"vocab",
".",
"vectors",
".",
"data",
".",
"shape",
"[",
"1",
"]",
":",
"cfg",
"[",
"\"pretrained_vectors\"",
"]",
"=",
"self",
".",
"vocab",
".",
"vectors",
".",
"name",
"if",
"sgd",
"is",
"None",
":",
"sgd",
"=",
"create_default_optimizer",
"(",
"Model",
".",
"ops",
")",
"self",
".",
"_optimizer",
"=",
"sgd",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"hasattr",
"(",
"proc",
",",
"\"_rehearsal_model\"",
")",
":",
"proc",
".",
"_rehearsal_model",
"=",
"deepcopy",
"(",
"proc",
".",
"model",
")",
"return",
"self",
".",
"_optimizer"
] |
Continue training a pre-trained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
|
[
"Continue",
"training",
"a",
"pre",
"-",
"trained",
"model",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L569-L591
|
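The sketch below shows the intended pairing of resume_training() with rehearse(); it assumes the en_core_web_sm package is installed, which is not part of the source above.

import spacy

nlp = spacy.load("en_core_web_sm")       # assumed pre-trained model
optimizer = nlp.resume_training()        # snapshots _rehearsal_model copies

raw_texts = [
    "Some unlabelled text the model should keep handling well.",
    "Another plain sentence used only for rehearsal.",
]
losses = {}
raw_batch = [nlp.make_doc(text) for text in raw_texts]
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
print(losses)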
21,282
|
explosion/spaCy
|
spacy/language.py
|
Language.use_params
|
def use_params(self, params, **cfg):
"""Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
"""
contexts = [
pipe.use_params(params)
for name, pipe in self.pipeline
if hasattr(pipe, "use_params")
]
# TODO: Having trouble with contextlib
# Workaround: these aren't actually context managers atm.
for context in contexts:
try:
next(context)
except StopIteration:
pass
yield
for context in contexts:
try:
next(context)
except StopIteration:
pass
|
python
|
def use_params(self, params, **cfg):
"""Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
"""
contexts = [
pipe.use_params(params)
for name, pipe in self.pipeline
if hasattr(pipe, "use_params")
]
# TODO: Having trouble with contextlib
# Workaround: these aren't actually context managers atm.
for context in contexts:
try:
next(context)
except StopIteration:
pass
yield
for context in contexts:
try:
next(context)
except StopIteration:
pass
|
[
"def",
"use_params",
"(",
"self",
",",
"params",
",",
"*",
"*",
"cfg",
")",
":",
"contexts",
"=",
"[",
"pipe",
".",
"use_params",
"(",
"params",
")",
"for",
"name",
",",
"pipe",
"in",
"self",
".",
"pipeline",
"if",
"hasattr",
"(",
"pipe",
",",
"\"use_params\"",
")",
"]",
"# TODO: Having trouble with contextlib",
"# Workaround: these aren't actually context managers atm.",
"for",
"context",
"in",
"contexts",
":",
"try",
":",
"next",
"(",
"context",
")",
"except",
"StopIteration",
":",
"pass",
"yield",
"for",
"context",
"in",
"contexts",
":",
"try",
":",
"next",
"(",
"context",
")",
"except",
"StopIteration",
":",
"pass"
] |
Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
|
[
"Replace",
"weights",
"of",
"models",
"in",
"the",
"pipeline",
"with",
"those",
"provided",
"in",
"the",
"params",
"dictionary",
".",
"Can",
"be",
"used",
"as",
"a",
"contextmanager",
"in",
"which",
"case",
"models",
"go",
"back",
"to",
"their",
"original",
"weights",
"after",
"the",
"block",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L619-L648
|
21,283
|
explosion/spaCy
|
spacy/language.py
|
Language.pipe
|
def pipe(
self,
texts,
as_tuples=False,
n_threads=-1,
batch_size=1000,
disable=[],
cleanup=False,
component_cfg=None,
):
"""Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
"""
if n_threads != -1:
deprecation_warning(Warnings.W016)
if as_tuples:
text_context1, text_context2 = itertools.tee(texts)
texts = (tc[0] for tc in text_context1)
contexts = (tc[1] for tc in text_context2)
docs = self.pipe(
texts,
batch_size=batch_size,
disable=disable,
component_cfg=component_cfg,
)
for doc, context in izip(docs, contexts):
yield (doc, context)
return
docs = (self.make_doc(text) for text in texts)
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if name in disable:
continue
kwargs = component_cfg.get(name, {})
# Allow component_cfg to overwrite the top-level kwargs.
kwargs.setdefault("batch_size", batch_size)
if hasattr(proc, "pipe"):
docs = proc.pipe(docs, **kwargs)
else:
# Apply the function, but yield the doc
docs = _pipe(proc, docs, kwargs)
# Track weakrefs of "recent" documents, so that we can see when they
# expire from memory. When they do, we know we don't need old strings.
# This way, we avoid maintaining an unbounded growth in string entries
# in the string store.
recent_refs = weakref.WeakSet()
old_refs = weakref.WeakSet()
# Keep track of the original string data, so that if we flush old strings,
# we can recover the original ones. However, we only want to do this if we're
# really adding strings, to save up-front costs.
original_strings_data = None
nr_seen = 0
for doc in docs:
yield doc
if cleanup:
recent_refs.add(doc)
if nr_seen < 10000:
old_refs.add(doc)
nr_seen += 1
elif len(old_refs) == 0:
old_refs, recent_refs = recent_refs, old_refs
if original_strings_data is None:
original_strings_data = list(self.vocab.strings)
else:
keys, strings = self.vocab.strings._cleanup_stale_strings(
original_strings_data
)
self.vocab._reset_cache(keys, strings)
self.tokenizer._reset_cache(keys)
nr_seen = 0
|
python
|
def pipe(
self,
texts,
as_tuples=False,
n_threads=-1,
batch_size=1000,
disable=[],
cleanup=False,
component_cfg=None,
):
"""Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
"""
if n_threads != -1:
deprecation_warning(Warnings.W016)
if as_tuples:
text_context1, text_context2 = itertools.tee(texts)
texts = (tc[0] for tc in text_context1)
contexts = (tc[1] for tc in text_context2)
docs = self.pipe(
texts,
batch_size=batch_size,
disable=disable,
component_cfg=component_cfg,
)
for doc, context in izip(docs, contexts):
yield (doc, context)
return
docs = (self.make_doc(text) for text in texts)
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if name in disable:
continue
kwargs = component_cfg.get(name, {})
# Allow component_cfg to overwrite the top-level kwargs.
kwargs.setdefault("batch_size", batch_size)
if hasattr(proc, "pipe"):
docs = proc.pipe(docs, **kwargs)
else:
# Apply the function, but yield the doc
docs = _pipe(proc, docs, kwargs)
# Track weakrefs of "recent" documents, so that we can see when they
# expire from memory. When they do, we know we don't need old strings.
# This way, we avoid maintaining an unbounded growth in string entries
# in the string store.
recent_refs = weakref.WeakSet()
old_refs = weakref.WeakSet()
# Keep track of the original string data, so that if we flush old strings,
# we can recover the original ones. However, we only want to do this if we're
# really adding strings, to save up-front costs.
original_strings_data = None
nr_seen = 0
for doc in docs:
yield doc
if cleanup:
recent_refs.add(doc)
if nr_seen < 10000:
old_refs.add(doc)
nr_seen += 1
elif len(old_refs) == 0:
old_refs, recent_refs = recent_refs, old_refs
if original_strings_data is None:
original_strings_data = list(self.vocab.strings)
else:
keys, strings = self.vocab.strings._cleanup_stale_strings(
original_strings_data
)
self.vocab._reset_cache(keys, strings)
self.tokenizer._reset_cache(keys)
nr_seen = 0
|
[
"def",
"pipe",
"(",
"self",
",",
"texts",
",",
"as_tuples",
"=",
"False",
",",
"n_threads",
"=",
"-",
"1",
",",
"batch_size",
"=",
"1000",
",",
"disable",
"=",
"[",
"]",
",",
"cleanup",
"=",
"False",
",",
"component_cfg",
"=",
"None",
",",
")",
":",
"if",
"n_threads",
"!=",
"-",
"1",
":",
"deprecation_warning",
"(",
"Warnings",
".",
"W016",
")",
"if",
"as_tuples",
":",
"text_context1",
",",
"text_context2",
"=",
"itertools",
".",
"tee",
"(",
"texts",
")",
"texts",
"=",
"(",
"tc",
"[",
"0",
"]",
"for",
"tc",
"in",
"text_context1",
")",
"contexts",
"=",
"(",
"tc",
"[",
"1",
"]",
"for",
"tc",
"in",
"text_context2",
")",
"docs",
"=",
"self",
".",
"pipe",
"(",
"texts",
",",
"batch_size",
"=",
"batch_size",
",",
"disable",
"=",
"disable",
",",
"component_cfg",
"=",
"component_cfg",
",",
")",
"for",
"doc",
",",
"context",
"in",
"izip",
"(",
"docs",
",",
"contexts",
")",
":",
"yield",
"(",
"doc",
",",
"context",
")",
"return",
"docs",
"=",
"(",
"self",
".",
"make_doc",
"(",
"text",
")",
"for",
"text",
"in",
"texts",
")",
"if",
"component_cfg",
"is",
"None",
":",
"component_cfg",
"=",
"{",
"}",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"name",
"in",
"disable",
":",
"continue",
"kwargs",
"=",
"component_cfg",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"# Allow component_cfg to overwrite the top-level kwargs.",
"kwargs",
".",
"setdefault",
"(",
"\"batch_size\"",
",",
"batch_size",
")",
"if",
"hasattr",
"(",
"proc",
",",
"\"pipe\"",
")",
":",
"docs",
"=",
"proc",
".",
"pipe",
"(",
"docs",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# Apply the function, but yield the doc",
"docs",
"=",
"_pipe",
"(",
"proc",
",",
"docs",
",",
"kwargs",
")",
"# Track weakrefs of \"recent\" documents, so that we can see when they",
"# expire from memory. When they do, we know we don't need old strings.",
"# This way, we avoid maintaining an unbounded growth in string entries",
"# in the string store.",
"recent_refs",
"=",
"weakref",
".",
"WeakSet",
"(",
")",
"old_refs",
"=",
"weakref",
".",
"WeakSet",
"(",
")",
"# Keep track of the original string data, so that if we flush old strings,",
"# we can recover the original ones. However, we only want to do this if we're",
"# really adding strings, to save up-front costs.",
"original_strings_data",
"=",
"None",
"nr_seen",
"=",
"0",
"for",
"doc",
"in",
"docs",
":",
"yield",
"doc",
"if",
"cleanup",
":",
"recent_refs",
".",
"add",
"(",
"doc",
")",
"if",
"nr_seen",
"<",
"10000",
":",
"old_refs",
".",
"add",
"(",
"doc",
")",
"nr_seen",
"+=",
"1",
"elif",
"len",
"(",
"old_refs",
")",
"==",
"0",
":",
"old_refs",
",",
"recent_refs",
"=",
"recent_refs",
",",
"old_refs",
"if",
"original_strings_data",
"is",
"None",
":",
"original_strings_data",
"=",
"list",
"(",
"self",
".",
"vocab",
".",
"strings",
")",
"else",
":",
"keys",
",",
"strings",
"=",
"self",
".",
"vocab",
".",
"strings",
".",
"_cleanup_stale_strings",
"(",
"original_strings_data",
")",
"self",
".",
"vocab",
".",
"_reset_cache",
"(",
"keys",
",",
"strings",
")",
"self",
".",
"tokenizer",
".",
"_reset_cache",
"(",
"keys",
")",
"nr_seen",
"=",
"0"
] |
Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
|
[
"Process",
"texts",
"as",
"a",
"stream",
"and",
"yield",
"Doc",
"objects",
"in",
"order",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L650-L733
|
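A short usage sketch for the streaming API, again assuming an installed en_core_web_sm model; it shows the as_tuples mode in which contexts travel alongside their documents.

import spacy

nlp = spacy.load("en_core_web_sm")   # assumed pre-trained model
texts = [
    ("Net income was $9.4 million compared to the prior year", "Q1 report"),
    ("Revenue exceeded twelve billion dollars", "Q2 report"),
]

# With as_tuples=True the input is (text, context) pairs and the output is
# (doc, context) pairs, yielded in the original order.
for doc, context in nlp.pipe(texts, as_tuples=True, batch_size=50):
    print(context, [(ent.text, ent.label_) for ent in doc.ents])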
21,284
|
explosion/spaCy
|
spacy/language.py
|
Language.to_disk
|
def to_disk(self, path, exclude=tuple(), disable=None):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
serializers = OrderedDict()
serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"])
serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta))
for name, proc in self.pipeline:
if not hasattr(proc, "name"):
continue
if name in exclude:
continue
if not hasattr(proc, "to_disk"):
continue
serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
serializers["vocab"] = lambda p: self.vocab.to_disk(p)
util.to_disk(path, serializers, exclude)
|
python
|
def to_disk(self, path, exclude=tuple(), disable=None):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
serializers = OrderedDict()
serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"])
serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta))
for name, proc in self.pipeline:
if not hasattr(proc, "name"):
continue
if name in exclude:
continue
if not hasattr(proc, "to_disk"):
continue
serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
serializers["vocab"] = lambda p: self.vocab.to_disk(p)
util.to_disk(path, serializers, exclude)
|
[
"def",
"to_disk",
"(",
"self",
",",
"path",
",",
"exclude",
"=",
"tuple",
"(",
")",
",",
"disable",
"=",
"None",
")",
":",
"if",
"disable",
"is",
"not",
"None",
":",
"deprecation_warning",
"(",
"Warnings",
".",
"W014",
")",
"exclude",
"=",
"disable",
"path",
"=",
"util",
".",
"ensure_path",
"(",
"path",
")",
"serializers",
"=",
"OrderedDict",
"(",
")",
"serializers",
"[",
"\"tokenizer\"",
"]",
"=",
"lambda",
"p",
":",
"self",
".",
"tokenizer",
".",
"to_disk",
"(",
"p",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"serializers",
"[",
"\"meta.json\"",
"]",
"=",
"lambda",
"p",
":",
"p",
".",
"open",
"(",
"\"w\"",
")",
".",
"write",
"(",
"srsly",
".",
"json_dumps",
"(",
"self",
".",
"meta",
")",
")",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"name\"",
")",
":",
"continue",
"if",
"name",
"in",
"exclude",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"to_disk\"",
")",
":",
"continue",
"serializers",
"[",
"name",
"]",
"=",
"lambda",
"p",
",",
"proc",
"=",
"proc",
":",
"proc",
".",
"to_disk",
"(",
"p",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"serializers",
"[",
"\"vocab\"",
"]",
"=",
"lambda",
"p",
":",
"self",
".",
"vocab",
".",
"to_disk",
"(",
"p",
")",
"util",
".",
"to_disk",
"(",
"path",
",",
"serializers",
",",
"exclude",
")"
] |
Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
|
[
"Save",
"the",
"current",
"state",
"to",
"a",
"directory",
".",
"If",
"a",
"model",
"is",
"loaded",
"this",
"will",
"include",
"the",
"model",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L735-L761
|
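A minimal sketch of saving a pipeline to a directory; the target paths and the installed en_core_web_sm model are assumptions, and exclude= names a component to leave out of the serialization.

import spacy

nlp = spacy.load("en_core_web_sm")                     # assumed pre-trained model
nlp.to_disk("/tmp/full_model")                         # meta.json, vocab, tokenizer, pipes
nlp.to_disk("/tmp/model_without_ner", exclude=["ner"]) # skip one component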
21,285
|
explosion/spaCy
|
spacy/language.py
|
Language.from_disk
|
def from_disk(self, path, exclude=tuple(), disable=None):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
deserializers = OrderedDict()
deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, exclude=["vocab"])
if not (path / "vocab").exists() and "vocab" not in exclude:
# Convert to list here in case exclude is (default) tuple
exclude = list(exclude) + ["vocab"]
util.from_disk(path, deserializers, exclude)
self._path = path
return self
|
python
|
def from_disk(self, path, exclude=tuple(), disable=None):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
deserializers = OrderedDict()
deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, exclude=["vocab"])
if not (path / "vocab").exists() and "vocab" not in exclude:
# Convert to list here in case exclude is (default) tuple
exclude = list(exclude) + ["vocab"]
util.from_disk(path, deserializers, exclude)
self._path = path
return self
|
[
"def",
"from_disk",
"(",
"self",
",",
"path",
",",
"exclude",
"=",
"tuple",
"(",
")",
",",
"disable",
"=",
"None",
")",
":",
"if",
"disable",
"is",
"not",
"None",
":",
"deprecation_warning",
"(",
"Warnings",
".",
"W014",
")",
"exclude",
"=",
"disable",
"path",
"=",
"util",
".",
"ensure_path",
"(",
"path",
")",
"deserializers",
"=",
"OrderedDict",
"(",
")",
"deserializers",
"[",
"\"meta.json\"",
"]",
"=",
"lambda",
"p",
":",
"self",
".",
"meta",
".",
"update",
"(",
"srsly",
".",
"read_json",
"(",
"p",
")",
")",
"deserializers",
"[",
"\"vocab\"",
"]",
"=",
"lambda",
"p",
":",
"self",
".",
"vocab",
".",
"from_disk",
"(",
"p",
")",
"and",
"_fix_pretrained_vectors_name",
"(",
"self",
")",
"deserializers",
"[",
"\"tokenizer\"",
"]",
"=",
"lambda",
"p",
":",
"self",
".",
"tokenizer",
".",
"from_disk",
"(",
"p",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"name",
"in",
"exclude",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"from_disk\"",
")",
":",
"continue",
"deserializers",
"[",
"name",
"]",
"=",
"lambda",
"p",
",",
"proc",
"=",
"proc",
":",
"proc",
".",
"from_disk",
"(",
"p",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"if",
"not",
"(",
"path",
"/",
"\"vocab\"",
")",
".",
"exists",
"(",
")",
"and",
"\"vocab\"",
"not",
"in",
"exclude",
":",
"# Convert to list here in case exclude is (default) tuple",
"exclude",
"=",
"list",
"(",
"exclude",
")",
"+",
"[",
"\"vocab\"",
"]",
"util",
".",
"from_disk",
"(",
"path",
",",
"deserializers",
",",
"exclude",
")",
"self",
".",
"_path",
"=",
"path",
"return",
"self"
] |
Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
|
[
"Loads",
"state",
"from",
"a",
"directory",
".",
"Modifies",
"the",
"object",
"in",
"place",
"and",
"returns",
"it",
".",
"If",
"the",
"saved",
"Language",
"object",
"contains",
"a",
"model",
"the",
"model",
"will",
"be",
"loaded",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L763-L793
|
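A hedged round-trip sketch for from_disk(). The receiving Language must already contain the pipeline components being loaded, which is why the sentencizer is added before the call; spacy.load() does this wiring automatically from meta.json. The path and the sentencizer pipeline are assumptions.

from spacy.lang.en import English

# Save a tiny pipeline...
nlp = English()
nlp.add_pipe(nlp.create_pipe("sentencizer"))
nlp.to_disk("/tmp/sentencizer_pipeline")

# ...and restore it into a fresh Language of the same class. The pipes must
# exist before from_disk() so their deserializers are registered.
nlp2 = English()
nlp2.add_pipe(nlp2.create_pipe("sentencizer"))
nlp2.from_disk("/tmp/sentencizer_pipeline")
doc = nlp2("This is one sentence. This is another one.")
print([sent.text for sent in doc.sents])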
21,286
|
explosion/spaCy
|
spacy/language.py
|
Language.to_bytes
|
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
"""Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
serializers = OrderedDict()
serializers["vocab"] = lambda: self.vocab.to_bytes()
serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "to_bytes"):
continue
serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
return util.to_bytes(serializers, exclude)
|
python
|
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
"""Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
serializers = OrderedDict()
serializers["vocab"] = lambda: self.vocab.to_bytes()
serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "to_bytes"):
continue
serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
return util.to_bytes(serializers, exclude)
|
[
"def",
"to_bytes",
"(",
"self",
",",
"exclude",
"=",
"tuple",
"(",
")",
",",
"disable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"disable",
"is",
"not",
"None",
":",
"deprecation_warning",
"(",
"Warnings",
".",
"W014",
")",
"exclude",
"=",
"disable",
"serializers",
"=",
"OrderedDict",
"(",
")",
"serializers",
"[",
"\"vocab\"",
"]",
"=",
"lambda",
":",
"self",
".",
"vocab",
".",
"to_bytes",
"(",
")",
"serializers",
"[",
"\"tokenizer\"",
"]",
"=",
"lambda",
":",
"self",
".",
"tokenizer",
".",
"to_bytes",
"(",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"serializers",
"[",
"\"meta.json\"",
"]",
"=",
"lambda",
":",
"srsly",
".",
"json_dumps",
"(",
"self",
".",
"meta",
")",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"name",
"in",
"exclude",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"to_bytes\"",
")",
":",
"continue",
"serializers",
"[",
"name",
"]",
"=",
"lambda",
"proc",
"=",
"proc",
":",
"proc",
".",
"to_bytes",
"(",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"exclude",
"=",
"util",
".",
"get_serialization_exclude",
"(",
"serializers",
",",
"exclude",
",",
"kwargs",
")",
"return",
"util",
".",
"to_bytes",
"(",
"serializers",
",",
"exclude",
")"
] |
Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
|
[
"Serialize",
"the",
"current",
"state",
"to",
"a",
"binary",
"string",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L795-L817
|
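A small sketch of byte serialization, useful for sending a pipeline over a network or storing it in a database; the sentencizer-only pipeline is an illustrative assumption.

from spacy.lang.en import English

nlp = English()
nlp.add_pipe(nlp.create_pipe("sentencizer"))

data = nlp.to_bytes()                         # whole pipeline as one bytestring
data_small = nlp.to_bytes(exclude=["vocab"])  # skip the comparatively large vocab
print(len(data), len(data_small))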
21,287
|
explosion/spaCy
|
spacy/language.py
|
Language.from_bytes
|
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
deserializers = OrderedDict()
deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
deserializers["vocab"] = lambda b: self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
util.from_bytes(bytes_data, deserializers, exclude)
return self
|
python
|
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
deserializers = OrderedDict()
deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
deserializers["vocab"] = lambda b: self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
util.from_bytes(bytes_data, deserializers, exclude)
return self
|
[
"def",
"from_bytes",
"(",
"self",
",",
"bytes_data",
",",
"exclude",
"=",
"tuple",
"(",
")",
",",
"disable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"disable",
"is",
"not",
"None",
":",
"deprecation_warning",
"(",
"Warnings",
".",
"W014",
")",
"exclude",
"=",
"disable",
"deserializers",
"=",
"OrderedDict",
"(",
")",
"deserializers",
"[",
"\"meta.json\"",
"]",
"=",
"lambda",
"b",
":",
"self",
".",
"meta",
".",
"update",
"(",
"srsly",
".",
"json_loads",
"(",
"b",
")",
")",
"deserializers",
"[",
"\"vocab\"",
"]",
"=",
"lambda",
"b",
":",
"self",
".",
"vocab",
".",
"from_bytes",
"(",
"b",
")",
"and",
"_fix_pretrained_vectors_name",
"(",
"self",
")",
"deserializers",
"[",
"\"tokenizer\"",
"]",
"=",
"lambda",
"b",
":",
"self",
".",
"tokenizer",
".",
"from_bytes",
"(",
"b",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"name",
"in",
"exclude",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"proc",
",",
"\"from_bytes\"",
")",
":",
"continue",
"deserializers",
"[",
"name",
"]",
"=",
"lambda",
"b",
",",
"proc",
"=",
"proc",
":",
"proc",
".",
"from_bytes",
"(",
"b",
",",
"exclude",
"=",
"[",
"\"vocab\"",
"]",
")",
"exclude",
"=",
"util",
".",
"get_serialization_exclude",
"(",
"deserializers",
",",
"exclude",
",",
"kwargs",
")",
"util",
".",
"from_bytes",
"(",
"bytes_data",
",",
"deserializers",
",",
"exclude",
")",
"return",
"self"
] |
Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
|
[
"Load",
"state",
"from",
"a",
"binary",
"string",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L819-L843
|
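The matching deserialization sketch: as with from_disk(), the receiving pipeline must already contain the same components before from_bytes() is called. The sentencizer pipeline is again an assumption for illustration.

from spacy.lang.en import English

nlp = English()
nlp.add_pipe(nlp.create_pipe("sentencizer"))
data = nlp.to_bytes()

nlp2 = English()
nlp2.add_pipe(nlp2.create_pipe("sentencizer"))  # register the pipe first
nlp2.from_bytes(data)
print(nlp2.pipe_names)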
21,288
|
explosion/spaCy
|
spacy/language.py
|
DisabledPipes.restore
|
def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = []
|
python
|
def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = []
|
[
"def",
"restore",
"(",
"self",
")",
":",
"current",
",",
"self",
".",
"nlp",
".",
"pipeline",
"=",
"self",
".",
"nlp",
".",
"pipeline",
",",
"self",
".",
"original_pipeline",
"unexpected",
"=",
"[",
"name",
"for",
"name",
",",
"pipe",
"in",
"current",
"if",
"not",
"self",
".",
"nlp",
".",
"has_pipe",
"(",
"name",
")",
"]",
"if",
"unexpected",
":",
"# Don't change the pipeline if we're raising an error.",
"self",
".",
"nlp",
".",
"pipeline",
"=",
"current",
"raise",
"ValueError",
"(",
"Errors",
".",
"E008",
".",
"format",
"(",
"names",
"=",
"unexpected",
")",
")",
"self",
"[",
":",
"]",
"=",
"[",
"]"
] |
Restore the pipeline to its state when DisabledPipes was created.
|
[
"Restore",
"the",
"pipeline",
"to",
"its",
"state",
"when",
"DisabledPipes",
"was",
"created",
"."
] |
8ee4100f8ffb336886208a1ea827bf4c745e2709
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L886-L894
|
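restore() is normally reached through nlp.disable_pipes(); the sketch below shows both the context-manager form, where restoration happens automatically, and the explicit call. It assumes an installed en_core_web_sm model.

import spacy

nlp = spacy.load("en_core_web_sm")   # assumed pre-trained model

# Context-manager form: the pipes come back automatically on exit.
with nlp.disable_pipes("ner", "parser"):
    doc = nlp("Only the tagger runs inside this block.")

# Manual form: keep the DisabledPipes object and call restore() yourself.
disabled = nlp.disable_pipes("ner")
doc = nlp("No entity recognizer runs on this text.")
disabled.restore()
print(nlp.pipe_names)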
21,289
|
nvbn/thefuck
|
thefuck/corrector.py
|
get_loaded_rules
|
def get_loaded_rules(rules_paths):
"""Yields all available rules.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
"""
for path in rules_paths:
if path.name != '__init__.py':
rule = Rule.from_path(path)
if rule.is_enabled:
yield rule
|
python
|
def get_loaded_rules(rules_paths):
"""Yields all available rules.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
"""
for path in rules_paths:
if path.name != '__init__.py':
rule = Rule.from_path(path)
if rule.is_enabled:
yield rule
|
[
"def",
"get_loaded_rules",
"(",
"rules_paths",
")",
":",
"for",
"path",
"in",
"rules_paths",
":",
"if",
"path",
".",
"name",
"!=",
"'__init__.py'",
":",
"rule",
"=",
"Rule",
".",
"from_path",
"(",
"path",
")",
"if",
"rule",
".",
"is_enabled",
":",
"yield",
"rule"
] |
Yields all available rules.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
|
[
"Yields",
"all",
"available",
"rules",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L8-L19
|
21,290
|
nvbn/thefuck
|
thefuck/corrector.py
|
get_rules_import_paths
|
def get_rules_import_paths():
"""Yields all rules import paths.
:rtype: Iterable[Path]
"""
# Bundled rules:
yield Path(__file__).parent.joinpath('rules')
# Rules defined by user:
yield settings.user_dir.joinpath('rules')
# Packages with third-party rules:
for path in sys.path:
for contrib_module in Path(path).glob('thefuck_contrib_*'):
contrib_rules = contrib_module.joinpath('rules')
if contrib_rules.is_dir():
yield contrib_rules
|
python
|
def get_rules_import_paths():
"""Yields all rules import paths.
:rtype: Iterable[Path]
"""
# Bundled rules:
yield Path(__file__).parent.joinpath('rules')
# Rules defined by user:
yield settings.user_dir.joinpath('rules')
# Packages with third-party rules:
for path in sys.path:
for contrib_module in Path(path).glob('thefuck_contrib_*'):
contrib_rules = contrib_module.joinpath('rules')
if contrib_rules.is_dir():
yield contrib_rules
|
[
"def",
"get_rules_import_paths",
"(",
")",
":",
"# Bundled rules:",
"yield",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"joinpath",
"(",
"'rules'",
")",
"# Rules defined by user:",
"yield",
"settings",
".",
"user_dir",
".",
"joinpath",
"(",
"'rules'",
")",
"# Packages with third-party rules:",
"for",
"path",
"in",
"sys",
".",
"path",
":",
"for",
"contrib_module",
"in",
"Path",
"(",
"path",
")",
".",
"glob",
"(",
"'thefuck_contrib_*'",
")",
":",
"contrib_rules",
"=",
"contrib_module",
".",
"joinpath",
"(",
"'rules'",
")",
"if",
"contrib_rules",
".",
"is_dir",
"(",
")",
":",
"yield",
"contrib_rules"
] |
Yields all rules import paths.
:rtype: Iterable[Path]
|
[
"Yields",
"all",
"rules",
"import",
"paths",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L22-L37
|
21,291
|
nvbn/thefuck
|
thefuck/corrector.py
|
get_rules
|
def get_rules():
"""Returns all enabled rules.
:rtype: [Rule]
"""
paths = [rule_path for path in get_rules_import_paths()
for rule_path in sorted(path.glob('*.py'))]
return sorted(get_loaded_rules(paths),
key=lambda rule: rule.priority)
|
python
|
def get_rules():
"""Returns all enabled rules.
:rtype: [Rule]
"""
paths = [rule_path for path in get_rules_import_paths()
for rule_path in sorted(path.glob('*.py'))]
return sorted(get_loaded_rules(paths),
key=lambda rule: rule.priority)
|
[
"def",
"get_rules",
"(",
")",
":",
"paths",
"=",
"[",
"rule_path",
"for",
"path",
"in",
"get_rules_import_paths",
"(",
")",
"for",
"rule_path",
"in",
"sorted",
"(",
"path",
".",
"glob",
"(",
"'*.py'",
")",
")",
"]",
"return",
"sorted",
"(",
"get_loaded_rules",
"(",
"paths",
")",
",",
"key",
"=",
"lambda",
"rule",
":",
"rule",
".",
"priority",
")"
] |
Returns all enabled rules.
:rtype: [Rule]
|
[
"Returns",
"all",
"enabled",
"rules",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L40-L49
|
21,292
|
nvbn/thefuck
|
thefuck/corrector.py
|
organize_commands
|
def organize_commands(corrected_commands):
"""Yields sorted commands without duplicates.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
try:
first_command = next(corrected_commands)
yield first_command
except StopIteration:
return
without_duplicates = {
command for command in sorted(
corrected_commands, key=lambda command: command.priority)
if command != first_command}
sorted_commands = sorted(
without_duplicates,
key=lambda corrected_command: corrected_command.priority)
logs.debug('Corrected commands: '.format(
', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))
for command in sorted_commands:
yield command
|
python
|
def organize_commands(corrected_commands):
"""Yields sorted commands without duplicates.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
try:
first_command = next(corrected_commands)
yield first_command
except StopIteration:
return
without_duplicates = {
command for command in sorted(
corrected_commands, key=lambda command: command.priority)
if command != first_command}
sorted_commands = sorted(
without_duplicates,
key=lambda corrected_command: corrected_command.priority)
logs.debug('Corrected commands: '.format(
', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))
for command in sorted_commands:
yield command
|
[
"def",
"organize_commands",
"(",
"corrected_commands",
")",
":",
"try",
":",
"first_command",
"=",
"next",
"(",
"corrected_commands",
")",
"yield",
"first_command",
"except",
"StopIteration",
":",
"return",
"without_duplicates",
"=",
"{",
"command",
"for",
"command",
"in",
"sorted",
"(",
"corrected_commands",
",",
"key",
"=",
"lambda",
"command",
":",
"command",
".",
"priority",
")",
"if",
"command",
"!=",
"first_command",
"}",
"sorted_commands",
"=",
"sorted",
"(",
"without_duplicates",
",",
"key",
"=",
"lambda",
"corrected_command",
":",
"corrected_command",
".",
"priority",
")",
"logs",
".",
"debug",
"(",
"'Corrected commands: '",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"u'{}'",
".",
"format",
"(",
"cmd",
")",
"for",
"cmd",
"in",
"[",
"first_command",
"]",
"+",
"sorted_commands",
")",
")",
")",
"for",
"command",
"in",
"sorted_commands",
":",
"yield",
"command"
] |
Yields sorted commands without duplicates.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: Iterable[thefuck.types.CorrectedCommand]
|
[
"Yields",
"sorted",
"commands",
"without",
"duplicates",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L52-L78
|
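To make the yield-first-then-dedup-and-sort behaviour concrete, here is a stand-alone sketch of the same idea. The Cmd namedtuple is a hypothetical stand-in for thefuck.types.CorrectedCommand, reduced to the attributes the logic relies on (equality and .priority); it is not the library's API.

from collections import namedtuple

# Hypothetical stand-in for CorrectedCommand: hashable, comparable, has .priority.
Cmd = namedtuple("Cmd", ["script", "priority"])

def organize(corrected_commands):
    commands = iter(corrected_commands)
    try:
        first = next(commands)   # yield the best guess immediately, before the
        yield first              # remaining (possibly slow) rules have run
    except StopIteration:
        return
    remaining = {c for c in commands if c != first}   # drop duplicates of the first
    for c in sorted(remaining, key=lambda c: c.priority):
        yield c

candidates = iter([Cmd("git push", 1000), Cmd("git push -u origin", 1500), Cmd("git push", 1000)])
print(list(organize(candidates)))

Yielding the first candidate before exhausting the generator is the design choice that lets the UI show a suggestion while lower-priority rules are still being evaluated.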
21,293
|
nvbn/thefuck
|
thefuck/corrector.py
|
get_corrected_commands
|
def get_corrected_commands(command):
"""Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
corrected_commands = (
corrected for rule in get_rules()
if rule.is_match(command)
for corrected in rule.get_corrected_commands(command))
return organize_commands(corrected_commands)
|
python
|
def get_corrected_commands(command):
"""Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
corrected_commands = (
corrected for rule in get_rules()
if rule.is_match(command)
for corrected in rule.get_corrected_commands(command))
return organize_commands(corrected_commands)
|
[
"def",
"get_corrected_commands",
"(",
"command",
")",
":",
"corrected_commands",
"=",
"(",
"corrected",
"for",
"rule",
"in",
"get_rules",
"(",
")",
"if",
"rule",
".",
"is_match",
"(",
"command",
")",
"for",
"corrected",
"in",
"rule",
".",
"get_corrected_commands",
"(",
"command",
")",
")",
"return",
"organize_commands",
"(",
"corrected_commands",
")"
] |
Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
|
[
"Returns",
"generator",
"with",
"sorted",
"and",
"unique",
"corrected",
"commands",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L81-L92
|
21,294
|
nvbn/thefuck
|
thefuck/entrypoints/fix_command.py
|
fix_command
|
def fix_command(known_args):
"""Fixes previous command. Used when `thefuck` called without arguments."""
settings.init(known_args)
with logs.debug_time('Total'):
logs.debug(u'Run with settings: {}'.format(pformat(settings)))
raw_command = _get_raw_command(known_args)
try:
command = types.Command.from_raw_script(raw_command)
except EmptyCommand:
logs.debug('Empty command, nothing to do')
return
corrected_commands = get_corrected_commands(command)
selected_command = select_command(corrected_commands)
if selected_command:
selected_command.run(command)
else:
sys.exit(1)
|
python
|
def fix_command(known_args):
"""Fixes previous command. Used when `thefuck` called without arguments."""
settings.init(known_args)
with logs.debug_time('Total'):
logs.debug(u'Run with settings: {}'.format(pformat(settings)))
raw_command = _get_raw_command(known_args)
try:
command = types.Command.from_raw_script(raw_command)
except EmptyCommand:
logs.debug('Empty command, nothing to do')
return
corrected_commands = get_corrected_commands(command)
selected_command = select_command(corrected_commands)
if selected_command:
selected_command.run(command)
else:
sys.exit(1)
|
[
"def",
"fix_command",
"(",
"known_args",
")",
":",
"settings",
".",
"init",
"(",
"known_args",
")",
"with",
"logs",
".",
"debug_time",
"(",
"'Total'",
")",
":",
"logs",
".",
"debug",
"(",
"u'Run with settings: {}'",
".",
"format",
"(",
"pformat",
"(",
"settings",
")",
")",
")",
"raw_command",
"=",
"_get_raw_command",
"(",
"known_args",
")",
"try",
":",
"command",
"=",
"types",
".",
"Command",
".",
"from_raw_script",
"(",
"raw_command",
")",
"except",
"EmptyCommand",
":",
"logs",
".",
"debug",
"(",
"'Empty command, nothing to do'",
")",
"return",
"corrected_commands",
"=",
"get_corrected_commands",
"(",
"command",
")",
"selected_command",
"=",
"select_command",
"(",
"corrected_commands",
")",
"if",
"selected_command",
":",
"selected_command",
".",
"run",
"(",
"command",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Fixes previous command. Used when `thefuck` called without arguments.
|
[
"Fixes",
"previous",
"command",
".",
"Used",
"when",
"thefuck",
"called",
"without",
"arguments",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/entrypoints/fix_command.py#L28-L47
|
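fix_command wires the pieces into a parse, correct, select, run pipeline with an early return on empty input. A stripped-down sketch of that control flow, using placeholder helpers rather than thefuck's real settings, logs, and types modules:

import sys

class EmptyCommand(Exception):
    """Placeholder for thefuck.exceptions.EmptyCommand."""

def parse(raw):
    if not raw.strip():
        raise EmptyCommand
    return raw.strip()

def corrections_for(command):
    # Stand-in for get_corrected_commands(); yields at most one fix here.
    if command.startswith('gti'):
        yield command.replace('gti', 'git', 1)

def pick(candidates):
    # Stand-in for select_command(): take the first candidate, if any.
    return next(iter(candidates), None)

def fix(raw):
    try:
        command = parse(raw)
    except EmptyCommand:
        return                      # nothing to do, mirroring the early return
    selected = pick(corrections_for(command))
    if selected:
        print(selected)             # the real entrypoint runs the command
    else:
        sys.exit(1)                 # no correction found

fix('gti push')                     # prints 'git push'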
21,295
|
nvbn/thefuck
|
thefuck/output_readers/shell_logger.py
|
get_output
|
def get_output(script):
"""Gets command output from shell logger."""
with logs.debug_time(u'Read output from external shell logger'):
commands = _get_last_n(const.SHELL_LOGGER_LIMIT)
for command in commands:
if command['command'] == script:
lines = _get_output_lines(command['output'])
output = '\n'.join(lines).strip()
return output
else:
logs.warn("Output isn't available in shell logger")
return None
|
python
|
def get_output(script):
"""Gets command output from shell logger."""
with logs.debug_time(u'Read output from external shell logger'):
commands = _get_last_n(const.SHELL_LOGGER_LIMIT)
for command in commands:
if command['command'] == script:
lines = _get_output_lines(command['output'])
output = '\n'.join(lines).strip()
return output
else:
logs.warn("Output isn't available in shell logger")
return None
|
[
"def",
"get_output",
"(",
"script",
")",
":",
"with",
"logs",
".",
"debug_time",
"(",
"u'Read output from external shell logger'",
")",
":",
"commands",
"=",
"_get_last_n",
"(",
"const",
".",
"SHELL_LOGGER_LIMIT",
")",
"for",
"command",
"in",
"commands",
":",
"if",
"command",
"[",
"'command'",
"]",
"==",
"script",
":",
"lines",
"=",
"_get_output_lines",
"(",
"command",
"[",
"'output'",
"]",
")",
"output",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
".",
"strip",
"(",
")",
"return",
"output",
"else",
":",
"logs",
".",
"warn",
"(",
"\"Output isn't available in shell logger\"",
")",
"return",
"None"
] |
Gets command output from shell logger.
|
[
"Gets",
"command",
"output",
"from",
"shell",
"logger",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/output_readers/shell_logger.py#L49-L60
|
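get_output leans on Python's for/else: the else branch runs only when the loop finishes without finding a matching command. A tiny standalone illustration of that construct (the dict layout is assumed from the record above, not from shell_logger's internals):

def find_output(commands, script):
    # `commands` is a list of {'command': ..., 'output': ...} dicts, the
    # shape the shell-logger reader iterates over in the record above.
    for entry in commands:
        if entry['command'] == script:
            return entry['output'].strip()
    else:
        # Runs only when the loop finished without returning a match.
        return None

log = [{'command': 'ls', 'output': 'a.txt\nb.txt\n'}]
print(find_output(log, 'ls'))    # prints the two file names
print(find_output(log, 'pwd'))   # prints None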
21,296
|
nvbn/thefuck
|
thefuck/shells/generic.py
|
Generic._get_history_lines
|
def _get_history_lines(self):
"""Returns list of history entries."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with io.open(history_file_name, 'r',
encoding='utf-8', errors='ignore') as history_file:
lines = history_file.readlines()
if settings.history_limit:
lines = lines[-settings.history_limit:]
for line in lines:
prepared = self._script_from_history(line) \
.strip()
if prepared:
yield prepared
|
python
|
def _get_history_lines(self):
"""Returns list of history entries."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with io.open(history_file_name, 'r',
encoding='utf-8', errors='ignore') as history_file:
lines = history_file.readlines()
if settings.history_limit:
lines = lines[-settings.history_limit:]
for line in lines:
prepared = self._script_from_history(line) \
.strip()
if prepared:
yield prepared
|
[
"def",
"_get_history_lines",
"(",
"self",
")",
":",
"history_file_name",
"=",
"self",
".",
"_get_history_file_name",
"(",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"history_file_name",
")",
":",
"with",
"io",
".",
"open",
"(",
"history_file_name",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
"as",
"history_file",
":",
"lines",
"=",
"history_file",
".",
"readlines",
"(",
")",
"if",
"settings",
".",
"history_limit",
":",
"lines",
"=",
"lines",
"[",
"-",
"settings",
".",
"history_limit",
":",
"]",
"for",
"line",
"in",
"lines",
":",
"prepared",
"=",
"self",
".",
"_script_from_history",
"(",
"line",
")",
".",
"strip",
"(",
")",
"if",
"prepared",
":",
"yield",
"prepared"
] |
Returns list of history entries.
|
[
"Returns",
"list",
"of",
"history",
"entries",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/generic.py#L54-L69
|
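_get_history_lines reads the history file defensively: skip it if missing, ignore undecodable bytes, keep only the most recent entries, and drop blanks. A minimal sketch of the same idea, with an explicit limit argument standing in for settings.history_limit and a plain strip() in place of _script_from_history:

import io
import os

def history_lines(path, limit=None, clean=lambda line: line.strip()):
    """Yield cleaned, non-empty entries from a history file, oldest first."""
    if not os.path.isfile(path):
        return
    with io.open(path, 'r', encoding='utf-8', errors='ignore') as history_file:
        lines = history_file.readlines()
    if limit:
        lines = lines[-limit:]          # keep only the most recent entries
    for line in lines:
        prepared = clean(line)
        if prepared:
            yield prepared

# e.g. the last two non-empty commands from a bash-style history file:
# for cmd in history_lines(os.path.expanduser('~/.bash_history'), limit=2):
#     print(cmd)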
21,297
|
nvbn/thefuck
|
thefuck/shells/generic.py
|
Generic.split_command
|
def split_command(self, command):
"""Split the command using shell-like syntax."""
encoded = self.encode_utf8(command)
try:
splitted = [s.replace("??", "\\ ") for s in shlex.split(encoded.replace('\\ ', '??'))]
except ValueError:
splitted = encoded.split(' ')
return self.decode_utf8(splitted)
|
python
|
def split_command(self, command):
"""Split the command using shell-like syntax."""
encoded = self.encode_utf8(command)
try:
splitted = [s.replace("??", "\\ ") for s in shlex.split(encoded.replace('\\ ', '??'))]
except ValueError:
splitted = encoded.split(' ')
return self.decode_utf8(splitted)
|
[
"def",
"split_command",
"(",
"self",
",",
"command",
")",
":",
"encoded",
"=",
"self",
".",
"encode_utf8",
"(",
"command",
")",
"try",
":",
"splitted",
"=",
"[",
"s",
".",
"replace",
"(",
"\"??\"",
",",
"\"\\\\ \"",
")",
"for",
"s",
"in",
"shlex",
".",
"split",
"(",
"encoded",
".",
"replace",
"(",
"'\\\\ '",
",",
"'??'",
")",
")",
"]",
"except",
"ValueError",
":",
"splitted",
"=",
"encoded",
".",
"split",
"(",
"' '",
")",
"return",
"self",
".",
"decode_utf8",
"(",
"splitted",
")"
] |
Split the command using shell-like syntax.
|
[
"Split",
"the",
"command",
"using",
"shell",
"-",
"like",
"syntax",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/generic.py#L80-L89
|
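split_command temporarily swaps escaped spaces ('\ ') for a placeholder so shlex.split does not break them into separate tokens, then restores them. The round trip can be checked directly (the '??' placeholder is taken from the record above):

import shlex

command = 'cp my\\ file.txt /tmp'          # one argument contains an escaped space
placeholder = command.replace('\\ ', '??') # 'cp my??file.txt /tmp'
tokens = [t.replace('??', '\\ ') for t in shlex.split(placeholder)]
print(tokens)                              # -> ['cp', 'my\\ file.txt', '/tmp']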
21,298
|
nvbn/thefuck
|
thefuck/shells/generic.py
|
Generic.quote
|
def quote(self, s):
"""Return a shell-escaped version of the string s."""
if six.PY2:
from pipes import quote
else:
from shlex import quote
return quote(s)
|
python
|
def quote(self, s):
"""Return a shell-escaped version of the string s."""
if six.PY2:
from pipes import quote
else:
from shlex import quote
return quote(s)
|
[
"def",
"quote",
"(",
"self",
",",
"s",
")",
":",
"if",
"six",
".",
"PY2",
":",
"from",
"pipes",
"import",
"quote",
"else",
":",
"from",
"shlex",
"import",
"quote",
"return",
"quote",
"(",
"s",
")"
] |
Return a shell-escaped version of the string s.
|
[
"Return",
"a",
"shell",
"-",
"escaped",
"version",
"of",
"the",
"string",
"s",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/generic.py#L101-L109
|
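On Python 3 the quote method above simply defers to shlex.quote, which leaves safe strings untouched and single-quotes anything containing shell metacharacters:

from shlex import quote   # Python 3 branch of the record above

print(quote('plain'))             # plain            (nothing to escape)
print(quote("it's here"))         # 'it'"'"'s here'  (embedded quote handled)
print(quote('a b; rm -rf /'))     # 'a b; rm -rf /'  (metacharacters neutralised)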
21,299
|
nvbn/thefuck
|
thefuck/shells/fish.py
|
Fish._put_to_history
|
def _put_to_history(self, command_script):
"""Puts command script to shell history."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with open(history_file_name, 'a') as history:
entry = self._get_history_line(command_script)
if six.PY2:
history.write(entry.encode('utf-8'))
else:
history.write(entry)
|
python
|
def _put_to_history(self, command_script):
"""Puts command script to shell history."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with open(history_file_name, 'a') as history:
entry = self._get_history_line(command_script)
if six.PY2:
history.write(entry.encode('utf-8'))
else:
history.write(entry)
|
[
"def",
"_put_to_history",
"(",
"self",
",",
"command_script",
")",
":",
"history_file_name",
"=",
"self",
".",
"_get_history_file_name",
"(",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"history_file_name",
")",
":",
"with",
"open",
"(",
"history_file_name",
",",
"'a'",
")",
"as",
"history",
":",
"entry",
"=",
"self",
".",
"_get_history_line",
"(",
"command_script",
")",
"if",
"six",
".",
"PY2",
":",
"history",
".",
"write",
"(",
"entry",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"history",
".",
"write",
"(",
"entry",
")"
] |
Puts command script to shell history.
|
[
"Puts",
"command",
"script",
"to",
"shell",
"history",
"."
] |
40ab4eb62db57627bff10cf029d29c94704086a2
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/fish.py#L120-L129
|
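_put_to_history only differs across interpreter versions in how the entry is written: Python 2's open() expects bytes, Python 3's expects text. A hedged sketch of the same append, with sys.version_info standing in for six.PY2 and the entry assumed to be pre-formatted elsewhere (fish's actual line layout lives in _get_history_line, which this record does not show):

import os
import sys

def put_to_history(history_file_name, entry):
    """Append one pre-formatted entry to an existing history file."""
    if not os.path.isfile(history_file_name):
        return                               # same guard as the record above
    with open(history_file_name, 'a') as history:
        if sys.version_info[0] == 2:         # stand-in for six.PY2
            history.write(entry.encode('utf-8'))
        else:
            history.write(entry)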