Dataset schema (column name, type, observed range):

    partition         stringclasses   3 values
    func_name         stringlengths   1-134
    docstring         stringlengths   1-46.9k
    path              stringlengths   4-223
    original_string   stringlengths   75-104k
    code              stringlengths   75-104k
    docstring_tokens  listlengths     1-1.97k
    repo              stringlengths   7-55
    language          stringclasses   1 value
    url               stringlengths   87-315
    code_tokens       listlengths     19-28.4k
    sha               stringlengths   40-40

(original_string and code are identical in every record shown below, so each record prints the code once.)
train
get_model_meta
Get model meta.json from a directory path and validate its contents. path (unicode or Path): Path to model directory. RETURNS (dict): The model's meta data.
spacy/util.py
def get_model_meta(path):
    """Get model meta.json from a directory path and validate its contents.

    path (unicode or Path): Path to model directory.
    RETURNS (dict): The model's meta data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=path2str(model_path)))
    meta_path = model_path / "meta.json"
    if not meta_path.is_file():
        raise IOError(Errors.E053.format(path=meta_path))
    meta = srsly.read_json(meta_path)
    for setting in ["lang", "name", "version"]:
        if setting not in meta or not meta[setting]:
            raise ValueError(Errors.E054.format(setting=setting))
    return meta
[ "Get", "model", "meta", ".", "json", "from", "a", "directory", "path", "and", "validate", "its", "contents", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L193-L209
[ "def", "get_model_meta", "(", "path", ")", ":", "model_path", "=", "ensure_path", "(", "path", ")", "if", "not", "model_path", ".", "exists", "(", ")", ":", "raise", "IOError", "(", "Errors", ".", "E052", ".", "format", "(", "path", "=", "path2str", "(", "model_path", ")", ")", ")", "meta_path", "=", "model_path", "/", "\"meta.json\"", "if", "not", "meta_path", ".", "is_file", "(", ")", ":", "raise", "IOError", "(", "Errors", ".", "E053", ".", "format", "(", "path", "=", "meta_path", ")", ")", "meta", "=", "srsly", ".", "read_json", "(", "meta_path", ")", "for", "setting", "in", "[", "\"lang\"", ",", "\"name\"", ",", "\"version\"", "]", ":", "if", "setting", "not", "in", "meta", "or", "not", "meta", "[", "setting", "]", ":", "raise", "ValueError", "(", "Errors", ".", "E054", ".", "format", "(", "setting", "=", "setting", ")", ")", "return", "meta" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
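A minimal usage sketch for get_model_meta; the model directory path is a placeholder assumption:

    from spacy.util import get_model_meta

    meta = get_model_meta("/models/en_core_web_sm")  # hypothetical model directory
    print(meta["lang"], meta["name"], meta["version"])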
train
get_package_path
Get the path to an installed package. name (unicode): Package name. RETURNS (Path): Path to installed package.
spacy/util.py
def get_package_path(name):
    """Get the path to an installed package.

    name (unicode): Package name.
    RETURNS (Path): Path to installed package.
    """
    name = name.lower()  # use lowercase version to be safe
    # Here we're importing the module just to find it. This is worryingly
    # indirect, but it's otherwise very difficult to find the package.
    pkg = importlib.import_module(name)
    return Path(pkg.__file__).parent
[ "Get", "the", "path", "to", "an", "installed", "package", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L226-L236
[ "def", "get_package_path", "(", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "# use lowercase version to be safe", "# Here we're importing the module just to find it. This is worryingly", "# indirect, but it's otherwise very difficult to find the package.", "pkg", "=", "importlib", ".", "import_module", "(", "name", ")", "return", "Path", "(", "pkg", ".", "__file__", ")", ".", "parent" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
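A short sketch of get_package_path, assuming the named model package is installed:

    from spacy.util import get_package_path

    path = get_package_path("en_core_web_sm")  # assumes this package is installed
    print(path)  # e.g. .../site-packages/en_core_web_sm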
train
get_entry_points
Get registered entry points from other packages for a given key, e.g. 'spacy_factories' and return them as a dictionary, keyed by name. key (unicode): Entry point name. RETURNS (dict): Entry points, keyed by name.
spacy/util.py
def get_entry_points(key):
    """Get registered entry points from other packages for a given key, e.g.
    'spacy_factories' and return them as a dictionary, keyed by name.

    key (unicode): Entry point name.
    RETURNS (dict): Entry points, keyed by name.
    """
    result = {}
    for entry_point in pkg_resources.iter_entry_points(key):
        result[entry_point.name] = entry_point.load()
    return result
[ "Get", "registered", "entry", "points", "from", "other", "packages", "for", "a", "given", "key", "e", ".", "g", ".", "spacy_factories", "and", "return", "them", "as", "a", "dictionary", "keyed", "by", "name", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L239-L249
[ "def", "get_entry_points", "(", "key", ")", ":", "result", "=", "{", "}", "for", "entry_point", "in", "pkg_resources", ".", "iter_entry_points", "(", "key", ")", ":", "result", "[", "entry_point", ".", "name", "]", "=", "entry_point", ".", "load", "(", ")", "return", "result" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
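A usage sketch for get_entry_points; the dict is empty unless some installed package registers the key:

    from spacy.util import get_entry_points

    factories = get_entry_points("spacy_factories")  # {} if nothing is registered
    for name, factory in factories.items():
        print(name, factory)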
train
get_entry_point
Check if registered entry point is available for a given name and load it. Otherwise, return None. key (unicode): Entry point name. value (unicode): Name of entry point to load. RETURNS: The loaded entry point or None.
spacy/util.py
def get_entry_point(key, value):
    """Check if registered entry point is available for a given name and
    load it. Otherwise, return None.

    key (unicode): Entry point name.
    value (unicode): Name of entry point to load.
    RETURNS: The loaded entry point or None.
    """
    for entry_point in pkg_resources.iter_entry_points(key):
        if entry_point.name == value:
            return entry_point.load()
[ "Check", "if", "registered", "entry", "point", "is", "available", "for", "a", "given", "name", "and", "load", "it", ".", "Otherwise", "return", "None", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L252-L262
[ "def", "get_entry_point", "(", "key", ",", "value", ")", ":", "for", "entry_point", "in", "pkg_resources", ".", "iter_entry_points", "(", "key", ")", ":", "if", "entry_point", ".", "name", "==", "value", ":", "return", "entry_point", ".", "load", "(", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
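A sketch of get_entry_point; the entry point name is hypothetical:

    from spacy.util import get_entry_point

    factory = get_entry_point("spacy_factories", "my_component")  # hypothetical entry point name
    if factory is None:
        print("entry point not registered")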
train
is_in_jupyter
Check if user is running spaCy from a Jupyter notebook by detecting the IPython kernel. Mainly used for the displaCy visualizer. RETURNS (bool): True if in Jupyter, False if not.
spacy/util.py
def is_in_jupyter():
    """Check if user is running spaCy from a Jupyter notebook by detecting the
    IPython kernel. Mainly used for the displaCy visualizer.

    RETURNS (bool): True if in Jupyter, False if not.
    """
    # https://stackoverflow.com/a/39662359/6400719
    try:
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            return True  # Jupyter notebook or qtconsole
    except NameError:
        return False  # Probably standard Python interpreter
    return False
[ "Check", "if", "user", "is", "running", "spaCy", "from", "a", "Jupyter", "notebook", "by", "detecting", "the", "IPython", "kernel", ".", "Mainly", "used", "for", "the", "displaCy", "visualizer", ".", "RETURNS", "(", "bool", ")", ":", "True", "if", "in", "Jupyter", "False", "if", "not", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L265-L277
[ "def", "is_in_jupyter", "(", ")", ":", "# https://stackoverflow.com/a/39662359/6400719", "try", ":", "shell", "=", "get_ipython", "(", ")", ".", "__class__", ".", "__name__", "if", "shell", "==", "\"ZMQInteractiveShell\"", ":", "return", "True", "# Jupyter notebook or qtconsole", "except", "NameError", ":", "return", "False", "# Probably standard Python interpreter", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
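A trivial usage sketch for is_in_jupyter:

    from spacy.util import is_in_jupyter

    if is_in_jupyter():
        print("ZMQ kernel detected")  # e.g. displaCy can render inline HTML
    else:
        print("plain interpreter")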
train
compile_suffix_regex
Compile a sequence of suffix rules into a regex object. entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES. RETURNS (regex object): The regex object, to be used for Tokenizer.suffix_search.
spacy/util.py
def compile_suffix_regex(entries):
    """Compile a sequence of suffix rules into a regex object.

    entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.suffix_search.
    """
    expression = "|".join([piece + "$" for piece in entries if piece.strip()])
    return re.compile(expression)
[ "Compile", "a", "sequence", "of", "suffix", "rules", "into", "a", "regex", "object", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L346-L353
[ "def", "compile_suffix_regex", "(", "entries", ")", ":", "expression", "=", "\"|\"", ".", "join", "(", "[", "piece", "+", "\"$\"", "for", "piece", "in", "entries", "if", "piece", ".", "strip", "(", ")", "]", ")", "return", "re", ".", "compile", "(", "expression", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
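A sketch with toy suffix rules (the real defaults live in spacy.lang.punctuation); note the pieces are regex fragments, so punctuation must be escaped:

    from spacy.util import compile_suffix_regex

    suffix_re = compile_suffix_regex(["km", "%", r"\)"])  # toy rules, not the real defaults
    print(suffix_re.search("10km").group())  # "km"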
train
compile_infix_regex
Compile a sequence of infix rules into a regex object. entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES. RETURNS (regex object): The regex object, to be used for Tokenizer.infix_finditer.
spacy/util.py
def compile_infix_regex(entries):
    """Compile a sequence of infix rules into a regex object.

    entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.infix_finditer.
    """
    expression = "|".join([piece for piece in entries if piece.strip()])
    return re.compile(expression)
[ "Compile", "a", "sequence", "of", "infix", "rules", "into", "a", "regex", "object", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L356-L363
[ "def", "compile_infix_regex", "(", "entries", ")", ":", "expression", "=", "\"|\"", ".", "join", "(", "[", "piece", "for", "piece", "in", "entries", "if", "piece", ".", "strip", "(", ")", "]", ")", "return", "re", ".", "compile", "(", "expression", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
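A matching sketch for infix rules, again with toy patterns:

    from spacy.util import compile_infix_regex

    infix_re = compile_infix_regex([r"\.\.\.", "-"])  # toy rules
    print([m.group() for m in infix_re.finditer("well-known...idea")])  # ['-', '...']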
train
update_exc
Update and validate tokenizer exceptions. Will overwrite exceptions. base_exceptions (dict): Base exceptions. *addition_dicts (dict): Exceptions to add to the base dict, in order. RETURNS (dict): Combined tokenizer exceptions.
spacy/util.py
def update_exc(base_exceptions, *addition_dicts):
    """Update and validate tokenizer exceptions. Will overwrite exceptions.

    base_exceptions (dict): Base exceptions.
    *addition_dicts (dict): Exceptions to add to the base dict, in order.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    exc = dict(base_exceptions)
    for additions in addition_dicts:
        for orth, token_attrs in additions.items():
            if not all(isinstance(attr[ORTH], unicode_) for attr in token_attrs):
                raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
            described_orth = "".join(attr[ORTH] for attr in token_attrs)
            if orth != described_orth:
                raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
        exc.update(additions)
    exc = expand_exc(exc, "'", "’")
    return exc
[ "Update", "and", "validate", "tokenizer", "exceptions", ".", "Will", "overwrite", "exceptions", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L386-L403
[ "def", "update_exc", "(", "base_exceptions", ",", "*", "addition_dicts", ")", ":", "exc", "=", "dict", "(", "base_exceptions", ")", "for", "additions", "in", "addition_dicts", ":", "for", "orth", ",", "token_attrs", "in", "additions", ".", "items", "(", ")", ":", "if", "not", "all", "(", "isinstance", "(", "attr", "[", "ORTH", "]", ",", "unicode_", ")", "for", "attr", "in", "token_attrs", ")", ":", "raise", "ValueError", "(", "Errors", ".", "E055", ".", "format", "(", "key", "=", "orth", ",", "orths", "=", "token_attrs", ")", ")", "described_orth", "=", "\"\"", ".", "join", "(", "attr", "[", "ORTH", "]", "for", "attr", "in", "token_attrs", ")", "if", "orth", "!=", "described_orth", ":", "raise", "ValueError", "(", "Errors", ".", "E056", ".", "format", "(", "key", "=", "orth", ",", "orths", "=", "described_orth", ")", ")", "exc", ".", "update", "(", "additions", ")", "exc", "=", "expand_exc", "(", "exc", ",", "\"'\"", ",", "\"’\")", "", "return", "exc" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
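A small sketch of update_exc; the validation requires each addition's ORTH values to join back into the key:

    from spacy.attrs import ORTH
    from spacy.util import update_exc

    base = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
    extra = {"gonna": [{ORTH: "gon"}, {ORTH: "na"}]}
    exc = update_exc(base, extra)
    # expand_exc then also adds variants with typographic apostrophes, e.g. "don’t"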
train
expand_exc
Find string in tokenizer exceptions, duplicate entry and replace string. For example, to add additional versions with typographic apostrophes. excs (dict): Tokenizer exceptions. search (unicode): String to find and replace. replace (unicode): Replacement. RETURNS (dict): Combined tokenizer exceptions.
spacy/util.py
def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (unicode): String to find and replace.
    replace (unicode): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """

    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
            new_excs[new_key] = new_value
    return new_excs
[ "Find", "string", "in", "tokenizer", "exceptions", "duplicate", "entry", "and", "replace", "string", ".", "For", "example", "to", "add", "additional", "versions", "with", "typographic", "apostrophes", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L406-L427
[ "def", "expand_exc", "(", "excs", ",", "search", ",", "replace", ")", ":", "def", "_fix_token", "(", "token", ",", "search", ",", "replace", ")", ":", "fixed", "=", "dict", "(", "token", ")", "fixed", "[", "ORTH", "]", "=", "fixed", "[", "ORTH", "]", ".", "replace", "(", "search", ",", "replace", ")", "return", "fixed", "new_excs", "=", "dict", "(", "excs", ")", "for", "token_string", ",", "tokens", "in", "excs", ".", "items", "(", ")", ":", "if", "search", "in", "token_string", ":", "new_key", "=", "token_string", ".", "replace", "(", "search", ",", "replace", ")", "new_value", "=", "[", "_fix_token", "(", "t", ",", "search", ",", "replace", ")", "for", "t", "in", "tokens", "]", "new_excs", "[", "new_key", "]", "=", "new_value", "return", "new_excs" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
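A sketch of expand_exc duplicating an entry with the typographic apostrophe:

    from spacy.attrs import ORTH
    from spacy.util import expand_exc

    excs = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
    expanded = expand_exc(excs, "'", "\u2019")
    # expanded contains both "don't" and "don’t", with token ORTH values rewritten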
train
minibatch
Iterate over batches of items. `size` may be an iterator, so that batch-size can vary on each step.
spacy/util.py
def minibatch(items, size=8):
    """Iterate over batches of items. `size` may be an iterator,
    so that batch-size can vary on each step.
    """
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = list(itertools.islice(items, int(batch_size)))
        if len(batch) == 0:
            break
        yield list(batch)
[ "Iterate", "over", "batches", "of", "items", ".", "size", "may", "be", "an", "iterator", "so", "that", "batch", "-", "size", "can", "vary", "on", "each", "step", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L446-L460
[ "def", "minibatch", "(", "items", ",", "size", "=", "8", ")", ":", "if", "isinstance", "(", "size", ",", "int", ")", ":", "size_", "=", "itertools", ".", "repeat", "(", "size", ")", "else", ":", "size_", "=", "size", "items", "=", "iter", "(", "items", ")", "while", "True", ":", "batch_size", "=", "next", "(", "size_", ")", "batch", "=", "list", "(", "itertools", ".", "islice", "(", "items", ",", "int", "(", "batch_size", ")", ")", ")", "if", "len", "(", "batch", ")", "==", "0", ":", "break", "yield", "list", "(", "batch", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
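A quick sketch of minibatch with a fixed size:

    from spacy.util import minibatch

    for batch in minibatch(range(10), size=4):
        print(batch)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]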
train
compounding
Yield an infinite series of compounding values. Each time the generator is called, a value is produced by multiplying the previous value by the compound rate. EXAMPLE: >>> sizes = compounding(1., 10., 1.5) >>> assert next(sizes) == 1. >>> assert next(sizes) == 1 * 1.5 >>> assert next(sizes) == 1.5 * 1.5
spacy/util.py
def compounding(start, stop, compound):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate.

    EXAMPLE:
        >>> sizes = compounding(1., 10., 1.5)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 * 1.5
        >>> assert next(sizes) == 1.5 * 1.5
    """

    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr *= compound
[ "Yield", "an", "infinite", "series", "of", "compounding", "values", ".", "Each", "time", "the", "generator", "is", "called", "a", "value", "is", "produced", "by", "multiplying", "the", "previous", "value", "by", "the", "compound", "rate", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L463-L481
[ "def", "compounding", "(", "start", ",", "stop", ",", "compound", ")", ":", "def", "clip", "(", "value", ")", ":", "return", "max", "(", "value", ",", "stop", ")", "if", "(", "start", ">", "stop", ")", "else", "min", "(", "value", ",", "stop", ")", "curr", "=", "float", "(", "start", ")", "while", "True", ":", "yield", "clip", "(", "curr", ")", "curr", "*=", "compound" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
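A sketch of compounding; values grow geometrically and clip at stop:

    from spacy.util import compounding

    sizes = compounding(1.0, 32.0, 1.5)
    print([next(sizes) for _ in range(5)])  # [1.0, 1.5, 2.25, 3.375, 5.0625]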
train
stepping
Yield an infinite series of values that step from a start value to a final value over some number of steps. Each step is (stop-start)/steps. After the final value is reached, the generator continues yielding that value. EXAMPLE: >>> sizes = stepping(1., 200., 100) >>> assert next(sizes) == 1. >>> assert next(sizes) == 1 + (200.-1.) / 100 >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
spacy/util.py
def stepping(start, stop, steps):
    """Yield an infinite series of values that step from a start value to a
    final value over some number of steps. Each step is (stop-start)/steps.
    After the final value is reached, the generator continues yielding that
    value.

    EXAMPLE:
        >>> sizes = stepping(1., 200., 100)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 + (200.-1.) / 100
        >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
    """

    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr += (stop - start) / steps
[ "Yield", "an", "infinite", "series", "of", "values", "that", "step", "from", "a", "start", "value", "to", "a", "final", "value", "over", "some", "number", "of", "steps", ".", "Each", "step", "is", "(", "stop", "-", "start", ")", "/", "steps", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L484-L504
[ "def", "stepping", "(", "start", ",", "stop", ",", "steps", ")", ":", "def", "clip", "(", "value", ")", ":", "return", "max", "(", "value", ",", "stop", ")", "if", "(", "start", ">", "stop", ")", "else", "min", "(", "value", ",", "stop", ")", "curr", "=", "float", "(", "start", ")", "while", "True", ":", "yield", "clip", "(", "curr", ")", "curr", "+=", "(", "stop", "-", "start", ")", "/", "steps" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
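A sketch of stepping with a small, exact example:

    from spacy.util import stepping

    sizes = stepping(1.0, 5.0, 4)  # step = (5.0 - 1.0) / 4 = 1.0
    print([next(sizes) for _ in range(6)])  # [1.0, 2.0, 3.0, 4.0, 5.0, 5.0]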
train
decaying
Yield an infinite series of linearly decaying values.
spacy/util.py
def decaying(start, stop, decay):
    """Yield an infinite series of linearly decaying values."""
    curr = float(start)
    while True:
        yield max(curr, stop)
        curr -= decay
[ "Yield", "an", "infinite", "series", "of", "linearly", "decaying", "values", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L507-L513
[ "def", "decaying", "(", "start", ",", "stop", ",", "decay", ")", ":", "curr", "=", "float", "(", "start", ")", "while", "True", ":", "yield", "max", "(", "curr", ",", "stop", ")", "curr", "-=", "(", "decay", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
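A sketch of decaying, e.g. for a dropout schedule:

    from spacy.util import decaying

    dropouts = decaying(0.6, 0.2, 0.1)
    # ~0.6, 0.5, 0.4, 0.3, then floored at 0.2 (modulo float rounding)
    print([next(dropouts) for _ in range(6)])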
train
minibatch_by_words
Create minibatches of a given number of words.
spacy/util.py
def minibatch_by_words(items, size, tuples=True, count_words=len):
    """Create minibatches of a given number of words."""
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = []
        while batch_size >= 0:
            try:
                if tuples:
                    doc, gold = next(items)
                else:
                    doc = next(items)
            except StopIteration:
                if batch:
                    yield batch
                return
            batch_size -= count_words(doc)
            if tuples:
                batch.append((doc, gold))
            else:
                batch.append(doc)
        if batch:
            yield batch
[ "Create", "minibatches", "of", "a", "given", "number", "of", "words", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L516-L542
[ "def", "minibatch_by_words", "(", "items", ",", "size", ",", "tuples", "=", "True", ",", "count_words", "=", "len", ")", ":", "if", "isinstance", "(", "size", ",", "int", ")", ":", "size_", "=", "itertools", ".", "repeat", "(", "size", ")", "else", ":", "size_", "=", "size", "items", "=", "iter", "(", "items", ")", "while", "True", ":", "batch_size", "=", "next", "(", "size_", ")", "batch", "=", "[", "]", "while", "batch_size", ">=", "0", ":", "try", ":", "if", "tuples", ":", "doc", ",", "gold", "=", "next", "(", "items", ")", "else", ":", "doc", "=", "next", "(", "items", ")", "except", "StopIteration", ":", "if", "batch", ":", "yield", "batch", "return", "batch_size", "-=", "count_words", "(", "doc", ")", "if", "tuples", ":", "batch", ".", "append", "(", "(", "doc", ",", "gold", ")", ")", "else", ":", "batch", ".", "append", "(", "doc", ")", "if", "batch", ":", "yield", "batch" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
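A sketch of minibatch_by_words; by default items are (doc, gold) tuples and count_words is applied to the doc, here plain strings counted by whitespace split:

    from spacy.util import minibatch_by_words

    pairs = [("a b c", None), ("d e", None), ("f", None), ("g h i j", None)]
    for batch in minibatch_by_words(pairs, size=5, count_words=lambda t: len(t.split())):
        print([doc for doc, gold in batch])  # ['a b c', 'd e', 'f'], then ['g h i j']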
train
itershuffle
Shuffle an iterator. This works by holding `bufsize` items back and yielding them sometime later. Obviously, this is not unbiased – but should be good enough for batching. Larger bufsize means less bias. From https://gist.github.com/andres-erbsen/1307752 iterable (iterable): Iterator to shuffle. bufsize (int): Items to hold back. YIELDS (iterable): The shuffled iterator.
spacy/util.py
def itershuffle(iterable, bufsize=1000):
    """Shuffle an iterator. This works by holding `bufsize` items back
    and yielding them sometime later. Obviously, this is not unbiased –
    but should be good enough for batching. Larger bufsize means less bias.
    From https://gist.github.com/andres-erbsen/1307752

    iterable (iterable): Iterator to shuffle.
    bufsize (int): Items to hold back.
    YIELDS (iterable): The shuffled iterator.
    """
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            for i in range(random.randint(1, bufsize - len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        random.shuffle(buf)
        while buf:
            yield buf.pop()
        # NOTE: the original ended with `raise StopIteration` here, which PEP 479
        # turns into a RuntimeError on Python 3.7+; a plain return ends the
        # generator cleanly instead.
        return
[ "Shuffle", "an", "iterator", ".", "This", "works", "by", "holding", "bufsize", "items", "back", "and", "yielding", "them", "sometime", "later", ".", "Obviously", "this", "is", "not", "unbiased", "–", "but", "should", "be", "good", "enough", "for", "batching", ".", "Larger", "bufsize", "means", "less", "bias", ".", "From", "https", ":", "//", "gist", ".", "github", ".", "com", "/", "andres", "-", "erbsen", "/", "1307752" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L545-L571
[ "def", "itershuffle", "(", "iterable", ",", "bufsize", "=", "1000", ")", ":", "iterable", "=", "iter", "(", "iterable", ")", "buf", "=", "[", "]", "try", ":", "while", "True", ":", "for", "i", "in", "range", "(", "random", ".", "randint", "(", "1", ",", "bufsize", "-", "len", "(", "buf", ")", ")", ")", ":", "buf", ".", "append", "(", "next", "(", "iterable", ")", ")", "random", ".", "shuffle", "(", "buf", ")", "for", "i", "in", "range", "(", "random", ".", "randint", "(", "1", ",", "bufsize", ")", ")", ":", "if", "buf", ":", "yield", "buf", ".", "pop", "(", ")", "else", ":", "break", "except", "StopIteration", ":", "random", ".", "shuffle", "(", "buf", ")", "while", "buf", ":", "yield", "buf", ".", "pop", "(", ")", "raise", "StopIteration" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
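A small sketch; this relies on the plain-return fix noted in the code above, since with the original `raise StopIteration`, exhausting the iterator raises RuntimeError on Python 3.7+:

    import random
    from spacy.util import itershuffle

    random.seed(0)
    print(sorted(itershuffle(range(10), bufsize=4)))  # [0, 1, ..., 9]: every item yielded once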
train
validate_json
Validate data against a given JSON schema (see https://json-schema.org). data: JSON-serializable data to validate. validator (jsonschema.DraftXValidator): The validator. RETURNS (list): A list of error messages, if available.
spacy/util.py
def validate_json(data, validator):
    """Validate data against a given JSON schema (see https://json-schema.org).

    data: JSON-serializable data to validate.
    validator (jsonschema.DraftXValidator): The validator.
    RETURNS (list): A list of error messages, if available.
    """
    errors = []
    for err in sorted(validator.iter_errors(data), key=lambda e: e.path):
        if err.path:
            err_path = "[{}]".format(" -> ".join([str(p) for p in err.path]))
        else:
            err_path = ""
        msg = err.message + " " + err_path
        if err.context:  # Error has suberrors, e.g. if schema uses anyOf
            suberrs = [" - {}".format(suberr.message) for suberr in err.context]
            msg += ":\n{}".format("".join(suberrs))
        errors.append(msg)
    return errors
[ "Validate", "data", "against", "a", "given", "JSON", "schema", "(", "see", "https", ":", "//", "json", "-", "schema", ".", "org", ")", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L672-L690
[ "def", "validate_json", "(", "data", ",", "validator", ")", ":", "errors", "=", "[", "]", "for", "err", "in", "sorted", "(", "validator", ".", "iter_errors", "(", "data", ")", ",", "key", "=", "lambda", "e", ":", "e", ".", "path", ")", ":", "if", "err", ".", "path", ":", "err_path", "=", "\"[{}]\"", ".", "format", "(", "\" -> \"", ".", "join", "(", "[", "str", "(", "p", ")", "for", "p", "in", "err", ".", "path", "]", ")", ")", "else", ":", "err_path", "=", "\"\"", "msg", "=", "err", ".", "message", "+", "\" \"", "+", "err_path", "if", "err", ".", "context", ":", "# Error has suberrors, e.g. if schema uses anyOf", "suberrs", "=", "[", "\" - {}\"", ".", "format", "(", "suberr", ".", "message", ")", "for", "suberr", "in", "err", ".", "context", "]", "msg", "+=", "\":\\n{}\"", ".", "format", "(", "\"\"", ".", "join", "(", "suberrs", ")", ")", "errors", ".", "append", "(", "msg", ")", "return", "errors" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
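A sketch of validate_json, assuming the jsonschema package is installed:

    from jsonschema import Draft4Validator
    from spacy.util import validate_json

    schema = {"type": "object", "required": ["lang"]}
    validator = Draft4Validator(schema)
    print(validate_json({"name": "x"}, validator))  # ["'lang' is a required property "]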
train
get_serialization_exclude
Helper function to validate serialization args and manage transition from keyword arguments (pre v2.1) to exclude argument.
spacy/util.py
def get_serialization_exclude(serializers, exclude, kwargs):
    """Helper function to validate serialization args and manage transition from
    keyword arguments (pre v2.1) to exclude argument.
    """
    exclude = list(exclude)
    # Split to support file names like meta.json
    options = [name.split(".")[0] for name in serializers]
    for key, value in kwargs.items():
        if key in ("vocab",) and value is False:
            deprecation_warning(Warnings.W015.format(arg=key))
            exclude.append(key)
        elif key.split(".")[0] in options:
            raise ValueError(Errors.E128.format(arg=key))
        # TODO: user warning?
    return exclude
[ "Helper", "function", "to", "validate", "serialization", "args", "and", "manage", "transition", "from", "keyword", "arguments", "(", "pre", "v2", ".", "1", ")", "to", "exclude", "argument", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L693-L707
[ "def", "get_serialization_exclude", "(", "serializers", ",", "exclude", ",", "kwargs", ")", ":", "exclude", "=", "list", "(", "exclude", ")", "# Split to support file names like meta.json", "options", "=", "[", "name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "for", "name", "in", "serializers", "]", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "in", "(", "\"vocab\"", ",", ")", "and", "value", "is", "False", ":", "deprecation_warning", "(", "Warnings", ".", "W015", ".", "format", "(", "arg", "=", "key", ")", ")", "exclude", ".", "append", "(", "key", ")", "elif", "key", ".", "split", "(", "\".\"", ")", "[", "0", "]", "in", "options", ":", "raise", "ValueError", "(", "Errors", ".", "E128", ".", "format", "(", "arg", "=", "key", ")", ")", "# TODO: user warning?", "return", "exclude" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
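A hedged sketch of this internal helper; in real use the serializers dict maps names to callables, which are irrelevant here:

    from spacy.util import get_serialization_exclude

    serializers = {"vocab": None, "meta.json": None}  # values unused by this helper
    exclude = get_serialization_exclude(serializers, [], {"vocab": False})
    print(exclude)  # ["vocab"], after emitting a deprecation warning for the old keyword style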
train
EntityRuler.labels
All labels present in the match patterns. RETURNS (tuple): The string labels. DOCS: https://spacy.io/api/entityruler#labels
spacy/pipeline/entityruler.py
def labels(self):
    """All labels present in the match patterns.

    RETURNS (tuple): The string labels.

    DOCS: https://spacy.io/api/entityruler#labels
    """
    all_labels = set(self.token_patterns.keys())
    all_labels.update(self.phrase_patterns.keys())
    return tuple(all_labels)
[ "All", "labels", "present", "in", "the", "match", "patterns", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L96-L105
[ "def", "labels", "(", "self", ")", ":", "all_labels", "=", "set", "(", "self", ".", "token_patterns", ".", "keys", "(", ")", ")", "all_labels", ".", "update", "(", "self", ".", "phrase_patterns", ".", "keys", "(", ")", ")", "return", "tuple", "(", "all_labels", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
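A sketch of the labels property:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    ruler = EntityRuler(English())
    ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
    print(ruler.labels)  # ("ORG",)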
train
EntityRuler.patterns
Get all patterns that were added to the entity ruler. RETURNS (list): The original patterns, one dictionary per pattern. DOCS: https://spacy.io/api/entityruler#patterns
spacy/pipeline/entityruler.py
def patterns(self):
    """Get all patterns that were added to the entity ruler.

    RETURNS (list): The original patterns, one dictionary per pattern.

    DOCS: https://spacy.io/api/entityruler#patterns
    """
    all_patterns = []
    for label, patterns in self.token_patterns.items():
        for pattern in patterns:
            all_patterns.append({"label": label, "pattern": pattern})
    for label, patterns in self.phrase_patterns.items():
        for pattern in patterns:
            all_patterns.append({"label": label, "pattern": pattern.text})
    return all_patterns
[ "Get", "all", "patterns", "that", "were", "added", "to", "the", "entity", "ruler", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L108-L122
[ "def", "patterns", "(", "self", ")", ":", "all_patterns", "=", "[", "]", "for", "label", ",", "patterns", "in", "self", ".", "token_patterns", ".", "items", "(", ")", ":", "for", "pattern", "in", "patterns", ":", "all_patterns", ".", "append", "(", "{", "\"label\"", ":", "label", ",", "\"pattern\"", ":", "pattern", "}", ")", "for", "label", ",", "patterns", "in", "self", ".", "phrase_patterns", ".", "items", "(", ")", ":", "for", "pattern", "in", "patterns", ":", "all_patterns", ".", "append", "(", "{", "\"label\"", ":", "label", ",", "\"pattern\"", ":", "pattern", ".", "text", "}", ")", "return", "all_patterns" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
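A sketch of the patterns property round-tripping a token pattern:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    ruler = EntityRuler(English())
    ruler.add_patterns([{"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]}])
    print(ruler.patterns)  # [{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}]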
train
EntityRuler.add_patterns
Add patterns to the entity ruler. A pattern can either be a token pattern (list of dicts) or a phrase pattern (string). For example: {'label': 'ORG', 'pattern': 'Apple'} {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]} patterns (list): The patterns to add. DOCS: https://spacy.io/api/entityruler#add_patterns
spacy/pipeline/entityruler.py
def add_patterns(self, patterns):
    """Add patterns to the entity ruler. A pattern can either be a token
    pattern (list of dicts) or a phrase pattern (string). For example:
    {'label': 'ORG', 'pattern': 'Apple'}
    {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}

    patterns (list): The patterns to add.

    DOCS: https://spacy.io/api/entityruler#add_patterns
    """
    for entry in patterns:
        label = entry["label"]
        pattern = entry["pattern"]
        if isinstance(pattern, basestring_):
            self.phrase_patterns[label].append(self.nlp(pattern))
        elif isinstance(pattern, list):
            self.token_patterns[label].append(pattern)
        else:
            raise ValueError(Errors.E097.format(pattern=pattern))
    for label, patterns in self.token_patterns.items():
        self.matcher.add(label, None, *patterns)
    for label, patterns in self.phrase_patterns.items():
        self.phrase_matcher.add(label, None, *patterns)
[ "Add", "patterns", "to", "the", "entitiy", "ruler", ".", "A", "pattern", "can", "either", "be", "a", "token", "pattern", "(", "list", "of", "dicts", ")", "or", "a", "phrase", "pattern", "(", "string", ")", ".", "For", "example", ":", "{", "label", ":", "ORG", "pattern", ":", "Apple", "}", "{", "label", ":", "GPE", "pattern", ":", "[", "{", "lower", ":", "san", "}", "{", "lower", ":", "francisco", "}", "]", "}" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L124-L146
[ "def", "add_patterns", "(", "self", ",", "patterns", ")", ":", "for", "entry", "in", "patterns", ":", "label", "=", "entry", "[", "\"label\"", "]", "pattern", "=", "entry", "[", "\"pattern\"", "]", "if", "isinstance", "(", "pattern", ",", "basestring_", ")", ":", "self", ".", "phrase_patterns", "[", "label", "]", ".", "append", "(", "self", ".", "nlp", "(", "pattern", ")", ")", "elif", "isinstance", "(", "pattern", ",", "list", ")", ":", "self", ".", "token_patterns", "[", "label", "]", ".", "append", "(", "pattern", ")", "else", ":", "raise", "ValueError", "(", "Errors", ".", "E097", ".", "format", "(", "pattern", "=", "pattern", ")", ")", "for", "label", ",", "patterns", "in", "self", ".", "token_patterns", ".", "items", "(", ")", ":", "self", ".", "matcher", ".", "add", "(", "label", ",", "None", ",", "*", "patterns", ")", "for", "label", ",", "patterns", "in", "self", ".", "phrase_patterns", ".", "items", "(", ")", ":", "self", ".", "phrase_matcher", ".", "add", "(", "label", ",", "None", ",", "*", "patterns", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
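An end-to-end sketch mixing a phrase pattern and a token pattern, then running the ruler in a pipeline:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    nlp = English()
    ruler = EntityRuler(nlp)
    ruler.add_patterns([
        {"label": "ORG", "pattern": "Apple"},  # phrase pattern
        {"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]},  # token pattern
    ])
    nlp.add_pipe(ruler)
    doc = nlp("Apple is opening an office in San Francisco")
    print([(ent.text, ent.label_) for ent in doc.ents])  # [('Apple', 'ORG'), ('San Francisco', 'GPE')]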
train
EntityRuler.from_bytes
Load the entity ruler from a bytestring. patterns_bytes (bytes): The bytestring to load. **kwargs: Other config parameters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#from_bytes
spacy/pipeline/entityruler.py
def from_bytes(self, patterns_bytes, **kwargs):
    """Load the entity ruler from a bytestring.

    patterns_bytes (bytes): The bytestring to load.
    **kwargs: Other config parameters, mostly for consistency.
    RETURNS (EntityRuler): The loaded entity ruler.

    DOCS: https://spacy.io/api/entityruler#from_bytes
    """
    patterns = srsly.msgpack_loads(patterns_bytes)
    self.add_patterns(patterns)
    return self
[ "Load", "the", "entity", "ruler", "from", "a", "bytestring", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L148-L159
[ "def", "from_bytes", "(", "self", ",", "patterns_bytes", ",", "*", "*", "kwargs", ")", ":", "patterns", "=", "srsly", ".", "msgpack_loads", "(", "patterns_bytes", ")", "self", ".", "add_patterns", "(", "patterns", ")", "return", "self" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
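A round-trip sketch, assuming the matching to_bytes counterpart on EntityRuler:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    nlp = English()
    ruler = EntityRuler(nlp)
    ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
    new_ruler = EntityRuler(nlp).from_bytes(ruler.to_bytes())  # round-trip via bytes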
train
EntityRuler.from_disk
Load the entity ruler from a file. Expects a file containing newline-delimited JSON (JSONL) with one entry per line. path (unicode / Path): The JSONL file to load. **kwargs: Other config parameters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#from_disk
spacy/pipeline/entityruler.py
def from_disk(self, path, **kwargs):
    """Load the entity ruler from a file. Expects a file containing
    newline-delimited JSON (JSONL) with one entry per line.

    path (unicode / Path): The JSONL file to load.
    **kwargs: Other config parameters, mostly for consistency.
    RETURNS (EntityRuler): The loaded entity ruler.

    DOCS: https://spacy.io/api/entityruler#from_disk
    """
    path = ensure_path(path)
    path = path.with_suffix(".jsonl")
    patterns = srsly.read_jsonl(path)
    self.add_patterns(patterns)
    return self
[ "Load", "the", "entity", "ruler", "from", "a", "file", ".", "Expects", "a", "file", "containing", "newline", "-", "delimited", "JSON", "(", "JSONL", ")", "with", "one", "entry", "per", "line", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L170-L184
[ "def", "from_disk", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "path", "=", "ensure_path", "(", "path", ")", "path", "=", "path", ".", "with_suffix", "(", "\".jsonl\"", ")", "patterns", "=", "srsly", ".", "read_jsonl", "(", "path", ")", "self", ".", "add_patterns", "(", "patterns", ")", "return", "self" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
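A sketch of from_disk; the file name is a placeholder for a JSONL file with one pattern object per line:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    nlp = English()
    ruler = EntityRuler(nlp).from_disk("patterns.jsonl")  # hypothetical file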
train
EntityRuler.to_disk
Save the entity ruler patterns to a file. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to save. **kwargs: Other config parameters, mostly for consistency. DOCS: https://spacy.io/api/entityruler#to_disk
spacy/pipeline/entityruler.py
def to_disk(self, path, **kwargs):
    """Save the entity ruler patterns to a file. The patterns will be saved
    as newline-delimited JSON (JSONL).

    path (unicode / Path): The JSONL file to save.
    **kwargs: Other config parameters, mostly for consistency.

    DOCS: https://spacy.io/api/entityruler#to_disk
    """
    path = ensure_path(path)
    path = path.with_suffix(".jsonl")
    srsly.write_jsonl(path, self.patterns)
[ "Save", "the", "entity", "ruler", "patterns", "to", "a", "directory", ".", "The", "patterns", "will", "be", "saved", "as", "newline", "-", "delimited", "JSON", "(", "JSONL", ")", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L186-L198
[ "def", "to_disk", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "path", "=", "ensure_path", "(", "path", ")", "path", "=", "path", ".", "with_suffix", "(", "\".jsonl\"", ")", "srsly", ".", "write_jsonl", "(", "path", ",", "self", ".", "patterns", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
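A sketch of to_disk; with_suffix means the extension is added automatically:

    from spacy.lang.en import English
    from spacy.pipeline import EntityRuler

    ruler = EntityRuler(English())
    ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
    ruler.to_disk("patterns")  # written to patterns.jsonl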
train
read_data
Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True, include Doc objects created using nlp.make_doc and then aligned against the gold-standard sequences. If oracle_segments=True, include Doc objects created from the gold-standard segments. At least one must be True.
bin/ud/ud_train.py
def read_data(
    nlp,
    conllu_file,
    text_file,
    raw_text=True,
    oracle_segments=False,
    max_doc_length=None,
    limit=None,
):
    """Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,
    include Doc objects created using nlp.make_doc and then aligned against
    the gold-standard sequences. If oracle_segments=True, include Doc objects
    created from the gold-standard segments. At least one must be True."""
    if not raw_text and not oracle_segments:
        raise ValueError("At least one of raw_text or oracle_segments must be True")
    paragraphs = split_text(text_file.read())
    conllu = read_conllu(conllu_file)
    # sd is spacy doc; cd is conllu doc
    # cs is conllu sent, ct is conllu token
    docs = []
    golds = []
    for doc_id, (text, cd) in enumerate(zip(paragraphs, conllu)):
        sent_annots = []
        for cs in cd:
            sent = defaultdict(list)
            for id_, word, lemma, pos, tag, morph, head, dep, _, space_after in cs:
                if "." in id_:
                    continue
                if "-" in id_:
                    continue
                id_ = int(id_) - 1
                head = int(head) - 1 if head != "0" else id_
                sent["words"].append(word)
                sent["tags"].append(tag)
                sent["heads"].append(head)
                sent["deps"].append("ROOT" if dep == "root" else dep)
                sent["spaces"].append(space_after == "_")
            sent["entities"] = ["-"] * len(sent["words"])
            sent["heads"], sent["deps"] = projectivize(sent["heads"], sent["deps"])
            if oracle_segments:
                docs.append(Doc(nlp.vocab, words=sent["words"], spaces=sent["spaces"]))
                golds.append(GoldParse(docs[-1], **sent))
            sent_annots.append(sent)
            if raw_text and max_doc_length and len(sent_annots) >= max_doc_length:
                doc, gold = _make_gold(nlp, None, sent_annots)
                sent_annots = []
                docs.append(doc)
                golds.append(gold)
                if limit and len(docs) >= limit:
                    return docs, golds
        if raw_text and sent_annots:
            doc, gold = _make_gold(nlp, None, sent_annots)
            docs.append(doc)
            golds.append(gold)
        if limit and len(docs) >= limit:
            return docs, golds
    return docs, golds
[ "Read", "the", "CONLLU", "format", "into", "(", "Doc", "GoldParse", ")", "tuples", ".", "If", "raw_text", "=", "True", "include", "Doc", "objects", "created", "using", "nlp", ".", "make_doc", "and", "then", "aligned", "against", "the", "gold", "-", "standard", "sequences", ".", "If", "oracle_segments", "=", "True", "include", "Doc", "objects", "created", "from", "the", "gold", "-", "standard", "segments", ".", "At", "least", "one", "must", "be", "True", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/ud_train.py#L52-L110
[ "def", "read_data", "(", "nlp", ",", "conllu_file", ",", "text_file", ",", "raw_text", "=", "True", ",", "oracle_segments", "=", "False", ",", "max_doc_length", "=", "None", ",", "limit", "=", "None", ",", ")", ":", "if", "not", "raw_text", "and", "not", "oracle_segments", ":", "raise", "ValueError", "(", "\"At least one of raw_text or oracle_segments must be True\"", ")", "paragraphs", "=", "split_text", "(", "text_file", ".", "read", "(", ")", ")", "conllu", "=", "read_conllu", "(", "conllu_file", ")", "# sd is spacy doc; cd is conllu doc", "# cs is conllu sent, ct is conllu token", "docs", "=", "[", "]", "golds", "=", "[", "]", "for", "doc_id", ",", "(", "text", ",", "cd", ")", "in", "enumerate", "(", "zip", "(", "paragraphs", ",", "conllu", ")", ")", ":", "sent_annots", "=", "[", "]", "for", "cs", "in", "cd", ":", "sent", "=", "defaultdict", "(", "list", ")", "for", "id_", ",", "word", ",", "lemma", ",", "pos", ",", "tag", ",", "morph", ",", "head", ",", "dep", ",", "_", ",", "space_after", "in", "cs", ":", "if", "\".\"", "in", "id_", ":", "continue", "if", "\"-\"", "in", "id_", ":", "continue", "id_", "=", "int", "(", "id_", ")", "-", "1", "head", "=", "int", "(", "head", ")", "-", "1", "if", "head", "!=", "\"0\"", "else", "id_", "sent", "[", "\"words\"", "]", ".", "append", "(", "word", ")", "sent", "[", "\"tags\"", "]", ".", "append", "(", "tag", ")", "sent", "[", "\"heads\"", "]", ".", "append", "(", "head", ")", "sent", "[", "\"deps\"", "]", ".", "append", "(", "\"ROOT\"", "if", "dep", "==", "\"root\"", "else", "dep", ")", "sent", "[", "\"spaces\"", "]", ".", "append", "(", "space_after", "==", "\"_\"", ")", "sent", "[", "\"entities\"", "]", "=", "[", "\"-\"", "]", "*", "len", "(", "sent", "[", "\"words\"", "]", ")", "sent", "[", "\"heads\"", "]", ",", "sent", "[", "\"deps\"", "]", "=", "projectivize", "(", "sent", "[", "\"heads\"", "]", ",", "sent", "[", "\"deps\"", "]", ")", "if", "oracle_segments", ":", "docs", ".", "append", "(", "Doc", "(", "nlp", ".", "vocab", ",", "words", "=", "sent", "[", "\"words\"", "]", ",", "spaces", "=", "sent", "[", "\"spaces\"", "]", ")", ")", "golds", ".", "append", "(", "GoldParse", "(", "docs", "[", "-", "1", "]", ",", "*", "*", "sent", ")", ")", "sent_annots", ".", "append", "(", "sent", ")", "if", "raw_text", "and", "max_doc_length", "and", "len", "(", "sent_annots", ")", ">=", "max_doc_length", ":", "doc", ",", "gold", "=", "_make_gold", "(", "nlp", ",", "None", ",", "sent_annots", ")", "sent_annots", "=", "[", "]", "docs", ".", "append", "(", "doc", ")", "golds", ".", "append", "(", "gold", ")", "if", "limit", "and", "len", "(", "docs", ")", ">=", "limit", ":", "return", "docs", ",", "golds", "if", "raw_text", "and", "sent_annots", ":", "doc", ",", "gold", "=", "_make_gold", "(", "nlp", ",", "None", ",", "sent_annots", ")", "docs", ".", "append", "(", "doc", ")", "golds", ".", "append", "(", "gold", ")", "if", "limit", "and", "len", "(", "docs", ")", ">=", "limit", ":", "return", "docs", ",", "golds", "return", "docs", ",", "golds" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
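A minimal usage sketch for read_data; the import path and corpus filenames below are assumptions, not part of the record.

# Hypothetical usage: read (Doc, GoldParse) pairs from a UD corpus.
import spacy
from ud_train import read_data  # assumes bin/ud is on sys.path

nlp = spacy.blank("en")
with open("en_ewt-ud-train.conllu") as conllu_file, open("en_ewt-ud-train.txt") as text_file:
    docs, golds = read_data(
        nlp, conllu_file, text_file, raw_text=True, max_doc_length=10, limit=100
    )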
train
golds_to_gold_tuples
Get out the annoying 'tuples' format used by begin_training, given the GoldParse objects.
bin/ud/ud_train.py
def golds_to_gold_tuples(docs, golds): """Get out the annoying 'tuples' format used by begin_training, given the GoldParse objects.""" tuples = [] for doc, gold in zip(docs, golds): text = doc.text ids, words, tags, heads, labels, iob = zip(*gold.orig_annot) sents = [((ids, words, tags, heads, labels, iob), [])] tuples.append((text, sents)) return tuples
def golds_to_gold_tuples(docs, golds): """Get out the annoying 'tuples' format used by begin_training, given the GoldParse objects.""" tuples = [] for doc, gold in zip(docs, golds): text = doc.text ids, words, tags, heads, labels, iob = zip(*gold.orig_annot) sents = [((ids, words, tags, heads, labels, iob), [])] tuples.append((text, sents)) return tuples
[ "Get", "out", "the", "annoying", "tuples", "format", "used", "by", "begin_training", "given", "the", "GoldParse", "objects", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/ud_train.py#L173-L182
[ "def", "golds_to_gold_tuples", "(", "docs", ",", "golds", ")", ":", "tuples", "=", "[", "]", "for", "doc", ",", "gold", "in", "zip", "(", "docs", ",", "golds", ")", ":", "text", "=", "doc", ".", "text", "ids", ",", "words", ",", "tags", ",", "heads", ",", "labels", ",", "iob", "=", "zip", "(", "*", "gold", ".", "orig_annot", ")", "sents", "=", "[", "(", "(", "ids", ",", "words", ",", "tags", ",", "heads", ",", "labels", ",", "iob", ")", ",", "[", "]", ")", "]", "tuples", ".", "append", "(", "(", "text", ",", "sents", ")", ")", "return", "tuples" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
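A short sketch, assuming docs, golds and nlp come from the read_data example above; in spaCy 2.x, begin_training accepts a callable returning these tuples.

# Convert (Doc, GoldParse) pairs into begin_training's tuple format.
train_tuples = golds_to_gold_tuples(docs, golds)
optimizer = nlp.begin_training(lambda: train_tuples)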
train
like_num
check if text resembles a number
spacy/lang/fa/lex_attrs.py
def like_num(text): """ check if text resembles a number """ text = ( text.replace(",", "") .replace(".", "") .replace("،", "") .replace("٫", "") .replace("/", "") ) if text.isdigit(): return True if text in _num_words: return True if text in _ordinal_words: return True return False
def like_num(text): """ check if text resembles a number """ text = ( text.replace(",", "") .replace(".", "") .replace("،", "") .replace("٫", "") .replace("/", "") ) if text.isdigit(): return True if text in _num_words: return True if text in _ordinal_words: return True return False
[ "check", "if", "text", "resembles", "a", "number" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/fa/lex_attrs.py#L84-L101
[ "def", "like_num", "(", "text", ")", ":", "text", "=", "(", "text", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ".", "replace", "(", "\"،\",", " ", "\")", "", ".", "replace", "(", "\"٫\",", " ", "\")", "", ".", "replace", "(", "\"/\"", ",", "\"\"", ")", ")", "if", "text", ".", "isdigit", "(", ")", ":", "return", "True", "if", "text", "in", "_num_words", ":", "return", "True", "if", "text", "in", "_ordinal_words", ":", "return", "True", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
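The behaviour below follows directly from the function body: separators are stripped before the isdigit() check.

print(like_num("12,500"))  # True: "," is stripped, then isdigit() passes
print(like_num("12/5"))    # True: "/" is stripped as well
print(like_num("hello"))   # False, assuming it is not in _num_words or _ordinal_words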
train
merge_bytes
Concatenate multiple serialized binders into one byte string.
spacy/tokens/_serialize.py
def merge_bytes(binder_strings): """Concatenate multiple serialized binders into one byte string.""" output = None for byte_string in binder_strings: binder = Binder().from_bytes(byte_string) if output is None: output = binder else: output.merge(binder) return output.to_bytes()
def merge_bytes(binder_strings): """Concatenate multiple serialized binders into one byte string.""" output = None for byte_string in binder_strings: binder = Binder().from_bytes(byte_string) if output is None: output = binder else: output.merge(binder) return output.to_bytes()
[ "Concatenate", "multiple", "serialized", "binders", "into", "one", "byte", "string", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L97-L106
[ "def", "merge_bytes", "(", "binder_strings", ")", ":", "output", "=", "None", "for", "byte_string", "in", "binder_strings", ":", "binder", "=", "Binder", "(", ")", ".", "from_bytes", "(", "byte_string", ")", "if", "output", "is", "None", ":", "output", "=", "binder", "else", ":", "output", ".", "merge", "(", "binder", ")", "return", "output", ".", "to_bytes", "(", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
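A usage sketch, assuming payload_a and payload_b are byte strings previously produced by Binder.to_bytes (for example on separate worker processes) and nlp is a loaded pipeline.

# Merge two serialized binders and recover the combined docs.
merged = merge_bytes([payload_a, payload_b])
docs = list(Binder().from_bytes(merged).get_docs(nlp.vocab))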
train
Binder.add
Add a doc's annotations to the binder for serialization.
spacy/tokens/_serialize.py
def add(self, doc): """Add a doc's annotations to the binder for serialization.""" array = doc.to_array(self.attrs) if len(array.shape) == 1: array = array.reshape((array.shape[0], 1)) self.tokens.append(array) spaces = doc.to_array(SPACY) assert array.shape[0] == spaces.shape[0] spaces = spaces.reshape((spaces.shape[0], 1)) self.spaces.append(numpy.asarray(spaces, dtype=bool)) self.strings.update(w.text for w in doc)
def add(self, doc): """Add a doc's annotations to the binder for serialization.""" array = doc.to_array(self.attrs) if len(array.shape) == 1: array = array.reshape((array.shape[0], 1)) self.tokens.append(array) spaces = doc.to_array(SPACY) assert array.shape[0] == spaces.shape[0] spaces = spaces.reshape((spaces.shape[0], 1)) self.spaces.append(numpy.asarray(spaces, dtype=bool)) self.strings.update(w.text for w in doc)
[ "Add", "a", "doc", "s", "annotations", "to", "the", "binder", "for", "serialization", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L35-L45
[ "def", "add", "(", "self", ",", "doc", ")", ":", "array", "=", "doc", ".", "to_array", "(", "self", ".", "attrs", ")", "if", "len", "(", "array", ".", "shape", ")", "==", "1", ":", "array", "=", "array", ".", "reshape", "(", "(", "array", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "self", ".", "tokens", ".", "append", "(", "array", ")", "spaces", "=", "doc", ".", "to_array", "(", "SPACY", ")", "assert", "array", ".", "shape", "[", "0", "]", "==", "spaces", ".", "shape", "[", "0", "]", "spaces", "=", "spaces", ".", "reshape", "(", "(", "spaces", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "self", ".", "spaces", ".", "append", "(", "numpy", ".", "asarray", "(", "spaces", ",", "dtype", "=", "bool", ")", ")", "self", ".", "strings", ".", "update", "(", "w", ".", "text", "for", "w", "in", "doc", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
Binder.get_docs
Recover Doc objects from the annotations, using the given vocab.
spacy/tokens/_serialize.py
def get_docs(self, vocab): """Recover Doc objects from the annotations, using the given vocab.""" for string in self.strings: vocab[string] orth_col = self.attrs.index(ORTH) for tokens, spaces in zip(self.tokens, self.spaces): words = [vocab.strings[orth] for orth in tokens[:, orth_col]] doc = Doc(vocab, words=words, spaces=spaces) doc = doc.from_array(self.attrs, tokens) yield doc
def get_docs(self, vocab): """Recover Doc objects from the annotations, using the given vocab.""" for string in self.strings: vocab[string] orth_col = self.attrs.index(ORTH) for tokens, spaces in zip(self.tokens, self.spaces): words = [vocab.strings[orth] for orth in tokens[:, orth_col]] doc = Doc(vocab, words=words, spaces=spaces) doc = doc.from_array(self.attrs, tokens) yield doc
[ "Recover", "Doc", "objects", "from", "the", "annotations", "using", "the", "given", "vocab", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L47-L56
[ "def", "get_docs", "(", "self", ",", "vocab", ")", ":", "for", "string", "in", "self", ".", "strings", ":", "vocab", "[", "string", "]", "orth_col", "=", "self", ".", "attrs", ".", "index", "(", "ORTH", ")", "for", "tokens", ",", "spaces", "in", "zip", "(", "self", ".", "tokens", ",", "self", ".", "spaces", ")", ":", "words", "=", "[", "vocab", ".", "strings", "[", "orth", "]", "for", "orth", "in", "tokens", "[", ":", ",", "orth_col", "]", "]", "doc", "=", "Doc", "(", "vocab", ",", "words", "=", "words", ",", "spaces", "=", "spaces", ")", "doc", "=", "doc", ".", "from_array", "(", "self", ".", "attrs", ",", "tokens", ")", "yield", "doc" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
Binder.merge
Extend the annotations of this binder with the annotations from another.
spacy/tokens/_serialize.py
def merge(self, other): """Extend the annotations of this binder with the annotations from another.""" assert self.attrs == other.attrs self.tokens.extend(other.tokens) self.spaces.extend(other.spaces) self.strings.update(other.strings)
def merge(self, other): """Extend the annotations of this binder with the annotations from another.""" assert self.attrs == other.attrs self.tokens.extend(other.tokens) self.spaces.extend(other.spaces) self.strings.update(other.strings)
[ "Extend", "the", "annotations", "of", "this", "binder", "with", "the", "annotations", "from", "another", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L58-L63
[ "def", "merge", "(", "self", ",", "other", ")", ":", "assert", "self", ".", "attrs", "==", "other", ".", "attrs", "self", ".", "tokens", ".", "extend", "(", "other", ".", "tokens", ")", "self", ".", "spaces", ".", "extend", "(", "other", ".", "spaces", ")", "self", ".", "strings", ".", "update", "(", "other", ".", "strings", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
Binder.to_bytes
Serialize the binder's annotations into a byte string.
spacy/tokens/_serialize.py
def to_bytes(self): """Serialize the binder's annotations into a byte string.""" for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape lengths = [len(tokens) for tokens in self.tokens] msg = { "attrs": self.attrs, "tokens": numpy.vstack(self.tokens).tobytes("C"), "spaces": numpy.vstack(self.spaces).tobytes("C"), "lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"), "strings": list(self.strings), } return gzip.compress(srsly.msgpack_dumps(msg))
def to_bytes(self): """Serialize the binder's annotations into a byte string.""" for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape lengths = [len(tokens) for tokens in self.tokens] msg = { "attrs": self.attrs, "tokens": numpy.vstack(self.tokens).tobytes("C"), "spaces": numpy.vstack(self.spaces).tobytes("C"), "lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"), "strings": list(self.strings), } return gzip.compress(srsly.msgpack_dumps(msg))
[ "Serialize", "the", "binder", "s", "annotations", "into", "a", "byte", "string", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L65-L77
[ "def", "to_bytes", "(", "self", ")", ":", "for", "tokens", "in", "self", ".", "tokens", ":", "assert", "len", "(", "tokens", ".", "shape", ")", "==", "2", ",", "tokens", ".", "shape", "lengths", "=", "[", "len", "(", "tokens", ")", "for", "tokens", "in", "self", ".", "tokens", "]", "msg", "=", "{", "\"attrs\"", ":", "self", ".", "attrs", ",", "\"tokens\"", ":", "numpy", ".", "vstack", "(", "self", ".", "tokens", ")", ".", "tobytes", "(", "\"C\"", ")", ",", "\"spaces\"", ":", "numpy", ".", "vstack", "(", "self", ".", "spaces", ")", ".", "tobytes", "(", "\"C\"", ")", ",", "\"lengths\"", ":", "numpy", ".", "asarray", "(", "lengths", ",", "dtype", "=", "\"int32\"", ")", ".", "tobytes", "(", "\"C\"", ")", ",", "\"strings\"", ":", "list", "(", "self", ".", "strings", ")", ",", "}", "return", "gzip", ".", "compress", "(", "srsly", ".", "msgpack_dumps", "(", "msg", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
Binder.from_bytes
Deserialize the binder's annotations from a byte string.
spacy/tokens/_serialize.py
def from_bytes(self, string): """Deserialize the binder's annotations from a byte string.""" msg = srsly.msgpack_loads(gzip.decompress(string)) self.attrs = msg["attrs"] self.strings = set(msg["strings"]) lengths = numpy.fromstring(msg["lengths"], dtype="int32") flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool) flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64") shape = (flat_tokens.size // len(self.attrs), len(self.attrs)) flat_tokens = flat_tokens.reshape(shape) flat_spaces = flat_spaces.reshape((flat_spaces.size, 1)) self.tokens = NumpyOps().unflatten(flat_tokens, lengths) self.spaces = NumpyOps().unflatten(flat_spaces, lengths) for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape return self
def from_bytes(self, string): """Deserialize the binder's annotations from a byte string.""" msg = srsly.msgpack_loads(gzip.decompress(string)) self.attrs = msg["attrs"] self.strings = set(msg["strings"]) lengths = numpy.fromstring(msg["lengths"], dtype="int32") flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool) flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64") shape = (flat_tokens.size // len(self.attrs), len(self.attrs)) flat_tokens = flat_tokens.reshape(shape) flat_spaces = flat_spaces.reshape((flat_spaces.size, 1)) self.tokens = NumpyOps().unflatten(flat_tokens, lengths) self.spaces = NumpyOps().unflatten(flat_spaces, lengths) for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape return self
[ "Deserialize", "the", "binder", "s", "annotations", "from", "a", "byte", "string", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/_serialize.py#L79-L94
[ "def", "from_bytes", "(", "self", ",", "string", ")", ":", "msg", "=", "srsly", ".", "msgpack_loads", "(", "gzip", ".", "decompress", "(", "string", ")", ")", "self", ".", "attrs", "=", "msg", "[", "\"attrs\"", "]", "self", ".", "strings", "=", "set", "(", "msg", "[", "\"strings\"", "]", ")", "lengths", "=", "numpy", ".", "fromstring", "(", "msg", "[", "\"lengths\"", "]", ",", "dtype", "=", "\"int32\"", ")", "flat_spaces", "=", "numpy", ".", "fromstring", "(", "msg", "[", "\"spaces\"", "]", ",", "dtype", "=", "bool", ")", "flat_tokens", "=", "numpy", ".", "fromstring", "(", "msg", "[", "\"tokens\"", "]", ",", "dtype", "=", "\"uint64\"", ")", "shape", "=", "(", "flat_tokens", ".", "size", "//", "len", "(", "self", ".", "attrs", ")", ",", "len", "(", "self", ".", "attrs", ")", ")", "flat_tokens", "=", "flat_tokens", ".", "reshape", "(", "shape", ")", "flat_spaces", "=", "flat_spaces", ".", "reshape", "(", "(", "flat_spaces", ".", "size", ",", "1", ")", ")", "self", ".", "tokens", "=", "NumpyOps", "(", ")", ".", "unflatten", "(", "flat_tokens", ",", "lengths", ")", "self", ".", "spaces", "=", "NumpyOps", "(", ")", ".", "unflatten", "(", "flat_spaces", ",", "lengths", ")", "for", "tokens", "in", "self", ".", "tokens", ":", "assert", "len", "(", "tokens", ".", "shape", ")", "==", "2", ",", "tokens", ".", "shape", "return", "self" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
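A round-trip sketch tying the Binder methods together, assuming Binder is importable from spacy.tokens._serialize (per the path fields above) and its default attrs are used.

# Pack two docs, serialize to bytes, then restore them.
import spacy
from spacy.tokens._serialize import Binder

nlp = spacy.blank("en")
binder = Binder()
for text in ("Hello world.", "This is a test."):
    binder.add(nlp(text))
payload = binder.to_bytes()
restored = list(Binder().from_bytes(payload).get_docs(nlp.vocab))
assert [doc.text for doc in restored] == ["Hello world.", "This is a test."]  # texts should round-trip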
train
load_data
Load data from the IMDB dataset.
examples/training/train_textcat.py
def load_data(limit=0, split=0.8): """Load data from the IMDB dataset.""" # Partition off part of the train data for evaluation train_data, _ = thinc.extra.datasets.imdb() random.shuffle(train_data) train_data = train_data[-limit:] texts, labels = zip(*train_data) cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels] split = int(len(train_data) * split) return (texts[:split], cats[:split]), (texts[split:], cats[split:])
def load_data(limit=0, split=0.8): """Load data from the IMDB dataset.""" # Partition off part of the train data for evaluation train_data, _ = thinc.extra.datasets.imdb() random.shuffle(train_data) train_data = train_data[-limit:] texts, labels = zip(*train_data) cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels] split = int(len(train_data) * split) return (texts[:split], cats[:split]), (texts[split:], cats[split:])
[ "Load", "data", "from", "the", "IMDB", "dataset", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_textcat.py#L120-L129
[ "def", "load_data", "(", "limit", "=", "0", ",", "split", "=", "0.8", ")", ":", "# Partition off part of the train data for evaluation", "train_data", ",", "_", "=", "thinc", ".", "extra", ".", "datasets", ".", "imdb", "(", ")", "random", ".", "shuffle", "(", "train_data", ")", "train_data", "=", "train_data", "[", "-", "limit", ":", "]", "texts", ",", "labels", "=", "zip", "(", "*", "train_data", ")", "cats", "=", "[", "{", "\"POSITIVE\"", ":", "bool", "(", "y", ")", ",", "\"NEGATIVE\"", ":", "not", "bool", "(", "y", ")", "}", "for", "y", "in", "labels", "]", "split", "=", "int", "(", "len", "(", "train_data", ")", "*", "split", ")", "return", "(", "texts", "[", ":", "split", "]", ",", "cats", "[", ":", "split", "]", ")", ",", "(", "texts", "[", "split", ":", "]", ",", "cats", "[", "split", ":", "]", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
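A usage sketch; the limit is an arbitrary example value, and thinc downloads the IMDB data on first use.

(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=2000)
print(len(train_texts), len(dev_texts))  # 1600 400 with the default 0.8 split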
train
package
Generate Python package for model data, including meta and required installation files. A new directory will be created in the specified output directory, and model data will be copied over. If --create-meta is set and a meta.json already exists in the output directory, the existing values will be used as the defaults in the command-line prompt.
spacy/cli/package.py
def package(input_dir, output_dir, meta_path=None, create_meta=False, force=False): """ Generate Python package for model data, including meta and required installation files. A new directory will be created in the specified output directory, and model data will be copied over. If --create-meta is set and a meta.json already exists in the output directory, the existing values will be used as the defaults in the command-line prompt. """ msg = Printer() input_path = util.ensure_path(input_dir) output_path = util.ensure_path(output_dir) meta_path = util.ensure_path(meta_path) if not input_path or not input_path.exists(): msg.fail("Can't locate model data", input_path, exits=1) if not output_path or not output_path.exists(): msg.fail("Output directory not found", output_path, exits=1) if meta_path and not meta_path.exists(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta_path = meta_path or input_path / "meta.json" if meta_path.is_file(): meta = srsly.read_json(meta_path) if not create_meta: # only print if user doesn't want to overwrite msg.good("Loaded meta.json from file", meta_path) else: meta = generate_meta(input_dir, meta, msg) for key in ("lang", "name", "version"): if key not in meta or meta[key] == "": msg.fail( "No '{}' setting found in meta.json".format(key), "This setting is required to build your package.", exits=1, ) model_name = meta["lang"] + "_" + meta["name"] model_name_v = model_name + "-" + meta["version"] main_path = output_path / model_name_v package_path = main_path / model_name if package_path.exists(): if force: shutil.rmtree(path2str(package_path)) else: msg.fail( "Package directory already exists", "Please delete the directory and try again, or use the " "`--force` flag to overwrite existing " "directories: {path}".format(path=path2str(package_path)), exits=1, ) Path.mkdir(package_path, parents=True) shutil.copytree(path2str(input_path), path2str(package_path / model_name_v)) create_file(main_path / "meta.json", srsly.json_dumps(meta, indent=2)) create_file(main_path / "setup.py", TEMPLATE_SETUP) create_file(main_path / "MANIFEST.in", TEMPLATE_MANIFEST) create_file(package_path / "__init__.py", TEMPLATE_INIT) msg.good("Successfully created package '{}'".format(model_name_v), main_path) msg.text("To build the package, run `python setup.py sdist` in this directory.")
def package(input_dir, output_dir, meta_path=None, create_meta=False, force=False): """ Generate Python package for model data, including meta and required installation files. A new directory will be created in the specified output directory, and model data will be copied over. If --create-meta is set and a meta.json already exists in the output directory, the existing values will be used as the defaults in the command-line prompt. """ msg = Printer() input_path = util.ensure_path(input_dir) output_path = util.ensure_path(output_dir) meta_path = util.ensure_path(meta_path) if not input_path or not input_path.exists(): msg.fail("Can't locate model data", input_path, exits=1) if not output_path or not output_path.exists(): msg.fail("Output directory not found", output_path, exits=1) if meta_path and not meta_path.exists(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta_path = meta_path or input_path / "meta.json" if meta_path.is_file(): meta = srsly.read_json(meta_path) if not create_meta: # only print if user doesn't want to overwrite msg.good("Loaded meta.json from file", meta_path) else: meta = generate_meta(input_dir, meta, msg) for key in ("lang", "name", "version"): if key not in meta or meta[key] == "": msg.fail( "No '{}' setting found in meta.json".format(key), "This setting is required to build your package.", exits=1, ) model_name = meta["lang"] + "_" + meta["name"] model_name_v = model_name + "-" + meta["version"] main_path = output_path / model_name_v package_path = main_path / model_name if package_path.exists(): if force: shutil.rmtree(path2str(package_path)) else: msg.fail( "Package directory already exists", "Please delete the directory and try again, or use the " "`--force` flag to overwrite existing " "directories: {path}".format(path=path2str(package_path)), exits=1, ) Path.mkdir(package_path, parents=True) shutil.copytree(path2str(input_path), path2str(package_path / model_name_v)) create_file(main_path / "meta.json", srsly.json_dumps(meta, indent=2)) create_file(main_path / "setup.py", TEMPLATE_SETUP) create_file(main_path / "MANIFEST.in", TEMPLATE_MANIFEST) create_file(package_path / "__init__.py", TEMPLATE_INIT) msg.good("Successfully created package '{}'".format(model_name_v), main_path) msg.text("To build the package, run `python setup.py sdist` in this directory.")
[ "Generate", "Python", "package", "for", "model", "data", "including", "meta", "and", "required", "installation", "files", ".", "A", "new", "directory", "will", "be", "created", "in", "the", "specified", "output", "directory", "and", "model", "data", "will", "be", "copied", "over", ".", "If", "--", "create", "-", "meta", "is", "set", "and", "a", "meta", ".", "json", "already", "exists", "in", "the", "output", "directory", "the", "existing", "values", "will", "be", "used", "as", "the", "defaults", "in", "the", "command", "-", "line", "prompt", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/package.py#L22-L78
[ "def", "package", "(", "input_dir", ",", "output_dir", ",", "meta_path", "=", "None", ",", "create_meta", "=", "False", ",", "force", "=", "False", ")", ":", "msg", "=", "Printer", "(", ")", "input_path", "=", "util", ".", "ensure_path", "(", "input_dir", ")", "output_path", "=", "util", ".", "ensure_path", "(", "output_dir", ")", "meta_path", "=", "util", ".", "ensure_path", "(", "meta_path", ")", "if", "not", "input_path", "or", "not", "input_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Can't locate model data\"", ",", "input_path", ",", "exits", "=", "1", ")", "if", "not", "output_path", "or", "not", "output_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Output directory not found\"", ",", "output_path", ",", "exits", "=", "1", ")", "if", "meta_path", "and", "not", "meta_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Can't find model meta.json\"", ",", "meta_path", ",", "exits", "=", "1", ")", "meta_path", "=", "meta_path", "or", "input_path", "/", "\"meta.json\"", "if", "meta_path", ".", "is_file", "(", ")", ":", "meta", "=", "srsly", ".", "read_json", "(", "meta_path", ")", "if", "not", "create_meta", ":", "# only print if user doesn't want to overwrite", "msg", ".", "good", "(", "\"Loaded meta.json from file\"", ",", "meta_path", ")", "else", ":", "meta", "=", "generate_meta", "(", "input_dir", ",", "meta", ",", "msg", ")", "for", "key", "in", "(", "\"lang\"", ",", "\"name\"", ",", "\"version\"", ")", ":", "if", "key", "not", "in", "meta", "or", "meta", "[", "key", "]", "==", "\"\"", ":", "msg", ".", "fail", "(", "\"No '{}' setting found in meta.json\"", ".", "format", "(", "key", ")", ",", "\"This setting is required to build your package.\"", ",", "exits", "=", "1", ",", ")", "model_name", "=", "meta", "[", "\"lang\"", "]", "+", "\"_\"", "+", "meta", "[", "\"name\"", "]", "model_name_v", "=", "model_name", "+", "\"-\"", "+", "meta", "[", "\"version\"", "]", "main_path", "=", "output_path", "/", "model_name_v", "package_path", "=", "main_path", "/", "model_name", "if", "package_path", ".", "exists", "(", ")", ":", "if", "force", ":", "shutil", ".", "rmtree", "(", "path2str", "(", "package_path", ")", ")", "else", ":", "msg", ".", "fail", "(", "\"Package directory already exists\"", ",", "\"Please delete the directory and try again, or use the \"", "\"`--force` flag to overwrite existing \"", "\"directories.\"", ".", "format", "(", "path", "=", "path2str", "(", "package_path", ")", ")", ",", "exits", "=", "1", ",", ")", "Path", ".", "mkdir", "(", "package_path", ",", "parents", "=", "True", ")", "shutil", ".", "copytree", "(", "path2str", "(", "input_path", ")", ",", "path2str", "(", "package_path", "/", "model_name_v", ")", ")", "create_file", "(", "main_path", "/", "\"meta.json\"", ",", "srsly", ".", "json_dumps", "(", "meta", ",", "indent", "=", "2", ")", ")", "create_file", "(", "main_path", "/", "\"setup.py\"", ",", "TEMPLATE_SETUP", ")", "create_file", "(", "main_path", "/", "\"MANIFEST.in\"", ",", "TEMPLATE_MANIFEST", ")", "create_file", "(", "package_path", "/", "\"__init__.py\"", ",", "TEMPLATE_INIT", ")", "msg", ".", "good", "(", "\"Successfully created package '{}'\"", ".", "format", "(", "model_name_v", ")", ",", "main_path", ")", "msg", ".", "text", "(", "\"To build the package, run `python setup.py sdist` in this directory.\"", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
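A programmatic sketch of the same call the spacy package CLI makes; the paths are hypothetical, and per the checks above both directories must already exist.

# Build an installable package directory from a trained model.
package("/models/my_model", "/packages", create_meta=False, force=True)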
train
FrenchLemmatizer.is_base_form
Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely.
spacy/lang/fr/lemmatizer/lemmatizer.py
def is_base_form(self, univ_pos, morphology=None): """ Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely. """ morphology = {} if morphology is None else morphology others = [key for key in morphology if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')] if univ_pos == 'noun' and morphology.get('Number') == 'sing': return True elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf': return True # This maps 'VBP' to base form -- probably just need 'IS_BASE' # morphology elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and morphology.get('Tense') == 'pres' and morphology.get('Number') is None and not others): return True elif univ_pos == 'adj' and morphology.get('Degree') == 'pos': return True elif VerbForm_inf in morphology: return True elif VerbForm_none in morphology: return True elif Number_sing in morphology: return True elif Degree_pos in morphology: return True else: return False
def is_base_form(self, univ_pos, morphology=None): """ Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely. """ morphology = {} if morphology is None else morphology others = [key for key in morphology if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')] if univ_pos == 'noun' and morphology.get('Number') == 'sing': return True elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf': return True # This maps 'VBP' to base form -- probably just need 'IS_BASE' # morphology elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and morphology.get('Tense') == 'pres' and morphology.get('Number') is None and not others): return True elif univ_pos == 'adj' and morphology.get('Degree') == 'pos': return True elif VerbForm_inf in morphology: return True elif VerbForm_none in morphology: return True elif Number_sing in morphology: return True elif Degree_pos in morphology: return True else: return False
[ "Check", "whether", "we", "re", "dealing", "with", "an", "uninflected", "paradigm", "so", "we", "can", "avoid", "lemmatization", "entirely", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/fr/lemmatizer/lemmatizer.py#L63-L93
[ "def", "is_base_form", "(", "self", ",", "univ_pos", ",", "morphology", "=", "None", ")", ":", "morphology", "=", "{", "}", "if", "morphology", "is", "None", "else", "morphology", "others", "=", "[", "key", "for", "key", "in", "morphology", "if", "key", "not", "in", "(", "POS", ",", "'Number'", ",", "'POS'", ",", "'VerbForm'", ",", "'Tense'", ")", "]", "if", "univ_pos", "==", "'noun'", "and", "morphology", ".", "get", "(", "'Number'", ")", "==", "'sing'", ":", "return", "True", "elif", "univ_pos", "==", "'verb'", "and", "morphology", ".", "get", "(", "'VerbForm'", ")", "==", "'inf'", ":", "return", "True", "# This maps 'VBP' to base form -- probably just need 'IS_BASE'", "# morphology", "elif", "univ_pos", "==", "'verb'", "and", "(", "morphology", ".", "get", "(", "'VerbForm'", ")", "==", "'fin'", "and", "morphology", ".", "get", "(", "'Tense'", ")", "==", "'pres'", "and", "morphology", ".", "get", "(", "'Number'", ")", "is", "None", "and", "not", "others", ")", ":", "return", "True", "elif", "univ_pos", "==", "'adj'", "and", "morphology", ".", "get", "(", "'Degree'", ")", "==", "'pos'", ":", "return", "True", "elif", "VerbForm_inf", "in", "morphology", ":", "return", "True", "elif", "VerbForm_none", "in", "morphology", ":", "return", "True", "elif", "Number_sing", "in", "morphology", ":", "return", "True", "elif", "Degree_pos", "in", "morphology", ":", "return", "True", "else", ":", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
main
Set up the pipeline and entity recognizer, and train the new entity.
examples/training/train_new_entity_type.py
def main(model=None, new_model_name="animal", output_dir=None, n_iter=30): """Set up the pipeline and entity recognizer, and train the new entity.""" random.seed(0) if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # Add entity recognizer to model if it's not in the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner) # otherwise, get it, so we can add labels to it else: ner = nlp.get_pipe("ner") ner.add_label(LABEL) # add new entity label to entity recognizer # Adding extraneous labels shouldn't mess anything up ner.add_label("VEGETABLE") if model is None: optimizer = nlp.begin_training() else: optimizer = nlp.resume_training() move_names = list(ner.move_names) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"] with nlp.disable_pipes(*other_pipes): # only train NER sizes = compounding(1.0, 4.0, 1.001) # batch up the examples using spaCy's minibatch for itn in range(n_iter): random.shuffle(TRAIN_DATA) batches = minibatch(TRAIN_DATA, size=sizes) losses = {} for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses) print("Losses", losses) # test the trained model test_text = "Do you like horses?" doc = nlp(test_text) print("Entities in '%s'" % test_text) for ent in doc.ents: print(ent.label_, ent.text) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.meta["name"] = new_model_name # rename model nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) # Check the classes have loaded back consistently assert nlp2.get_pipe("ner").move_names == move_names doc2 = nlp2(test_text) for ent in doc2.ents: print(ent.label_, ent.text)
def main(model=None, new_model_name="animal", output_dir=None, n_iter=30): """Set up the pipeline and entity recognizer, and train the new entity.""" random.seed(0) if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # Add entity recognizer to model if it's not in the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner) # otherwise, get it, so we can add labels to it else: ner = nlp.get_pipe("ner") ner.add_label(LABEL) # add new entity label to entity recognizer # Adding extraneous labels shouldn't mess anything up ner.add_label("VEGETABLE") if model is None: optimizer = nlp.begin_training() else: optimizer = nlp.resume_training() move_names = list(ner.move_names) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"] with nlp.disable_pipes(*other_pipes): # only train NER sizes = compounding(1.0, 4.0, 1.001) # batch up the examples using spaCy's minibatch for itn in range(n_iter): random.shuffle(TRAIN_DATA) batches = minibatch(TRAIN_DATA, size=sizes) losses = {} for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses) print("Losses", losses) # test the trained model test_text = "Do you like horses?" doc = nlp(test_text) print("Entities in '%s'" % test_text) for ent in doc.ents: print(ent.label_, ent.text) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.meta["name"] = new_model_name # rename model nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) # Check the classes have loaded back consistently assert nlp2.get_pipe("ner").move_names == move_names doc2 = nlp2(test_text) for ent in doc2.ents: print(ent.label_, ent.text)
[ "Set", "up", "the", "pipeline", "and", "entity", "recognizer", "and", "train", "the", "new", "entity", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_new_entity_type.py#L71-L134
[ "def", "main", "(", "model", "=", "None", ",", "new_model_name", "=", "\"animal\"", ",", "output_dir", "=", "None", ",", "n_iter", "=", "30", ")", ":", "random", ".", "seed", "(", "0", ")", "if", "model", "is", "not", "None", ":", "nlp", "=", "spacy", ".", "load", "(", "model", ")", "# load existing spaCy model", "print", "(", "\"Loaded model '%s'\"", "%", "model", ")", "else", ":", "nlp", "=", "spacy", ".", "blank", "(", "\"en\"", ")", "# create blank Language class", "print", "(", "\"Created blank 'en' model\"", ")", "# Add entity recognizer to model if it's not in the pipeline", "# nlp.create_pipe works for built-ins that are registered with spaCy", "if", "\"ner\"", "not", "in", "nlp", ".", "pipe_names", ":", "ner", "=", "nlp", ".", "create_pipe", "(", "\"ner\"", ")", "nlp", ".", "add_pipe", "(", "ner", ")", "# otherwise, get it, so we can add labels to it", "else", ":", "ner", "=", "nlp", ".", "get_pipe", "(", "\"ner\"", ")", "ner", ".", "add_label", "(", "LABEL", ")", "# add new entity label to entity recognizer", "# Adding extraneous labels shouldn't mess anything up", "ner", ".", "add_label", "(", "\"VEGETABLE\"", ")", "if", "model", "is", "None", ":", "optimizer", "=", "nlp", ".", "begin_training", "(", ")", "else", ":", "optimizer", "=", "nlp", ".", "resume_training", "(", ")", "move_names", "=", "list", "(", "ner", ".", "move_names", ")", "# get names of other pipes to disable them during training", "other_pipes", "=", "[", "pipe", "for", "pipe", "in", "nlp", ".", "pipe_names", "if", "pipe", "!=", "\"ner\"", "]", "with", "nlp", ".", "disable_pipes", "(", "*", "other_pipes", ")", ":", "# only train NER", "sizes", "=", "compounding", "(", "1.0", ",", "4.0", ",", "1.001", ")", "# batch up the examples using spaCy's minibatch", "for", "itn", "in", "range", "(", "n_iter", ")", ":", "random", ".", "shuffle", "(", "TRAIN_DATA", ")", "batches", "=", "minibatch", "(", "TRAIN_DATA", ",", "size", "=", "sizes", ")", "losses", "=", "{", "}", "for", "batch", "in", "batches", ":", "texts", ",", "annotations", "=", "zip", "(", "*", "batch", ")", "nlp", ".", "update", "(", "texts", ",", "annotations", ",", "sgd", "=", "optimizer", ",", "drop", "=", "0.35", ",", "losses", "=", "losses", ")", "print", "(", "\"Losses\"", ",", "losses", ")", "# test the trained model", "test_text", "=", "\"Do you like horses?\"", "doc", "=", "nlp", "(", "test_text", ")", "print", "(", "\"Entities in '%s'\"", "%", "test_text", ")", "for", "ent", "in", "doc", ".", "ents", ":", "print", "(", "ent", ".", "label_", ",", "ent", ".", "text", ")", "# save model to output directory", "if", "output_dir", "is", "not", "None", ":", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "nlp", ".", "meta", "[", "\"name\"", "]", "=", "new_model_name", "# rename model", "nlp", ".", "to_disk", "(", "output_dir", ")", "print", "(", "\"Saved model to\"", ",", "output_dir", ")", "# test the saved model", "print", "(", "\"Loading from\"", ",", "output_dir", ")", "nlp2", "=", "spacy", ".", "load", "(", "output_dir", ")", "# Check the classes have loaded back consistently", "assert", "nlp2", ".", "get_pipe", "(", "\"ner\"", ")", ".", "move_names", "==", "move_names", "doc2", "=", "nlp2", "(", "test_text", ")", "for", "ent", "in", "doc2", ".", "ents", ":", "print", "(", "ent", ".", "label_", ",", "ent", ".", "text", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
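A hypothetical invocation; LABEL and TRAIN_DATA are module-level constants in the example script, and the model name and output path are assumptions.

main(model="en_core_web_sm", new_model_name="animal",
     output_dir="/tmp/animal_model", n_iter=30)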
train
conll_ner2json
Convert files in the CoNLL-2003 NER format into JSON format for use with train cli.
spacy/cli/converters/conll_ner2json.py
def conll_ner2json(input_data, **kwargs): """ Convert files in the CoNLL-2003 NER format into JSON format for use with train cli. """ delimit_docs = "-DOCSTART- -X- O O" output_docs = [] for doc in input_data.strip().split(delimit_docs): doc = doc.strip() if not doc: continue output_doc = [] for sent in doc.split("\n\n"): sent = sent.strip() if not sent: continue lines = [line.strip() for line in sent.split("\n") if line.strip()] words, tags, chunks, iob_ents = zip(*[line.split() for line in lines]) biluo_ents = iob_to_biluo(iob_ents) output_doc.append( { "tokens": [ {"orth": w, "tag": tag, "ner": ent} for (w, tag, ent) in zip(words, tags, biluo_ents) ] } ) output_docs.append( {"id": len(output_docs), "paragraphs": [{"sentences": output_doc}]} ) output_doc = [] return output_docs
def conll_ner2json(input_data, **kwargs): """ Convert files in the CoNLL-2003 NER format into JSON format for use with train cli. """ delimit_docs = "-DOCSTART- -X- O O" output_docs = [] for doc in input_data.strip().split(delimit_docs): doc = doc.strip() if not doc: continue output_doc = [] for sent in doc.split("\n\n"): sent = sent.strip() if not sent: continue lines = [line.strip() for line in sent.split("\n") if line.strip()] words, tags, chunks, iob_ents = zip(*[line.split() for line in lines]) biluo_ents = iob_to_biluo(iob_ents) output_doc.append( { "tokens": [ {"orth": w, "tag": tag, "ner": ent} for (w, tag, ent) in zip(words, tags, biluo_ents) ] } ) output_docs.append( {"id": len(output_docs), "paragraphs": [{"sentences": output_doc}]} ) output_doc = [] return output_docs
[ "Convert", "files", "in", "the", "CoNLL", "-", "2003", "NER", "format", "into", "JSON", "format", "for", "use", "with", "train", "cli", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conll_ner2json.py#L7-L38
[ "def", "conll_ner2json", "(", "input_data", ",", "*", "*", "kwargs", ")", ":", "delimit_docs", "=", "\"-DOCSTART- -X- O O\"", "output_docs", "=", "[", "]", "for", "doc", "in", "input_data", ".", "strip", "(", ")", ".", "split", "(", "delimit_docs", ")", ":", "doc", "=", "doc", ".", "strip", "(", ")", "if", "not", "doc", ":", "continue", "output_doc", "=", "[", "]", "for", "sent", "in", "doc", ".", "split", "(", "\"\\n\\n\"", ")", ":", "sent", "=", "sent", ".", "strip", "(", ")", "if", "not", "sent", ":", "continue", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "sent", ".", "split", "(", "\"\\n\"", ")", "if", "line", ".", "strip", "(", ")", "]", "words", ",", "tags", ",", "chunks", ",", "iob_ents", "=", "zip", "(", "*", "[", "line", ".", "split", "(", ")", "for", "line", "in", "lines", "]", ")", "biluo_ents", "=", "iob_to_biluo", "(", "iob_ents", ")", "output_doc", ".", "append", "(", "{", "\"tokens\"", ":", "[", "{", "\"orth\"", ":", "w", ",", "\"tag\"", ":", "tag", ",", "\"ner\"", ":", "ent", "}", "for", "(", "w", ",", "tag", ",", "ent", ")", "in", "zip", "(", "words", ",", "tags", ",", "biluo_ents", ")", "]", "}", ")", "output_docs", ".", "append", "(", "{", "\"id\"", ":", "len", "(", "output_docs", ")", ",", "\"paragraphs\"", ":", "[", "{", "\"sentences\"", ":", "output_doc", "}", "]", "}", ")", "output_doc", "=", "[", "]", "return", "output_docs" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
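A self-contained sketch with a synthetic CoNLL-2003 snippet; note that single-token B- entities come back as U- after iob_to_biluo.

sample = """-DOCSTART- -X- O O

EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
call NN I-NP O
. . O O
"""
docs = conll_ner2json(sample)
print(docs[0]["paragraphs"][0]["sentences"][0]["tokens"][0])
# {'orth': 'EU', 'tag': 'NNP', 'ner': 'U-ORG'}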
train
main
Create a new model, set up the pipeline and train the tagger. In order to train the tagger with a custom tag map, we're creating a new Language instance with a custom vocab.
examples/training/train_tagger.py
def main(lang="en", output_dir=None, n_iter=25): """Create a new model, set up the pipeline and train the tagger. In order to train the tagger with a custom tag map, we're creating a new Language instance with a custom vocab. """ nlp = spacy.blank(lang) # add the tagger to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy tagger = nlp.create_pipe("tagger") # Add the tags. This needs to be done before you start training. for tag, values in TAG_MAP.items(): tagger.add_label(tag, values) nlp.add_pipe(tagger) optimizer = nlp.begin_training() for i in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, losses=losses) print("Losses", losses) # test the trained model test_text = "I like blue eggs" doc = nlp(test_text) print("Tags", [(t.text, t.tag_, t.pos_) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the save model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) doc = nlp2(test_text) print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
def main(lang="en", output_dir=None, n_iter=25): """Create a new model, set up the pipeline and train the tagger. In order to train the tagger with a custom tag map, we're creating a new Language instance with a custom vocab. """ nlp = spacy.blank(lang) # add the tagger to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy tagger = nlp.create_pipe("tagger") # Add the tags. This needs to be done before you start training. for tag, values in TAG_MAP.items(): tagger.add_label(tag, values) nlp.add_pipe(tagger) optimizer = nlp.begin_training() for i in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, losses=losses) print("Losses", losses) # test the trained model test_text = "I like blue eggs" doc = nlp(test_text) print("Tags", [(t.text, t.tag_, t.pos_) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the save model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) doc = nlp2(test_text) print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
[ "Create", "a", "new", "model", "set", "up", "the", "pipeline", "and", "train", "the", "tagger", ".", "In", "order", "to", "train", "the", "tagger", "with", "a", "custom", "tag", "map", "we", "re", "creating", "a", "new", "Language", "instance", "with", "a", "custom", "vocab", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_tagger.py#L47-L89
[ "def", "main", "(", "lang", "=", "\"en\"", ",", "output_dir", "=", "None", ",", "n_iter", "=", "25", ")", ":", "nlp", "=", "spacy", ".", "blank", "(", "lang", ")", "# add the tagger to the pipeline", "# nlp.create_pipe works for built-ins that are registered with spaCy", "tagger", "=", "nlp", ".", "create_pipe", "(", "\"tagger\"", ")", "# Add the tags. This needs to be done before you start training.", "for", "tag", ",", "values", "in", "TAG_MAP", ".", "items", "(", ")", ":", "tagger", ".", "add_label", "(", "tag", ",", "values", ")", "nlp", ".", "add_pipe", "(", "tagger", ")", "optimizer", "=", "nlp", ".", "begin_training", "(", ")", "for", "i", "in", "range", "(", "n_iter", ")", ":", "random", ".", "shuffle", "(", "TRAIN_DATA", ")", "losses", "=", "{", "}", "# batch up the examples using spaCy's minibatch", "batches", "=", "minibatch", "(", "TRAIN_DATA", ",", "size", "=", "compounding", "(", "4.0", ",", "32.0", ",", "1.001", ")", ")", "for", "batch", "in", "batches", ":", "texts", ",", "annotations", "=", "zip", "(", "*", "batch", ")", "nlp", ".", "update", "(", "texts", ",", "annotations", ",", "sgd", "=", "optimizer", ",", "losses", "=", "losses", ")", "print", "(", "\"Losses\"", ",", "losses", ")", "# test the trained model", "test_text", "=", "\"I like blue eggs\"", "doc", "=", "nlp", "(", "test_text", ")", "print", "(", "\"Tags\"", ",", "[", "(", "t", ".", "text", ",", "t", ".", "tag_", ",", "t", ".", "pos_", ")", "for", "t", "in", "doc", "]", ")", "# save model to output directory", "if", "output_dir", "is", "not", "None", ":", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "nlp", ".", "to_disk", "(", "output_dir", ")", "print", "(", "\"Saved model to\"", ",", "output_dir", ")", "# test the save model", "print", "(", "\"Loading from\"", ",", "output_dir", ")", "nlp2", "=", "spacy", ".", "load", "(", "output_dir", ")", "doc", "=", "nlp2", "(", "test_text", ")", "print", "(", "\"Tags\"", ",", "[", "(", "t", ".", "text", ",", "t", ".", "tag_", ",", "t", ".", "pos_", ")", "for", "t", "in", "doc", "]", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
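A hypothetical invocation; TAG_MAP and TRAIN_DATA are module-level constants in the example script, and the output path is an assumption.

main(lang="en", output_dir="/tmp/custom_tagger", n_iter=25)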
train
load_textcat_data
Load data from the IMDB dataset.
examples/training/pretrain_textcat.py
def load_textcat_data(limit=0): """Load data from the IMDB dataset.""" # Partition off part of the train data for evaluation train_data, eval_data = thinc.extra.datasets.imdb() random.shuffle(train_data) train_data = train_data[-limit:] texts, labels = zip(*train_data) eval_texts, eval_labels = zip(*eval_data) cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels] eval_cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in eval_labels] return (texts, cats), (eval_texts, eval_cats)
def load_textcat_data(limit=0): """Load data from the IMDB dataset.""" # Partition off part of the train data for evaluation train_data, eval_data = thinc.extra.datasets.imdb() random.shuffle(train_data) train_data = train_data[-limit:] texts, labels = zip(*train_data) eval_texts, eval_labels = zip(*eval_data) cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels] eval_cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in eval_labels] return (texts, cats), (eval_texts, eval_cats)
[ "Load", "data", "from", "the", "IMDB", "dataset", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/pretrain_textcat.py#L41-L51
[ "def", "load_textcat_data", "(", "limit", "=", "0", ")", ":", "# Partition off part of the train data for evaluation", "train_data", ",", "eval_data", "=", "thinc", ".", "extra", ".", "datasets", ".", "imdb", "(", ")", "random", ".", "shuffle", "(", "train_data", ")", "train_data", "=", "train_data", "[", "-", "limit", ":", "]", "texts", ",", "labels", "=", "zip", "(", "*", "train_data", ")", "eval_texts", ",", "eval_labels", "=", "zip", "(", "*", "eval_data", ")", "cats", "=", "[", "{", "\"POSITIVE\"", ":", "bool", "(", "y", ")", ",", "\"NEGATIVE\"", ":", "not", "bool", "(", "y", ")", "}", "for", "y", "in", "labels", "]", "eval_cats", "=", "[", "{", "\"POSITIVE\"", ":", "bool", "(", "y", ")", ",", "\"NEGATIVE\"", ":", "not", "bool", "(", "y", ")", "}", "for", "y", "in", "eval_labels", "]", "return", "(", "texts", ",", "cats", ")", ",", "(", "eval_texts", ",", "eval_cats", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
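A quick sketch of the returned structure; each cats dict marks exactly one of the two labels True.

(train_texts, train_cats), (eval_texts, eval_cats) = load_textcat_data(limit=100)
print(train_cats[0])  # e.g. {'POSITIVE': True, 'NEGATIVE': False}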
train
init_model
Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz.
spacy/cli/init_model.py
def init_model( lang, output_dir, freqs_loc=None, clusters_loc=None, jsonl_loc=None, vectors_loc=None, prune_vectors=-1, ): """ Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz. """ if jsonl_loc is not None: if freqs_loc is not None or clusters_loc is not None: settings = ["-j"] if freqs_loc: settings.append("-f") if clusters_loc: settings.append("-c") msg.warn( "Incompatible arguments", "The -f and -c arguments are deprecated, and not compatible " "with the -j argument, which should specify the same " "information. Either merge the frequencies and clusters data " "into the JSONL-formatted file (recommended), or use only the " "-f and -c files, without the other lexical attributes.", ) jsonl_loc = ensure_path(jsonl_loc) lex_attrs = srsly.read_jsonl(jsonl_loc) else: clusters_loc = ensure_path(clusters_loc) freqs_loc = ensure_path(freqs_loc) if freqs_loc is not None and not freqs_loc.exists(): msg.fail("Can't find words frequencies file", freqs_loc, exits=1) lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc) with msg.loading("Creating model..."): nlp = create_model(lang, lex_attrs) msg.good("Successfully created model") if vectors_loc is not None: add_vectors(nlp, vectors_loc, prune_vectors) vec_added = len(nlp.vocab.vectors) lex_added = len(nlp.vocab) msg.good( "Successfully compiled vocab", "{} entries, {} vectors".format(lex_added, vec_added), ) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) return nlp
def init_model( lang, output_dir, freqs_loc=None, clusters_loc=None, jsonl_loc=None, vectors_loc=None, prune_vectors=-1, ): """ Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz. """ if jsonl_loc is not None: if freqs_loc is not None or clusters_loc is not None: settings = ["-j"] if freqs_loc: settings.append("-f") if clusters_loc: settings.append("-c") msg.warn( "Incompatible arguments", "The -f and -c arguments are deprecated, and not compatible " "with the -j argument, which should specify the same " "information. Either merge the frequencies and clusters data " "into the JSONL-formatted file (recommended), or use only the " "-f and -c files, without the other lexical attributes.", ) jsonl_loc = ensure_path(jsonl_loc) lex_attrs = srsly.read_jsonl(jsonl_loc) else: clusters_loc = ensure_path(clusters_loc) freqs_loc = ensure_path(freqs_loc) if freqs_loc is not None and not freqs_loc.exists(): msg.fail("Can't find words frequencies file", freqs_loc, exits=1) lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc) with msg.loading("Creating model..."): nlp = create_model(lang, lex_attrs) msg.good("Successfully created model") if vectors_loc is not None: add_vectors(nlp, vectors_loc, prune_vectors) vec_added = len(nlp.vocab.vectors) lex_added = len(nlp.vocab) msg.good( "Successfully compiled vocab", "{} entries, {} vectors".format(lex_added, vec_added), ) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) return nlp
[ "Create", "a", "new", "model", "from", "raw", "data", "like", "word", "frequencies", "Brown", "clusters", "and", "word", "vectors", ".", "If", "vectors", "are", "provided", "in", "Word2Vec", "format", "they", "can", "be", "either", "a", ".", "txt", "or", "zipped", "as", "a", ".", "zip", "or", ".", "tar", ".", "gz", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/init_model.py#L39-L91
[ "def", "init_model", "(", "lang", ",", "output_dir", ",", "freqs_loc", "=", "None", ",", "clusters_loc", "=", "None", ",", "jsonl_loc", "=", "None", ",", "vectors_loc", "=", "None", ",", "prune_vectors", "=", "-", "1", ",", ")", ":", "if", "jsonl_loc", "is", "not", "None", ":", "if", "freqs_loc", "is", "not", "None", "or", "clusters_loc", "is", "not", "None", ":", "settings", "=", "[", "\"-j\"", "]", "if", "freqs_loc", ":", "settings", ".", "append", "(", "\"-f\"", ")", "if", "clusters_loc", ":", "settings", ".", "append", "(", "\"-c\"", ")", "msg", ".", "warn", "(", "\"Incompatible arguments\"", ",", "\"The -f and -c arguments are deprecated, and not compatible \"", "\"with the -j argument, which should specify the same \"", "\"information. Either merge the frequencies and clusters data \"", "\"into the JSONL-formatted file (recommended), or use only the \"", "\"-f and -c files, without the other lexical attributes.\"", ",", ")", "jsonl_loc", "=", "ensure_path", "(", "jsonl_loc", ")", "lex_attrs", "=", "srsly", ".", "read_jsonl", "(", "jsonl_loc", ")", "else", ":", "clusters_loc", "=", "ensure_path", "(", "clusters_loc", ")", "freqs_loc", "=", "ensure_path", "(", "freqs_loc", ")", "if", "freqs_loc", "is", "not", "None", "and", "not", "freqs_loc", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Can't find words frequencies file\"", ",", "freqs_loc", ",", "exits", "=", "1", ")", "lex_attrs", "=", "read_attrs_from_deprecated", "(", "freqs_loc", ",", "clusters_loc", ")", "with", "msg", ".", "loading", "(", "\"Creating model...\"", ")", ":", "nlp", "=", "create_model", "(", "lang", ",", "lex_attrs", ")", "msg", ".", "good", "(", "\"Successfully created model\"", ")", "if", "vectors_loc", "is", "not", "None", ":", "add_vectors", "(", "nlp", ",", "vectors_loc", ",", "prune_vectors", ")", "vec_added", "=", "len", "(", "nlp", ".", "vocab", ".", "vectors", ")", "lex_added", "=", "len", "(", "nlp", ".", "vocab", ")", "msg", ".", "good", "(", "\"Sucessfully compiled vocab\"", ",", "\"{} entries, {} vectors\"", ".", "format", "(", "lex_added", ",", "vec_added", ")", ",", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "nlp", ".", "to_disk", "(", "output_dir", ")", "return", "nlp" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
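A programmatic sketch of the init-model backend; the JSONL and vectors paths are hypothetical, and output_dir must be a Path since the function calls .exists() and .mkdir() on it.

from pathlib import Path

nlp = init_model(
    "en", Path("/tmp/blank_en"),
    jsonl_loc="lexemes.jsonl",      # hypothetical lexical-attributes file
    vectors_loc="vectors.txt.gz",   # hypothetical Word2Vec-format vectors
)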
train
open_file
Handle .gz, .tar.gz or unzipped files
spacy/cli/init_model.py
def open_file(loc): """Handle .gz, .tar.gz or unzipped files""" loc = ensure_path(loc) if tarfile.is_tarfile(str(loc)): return tarfile.open(str(loc), "r:gz") elif loc.parts[-1].endswith("gz"): return (line.decode("utf8") for line in gzip.open(str(loc), "r")) elif loc.parts[-1].endswith("zip"): zip_file = zipfile.ZipFile(str(loc)) names = zip_file.namelist() file_ = zip_file.open(names[0]) return (line.decode("utf8") for line in file_) else: return loc.open("r", encoding="utf8")
def open_file(loc): """Handle .gz, .tar.gz or unzipped files""" loc = ensure_path(loc) if tarfile.is_tarfile(str(loc)): return tarfile.open(str(loc), "r:gz") elif loc.parts[-1].endswith("gz"): return (line.decode("utf8") for line in gzip.open(str(loc), "r")) elif loc.parts[-1].endswith("zip"): zip_file = zipfile.ZipFile(str(loc)) names = zip_file.namelist() file_ = zip_file.open(names[0]) return (line.decode("utf8") for line in file_) else: return loc.open("r", encoding="utf8")
[ "Handle", ".", "gz", ".", "tar", ".", "gz", "or", "unzipped", "files" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/init_model.py#L94-L107
[ "def", "open_file", "(", "loc", ")", ":", "loc", "=", "ensure_path", "(", "loc", ")", "if", "tarfile", ".", "is_tarfile", "(", "str", "(", "loc", ")", ")", ":", "return", "tarfile", ".", "open", "(", "str", "(", "loc", ")", ",", "\"r:gz\"", ")", "elif", "loc", ".", "parts", "[", "-", "1", "]", ".", "endswith", "(", "\"gz\"", ")", ":", "return", "(", "line", ".", "decode", "(", "\"utf8\"", ")", "for", "line", "in", "gzip", ".", "open", "(", "str", "(", "loc", ")", ",", "\"r\"", ")", ")", "elif", "loc", ".", "parts", "[", "-", "1", "]", ".", "endswith", "(", "\"zip\"", ")", ":", "zip_file", "=", "zipfile", ".", "ZipFile", "(", "str", "(", "loc", ")", ")", "names", "=", "zip_file", ".", "namelist", "(", ")", "file_", "=", "zip_file", ".", "open", "(", "names", "[", "0", "]", ")", "return", "(", "line", ".", "decode", "(", "\"utf8\"", ")", "for", "line", "in", "file_", ")", "else", ":", "return", "loc", ".", "open", "(", "\"r\"", ",", "encoding", "=", "\"utf8\"", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
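A minimal usage sketch for open_file above. The file name is hypothetical; note that for a .gz input the function returns a generator of already-decoded lines rather than a file handle, so it is iterated directly.

from spacy.cli.init_model import open_file  # module-level helper shown above

# "word_freqs.txt.gz" is a hypothetical gzipped frequencies file
for line in open_file("word_freqs.txt.gz"):
    print(line.rstrip())  # each item is a decoded unicode line
    break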
train
main
Load the model, set up the pipeline and train the entity recognizer.
examples/training/train_ner.py
def main(model=None, output_dir=None, n_iter=100): """Load the model, set up the pipeline and train the entity recognizer.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner, last=True) # otherwise, get it so we can add labels else: ner = nlp.get_pipe("ner") # add labels for _, annotations in TRAIN_DATA: for ent in annotations.get("entities"): ner.add_label(ent[2]) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"] with nlp.disable_pipes(*other_pipes): # only train NER # reset and initialize the weights randomly – but only if we're # training a new model if model is None: nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update( texts, # batch of texts annotations, # batch of annotations drop=0.5, # dropout - make it harder to memorise data losses=losses, ) print("Losses", losses) # test the trained model for text, _ in TRAIN_DATA: doc = nlp(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) for text, _ in TRAIN_DATA: doc = nlp2(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
def main(model=None, output_dir=None, n_iter=100): """Load the model, set up the pipeline and train the entity recognizer.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner, last=True) # otherwise, get it so we can add labels else: ner = nlp.get_pipe("ner") # add labels for _, annotations in TRAIN_DATA: for ent in annotations.get("entities"): ner.add_label(ent[2]) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"] with nlp.disable_pipes(*other_pipes): # only train NER # reset and initialize the weights randomly – but only if we're # training a new model if model is None: nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update( texts, # batch of texts annotations, # batch of annotations drop=0.5, # dropout - make it harder to memorise data losses=losses, ) print("Losses", losses) # test the trained model for text, _ in TRAIN_DATA: doc = nlp(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) for text, _ in TRAIN_DATA: doc = nlp2(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
[ "Load", "the", "model", "set", "up", "the", "pipeline", "and", "train", "the", "entity", "recognizer", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_ner.py#L34-L99
[ "def", "main", "(", "model", "=", "None", ",", "output_dir", "=", "None", ",", "n_iter", "=", "100", ")", ":", "if", "model", "is", "not", "None", ":", "nlp", "=", "spacy", ".", "load", "(", "model", ")", "# load existing spaCy model", "print", "(", "\"Loaded model '%s'\"", "%", "model", ")", "else", ":", "nlp", "=", "spacy", ".", "blank", "(", "\"en\"", ")", "# create blank Language class", "print", "(", "\"Created blank 'en' model\"", ")", "# create the built-in pipeline components and add them to the pipeline", "# nlp.create_pipe works for built-ins that are registered with spaCy", "if", "\"ner\"", "not", "in", "nlp", ".", "pipe_names", ":", "ner", "=", "nlp", ".", "create_pipe", "(", "\"ner\"", ")", "nlp", ".", "add_pipe", "(", "ner", ",", "last", "=", "True", ")", "# otherwise, get it so we can add labels", "else", ":", "ner", "=", "nlp", ".", "get_pipe", "(", "\"ner\"", ")", "# add labels", "for", "_", ",", "annotations", "in", "TRAIN_DATA", ":", "for", "ent", "in", "annotations", ".", "get", "(", "\"entities\"", ")", ":", "ner", ".", "add_label", "(", "ent", "[", "2", "]", ")", "# get names of other pipes to disable them during training", "other_pipes", "=", "[", "pipe", "for", "pipe", "in", "nlp", ".", "pipe_names", "if", "pipe", "!=", "\"ner\"", "]", "with", "nlp", ".", "disable_pipes", "(", "*", "other_pipes", ")", ":", "# only train NER", "# reset and initialize the weights randomly – but only if we're", "# training a new model", "if", "model", "is", "None", ":", "nlp", ".", "begin_training", "(", ")", "for", "itn", "in", "range", "(", "n_iter", ")", ":", "random", ".", "shuffle", "(", "TRAIN_DATA", ")", "losses", "=", "{", "}", "# batch up the examples using spaCy's minibatch", "batches", "=", "minibatch", "(", "TRAIN_DATA", ",", "size", "=", "compounding", "(", "4.0", ",", "32.0", ",", "1.001", ")", ")", "for", "batch", "in", "batches", ":", "texts", ",", "annotations", "=", "zip", "(", "*", "batch", ")", "nlp", ".", "update", "(", "texts", ",", "# batch of texts", "annotations", ",", "# batch of annotations", "drop", "=", "0.5", ",", "# dropout - make it harder to memorise data", "losses", "=", "losses", ",", ")", "print", "(", "\"Losses\"", ",", "losses", ")", "# test the trained model", "for", "text", ",", "_", "in", "TRAIN_DATA", ":", "doc", "=", "nlp", "(", "text", ")", "print", "(", "\"Entities\"", ",", "[", "(", "ent", ".", "text", ",", "ent", ".", "label_", ")", "for", "ent", "in", "doc", ".", "ents", "]", ")", "print", "(", "\"Tokens\"", ",", "[", "(", "t", ".", "text", ",", "t", ".", "ent_type_", ",", "t", ".", "ent_iob", ")", "for", "t", "in", "doc", "]", ")", "# save model to output directory", "if", "output_dir", "is", "not", "None", ":", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "nlp", ".", "to_disk", "(", "output_dir", ")", "print", "(", "\"Saved model to\"", ",", "output_dir", ")", "# test the saved model", "print", "(", "\"Loading from\"", ",", "output_dir", ")", "nlp2", "=", "spacy", ".", "load", "(", "output_dir", ")", "for", "text", ",", "_", "in", "TRAIN_DATA", ":", "doc", "=", "nlp2", "(", "text", ")", "print", "(", "\"Entities\"", ",", "[", "(", "ent", ".", "text", ",", "ent", ".", "label_", ")", "for", "ent", "in", "doc", ".", "ents", "]", ")", "print", "(", "\"Tokens\"", ",", "[", "(", "t", ".", "text", ",", "t", ".", "ent_type_", ",", "t", ".", "ent_iob", ")", "for", "t", "in", "doc", "]", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
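A hedged sketch of driving main above: TRAIN_DATA must be defined at module level, as in the full example script. The two examples mirror the ones shipped with that script; each annotation is a (start_char, end_char, label) offset into the raw text.

TRAIN_DATA = [
    ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
    ("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]
main(model=None, output_dir=None, n_iter=10)  # trains a blank 'en' model in memory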
train
pretrain
Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components, using an approximate language-modelling objective. Specifically, we load pre-trained vectors, and train a component like a CNN, BiLSTM, etc to predict vectors which match the pre-trained ones. The weights are saved to a directory after each epoch. You can then pass a path to one of these pre-trained weights files to the 'spacy train' command. This technique may be especially helpful if you have little labelled data. However, it's still quite experimental, so your mileage may vary. To load the weights back in during 'spacy train', you need to ensure all settings are the same between pretraining and training. The API and errors around this need some improvement.
spacy/cli/pretrain.py
def pretrain( texts_loc, vectors_model, output_dir, width=96, depth=4, embed_rows=2000, loss_func="cosine", use_vectors=False, dropout=0.2, n_iter=1000, batch_size=3000, max_length=500, min_length=5, seed=0, n_save_every=None, ): """ Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components, using an approximate language-modelling objective. Specifically, we load pre-trained vectors, and train a component like a CNN, BiLSTM, etc to predict vectors which match the pre-trained ones. The weights are saved to a directory after each epoch. You can then pass a path to one of these pre-trained weights files to the 'spacy train' command. This technique may be especially helpful if you have little labelled data. However, it's still quite experimental, so your mileage may vary. To load the weights back in during 'spacy train', you need to ensure all settings are the same between pretraining and training. The API and errors around this need some improvement. """ config = dict(locals()) msg = Printer() util.fix_random_seed(seed) has_gpu = prefer_gpu() msg.info("Using GPU" if has_gpu else "Not using GPU") output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() msg.good("Created output directory") srsly.write_json(output_dir / "config.json", config) msg.good("Saved settings to config.json") # Load texts from file or stdin if texts_loc != "-": # reading from a file texts_loc = Path(texts_loc) if not texts_loc.exists(): msg.fail("Input text file doesn't exist", texts_loc, exits=1) with msg.loading("Loading input texts..."): texts = list(srsly.read_jsonl(texts_loc)) msg.good("Loaded input texts") random.shuffle(texts) else: # reading from stdin msg.text("Reading input text from stdin...") texts = srsly.read_jsonl("-") with msg.loading("Loading model '{}'...".format(vectors_model)): nlp = util.load_model(vectors_model) msg.good("Loaded model '{}'".format(vectors_model)) pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name model = create_pretraining_model( nlp, Tok2Vec( width, embed_rows, conv_depth=depth, pretrained_vectors=pretrained_vectors, bilstm_depth=0, # Requires PyTorch. Experimental. cnn_maxout_pieces=3, # You can try setting this higher subword_features=True, # Set to False for Chinese etc ), ) optimizer = create_default_optimizer(model.ops) tracker = ProgressTracker(frequency=10000) msg.divider("Pre-training tok2vec layer") row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")} msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings) def _save_model(epoch, is_temp=False): is_temp_str = ".temp" if is_temp else "" with model.use_params(optimizer.averages): with (output_dir / ("model%d%s.bin" % (epoch, is_temp_str))).open( "wb" ) as file_: file_.write(model.tok2vec.to_bytes()) log = { "nr_word": tracker.nr_word, "loss": tracker.loss, "epoch_loss": tracker.epoch_loss, "epoch": epoch, } with (output_dir / "log.jsonl").open("a") as file_: file_.write(srsly.json_dumps(log) + "\n") for epoch in range(n_iter): for batch_id, batch in enumerate( util.minibatch_by_words(((text, None) for text in texts), size=batch_size) ): docs = make_docs( nlp, [text for (text, _) in batch], max_length=max_length, min_length=min_length, ) loss = make_update( model, docs, optimizer, objective=loss_func, drop=dropout ) progress = tracker.update(epoch, loss, docs) if progress: msg.row(progress, **row_settings) if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7: break if n_save_every and (batch_id % n_save_every == 0): _save_model(epoch, is_temp=True) _save_model(epoch) tracker.epoch_loss = 0.0 if texts_loc != "-": # Reshuffle the texts if texts were loaded from a file random.shuffle(texts)
def pretrain( texts_loc, vectors_model, output_dir, width=96, depth=4, embed_rows=2000, loss_func="cosine", use_vectors=False, dropout=0.2, n_iter=1000, batch_size=3000, max_length=500, min_length=5, seed=0, n_save_every=None, ): """ Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components, using an approximate language-modelling objective. Specifically, we load pre-trained vectors, and train a component like a CNN, BiLSTM, etc to predict vectors which match the pre-trained ones. The weights are saved to a directory after each epoch. You can then pass a path to one of these pre-trained weights files to the 'spacy train' command. This technique may be especially helpful if you have little labelled data. However, it's still quite experimental, so your mileage may vary. To load the weights back in during 'spacy train', you need to ensure all settings are the same between pretraining and training. The API and errors around this need some improvement. """ config = dict(locals()) msg = Printer() util.fix_random_seed(seed) has_gpu = prefer_gpu() msg.info("Using GPU" if has_gpu else "Not using GPU") output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() msg.good("Created output directory") srsly.write_json(output_dir / "config.json", config) msg.good("Saved settings to config.json") # Load texts from file or stdin if texts_loc != "-": # reading from a file texts_loc = Path(texts_loc) if not texts_loc.exists(): msg.fail("Input text file doesn't exist", texts_loc, exits=1) with msg.loading("Loading input texts..."): texts = list(srsly.read_jsonl(texts_loc)) msg.good("Loaded input texts") random.shuffle(texts) else: # reading from stdin msg.text("Reading input text from stdin...") texts = srsly.read_jsonl("-") with msg.loading("Loading model '{}'...".format(vectors_model)): nlp = util.load_model(vectors_model) msg.good("Loaded model '{}'".format(vectors_model)) pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name model = create_pretraining_model( nlp, Tok2Vec( width, embed_rows, conv_depth=depth, pretrained_vectors=pretrained_vectors, bilstm_depth=0, # Requires PyTorch. Experimental. cnn_maxout_pieces=3, # You can try setting this higher subword_features=True, # Set to False for Chinese etc ), ) optimizer = create_default_optimizer(model.ops) tracker = ProgressTracker(frequency=10000) msg.divider("Pre-training tok2vec layer") row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")} msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings) def _save_model(epoch, is_temp=False): is_temp_str = ".temp" if is_temp else "" with model.use_params(optimizer.averages): with (output_dir / ("model%d%s.bin" % (epoch, is_temp_str))).open( "wb" ) as file_: file_.write(model.tok2vec.to_bytes()) log = { "nr_word": tracker.nr_word, "loss": tracker.loss, "epoch_loss": tracker.epoch_loss, "epoch": epoch, } with (output_dir / "log.jsonl").open("a") as file_: file_.write(srsly.json_dumps(log) + "\n") for epoch in range(n_iter): for batch_id, batch in enumerate( util.minibatch_by_words(((text, None) for text in texts), size=batch_size) ): docs = make_docs( nlp, [text for (text, _) in batch], max_length=max_length, min_length=min_length, ) loss = make_update( model, docs, optimizer, objective=loss_func, drop=dropout ) progress = tracker.update(epoch, loss, docs) if progress: msg.row(progress, **row_settings) if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7: break if n_save_every and (batch_id % n_save_every == 0): _save_model(epoch, is_temp=True) _save_model(epoch) tracker.epoch_loss = 0.0 if texts_loc != "-": # Reshuffle the texts if texts were loaded from a file random.shuffle(texts)
[ "Pre", "-", "train", "the", "token", "-", "to", "-", "vector", "(", "tok2vec", ")", "layer", "of", "pipeline", "components", "using", "an", "approximate", "language", "-", "modelling", "objective", ".", "Specifically", "we", "load", "pre", "-", "trained", "vectors", "and", "train", "a", "component", "like", "a", "CNN", "BiLSTM", "etc", "to", "predict", "vectors", "which", "match", "the", "pre", "-", "trained", "ones", ".", "The", "weights", "are", "saved", "to", "a", "directory", "after", "each", "epoch", ".", "You", "can", "then", "pass", "a", "path", "to", "one", "of", "these", "pre", "-", "trained", "weights", "files", "to", "the", "spacy", "train", "command", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L40-L161
[ "def", "pretrain", "(", "texts_loc", ",", "vectors_model", ",", "output_dir", ",", "width", "=", "96", ",", "depth", "=", "4", ",", "embed_rows", "=", "2000", ",", "loss_func", "=", "\"cosine\"", ",", "use_vectors", "=", "False", ",", "dropout", "=", "0.2", ",", "n_iter", "=", "1000", ",", "batch_size", "=", "3000", ",", "max_length", "=", "500", ",", "min_length", "=", "5", ",", "seed", "=", "0", ",", "n_save_every", "=", "None", ",", ")", ":", "config", "=", "dict", "(", "locals", "(", ")", ")", "msg", "=", "Printer", "(", ")", "util", ".", "fix_random_seed", "(", "seed", ")", "has_gpu", "=", "prefer_gpu", "(", ")", "msg", ".", "info", "(", "\"Using GPU\"", "if", "has_gpu", "else", "\"Not using GPU\"", ")", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "msg", ".", "good", "(", "\"Created output directory\"", ")", "srsly", ".", "write_json", "(", "output_dir", "/", "\"config.json\"", ",", "config", ")", "msg", ".", "good", "(", "\"Saved settings to config.json\"", ")", "# Load texts from file or stdin", "if", "texts_loc", "!=", "\"-\"", ":", "# reading from a file", "texts_loc", "=", "Path", "(", "texts_loc", ")", "if", "not", "texts_loc", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Input text file doesn't exist\"", ",", "texts_loc", ",", "exits", "=", "1", ")", "with", "msg", ".", "loading", "(", "\"Loading input texts...\"", ")", ":", "texts", "=", "list", "(", "srsly", ".", "read_jsonl", "(", "texts_loc", ")", ")", "msg", ".", "good", "(", "\"Loaded input texts\"", ")", "random", ".", "shuffle", "(", "texts", ")", "else", ":", "# reading from stdin", "msg", ".", "text", "(", "\"Reading input text from stdin...\"", ")", "texts", "=", "srsly", ".", "read_jsonl", "(", "\"-\"", ")", "with", "msg", ".", "loading", "(", "\"Loading model '{}'...\"", ".", "format", "(", "vectors_model", ")", ")", ":", "nlp", "=", "util", ".", "load_model", "(", "vectors_model", ")", "msg", ".", "good", "(", "\"Loaded model '{}'\"", ".", "format", "(", "vectors_model", ")", ")", "pretrained_vectors", "=", "None", "if", "not", "use_vectors", "else", "nlp", ".", "vocab", ".", "vectors", ".", "name", "model", "=", "create_pretraining_model", "(", "nlp", ",", "Tok2Vec", "(", "width", ",", "embed_rows", ",", "conv_depth", "=", "depth", ",", "pretrained_vectors", "=", "pretrained_vectors", ",", "bilstm_depth", "=", "0", ",", "# Requires PyTorch. 
Experimental.", "cnn_maxout_pieces", "=", "3", ",", "# You can try setting this higher", "subword_features", "=", "True", ",", "# Set to False for Chinese etc", ")", ",", ")", "optimizer", "=", "create_default_optimizer", "(", "model", ".", "ops", ")", "tracker", "=", "ProgressTracker", "(", "frequency", "=", "10000", ")", "msg", ".", "divider", "(", "\"Pre-training tok2vec layer\"", ")", "row_settings", "=", "{", "\"widths\"", ":", "(", "3", ",", "10", ",", "10", ",", "6", ",", "4", ")", ",", "\"aligns\"", ":", "(", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ")", "}", "msg", ".", "row", "(", "(", "\"#\"", ",", "\"# Words\"", ",", "\"Total Loss\"", ",", "\"Loss\"", ",", "\"w/s\"", ")", ",", "*", "*", "row_settings", ")", "def", "_save_model", "(", "epoch", ",", "is_temp", "=", "False", ")", ":", "is_temp_str", "=", "\".temp\"", "if", "is_temp", "else", "\"\"", "with", "model", ".", "use_params", "(", "optimizer", ".", "averages", ")", ":", "with", "(", "output_dir", "/", "(", "\"model%d%s.bin\"", "%", "(", "epoch", ",", "is_temp_str", ")", ")", ")", ".", "open", "(", "\"wb\"", ")", "as", "file_", ":", "file_", ".", "write", "(", "model", ".", "tok2vec", ".", "to_bytes", "(", ")", ")", "log", "=", "{", "\"nr_word\"", ":", "tracker", ".", "nr_word", ",", "\"loss\"", ":", "tracker", ".", "loss", ",", "\"epoch_loss\"", ":", "tracker", ".", "epoch_loss", ",", "\"epoch\"", ":", "epoch", ",", "}", "with", "(", "output_dir", "/", "\"log.jsonl\"", ")", ".", "open", "(", "\"a\"", ")", "as", "file_", ":", "file_", ".", "write", "(", "srsly", ".", "json_dumps", "(", "log", ")", "+", "\"\\n\"", ")", "for", "epoch", "in", "range", "(", "n_iter", ")", ":", "for", "batch_id", ",", "batch", "in", "enumerate", "(", "util", ".", "minibatch_by_words", "(", "(", "(", "text", ",", "None", ")", "for", "text", "in", "texts", ")", ",", "size", "=", "batch_size", ")", ")", ":", "docs", "=", "make_docs", "(", "nlp", ",", "[", "text", "for", "(", "text", ",", "_", ")", "in", "batch", "]", ",", "max_length", "=", "max_length", ",", "min_length", "=", "min_length", ",", ")", "loss", "=", "make_update", "(", "model", ",", "docs", ",", "optimizer", ",", "objective", "=", "loss_func", ",", "drop", "=", "dropout", ")", "progress", "=", "tracker", ".", "update", "(", "epoch", ",", "loss", ",", "docs", ")", "if", "progress", ":", "msg", ".", "row", "(", "progress", ",", "*", "*", "row_settings", ")", "if", "texts_loc", "==", "\"-\"", "and", "tracker", ".", "words_per_epoch", "[", "epoch", "]", ">=", "10", "**", "7", ":", "break", "if", "n_save_every", "and", "(", "batch_id", "%", "n_save_every", "==", "0", ")", ":", "_save_model", "(", "epoch", ",", "is_temp", "=", "True", ")", "_save_model", "(", "epoch", ")", "tracker", ".", "epoch_loss", "=", "0.0", "if", "texts_loc", "!=", "\"-\"", ":", "# Reshuffle the texts if texts were loaded from a file", "random", ".", "shuffle", "(", "texts", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
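A sketch of invoking pretrain programmatically; the same function backs `python -m spacy pretrain`. The JSONL path and output directory are hypothetical, and the vectors package (here en_vectors_web_lg) must be installed.

from spacy.cli import pretrain  # assumes the CLI re-export used by spaCy 2.1

# texts.jsonl holds one {"text": "..."} object per line; paths are hypothetical
pretrain("texts.jsonl", "en_vectors_web_lg", "/tmp/pretrain_out", n_iter=5)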
train
make_update
Perform an update over a single batch of documents. docs (iterable): A batch of `Doc` objects. drop (float): The dropout rate. optimizer (callable): An optimizer. RETURNS loss: A float for the loss.
spacy/cli/pretrain.py
def make_update(model, docs, optimizer, drop=0.0, objective="L2"): """Perform an update over a single batch of documents. docs (iterable): A batch of `Doc` objects. drop (float): The dropout rate. optimizer (callable): An optimizer. RETURNS loss: A float for the loss. """ predictions, backprop = model.begin_update(docs, drop=drop) loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective) backprop(gradients, sgd=optimizer) # Don't want to return a cupy object here # The gradients are modified in-place by the BERT MLM, # so we get an accurate loss return float(loss)
def make_update(model, docs, optimizer, drop=0.0, objective="L2"): """Perform an update over a single batch of documents. docs (iterable): A batch of `Doc` objects. drop (float): The dropout rate. optimizer (callable): An optimizer. RETURNS loss: A float for the loss. """ predictions, backprop = model.begin_update(docs, drop=drop) loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective) backprop(gradients, sgd=optimizer) # Don't want to return a cupy object here # The gradients are modified in-place by the BERT MLM, # so we get an accurate loss return float(loss)
[ "Perform", "an", "update", "over", "a", "single", "batch", "of", "documents", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L164-L178
[ "def", "make_update", "(", "model", ",", "docs", ",", "optimizer", ",", "drop", "=", "0.0", ",", "objective", "=", "\"L2\"", ")", ":", "predictions", ",", "backprop", "=", "model", ".", "begin_update", "(", "docs", ",", "drop", "=", "drop", ")", "loss", ",", "gradients", "=", "get_vectors_loss", "(", "model", ".", "ops", ",", "docs", ",", "predictions", ",", "objective", ")", "backprop", "(", "gradients", ",", "sgd", "=", "optimizer", ")", "# Don't want to return a cupy object here", "# The gradients are modified in-place by the BERT MLM,", "# so we get an accurate loss", "return", "float", "(", "loss", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
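make_update is a step inside the pretrain loop rather than a standalone API, so this fragment assumes model, docs and optimizer were built exactly as in pretrain above (create_pretraining_model, make_docs, create_default_optimizer):

# Fragment: model, docs and optimizer come from the pretrain code above
loss = make_update(model, docs, optimizer, drop=0.2, objective="cosine")
print("batch loss:", loss)  # a plain float, never a cupy array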
train
get_vectors_loss
Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other way, e.g. with an LSTM language model, or use some other type of objective.
spacy/cli/pretrain.py
def get_vectors_loss(ops, docs, prediction, objective="L2"): """Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other way, e.g. with an LSTM language model, or use some other type of objective. """ # The simplest way to implement this would be to vstack the # token.vector values, but that's a bit inefficient, especially on GPU. # Instead we fetch the index into the vectors table for each of our tokens, # and look them up all at once. This prevents data copying. ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs]) target = docs[0].vocab.vectors.data[ids] if objective == "L2": d_target = prediction - target loss = (d_target ** 2).sum() elif objective == "cosine": loss, d_target = get_cossim_loss(prediction, target) return loss, d_target
def get_vectors_loss(ops, docs, prediction, objective="L2"): """Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other way, e.g. with an LSTM language model, or use some other type of objective. """ # The simplest way to implement this would be to vstack the # token.vector values, but that's a bit inefficient, especially on GPU. # Instead we fetch the index into the vectors table for each of our tokens, # and look them up all at once. This prevents data copying. ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs]) target = docs[0].vocab.vectors.data[ids] if objective == "L2": d_target = prediction - target loss = (d_target ** 2).sum() elif objective == "cosine": loss, d_target = get_cossim_loss(prediction, target) return loss, d_target
[ "Compute", "a", "mean", "-", "squared", "error", "loss", "between", "the", "documents", "vectors", "and", "the", "prediction", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L199-L218
[ "def", "get_vectors_loss", "(", "ops", ",", "docs", ",", "prediction", ",", "objective", "=", "\"L2\"", ")", ":", "# The simplest way to implement this would be to vstack the", "# token.vector values, but that's a bit inefficient, especially on GPU.", "# Instead we fetch the index into the vectors table for each of our tokens,", "# and look them up all at once. This prevents data copying.", "ids", "=", "ops", ".", "flatten", "(", "[", "doc", ".", "to_array", "(", "ID", ")", ".", "ravel", "(", ")", "for", "doc", "in", "docs", "]", ")", "target", "=", "docs", "[", "0", "]", ".", "vocab", ".", "vectors", ".", "data", "[", "ids", "]", "if", "objective", "==", "\"L2\"", ":", "d_target", "=", "prediction", "-", "target", "loss", "=", "(", "d_target", "**", "2", ")", ".", "sum", "(", ")", "elif", "objective", "==", "\"cosine\"", ":", "loss", ",", "d_target", "=", "get_cossim_loss", "(", "prediction", ",", "target", ")", "return", "loss", ",", "d_target" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
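The "L2" branch above reduces to a few numpy lines; a toy check with illustrative shapes (two tokens, two vector dimensions):

import numpy as np

prediction = np.array([[0.1, 0.2], [0.3, 0.4]])
target = np.array([[0.0, 0.2], [0.3, 0.5]])
d_target = prediction - target  # gradient with respect to the prediction
loss = (d_target ** 2).sum()    # 0.02 for these values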
train
create_pretraining_model
Define a network for the pretraining. We simply add an output layer onto the tok2vec input model. The tok2vec input model needs to be a model that takes a batch of Doc objects (as a list), and returns a list of arrays. Each array in the output needs to have one row per token in the doc.
spacy/cli/pretrain.py
def create_pretraining_model(nlp, tok2vec): """Define a network for the pretraining. We simply add an output layer onto the tok2vec input model. The tok2vec input model needs to be a model that takes a batch of Doc objects (as a list), and returns a list of arrays. Each array in the output needs to have one row per token in the doc. """ output_size = nlp.vocab.vectors.data.shape[1] output_layer = chain( LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0) ) # This is annoying, but the parser etc have the flatten step after # the tok2vec. To load the weights in cleanly, we need to match # the shape of the models' components exactly. So what we call # "tok2vec" has to be the same set of processes as what the components do. tok2vec = chain(tok2vec, flatten) model = chain(tok2vec, output_layer) model = masked_language_model(nlp.vocab, model) model.tok2vec = tok2vec model.output_layer = output_layer model.begin_training([nlp.make_doc("Give it a doc to infer shapes")]) return model
def create_pretraining_model(nlp, tok2vec): """Define a network for the pretraining. We simply add an output layer onto the tok2vec input model. The tok2vec input model needs to be a model that takes a batch of Doc objects (as a list), and returns a list of arrays. Each array in the output needs to have one row per token in the doc. """ output_size = nlp.vocab.vectors.data.shape[1] output_layer = chain( LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0) ) # This is annoying, but the parser etc have the flatten step after # the tok2vec. To load the weights in cleanly, we need to match # the shape of the models' components exactly. So what we call # "tok2vec" has to be the same set of processes as what the components do. tok2vec = chain(tok2vec, flatten) model = chain(tok2vec, output_layer) model = masked_language_model(nlp.vocab, model) model.tok2vec = tok2vec model.output_layer = output_layer model.begin_training([nlp.make_doc("Give it a doc to infer shapes")]) return model
[ "Define", "a", "network", "for", "the", "pretraining", ".", "We", "simply", "add", "an", "output", "layer", "onto", "the", "tok2vec", "input", "model", ".", "The", "tok2vec", "input", "model", "needs", "to", "be", "a", "model", "that", "takes", "a", "batch", "of", "Doc", "objects", "(", "as", "a", "list", ")", "and", "returns", "a", "list", "of", "arrays", ".", "Each", "array", "in", "the", "output", "needs", "to", "have", "one", "row", "per", "token", "in", "the", "doc", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L236-L256
[ "def", "create_pretraining_model", "(", "nlp", ",", "tok2vec", ")", ":", "output_size", "=", "nlp", ".", "vocab", ".", "vectors", ".", "data", ".", "shape", "[", "1", "]", "output_layer", "=", "chain", "(", "LN", "(", "Maxout", "(", "300", ",", "pieces", "=", "3", ")", ")", ",", "Affine", "(", "output_size", ",", "drop_factor", "=", "0.0", ")", ")", "# This is annoying, but the parser etc have the flatten step after", "# the tok2vec. To load the weights in cleanly, we need to match", "# the shape of the models' components exactly. So what we cann", "# \"tok2vec\" has to be the same set of processes as what the components do.", "tok2vec", "=", "chain", "(", "tok2vec", ",", "flatten", ")", "model", "=", "chain", "(", "tok2vec", ",", "output_layer", ")", "model", "=", "masked_language_model", "(", "nlp", ".", "vocab", ",", "model", ")", "model", ".", "tok2vec", "=", "tok2vec", "model", ".", "output_layer", "=", "output_layer", "model", ".", "begin_training", "(", "[", "nlp", ".", "make_doc", "(", "\"Give it a doc to infer shapes\"", ")", "]", ")", "return", "model" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
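A hedged sketch of building the pretraining network, reusing the Tok2Vec call shown in pretrain above. The import path for Tok2Vec is assumed from this commit's layout, and the vectors package is hypothetical (its vector dimensionality sets output_size):

import spacy
from spacy._ml import Tok2Vec  # assumed location at this commit
from spacy.cli.pretrain import create_pretraining_model

nlp = spacy.load("en_vectors_web_lg")  # must ship word vectors
model = create_pretraining_model(nlp, Tok2Vec(96, 2000, conv_depth=4))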
train
_smart_round
Round large numbers as integers, smaller numbers as decimals.
spacy/cli/pretrain.py
def _smart_round(figure, width=10, max_decimal=4): """Round large numbers as integers, smaller numbers as decimals.""" n_digits = len(str(int(figure))) n_decimal = width - (n_digits + 1) if n_decimal <= 1: return str(int(figure)) else: n_decimal = min(n_decimal, max_decimal) format_str = "%." + str(n_decimal) + "f" return format_str % figure
def _smart_round(figure, width=10, max_decimal=4): """Round large numbers as integers, smaller numbers as decimals.""" n_digits = len(str(int(figure))) n_decimal = width - (n_digits + 1) if n_decimal <= 1: return str(int(figure)) else: n_decimal = min(n_decimal, max_decimal) format_str = "%." + str(n_decimal) + "f" return format_str % figure
[ "Round", "large", "numbers", "as", "integers", "smaller", "numbers", "as", "decimals", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/pretrain.py#L295-L304
[ "def", "_smart_round", "(", "figure", ",", "width", "=", "10", ",", "max_decimal", "=", "4", ")", ":", "n_digits", "=", "len", "(", "str", "(", "int", "(", "figure", ")", ")", ")", "n_decimal", "=", "width", "-", "(", "n_digits", "+", "1", ")", "if", "n_decimal", "<=", "1", ":", "return", "str", "(", "int", "(", "figure", ")", ")", "else", ":", "n_decimal", "=", "min", "(", "n_decimal", ",", "max_decimal", ")", "format_str", "=", "\"%.\"", "+", "str", "(", "n_decimal", ")", "+", "\"f\"", "return", "format_str", "%", "figure" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
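The width budget above spends characters on the integer digits plus the decimal point, capping decimals at max_decimal; some outputs computed from the logic as written:

from spacy.cli.pretrain import _smart_round

print(_smart_round(1.5))          # '1.5000' (capped at max_decimal=4)
print(_smart_round(12345.678))    # '12345.6780'
print(_smart_round(123456789.9))  # '123456789' (no room left for decimals)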
train
noun_chunks
Detect base noun phrases. Works on both Doc and Span.
spacy/lang/el/syntax_iterators.py
def noun_chunks(obj): """ Detect base noun phrases. Works on both Doc and Span. """ # It follows the logic of the noun chunks finder of English language, # adjusted to some Greek language special characteristics. # obj tag corrects some DEP tagger mistakes. # Further improvement of the models will eliminate the need for this tag. labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"] doc = obj.doc # Ensure works on both Doc and Span. np_deps = [doc.vocab.strings.add(label) for label in labels] conj = doc.vocab.strings.add("conj") nmod = doc.vocab.strings.add("nmod") np_label = doc.vocab.strings.add("NP") seen = set() for i, word in enumerate(obj): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced if word.i in seen: continue if word.dep in np_deps: if any(w.i in seen for w in word.subtree): continue flag = False if word.pos == NOUN: # check for patterns such as γραμμή παραγωγής for potential_nmod in word.rights: if potential_nmod.dep == nmod: seen.update( j for j in range(word.left_edge.i, potential_nmod.i + 1) ) yield word.left_edge.i, potential_nmod.i + 1, np_label flag = True break if flag is False: seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label elif word.dep == conj: # covers the case: έχει όμορφα και έξυπνα παιδιά head = word.head while head.dep == conj and head.head.i < head.i: head = head.head # If the head is an NP, and we're coordinated to it, we're an NP if head.dep in np_deps: if any(w.i in seen for w in word.subtree): continue seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label
def noun_chunks(obj): """ Detect base noun phrases. Works on both Doc and Span. """ # It follows the logic of the noun chunks finder of English language, # adjusted to some Greek language special characteristics. # obj tag corrects some DEP tagger mistakes. # Further improvement of the models will eliminate the need for this tag. labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"] doc = obj.doc # Ensure works on both Doc and Span. np_deps = [doc.vocab.strings.add(label) for label in labels] conj = doc.vocab.strings.add("conj") nmod = doc.vocab.strings.add("nmod") np_label = doc.vocab.strings.add("NP") seen = set() for i, word in enumerate(obj): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced if word.i in seen: continue if word.dep in np_deps: if any(w.i in seen for w in word.subtree): continue flag = False if word.pos == NOUN: # check for patterns such as γραμμή παραγωγής for potential_nmod in word.rights: if potential_nmod.dep == nmod: seen.update( j for j in range(word.left_edge.i, potential_nmod.i + 1) ) yield word.left_edge.i, potential_nmod.i + 1, np_label flag = True break if flag is False: seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label elif word.dep == conj: # covers the case: έχει όμορφα και έξυπνα παιδιά head = word.head while head.dep == conj and head.head.i < head.i: head = head.head # If the head is an NP, and we're coordinated to it, we're an NP if head.dep in np_deps: if any(w.i in seen for w in word.subtree): continue seen.update(j for j in range(word.left_edge.i, word.i + 1)) yield word.left_edge.i, word.i + 1, np_label
[ "Detect", "base", "noun", "phrases", ".", "Works", "on", "both", "Doc", "and", "Span", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/el/syntax_iterators.py#L7-L55
[ "def", "noun_chunks", "(", "obj", ")", ":", "# It follows the logic of the noun chunks finder of English language,", "# adjusted to some Greek language special characteristics.", "# obj tag corrects some DEP tagger mistakes.", "# Further improvement of the models will eliminate the need for this tag.", "labels", "=", "[", "\"nsubj\"", ",", "\"obj\"", ",", "\"iobj\"", ",", "\"appos\"", ",", "\"ROOT\"", ",", "\"obl\"", "]", "doc", "=", "obj", ".", "doc", "# Ensure works on both Doc and Span.", "np_deps", "=", "[", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "label", ")", "for", "label", "in", "labels", "]", "conj", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"conj\"", ")", "nmod", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"nmod\"", ")", "np_label", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"NP\"", ")", "seen", "=", "set", "(", ")", "for", "i", ",", "word", "in", "enumerate", "(", "obj", ")", ":", "if", "word", ".", "pos", "not", "in", "(", "NOUN", ",", "PROPN", ",", "PRON", ")", ":", "continue", "# Prevent nested chunks from being produced", "if", "word", ".", "i", "in", "seen", ":", "continue", "if", "word", ".", "dep", "in", "np_deps", ":", "if", "any", "(", "w", ".", "i", "in", "seen", "for", "w", "in", "word", ".", "subtree", ")", ":", "continue", "flag", "=", "False", "if", "word", ".", "pos", "==", "NOUN", ":", "# check for patterns such as γραμμή παραγωγής", "for", "potential_nmod", "in", "word", ".", "rights", ":", "if", "potential_nmod", ".", "dep", "==", "nmod", ":", "seen", ".", "update", "(", "j", "for", "j", "in", "range", "(", "word", ".", "left_edge", ".", "i", ",", "potential_nmod", ".", "i", "+", "1", ")", ")", "yield", "word", ".", "left_edge", ".", "i", ",", "potential_nmod", ".", "i", "+", "1", ",", "np_label", "flag", "=", "True", "break", "if", "flag", "is", "False", ":", "seen", ".", "update", "(", "j", "for", "j", "in", "range", "(", "word", ".", "left_edge", ".", "i", ",", "word", ".", "i", "+", "1", ")", ")", "yield", "word", ".", "left_edge", ".", "i", ",", "word", ".", "i", "+", "1", ",", "np_label", "elif", "word", ".", "dep", "==", "conj", ":", "# covers the case: έχει όμορφα και έξυπνα παιδιά", "head", "=", "word", ".", "head", "while", "head", ".", "dep", "==", "conj", "and", "head", ".", "head", ".", "i", "<", "head", ".", "i", ":", "head", "=", "head", ".", "head", "# If the head is an NP, and we're coordinated to it, we're an NP", "if", "head", ".", "dep", "in", "np_deps", ":", "if", "any", "(", "w", ".", "i", "in", "seen", "for", "w", "in", "word", ".", "subtree", ")", ":", "continue", "seen", ".", "update", "(", "j", "for", "j", "in", "range", "(", "word", ".", "left_edge", ".", "i", ",", "word", ".", "i", "+", "1", ")", ")", "yield", "word", ".", "left_edge", ".", "i", ",", "word", ".", "i", "+", "1", ",", "np_label" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
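This iterator is what backs Doc.noun_chunks for Greek pipelines; a hedged usage sketch (el_core_news_sm is assumed to be installed and to include a parser):

import spacy

nlp = spacy.load("el_core_news_sm")
doc = nlp("έχει όμορφα και έξυπνα παιδιά")  # the conjunction case from the comments
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.dep_)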
train
get_ext_args
Validate and convert arguments. Reused in Doc, Token and Span.
spacy/tokens/underscore.py
def get_ext_args(**kwargs): """Validate and convert arguments. Reused in Doc, Token and Span.""" default = kwargs.get("default") getter = kwargs.get("getter") setter = kwargs.get("setter") method = kwargs.get("method") if getter is None and setter is not None: raise ValueError(Errors.E089) valid_opts = ("default" in kwargs, method is not None, getter is not None) nr_defined = sum(t is True for t in valid_opts) if nr_defined != 1: raise ValueError(Errors.E083.format(nr_defined=nr_defined)) if setter is not None and not hasattr(setter, "__call__"): raise ValueError(Errors.E091.format(name="setter", value=repr(setter))) if getter is not None and not hasattr(getter, "__call__"): raise ValueError(Errors.E091.format(name="getter", value=repr(getter))) if method is not None and not hasattr(method, "__call__"): raise ValueError(Errors.E091.format(name="method", value=repr(method))) return (default, method, getter, setter)
def get_ext_args(**kwargs): """Validate and convert arguments. Reused in Doc, Token and Span.""" default = kwargs.get("default") getter = kwargs.get("getter") setter = kwargs.get("setter") method = kwargs.get("method") if getter is None and setter is not None: raise ValueError(Errors.E089) valid_opts = ("default" in kwargs, method is not None, getter is not None) nr_defined = sum(t is True for t in valid_opts) if nr_defined != 1: raise ValueError(Errors.E083.format(nr_defined=nr_defined)) if setter is not None and not hasattr(setter, "__call__"): raise ValueError(Errors.E091.format(name="setter", value=repr(setter))) if getter is not None and not hasattr(getter, "__call__"): raise ValueError(Errors.E091.format(name="getter", value=repr(getter))) if method is not None and not hasattr(method, "__call__"): raise ValueError(Errors.E091.format(name="method", value=repr(method))) return (default, method, getter, setter)
[ "Validate", "and", "convert", "arguments", ".", "Reused", "in", "Doc", "Token", "and", "Span", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/underscore.py#L69-L87
[ "def", "get_ext_args", "(", "*", "*", "kwargs", ")", ":", "default", "=", "kwargs", ".", "get", "(", "\"default\"", ")", "getter", "=", "kwargs", ".", "get", "(", "\"getter\"", ")", "setter", "=", "kwargs", ".", "get", "(", "\"setter\"", ")", "method", "=", "kwargs", ".", "get", "(", "\"method\"", ")", "if", "getter", "is", "None", "and", "setter", "is", "not", "None", ":", "raise", "ValueError", "(", "Errors", ".", "E089", ")", "valid_opts", "=", "(", "\"default\"", "in", "kwargs", ",", "method", "is", "not", "None", ",", "getter", "is", "not", "None", ")", "nr_defined", "=", "sum", "(", "t", "is", "True", "for", "t", "in", "valid_opts", ")", "if", "nr_defined", "!=", "1", ":", "raise", "ValueError", "(", "Errors", ".", "E083", ".", "format", "(", "nr_defined", "=", "nr_defined", ")", ")", "if", "setter", "is", "not", "None", "and", "not", "hasattr", "(", "setter", ",", "\"__call__\"", ")", ":", "raise", "ValueError", "(", "Errors", ".", "E091", ".", "format", "(", "name", "=", "\"setter\"", ",", "value", "=", "repr", "(", "setter", ")", ")", ")", "if", "getter", "is", "not", "None", "and", "not", "hasattr", "(", "getter", ",", "\"__call__\"", ")", ":", "raise", "ValueError", "(", "Errors", ".", "E091", ".", "format", "(", "name", "=", "\"getter\"", ",", "value", "=", "repr", "(", "getter", ")", ")", ")", "if", "method", "is", "not", "None", "and", "not", "hasattr", "(", "method", ",", "\"__call__\"", ")", ":", "raise", "ValueError", "(", "Errors", ".", "E091", ".", "format", "(", "name", "=", "\"method\"", ",", "value", "=", "repr", "(", "method", ")", ")", ")", "return", "(", "default", ",", "method", ",", "getter", ",", "setter", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
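The rules above allow exactly one of default, method, or getter, and a setter only alongside a getter; a quick sketch via Doc.set_extension, which routes its keyword arguments through this validator:

from spacy.tokens import Doc

Doc.set_extension("is_checked", default=False)           # ok: default only
Doc.set_extension("doc_id", getter=lambda doc: id(doc))  # ok: getter only
# Doc.set_extension("bad", setter=lambda d, v: None)     # ValueError: setter needs a getter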
train
is_writable_attr
Check if an extension attribute is writable. ext (tuple): The (default, getter, setter, method) tuple available via {Doc,Span,Token}.get_extension. RETURNS (bool): Whether the attribute is writable.
spacy/tokens/underscore.py
def is_writable_attr(ext): """Check if an extension attribute is writable. ext (tuple): The (default, getter, setter, method) tuple available via {Doc,Span,Token}.get_extension. RETURNS (bool): Whether the attribute is writable. """ default, method, getter, setter = ext # Extension is writable if it has a setter (getter + setter), if it has a # default value (or, if its default value is none, none of the other values # should be set). if setter is not None or default is not None or all(e is None for e in ext): return True return False
def is_writable_attr(ext): """Check if an extension attribute is writable. ext (tuple): The (default, getter, setter, method) tuple available via {Doc,Span,Token}.get_extension. RETURNS (bool): Whether the attribute is writable. """ default, method, getter, setter = ext # Extension is writable if it has a setter (getter + setter), if it has a # default value (or, if its default value is none, none of the other values # should be set). if setter is not None or default is not None or all(e is None for e in ext): return True return False
[ "Check", "if", "an", "extension", "attribute", "is", "writable", ".", "ext", "(", "tuple", ")", ":", "The", "(", "default", "getter", "setter", "method", ")", "tuple", "available", "via", "{", "Doc", "Span", "Token", "}", ".", "get_extension", ".", "RETURNS", "(", "bool", ")", ":", "Whether", "the", "attribute", "is", "writable", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/tokens/underscore.py#L90-L102
[ "def", "is_writable_attr", "(", "ext", ")", ":", "default", ",", "method", ",", "getter", ",", "setter", "=", "ext", "# Extension is writable if it has a setter (getter + setter), if it has a", "# default value (or, if its default value is none, none of the other values", "# should be set).", "if", "setter", "is", "not", "None", "or", "default", "is", "not", "None", "or", "all", "(", "e", "is", "None", "for", "e", "in", "ext", ")", ":", "return", "True", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
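The writability rule above, exercised directly on (default, method, getter, setter) tuples:

from spacy.tokens.underscore import is_writable_attr

assert is_writable_attr((False, None, None, None))                     # default given
assert is_writable_attr((None, None, lambda d: 1, lambda d, v: None))  # getter + setter
assert is_writable_attr((None, None, None, None))                      # all None counts as writable
assert not is_writable_attr((None, lambda d: 1, None, None))           # method only: read-only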
train
is_new_osx
Check whether we're on OSX >= 10.7
setup.py
def is_new_osx(): """Check whether we're on OSX >= 10.7""" name = distutils.util.get_platform() if sys.platform != "darwin": return False elif name.startswith("macosx-10"): minor_version = int(name.split("-")[1].split(".")[1]) if minor_version >= 7: return True else: return False else: return False
def is_new_osx(): """Check whether we're on OSX >= 10.7""" name = distutils.util.get_platform() if sys.platform != "darwin": return False elif name.startswith("macosx-10"): minor_version = int(name.split("-")[1].split(".")[1]) if minor_version >= 7: return True else: return False else: return False
[ "Check", "whether", "we", "re", "on", "OSX", ">", "=", "10", ".", "10" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/setup.py#L15-L27
[ "def", "is_new_osx", "(", ")", ":", "name", "=", "distutils", ".", "util", ".", "get_platform", "(", ")", "if", "sys", ".", "platform", "!=", "\"darwin\"", ":", "return", "False", "elif", "name", ".", "startswith", "(", "\"macosx-10\"", ")", ":", "minor_version", "=", "int", "(", "name", ".", "split", "(", "\"-\"", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "1", "]", ")", "if", "minor_version", ">=", "7", ":", "return", "True", "else", ":", "return", "False", "else", ":", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
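On macOS, distutils reports platform strings like "macosx-10.9-x86_64", which is what the minor-version parse above relies on; a quick probe:

import distutils.util
import sys

# with is_new_osx defined as above (it lives in setup.py, so it isn't importable)
print(sys.platform, distutils.util.get_platform())
print(is_new_osx())  # True only on macOS 10.7 or newer, False everywhere else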
train
get_position_label
Return labels indicating the position of the word in the document.
examples/training/ner_multitask_objective.py
def get_position_label(i, words, tags, heads, labels, ents): """Return labels indicating the position of the word in the document. """ if len(words) < 20: return "short-doc" elif i == 0: return "first-word" elif i < 10: return "early-word" elif i < 20: return "mid-word" elif i == len(words) - 1: return "last-word" else: return "late-word"
def get_position_label(i, words, tags, heads, labels, ents): """Return labels indicating the position of the word in the document. """ if len(words) < 20: return "short-doc" elif i == 0: return "first-word" elif i < 10: return "early-word" elif i < 20: return "mid-word" elif i == len(words) - 1: return "last-word" else: return "late-word"
[ "Return", "labels", "indicating", "the", "position", "of", "the", "word", "in", "the", "document", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/ner_multitask_objective.py#L36-L50
[ "def", "get_position_label", "(", "i", ",", "words", ",", "tags", ",", "heads", ",", "labels", ",", "ents", ")", ":", "if", "len", "(", "words", ")", "<", "20", ":", "return", "\"short-doc\"", "elif", "i", "==", "0", ":", "return", "\"first-word\"", "elif", "i", "<", "10", ":", "return", "\"early-word\"", "elif", "i", "<", "20", ":", "return", "\"mid-word\"", "elif", "i", "==", "len", "(", "words", ")", "-", "1", ":", "return", "\"last-word\"", "else", ":", "return", "\"late-word\"" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
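The thresholds above on a toy 25-word document; with the function defined as above, the tags/heads/labels/ents arguments are unused by this objective, so None placeholders suffice:

words = ["w%d" % i for i in range(25)]
for i in (0, 5, 15, 24):
    print(i, get_position_label(i, words, None, None, None, None))
# prints: 0 first-word, 5 early-word, 15 mid-word, 24 last-word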
train
download
Download compatible model from default download path using pip. Model can be shortcut, model name or, if --direct flag is set, full model name with version. For direct downloads, the compatibility check will be skipped.
spacy/cli/download.py
def download(model, direct=False, *pip_args): """ Download compatible model from default download path using pip. Model can be shortcut, model name or, if --direct flag is set, full model name with version. For direct downloads, the compatibility check will be skipped. """ dl_tpl = "{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}" if direct: components = model.split("-") model_name = "".join(components[:-1]) version = components[-1] dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args) else: shortcuts = get_json(about.__shortcuts__, "available shortcuts") model_name = shortcuts.get(model, model) compatibility = get_compatibility() version = get_version(model_name, compatibility) dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args) if dl != 0: # if download subprocess doesn't return 0, exit sys.exit(dl) msg.good( "Download and installation successful", "You can now load the model via spacy.load('{}')".format(model_name), ) # Only create symlink if the model is installed via a shortcut like 'en'. # There's no real advantage over an additional symlink for en_core_web_sm # and if anything, it's more error prone and causes more confusion. if model in shortcuts: try: # Get package path here because link uses # pip.get_installed_distributions() to check if model is a # package, which fails if model was just installed via # subprocess package_path = get_package_path(model_name) link(model_name, model, force=True, model_path=package_path) except: # noqa: E722 # Dirty, but since spacy.download and the auto-linking is # mostly a convenience wrapper, it's best to show a success # message and loading instructions, even if linking fails. msg.warn( "Download successful but linking failed", "Creating a shortcut link for '{}' didn't work (maybe you " "don't have admin permissions?), but you can still load " "the model via its full package name: " "nlp = spacy.load('{}')".format(model, model_name), )
def download(model, direct=False, *pip_args): """ Download compatible model from default download path using pip. Model can be shortcut, model name or, if --direct flag is set, full model name with version. For direct downloads, the compatibility check will be skipped. """ dl_tpl = "{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}" if direct: components = model.split("-") model_name = "".join(components[:-1]) version = components[-1] dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args) else: shortcuts = get_json(about.__shortcuts__, "available shortcuts") model_name = shortcuts.get(model, model) compatibility = get_compatibility() version = get_version(model_name, compatibility) dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args) if dl != 0: # if download subprocess doesn't return 0, exit sys.exit(dl) msg.good( "Download and installation successful", "You can now load the model via spacy.load('{}')".format(model_name), ) # Only create symlink if the model is installed via a shortcut like 'en'. # There's no real advantage over an additional symlink for en_core_web_sm # and if anything, it's more error prone and causes more confusion. if model in shortcuts: try: # Get package path here because link uses # pip.get_installed_distributions() to check if model is a # package, which fails if model was just installed via # subprocess package_path = get_package_path(model_name) link(model_name, model, force=True, model_path=package_path) except: # noqa: E722 # Dirty, but since spacy.download and the auto-linking is # mostly a convenience wrapper, it's best to show a success # message and loading instructions, even if linking fails. msg.warn( "Download successful but linking failed", "Creating a shortcut link for '{}' didn't work (maybe you " "don't have admin permissions?), but you can still load " "the model via its full package name: " "nlp = spacy.load('{}')".format(model, model_name), )
[ "Download", "compatible", "model", "from", "default", "download", "path", "using", "pip", ".", "Model", "can", "be", "shortcut", "model", "name", "or", "if", "--", "direct", "flag", "is", "set", "full", "model", "name", "with", "version", ".", "For", "direct", "downloads", "the", "compatibility", "check", "will", "be", "skipped", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/download.py#L24-L69
[ "def", "download", "(", "model", ",", "direct", "=", "False", ",", "*", "pip_args", ")", ":", "dl_tpl", "=", "\"{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}\"", "if", "direct", ":", "components", "=", "model", ".", "split", "(", "\"-\"", ")", "model_name", "=", "\"\"", ".", "join", "(", "components", "[", ":", "-", "1", "]", ")", "version", "=", "components", "[", "-", "1", "]", "dl", "=", "download_model", "(", "dl_tpl", ".", "format", "(", "m", "=", "model_name", ",", "v", "=", "version", ")", ",", "pip_args", ")", "else", ":", "shortcuts", "=", "get_json", "(", "about", ".", "__shortcuts__", ",", "\"available shortcuts\"", ")", "model_name", "=", "shortcuts", ".", "get", "(", "model", ",", "model", ")", "compatibility", "=", "get_compatibility", "(", ")", "version", "=", "get_version", "(", "model_name", ",", "compatibility", ")", "dl", "=", "download_model", "(", "dl_tpl", ".", "format", "(", "m", "=", "model_name", ",", "v", "=", "version", ")", ",", "pip_args", ")", "if", "dl", "!=", "0", ":", "# if download subprocess doesn't return 0, exit", "sys", ".", "exit", "(", "dl", ")", "msg", ".", "good", "(", "\"Download and installation successful\"", ",", "\"You can now load the model via spacy.load('{}')\"", ".", "format", "(", "model_name", ")", ",", ")", "# Only create symlink if the model is installed via a shortcut like 'en'.", "# There's no real advantage over an additional symlink for en_core_web_sm", "# and if anything, it's more error prone and causes more confusion.", "if", "model", "in", "shortcuts", ":", "try", ":", "# Get package path here because link uses", "# pip.get_installed_distributions() to check if model is a", "# package, which fails if model was just installed via", "# subprocess", "package_path", "=", "get_package_path", "(", "model_name", ")", "link", "(", "model_name", ",", "model", ",", "force", "=", "True", ",", "model_path", "=", "package_path", ")", "except", ":", "# noqa: E722", "# Dirty, but since spacy.download and the auto-linking is", "# mostly a convenience wrapper, it's best to show a success", "# message and loading instructions, even if linking fails.", "msg", ".", "warn", "(", "\"Download successful but linking failed\"", ",", "\"Creating a shortcut link for '{}' didn't work (maybe you \"", "\"don't have admin permissions?), but you can still load \"", "\"the model via its full package name: \"", "\"nlp = spacy.load('{}')\"", ".", "format", "(", "model", ",", "model_name", ")", ",", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
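A minimal usage sketch for the download record above, assuming this spaCy revision is installed and the download server is reachable; the shortcut and the pinned version below are illustrative, not prescribed by the source.

# Sketch: programmatic equivalents of `python -m spacy download ...`.
from spacy.cli import download

download("en")                                 # shortcut: resolve name, install, then symlink
download("en_core_web_sm-2.1.0", direct=True)  # exact version, compatibility check skipped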
train
convert
Convert files into JSON format for use with train command and other experiment management functions. If no output_dir is specified, the data is written to stdout, so you can pipe them forward to a JSON file: $ spacy convert some_file.conllu > some_file.json
spacy/cli/convert.py
def convert( input_file, output_dir="-", file_type="json", n_sents=1, morphology=False, converter="auto", lang=None, ): """ Convert files into JSON format for use with train command and other experiment management functions. If no output_dir is specified, the data is written to stdout, so you can pipe them forward to a JSON file: $ spacy convert some_file.conllu > some_file.json """ msg = Printer() input_path = Path(input_file) if file_type not in FILE_TYPES: msg.fail( "Unknown file type: '{}'".format(file_type), "Supported file types: '{}'".format(", ".join(FILE_TYPES)), exits=1, ) if file_type not in FILE_TYPES_STDOUT and output_dir == "-": # TODO: support msgpack via stdout in srsly? msg.fail( "Can't write .{} data to stdout.".format(file_type), "Please specify an output directory.", exits=1, ) if not input_path.exists(): msg.fail("Input file not found", input_path, exits=1) if output_dir != "-" and not Path(output_dir).exists(): msg.fail("Output directory not found", output_dir, exits=1) if converter == "auto": converter = input_path.suffix[1:] if converter not in CONVERTERS: msg.fail("Can't find converter for {}".format(converter), exits=1) # Use converter function to convert data func = CONVERTERS[converter] input_data = input_path.open("r", encoding="utf-8").read() data = func(input_data, n_sents=n_sents, use_morphology=morphology, lang=lang) if output_dir != "-": # Export data to a file suffix = ".{}".format(file_type) output_file = Path(output_dir) / Path(input_path.parts[-1]).with_suffix(suffix) if file_type == "json": srsly.write_json(output_file, data) elif file_type == "jsonl": srsly.write_jsonl(output_file, data) elif file_type == "msg": srsly.write_msgpack(output_file, data) msg.good("Generated output file ({} documents)".format(len(data)), output_file) else: # Print to stdout if file_type == "json": srsly.write_json("-", data) elif file_type == "jsonl": srsly.write_jsonl("-", data)
def convert( input_file, output_dir="-", file_type="json", n_sents=1, morphology=False, converter="auto", lang=None, ): """ Convert files into JSON format for use with train command and other experiment management functions. If no output_dir is specified, the data is written to stdout, so you can pipe them forward to a JSON file: $ spacy convert some_file.conllu > some_file.json """ msg = Printer() input_path = Path(input_file) if file_type not in FILE_TYPES: msg.fail( "Unknown file type: '{}'".format(file_type), "Supported file types: '{}'".format(", ".join(FILE_TYPES)), exits=1, ) if file_type not in FILE_TYPES_STDOUT and output_dir == "-": # TODO: support msgpack via stdout in srsly? msg.fail( "Can't write .{} data to stdout.".format(file_type), "Please specify an output directory.", exits=1, ) if not input_path.exists(): msg.fail("Input file not found", input_path, exits=1) if output_dir != "-" and not Path(output_dir).exists(): msg.fail("Output directory not found", output_dir, exits=1) if converter == "auto": converter = input_path.suffix[1:] if converter not in CONVERTERS: msg.fail("Can't find converter for {}".format(converter), exits=1) # Use converter function to convert data func = CONVERTERS[converter] input_data = input_path.open("r", encoding="utf-8").read() data = func(input_data, n_sents=n_sents, use_morphology=morphology, lang=lang) if output_dir != "-": # Export data to a file suffix = ".{}".format(file_type) output_file = Path(output_dir) / Path(input_path.parts[-1]).with_suffix(suffix) if file_type == "json": srsly.write_json(output_file, data) elif file_type == "jsonl": srsly.write_jsonl(output_file, data) elif file_type == "msg": srsly.write_msgpack(output_file, data) msg.good("Generated output file ({} documents)".format(len(data)), output_file) else: # Print to stdout if file_type == "json": srsly.write_json("-", data) elif file_type == "jsonl": srsly.write_jsonl("-", data)
[ "Convert", "files", "into", "JSON", "format", "for", "use", "with", "train", "command", "and", "other", "experiment", "management", "functions", ".", "If", "no", "output_dir", "is", "specified", "the", "data", "is", "written", "to", "stdout", "so", "you", "can", "pipe", "them", "forward", "to", "a", "JSON", "file", ":", "$", "spacy", "convert", "some_file", ".", "conllu", ">", "some_file", ".", "json" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/convert.py#L39-L97
[ "def", "convert", "(", "input_file", ",", "output_dir", "=", "\"-\"", ",", "file_type", "=", "\"json\"", ",", "n_sents", "=", "1", ",", "morphology", "=", "False", ",", "converter", "=", "\"auto\"", ",", "lang", "=", "None", ",", ")", ":", "msg", "=", "Printer", "(", ")", "input_path", "=", "Path", "(", "input_file", ")", "if", "file_type", "not", "in", "FILE_TYPES", ":", "msg", ".", "fail", "(", "\"Unknown file type: '{}'\"", ".", "format", "(", "file_type", ")", ",", "\"Supported file types: '{}'\"", ".", "format", "(", "\", \"", ".", "join", "(", "FILE_TYPES", ")", ")", ",", "exits", "=", "1", ",", ")", "if", "file_type", "not", "in", "FILE_TYPES_STDOUT", "and", "output_dir", "==", "\"-\"", ":", "# TODO: support msgpack via stdout in srsly?", "msg", ".", "fail", "(", "\"Can't write .{} data to stdout.\"", ".", "format", "(", "file_type", ")", ",", "\"Please specify an output directory.\"", ",", "exits", "=", "1", ",", ")", "if", "not", "input_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Input file not found\"", ",", "input_path", ",", "exits", "=", "1", ")", "if", "output_dir", "!=", "\"-\"", "and", "not", "Path", "(", "output_dir", ")", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Output directory not found\"", ",", "output_dir", ",", "exits", "=", "1", ")", "if", "converter", "==", "\"auto\"", ":", "converter", "=", "input_path", ".", "suffix", "[", "1", ":", "]", "if", "converter", "not", "in", "CONVERTERS", ":", "msg", ".", "fail", "(", "\"Can't find converter for {}\"", ".", "format", "(", "converter", ")", ",", "exits", "=", "1", ")", "# Use converter function to convert data", "func", "=", "CONVERTERS", "[", "converter", "]", "input_data", "=", "input_path", ".", "open", "(", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", ".", "read", "(", ")", "data", "=", "func", "(", "input_data", ",", "n_sents", "=", "n_sents", ",", "use_morphology", "=", "morphology", ",", "lang", "=", "lang", ")", "if", "output_dir", "!=", "\"-\"", ":", "# Export data to a file", "suffix", "=", "\".{}\"", ".", "format", "(", "file_type", ")", "output_file", "=", "Path", "(", "output_dir", ")", "/", "Path", "(", "input_path", ".", "parts", "[", "-", "1", "]", ")", ".", "with_suffix", "(", "suffix", ")", "if", "file_type", "==", "\"json\"", ":", "srsly", ".", "write_json", "(", "output_file", ",", "data", ")", "elif", "file_type", "==", "\"jsonl\"", ":", "srsly", ".", "write_jsonl", "(", "output_file", ",", "data", ")", "elif", "file_type", "==", "\"msg\"", ":", "srsly", ".", "write_msgpack", "(", "output_file", ",", "data", ")", "msg", ".", "good", "(", "\"Generated output file ({} documents)\"", ".", "format", "(", "len", "(", "data", ")", ")", ",", "output_file", ")", "else", ":", "# Print to stdout", "if", "file_type", "==", "\"json\"", ":", "srsly", ".", "write_json", "(", "\"-\"", ",", "data", ")", "elif", "file_type", "==", "\"jsonl\"", ":", "srsly", ".", "write_jsonl", "(", "\"-\"", ",", "data", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
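A short sketch of driving the converter from Python rather than the shell; the input file name is hypothetical and the output directory must already exist.

# Sketch: convert CoNLL-U into spaCy's JSON training format.
from spacy.cli import convert

convert("some_file.conllu", output_dir=".", file_type="json", converter="auto")
# writes ./some_file.json; pass output_dir="-" to stream JSON to stdout instead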
train
load_model
Load a specific spaCy model
bin/ud/run_eval.py
def load_model(modelname, add_sentencizer=False): """ Load a specific spaCy model """ loading_start = time.time() nlp = spacy.load(modelname) if add_sentencizer: nlp.add_pipe(nlp.create_pipe('sentencizer')) loading_end = time.time() loading_time = loading_end - loading_start if add_sentencizer: return nlp, loading_time, modelname + '_sentencizer' return nlp, loading_time, modelname
def load_model(modelname, add_sentencizer=False): """ Load a specific spaCy model """ loading_start = time.time() nlp = spacy.load(modelname) if add_sentencizer: nlp.add_pipe(nlp.create_pipe('sentencizer')) loading_end = time.time() loading_time = loading_end - loading_start if add_sentencizer: return nlp, loading_time, modelname + '_sentencizer' return nlp, loading_time, modelname
[ "Load", "a", "specific", "spaCy", "model" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L34-L44
[ "def", "load_model", "(", "modelname", ",", "add_sentencizer", "=", "False", ")", ":", "loading_start", "=", "time", ".", "time", "(", ")", "nlp", "=", "spacy", ".", "load", "(", "modelname", ")", "if", "add_sentencizer", ":", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "'sentencizer'", ")", ")", "loading_end", "=", "time", ".", "time", "(", ")", "loading_time", "=", "loading_end", "-", "loading_start", "if", "add_sentencizer", ":", "return", "nlp", ",", "loading_time", ",", "modelname", "+", "'_sentencizer'", "return", "nlp", ",", "loading_time", ",", "modelname" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
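Usage sketch for the timing helper above; it assumes the en_core_web_sm package is installed, and the measured time naturally varies by machine.

# Sketch: load a pretrained pipeline plus a rule-based sentencizer, timed.
nlp, loading_time, name = load_model("en_core_web_sm", add_sentencizer=True)
print("%s loaded in %.2fs" % (name, loading_time))  # name == "en_core_web_sm_sentencizer"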
train
load_default_model_sentencizer
Load a generic spaCy model and add the sentencizer for sentence tokenization
bin/ud/run_eval.py
def load_default_model_sentencizer(lang): """ Load a generic spaCy model and add the sentencizer for sentence tokenization""" loading_start = time.time() lang_class = get_lang_class(lang) nlp = lang_class() nlp.add_pipe(nlp.create_pipe('sentencizer')) loading_end = time.time() loading_time = loading_end - loading_start return nlp, loading_time, lang + "_default_" + 'sentencizer'
def load_default_model_sentencizer(lang): """ Load a generic spaCy model and add the sentencizer for sentence tokenization""" loading_start = time.time() lang_class = get_lang_class(lang) nlp = lang_class() nlp.add_pipe(nlp.create_pipe('sentencizer')) loading_end = time.time() loading_time = loading_end - loading_start return nlp, loading_time, lang + "_default_" + 'sentencizer'
[ "Load", "a", "generic", "spaCy", "model", "and", "add", "the", "sentencizer", "for", "sentence", "tokenization" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L47-L55
[ "def", "load_default_model_sentencizer", "(", "lang", ")", ":", "loading_start", "=", "time", ".", "time", "(", ")", "lang_class", "=", "get_lang_class", "(", "lang", ")", "nlp", "=", "lang_class", "(", ")", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "'sentencizer'", ")", ")", "loading_end", "=", "time", ".", "time", "(", ")", "loading_time", "=", "loading_end", "-", "loading_start", "return", "nlp", ",", "loading_time", ",", "lang", "+", "\"_default_\"", "+", "'sentencizer'" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
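The blank-pipeline variant needs no model package, only a language code; a quick sketch.

# Sketch: blank English pipeline whose only component is the sentencizer.
nlp, loading_time, name = load_default_model_sentencizer("en")
doc = nlp("First sentence. Second sentence.")
print(name, len(list(doc.sents)))  # -> en_default_sentencizer 2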
train
get_freq_tuples
Turn a list of errors into frequency-sorted tuples thresholded by a certain total number
bin/ud/run_eval.py
def get_freq_tuples(my_list, print_total_threshold): """ Turn a list of errors into frequency-sorted tuples thresholded by a certain total number """ d = {} for token in my_list: d.setdefault(token, 0) d[token] += 1 return sorted(d.items(), key=operator.itemgetter(1), reverse=True)[:print_total_threshold]
def get_freq_tuples(my_list, print_total_threshold): """ Turn a list of errors into frequency-sorted tuples thresholded by a certain total number """ d = {} for token in my_list: d.setdefault(token, 0) d[token] += 1 return sorted(d.items(), key=operator.itemgetter(1), reverse=True)[:print_total_threshold]
[ "Turn", "a", "list", "of", "errors", "into", "frequency", "-", "sorted", "tuples", "thresholded", "by", "a", "certain", "total", "number" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L62-L68
[ "def", "get_freq_tuples", "(", "my_list", ",", "print_total_threshold", ")", ":", "d", "=", "{", "}", "for", "token", "in", "my_list", ":", "d", ".", "setdefault", "(", "token", ",", "0", ")", "d", "[", "token", "]", "+=", "1", "return", "sorted", "(", "d", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "[", ":", "print_total_threshold", "]" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
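A worked example of the frequency helper with made-up tokens; on CPython 3.7+ ties keep first-seen order because sorted() is stable.

# Sketch: rank tokens by count, truncated to the top two entries.
tokens = ["the", "a", "the", "the", "an"]
print(get_freq_tuples(tokens, 2))  # -> [('the', 3), ('a', 1)]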
train
_contains_blinded_text
Heuristic to determine whether the treebank has blinded texts or not
bin/ud/run_eval.py
def _contains_blinded_text(stats_xml): """ Heuristic to determine whether the treebank has blinded texts or not """ tree = ET.parse(stats_xml) root = tree.getroot() total_tokens = int(root.find('size/total/tokens').text) unique_lemmas = int(root.find('lemmas').get('unique')) # assume the corpus is largely blinded when there are less than 1% unique tokens return (unique_lemmas / total_tokens) < 0.01
def _contains_blinded_text(stats_xml): """ Heuristic to determine whether the treebank has blinded texts or not """ tree = ET.parse(stats_xml) root = tree.getroot() total_tokens = int(root.find('size/total/tokens').text) unique_lemmas = int(root.find('lemmas').get('unique')) # assume the corpus is largely blinded when there are less than 1% unique tokens return (unique_lemmas / total_tokens) < 0.01
[ "Heuristic", "to", "determine", "whether", "the", "treebank", "has", "blinded", "texts", "or", "not" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L71-L79
[ "def", "_contains_blinded_text", "(", "stats_xml", ")", ":", "tree", "=", "ET", ".", "parse", "(", "stats_xml", ")", "root", "=", "tree", ".", "getroot", "(", ")", "total_tokens", "=", "int", "(", "root", ".", "find", "(", "'size/total/tokens'", ")", ".", "text", ")", "unique_lemmas", "=", "int", "(", "root", ".", "find", "(", "'lemmas'", ")", ".", "get", "(", "'unique'", ")", ")", "# assume the corpus is largely blinded when there are less than 1% unique tokens", "return", "(", "unique_lemmas", "/", "total_tokens", ")", "<", "0.01" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
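A toy illustration of the 1% threshold; ET.parse also accepts file objects, so the stats.xml below is built in memory with made-up counts.

# Sketch: 400 unique lemmas over 100000 tokens -> ratio 0.004 -> "blinded".
import io
stats = io.StringIO('<treebank><size><total><tokens>100000</tokens></total></size>'
                    '<lemmas unique="400"/></treebank>')
print(_contains_blinded_text(stats))  # -> True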
train
fetch_all_treebanks
Fetch the txt files for all treebanks for a given set of languages
bin/ud/run_eval.py
def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
    """ Fetch the txt files for all treebanks for a given set of languages """
    all_treebanks = dict()
    treebank_size = dict()
    for l in languages:
        all_treebanks[l] = []
        treebank_size[l] = 0

    for treebank_dir in ud_dir.iterdir():
        if treebank_dir.is_dir():
            for txt_path in treebank_dir.iterdir():
                if txt_path.name.endswith('-ud-' + corpus + '.txt'):
                    file_lang = txt_path.name.split('_')[0]
                    if file_lang in languages:
                        gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu')
                        stats_xml = treebank_dir / "stats.xml"
                        # ignore treebanks where the texts are not publicly available
                        if not _contains_blinded_text(stats_xml):
                            if not best_per_language:
                                all_treebanks[file_lang].append(txt_path)
                            # check the tokens in the gold annotation to keep only the biggest treebank per language
                            else:
                                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                                    gold_ud = conll17_ud_eval.load_conllu(gold_file)
                                    gold_tokens = len(gold_ud.tokens)
                                if treebank_size[file_lang] < gold_tokens:
                                    all_treebanks[file_lang] = [txt_path]
                                    treebank_size[file_lang] = gold_tokens

    return all_treebanks
def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
    """ Fetch the txt files for all treebanks for a given set of languages """
    all_treebanks = dict()
    treebank_size = dict()
    for l in languages:
        all_treebanks[l] = []
        treebank_size[l] = 0

    for treebank_dir in ud_dir.iterdir():
        if treebank_dir.is_dir():
            for txt_path in treebank_dir.iterdir():
                if txt_path.name.endswith('-ud-' + corpus + '.txt'):
                    file_lang = txt_path.name.split('_')[0]
                    if file_lang in languages:
                        gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu')
                        stats_xml = treebank_dir / "stats.xml"
                        # ignore treebanks where the texts are not publicly available
                        if not _contains_blinded_text(stats_xml):
                            if not best_per_language:
                                all_treebanks[file_lang].append(txt_path)
                            # check the tokens in the gold annotation to keep only the biggest treebank per language
                            else:
                                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                                    gold_ud = conll17_ud_eval.load_conllu(gold_file)
                                    gold_tokens = len(gold_ud.tokens)
                                if treebank_size[file_lang] < gold_tokens:
                                    all_treebanks[file_lang] = [txt_path]
                                    treebank_size[file_lang] = gold_tokens

    return all_treebanks
[ "Fetch", "the", "txt", "files", "for", "all", "treebanks", "for", "a", "given", "set", "of", "languages" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L82-L111
[ "def", "fetch_all_treebanks", "(", "ud_dir", ",", "languages", ",", "corpus", ",", "best_per_language", ")", ":", "all_treebanks", "=", "dict", "(", ")", "treebank_size", "=", "dict", "(", ")", "for", "l", "in", "languages", ":", "all_treebanks", "[", "l", "]", "=", "[", "]", "treebank_size", "[", "l", "]", "=", "0", "for", "treebank_dir", "in", "ud_dir", ".", "iterdir", "(", ")", ":", "if", "treebank_dir", ".", "is_dir", "(", ")", ":", "for", "txt_path", "in", "treebank_dir", ".", "iterdir", "(", ")", ":", "if", "txt_path", ".", "name", ".", "endswith", "(", "'-ud-'", "+", "corpus", "+", "'.txt'", ")", ":", "file_lang", "=", "txt_path", ".", "name", ".", "split", "(", "'_'", ")", "[", "0", "]", "if", "file_lang", "in", "languages", ":", "gold_path", "=", "treebank_dir", "/", "txt_path", ".", "name", ".", "replace", "(", "'.txt'", ",", "'.conllu'", ")", "stats_xml", "=", "treebank_dir", "/", "\"stats.xml\"", "# ignore treebanks where the texts are not publicly available", "if", "not", "_contains_blinded_text", "(", "stats_xml", ")", ":", "if", "not", "best_per_language", ":", "all_treebanks", "[", "file_lang", "]", ".", "append", "(", "txt_path", ")", "# check the tokens in the gold annotation to keep only the biggest treebank per language", "else", ":", "with", "gold_path", ".", "open", "(", "mode", "=", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "gold_file", ":", "gold_ud", "=", "conll17_ud_eval", ".", "load_conllu", "(", "gold_file", ")", "gold_tokens", "=", "len", "(", "gold_ud", ".", "tokens", ")", "if", "treebank_size", "[", "file_lang", "]", "<", "gold_tokens", ":", "all_treebanks", "[", "file_lang", "]", "=", "[", "txt_path", "]", "treebank_size", "[", "file_lang", "]", "=", "gold_tokens", "return", "all_treebanks" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
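Usage sketch with a hypothetical path; it assumes the standard UD release layout, one directory per treebank holding *-ud-train.txt, the matching .conllu file and a stats.xml.

# Sketch: keep only the largest publicly available training treebank per language.
from pathlib import Path
treebanks = fetch_all_treebanks(Path("ud-treebanks-v2.3"), ["en", "nl"],
                                "train", best_per_language=True)
print({lang: [p.name for p in paths] for lang, paths in treebanks.items()})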
train
run_single_eval
Run an evaluation of a model nlp on a certain specified treebank
bin/ud/run_eval.py
def run_single_eval(nlp, loading_time, print_name, text_path, gold_ud, tmp_output_path, out_file, print_header, check_parse, print_freq_tasks):
    """ Run an evaluation of a model nlp on a certain specified treebank """
    with text_path.open(mode='r', encoding='utf-8') as f:
        flat_text = f.read()

    # STEP 1: tokenize text
    tokenization_start = time.time()
    texts = split_text(flat_text)
    docs = list(nlp.pipe(texts))
    tokenization_end = time.time()
    tokenization_time = tokenization_end - tokenization_start

    # STEP 2: record stats and timings
    tokens_per_s = int(len(gold_ud.tokens) / tokenization_time)

    print_header_1 = ['date', 'text_path', 'gold_tokens', 'model', 'loading_time', 'tokenization_time', 'tokens_per_s']
    print_string_1 = [str(datetime.date.today()), text_path.name, len(gold_ud.tokens), print_name,
                      "%.2f" % loading_time, "%.2f" % tokenization_time, tokens_per_s]

    # STEP 3: evaluate predicted tokens and features
    with tmp_output_path.open(mode="w", encoding="utf8") as tmp_out_file:
        write_conllu(docs, tmp_out_file)
    with tmp_output_path.open(mode="r", encoding="utf8") as sys_file:
        sys_ud = conll17_ud_eval.load_conllu(sys_file, check_parse=check_parse)
    tmp_output_path.unlink()
    scores = conll17_ud_eval.evaluate(gold_ud, sys_ud, check_parse=check_parse)

    # STEP 4: format the scoring results
    eval_headers = EVAL_PARSE
    if not check_parse:
        eval_headers = EVAL_NO_PARSE

    for score_name in eval_headers:
        score = scores[score_name]
        print_string_1.extend(["%.2f" % score.precision,
                               "%.2f" % score.recall,
                               "%.2f" % score.f1])
        print_string_1.append("-" if score.aligned_accuracy is None else "%.2f" % score.aligned_accuracy)
        print_string_1.append("-" if score.undersegmented is None else "%.4f" % score.under_perc)
        print_string_1.append("-" if score.oversegmented is None else "%.4f" % score.over_perc)

        print_header_1.extend([score_name + '_p', score_name + '_r', score_name + '_F',
                               score_name + '_acc', score_name + '_under', score_name + '_over'])

        if score_name in print_freq_tasks:
            print_header_1.extend([score_name + '_word_under_ex', score_name + '_shape_under_ex',
                                   score_name + '_word_over_ex', score_name + '_shape_over_ex'])

            d_under_words = get_freq_tuples(score.undersegmented, PRINT_TOTAL)
            d_under_shapes = get_freq_tuples([word_shape(x) for x in score.undersegmented], PRINT_TOTAL)
            d_over_words = get_freq_tuples(score.oversegmented, PRINT_TOTAL)
            d_over_shapes = get_freq_tuples([word_shape(x) for x in score.oversegmented], PRINT_TOTAL)

            # saving to CSV with ; seperator so blinding ; in the example output
            print_string_1.append(
                str({k: v for k, v in d_under_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_under_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))

    # STEP 5: print the formatted results to CSV
    if print_header:
        out_file.write(';'.join(map(str, print_header_1)) + '\n')
    out_file.write(';'.join(map(str, print_string_1)) + '\n')
def run_single_eval(nlp, loading_time, print_name, text_path, gold_ud, tmp_output_path, out_file, print_header, check_parse, print_freq_tasks):
    """ Run an evaluation of a model nlp on a certain specified treebank """
    with text_path.open(mode='r', encoding='utf-8') as f:
        flat_text = f.read()

    # STEP 1: tokenize text
    tokenization_start = time.time()
    texts = split_text(flat_text)
    docs = list(nlp.pipe(texts))
    tokenization_end = time.time()
    tokenization_time = tokenization_end - tokenization_start

    # STEP 2: record stats and timings
    tokens_per_s = int(len(gold_ud.tokens) / tokenization_time)

    print_header_1 = ['date', 'text_path', 'gold_tokens', 'model', 'loading_time', 'tokenization_time', 'tokens_per_s']
    print_string_1 = [str(datetime.date.today()), text_path.name, len(gold_ud.tokens), print_name,
                      "%.2f" % loading_time, "%.2f" % tokenization_time, tokens_per_s]

    # STEP 3: evaluate predicted tokens and features
    with tmp_output_path.open(mode="w", encoding="utf8") as tmp_out_file:
        write_conllu(docs, tmp_out_file)
    with tmp_output_path.open(mode="r", encoding="utf8") as sys_file:
        sys_ud = conll17_ud_eval.load_conllu(sys_file, check_parse=check_parse)
    tmp_output_path.unlink()
    scores = conll17_ud_eval.evaluate(gold_ud, sys_ud, check_parse=check_parse)

    # STEP 4: format the scoring results
    eval_headers = EVAL_PARSE
    if not check_parse:
        eval_headers = EVAL_NO_PARSE

    for score_name in eval_headers:
        score = scores[score_name]
        print_string_1.extend(["%.2f" % score.precision,
                               "%.2f" % score.recall,
                               "%.2f" % score.f1])
        print_string_1.append("-" if score.aligned_accuracy is None else "%.2f" % score.aligned_accuracy)
        print_string_1.append("-" if score.undersegmented is None else "%.4f" % score.under_perc)
        print_string_1.append("-" if score.oversegmented is None else "%.4f" % score.over_perc)

        print_header_1.extend([score_name + '_p', score_name + '_r', score_name + '_F',
                               score_name + '_acc', score_name + '_under', score_name + '_over'])

        if score_name in print_freq_tasks:
            print_header_1.extend([score_name + '_word_under_ex', score_name + '_shape_under_ex',
                                   score_name + '_word_over_ex', score_name + '_shape_over_ex'])

            d_under_words = get_freq_tuples(score.undersegmented, PRINT_TOTAL)
            d_under_shapes = get_freq_tuples([word_shape(x) for x in score.undersegmented], PRINT_TOTAL)
            d_over_words = get_freq_tuples(score.oversegmented, PRINT_TOTAL)
            d_over_shapes = get_freq_tuples([word_shape(x) for x in score.oversegmented], PRINT_TOTAL)

            # saving to CSV with ; seperator so blinding ; in the example output
            print_string_1.append(
                str({k: v for k, v in d_under_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_under_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))

    # STEP 5: print the formatted results to CSV
    if print_header:
        out_file.write(';'.join(map(str, print_header_1)) + '\n')
    out_file.write(';'.join(map(str, print_string_1)) + '\n')
[ "Run", "an", "evaluation", "of", "a", "model", "nlp", "on", "a", "certain", "specified", "treebank" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L114-L181
[ "def", "run_single_eval", "(", "nlp", ",", "loading_time", ",", "print_name", ",", "text_path", ",", "gold_ud", ",", "tmp_output_path", ",", "out_file", ",", "print_header", ",", "check_parse", ",", "print_freq_tasks", ")", ":", "with", "text_path", ".", "open", "(", "mode", "=", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "flat_text", "=", "f", ".", "read", "(", ")", "# STEP 1: tokenize text", "tokenization_start", "=", "time", ".", "time", "(", ")", "texts", "=", "split_text", "(", "flat_text", ")", "docs", "=", "list", "(", "nlp", ".", "pipe", "(", "texts", ")", ")", "tokenization_end", "=", "time", ".", "time", "(", ")", "tokenization_time", "=", "tokenization_end", "-", "tokenization_start", "# STEP 2: record stats and timings", "tokens_per_s", "=", "int", "(", "len", "(", "gold_ud", ".", "tokens", ")", "/", "tokenization_time", ")", "print_header_1", "=", "[", "'date'", ",", "'text_path'", ",", "'gold_tokens'", ",", "'model'", ",", "'loading_time'", ",", "'tokenization_time'", ",", "'tokens_per_s'", "]", "print_string_1", "=", "[", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "text_path", ".", "name", ",", "len", "(", "gold_ud", ".", "tokens", ")", ",", "print_name", ",", "\"%.2f\"", "%", "loading_time", ",", "\"%.2f\"", "%", "tokenization_time", ",", "tokens_per_s", "]", "# STEP 3: evaluate predicted tokens and features", "with", "tmp_output_path", ".", "open", "(", "mode", "=", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "tmp_out_file", ":", "write_conllu", "(", "docs", ",", "tmp_out_file", ")", "with", "tmp_output_path", ".", "open", "(", "mode", "=", "\"r\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "sys_file", ":", "sys_ud", "=", "conll17_ud_eval", ".", "load_conllu", "(", "sys_file", ",", "check_parse", "=", "check_parse", ")", "tmp_output_path", ".", "unlink", "(", ")", "scores", "=", "conll17_ud_eval", ".", "evaluate", "(", "gold_ud", ",", "sys_ud", ",", "check_parse", "=", "check_parse", ")", "# STEP 4: format the scoring results", "eval_headers", "=", "EVAL_PARSE", "if", "not", "check_parse", ":", "eval_headers", "=", "EVAL_NO_PARSE", "for", "score_name", "in", "eval_headers", ":", "score", "=", "scores", "[", "score_name", "]", "print_string_1", ".", "extend", "(", "[", "\"%.2f\"", "%", "score", ".", "precision", ",", "\"%.2f\"", "%", "score", ".", "recall", ",", "\"%.2f\"", "%", "score", ".", "f1", "]", ")", "print_string_1", ".", "append", "(", "\"-\"", "if", "score", ".", "aligned_accuracy", "is", "None", "else", "\"%.2f\"", "%", "score", ".", "aligned_accuracy", ")", "print_string_1", ".", "append", "(", "\"-\"", "if", "score", ".", "undersegmented", "is", "None", "else", "\"%.4f\"", "%", "score", ".", "under_perc", ")", "print_string_1", ".", "append", "(", "\"-\"", "if", "score", ".", "oversegmented", "is", "None", "else", "\"%.4f\"", "%", "score", ".", "over_perc", ")", "print_header_1", ".", "extend", "(", "[", "score_name", "+", "'_p'", ",", "score_name", "+", "'_r'", ",", "score_name", "+", "'_F'", ",", "score_name", "+", "'_acc'", ",", "score_name", "+", "'_under'", ",", "score_name", "+", "'_over'", "]", ")", "if", "score_name", "in", "print_freq_tasks", ":", "print_header_1", ".", "extend", "(", "[", "score_name", "+", "'_word_under_ex'", ",", "score_name", "+", "'_shape_under_ex'", ",", "score_name", "+", "'_word_over_ex'", ",", "score_name", "+", "'_shape_over_ex'", "]", ")", "d_under_words", "=", "get_freq_tuples", "(", "score", ".", "undersegmented", ",", "PRINT_TOTAL", ")", "d_under_shapes", "=", 
"get_freq_tuples", "(", "[", "word_shape", "(", "x", ")", "for", "x", "in", "score", ".", "undersegmented", "]", ",", "PRINT_TOTAL", ")", "d_over_words", "=", "get_freq_tuples", "(", "score", ".", "oversegmented", ",", "PRINT_TOTAL", ")", "d_over_shapes", "=", "get_freq_tuples", "(", "[", "word_shape", "(", "x", ")", "for", "x", "in", "score", ".", "oversegmented", "]", ",", "PRINT_TOTAL", ")", "# saving to CSV with ; seperator so blinding ; in the example output", "print_string_1", ".", "append", "(", "str", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d_under_words", "if", "v", ">", "PRINT_FREQ", "}", ")", ".", "replace", "(", "\";\"", ",", "\"*SEMICOLON*\"", ")", ")", "print_string_1", ".", "append", "(", "str", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d_under_shapes", "if", "v", ">", "PRINT_FREQ", "}", ")", ".", "replace", "(", "\";\"", ",", "\"*SEMICOLON*\"", ")", ")", "print_string_1", ".", "append", "(", "str", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d_over_words", "if", "v", ">", "PRINT_FREQ", "}", ")", ".", "replace", "(", "\";\"", ",", "\"*SEMICOLON*\"", ")", ")", "print_string_1", ".", "append", "(", "str", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d_over_shapes", "if", "v", ">", "PRINT_FREQ", "}", ")", ".", "replace", "(", "\";\"", ",", "\"*SEMICOLON*\"", ")", ")", "# STEP 5: print the formatted results to CSV", "if", "print_header", ":", "out_file", ".", "write", "(", "';'", ".", "join", "(", "map", "(", "str", ",", "print_header_1", ")", ")", "+", "'\\n'", ")", "out_file", ".", "write", "(", "';'", ".", "join", "(", "map", "(", "str", ",", "print_string_1", ")", ")", "+", "'\\n'", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
run_all_evals
Run an evaluation for each language with its specified models and treebanks
bin/ud/run_eval.py
def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
    """ Run an evaluation for each language with its specified models and treebanks """
    print_header = True

    for tb_lang, treebank_list in treebanks.items():
        print()
        print("Language", tb_lang)
        for text_path in treebank_list:
            print(" Evaluating on", text_path)

            gold_path = text_path.parent / (text_path.stem + '.conllu')
            print(" Gold data from ", gold_path)

            # nested try blocks to ensure the code can continue with the next iteration after a failure
            try:
                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                    gold_ud = conll17_ud_eval.load_conllu(gold_file)

                for nlp, nlp_loading_time, nlp_name in models[tb_lang]:
                    try:
                        print(" Benchmarking", nlp_name)
                        tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu')
                        run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file,
                                        print_header, check_parse, print_freq_tasks)
                        print_header = False
                    except Exception as e:
                        print(" Ran into trouble: ", str(e))
            except Exception as e:
                print(" Ran into trouble: ", str(e))
def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
    """ Run an evaluation for each language with its specified models and treebanks """
    print_header = True

    for tb_lang, treebank_list in treebanks.items():
        print()
        print("Language", tb_lang)
        for text_path in treebank_list:
            print(" Evaluating on", text_path)

            gold_path = text_path.parent / (text_path.stem + '.conllu')
            print(" Gold data from ", gold_path)

            # nested try blocks to ensure the code can continue with the next iteration after a failure
            try:
                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                    gold_ud = conll17_ud_eval.load_conllu(gold_file)

                for nlp, nlp_loading_time, nlp_name in models[tb_lang]:
                    try:
                        print(" Benchmarking", nlp_name)
                        tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu')
                        run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file,
                                        print_header, check_parse, print_freq_tasks)
                        print_header = False
                    except Exception as e:
                        print(" Ran into trouble: ", str(e))
            except Exception as e:
                print(" Ran into trouble: ", str(e))
[ "Run", "an", "evaluation", "for", "each", "language", "with", "its", "specified", "models", "and", "treebanks" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L184-L212
[ "def", "run_all_evals", "(", "models", ",", "treebanks", ",", "out_file", ",", "check_parse", ",", "print_freq_tasks", ")", ":", "print_header", "=", "True", "for", "tb_lang", ",", "treebank_list", "in", "treebanks", ".", "items", "(", ")", ":", "print", "(", ")", "print", "(", "\"Language\"", ",", "tb_lang", ")", "for", "text_path", "in", "treebank_list", ":", "print", "(", "\" Evaluating on\"", ",", "text_path", ")", "gold_path", "=", "text_path", ".", "parent", "/", "(", "text_path", ".", "stem", "+", "'.conllu'", ")", "print", "(", "\" Gold data from \"", ",", "gold_path", ")", "# nested try blocks to ensure the code can continue with the next iteration after a failure", "try", ":", "with", "gold_path", ".", "open", "(", "mode", "=", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "gold_file", ":", "gold_ud", "=", "conll17_ud_eval", ".", "load_conllu", "(", "gold_file", ")", "for", "nlp", ",", "nlp_loading_time", ",", "nlp_name", "in", "models", "[", "tb_lang", "]", ":", "try", ":", "print", "(", "\" Benchmarking\"", ",", "nlp_name", ")", "tmp_output_path", "=", "text_path", ".", "parent", "/", "str", "(", "'tmp_'", "+", "nlp_name", "+", "'.conllu'", ")", "run_single_eval", "(", "nlp", ",", "nlp_loading_time", ",", "nlp_name", ",", "text_path", ",", "gold_ud", ",", "tmp_output_path", ",", "out_file", ",", "print_header", ",", "check_parse", ",", "print_freq_tasks", ")", "print_header", "=", "False", "except", "Exception", "as", "e", ":", "print", "(", "\" Ran into trouble: \"", ",", "str", "(", "e", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "\" Ran into trouble: \"", ",", "str", "(", "e", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
main
Assemble all treebanks and models to run evaluations with. When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality
bin/ud/run_eval.py
def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False,
         hide_freq=False, corpus='train', best_per_language=False):
    """ Assemble all treebanks and models to run evaluations with.
    When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality """
    languages = [lang.strip() for lang in langs.split(",")]

    print_freq_tasks = []
    if not hide_freq:
        print_freq_tasks = ['Tokens']

    # fetching all relevant treebank from the directory
    treebanks = fetch_all_treebanks(ud_dir, languages, corpus, best_per_language)
    print()
    print("Loading all relevant models for", languages)
    models = dict()

    # multi-lang model
    multi = None
    if not exclude_multi and not check_parse:
        multi = load_model('xx_ent_wiki_sm', add_sentencizer=True)

    # initialize all models with the multi-lang model
    for lang in languages:
        models[lang] = [multi] if multi else []
        # add default models if we don't want to evaluate parsing info
        if not check_parse:
            # Norwegian is 'nb' in spaCy but 'no' in the UD corpora
            if lang == 'no':
                models['no'].append(load_default_model_sentencizer('nb'))
            else:
                models[lang].append(load_default_model_sentencizer(lang))

    # language-specific trained models
    if not exclude_trained_models:
        if 'de' in models:
            models['de'].append(load_model('de_core_news_sm'))
        if 'es' in models:
            models['es'].append(load_model('es_core_news_sm'))
            models['es'].append(load_model('es_core_news_md'))
        if 'pt' in models:
            models['pt'].append(load_model('pt_core_news_sm'))
        if 'it' in models:
            models['it'].append(load_model('it_core_news_sm'))
        if 'nl' in models:
            models['nl'].append(load_model('nl_core_news_sm'))
        if 'en' in models:
            models['en'].append(load_model('en_core_web_sm'))
            models['en'].append(load_model('en_core_web_md'))
            models['en'].append(load_model('en_core_web_lg'))
        if 'fr' in models:
            models['fr'].append(load_model('fr_core_news_sm'))
            models['fr'].append(load_model('fr_core_news_md'))

    with out_path.open(mode='w', encoding='utf-8') as out_file:
        run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks)
def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False,
         hide_freq=False, corpus='train', best_per_language=False):
    """ Assemble all treebanks and models to run evaluations with.
    When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality """
    languages = [lang.strip() for lang in langs.split(",")]

    print_freq_tasks = []
    if not hide_freq:
        print_freq_tasks = ['Tokens']

    # fetching all relevant treebank from the directory
    treebanks = fetch_all_treebanks(ud_dir, languages, corpus, best_per_language)
    print()
    print("Loading all relevant models for", languages)
    models = dict()

    # multi-lang model
    multi = None
    if not exclude_multi and not check_parse:
        multi = load_model('xx_ent_wiki_sm', add_sentencizer=True)

    # initialize all models with the multi-lang model
    for lang in languages:
        models[lang] = [multi] if multi else []
        # add default models if we don't want to evaluate parsing info
        if not check_parse:
            # Norwegian is 'nb' in spaCy but 'no' in the UD corpora
            if lang == 'no':
                models['no'].append(load_default_model_sentencizer('nb'))
            else:
                models[lang].append(load_default_model_sentencizer(lang))

    # language-specific trained models
    if not exclude_trained_models:
        if 'de' in models:
            models['de'].append(load_model('de_core_news_sm'))
        if 'es' in models:
            models['es'].append(load_model('es_core_news_sm'))
            models['es'].append(load_model('es_core_news_md'))
        if 'pt' in models:
            models['pt'].append(load_model('pt_core_news_sm'))
        if 'it' in models:
            models['it'].append(load_model('it_core_news_sm'))
        if 'nl' in models:
            models['nl'].append(load_model('nl_core_news_sm'))
        if 'en' in models:
            models['en'].append(load_model('en_core_web_sm'))
            models['en'].append(load_model('en_core_web_md'))
            models['en'].append(load_model('en_core_web_lg'))
        if 'fr' in models:
            models['fr'].append(load_model('fr_core_news_sm'))
            models['fr'].append(load_model('fr_core_news_md'))

    with out_path.open(mode='w', encoding='utf-8') as out_file:
        run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks)
[ "Assemble", "all", "treebanks", "and", "models", "to", "run", "evaluations", "with", ".", "When", "setting", "check_parse", "to", "True", "the", "default", "models", "will", "not", "be", "evaluated", "as", "they", "don", "t", "have", "parsing", "functionality" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/bin/ud/run_eval.py#L226-L283
[ "def", "main", "(", "out_path", ",", "ud_dir", ",", "check_parse", "=", "False", ",", "langs", "=", "ALL_LANGUAGES", ",", "exclude_trained_models", "=", "False", ",", "exclude_multi", "=", "False", ",", "hide_freq", "=", "False", ",", "corpus", "=", "'train'", ",", "best_per_language", "=", "False", ")", ":", "languages", "=", "[", "lang", ".", "strip", "(", ")", "for", "lang", "in", "langs", ".", "split", "(", "\",\"", ")", "]", "print_freq_tasks", "=", "[", "]", "if", "not", "hide_freq", ":", "print_freq_tasks", "=", "[", "'Tokens'", "]", "# fetching all relevant treebank from the directory", "treebanks", "=", "fetch_all_treebanks", "(", "ud_dir", ",", "languages", ",", "corpus", ",", "best_per_language", ")", "print", "(", ")", "print", "(", "\"Loading all relevant models for\"", ",", "languages", ")", "models", "=", "dict", "(", ")", "# multi-lang model", "multi", "=", "None", "if", "not", "exclude_multi", "and", "not", "check_parse", ":", "multi", "=", "load_model", "(", "'xx_ent_wiki_sm'", ",", "add_sentencizer", "=", "True", ")", "# initialize all models with the multi-lang model", "for", "lang", "in", "languages", ":", "models", "[", "lang", "]", "=", "[", "multi", "]", "if", "multi", "else", "[", "]", "# add default models if we don't want to evaluate parsing info", "if", "not", "check_parse", ":", "# Norwegian is 'nb' in spaCy but 'no' in the UD corpora", "if", "lang", "==", "'no'", ":", "models", "[", "'no'", "]", ".", "append", "(", "load_default_model_sentencizer", "(", "'nb'", ")", ")", "else", ":", "models", "[", "lang", "]", ".", "append", "(", "load_default_model_sentencizer", "(", "lang", ")", ")", "# language-specific trained models", "if", "not", "exclude_trained_models", ":", "if", "'de'", "in", "models", ":", "models", "[", "'de'", "]", ".", "append", "(", "load_model", "(", "'de_core_news_sm'", ")", ")", "if", "'es'", "in", "models", ":", "models", "[", "'es'", "]", ".", "append", "(", "load_model", "(", "'es_core_news_sm'", ")", ")", "models", "[", "'es'", "]", ".", "append", "(", "load_model", "(", "'es_core_news_md'", ")", ")", "if", "'pt'", "in", "models", ":", "models", "[", "'pt'", "]", ".", "append", "(", "load_model", "(", "'pt_core_news_sm'", ")", ")", "if", "'it'", "in", "models", ":", "models", "[", "'it'", "]", ".", "append", "(", "load_model", "(", "'it_core_news_sm'", ")", ")", "if", "'nl'", "in", "models", ":", "models", "[", "'nl'", "]", ".", "append", "(", "load_model", "(", "'nl_core_news_sm'", ")", ")", "if", "'en'", "in", "models", ":", "models", "[", "'en'", "]", ".", "append", "(", "load_model", "(", "'en_core_web_sm'", ")", ")", "models", "[", "'en'", "]", ".", "append", "(", "load_model", "(", "'en_core_web_md'", ")", ")", "models", "[", "'en'", "]", ".", "append", "(", "load_model", "(", "'en_core_web_lg'", ")", ")", "if", "'fr'", "in", "models", ":", "models", "[", "'fr'", "]", ".", "append", "(", "load_model", "(", "'fr_core_news_sm'", ")", ")", "models", "[", "'fr'", "]", ".", "append", "(", "load_model", "(", "'fr_core_news_md'", ")", ")", "with", "out_path", ".", "open", "(", "mode", "=", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "out_file", ":", "run_all_evals", "(", "models", ",", "treebanks", ",", "out_file", ",", "check_parse", ",", "print_freq_tasks", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
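An end-to-end sketch of the driver above, which fans out to run_all_evals and run_single_eval; the paths are placeholders, and the referenced model packages (including xx_ent_wiki_sm for the multi-language baseline) must be installed.

# Sketch: benchmark tokenization (no parse evaluation) on English dev treebanks.
from pathlib import Path
main(Path("results.csv"), Path("ud-treebanks-v2.3"),
     check_parse=False, langs="en", corpus="dev", best_per_language=True)
# results.csv ends up holding one ;-separated row per (model, treebank) pair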
train
noun_chunks
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
spacy/lang/de/syntax_iterators.py
def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
[ "Detect", "base", "noun", "phrases", "from", "a", "dependency", "parse", ".", "Works", "on", "both", "Doc", "and", "Span", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/de/syntax_iterators.py#L7-L46
[ "def", "noun_chunks", "(", "obj", ")", ":", "# this iterator extracts spans headed by NOUNs starting from the left-most", "# syntactic dependent until the NOUN itself for close apposition and", "# measurement construction, the span is sometimes extended to the right of", "# the NOUN. Example: \"eine Tasse Tee\" (a cup (of) tea) returns \"eine Tasse Tee\"", "# and not just \"eine Tasse\", same for \"das Thema Familie\".", "labels", "=", "[", "\"sb\"", ",", "\"oa\"", ",", "\"da\"", ",", "\"nk\"", ",", "\"mo\"", ",", "\"ag\"", ",", "\"ROOT\"", ",", "\"root\"", ",", "\"cj\"", ",", "\"pd\"", ",", "\"og\"", ",", "\"app\"", ",", "]", "doc", "=", "obj", ".", "doc", "# Ensure works on both Doc and Span.", "np_label", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"NP\"", ")", "np_deps", "=", "set", "(", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "label", ")", "for", "label", "in", "labels", ")", "close_app", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"nk\"", ")", "rbracket", "=", "0", "for", "i", ",", "word", "in", "enumerate", "(", "obj", ")", ":", "if", "i", "<", "rbracket", ":", "continue", "if", "word", ".", "pos", "in", "(", "NOUN", ",", "PROPN", ",", "PRON", ")", "and", "word", ".", "dep", "in", "np_deps", ":", "rbracket", "=", "word", ".", "i", "+", "1", "# try to extend the span to the right", "# to capture close apposition/measurement constructions", "for", "rdep", "in", "doc", "[", "word", ".", "i", "]", ".", "rights", ":", "if", "rdep", ".", "pos", "in", "(", "NOUN", ",", "PROPN", ")", "and", "rdep", ".", "dep", "==", "close_app", ":", "rbracket", "=", "rdep", ".", "i", "+", "1", "yield", "word", ".", "left_edge", ".", "i", ",", "rbracket", ",", "np_label" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
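The iterator yields (start, end, label) triples over token indices; in practice it is registered as German's syntax iterator and consumed through doc.noun_chunks. A sketch, assuming de_core_news_sm is installed; the chunks shown are indicative, since they depend on the parse.

# Sketch: the measurement construction keeps "eine Tasse Tee" as one chunk.
import spacy
nlp = spacy.load("de_core_news_sm")
doc = nlp("Ich trinke eine Tasse Tee.")
print([chunk.text for chunk in doc.noun_chunks])  # e.g. ['Ich', 'eine Tasse Tee']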
train
with_cpu
Wrap a model that should run on CPU, transferring inputs and outputs as necessary.
spacy/_ml.py
def with_cpu(ops, model): """Wrap a model that should run on CPU, transferring inputs and outputs as necessary.""" model.to_cpu() def with_cpu_forward(inputs, drop=0.0): cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop) gpu_outputs = _to_device(ops, cpu_outputs) def with_cpu_backprop(d_outputs, sgd=None): cpu_d_outputs = _to_cpu(d_outputs) return backprop(cpu_d_outputs, sgd=sgd) return gpu_outputs, with_cpu_backprop return wrap(with_cpu_forward, model)
def with_cpu(ops, model): """Wrap a model that should run on CPU, transferring inputs and outputs as necessary.""" model.to_cpu() def with_cpu_forward(inputs, drop=0.0): cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop) gpu_outputs = _to_device(ops, cpu_outputs) def with_cpu_backprop(d_outputs, sgd=None): cpu_d_outputs = _to_cpu(d_outputs) return backprop(cpu_d_outputs, sgd=sgd) return gpu_outputs, with_cpu_backprop return wrap(with_cpu_forward, model)
[ "Wrap", "a", "model", "that", "should", "run", "on", "CPU", "transferring", "inputs", "and", "outputs", "as", "necessary", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L84-L99
[ "def", "with_cpu", "(", "ops", ",", "model", ")", ":", "model", ".", "to_cpu", "(", ")", "def", "with_cpu_forward", "(", "inputs", ",", "drop", "=", "0.0", ")", ":", "cpu_outputs", ",", "backprop", "=", "model", ".", "begin_update", "(", "_to_cpu", "(", "inputs", ")", ",", "drop", "=", "drop", ")", "gpu_outputs", "=", "_to_device", "(", "ops", ",", "cpu_outputs", ")", "def", "with_cpu_backprop", "(", "d_outputs", ",", "sgd", "=", "None", ")", ":", "cpu_d_outputs", "=", "_to_cpu", "(", "d_outputs", ")", "return", "backprop", "(", "cpu_d_outputs", ",", "sgd", "=", "sgd", ")", "return", "gpu_outputs", ",", "with_cpu_backprop", "return", "wrap", "(", "with_cpu_forward", ",", "model", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
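A sketch under the assumption that this revision sits on Thinc 7.x, whose Maxout layer and NumpyOps stand in below for a real pipeline's sub-model and device ops.

# Sketch: pin one layer to CPU; inputs and outputs cross devices in the wrapper.
import numpy
from thinc.v2v import Maxout
from thinc.neural.ops import NumpyOps

wrapped = with_cpu(NumpyOps(), Maxout(16, 8))
y, backprop = wrapped.begin_update(numpy.zeros((4, 8), dtype="f"))
print(y.shape)  # -> (4, 16)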
train
build_simple_cnn_text_classifier
Build a simple CNN text classifier, given a token-to-vector model as inputs. If exclusive_classes=True, a softmax non-linearity is applied, so that the outputs sum to 1. If exclusive_classes=False, a logistic non-linearity is applied instead, so that outputs are in the range [0, 1].
spacy/_ml.py
def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg): """ Build a simple CNN text classifier, given a token-to-vector model as inputs. If exclusive_classes=True, a softmax non-linearity is applied, so that the outputs sum to 1. If exclusive_classes=False, a logistic non-linearity is applied instead, so that outputs are in the range [0, 1]. """ with Model.define_operators({">>": chain}): if exclusive_classes: output_layer = Softmax(nr_class, tok2vec.nO) else: output_layer = ( zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic ) model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer model.tok2vec = chain(tok2vec, flatten) model.nO = nr_class return model
def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg): """ Build a simple CNN text classifier, given a token-to-vector model as inputs. If exclusive_classes=True, a softmax non-linearity is applied, so that the outputs sum to 1. If exclusive_classes=False, a logistic non-linearity is applied instead, so that outputs are in the range [0, 1]. """ with Model.define_operators({">>": chain}): if exclusive_classes: output_layer = Softmax(nr_class, tok2vec.nO) else: output_layer = ( zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic ) model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer model.tok2vec = chain(tok2vec, flatten) model.nO = nr_class return model
[ "Build", "a", "simple", "CNN", "text", "classifier", "given", "a", "token", "-", "to", "-", "vector", "model", "as", "inputs", ".", "If", "exclusive_classes", "=", "True", "a", "softmax", "non", "-", "linearity", "is", "applied", "so", "that", "the", "outputs", "sum", "to", "1", ".", "If", "exclusive_classes", "=", "False", "a", "logistic", "non", "-", "linearity", "is", "applied", "instead", "so", "that", "outputs", "are", "in", "the", "range", "[", "0", "1", "]", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L635-L652
[ "def", "build_simple_cnn_text_classifier", "(", "tok2vec", ",", "nr_class", ",", "exclusive_classes", "=", "False", ",", "*", "*", "cfg", ")", ":", "with", "Model", ".", "define_operators", "(", "{", "\">>\"", ":", "chain", "}", ")", ":", "if", "exclusive_classes", ":", "output_layer", "=", "Softmax", "(", "nr_class", ",", "tok2vec", ".", "nO", ")", "else", ":", "output_layer", "=", "(", "zero_init", "(", "Affine", "(", "nr_class", ",", "tok2vec", ".", "nO", ",", "drop_factor", "=", "0.0", ")", ")", ">>", "logistic", ")", "model", "=", "tok2vec", ">>", "flatten_add_lengths", ">>", "Pooling", "(", "mean_pool", ")", ">>", "output_layer", "model", ".", "tok2vec", "=", "chain", "(", "tok2vec", ",", "flatten", ")", "model", ".", "nO", "=", "nr_class", "return", "model" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
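A wiring sketch; Tok2Vec is the token-to-vector factory from the same module, and the width and embedding-table numbers are arbitrary.

# Sketch: a 3-class softmax text classifier on top of a CNN tok2vec layer.
from spacy._ml import Tok2Vec
tok2vec = Tok2Vec(width=96, embed_size=2000)
model = build_simple_cnn_text_classifier(tok2vec, nr_class=3, exclusive_classes=True)
print(model.nO)  # -> 3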
train
concatenate_lists
Compose two or more models `f`, `g`, etc, such that their outputs are concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
spacy/_ml.py
def concatenate_lists(*layers, **kwargs): # pragma: no cover """Compose two or more models `f`, `g`, etc, such that their outputs are concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))` """ if not layers: return noop() drop_factor = kwargs.get("drop_factor", 1.0) ops = layers[0].ops layers = [chain(layer, flatten) for layer in layers] concat = concatenate(*layers) def concatenate_lists_fwd(Xs, drop=0.0): drop *= drop_factor lengths = ops.asarray([len(X) for X in Xs], dtype="i") flat_y, bp_flat_y = concat.begin_update(Xs, drop=drop) ys = ops.unflatten(flat_y, lengths) def concatenate_lists_bwd(d_ys, sgd=None): return bp_flat_y(ops.flatten(d_ys), sgd=sgd) return ys, concatenate_lists_bwd model = wrap(concatenate_lists_fwd, concat) return model
def concatenate_lists(*layers, **kwargs): # pragma: no cover """Compose two or more models `f`, `g`, etc, such that their outputs are concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))` """ if not layers: return noop() drop_factor = kwargs.get("drop_factor", 1.0) ops = layers[0].ops layers = [chain(layer, flatten) for layer in layers] concat = concatenate(*layers) def concatenate_lists_fwd(Xs, drop=0.0): drop *= drop_factor lengths = ops.asarray([len(X) for X in Xs], dtype="i") flat_y, bp_flat_y = concat.begin_update(Xs, drop=drop) ys = ops.unflatten(flat_y, lengths) def concatenate_lists_bwd(d_ys, sgd=None): return bp_flat_y(ops.flatten(d_ys), sgd=sgd) return ys, concatenate_lists_bwd model = wrap(concatenate_lists_fwd, concat) return model
[ "Compose", "two", "or", "more", "models", "f", "g", "etc", "such", "that", "their", "outputs", "are", "concatenated", "i", ".", "e", ".", "concatenate", "(", "f", "g", ")", "(", "x", ")", "computes", "hstack", "(", "f", "(", "x", ")", "g", "(", "x", "))" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L667-L690
[ "def", "concatenate_lists", "(", "*", "layers", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "if", "not", "layers", ":", "return", "noop", "(", ")", "drop_factor", "=", "kwargs", ".", "get", "(", "\"drop_factor\"", ",", "1.0", ")", "ops", "=", "layers", "[", "0", "]", ".", "ops", "layers", "=", "[", "chain", "(", "layer", ",", "flatten", ")", "for", "layer", "in", "layers", "]", "concat", "=", "concatenate", "(", "*", "layers", ")", "def", "concatenate_lists_fwd", "(", "Xs", ",", "drop", "=", "0.0", ")", ":", "drop", "*=", "drop_factor", "lengths", "=", "ops", ".", "asarray", "(", "[", "len", "(", "X", ")", "for", "X", "in", "Xs", "]", ",", "dtype", "=", "\"i\"", ")", "flat_y", ",", "bp_flat_y", "=", "concat", ".", "begin_update", "(", "Xs", ",", "drop", "=", "drop", ")", "ys", "=", "ops", ".", "unflatten", "(", "flat_y", ",", "lengths", ")", "def", "concatenate_lists_bwd", "(", "d_ys", ",", "sgd", "=", "None", ")", ":", "return", "bp_flat_y", "(", "ops", ".", "flatten", "(", "d_ys", ")", ",", "sgd", "=", "sgd", ")", "return", "ys", ",", "concatenate_lists_bwd", "model", "=", "wrap", "(", "concatenate_lists_fwd", ",", "concat", ")", "return", "model" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
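A self-contained sketch with plain Thinc layers standing in for spaCy sub-models; each branch maps a list of arrays to a list of arrays, and the wrapper stacks their outputs column-wise per item.

# Sketch: per-item hstack of two branches over a ragged batch.
import numpy
from thinc.api import with_flatten
from thinc.v2v import Affine

combined = concatenate_lists(with_flatten(Affine(3, 4)), with_flatten(Affine(2, 4)))
Xs = [numpy.ones((5, 4), dtype="f"), numpy.ones((2, 4), dtype="f")]
ys, backprop = combined.begin_update(Xs)
print([y.shape for y in ys])  # -> [(5, 5), (2, 5)], i.e. 3 + 2 output columns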
train
masked_language_model
Convert a model into a BERT-style masked language model
spacy/_ml.py
def masked_language_model(vocab, model, mask_prob=0.15): """Convert a model into a BERT-style masked language model""" random_words = _RandomWords(vocab) def mlm_forward(docs, drop=0.0): mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob) mask = model.ops.asarray(mask).reshape((mask.shape[0], 1)) output, backprop = model.begin_update(docs, drop=drop) def mlm_backward(d_output, sgd=None): d_output *= 1 - mask return backprop(d_output, sgd=sgd) return output, mlm_backward return wrap(mlm_forward, model)
def masked_language_model(vocab, model, mask_prob=0.15): """Convert a model into a BERT-style masked language model""" random_words = _RandomWords(vocab) def mlm_forward(docs, drop=0.0): mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob) mask = model.ops.asarray(mask).reshape((mask.shape[0], 1)) output, backprop = model.begin_update(docs, drop=drop) def mlm_backward(d_output, sgd=None): d_output *= 1 - mask return backprop(d_output, sgd=sgd) return output, mlm_backward return wrap(mlm_forward, model)
[ "Convert", "a", "model", "into", "a", "BERT", "-", "style", "masked", "language", "model" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L693-L709
[ "def", "masked_language_model", "(", "vocab", ",", "model", ",", "mask_prob", "=", "0.15", ")", ":", "random_words", "=", "_RandomWords", "(", "vocab", ")", "def", "mlm_forward", "(", "docs", ",", "drop", "=", "0.0", ")", ":", "mask", ",", "docs", "=", "_apply_mask", "(", "docs", ",", "random_words", ",", "mask_prob", "=", "mask_prob", ")", "mask", "=", "model", ".", "ops", ".", "asarray", "(", "mask", ")", ".", "reshape", "(", "(", "mask", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "output", ",", "backprop", "=", "model", ".", "begin_update", "(", "docs", ",", "drop", "=", "drop", ")", "def", "mlm_backward", "(", "d_output", ",", "sgd", "=", "None", ")", ":", "d_output", "*=", "1", "-", "mask", "return", "backprop", "(", "d_output", ",", "sgd", "=", "sgd", ")", "return", "output", ",", "mlm_backward", "return", "wrap", "(", "mlm_forward", ",", "model", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
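The gradient trick in mlm_backward can be shown standalone with NumPy, assuming the mask convention implied by _apply_mask (mask == 1 marks tokens left intact), so multiplying by 1 - mask keeps gradients only at corrupted positions; the shapes here are illustrative.

import numpy as np

rng = np.random.default_rng(0)
d_output = rng.standard_normal((5, 4))   # (n_tokens, width) gradient
mask = rng.random((5, 1)) >= 0.15        # True = token was left unchanged
d_output = d_output * (1 - mask)         # learn only from the corrupted tokens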
train
SimilarityHook.begin_training
Allocate model, using width from tensorizer in pipeline. gold_tuples (iterable): Gold-standard training data. pipeline (list): The pipeline the model is part of.
spacy/pipeline/hooks.py
def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs): """Allocate model, using width from tensorizer in pipeline. gold_tuples (iterable): Gold-standard training data. pipeline (list): The pipeline the model is part of. """ if self.model is True: self.model = self.Model(pipeline[0].model.nO) link_vectors_to_models(self.vocab) if sgd is None: sgd = self.create_optimizer() return sgd
def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs): """Allocate model, using width from tensorizer in pipeline. gold_tuples (iterable): Gold-standard training data. pipeline (list): The pipeline the model is part of. """ if self.model is True: self.model = self.Model(pipeline[0].model.nO) link_vectors_to_models(self.vocab) if sgd is None: sgd = self.create_optimizer() return sgd
[ "Allocate", "model", "using", "width", "from", "tensorizer", "in", "pipeline", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/hooks.py#L89-L100
[ "def", "begin_training", "(", "self", ",", "_", "=", "tuple", "(", ")", ",", "pipeline", "=", "None", ",", "sgd", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "model", "is", "True", ":", "self", ".", "model", "=", "self", ".", "Model", "(", "pipeline", "[", "0", "]", ".", "model", ".", "nO", ")", "link_vectors_to_models", "(", "self", ".", "vocab", ")", "if", "sgd", "is", "None", ":", "sgd", "=", "self", ".", "create_optimizer", "(", ")", "return", "sgd" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
DependencyRenderer.render
Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered SVG or HTML markup.
spacy/displacy/render.py
def render(self, parsed, page=False, minify=False): """Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered SVG or HTML markup. """ # Create a random ID prefix to make sure parses don't receive the # same ID, even if they're identical id_prefix = uuid.uuid4().hex rendered = [] for i, p in enumerate(parsed): if i == 0: settings = p.get("settings", {}) self.direction = settings.get("direction", DEFAULT_DIR) self.lang = settings.get("lang", DEFAULT_LANG) render_id = "{}-{}".format(id_prefix, i) svg = self.render_svg(render_id, p["words"], p["arcs"]) rendered.append(svg) if page: content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered]) markup = TPL_PAGE.format( content=content, lang=self.lang, dir=self.direction ) else: markup = "".join(rendered) if minify: return minify_html(markup) return markup
def render(self, parsed, page=False, minify=False): """Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered SVG or HTML markup. """ # Create a random ID prefix to make sure parses don't receive the # same ID, even if they're identical id_prefix = uuid.uuid4().hex rendered = [] for i, p in enumerate(parsed): if i == 0: settings = p.get("settings", {}) self.direction = settings.get("direction", DEFAULT_DIR) self.lang = settings.get("lang", DEFAULT_LANG) render_id = "{}-{}".format(id_prefix, i) svg = self.render_svg(render_id, p["words"], p["arcs"]) rendered.append(svg) if page: content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered]) markup = TPL_PAGE.format( content=content, lang=self.lang, dir=self.direction ) else: markup = "".join(rendered) if minify: return minify_html(markup) return markup
[ "Render", "complete", "markup", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L39-L68
[ "def", "render", "(", "self", ",", "parsed", ",", "page", "=", "False", ",", "minify", "=", "False", ")", ":", "# Create a random ID prefix to make sure parses don't receive the", "# same ID, even if they're identical", "id_prefix", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "rendered", "=", "[", "]", "for", "i", ",", "p", "in", "enumerate", "(", "parsed", ")", ":", "if", "i", "==", "0", ":", "settings", "=", "p", ".", "get", "(", "\"settings\"", ",", "{", "}", ")", "self", ".", "direction", "=", "settings", ".", "get", "(", "\"direction\"", ",", "DEFAULT_DIR", ")", "self", ".", "lang", "=", "settings", ".", "get", "(", "\"lang\"", ",", "DEFAULT_LANG", ")", "render_id", "=", "\"{}-{}\"", ".", "format", "(", "id_prefix", ",", "i", ")", "svg", "=", "self", ".", "render_svg", "(", "render_id", ",", "p", "[", "\"words\"", "]", ",", "p", "[", "\"arcs\"", "]", ")", "rendered", ".", "append", "(", "svg", ")", "if", "page", ":", "content", "=", "\"\"", ".", "join", "(", "[", "TPL_FIGURE", ".", "format", "(", "content", "=", "svg", ")", "for", "svg", "in", "rendered", "]", ")", "markup", "=", "TPL_PAGE", ".", "format", "(", "content", "=", "content", ",", "lang", "=", "self", ".", "lang", ",", "dir", "=", "self", ".", "direction", ")", "else", ":", "markup", "=", "\"\"", ".", "join", "(", "rendered", ")", "if", "minify", ":", "return", "minify_html", "(", "markup", ")", "return", "markup" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
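In practice this renderer is reached through displacy.render rather than instantiated directly; a short usage sketch (the model name is an example and must be installed separately):

import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # example model
doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")
# style="dep" dispatches to DependencyRenderer.render under the hood
html = displacy.render(doc, style="dep", page=True, minify=True)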
train
DependencyRenderer.render_svg
Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup.
spacy/displacy/render.py
def render_svg(self, render_id, words, arcs): """Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup. """ self.levels = self.get_levels(arcs) self.highest_level = len(self.levels) self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke self.width = self.offset_x + len(words) * self.distance self.height = self.offset_y + 3 * self.word_spacing self.id = render_id words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)] arcs = [ self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i) for i, a in enumerate(arcs) ] content = "".join(words) + "".join(arcs) return TPL_DEP_SVG.format( id=self.id, width=self.width, height=self.height, color=self.color, bg=self.bg, font=self.font, content=content, dir=self.direction, lang=self.lang, )
def render_svg(self, render_id, words, arcs): """Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup. """ self.levels = self.get_levels(arcs) self.highest_level = len(self.levels) self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke self.width = self.offset_x + len(words) * self.distance self.height = self.offset_y + 3 * self.word_spacing self.id = render_id words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)] arcs = [ self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i) for i, a in enumerate(arcs) ] content = "".join(words) + "".join(arcs) return TPL_DEP_SVG.format( id=self.id, width=self.width, height=self.height, color=self.color, bg=self.bg, font=self.font, content=content, dir=self.direction, lang=self.lang, )
[ "Render", "SVG", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L70-L100
[ "def", "render_svg", "(", "self", ",", "render_id", ",", "words", ",", "arcs", ")", ":", "self", ".", "levels", "=", "self", ".", "get_levels", "(", "arcs", ")", "self", ".", "highest_level", "=", "len", "(", "self", ".", "levels", ")", "self", ".", "offset_y", "=", "self", ".", "distance", "/", "2", "*", "self", ".", "highest_level", "+", "self", ".", "arrow_stroke", "self", ".", "width", "=", "self", ".", "offset_x", "+", "len", "(", "words", ")", "*", "self", ".", "distance", "self", ".", "height", "=", "self", ".", "offset_y", "+", "3", "*", "self", ".", "word_spacing", "self", ".", "id", "=", "render_id", "words", "=", "[", "self", ".", "render_word", "(", "w", "[", "\"text\"", "]", ",", "w", "[", "\"tag\"", "]", ",", "i", ")", "for", "i", ",", "w", "in", "enumerate", "(", "words", ")", "]", "arcs", "=", "[", "self", ".", "render_arrow", "(", "a", "[", "\"label\"", "]", ",", "a", "[", "\"start\"", "]", ",", "a", "[", "\"end\"", "]", ",", "a", "[", "\"dir\"", "]", ",", "i", ")", "for", "i", ",", "a", "in", "enumerate", "(", "arcs", ")", "]", "content", "=", "\"\"", ".", "join", "(", "words", ")", "+", "\"\"", ".", "join", "(", "arcs", ")", "return", "TPL_DEP_SVG", ".", "format", "(", "id", "=", "self", ".", "id", ",", "width", "=", "self", ".", "width", ",", "height", "=", "self", ".", "height", ",", "color", "=", "self", ".", "color", ",", "bg", "=", "self", ".", "bg", ",", "font", "=", "self", ".", "font", ",", "content", "=", "content", ",", "dir", "=", "self", ".", "direction", ",", "lang", "=", "self", ".", "lang", ",", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
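The words/arcs structure that render_svg consumes can also be supplied by hand through displacy's documented manual mode, skipping the parsing step:

from spacy import displacy

parsed = {
    "words": [{"text": "This", "tag": "DT"}, {"text": "works", "tag": "VBZ"}],
    "arcs": [{"start": 0, "end": 1, "label": "nsubj", "dir": "left"}],
}
svg = displacy.render(parsed, style="dep", manual=True)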
train
DependencyRenderer.render_word
Render individual word. text (unicode): Word text. tag (unicode): Part-of-speech tag. i (int): Unique ID, typically word index. RETURNS (unicode): Rendered SVG markup.
spacy/displacy/render.py
def render_word(self, text, tag, i): """Render individual word. text (unicode): Word text. tag (unicode): Part-of-speech tag. i (int): Unique ID, typically word index. RETURNS (unicode): Rendered SVG markup. """ y = self.offset_y + self.word_spacing x = self.offset_x + i * self.distance if self.direction == "rtl": x = self.width - x html_text = escape_html(text) return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
def render_word(self, text, tag, i): """Render individual word. text (unicode): Word text. tag (unicode): Part-of-speech tag. i (int): Unique ID, typically word index. RETURNS (unicode): Rendered SVG markup. """ y = self.offset_y + self.word_spacing x = self.offset_x + i * self.distance if self.direction == "rtl": x = self.width - x html_text = escape_html(text) return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
[ "Render", "individual", "word", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L102-L115
[ "def", "render_word", "(", "self", ",", "text", ",", "tag", ",", "i", ")", ":", "y", "=", "self", ".", "offset_y", "+", "self", ".", "word_spacing", "x", "=", "self", ".", "offset_x", "+", "i", "*", "self", ".", "distance", "if", "self", ".", "direction", "==", "\"rtl\"", ":", "x", "=", "self", ".", "width", "-", "x", "html_text", "=", "escape_html", "(", "text", ")", "return", "TPL_DEP_WORDS", ".", "format", "(", "text", "=", "html_text", ",", "tag", "=", "tag", ",", "x", "=", "x", ",", "y", "=", "y", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
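A quick arithmetic check of the word placement, using offset_x=50 and distance=175 (displacy's documented non-compact defaults) and illustrative values for the quantities render_svg computes:

offset_x, distance = 50, 175       # displacy defaults, non-compact
offset_y, word_spacing = 177, 45   # illustrative: offset_y as computed in render_svg
i = 2                              # third word from the left
x = offset_x + i * distance        # 400
y = offset_y + word_spacing        # 222: all words share one baseline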
train
DependencyRenderer.render_arrow
Render individual arrow. label (unicode): Dependency label. start (int): Index of start word. end (int): Index of end word. direction (unicode): Arrow direction, 'left' or 'right'. i (int): Unique ID, typically arrow index. RETURNS (unicode): Rendered SVG markup.
spacy/displacy/render.py
def render_arrow(self, label, start, end, direction, i): """Render individual arrow. label (unicode): Dependency label. start (int): Index of start word. end (int): Index of end word. direction (unicode): Arrow direction, 'left' or 'right'. i (int): Unique ID, typically arrow index. RETURNS (unicode): Rendered SVG markup. """ level = self.levels.index(end - start) + 1 x_start = self.offset_x + start * self.distance + self.arrow_spacing if self.direction == "rtl": x_start = self.width - x_start y = self.offset_y x_end = ( self.offset_x + (end - start) * self.distance + start * self.distance - self.arrow_spacing * (self.highest_level - level) / 4 ) if self.direction == "rtl": x_end = self.width - x_end y_curve = self.offset_y - level * self.distance / 2 if self.compact: y_curve = self.offset_y - level * self.distance / 6 if y_curve == 0 and len(self.levels) > 5: y_curve = -self.distance arrowhead = self.get_arrowhead(direction, x_start, y, x_end) arc = self.get_arc(x_start, y, y_curve, x_end) label_side = "right" if self.direction == "rtl" else "left" return TPL_DEP_ARCS.format( id=self.id, i=i, stroke=self.arrow_stroke, head=arrowhead, label=label, label_side=label_side, arc=arc, )
def render_arrow(self, label, start, end, direction, i): """Render individual arrow. label (unicode): Dependency label. start (int): Index of start word. end (int): Index of end word. direction (unicode): Arrow direction, 'left' or 'right'. i (int): Unique ID, typically arrow index. RETURNS (unicode): Rendered SVG markup. """ level = self.levels.index(end - start) + 1 x_start = self.offset_x + start * self.distance + self.arrow_spacing if self.direction == "rtl": x_start = self.width - x_start y = self.offset_y x_end = ( self.offset_x + (end - start) * self.distance + start * self.distance - self.arrow_spacing * (self.highest_level - level) / 4 ) if self.direction == "rtl": x_end = self.width - x_end y_curve = self.offset_y - level * self.distance / 2 if self.compact: y_curve = self.offset_y - level * self.distance / 6 if y_curve == 0 and len(self.levels) > 5: y_curve = -self.distance arrowhead = self.get_arrowhead(direction, x_start, y, x_end) arc = self.get_arc(x_start, y, y_curve, x_end) label_side = "right" if self.direction == "rtl" else "left" return TPL_DEP_ARCS.format( id=self.id, i=i, stroke=self.arrow_stroke, head=arrowhead, label=label, label_side=label_side, arc=arc, )
[ "Render", "individual", "arrow", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L117-L156
[ "def", "render_arrow", "(", "self", ",", "label", ",", "start", ",", "end", ",", "direction", ",", "i", ")", ":", "level", "=", "self", ".", "levels", ".", "index", "(", "end", "-", "start", ")", "+", "1", "x_start", "=", "self", ".", "offset_x", "+", "start", "*", "self", ".", "distance", "+", "self", ".", "arrow_spacing", "if", "self", ".", "direction", "==", "\"rtl\"", ":", "x_start", "=", "self", ".", "width", "-", "x_start", "y", "=", "self", ".", "offset_y", "x_end", "=", "(", "self", ".", "offset_x", "+", "(", "end", "-", "start", ")", "*", "self", ".", "distance", "+", "start", "*", "self", ".", "distance", "-", "self", ".", "arrow_spacing", "*", "(", "self", ".", "highest_level", "-", "level", ")", "/", "4", ")", "if", "self", ".", "direction", "==", "\"rtl\"", ":", "x_end", "=", "self", ".", "width", "-", "x_end", "y_curve", "=", "self", ".", "offset_y", "-", "level", "*", "self", ".", "distance", "/", "2", "if", "self", ".", "compact", ":", "y_curve", "=", "self", ".", "offset_y", "-", "level", "*", "self", ".", "distance", "/", "6", "if", "y_curve", "==", "0", "and", "len", "(", "self", ".", "levels", ")", ">", "5", ":", "y_curve", "=", "-", "self", ".", "distance", "arrowhead", "=", "self", ".", "get_arrowhead", "(", "direction", ",", "x_start", ",", "y", ",", "x_end", ")", "arc", "=", "self", ".", "get_arc", "(", "x_start", ",", "y", ",", "y_curve", ",", "x_end", ")", "label_side", "=", "\"right\"", "if", "self", ".", "direction", "==", "\"rtl\"", "else", "\"left\"", "return", "TPL_DEP_ARCS", ".", "format", "(", "id", "=", "self", ".", "id", ",", "i", "=", "i", ",", "stroke", "=", "self", ".", "arrow_stroke", ",", "head", "=", "arrowhead", ",", "label", "=", "label", ",", "label_side", "=", "label_side", ",", "arc", "=", "arc", ",", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
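The arc height follows from the arc's span: level is the 1-based rank of end - start among all distinct spans (see the get_levels record below). A small trace with displacy's documented defaults and illustrative inputs:

offset_x, distance, arrow_spacing = 50, 175, 20  # displacy defaults, non-compact
levels = [1, 2]                                  # output of get_levels()
start, end = 1, 3
level = levels.index(end - start) + 1            # span of 2 -> level 2
x_start = offset_x + start * distance + arrow_spacing  # 245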
train
DependencyRenderer.get_arc
Render individual arc. x_start (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. y_curve (int): Y-coordinate of Cubic Bézier y_curve point. x_end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arc path ('d' attribute).
spacy/displacy/render.py
def get_arc(self, x_start, y, y_curve, x_end): """Render individual arc. x_start (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. y_curve (int): Y-coordinate of Cubic Bézier y_curve point. x_end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arc path ('d' attribute). """ template = "M{x},{y} C{x},{c} {e},{c} {e},{y}" if self.compact: template = "M{x},{y} {x},{c} {e},{c} {e},{y}" return template.format(x=x_start, y=y, c=y_curve, e=x_end)
def get_arc(self, x_start, y, y_curve, x_end): """Render individual arc. x_start (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. y_curve (int): Y-coordinate of Cubic Bézier y_curve point. x_end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arc path ('d' attribute). """ template = "M{x},{y} C{x},{c} {e},{c} {e},{y}" if self.compact: template = "M{x},{y} {x},{c} {e},{c} {e},{y}" return template.format(x=x_start, y=y, c=y_curve, e=x_end)
[ "Render", "individual", "arc", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L158-L170
[ "def", "get_arc", "(", "self", ",", "x_start", ",", "y", ",", "y_curve", ",", "x_end", ")", ":", "template", "=", "\"M{x},{y} C{x},{c} {e},{c} {e},{y}\"", "if", "self", ".", "compact", ":", "template", "=", "\"M{x},{y} {x},{c} {e},{c} {e},{y}\"", "return", "template", ".", "format", "(", "x", "=", "x_start", ",", "y", "=", "y", ",", "c", "=", "y_curve", ",", "e", "=", "x_end", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
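Both templates are plain SVG path strings: the default emits a cubic Bézier (C command), while the compact variant draws the same points as straight segments. A standalone check:

template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
path = template.format(x=70, y=317, c=142, e=220)
assert path == "M70,317 C70,142 220,142 220,317"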
train
DependencyRenderer.get_arrowhead
Render individual arrow head. direction (unicode): Arrow direction, 'left' or 'right'. x (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arrow head path ('d' attribute).
spacy/displacy/render.py
def get_arrowhead(self, direction, x, y, end): """Render individual arrow head. direction (unicode): Arrow direction, 'left' or 'right'. x (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arrow head path ('d' attribute). """ if direction == "left": pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2) else: pos1, pos2, pos3 = ( end, end + self.arrow_width - 2, end - self.arrow_width + 2, ) arrowhead = ( pos1, y + 2, pos2, y - self.arrow_width, pos3, y - self.arrow_width, ) return "M{},{} L{},{} {},{}".format(*arrowhead)
def get_arrowhead(self, direction, x, y, end): """Render individual arrow head. direction (unicode): Arrow direction, 'left' or 'right'. x (int): X-coordinate of arrow start point. y (int): Y-coordinate of arrow start and end point. end (int): X-coordinate of arrow end point. RETURNS (unicode): Definition of the arrow head path ('d' attribute). """ if direction == "left": pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2) else: pos1, pos2, pos3 = ( end, end + self.arrow_width - 2, end - self.arrow_width + 2, ) arrowhead = ( pos1, y + 2, pos2, y - self.arrow_width, pos3, y - self.arrow_width, ) return "M{},{} L{},{} {},{}".format(*arrowhead)
[ "Render", "individual", "arrow", "head", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L172-L197
[ "def", "get_arrowhead", "(", "self", ",", "direction", ",", "x", ",", "y", ",", "end", ")", ":", "if", "direction", "==", "\"left\"", ":", "pos1", ",", "pos2", ",", "pos3", "=", "(", "x", ",", "x", "-", "self", ".", "arrow_width", "+", "2", ",", "x", "+", "self", ".", "arrow_width", "-", "2", ")", "else", ":", "pos1", ",", "pos2", ",", "pos3", "=", "(", "end", ",", "end", "+", "self", ".", "arrow_width", "-", "2", ",", "end", "-", "self", ".", "arrow_width", "+", "2", ",", ")", "arrowhead", "=", "(", "pos1", ",", "y", "+", "2", ",", "pos2", ",", "y", "-", "self", ".", "arrow_width", ",", "pos3", ",", "y", "-", "self", ".", "arrow_width", ",", ")", "return", "\"M{},{} L{},{} {},{}\"", ".", "format", "(", "*", "arrowhead", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
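A standalone trace of the triangle construction, with arrow_width=10 (displacy's documented non-compact default) and an illustrative start point:

arrow_width = 10
x, y = 70, 317                       # arc start point, illustrative
pos1 = x
pos2 = x - arrow_width + 2           # 62
pos3 = x + arrow_width - 2           # 78
head = "M{},{} L{},{} {},{}".format(pos1, y + 2, pos2, y - arrow_width,
                                    pos3, y - arrow_width)
assert head == "M70,319 L62,307 78,307"  # a small triangle at the arc start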
train
DependencyRenderer.get_levels
Calculate available arc height "levels". Used to calculate arrow heights dynamically and without wasting space. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (list): Arc levels sorted from lowest to highest.
spacy/displacy/render.py
def get_levels(self, arcs): """Calculate available arc height "levels". Used to calculate arrow heights dynamically and without wasting space. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (list): Arc levels sorted from lowest to highest. """ levels = set(map(lambda arc: arc["end"] - arc["start"], arcs)) return sorted(list(levels))
def get_levels(self, arcs): """Calculate available arc height "levels". Used to calculate arrow heights dynamically and without wasting space. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (list): Arc levels sorted from lowest to highest. """ levels = set(map(lambda arc: arc["end"] - arc["start"], arcs)) return sorted(list(levels))
[ "Calculate", "available", "arc", "height", "levels", ".", "Used", "to", "calculate", "arrow", "heights", "dynamically", "and", "without", "wasting", "space", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L199-L207
[ "def", "get_levels", "(", "self", ",", "arcs", ")", ":", "levels", "=", "set", "(", "map", "(", "lambda", "arc", ":", "arc", "[", "\"end\"", "]", "-", "arc", "[", "\"start\"", "]", ",", "arcs", ")", ")", "return", "sorted", "(", "list", "(", "levels", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
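The computation is just the set of distinct arc spans, sorted; a tiny check:

arcs = [
    {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
    {"start": 2, "end": 3, "label": "det", "dir": "left"},
    {"start": 1, "end": 3, "label": "dobj", "dir": "right"},
]
levels = sorted(set(arc["end"] - arc["start"] for arc in arcs))
assert levels == [1, 2]  # two height levels suffice for these arcs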
train
EntityRenderer.render
Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered HTML markup.
spacy/displacy/render.py
def render(self, parsed, page=False, minify=False): """Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered HTML markup. """ rendered = [] for i, p in enumerate(parsed): if i == 0: settings = p.get("settings", {}) self.direction = settings.get("direction", DEFAULT_DIR) self.lang = settings.get("lang", DEFAULT_LANG) rendered.append(self.render_ents(p["text"], p["ents"], p.get("title"))) if page: docs = "".join([TPL_FIGURE.format(content=doc) for doc in rendered]) markup = TPL_PAGE.format(content=docs, lang=self.lang, dir=self.direction) else: markup = "".join(rendered) if minify: return minify_html(markup) return markup
def render(self, parsed, page=False, minify=False): """Render complete markup. parsed (list): Dependency parses to render. page (bool): Render parses wrapped as full HTML page. minify (bool): Minify HTML markup. RETURNS (unicode): Rendered HTML markup. """ rendered = [] for i, p in enumerate(parsed): if i == 0: settings = p.get("settings", {}) self.direction = settings.get("direction", DEFAULT_DIR) self.lang = settings.get("lang", DEFAULT_LANG) rendered.append(self.render_ents(p["text"], p["ents"], p.get("title"))) if page: docs = "".join([TPL_FIGURE.format(content=doc) for doc in rendered]) markup = TPL_PAGE.format(content=docs, lang=self.lang, dir=self.direction) else: markup = "".join(rendered) if minify: return minify_html(markup) return markup
[ "Render", "complete", "markup", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L247-L269
[ "def", "render", "(", "self", ",", "parsed", ",", "page", "=", "False", ",", "minify", "=", "False", ")", ":", "rendered", "=", "[", "]", "for", "i", ",", "p", "in", "enumerate", "(", "parsed", ")", ":", "if", "i", "==", "0", ":", "settings", "=", "p", ".", "get", "(", "\"settings\"", ",", "{", "}", ")", "self", ".", "direction", "=", "settings", ".", "get", "(", "\"direction\"", ",", "DEFAULT_DIR", ")", "self", ".", "lang", "=", "settings", ".", "get", "(", "\"lang\"", ",", "DEFAULT_LANG", ")", "rendered", ".", "append", "(", "self", ".", "render_ents", "(", "p", "[", "\"text\"", "]", ",", "p", "[", "\"ents\"", "]", ",", "p", ".", "get", "(", "\"title\"", ")", ")", ")", "if", "page", ":", "docs", "=", "\"\"", ".", "join", "(", "[", "TPL_FIGURE", ".", "format", "(", "content", "=", "doc", ")", "for", "doc", "in", "rendered", "]", ")", "markup", "=", "TPL_PAGE", ".", "format", "(", "content", "=", "docs", ",", "lang", "=", "self", ".", "lang", ",", "dir", "=", "self", ".", "direction", ")", "else", ":", "markup", "=", "\"\"", ".", "join", "(", "rendered", ")", "if", "minify", ":", "return", "minify_html", "(", "markup", ")", "return", "markup" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
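As with the dependency renderer, the usual entry point is displacy.render with style="ent" (the model name is an example):

import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # example model
doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
html = displacy.render(doc, style="ent", page=True)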
train
EntityRenderer.render_ents
Render entities in text. text (unicode): Original text. spans (list): Individual entity spans and their start, end and label. title (unicode or None): Document title set in Doc.user_data['title'].
spacy/displacy/render.py
def render_ents(self, text, spans, title): """Render entities in text. text (unicode): Original text. spans (list): Individual entity spans and their start, end and label. title (unicode or None): Document title set in Doc.user_data['title']. """ markup = "" offset = 0 for span in spans: label = span["label"] start = span["start"] end = span["end"] entity = escape_html(text[start:end]) fragments = text[offset:start].split("\n") for i, fragment in enumerate(fragments): markup += escape_html(fragment) if len(fragments) > 1 and i != len(fragments) - 1: markup += "</br>" if self.ents is None or label.upper() in self.ents: color = self.colors.get(label.upper(), self.default_color) ent_settings = {"label": label, "text": entity, "bg": color} if self.direction == "rtl": markup += TPL_ENT_RTL.format(**ent_settings) else: markup += TPL_ENT.format(**ent_settings) else: markup += entity offset = end markup += escape_html(text[offset:]) markup = TPL_ENTS.format(content=markup, dir=self.direction) if title: markup = TPL_TITLE.format(title=title) + markup return markup
def render_ents(self, text, spans, title): """Render entities in text. text (unicode): Original text. spans (list): Individual entity spans and their start, end and label. title (unicode or None): Document title set in Doc.user_data['title']. """ markup = "" offset = 0 for span in spans: label = span["label"] start = span["start"] end = span["end"] entity = escape_html(text[start:end]) fragments = text[offset:start].split("\n") for i, fragment in enumerate(fragments): markup += escape_html(fragment) if len(fragments) > 1 and i != len(fragments) - 1: markup += "</br>" if self.ents is None or label.upper() in self.ents: color = self.colors.get(label.upper(), self.default_color) ent_settings = {"label": label, "text": entity, "bg": color} if self.direction == "rtl": markup += TPL_ENT_RTL.format(**ent_settings) else: markup += TPL_ENT.format(**ent_settings) else: markup += entity offset = end markup += escape_html(text[offset:]) markup = TPL_ENTS.format(content=markup, dir=self.direction) if title: markup = TPL_TITLE.format(title=title) + markup return markup
[ "Render", "entities", "in", "text", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L271-L304
[ "def", "render_ents", "(", "self", ",", "text", ",", "spans", ",", "title", ")", ":", "markup", "=", "\"\"", "offset", "=", "0", "for", "span", "in", "spans", ":", "label", "=", "span", "[", "\"label\"", "]", "start", "=", "span", "[", "\"start\"", "]", "end", "=", "span", "[", "\"end\"", "]", "entity", "=", "escape_html", "(", "text", "[", "start", ":", "end", "]", ")", "fragments", "=", "text", "[", "offset", ":", "start", "]", ".", "split", "(", "\"\\n\"", ")", "for", "i", ",", "fragment", "in", "enumerate", "(", "fragments", ")", ":", "markup", "+=", "escape_html", "(", "fragment", ")", "if", "len", "(", "fragments", ")", ">", "1", "and", "i", "!=", "len", "(", "fragments", ")", "-", "1", ":", "markup", "+=", "\"</br>\"", "if", "self", ".", "ents", "is", "None", "or", "label", ".", "upper", "(", ")", "in", "self", ".", "ents", ":", "color", "=", "self", ".", "colors", ".", "get", "(", "label", ".", "upper", "(", ")", ",", "self", ".", "default_color", ")", "ent_settings", "=", "{", "\"label\"", ":", "label", ",", "\"text\"", ":", "entity", ",", "\"bg\"", ":", "color", "}", "if", "self", ".", "direction", "==", "\"rtl\"", ":", "markup", "+=", "TPL_ENT_RTL", ".", "format", "(", "*", "*", "ent_settings", ")", "else", ":", "markup", "+=", "TPL_ENT", ".", "format", "(", "*", "*", "ent_settings", ")", "else", ":", "markup", "+=", "entity", "offset", "=", "end", "markup", "+=", "escape_html", "(", "text", "[", "offset", ":", "]", ")", "markup", "=", "TPL_ENTS", ".", "format", "(", "content", "=", "markup", ",", "dir", "=", "self", ".", "direction", ")", "if", "title", ":", "markup", "=", "TPL_TITLE", ".", "format", "(", "title", "=", "title", ")", "+", "markup", "return", "markup" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
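render_ents expects character offsets into the original text, which is exactly what displacy's manual entity mode passes through:

from spacy import displacy

ex = {
    "text": "But Google is starting from behind.",
    "ents": [{"start": 4, "end": 10, "label": "ORG"}],  # text[4:10] == "Google"
    "title": None,
}
html = displacy.render(ex, style="ent", manual=True)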
train
merge_noun_chunks
Merge noun chunks into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged noun chunks. DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
spacy/pipeline/functions.py
def merge_noun_chunks(doc): """Merge noun chunks into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged noun chunks. DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks """ if not doc.is_parsed: return doc with doc.retokenize() as retokenizer: for np in doc.noun_chunks: attrs = {"tag": np.root.tag, "dep": np.root.dep} retokenizer.merge(np, attrs=attrs) return doc
def merge_noun_chunks(doc): """Merge noun chunks into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged noun chunks. DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks """ if not doc.is_parsed: return doc with doc.retokenize() as retokenizer: for np in doc.noun_chunks: attrs = {"tag": np.root.tag, "dep": np.root.dep} retokenizer.merge(np, attrs=attrs) return doc
[ "Merge", "noun", "chunks", "into", "a", "single", "token", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L7-L21
[ "def", "merge_noun_chunks", "(", "doc", ")", ":", "if", "not", "doc", ".", "is_parsed", ":", "return", "doc", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "np", "in", "doc", ".", "noun_chunks", ":", "attrs", "=", "{", "\"tag\"", ":", "np", ".", "root", ".", "tag", ",", "\"dep\"", ":", "np", ".", "root", ".", "dep", "}", "retokenizer", ".", "merge", "(", "np", ",", "attrs", "=", "attrs", ")", "return", "doc" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
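Typical v2 usage adds the function itself to the pipeline, after a parser that can produce noun chunks (the model name is an example):

import spacy
from spacy.pipeline import merge_noun_chunks

nlp = spacy.load("en_core_web_sm")  # example model with a parser
nlp.add_pipe(merge_noun_chunks)     # appended after the parser by default
doc = nlp("I love wild green parrots.")
# "wild green parrots" is now one token, carrying the chunk root's tag and dep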
train
merge_entities
Merge entities into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged entities. DOCS: https://spacy.io/api/pipeline-functions#merge_entities
spacy/pipeline/functions.py
def merge_entities(doc): """Merge entities into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged entities. DOCS: https://spacy.io/api/pipeline-functions#merge_entities """ with doc.retokenize() as retokenizer: for ent in doc.ents: attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label} retokenizer.merge(ent, attrs=attrs) return doc
def merge_entities(doc): """Merge entities into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged entities. DOCS: https://spacy.io/api/pipeline-functions#merge_entities """ with doc.retokenize() as retokenizer: for ent in doc.ents: attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label} retokenizer.merge(ent, attrs=attrs) return doc
[ "Merge", "entities", "into", "a", "single", "token", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L24-L36
[ "def", "merge_entities", "(", "doc", ")", ":", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "ent", "in", "doc", ".", "ents", ":", "attrs", "=", "{", "\"tag\"", ":", "ent", ".", "root", ".", "tag", ",", "\"dep\"", ":", "ent", ".", "root", ".", "dep", ",", "\"ent_type\"", ":", "ent", ".", "label", "}", "retokenizer", ".", "merge", "(", "ent", ",", "attrs", "=", "attrs", ")", "return", "doc" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
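The same pattern works for entities; the merged token keeps the span root's tag and dep and takes the entity label as its ent_type:

import spacy
from spacy.pipeline import merge_entities

nlp = spacy.load("en_core_web_sm")  # example model with an NER component
nlp.add_pipe(merge_entities)
doc = nlp("I like David Bowie")
# "David Bowie" is now a single token with ent_type_ == "PERSON"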
train
merge_subtokens
Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
spacy/pipeline/functions.py
def merge_subtokens(doc, label="subtok"): """Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens """ merger = Matcher(doc.vocab) merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}]) matches = merger(doc) spans = [doc[start : end + 1] for _, start, end in matches] with doc.retokenize() as retokenizer: for span in spans: retokenizer.merge(span) return doc
def merge_subtokens(doc, label="subtok"): """Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens """ merger = Matcher(doc.vocab) merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}]) matches = merger(doc) spans = [doc[start : end + 1] for _, start, end in matches] with doc.retokenize() as retokenizer: for span in spans: retokenizer.merge(span) return doc
[ "Merge", "subtokens", "into", "a", "single", "token", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L39-L55
[ "def", "merge_subtokens", "(", "doc", ",", "label", "=", "\"subtok\"", ")", ":", "merger", "=", "Matcher", "(", "doc", ".", "vocab", ")", "merger", ".", "add", "(", "\"SUBTOK\"", ",", "None", ",", "[", "{", "\"DEP\"", ":", "label", ",", "\"op\"", ":", "\"+\"", "}", "]", ")", "matches", "=", "merger", "(", "doc", ")", "spans", "=", "[", "doc", "[", "start", ":", "end", "+", "1", "]", "for", "_", ",", "start", ",", "end", "in", "matches", "]", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "span", "in", "spans", ":", "retokenizer", ".", "merge", "(", "span", ")", "return", "doc" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
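This component only fires when the parser actually predicts the "subtok" dependency label, e.g. a model trained with the learn_tokens option of the train command below; the model path here is hypothetical:

import spacy
from spacy.pipeline import merge_subtokens

# Assumes a model whose parser was trained to emit "subtok" arcs
nlp = spacy.load("./model-final")   # hypothetical path
nlp.add_pipe(merge_subtokens, after="parser")
doc = nlp("Some text with oversegmented tokens")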
train
train
Train or update a spaCy model. Requires data to be formatted in spaCy's JSON format. To convert data from other formats, use the `spacy convert` command.
spacy/cli/train.py
def train( lang, output_path, train_path, dev_path, raw_text=None, base_model=None, pipeline="tagger,parser,ner", vectors=None, n_iter=30, n_early_stopping=None, n_examples=0, use_gpu=-1, version="0.0.0", meta_path=None, init_tok2vec=None, parser_multitasks="", entity_multitasks="", noise_level=0.0, eval_beam_widths="", gold_preproc=False, learn_tokens=False, verbose=False, debug=False, ): """ Train or update a spaCy model. Requires data to be formatted in spaCy's JSON format. To convert data from other formats, use the `spacy convert` command. """ msg = Printer() util.fix_random_seed() util.set_env_log(verbose) # Make sure all files and paths exists if they are needed train_path = util.ensure_path(train_path) dev_path = util.ensure_path(dev_path) meta_path = util.ensure_path(meta_path) output_path = util.ensure_path(output_path) if raw_text is not None: raw_text = list(srsly.read_jsonl(raw_text)) if not train_path or not train_path.exists(): msg.fail("Training data not found", train_path, exits=1) if not dev_path or not dev_path.exists(): msg.fail("Development data not found", dev_path, exits=1) if meta_path is not None and not meta_path.exists(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta = srsly.read_json(meta_path) if meta_path else {} if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]: msg.warn( "Output directory is not empty", "This can lead to unintended side effects when saving the model. " "Please use an empty directory or a different path instead. If " "the specified output path doesn't exist, the directory will be " "created for you.", ) if not output_path.exists(): output_path.mkdir() # Take dropout and batch size as generators of values -- dropout # starts high and decays sharply, to force the optimizer to explore. # Batch size starts at 1 and grows, so that we make updates quickly # at the beginning of training. dropout_rates = util.decaying( util.env_opt("dropout_from", 0.2), util.env_opt("dropout_to", 0.2), util.env_opt("dropout_decay", 0.0), ) batch_sizes = util.compounding( util.env_opt("batch_from", 100.0), util.env_opt("batch_to", 1000.0), util.env_opt("batch_compound", 1.001), ) if not eval_beam_widths: eval_beam_widths = [1] else: eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")] if 1 not in eval_beam_widths: eval_beam_widths.append(1) eval_beam_widths.sort() has_beam_widths = eval_beam_widths != [1] # Set up the base model and pipeline. If a base model is specified, load # the model and make sure the pipeline matches the pipeline setting. If # training starts from a blank model, intitalize the language class. 
pipeline = [p.strip() for p in pipeline.split(",")] msg.text("Training pipeline: {}".format(pipeline)) if base_model: msg.text("Starting with base model '{}'".format(base_model)) nlp = util.load_model(base_model) if nlp.lang != lang: msg.fail( "Model language ('{}') doesn't match language specified as " "`lang` argument ('{}') ".format(nlp.lang, lang), exits=1, ) other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipeline] nlp.disable_pipes(*other_pipes) for pipe in pipeline: if pipe not in nlp.pipe_names: nlp.add_pipe(nlp.create_pipe(pipe)) else: msg.text("Starting with blank model '{}'".format(lang)) lang_cls = util.get_lang_class(lang) nlp = lang_cls() for pipe in pipeline: nlp.add_pipe(nlp.create_pipe(pipe)) if learn_tokens: nlp.add_pipe(nlp.create_pipe("merge_subtokens")) if vectors: msg.text("Loading vector from model '{}'".format(vectors)) _load_vectors(nlp, vectors) # Multitask objectives multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)] for pipe_name, multitasks in multitask_options: if multitasks: if pipe_name not in pipeline: msg.fail( "Can't use multitask objective without '{}' in the " "pipeline".format(pipe_name) ) pipe = nlp.get_pipe(pipe_name) for objective in multitasks.split(","): pipe.add_multitask_objective(objective) # Prepare training corpus msg.text("Counting training words (limit={})".format(n_examples)) corpus = GoldCorpus(train_path, dev_path, limit=n_examples) n_train_words = corpus.count_train() if base_model: # Start with an existing model, use default optimizer optimizer = create_default_optimizer(Model.ops) else: # Start with a blank model, call begin_training optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu) nlp._optimizer = None # Load in pre-trained weights if init_tok2vec is not None: components = _load_pretrained_tok2vec(nlp, init_tok2vec) msg.text("Loaded pretrained tok2vec for: {}".format(components)) # fmt: off row_head = ["Itn", "Dep Loss", "NER Loss", "UAS", "NER P", "NER R", "NER F", "Tag %", "Token %", "CPU WPS", "GPU WPS"] row_widths = [3, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7] if has_beam_widths: row_head.insert(1, "Beam W.") row_widths.insert(1, 7) row_settings = {"widths": row_widths, "aligns": tuple(["r" for i in row_head]), "spacing": 2} # fmt: on print("") msg.row(row_head, **row_settings) msg.row(["-" * width for width in row_settings["widths"]], **row_settings) try: iter_since_best = 0 best_score = 0.0 for i in range(n_iter): train_docs = corpus.train_docs( nlp, noise_level=noise_level, gold_preproc=gold_preproc, max_length=0 ) if raw_text: random.shuffle(raw_text) raw_batches = util.minibatch( (nlp.make_doc(rt["text"]) for rt in raw_text), size=8 ) words_seen = 0 with tqdm.tqdm(total=n_train_words, leave=False) as pbar: losses = {} for batch in util.minibatch_by_words(train_docs, size=batch_sizes): if not batch: continue docs, golds = zip(*batch) nlp.update( docs, golds, sgd=optimizer, drop=next(dropout_rates), losses=losses, ) if raw_text: # If raw text is available, perform 'rehearsal' updates, # which use unlabelled data to reduce overfitting. 
raw_batch = list(next(raw_batches)) nlp.rehearse(raw_batch, sgd=optimizer, losses=losses) if not int(os.environ.get("LOG_FRIENDLY", 0)): pbar.update(sum(len(doc) for doc in docs)) words_seen += sum(len(doc) for doc in docs) with nlp.use_params(optimizer.averages): util.set_env_log(False) epoch_model_path = output_path / ("model%d" % i) nlp.to_disk(epoch_model_path) nlp_loaded = util.load_model_from_path(epoch_model_path) for beam_width in eval_beam_widths: for name, component in nlp_loaded.pipeline: if hasattr(component, "cfg"): component.cfg["beam_width"] = beam_width dev_docs = list( corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc) ) nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs) start_time = timer() scorer = nlp_loaded.evaluate(dev_docs, debug) end_time = timer() if use_gpu < 0: gpu_wps = None cpu_wps = nwords / (end_time - start_time) else: gpu_wps = nwords / (end_time - start_time) with Model.use_device("cpu"): nlp_loaded = util.load_model_from_path(epoch_model_path) for name, component in nlp_loaded.pipeline: if hasattr(component, "cfg"): component.cfg["beam_width"] = beam_width dev_docs = list( corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc) ) start_time = timer() scorer = nlp_loaded.evaluate(dev_docs) end_time = timer() cpu_wps = nwords / (end_time - start_time) acc_loc = output_path / ("model%d" % i) / "accuracy.json" srsly.write_json(acc_loc, scorer.scores) # Update model meta.json meta["lang"] = nlp.lang meta["pipeline"] = nlp.pipe_names meta["spacy_version"] = ">=%s" % about.__version__ if beam_width == 1: meta["speed"] = { "nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps, } meta["accuracy"] = scorer.scores else: meta.setdefault("beam_accuracy", {}) meta.setdefault("beam_speed", {}) meta["beam_accuracy"][beam_width] = scorer.scores meta["beam_speed"][beam_width] = { "nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps, } meta["vectors"] = { "width": nlp.vocab.vectors_length, "vectors": len(nlp.vocab.vectors), "keys": nlp.vocab.vectors.n_keys, "name": nlp.vocab.vectors.name, } meta.setdefault("name", "model%d" % i) meta.setdefault("version", version) meta_loc = output_path / ("model%d" % i) / "meta.json" srsly.write_json(meta_loc, meta) util.set_env_log(verbose) progress = _get_progress( i, losses, scorer.scores, beam_width=beam_width if has_beam_widths else None, cpu_wps=cpu_wps, gpu_wps=gpu_wps, ) msg.row(progress, **row_settings) # Early stopping if n_early_stopping is not None: current_score = _score_for_model(meta) if current_score < best_score: iter_since_best += 1 else: iter_since_best = 0 best_score = current_score if iter_since_best >= n_early_stopping: msg.text( "Early stopping, best iteration " "is: {}".format(i - iter_since_best) ) msg.text( "Best score = {}; Final iteration " "score = {}".format(best_score, current_score) ) break finally: with nlp.use_params(optimizer.averages): final_model_path = output_path / "model-final" nlp.to_disk(final_model_path) msg.good("Saved model to output directory", final_model_path) with msg.loading("Creating best model..."): best_model_path = _collate_best_model(meta, output_path, nlp.pipe_names) msg.good("Created best model", best_model_path)
def train( lang, output_path, train_path, dev_path, raw_text=None, base_model=None, pipeline="tagger,parser,ner", vectors=None, n_iter=30, n_early_stopping=None, n_examples=0, use_gpu=-1, version="0.0.0", meta_path=None, init_tok2vec=None, parser_multitasks="", entity_multitasks="", noise_level=0.0, eval_beam_widths="", gold_preproc=False, learn_tokens=False, verbose=False, debug=False, ): """ Train or update a spaCy model. Requires data to be formatted in spaCy's JSON format. To convert data from other formats, use the `spacy convert` command. """ msg = Printer() util.fix_random_seed() util.set_env_log(verbose) # Make sure all files and paths exists if they are needed train_path = util.ensure_path(train_path) dev_path = util.ensure_path(dev_path) meta_path = util.ensure_path(meta_path) output_path = util.ensure_path(output_path) if raw_text is not None: raw_text = list(srsly.read_jsonl(raw_text)) if not train_path or not train_path.exists(): msg.fail("Training data not found", train_path, exits=1) if not dev_path or not dev_path.exists(): msg.fail("Development data not found", dev_path, exits=1) if meta_path is not None and not meta_path.exists(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta = srsly.read_json(meta_path) if meta_path else {} if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]: msg.warn( "Output directory is not empty", "This can lead to unintended side effects when saving the model. " "Please use an empty directory or a different path instead. If " "the specified output path doesn't exist, the directory will be " "created for you.", ) if not output_path.exists(): output_path.mkdir() # Take dropout and batch size as generators of values -- dropout # starts high and decays sharply, to force the optimizer to explore. # Batch size starts at 1 and grows, so that we make updates quickly # at the beginning of training. dropout_rates = util.decaying( util.env_opt("dropout_from", 0.2), util.env_opt("dropout_to", 0.2), util.env_opt("dropout_decay", 0.0), ) batch_sizes = util.compounding( util.env_opt("batch_from", 100.0), util.env_opt("batch_to", 1000.0), util.env_opt("batch_compound", 1.001), ) if not eval_beam_widths: eval_beam_widths = [1] else: eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")] if 1 not in eval_beam_widths: eval_beam_widths.append(1) eval_beam_widths.sort() has_beam_widths = eval_beam_widths != [1] # Set up the base model and pipeline. If a base model is specified, load # the model and make sure the pipeline matches the pipeline setting. If # training starts from a blank model, intitalize the language class. 
pipeline = [p.strip() for p in pipeline.split(",")] msg.text("Training pipeline: {}".format(pipeline)) if base_model: msg.text("Starting with base model '{}'".format(base_model)) nlp = util.load_model(base_model) if nlp.lang != lang: msg.fail( "Model language ('{}') doesn't match language specified as " "`lang` argument ('{}') ".format(nlp.lang, lang), exits=1, ) other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipeline] nlp.disable_pipes(*other_pipes) for pipe in pipeline: if pipe not in nlp.pipe_names: nlp.add_pipe(nlp.create_pipe(pipe)) else: msg.text("Starting with blank model '{}'".format(lang)) lang_cls = util.get_lang_class(lang) nlp = lang_cls() for pipe in pipeline: nlp.add_pipe(nlp.create_pipe(pipe)) if learn_tokens: nlp.add_pipe(nlp.create_pipe("merge_subtokens")) if vectors: msg.text("Loading vector from model '{}'".format(vectors)) _load_vectors(nlp, vectors) # Multitask objectives multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)] for pipe_name, multitasks in multitask_options: if multitasks: if pipe_name not in pipeline: msg.fail( "Can't use multitask objective without '{}' in the " "pipeline".format(pipe_name) ) pipe = nlp.get_pipe(pipe_name) for objective in multitasks.split(","): pipe.add_multitask_objective(objective) # Prepare training corpus msg.text("Counting training words (limit={})".format(n_examples)) corpus = GoldCorpus(train_path, dev_path, limit=n_examples) n_train_words = corpus.count_train() if base_model: # Start with an existing model, use default optimizer optimizer = create_default_optimizer(Model.ops) else: # Start with a blank model, call begin_training optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu) nlp._optimizer = None # Load in pre-trained weights if init_tok2vec is not None: components = _load_pretrained_tok2vec(nlp, init_tok2vec) msg.text("Loaded pretrained tok2vec for: {}".format(components)) # fmt: off row_head = ["Itn", "Dep Loss", "NER Loss", "UAS", "NER P", "NER R", "NER F", "Tag %", "Token %", "CPU WPS", "GPU WPS"] row_widths = [3, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7] if has_beam_widths: row_head.insert(1, "Beam W.") row_widths.insert(1, 7) row_settings = {"widths": row_widths, "aligns": tuple(["r" for i in row_head]), "spacing": 2} # fmt: on print("") msg.row(row_head, **row_settings) msg.row(["-" * width for width in row_settings["widths"]], **row_settings) try: iter_since_best = 0 best_score = 0.0 for i in range(n_iter): train_docs = corpus.train_docs( nlp, noise_level=noise_level, gold_preproc=gold_preproc, max_length=0 ) if raw_text: random.shuffle(raw_text) raw_batches = util.minibatch( (nlp.make_doc(rt["text"]) for rt in raw_text), size=8 ) words_seen = 0 with tqdm.tqdm(total=n_train_words, leave=False) as pbar: losses = {} for batch in util.minibatch_by_words(train_docs, size=batch_sizes): if not batch: continue docs, golds = zip(*batch) nlp.update( docs, golds, sgd=optimizer, drop=next(dropout_rates), losses=losses, ) if raw_text: # If raw text is available, perform 'rehearsal' updates, # which use unlabelled data to reduce overfitting. 
raw_batch = list(next(raw_batches)) nlp.rehearse(raw_batch, sgd=optimizer, losses=losses) if not int(os.environ.get("LOG_FRIENDLY", 0)): pbar.update(sum(len(doc) for doc in docs)) words_seen += sum(len(doc) for doc in docs) with nlp.use_params(optimizer.averages): util.set_env_log(False) epoch_model_path = output_path / ("model%d" % i) nlp.to_disk(epoch_model_path) nlp_loaded = util.load_model_from_path(epoch_model_path) for beam_width in eval_beam_widths: for name, component in nlp_loaded.pipeline: if hasattr(component, "cfg"): component.cfg["beam_width"] = beam_width dev_docs = list( corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc) ) nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs) start_time = timer() scorer = nlp_loaded.evaluate(dev_docs, debug) end_time = timer() if use_gpu < 0: gpu_wps = None cpu_wps = nwords / (end_time - start_time) else: gpu_wps = nwords / (end_time - start_time) with Model.use_device("cpu"): nlp_loaded = util.load_model_from_path(epoch_model_path) for name, component in nlp_loaded.pipeline: if hasattr(component, "cfg"): component.cfg["beam_width"] = beam_width dev_docs = list( corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc) ) start_time = timer() scorer = nlp_loaded.evaluate(dev_docs) end_time = timer() cpu_wps = nwords / (end_time - start_time) acc_loc = output_path / ("model%d" % i) / "accuracy.json" srsly.write_json(acc_loc, scorer.scores) # Update model meta.json meta["lang"] = nlp.lang meta["pipeline"] = nlp.pipe_names meta["spacy_version"] = ">=%s" % about.__version__ if beam_width == 1: meta["speed"] = { "nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps, } meta["accuracy"] = scorer.scores else: meta.setdefault("beam_accuracy", {}) meta.setdefault("beam_speed", {}) meta["beam_accuracy"][beam_width] = scorer.scores meta["beam_speed"][beam_width] = { "nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps, } meta["vectors"] = { "width": nlp.vocab.vectors_length, "vectors": len(nlp.vocab.vectors), "keys": nlp.vocab.vectors.n_keys, "name": nlp.vocab.vectors.name, } meta.setdefault("name", "model%d" % i) meta.setdefault("version", version) meta_loc = output_path / ("model%d" % i) / "meta.json" srsly.write_json(meta_loc, meta) util.set_env_log(verbose) progress = _get_progress( i, losses, scorer.scores, beam_width=beam_width if has_beam_widths else None, cpu_wps=cpu_wps, gpu_wps=gpu_wps, ) msg.row(progress, **row_settings) # Early stopping if n_early_stopping is not None: current_score = _score_for_model(meta) if current_score < best_score: iter_since_best += 1 else: iter_since_best = 0 best_score = current_score if iter_since_best >= n_early_stopping: msg.text( "Early stopping, best iteration " "is: {}".format(i - iter_since_best) ) msg.text( "Best score = {}; Final iteration " "score = {}".format(best_score, current_score) ) break finally: with nlp.use_params(optimizer.averages): final_model_path = output_path / "model-final" nlp.to_disk(final_model_path) msg.good("Saved model to output directory", final_model_path) with msg.loading("Creating best model..."): best_model_path = _collate_best_model(meta, output_path, nlp.pipe_names) msg.good("Created best model", best_model_path)
[ "Train", "or", "update", "a", "spaCy", "model", ".", "Requires", "data", "to", "be", "formatted", "in", "spaCy", "s", "JSON", "format", ".", "To", "convert", "data", "from", "other", "formats", "use", "the", "spacy", "convert", "command", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L73-L368
[ "def", "train", "(", "lang", ",", "output_path", ",", "train_path", ",", "dev_path", ",", "raw_text", "=", "None", ",", "base_model", "=", "None", ",", "pipeline", "=", "\"tagger,parser,ner\"", ",", "vectors", "=", "None", ",", "n_iter", "=", "30", ",", "n_early_stopping", "=", "None", ",", "n_examples", "=", "0", ",", "use_gpu", "=", "-", "1", ",", "version", "=", "\"0.0.0\"", ",", "meta_path", "=", "None", ",", "init_tok2vec", "=", "None", ",", "parser_multitasks", "=", "\"\"", ",", "entity_multitasks", "=", "\"\"", ",", "noise_level", "=", "0.0", ",", "eval_beam_widths", "=", "\"\"", ",", "gold_preproc", "=", "False", ",", "learn_tokens", "=", "False", ",", "verbose", "=", "False", ",", "debug", "=", "False", ",", ")", ":", "msg", "=", "Printer", "(", ")", "util", ".", "fix_random_seed", "(", ")", "util", ".", "set_env_log", "(", "verbose", ")", "# Make sure all files and paths exists if they are needed", "train_path", "=", "util", ".", "ensure_path", "(", "train_path", ")", "dev_path", "=", "util", ".", "ensure_path", "(", "dev_path", ")", "meta_path", "=", "util", ".", "ensure_path", "(", "meta_path", ")", "output_path", "=", "util", ".", "ensure_path", "(", "output_path", ")", "if", "raw_text", "is", "not", "None", ":", "raw_text", "=", "list", "(", "srsly", ".", "read_jsonl", "(", "raw_text", ")", ")", "if", "not", "train_path", "or", "not", "train_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Training data not found\"", ",", "train_path", ",", "exits", "=", "1", ")", "if", "not", "dev_path", "or", "not", "dev_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Development data not found\"", ",", "dev_path", ",", "exits", "=", "1", ")", "if", "meta_path", "is", "not", "None", "and", "not", "meta_path", ".", "exists", "(", ")", ":", "msg", ".", "fail", "(", "\"Can't find model meta.json\"", ",", "meta_path", ",", "exits", "=", "1", ")", "meta", "=", "srsly", ".", "read_json", "(", "meta_path", ")", "if", "meta_path", "else", "{", "}", "if", "output_path", ".", "exists", "(", ")", "and", "[", "p", "for", "p", "in", "output_path", ".", "iterdir", "(", ")", "if", "p", ".", "is_dir", "(", ")", "]", ":", "msg", ".", "warn", "(", "\"Output directory is not empty\"", ",", "\"This can lead to unintended side effects when saving the model. \"", "\"Please use an empty directory or a different path instead. 
If \"", "\"the specified output path doesn't exist, the directory will be \"", "\"created for you.\"", ",", ")", "if", "not", "output_path", ".", "exists", "(", ")", ":", "output_path", ".", "mkdir", "(", ")", "# Take dropout and batch size as generators of values -- dropout", "# starts high and decays sharply, to force the optimizer to explore.", "# Batch size starts at 1 and grows, so that we make updates quickly", "# at the beginning of training.", "dropout_rates", "=", "util", ".", "decaying", "(", "util", ".", "env_opt", "(", "\"dropout_from\"", ",", "0.2", ")", ",", "util", ".", "env_opt", "(", "\"dropout_to\"", ",", "0.2", ")", ",", "util", ".", "env_opt", "(", "\"dropout_decay\"", ",", "0.0", ")", ",", ")", "batch_sizes", "=", "util", ".", "compounding", "(", "util", ".", "env_opt", "(", "\"batch_from\"", ",", "100.0", ")", ",", "util", ".", "env_opt", "(", "\"batch_to\"", ",", "1000.0", ")", ",", "util", ".", "env_opt", "(", "\"batch_compound\"", ",", "1.001", ")", ",", ")", "if", "not", "eval_beam_widths", ":", "eval_beam_widths", "=", "[", "1", "]", "else", ":", "eval_beam_widths", "=", "[", "int", "(", "bw", ")", "for", "bw", "in", "eval_beam_widths", ".", "split", "(", "\",\"", ")", "]", "if", "1", "not", "in", "eval_beam_widths", ":", "eval_beam_widths", ".", "append", "(", "1", ")", "eval_beam_widths", ".", "sort", "(", ")", "has_beam_widths", "=", "eval_beam_widths", "!=", "[", "1", "]", "# Set up the base model and pipeline. If a base model is specified, load", "# the model and make sure the pipeline matches the pipeline setting. If", "# training starts from a blank model, intitalize the language class.", "pipeline", "=", "[", "p", ".", "strip", "(", ")", "for", "p", "in", "pipeline", ".", "split", "(", "\",\"", ")", "]", "msg", ".", "text", "(", "\"Training pipeline: {}\"", ".", "format", "(", "pipeline", ")", ")", "if", "base_model", ":", "msg", ".", "text", "(", "\"Starting with base model '{}'\"", ".", "format", "(", "base_model", ")", ")", "nlp", "=", "util", ".", "load_model", "(", "base_model", ")", "if", "nlp", ".", "lang", "!=", "lang", ":", "msg", ".", "fail", "(", "\"Model language ('{}') doesn't match language specified as \"", "\"`lang` argument ('{}') \"", ".", "format", "(", "nlp", ".", "lang", ",", "lang", ")", ",", "exits", "=", "1", ",", ")", "other_pipes", "=", "[", "pipe", "for", "pipe", "in", "nlp", ".", "pipe_names", "if", "pipe", "not", "in", "pipeline", "]", "nlp", ".", "disable_pipes", "(", "*", "other_pipes", ")", "for", "pipe", "in", "pipeline", ":", "if", "pipe", "not", "in", "nlp", ".", "pipe_names", ":", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "pipe", ")", ")", "else", ":", "msg", ".", "text", "(", "\"Starting with blank model '{}'\"", ".", "format", "(", "lang", ")", ")", "lang_cls", "=", "util", ".", "get_lang_class", "(", "lang", ")", "nlp", "=", "lang_cls", "(", ")", "for", "pipe", "in", "pipeline", ":", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "pipe", ")", ")", "if", "learn_tokens", ":", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "\"merge_subtokens\"", ")", ")", "if", "vectors", ":", "msg", ".", "text", "(", "\"Loading vector from model '{}'\"", ".", "format", "(", "vectors", ")", ")", "_load_vectors", "(", "nlp", ",", "vectors", ")", "# Multitask objectives", "multitask_options", "=", "[", "(", "\"parser\"", ",", "parser_multitasks", ")", ",", "(", "\"ner\"", ",", "entity_multitasks", ")", "]", "for", "pipe_name", ",", "multitasks", "in", "multitask_options", ":", "if", 
"multitasks", ":", "if", "pipe_name", "not", "in", "pipeline", ":", "msg", ".", "fail", "(", "\"Can't use multitask objective without '{}' in the \"", "\"pipeline\"", ".", "format", "(", "pipe_name", ")", ")", "pipe", "=", "nlp", ".", "get_pipe", "(", "pipe_name", ")", "for", "objective", "in", "multitasks", ".", "split", "(", "\",\"", ")", ":", "pipe", ".", "add_multitask_objective", "(", "objective", ")", "# Prepare training corpus", "msg", ".", "text", "(", "\"Counting training words (limit={})\"", ".", "format", "(", "n_examples", ")", ")", "corpus", "=", "GoldCorpus", "(", "train_path", ",", "dev_path", ",", "limit", "=", "n_examples", ")", "n_train_words", "=", "corpus", ".", "count_train", "(", ")", "if", "base_model", ":", "# Start with an existing model, use default optimizer", "optimizer", "=", "create_default_optimizer", "(", "Model", ".", "ops", ")", "else", ":", "# Start with a blank model, call begin_training", "optimizer", "=", "nlp", ".", "begin_training", "(", "lambda", ":", "corpus", ".", "train_tuples", ",", "device", "=", "use_gpu", ")", "nlp", ".", "_optimizer", "=", "None", "# Load in pre-trained weights", "if", "init_tok2vec", "is", "not", "None", ":", "components", "=", "_load_pretrained_tok2vec", "(", "nlp", ",", "init_tok2vec", ")", "msg", ".", "text", "(", "\"Loaded pretrained tok2vec for: {}\"", ".", "format", "(", "components", ")", ")", "# fmt: off", "row_head", "=", "[", "\"Itn\"", ",", "\"Dep Loss\"", ",", "\"NER Loss\"", ",", "\"UAS\"", ",", "\"NER P\"", ",", "\"NER R\"", ",", "\"NER F\"", ",", "\"Tag %\"", ",", "\"Token %\"", ",", "\"CPU WPS\"", ",", "\"GPU WPS\"", "]", "row_widths", "=", "[", "3", ",", "10", ",", "10", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", "]", "if", "has_beam_widths", ":", "row_head", ".", "insert", "(", "1", ",", "\"Beam W.\"", ")", "row_widths", ".", "insert", "(", "1", ",", "7", ")", "row_settings", "=", "{", "\"widths\"", ":", "row_widths", ",", "\"aligns\"", ":", "tuple", "(", "[", "\"r\"", "for", "i", "in", "row_head", "]", ")", ",", "\"spacing\"", ":", "2", "}", "# fmt: on", "print", "(", "\"\"", ")", "msg", ".", "row", "(", "row_head", ",", "*", "*", "row_settings", ")", "msg", ".", "row", "(", "[", "\"-\"", "*", "width", "for", "width", "in", "row_settings", "[", "\"widths\"", "]", "]", ",", "*", "*", "row_settings", ")", "try", ":", "iter_since_best", "=", "0", "best_score", "=", "0.0", "for", "i", "in", "range", "(", "n_iter", ")", ":", "train_docs", "=", "corpus", ".", "train_docs", "(", "nlp", ",", "noise_level", "=", "noise_level", ",", "gold_preproc", "=", "gold_preproc", ",", "max_length", "=", "0", ")", "if", "raw_text", ":", "random", ".", "shuffle", "(", "raw_text", ")", "raw_batches", "=", "util", ".", "minibatch", "(", "(", "nlp", ".", "make_doc", "(", "rt", "[", "\"text\"", "]", ")", "for", "rt", "in", "raw_text", ")", ",", "size", "=", "8", ")", "words_seen", "=", "0", "with", "tqdm", ".", "tqdm", "(", "total", "=", "n_train_words", ",", "leave", "=", "False", ")", "as", "pbar", ":", "losses", "=", "{", "}", "for", "batch", "in", "util", ".", "minibatch_by_words", "(", "train_docs", ",", "size", "=", "batch_sizes", ")", ":", "if", "not", "batch", ":", "continue", "docs", ",", "golds", "=", "zip", "(", "*", "batch", ")", "nlp", ".", "update", "(", "docs", ",", "golds", ",", "sgd", "=", "optimizer", ",", "drop", "=", "next", "(", "dropout_rates", ")", ",", "losses", "=", "losses", ",", ")", "if", "raw_text", ":", "# If raw text is available, perform 'rehearsal' updates,", "# 
which use unlabelled data to reduce overfitting.", "raw_batch", "=", "list", "(", "next", "(", "raw_batches", ")", ")", "nlp", ".", "rehearse", "(", "raw_batch", ",", "sgd", "=", "optimizer", ",", "losses", "=", "losses", ")", "if", "not", "int", "(", "os", ".", "environ", ".", "get", "(", "\"LOG_FRIENDLY\"", ",", "0", ")", ")", ":", "pbar", ".", "update", "(", "sum", "(", "len", "(", "doc", ")", "for", "doc", "in", "docs", ")", ")", "words_seen", "+=", "sum", "(", "len", "(", "doc", ")", "for", "doc", "in", "docs", ")", "with", "nlp", ".", "use_params", "(", "optimizer", ".", "averages", ")", ":", "util", ".", "set_env_log", "(", "False", ")", "epoch_model_path", "=", "output_path", "/", "(", "\"model%d\"", "%", "i", ")", "nlp", ".", "to_disk", "(", "epoch_model_path", ")", "nlp_loaded", "=", "util", ".", "load_model_from_path", "(", "epoch_model_path", ")", "for", "beam_width", "in", "eval_beam_widths", ":", "for", "name", ",", "component", "in", "nlp_loaded", ".", "pipeline", ":", "if", "hasattr", "(", "component", ",", "\"cfg\"", ")", ":", "component", ".", "cfg", "[", "\"beam_width\"", "]", "=", "beam_width", "dev_docs", "=", "list", "(", "corpus", ".", "dev_docs", "(", "nlp_loaded", ",", "gold_preproc", "=", "gold_preproc", ")", ")", "nwords", "=", "sum", "(", "len", "(", "doc_gold", "[", "0", "]", ")", "for", "doc_gold", "in", "dev_docs", ")", "start_time", "=", "timer", "(", ")", "scorer", "=", "nlp_loaded", ".", "evaluate", "(", "dev_docs", ",", "debug", ")", "end_time", "=", "timer", "(", ")", "if", "use_gpu", "<", "0", ":", "gpu_wps", "=", "None", "cpu_wps", "=", "nwords", "/", "(", "end_time", "-", "start_time", ")", "else", ":", "gpu_wps", "=", "nwords", "/", "(", "end_time", "-", "start_time", ")", "with", "Model", ".", "use_device", "(", "\"cpu\"", ")", ":", "nlp_loaded", "=", "util", ".", "load_model_from_path", "(", "epoch_model_path", ")", "for", "name", ",", "component", "in", "nlp_loaded", ".", "pipeline", ":", "if", "hasattr", "(", "component", ",", "\"cfg\"", ")", ":", "component", ".", "cfg", "[", "\"beam_width\"", "]", "=", "beam_width", "dev_docs", "=", "list", "(", "corpus", ".", "dev_docs", "(", "nlp_loaded", ",", "gold_preproc", "=", "gold_preproc", ")", ")", "start_time", "=", "timer", "(", ")", "scorer", "=", "nlp_loaded", ".", "evaluate", "(", "dev_docs", ")", "end_time", "=", "timer", "(", ")", "cpu_wps", "=", "nwords", "/", "(", "end_time", "-", "start_time", ")", "acc_loc", "=", "output_path", "/", "(", "\"model%d\"", "%", "i", ")", "/", "\"accuracy.json\"", "srsly", ".", "write_json", "(", "acc_loc", ",", "scorer", ".", "scores", ")", "# Update model meta.json", "meta", "[", "\"lang\"", "]", "=", "nlp", ".", "lang", "meta", "[", "\"pipeline\"", "]", "=", "nlp", ".", "pipe_names", "meta", "[", "\"spacy_version\"", "]", "=", "\">=%s\"", "%", "about", ".", "__version__", "if", "beam_width", "==", "1", ":", "meta", "[", "\"speed\"", "]", "=", "{", "\"nwords\"", ":", "nwords", ",", "\"cpu\"", ":", "cpu_wps", ",", "\"gpu\"", ":", "gpu_wps", ",", "}", "meta", "[", "\"accuracy\"", "]", "=", "scorer", ".", "scores", "else", ":", "meta", ".", "setdefault", "(", "\"beam_accuracy\"", ",", "{", "}", ")", "meta", ".", "setdefault", "(", "\"beam_speed\"", ",", "{", "}", ")", "meta", "[", "\"beam_accuracy\"", "]", "[", "beam_width", "]", "=", "scorer", ".", "scores", "meta", "[", "\"beam_speed\"", "]", "[", "beam_width", "]", "=", "{", "\"nwords\"", ":", "nwords", ",", "\"cpu\"", ":", "cpu_wps", ",", "\"gpu\"", ":", "gpu_wps", ",", "}", "meta", "[", "\"vectors\"", "]", 
"=", "{", "\"width\"", ":", "nlp", ".", "vocab", ".", "vectors_length", ",", "\"vectors\"", ":", "len", "(", "nlp", ".", "vocab", ".", "vectors", ")", ",", "\"keys\"", ":", "nlp", ".", "vocab", ".", "vectors", ".", "n_keys", ",", "\"name\"", ":", "nlp", ".", "vocab", ".", "vectors", ".", "name", ",", "}", "meta", ".", "setdefault", "(", "\"name\"", ",", "\"model%d\"", "%", "i", ")", "meta", ".", "setdefault", "(", "\"version\"", ",", "version", ")", "meta_loc", "=", "output_path", "/", "(", "\"model%d\"", "%", "i", ")", "/", "\"meta.json\"", "srsly", ".", "write_json", "(", "meta_loc", ",", "meta", ")", "util", ".", "set_env_log", "(", "verbose", ")", "progress", "=", "_get_progress", "(", "i", ",", "losses", ",", "scorer", ".", "scores", ",", "beam_width", "=", "beam_width", "if", "has_beam_widths", "else", "None", ",", "cpu_wps", "=", "cpu_wps", ",", "gpu_wps", "=", "gpu_wps", ",", ")", "msg", ".", "row", "(", "progress", ",", "*", "*", "row_settings", ")", "# Early stopping", "if", "n_early_stopping", "is", "not", "None", ":", "current_score", "=", "_score_for_model", "(", "meta", ")", "if", "current_score", "<", "best_score", ":", "iter_since_best", "+=", "1", "else", ":", "iter_since_best", "=", "0", "best_score", "=", "current_score", "if", "iter_since_best", ">=", "n_early_stopping", ":", "msg", ".", "text", "(", "\"Early stopping, best iteration \"", "\"is: {}\"", ".", "format", "(", "i", "-", "iter_since_best", ")", ")", "msg", ".", "text", "(", "\"Best score = {}; Final iteration \"", "\"score = {}\"", ".", "format", "(", "best_score", ",", "current_score", ")", ")", "break", "finally", ":", "with", "nlp", ".", "use_params", "(", "optimizer", ".", "averages", ")", ":", "final_model_path", "=", "output_path", "/", "\"model-final\"", "nlp", ".", "to_disk", "(", "final_model_path", ")", "msg", ".", "good", "(", "\"Saved model to output directory\"", ",", "final_model_path", ")", "with", "msg", ".", "loading", "(", "\"Creating best model...\"", ")", ":", "best_model_path", "=", "_collate_best_model", "(", "meta", ",", "output_path", ",", "nlp", ".", "pipe_names", ")", "msg", ".", "good", "(", "\"Created best model\"", ",", "best_model_path", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
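A minimal sketch of driving the train entry point above programmatically rather than through the command line, assuming spaCy at this sha (v2.1.x); all paths and the "en" language code are placeholders, not part of the record.

from spacy.cli import train

# Train a blank English tagger/parser/NER pipeline from spaCy-format JSON
# corpora; output_path receives model0, model1, ... plus model-final and a
# collated best model.
train(
    lang="en",
    output_path="/tmp/model-out",    # hypothetical output directory
    train_path="/tmp/train.json",    # hypothetical spaCy JSON training corpus
    dev_path="/tmp/dev.json",        # hypothetical spaCy JSON development corpus
    n_iter=10,
    eval_beam_widths="1,4",          # also evaluate with beam width 4
)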
train
_score_for_model
Returns mean score between tasks in pipeline that can be used for early stopping.
spacy/cli/train.py
def _score_for_model(meta): """ Returns mean score between tasks in pipeline that can be used for early stopping. """ mean_acc = list() pipes = meta["pipeline"] acc = meta["accuracy"] if "tagger" in pipes: mean_acc.append(acc["tags_acc"]) if "parser" in pipes: mean_acc.append((acc["uas"] + acc["las"]) / 2) if "ner" in pipes: mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3) return sum(mean_acc) / len(mean_acc)
def _score_for_model(meta): """ Returns mean score between tasks in pipeline that can be used for early stopping. """ mean_acc = list() pipes = meta["pipeline"] acc = meta["accuracy"] if "tagger" in pipes: mean_acc.append(acc["tags_acc"]) if "parser" in pipes: mean_acc.append((acc["uas"] + acc["las"]) / 2) if "ner" in pipes: mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3) return sum(mean_acc) / len(mean_acc)
[ "Returns", "mean", "score", "between", "tasks", "in", "pipeline", "that", "can", "be", "used", "for", "early", "stopping", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L371-L382
[ "def", "_score_for_model", "(", "meta", ")", ":", "mean_acc", "=", "list", "(", ")", "pipes", "=", "meta", "[", "\"pipeline\"", "]", "acc", "=", "meta", "[", "\"accuracy\"", "]", "if", "\"tagger\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "acc", "[", "\"tags_acc\"", "]", ")", "if", "\"parser\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "(", "acc", "[", "\"uas\"", "]", "+", "acc", "[", "\"las\"", "]", ")", "/", "2", ")", "if", "\"ner\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "(", "acc", "[", "\"ents_p\"", "]", "+", "acc", "[", "\"ents_r\"", "]", "+", "acc", "[", "\"ents_f\"", "]", ")", "/", "3", ")", "return", "sum", "(", "mean_acc", ")", "/", "len", "(", "mean_acc", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
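A small worked example of the early-stopping score computed above, assuming _score_for_model is in scope; the accuracy numbers are invented for illustration.

meta = {
    "pipeline": ["tagger", "parser", "ner"],
    "accuracy": {
        "tags_acc": 92.0,                                # tagger contributes 92.0
        "uas": 88.0, "las": 86.0,                        # parser contributes (88 + 86) / 2 = 87.0
        "ents_p": 80.0, "ents_r": 78.0, "ents_f": 79.0,  # NER contributes (80 + 78 + 79) / 3 = 79.0
    },
}
print(_score_for_model(meta))  # (92.0 + 87.0 + 79.0) / 3 == 86.0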
train
_load_pretrained_tok2vec
Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental.
spacy/cli/train.py
def _load_pretrained_tok2vec(nlp, loc): """Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental. """ with loc.open("rb") as file_: weights_data = file_.read() loaded = [] for name, component in nlp.pipeline: if hasattr(component, "model") and hasattr(component.model, "tok2vec"): component.tok2vec.from_bytes(weights_data) loaded.append(name) return loaded
def _load_pretrained_tok2vec(nlp, loc): """Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental. """ with loc.open("rb") as file_: weights_data = file_.read() loaded = [] for name, component in nlp.pipeline: if hasattr(component, "model") and hasattr(component.model, "tok2vec"): component.tok2vec.from_bytes(weights_data) loaded.append(name) return loaded
[ "Load", "pre", "-", "trained", "weights", "for", "the", "token", "-", "to", "-", "vector", "part", "of", "the", "component", "models", "which", "is", "typically", "a", "CNN", ".", "See", "spacy", "pretrain", ".", "Experimental", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L407-L418
[ "def", "_load_pretrained_tok2vec", "(", "nlp", ",", "loc", ")", ":", "with", "loc", ".", "open", "(", "\"rb\"", ")", "as", "file_", ":", "weights_data", "=", "file_", ".", "read", "(", ")", "loaded", "=", "[", "]", "for", "name", ",", "component", "in", "nlp", ".", "pipeline", ":", "if", "hasattr", "(", "component", ",", "\"model\"", ")", "and", "hasattr", "(", "component", ".", "model", ",", "\"tok2vec\"", ")", ":", "component", ".", "tok2vec", ".", "from_bytes", "(", "weights_data", ")", "loaded", ".", "append", "(", "name", ")", "return", "loaded" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
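A usage sketch, assuming _load_pretrained_tok2vec is in scope, a weights file previously written by `spacy pretrain`, and a pipeline whose component models are already initialized; the model name and path are hypothetical, and `loc` must be a pathlib.Path because the helper calls `.open("rb")` on it.

from pathlib import Path
import spacy

nlp = spacy.load("en_core_web_sm")  # hypothetical: any pipeline with initialized models
loaded = _load_pretrained_tok2vec(nlp, Path("/tmp/pretrain/model9.bin"))  # hypothetical weights file
print("tok2vec weights loaded into:", loaded)  # names of components that took the weights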
train
conllu2json
Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme
spacy/cli/converters/conllu2json.py
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None): """ Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme """ # by @dvsrepo, via #11 explosion/spacy-dev-resources # by @katarkor docs = [] sentences = [] conll_tuples = read_conllx(input_data, use_morphology=use_morphology) checked_for_ner = False has_ner_tags = False for i, (raw_text, tokens) in enumerate(conll_tuples): sentence, brackets = tokens[0] if not checked_for_ner: has_ner_tags = is_ner(sentence[5][0]) checked_for_ner = True sentences.append(generate_sentence(sentence, has_ner_tags)) # Real-sized documents could be extracted using the comments on the # conluu document if len(sentences) % n_sents == 0: doc = create_doc(sentences, i) docs.append(doc) sentences = [] return docs
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None): """ Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme """ # by @dvsrepo, via #11 explosion/spacy-dev-resources # by @katarkor docs = [] sentences = [] conll_tuples = read_conllx(input_data, use_morphology=use_morphology) checked_for_ner = False has_ner_tags = False for i, (raw_text, tokens) in enumerate(conll_tuples): sentence, brackets = tokens[0] if not checked_for_ner: has_ner_tags = is_ner(sentence[5][0]) checked_for_ner = True sentences.append(generate_sentence(sentence, has_ner_tags)) # Real-sized documents could be extracted using the comments on the # conluu document if len(sentences) % n_sents == 0: doc = create_doc(sentences, i) docs.append(doc) sentences = [] return docs
[ "Convert", "conllu", "files", "into", "JSON", "format", "for", "use", "with", "train", "cli", ".", "use_morphology", "parameter", "enables", "appending", "morphology", "to", "tags", "which", "is", "useful", "for", "languages", "such", "as", "Spanish", "where", "UD", "tags", "are", "not", "so", "rich", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L9-L37
[ "def", "conllu2json", "(", "input_data", ",", "n_sents", "=", "10", ",", "use_morphology", "=", "False", ",", "lang", "=", "None", ")", ":", "# by @dvsrepo, via #11 explosion/spacy-dev-resources", "# by @katarkor", "docs", "=", "[", "]", "sentences", "=", "[", "]", "conll_tuples", "=", "read_conllx", "(", "input_data", ",", "use_morphology", "=", "use_morphology", ")", "checked_for_ner", "=", "False", "has_ner_tags", "=", "False", "for", "i", ",", "(", "raw_text", ",", "tokens", ")", "in", "enumerate", "(", "conll_tuples", ")", ":", "sentence", ",", "brackets", "=", "tokens", "[", "0", "]", "if", "not", "checked_for_ner", ":", "has_ner_tags", "=", "is_ner", "(", "sentence", "[", "5", "]", "[", "0", "]", ")", "checked_for_ner", "=", "True", "sentences", ".", "append", "(", "generate_sentence", "(", "sentence", ",", "has_ner_tags", ")", ")", "# Real-sized documents could be extracted using the comments on the", "# conluu document", "if", "len", "(", "sentences", ")", "%", "n_sents", "==", "0", ":", "doc", "=", "create_doc", "(", "sentences", ",", "i", ")", "docs", ".", "append", "(", "doc", ")", "sentences", "=", "[", "]", "return", "docs" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
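A minimal usage sketch for the converter, assuming it receives the raw text of a CoNLL-U file, as the `spacy convert` command reads it; the filename is a placeholder.

from spacy.cli.converters import conllu2json

with open("corpus.conllu", "r", encoding="utf8") as f:  # hypothetical input file
    input_data = f.read()
# Group every 10 sentences into one pseudo-document; append morphology to tags.
docs = conllu2json(input_data, n_sents=10, use_morphology=True)
print(len(docs), "documents")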
train
is_ner
Check the 10th column of the first token to determine if the file contains NER tags
spacy/cli/converters/conllu2json.py
def is_ner(tag): """ Check the 10th column of the first token to determine if the file contains NER tags """ tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag) if tag_match: return True elif tag == "O": return True else: return False
def is_ner(tag): """ Check the 10th column of the first token to determine if the file contains NER tags """ tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag) if tag_match: return True elif tag == "O": return True else: return False
[ "Check", "the", "10th", "column", "of", "the", "first", "token", "to", "determine", "if", "the", "file", "contains", "NER", "tags" ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L40-L51
[ "def", "is_ner", "(", "tag", ")", ":", "tag_match", "=", "re", ".", "match", "(", "\"([A-Z_]+)-([A-Z_]+)\"", ",", "tag", ")", "if", "tag_match", ":", "return", "True", "elif", "tag", "==", "\"O\"", ":", "return", "True", "else", ":", "return", "False" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
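Three quick checks of the function above (assumed in scope), one per branch: a PREFIX-SUFFIX tag of uppercase letters and underscores, the plain "O" tag, and anything else.

print(is_ner("B-GPE_LOC"))  # True: matches the ([A-Z_]+)-([A-Z_]+) pattern
print(is_ner("O"))          # True: the explicit "outside" tag
print(is_ner("NOUN"))       # False: looks like a POS tag, so no NER column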
train
simplify_tags
Simplify tags obtained from the dataset in order to follow Wikipedia scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while 'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to 'MISC'.
spacy/cli/converters/conllu2json.py
def simplify_tags(iob): """ Simplify tags obtained from the dataset in order to follow Wikipedia scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while 'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to 'MISC'. """ new_iob = [] for tag in iob: tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag) if tag_match: prefix = tag_match.group(1) suffix = tag_match.group(2) if suffix == "GPE_LOC": suffix = "LOC" elif suffix == "GPE_ORG": suffix = "ORG" elif suffix != "PER" and suffix != "LOC" and suffix != "ORG": suffix = "MISC" tag = prefix + "-" + suffix new_iob.append(tag) return new_iob
def simplify_tags(iob): """ Simplify tags obtained from the dataset in order to follow Wikipedia scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while 'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to 'MISC'. """ new_iob = [] for tag in iob: tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag) if tag_match: prefix = tag_match.group(1) suffix = tag_match.group(2) if suffix == "GPE_LOC": suffix = "LOC" elif suffix == "GPE_ORG": suffix = "ORG" elif suffix != "PER" and suffix != "LOC" and suffix != "ORG": suffix = "MISC" tag = prefix + "-" + suffix new_iob.append(tag) return new_iob
[ "Simplify", "tags", "obtained", "from", "the", "dataset", "in", "order", "to", "follow", "Wikipedia", "scheme", "(", "PER", "LOC", "ORG", "MISC", ")", ".", "PER", "LOC", "and", "ORG", "keep", "their", "tags", "while", "GPE_LOC", "is", "simplified", "to", "LOC", "GPE_ORG", "to", "ORG", "and", "all", "remaining", "tags", "to", "MISC", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L86-L107
[ "def", "simplify_tags", "(", "iob", ")", ":", "new_iob", "=", "[", "]", "for", "tag", "in", "iob", ":", "tag_match", "=", "re", ".", "match", "(", "\"([A-Z_]+)-([A-Z_]+)\"", ",", "tag", ")", "if", "tag_match", ":", "prefix", "=", "tag_match", ".", "group", "(", "1", ")", "suffix", "=", "tag_match", ".", "group", "(", "2", ")", "if", "suffix", "==", "\"GPE_LOC\"", ":", "suffix", "=", "\"LOC\"", "elif", "suffix", "==", "\"GPE_ORG\"", ":", "suffix", "=", "\"ORG\"", "elif", "suffix", "!=", "\"PER\"", "and", "suffix", "!=", "\"LOC\"", "and", "suffix", "!=", "\"ORG\"", ":", "suffix", "=", "\"MISC\"", "tag", "=", "prefix", "+", "\"-\"", "+", "suffix", "new_iob", ".", "append", "(", "tag", ")", "return", "new_iob" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
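A worked example of the tag simplification, assuming simplify_tags is in scope; the input tags are invented for illustration.

iob = ["B-PER", "I-GPE_LOC", "B-GPE_ORG", "B-PROD", "O"]
print(simplify_tags(iob))
# ['B-PER', 'I-LOC', 'B-ORG', 'B-MISC', 'O']
# 'O' has no PREFIX-SUFFIX match, so it passes through unchanged.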
train
info
Print info about spaCy installation. If a model shortcut link is specified as an argument, print model information. Flag --markdown prints details in Markdown for easy copy-pasting to GitHub issues.
spacy/cli/info.py
def info(model=None, markdown=False, silent=False): """ Print info about spaCy installation. If a model shortcut link is speficied as an argument, print model information. Flag --markdown prints details in Markdown for easy copy-pasting to GitHub issues. """ msg = Printer() if model: if util.is_package(model): model_path = util.get_package_path(model) else: model_path = util.get_data_path() / model meta_path = model_path / "meta.json" if not meta_path.is_file(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta = srsly.read_json(meta_path) if model_path.resolve() != model_path: meta["link"] = path2str(model_path) meta["source"] = path2str(model_path.resolve()) else: meta["source"] = path2str(model_path) if not silent: title = "Info about model '{}'".format(model) model_meta = { k: v for k, v in meta.items() if k not in ("accuracy", "speed") } if markdown: print_markdown(model_meta, title=title) else: msg.table(model_meta, title=title) return meta data = { "spaCy version": about.__version__, "Location": path2str(Path(__file__).parent.parent), "Platform": platform.platform(), "Python version": platform.python_version(), "Models": list_models(), } if not silent: title = "Info about spaCy" if markdown: print_markdown(data, title=title) else: msg.table(data, title=title) return data
def info(model=None, markdown=False, silent=False): """ Print info about spaCy installation. If a model shortcut link is speficied as an argument, print model information. Flag --markdown prints details in Markdown for easy copy-pasting to GitHub issues. """ msg = Printer() if model: if util.is_package(model): model_path = util.get_package_path(model) else: model_path = util.get_data_path() / model meta_path = model_path / "meta.json" if not meta_path.is_file(): msg.fail("Can't find model meta.json", meta_path, exits=1) meta = srsly.read_json(meta_path) if model_path.resolve() != model_path: meta["link"] = path2str(model_path) meta["source"] = path2str(model_path.resolve()) else: meta["source"] = path2str(model_path) if not silent: title = "Info about model '{}'".format(model) model_meta = { k: v for k, v in meta.items() if k not in ("accuracy", "speed") } if markdown: print_markdown(model_meta, title=title) else: msg.table(model_meta, title=title) return meta data = { "spaCy version": about.__version__, "Location": path2str(Path(__file__).parent.parent), "Platform": platform.platform(), "Python version": platform.python_version(), "Models": list_models(), } if not silent: title = "Info about spaCy" if markdown: print_markdown(data, title=title) else: msg.table(data, title=title) return data
[ "Print", "info", "about", "spaCy", "installation", ".", "If", "a", "model", "shortcut", "link", "is", "speficied", "as", "an", "argument", "print", "model", "information", ".", "Flag", "--", "markdown", "prints", "details", "in", "Markdown", "for", "easy", "copy", "-", "pasting", "to", "GitHub", "issues", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/info.py#L20-L64
[ "def", "info", "(", "model", "=", "None", ",", "markdown", "=", "False", ",", "silent", "=", "False", ")", ":", "msg", "=", "Printer", "(", ")", "if", "model", ":", "if", "util", ".", "is_package", "(", "model", ")", ":", "model_path", "=", "util", ".", "get_package_path", "(", "model", ")", "else", ":", "model_path", "=", "util", ".", "get_data_path", "(", ")", "/", "model", "meta_path", "=", "model_path", "/", "\"meta.json\"", "if", "not", "meta_path", ".", "is_file", "(", ")", ":", "msg", ".", "fail", "(", "\"Can't find model meta.json\"", ",", "meta_path", ",", "exits", "=", "1", ")", "meta", "=", "srsly", ".", "read_json", "(", "meta_path", ")", "if", "model_path", ".", "resolve", "(", ")", "!=", "model_path", ":", "meta", "[", "\"link\"", "]", "=", "path2str", "(", "model_path", ")", "meta", "[", "\"source\"", "]", "=", "path2str", "(", "model_path", ".", "resolve", "(", ")", ")", "else", ":", "meta", "[", "\"source\"", "]", "=", "path2str", "(", "model_path", ")", "if", "not", "silent", ":", "title", "=", "\"Info about model '{}'\"", ".", "format", "(", "model", ")", "model_meta", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "meta", ".", "items", "(", ")", "if", "k", "not", "in", "(", "\"accuracy\"", ",", "\"speed\"", ")", "}", "if", "markdown", ":", "print_markdown", "(", "model_meta", ",", "title", "=", "title", ")", "else", ":", "msg", ".", "table", "(", "model_meta", ",", "title", "=", "title", ")", "return", "meta", "data", "=", "{", "\"spaCy version\"", ":", "about", ".", "__version__", ",", "\"Location\"", ":", "path2str", "(", "Path", "(", "__file__", ")", ".", "parent", ".", "parent", ")", ",", "\"Platform\"", ":", "platform", ".", "platform", "(", ")", ",", "\"Python version\"", ":", "platform", ".", "python_version", "(", ")", ",", "\"Models\"", ":", "list_models", "(", ")", ",", "}", "if", "not", "silent", ":", "title", "=", "\"Info about spaCy\"", "if", "markdown", ":", "print_markdown", "(", "data", ",", "title", "=", "title", ")", "else", ":", "msg", ".", "table", "(", "data", ",", "title", "=", "title", ")", "return", "data" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
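A sketch of both call modes, assuming a model named "en" is linked or installed locally (that name is a placeholder); without a model argument the function reports on the installation itself.

import spacy.cli

data = spacy.cli.info(silent=True)  # dict: spaCy version, location, platform, models
meta = spacy.cli.info("en")         # prints a table of the model's meta.json, returns the meta dict
spacy.cli.info(markdown=True)       # Markdown output for pasting into GitHub issues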
train
print_markdown
Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2.
spacy/cli/info.py
def print_markdown(data, title=None): """Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2. """ markdown = [] for key, value in data.items(): if isinstance(value, basestring_) and Path(value).exists(): continue markdown.append("* **{}:** {}".format(key, unicode_(value))) if title: print("\n## {}".format(title)) print("\n{}\n".format("\n".join(markdown)))
def print_markdown(data, title=None): """Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2. """ markdown = [] for key, value in data.items(): if isinstance(value, basestring_) and Path(value).exists(): continue markdown.append("* **{}:** {}".format(key, unicode_(value))) if title: print("\n## {}".format(title)) print("\n{}\n".format("\n".join(markdown)))
[ "Print", "data", "in", "GitHub", "-", "flavoured", "Markdown", "format", "for", "issues", "etc", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/info.py#L80-L93
[ "def", "print_markdown", "(", "data", ",", "title", "=", "None", ")", ":", "markdown", "=", "[", "]", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "basestring_", ")", "and", "Path", "(", "value", ")", ".", "exists", "(", ")", ":", "continue", "markdown", ".", "append", "(", "\"* **{}:** {}\"", ".", "format", "(", "key", ",", "unicode_", "(", "value", ")", ")", ")", "if", "title", ":", "print", "(", "\"\\n## {}\"", ".", "format", "(", "title", ")", ")", "print", "(", "\"\\n{}\\n\"", ".", "format", "(", "\"\\n\"", ".", "join", "(", "markdown", ")", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
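A quick illustration of the output shape, assuming print_markdown is in scope; note the loop above silently skips any string value that is an existing filesystem path, so local paths don't leak into pasted issue reports.

data = {"spaCy version": "2.1.3", "Models": "en, de"}
print_markdown(data, title="Info about spaCy")
# ## Info about spaCy
#
# * **spaCy version:** 2.1.3
# * **Models:** en, de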
train
main
Load the model, set up the pipeline and train the parser.
examples/training/train_intent_parser.py
def main(model=None, output_dir=None, n_iter=15): """Load the model, set up the pipeline and train the parser.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # We'll use the built-in dependency parser class, but we want to create a # fresh instance – just in case. if "parser" in nlp.pipe_names: nlp.remove_pipe("parser") parser = nlp.create_pipe("parser") nlp.add_pipe(parser, first=True) for text, annotations in TRAIN_DATA: for dep in annotations.get("deps", []): parser.add_label(dep) other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"] with nlp.disable_pipes(*other_pipes): # only train parser optimizer = nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, losses=losses) print("Losses", losses) # test the trained model test_model(nlp) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) test_model(nlp2)
def main(model=None, output_dir=None, n_iter=15): """Load the model, set up the pipeline and train the parser.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # We'll use the built-in dependency parser class, but we want to create a # fresh instance – just in case. if "parser" in nlp.pipe_names: nlp.remove_pipe("parser") parser = nlp.create_pipe("parser") nlp.add_pipe(parser, first=True) for text, annotations in TRAIN_DATA: for dep in annotations.get("deps", []): parser.add_label(dep) other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"] with nlp.disable_pipes(*other_pipes): # only train parser optimizer = nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, losses=losses) print("Losses", losses) # test the trained model test_model(nlp) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) test_model(nlp2)
[ "Load", "the", "model", "set", "up", "the", "pipeline", "and", "train", "the", "parser", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_intent_parser.py#L107-L154
[ "def", "main", "(", "model", "=", "None", ",", "output_dir", "=", "None", ",", "n_iter", "=", "15", ")", ":", "if", "model", "is", "not", "None", ":", "nlp", "=", "spacy", ".", "load", "(", "model", ")", "# load existing spaCy model", "print", "(", "\"Loaded model '%s'\"", "%", "model", ")", "else", ":", "nlp", "=", "spacy", ".", "blank", "(", "\"en\"", ")", "# create blank Language class", "print", "(", "\"Created blank 'en' model\"", ")", "# We'll use the built-in dependency parser class, but we want to create a", "# fresh instance – just in case.", "if", "\"parser\"", "in", "nlp", ".", "pipe_names", ":", "nlp", ".", "remove_pipe", "(", "\"parser\"", ")", "parser", "=", "nlp", ".", "create_pipe", "(", "\"parser\"", ")", "nlp", ".", "add_pipe", "(", "parser", ",", "first", "=", "True", ")", "for", "text", ",", "annotations", "in", "TRAIN_DATA", ":", "for", "dep", "in", "annotations", ".", "get", "(", "\"deps\"", ",", "[", "]", ")", ":", "parser", ".", "add_label", "(", "dep", ")", "other_pipes", "=", "[", "pipe", "for", "pipe", "in", "nlp", ".", "pipe_names", "if", "pipe", "!=", "\"parser\"", "]", "with", "nlp", ".", "disable_pipes", "(", "*", "other_pipes", ")", ":", "# only train parser", "optimizer", "=", "nlp", ".", "begin_training", "(", ")", "for", "itn", "in", "range", "(", "n_iter", ")", ":", "random", ".", "shuffle", "(", "TRAIN_DATA", ")", "losses", "=", "{", "}", "# batch up the examples using spaCy's minibatch", "batches", "=", "minibatch", "(", "TRAIN_DATA", ",", "size", "=", "compounding", "(", "4.0", ",", "32.0", ",", "1.001", ")", ")", "for", "batch", "in", "batches", ":", "texts", ",", "annotations", "=", "zip", "(", "*", "batch", ")", "nlp", ".", "update", "(", "texts", ",", "annotations", ",", "sgd", "=", "optimizer", ",", "losses", "=", "losses", ")", "print", "(", "\"Losses\"", ",", "losses", ")", "# test the trained model", "test_model", "(", "nlp", ")", "# save model to output directory", "if", "output_dir", "is", "not", "None", ":", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "nlp", ".", "to_disk", "(", "output_dir", ")", "print", "(", "\"Saved model to\"", ",", "output_dir", ")", "# test the saved model", "print", "(", "\"Loading from\"", ",", "output_dir", ")", "nlp2", "=", "spacy", ".", "load", "(", "output_dir", ")", "test_model", "(", "nlp2", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
train
Language.get_pipe
Get a pipeline component for a given component name. name (unicode): Name of pipeline component to get. RETURNS (callable): The pipeline component. DOCS: https://spacy.io/api/language#get_pipe
spacy/language.py
def get_pipe(self, name): """Get a pipeline component for a given component name. name (unicode): Name of pipeline component to get. RETURNS (callable): The pipeline component. DOCS: https://spacy.io/api/language#get_pipe """ for pipe_name, component in self.pipeline: if pipe_name == name: return component raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
def get_pipe(self, name): """Get a pipeline component for a given component name. name (unicode): Name of pipeline component to get. RETURNS (callable): The pipeline component. DOCS: https://spacy.io/api/language#get_pipe """ for pipe_name, component in self.pipeline: if pipe_name == name: return component raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
[ "Get", "a", "pipeline", "component", "for", "a", "given", "component", "name", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L232-L243
[ "def", "get_pipe", "(", "self", ",", "name", ")", ":", "for", "pipe_name", ",", "component", "in", "self", ".", "pipeline", ":", "if", "pipe_name", "==", "name", ":", "return", "component", "raise", "KeyError", "(", "Errors", ".", "E001", ".", "format", "(", "name", "=", "name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
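A short usage sketch: the name must be one of nlp.pipe_names, otherwise a KeyError (E001) listing the available names is raised.

import spacy

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("tagger"))
tagger = nlp.get_pipe("tagger")  # the Tagger component instance
# nlp.get_pipe("ner")            # would raise KeyError E001 with opts=['tagger']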
train
Language.create_pipe
Create a pipeline component from a factory. name (unicode): Factory name to look up in `Language.factories`. config (dict): Configuration parameters to initialise component. RETURNS (callable): Pipeline component. DOCS: https://spacy.io/api/language#create_pipe
spacy/language.py
def create_pipe(self, name, config=dict()): """Create a pipeline component from a factory. name (unicode): Factory name to look up in `Language.factories`. config (dict): Configuration parameters to initialise component. RETURNS (callable): Pipeline component. DOCS: https://spacy.io/api/language#create_pipe """ if name not in self.factories: if name == "sbd": raise KeyError(Errors.E108.format(name=name)) else: raise KeyError(Errors.E002.format(name=name)) factory = self.factories[name] return factory(self, **config)
def create_pipe(self, name, config=dict()): """Create a pipeline component from a factory. name (unicode): Factory name to look up in `Language.factories`. config (dict): Configuration parameters to initialise component. RETURNS (callable): Pipeline component. DOCS: https://spacy.io/api/language#create_pipe """ if name not in self.factories: if name == "sbd": raise KeyError(Errors.E108.format(name=name)) else: raise KeyError(Errors.E002.format(name=name)) factory = self.factories[name] return factory(self, **config)
[ "Create", "a", "pipeline", "component", "from", "a", "factory", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L245-L260
[ "def", "create_pipe", "(", "self", ",", "name", ",", "config", "=", "dict", "(", ")", ")", ":", "if", "name", "not", "in", "self", ".", "factories", ":", "if", "name", "==", "\"sbd\"", ":", "raise", "KeyError", "(", "Errors", ".", "E108", ".", "format", "(", "name", "=", "name", ")", ")", "else", ":", "raise", "KeyError", "(", "Errors", ".", "E002", ".", "format", "(", "name", "=", "name", ")", ")", "factory", "=", "self", ".", "factories", "[", "name", "]", "return", "factory", "(", "self", ",", "*", "*", "config", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
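A sketch of the factory lookup, using the built-in "sentencizer" factory; any config entries are forwarded to the factory as keyword arguments.

import spacy

nlp = spacy.blank("en")
sentencizer = nlp.create_pipe("sentencizer")  # looked up in Language.factories
nlp.add_pipe(sentencizer)
# An unknown name raises KeyError E002; the legacy name "sbd" is special-cased
# to raise the more specific E108.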
train
Language.add_pipe
Add a component to the processing pipeline. Valid components are callables that take a `Doc` object, modify it and return it. Only one of before/after/first/last can be set. Default behaviour is "last". component (callable): The pipeline component. name (unicode): Name of pipeline component. Overwrites existing component.name attribute if available. If no name is set and the component exposes no name attribute, component.__name__ is used. An error is raised if a name already exists in the pipeline. before (unicode): Component name to insert component directly before. after (unicode): Component name to insert component directly after. first (bool): Insert component first / not first in the pipeline. last (bool): Insert component last / not last in the pipeline. DOCS: https://spacy.io/api/language#add_pipe
spacy/language.py
def add_pipe( self, component, name=None, before=None, after=None, first=None, last=None ): """Add a component to the processing pipeline. Valid components are callables that take a `Doc` object, modify it and return it. Only one of before/after/first/last can be set. Default behaviour is "last". component (callable): The pipeline component. name (unicode): Name of pipeline component. Overwrites existing component.name attribute if available. If no name is set and the component exposes no name attribute, component.__name__ is used. An error is raised if a name already exists in the pipeline. before (unicode): Component name to insert component directly before. after (unicode): Component name to insert component directly after. first (bool): Insert component first / not first in the pipeline. last (bool): Insert component last / not last in the pipeline. DOCS: https://spacy.io/api/language#add_pipe """ if not hasattr(component, "__call__"): msg = Errors.E003.format(component=repr(component), name=name) if isinstance(component, basestring_) and component in self.factories: msg += Errors.E004.format(component=component) raise ValueError(msg) if name is None: if hasattr(component, "name"): name = component.name elif hasattr(component, "__name__"): name = component.__name__ elif hasattr(component, "__class__") and hasattr( component.__class__, "__name__" ): name = component.__class__.__name__ else: name = repr(component) if name in self.pipe_names: raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names)) if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2: raise ValueError(Errors.E006) pipe = (name, component) if last or not any([first, before, after]): self.pipeline.append(pipe) elif first: self.pipeline.insert(0, pipe) elif before and before in self.pipe_names: self.pipeline.insert(self.pipe_names.index(before), pipe) elif after and after in self.pipe_names: self.pipeline.insert(self.pipe_names.index(after) + 1, pipe) else: raise ValueError( Errors.E001.format(name=before or after, opts=self.pipe_names) )
def add_pipe( self, component, name=None, before=None, after=None, first=None, last=None ): """Add a component to the processing pipeline. Valid components are callables that take a `Doc` object, modify it and return it. Only one of before/after/first/last can be set. Default behaviour is "last". component (callable): The pipeline component. name (unicode): Name of pipeline component. Overwrites existing component.name attribute if available. If no name is set and the component exposes no name attribute, component.__name__ is used. An error is raised if a name already exists in the pipeline. before (unicode): Component name to insert component directly before. after (unicode): Component name to insert component directly after. first (bool): Insert component first / not first in the pipeline. last (bool): Insert component last / not last in the pipeline. DOCS: https://spacy.io/api/language#add_pipe """ if not hasattr(component, "__call__"): msg = Errors.E003.format(component=repr(component), name=name) if isinstance(component, basestring_) and component in self.factories: msg += Errors.E004.format(component=component) raise ValueError(msg) if name is None: if hasattr(component, "name"): name = component.name elif hasattr(component, "__name__"): name = component.__name__ elif hasattr(component, "__class__") and hasattr( component.__class__, "__name__" ): name = component.__class__.__name__ else: name = repr(component) if name in self.pipe_names: raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names)) if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2: raise ValueError(Errors.E006) pipe = (name, component) if last or not any([first, before, after]): self.pipeline.append(pipe) elif first: self.pipeline.insert(0, pipe) elif before and before in self.pipe_names: self.pipeline.insert(self.pipe_names.index(before), pipe) elif after and after in self.pipe_names: self.pipeline.insert(self.pipe_names.index(after) + 1, pipe) else: raise ValueError( Errors.E001.format(name=before or after, opts=self.pipe_names) )
[ "Add", "a", "component", "to", "the", "processing", "pipeline", ".", "Valid", "components", "are", "callables", "that", "take", "a", "Doc", "object", "modify", "it", "and", "return", "it", ".", "Only", "one", "of", "before", "/", "after", "/", "first", "/", "last", "can", "be", "set", ".", "Default", "behaviour", "is", "last", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L262-L313
[ "def", "add_pipe", "(", "self", ",", "component", ",", "name", "=", "None", ",", "before", "=", "None", ",", "after", "=", "None", ",", "first", "=", "None", ",", "last", "=", "None", ")", ":", "if", "not", "hasattr", "(", "component", ",", "\"__call__\"", ")", ":", "msg", "=", "Errors", ".", "E003", ".", "format", "(", "component", "=", "repr", "(", "component", ")", ",", "name", "=", "name", ")", "if", "isinstance", "(", "component", ",", "basestring_", ")", "and", "component", "in", "self", ".", "factories", ":", "msg", "+=", "Errors", ".", "E004", ".", "format", "(", "component", "=", "component", ")", "raise", "ValueError", "(", "msg", ")", "if", "name", "is", "None", ":", "if", "hasattr", "(", "component", ",", "\"name\"", ")", ":", "name", "=", "component", ".", "name", "elif", "hasattr", "(", "component", ",", "\"__name__\"", ")", ":", "name", "=", "component", ".", "__name__", "elif", "hasattr", "(", "component", ",", "\"__class__\"", ")", "and", "hasattr", "(", "component", ".", "__class__", ",", "\"__name__\"", ")", ":", "name", "=", "component", ".", "__class__", ".", "__name__", "else", ":", "name", "=", "repr", "(", "component", ")", "if", "name", "in", "self", ".", "pipe_names", ":", "raise", "ValueError", "(", "Errors", ".", "E007", ".", "format", "(", "name", "=", "name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")", "if", "sum", "(", "[", "bool", "(", "before", ")", ",", "bool", "(", "after", ")", ",", "bool", "(", "first", ")", ",", "bool", "(", "last", ")", "]", ")", ">=", "2", ":", "raise", "ValueError", "(", "Errors", ".", "E006", ")", "pipe", "=", "(", "name", ",", "component", ")", "if", "last", "or", "not", "any", "(", "[", "first", ",", "before", ",", "after", "]", ")", ":", "self", ".", "pipeline", ".", "append", "(", "pipe", ")", "elif", "first", ":", "self", ".", "pipeline", ".", "insert", "(", "0", ",", "pipe", ")", "elif", "before", "and", "before", "in", "self", ".", "pipe_names", ":", "self", ".", "pipeline", ".", "insert", "(", "self", ".", "pipe_names", ".", "index", "(", "before", ")", ",", "pipe", ")", "elif", "after", "and", "after", "in", "self", ".", "pipe_names", ":", "self", ".", "pipeline", ".", "insert", "(", "self", ".", "pipe_names", ".", "index", "(", "after", ")", "+", "1", ",", "pipe", ")", "else", ":", "raise", "ValueError", "(", "Errors", ".", "E001", ".", "format", "(", "name", "=", "before", "or", "after", ",", "opts", "=", "self", ".", "pipe_names", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
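A sketch of the placement options: only one of before/after/first/last may be set, and any callable that takes and returns a Doc qualifies as a component (the name here is passed explicitly rather than derived from __name__).

import spacy

def print_length(doc):
    # toy component: receives the Doc and must return it
    print("tokens:", len(doc))
    return doc

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("tagger"))
nlp.add_pipe(print_length, name="print_length", first=True)
print(nlp.pipe_names)  # ['print_length', 'tagger']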
train
Language.replace_pipe
Replace a component in the pipeline. name (unicode): Name of the component to replace. component (callable): Pipeline component. DOCS: https://spacy.io/api/language#replace_pipe
spacy/language.py
def replace_pipe(self, name, component): """Replace a component in the pipeline. name (unicode): Name of the component to replace. component (callable): Pipeline component. DOCS: https://spacy.io/api/language#replace_pipe """ if name not in self.pipe_names: raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names)) self.pipeline[self.pipe_names.index(name)] = (name, component)
def replace_pipe(self, name, component): """Replace a component in the pipeline. name (unicode): Name of the component to replace. component (callable): Pipeline component. DOCS: https://spacy.io/api/language#replace_pipe """ if name not in self.pipe_names: raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names)) self.pipeline[self.pipe_names.index(name)] = (name, component)
[ "Replace", "a", "component", "in", "the", "pipeline", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L326-L336
[ "def", "replace_pipe", "(", "self", ",", "name", ",", "component", ")", ":", "if", "name", "not", "in", "self", ".", "pipe_names", ":", "raise", "ValueError", "(", "Errors", ".", "E001", ".", "format", "(", "name", "=", "name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")", "self", ".", "pipeline", "[", "self", ".", "pipe_names", ".", "index", "(", "name", ")", "]", "=", "(", "name", ",", "component", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
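A minimal sketch: the replacement keeps the original name and pipeline position, and the name must already exist or E001 is raised.

import spacy

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("tagger"))
nlp.replace_pipe("tagger", nlp.create_pipe("tagger"))  # same slot, fresh instance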
train
Language.rename_pipe
Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe
spacy/language.py
def rename_pipe(self, old_name, new_name): """Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe """ if old_name not in self.pipe_names: raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names)) if new_name in self.pipe_names: raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names)) i = self.pipe_names.index(old_name) self.pipeline[i] = (new_name, self.pipeline[i][1])
def rename_pipe(self, old_name, new_name): """Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe """ if old_name not in self.pipe_names: raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names)) if new_name in self.pipe_names: raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names)) i = self.pipe_names.index(old_name) self.pipeline[i] = (new_name, self.pipeline[i][1])
[ "Rename", "a", "pipeline", "component", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L338-L351
[ "def", "rename_pipe", "(", "self", ",", "old_name", ",", "new_name", ")", ":", "if", "old_name", "not", "in", "self", ".", "pipe_names", ":", "raise", "ValueError", "(", "Errors", ".", "E001", ".", "format", "(", "name", "=", "old_name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")", "if", "new_name", "in", "self", ".", "pipe_names", ":", "raise", "ValueError", "(", "Errors", ".", "E007", ".", "format", "(", "name", "=", "new_name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")", "i", "=", "self", ".", "pipe_names", ".", "index", "(", "old_name", ")", "self", ".", "pipeline", "[", "i", "]", "=", "(", "new_name", ",", "self", ".", "pipeline", "[", "i", "]", "[", "1", "]", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
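A minimal sketch: the old name must exist (E001) and the new name must not collide with an existing component (E007).

import spacy

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("ner"))
nlp.rename_pipe("ner", "entity_recognizer")
print(nlp.pipe_names)  # ['entity_recognizer']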
train
Language.remove_pipe
Remove a component from the pipeline. name (unicode): Name of the component to remove. RETURNS (tuple): A `(name, component)` tuple of the removed component. DOCS: https://spacy.io/api/language#remove_pipe
spacy/language.py
def remove_pipe(self, name): """Remove a component from the pipeline. name (unicode): Name of the component to remove. RETURNS (tuple): A `(name, component)` tuple of the removed component. DOCS: https://spacy.io/api/language#remove_pipe """ if name not in self.pipe_names: raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names)) return self.pipeline.pop(self.pipe_names.index(name))
def remove_pipe(self, name): """Remove a component from the pipeline. name (unicode): Name of the component to remove. RETURNS (tuple): A `(name, component)` tuple of the removed component. DOCS: https://spacy.io/api/language#remove_pipe """ if name not in self.pipe_names: raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names)) return self.pipeline.pop(self.pipe_names.index(name))
[ "Remove", "a", "component", "from", "the", "pipeline", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L353-L363
[ "def", "remove_pipe", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "pipe_names", ":", "raise", "ValueError", "(", "Errors", ".", "E001", ".", "format", "(", "name", "=", "name", ",", "opts", "=", "self", ".", "pipe_names", ")", ")", "return", "self", ".", "pipeline", ".", "pop", "(", "self", ".", "pipe_names", ".", "index", "(", "name", ")", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
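A minimal sketch: the popped (name, component) tuple lets the caller keep the component around or re-add it later.

import spacy

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("tagger"))
name, component = nlp.remove_pipe("tagger")
print(name, nlp.pipe_names)  # tagger []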
train
Language.update
Update the models in the pipeline. docs (iterable): A batch of `Doc` objects. golds (iterable): A batch of `GoldParse` objects. drop (float): The dropout rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. DOCS: https://spacy.io/api/language#update
spacy/language.py
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None): """Update the models in the pipeline. docs (iterable): A batch of `Doc` objects. golds (iterable): A batch of `GoldParse` objects. drop (float): The droput rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. DOCS: https://spacy.io/api/language#update """ if len(docs) != len(golds): raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds))) if len(docs) == 0: return if sgd is None: if self._optimizer is None: self._optimizer = create_default_optimizer(Model.ops) sgd = self._optimizer # Allow dict of args to GoldParse, instead of GoldParse objects. gold_objs = [] doc_objs = [] for doc, gold in zip(docs, golds): if isinstance(doc, basestring_): doc = self.make_doc(doc) if not isinstance(gold, GoldParse): gold = GoldParse(doc, **gold) doc_objs.append(doc) gold_objs.append(gold) golds = gold_objs docs = doc_objs grads = {} def get_grads(W, dW, key=None): grads[key] = (W, dW) get_grads.alpha = sgd.alpha get_grads.b1 = sgd.b1 get_grads.b2 = sgd.b2 pipes = list(self.pipeline) random.shuffle(pipes) if component_cfg is None: component_cfg = {} for name, proc in pipes: if not hasattr(proc, "update"): continue grads = {} kwargs = component_cfg.get(name, {}) kwargs.setdefault("drop", drop) proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs) for key, (W, dW) in grads.items(): sgd(W, dW, key=key)
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
    """Update the models in the pipeline.

    docs (iterable): A batch of `Doc` objects.
    golds (iterable): A batch of `GoldParse` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    DOCS: https://spacy.io/api/language#update
    """
    if len(docs) != len(golds):
        raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    # Allow dict of args to GoldParse, instead of GoldParse objects.
    gold_objs = []
    doc_objs = []
    for doc, gold in zip(docs, golds):
        if isinstance(doc, basestring_):
            doc = self.make_doc(doc)
        if not isinstance(gold, GoldParse):
            gold = GoldParse(doc, **gold)
        doc_objs.append(doc)
        gold_objs.append(gold)
    golds = gold_objs
    docs = doc_objs
    grads = {}

    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if component_cfg is None:
        component_cfg = {}
    for name, proc in pipes:
        if not hasattr(proc, "update"):
            continue
        grads = {}
        kwargs = component_cfg.get(name, {})
        kwargs.setdefault("drop", drop)
        proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
[ "Update", "the", "models", "in", "the", "pipeline", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L408-L459
[ "def", "update", "(", "self", ",", "docs", ",", "golds", ",", "drop", "=", "0.0", ",", "sgd", "=", "None", ",", "losses", "=", "None", ",", "component_cfg", "=", "None", ")", ":", "if", "len", "(", "docs", ")", "!=", "len", "(", "golds", ")", ":", "raise", "IndexError", "(", "Errors", ".", "E009", ".", "format", "(", "n_docs", "=", "len", "(", "docs", ")", ",", "n_golds", "=", "len", "(", "golds", ")", ")", ")", "if", "len", "(", "docs", ")", "==", "0", ":", "return", "if", "sgd", "is", "None", ":", "if", "self", ".", "_optimizer", "is", "None", ":", "self", ".", "_optimizer", "=", "create_default_optimizer", "(", "Model", ".", "ops", ")", "sgd", "=", "self", ".", "_optimizer", "# Allow dict of args to GoldParse, instead of GoldParse objects.", "gold_objs", "=", "[", "]", "doc_objs", "=", "[", "]", "for", "doc", ",", "gold", "in", "zip", "(", "docs", ",", "golds", ")", ":", "if", "isinstance", "(", "doc", ",", "basestring_", ")", ":", "doc", "=", "self", ".", "make_doc", "(", "doc", ")", "if", "not", "isinstance", "(", "gold", ",", "GoldParse", ")", ":", "gold", "=", "GoldParse", "(", "doc", ",", "*", "*", "gold", ")", "doc_objs", ".", "append", "(", "doc", ")", "gold_objs", ".", "append", "(", "gold", ")", "golds", "=", "gold_objs", "docs", "=", "doc_objs", "grads", "=", "{", "}", "def", "get_grads", "(", "W", ",", "dW", ",", "key", "=", "None", ")", ":", "grads", "[", "key", "]", "=", "(", "W", ",", "dW", ")", "get_grads", ".", "alpha", "=", "sgd", ".", "alpha", "get_grads", ".", "b1", "=", "sgd", ".", "b1", "get_grads", ".", "b2", "=", "sgd", ".", "b2", "pipes", "=", "list", "(", "self", ".", "pipeline", ")", "random", ".", "shuffle", "(", "pipes", ")", "if", "component_cfg", "is", "None", ":", "component_cfg", "=", "{", "}", "for", "name", ",", "proc", "in", "pipes", ":", "if", "not", "hasattr", "(", "proc", ",", "\"update\"", ")", ":", "continue", "grads", "=", "{", "}", "kwargs", "=", "component_cfg", ".", "get", "(", "name", ",", "{", "}", ")", "kwargs", ".", "setdefault", "(", "\"drop\"", ",", "drop", ")", "proc", ".", "update", "(", "docs", ",", "golds", ",", "sgd", "=", "get_grads", ",", "losses", "=", "losses", ",", "*", "*", "kwargs", ")", "for", "key", ",", "(", "W", ",", "dW", ")", "in", "grads", ".", "items", "(", ")", ":", "sgd", "(", "W", ",", "dW", ",", "key", "=", "key", ")" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
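A training-loop sketch for `update`. It leans on the conversion shown in the code above, which accepts raw strings and dicts of `GoldParse` arguments; the label, text, and entity offsets are made up for illustration:

import random
import spacy

nlp = spacy.blank("en")  # hypothetical blank pipeline
ner = nlp.create_pipe("ner")
ner.add_label("ANIMAL")  # hypothetical label
nlp.add_pipe(ner)
optimizer = nlp.begin_training()
train_data = [("I saw a cat", {"entities": [(8, 11, "ANIMAL")]})]  # made-up data
losses = {}
for _ in range(10):
    random.shuffle(train_data)
    texts, annotations = zip(*train_data)
    # Raw strings become Docs and dicts become GoldParse objects internally.
    nlp.update(list(texts), list(annotations), drop=0.2, sgd=optimizer, losses=losses)
print(losses)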
train
Language.rehearse
Make a "rehearsal" update to the models in the pipeline, to prevent forgetting. Rehearsal updates run an initial copy of the model over some data, and update the model so its current predictions are more like the initial ones. This is useful for keeping a pre-trained model on-track, even if you're updating it with a smaller set of examples. docs (iterable): A batch of `Doc` objects. drop (float): The droput rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. EXAMPLE: >>> raw_text_batches = minibatch(raw_texts) >>> for labelled_batch in minibatch(zip(train_docs, train_golds)): >>> docs, golds = zip(*train_docs) >>> nlp.update(docs, golds) >>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)] >>> nlp.rehearse(raw_batch)
spacy/language.py
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pre-trained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*labelled_batch)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # TODO: document
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    docs = list(docs)
    for i, doc in enumerate(docs):
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pre-trained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*labelled_batch)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # TODO: document
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    docs = list(docs)
    for i, doc in enumerate(docs):
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
[ "Make", "a", "rehearsal", "update", "to", "the", "models", "in", "the", "pipeline", "to", "prevent", "forgetting", ".", "Rehearsal", "updates", "run", "an", "initial", "copy", "of", "the", "model", "over", "some", "data", "and", "update", "the", "model", "so", "its", "current", "predictions", "are", "more", "like", "the", "initial", "ones", ".", "This", "is", "useful", "for", "keeping", "a", "pre", "-", "trained", "model", "on", "-", "track", "even", "if", "you", "re", "updating", "it", "with", "a", "smaller", "set", "of", "examples", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L461-L511
[ "def", "rehearse", "(", "self", ",", "docs", ",", "sgd", "=", "None", ",", "losses", "=", "None", ",", "config", "=", "None", ")", ":", "# TODO: document", "if", "len", "(", "docs", ")", "==", "0", ":", "return", "if", "sgd", "is", "None", ":", "if", "self", ".", "_optimizer", "is", "None", ":", "self", ".", "_optimizer", "=", "create_default_optimizer", "(", "Model", ".", "ops", ")", "sgd", "=", "self", ".", "_optimizer", "docs", "=", "list", "(", "docs", ")", "for", "i", ",", "doc", "in", "enumerate", "(", "docs", ")", ":", "if", "isinstance", "(", "doc", ",", "basestring_", ")", ":", "docs", "[", "i", "]", "=", "self", ".", "make_doc", "(", "doc", ")", "pipes", "=", "list", "(", "self", ".", "pipeline", ")", "random", ".", "shuffle", "(", "pipes", ")", "if", "config", "is", "None", ":", "config", "=", "{", "}", "grads", "=", "{", "}", "def", "get_grads", "(", "W", ",", "dW", ",", "key", "=", "None", ")", ":", "grads", "[", "key", "]", "=", "(", "W", ",", "dW", ")", "get_grads", ".", "alpha", "=", "sgd", ".", "alpha", "get_grads", ".", "b1", "=", "sgd", ".", "b1", "get_grads", ".", "b2", "=", "sgd", ".", "b2", "for", "name", ",", "proc", "in", "pipes", ":", "if", "not", "hasattr", "(", "proc", ",", "\"rehearse\"", ")", ":", "continue", "grads", "=", "{", "}", "proc", ".", "rehearse", "(", "docs", ",", "sgd", "=", "get_grads", ",", "losses", "=", "losses", ",", "*", "*", "config", ".", "get", "(", "name", ",", "{", "}", ")", ")", "for", "key", ",", "(", "W", ",", "dW", ")", "in", "grads", ".", "items", "(", ")", ":", "sgd", "(", "W", ",", "dW", ",", "key", "=", "key", ")", "return", "losses" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
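A sketch of calling `rehearse` on raw text, following the pattern in the docstring's EXAMPLE; the model name "en_core_web_sm" and the text are assumptions:

import spacy

nlp = spacy.load("en_core_web_sm")  # any pre-trained model; name is an example
optimizer = nlp.resume_training()  # sets up the initial model copies
raw_texts = ["Plain text the model should keep handling well."]  # made-up data
losses = {}
raw_batch = [nlp.make_doc(text) for text in raw_texts]
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
print(losses)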
train
Language.preprocess_gold
Can be called before training to pre-process gold data. By default, it handles nonprojectivity and adds missing tags to the tag map. docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects. YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
spacy/language.py
def preprocess_gold(self, docs_golds):
    """Can be called before training to pre-process gold data. By default,
    it handles nonprojectivity and adds missing tags to the tag map.

    docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
    YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
    """
    for name, proc in self.pipeline:
        if hasattr(proc, "preprocess_gold"):
            docs_golds = proc.preprocess_gold(docs_golds)
    for doc, gold in docs_golds:
        yield doc, gold
def preprocess_gold(self, docs_golds):
    """Can be called before training to pre-process gold data. By default,
    it handles nonprojectivity and adds missing tags to the tag map.

    docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
    YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
    """
    for name, proc in self.pipeline:
        if hasattr(proc, "preprocess_gold"):
            docs_golds = proc.preprocess_gold(docs_golds)
    for doc, gold in docs_golds:
        yield doc, gold
[ "Can", "be", "called", "before", "training", "to", "pre", "-", "process", "gold", "data", ".", "By", "default", "it", "handles", "nonprojectivity", "and", "adds", "missing", "tags", "to", "the", "tag", "map", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L513-L524
[ "def", "preprocess_gold", "(", "self", ",", "docs_golds", ")", ":", "for", "name", ",", "proc", "in", "self", ".", "pipeline", ":", "if", "hasattr", "(", "proc", ",", "\"preprocess_gold\"", ")", ":", "docs_golds", "=", "proc", ".", "preprocess_gold", "(", "docs_golds", ")", "for", "doc", ",", "gold", "in", "docs_golds", ":", "yield", "doc", ",", "gold" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
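A sketch of `preprocess_gold` on a single (Doc, GoldParse) pair; the sentence is made up, and with a blank pipeline no component actually transforms the stream:

import spacy
from spacy.gold import GoldParse

nlp = spacy.blank("en")
doc = nlp.make_doc("A short example sentence")  # made-up text
gold = GoldParse(doc, words=[t.text for t in doc])
# Each pipeline component that defines preprocess_gold() may rewrite the stream.
preprocessed = list(nlp.preprocess_gold([(doc, gold)]))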
train
Language.begin_training
Allocate models, pre-process training data and acquire a trainer and optimizer. Used as a contextmanager. get_gold_tuples (function): Function returning gold data component_cfg (dict): Config parameters for specific components. **cfg: Config parameters. RETURNS: An optimizer. DOCS: https://spacy.io/api/language#begin_training
spacy/language.py
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
    """Allocate models, pre-process training data and acquire a trainer and
    optimizer. Used as a contextmanager.

    get_gold_tuples (function): Function returning gold data
    component_cfg (dict): Config parameters for specific components.
    **cfg: Config parameters.
    RETURNS: An optimizer.

    DOCS: https://spacy.io/api/language#begin_training
    """
    if get_gold_tuples is None:
        get_gold_tuples = lambda: []
    # Populate vocab
    else:
        for _, annots_brackets in get_gold_tuples():
            for annots, _ in annots_brackets:
                for word in annots[1]:
                    _ = self.vocab[word]  # noqa: F841
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if hasattr(proc, "begin_training"):
            kwargs = component_cfg.get(name, {})
            kwargs.update(cfg)
            proc.begin_training(
                get_gold_tuples,
                pipeline=self.pipeline,
                sgd=self._optimizer,
                **kwargs
            )
    return self._optimizer
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
    """Allocate models, pre-process training data and acquire a trainer and
    optimizer. Used as a contextmanager.

    get_gold_tuples (function): Function returning gold data
    component_cfg (dict): Config parameters for specific components.
    **cfg: Config parameters.
    RETURNS: An optimizer.

    DOCS: https://spacy.io/api/language#begin_training
    """
    if get_gold_tuples is None:
        get_gold_tuples = lambda: []
    # Populate vocab
    else:
        for _, annots_brackets in get_gold_tuples():
            for annots, _ in annots_brackets:
                for word in annots[1]:
                    _ = self.vocab[word]  # noqa: F841
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if hasattr(proc, "begin_training"):
            kwargs = component_cfg.get(name, {})
            kwargs.update(cfg)
            proc.begin_training(
                get_gold_tuples,
                pipeline=self.pipeline,
                sgd=self._optimizer,
                **kwargs
            )
    return self._optimizer
[ "Allocate", "models", "pre", "-", "process", "training", "data", "and", "acquire", "a", "trainer", "and", "optimizer", ".", "Used", "as", "a", "contextmanager", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L526-L567
[ "def", "begin_training", "(", "self", ",", "get_gold_tuples", "=", "None", ",", "sgd", "=", "None", ",", "component_cfg", "=", "None", ",", "*", "*", "cfg", ")", ":", "if", "get_gold_tuples", "is", "None", ":", "get_gold_tuples", "=", "lambda", ":", "[", "]", "# Populate vocab", "else", ":", "for", "_", ",", "annots_brackets", "in", "get_gold_tuples", "(", ")", ":", "for", "annots", ",", "_", "in", "annots_brackets", ":", "for", "word", "in", "annots", "[", "1", "]", ":", "_", "=", "self", ".", "vocab", "[", "word", "]", "# noqa: F841", "if", "cfg", ".", "get", "(", "\"device\"", ",", "-", "1", ")", ">=", "0", ":", "util", ".", "use_gpu", "(", "cfg", "[", "\"device\"", "]", ")", "if", "self", ".", "vocab", ".", "vectors", ".", "data", ".", "shape", "[", "1", "]", ">=", "1", ":", "self", ".", "vocab", ".", "vectors", ".", "data", "=", "Model", ".", "ops", ".", "asarray", "(", "self", ".", "vocab", ".", "vectors", ".", "data", ")", "link_vectors_to_models", "(", "self", ".", "vocab", ")", "if", "self", ".", "vocab", ".", "vectors", ".", "data", ".", "shape", "[", "1", "]", ":", "cfg", "[", "\"pretrained_vectors\"", "]", "=", "self", ".", "vocab", ".", "vectors", ".", "name", "if", "sgd", "is", "None", ":", "sgd", "=", "create_default_optimizer", "(", "Model", ".", "ops", ")", "self", ".", "_optimizer", "=", "sgd", "if", "component_cfg", "is", "None", ":", "component_cfg", "=", "{", "}", "for", "name", ",", "proc", "in", "self", ".", "pipeline", ":", "if", "hasattr", "(", "proc", ",", "\"begin_training\"", ")", ":", "kwargs", "=", "component_cfg", ".", "get", "(", "name", ",", "{", "}", ")", "kwargs", ".", "update", "(", "cfg", ")", "proc", ".", "begin_training", "(", "get_gold_tuples", ",", "pipeline", "=", "self", ".", "pipeline", ",", "sgd", "=", "self", ".", "_optimizer", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_optimizer" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
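A sketch of `begin_training` with a per-component config; the "tagger" component and the empty config dict are illustrative:

import spacy

nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("tagger"))
# Keys in component_cfg must match pipe names; each value is merged with **cfg
# and forwarded to that component's begin_training().
optimizer = nlp.begin_training(component_cfg={"tagger": {}})
# The returned optimizer is also stored on the pipeline for later update() calls.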
train
Language.resume_training
Continue training a pre-trained model. Create and return an optimizer, and initialize "rehearsal" for any pipeline component that has a .rehearse() method. Rehearsal is used to prevent models from "forgetting" their initialised "knowledge". To perform rehearsal, collect samples of text you want the models to retain performance on, and call nlp.rehearse() with a batch of Doc objects.
spacy/language.py
def resume_training(self, sgd=None, **cfg):
    """Continue training a pre-trained model.

    Create and return an optimizer, and initialize "rehearsal" for any pipeline
    component that has a .rehearse() method. Rehearsal is used to prevent
    models from "forgetting" their initialised "knowledge". To perform
    rehearsal, collect samples of text you want the models to retain performance
    on, and call nlp.rehearse() with a batch of Doc objects.
    """
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    for name, proc in self.pipeline:
        if hasattr(proc, "_rehearsal_model"):
            proc._rehearsal_model = deepcopy(proc.model)
    return self._optimizer
def resume_training(self, sgd=None, **cfg):
    """Continue training a pre-trained model.

    Create and return an optimizer, and initialize "rehearsal" for any pipeline
    component that has a .rehearse() method. Rehearsal is used to prevent
    models from "forgetting" their initialised "knowledge". To perform
    rehearsal, collect samples of text you want the models to retain performance
    on, and call nlp.rehearse() with a batch of Doc objects.
    """
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    for name, proc in self.pipeline:
        if hasattr(proc, "_rehearsal_model"):
            proc._rehearsal_model = deepcopy(proc.model)
    return self._optimizer
[ "Continue", "training", "a", "pre", "-", "trained", "model", "." ]
explosion/spaCy
python
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L569-L591
[ "def", "resume_training", "(", "self", ",", "sgd", "=", "None", ",", "*", "*", "cfg", ")", ":", "if", "cfg", ".", "get", "(", "\"device\"", ",", "-", "1", ")", ">=", "0", ":", "util", ".", "use_gpu", "(", "cfg", "[", "\"device\"", "]", ")", "if", "self", ".", "vocab", ".", "vectors", ".", "data", ".", "shape", "[", "1", "]", ">=", "1", ":", "self", ".", "vocab", ".", "vectors", ".", "data", "=", "Model", ".", "ops", ".", "asarray", "(", "self", ".", "vocab", ".", "vectors", ".", "data", ")", "link_vectors_to_models", "(", "self", ".", "vocab", ")", "if", "self", ".", "vocab", ".", "vectors", ".", "data", ".", "shape", "[", "1", "]", ":", "cfg", "[", "\"pretrained_vectors\"", "]", "=", "self", ".", "vocab", ".", "vectors", ".", "name", "if", "sgd", "is", "None", ":", "sgd", "=", "create_default_optimizer", "(", "Model", ".", "ops", ")", "self", ".", "_optimizer", "=", "sgd", "for", "name", ",", "proc", "in", "self", ".", "pipeline", ":", "if", "hasattr", "(", "proc", ",", "\"_rehearsal_model\"", ")", ":", "proc", ".", "_rehearsal_model", "=", "deepcopy", "(", "proc", ".", "model", ")", "return", "self", ".", "_optimizer" ]
8ee4100f8ffb336886208a1ea827bf4c745e2709
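A sketch pairing `resume_training` with a follow-up update; the model name and training text are assumptions:

import spacy

nlp = spacy.load("en_core_web_sm")  # example model name, not part of the record
optimizer = nlp.resume_training()
# Components with a _rehearsal_model attribute now hold a deep copy of their
# weights, which nlp.rehearse() later uses as the "initial" predictions.
nlp.update([nlp.make_doc("New training text")], [{"entities": []}], sgd=optimizer)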