Column schema:

| column | dtype | values |
|---|---|---|
| repo | string | lengths 2–152 (has nulls) |
| file | string | lengths 15–239 |
| code | string | lengths 0–58.4M |
| file_length | int64 | 0–58.4M |
| avg_line_length | float64 | 0–1.81M |
| max_line_length | int64 | 0–12.7M |
| extension_type | string | 364 classes |

Each record below is one row of the dataset, shown as **repo | file**, followed by the (truncated) `code` field and a closing `[file_length | avg_line_length | max_line_length | extension_type]` line.
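All three numeric columns are derivable from `code`. A minimal sketch of recomputing them for one row, assuming the dump loads with the `datasets` library (the dataset identifier below is a placeholder, not the real one):

```python
from datasets import load_dataset

def line_stats(code: str) -> dict:
    # Treat an empty file as one empty line so the division and max() are safe.
    lines = code.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return {
        "file_length": len(code),  # total characters in the file
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
    }

ds = load_dataset("user/code-dump", split="train")  # placeholder name
row = ds[0]
print(row["repo"], row["file"], line_stats(row["code"]))
```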
**spaCy | spaCy-master/spacy/lang/fr/punctuation.py**
from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
merge_chars,
)
from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES
ELISION = "' ’".replace(" ", "")
HYPHENS = r"- – — ‐ ‑".replace(" ... | 1,452 | 24.491228 | 68 | py |
spaCy | spaCy-master/spacy/lang/fr/stop_words.py | STOP_WORDS = set(
"""
a à â abord afin ah ai aie ainsi ait allaient allons
alors anterieur anterieure anterieures antérieur antérieure antérieures
apres après as assez attendu au
aupres auquel aura auraient aurait auront
aussi autre autrement autres autrui aux auxquelles auxquels avaient
avais avait avant avec avoi...
[file_length 3,403 | avg_line_length 39.047059 | max_line_length 96 | extension_type py]

**spaCy | spaCy-master/spacy/lang/fr/syntax_iterators.py**
from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 3,124 | 35.337209 | 88 | py |
spaCy | spaCy-master/spacy/lang/fr/tokenizer_exceptions.py | import re
from ...symbols import ORTH
from ...util import update_exc
from ..char_classes import ALPHA, ALPHA_LOWER
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .punctuation import ELISION, HYPHENS
# not using the large _tokenizer_exceptions_list by default as it slows down the tokenizer
# from ._tokenizer_...
[file_length 11,174 | avg_line_length 24.168919 | max_line_length 118 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ga/__init__.py**
from typing import Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import IrishLemmatizer
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class IrishDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
...
[file_length 819 | avg_line_length 23.117647 | max_line_length 82 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ga/lemmatizer.py**
from typing import Dict, List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class IrishLemmatizer(Lemmatizer):
# This is a lookup-based lemmatiser using data extracted from
# BuNaMo (https://github.com/michmech/BuNaMo)
@classmethod
    def get_lookups_config(cls, mode: str) -> Tu...
[file_length 4,889 | avg_line_length 29 | max_line_length 80 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ga/stop_words.py**
STOP_WORDS = set(
"""
a ach ag agus an aon ar arna as
ba beirt bhúr
caoga ceathair ceathrar chomh chuig chun cois céad cúig cúigear
daichead dar de deich deichniúr den dhá do don dtí dá dár dó
faoi faoin faoina faoinár fara fiche
gach gan go gur
haon hocht
i iad idir in ina ins inár is
le leis lena lenár
m...
[file_length 567 | avg_line_length 11.909091 | max_line_length 66 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ga/tokenizer_exceptions.py**
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"'acha'n": [{ORTH: "'ach", NORM: "gach"}, {ORTH: "a'n", NORM: "aon"}],
"dem'": [{ORTH: "de", NORM: "de"}, {ORTH: "m'", NORM: "mo"}],
"ded'": [{ORTH: "de", NORM: "de"}, {ORTH: "d'", N... | 1,868 | 24.958333 | 74 | py |
spaCy | spaCy-master/spacy/lang/grc/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class AncientGreekDefaults(BaseDefaults):
    tokenizer_exception...
[file_length 620 | avg_line_length 26 | max_line_length 82 | extension_type py]

**spaCy | spaCy-master/spacy/lang/grc/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.grc.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"ἐρᾷ μὲν ἁγνὸς οὐρανὸς τρῶσαι χθόνα, ἔρως δὲ γαῖαν λαμβάνει γάμου τυχεῖν·",
"εὐδαίμων Χαρίτων καὶ Μελάνιππος ἔφυ, θείας ἁγητῆρες ἐφαμερίοις φιλότατος... | 650 | 35.166667 | 110 | py |
spaCy | spaCy-master/spacy/lang/grc/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
# CARDINALS
"εἷς",
"ἑνός",
"ἑνί",
"ἕνα",
"μία",
"μιᾶς",
"μιᾷ",
"μίαν",
"ἕν",
"δύο",
"δυοῖν",
"τρεῖς",
"τριῶν",
"τρισί",
"τρία",
"τέτταρες",
"τεττάρων",
"τέτταρσι",
"τέτταρα",
"τέτταρας",
    ...
[file_length 4,587 | avg_line_length 13.611465 | max_line_length 36 | extension_type py]

**spaCy | spaCy-master/spacy/lang/grc/punctuation.py**
from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
)
_prefixes = (
[
"†",
"⸏",
]
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
    + LIST_CURREN...
[file_length 1,063 | avg_line_length 18 | max_line_length 68 | extension_type py]

**spaCy | spaCy-master/spacy/lang/grc/stop_words.py**
STOP_WORDS = set(
"""
αὐτῷ αὐτοῦ αὐτῆς αὐτόν αὐτὸν αὐτῶν αὐτὸς αὐτὸ αὐτό αὐτός αὐτὴν αὐτοῖς αὐτοὺς αὔτ' αὐτὰ αὐτῇ αὐτὴ
αὐτὼ αὑταὶ καὐτὸς αὐτά αὑτός αὐτοῖσι αὐτοῖσιν αὑτὸς αὐτήν αὐτοῖσί αὐτοί αὐτοὶ αὐτοῖο αὐτάων αὐτὰς
αὐτέων αὐτώ αὐτάς αὐτούς αὐτή αὐταί αὐταὶ αὐτῇσιν τὠυτῷ τὠυτὸ ταὐτὰ ταύτῃ αὐτῇσι αὐτῇς αὐταῖς αὐτᾶς...
[file_length 4,757 | avg_line_length 75.741935 | max_line_length 157 | extension_type py]

**spaCy | spaCy-master/spacy/lang/grc/tokenizer_exceptions.py**
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for token in ["᾽Απ'", "᾽ΑΠ'", "ἀφ'", "᾽Αφ", "ἀπὸ"]:
_exc[token] = [{ORTH: token, NORM: "από"}]
for token in ["᾽Αλλ'", "ἀλλ'", "ἀλλὰ"]:
_exc[token] = [{ORTH: token, NORM: "ἀλλά"}]
for...
[file_length 5,395 | avg_line_length 47.178571 | max_line_length 80 | extension_type py]

**spaCy | spaCy-master/spacy/lang/gu/__init__.py**
from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class GujaratiDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Gujarati(Language):
lang = "gu"
Defaults = GujaratiDefaults
__all__ = ["Gujarati"]
[file_length 251 | avg_line_length 15.8 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/gu/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.gu.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"લોકશાહી એ સરકારનું એક એવું તંત્ર છે જ્યાં નાગરિકો મત દ્વારા સત્તાનો ઉપયોગ કરે છે.",
"તે ગુજરાત રાજ્યના ધરમપુર શહેરમાં આવેલું હતું",
"કર્ણદેવ પહેલ... | 595 | 30.368421 | 88 | py |
spaCy | spaCy-master/spacy/lang/gu/stop_words.py | STOP_WORDS = set(
"""
એમ
આ
એ
રહી
છે
છો
હતા
હતું
હતી
હોય
હતો
શકે
તે
તેના
તેનું
તેને
તેની
તેઓ
તેમને
તેમના
તેમણે
તેમનું
તેમાં
અને
અહીં
થી
થઈ
થાય
જે
ને
કે
ના
ની
નો
ને
નું
શું
માં
પણ
પર
જેવા
જેવું
જાય
જેમ
જેથી
માત્ર
માટે
પરથી
આવ્યું
એવી
આવી
રીતે
સુધી
થાય
થઈ
સાથે
લાગે
હોવા
છતાં
રહેલા
કરી
કરે
કેટલા
કોઈ
કેમ
કર્યો
કર્યુ
કર...
[file_length 418 | avg_line_length 3.707865 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/he/__init__.py**
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class HebrewDefaults(BaseDefaults):
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
class Hebrew(Language)...
[file_length 391 | avg_line_length 20.777778 | max_line_length 81 | extension_type py]

**spaCy | spaCy-master/spacy/lang/he/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.he.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"סין מקימה קרן של 440 מיליון דולר להשקעה בהייטק בישראל",
'רה"מ הודיע כי יחרים טקס בחסותו',
"הכנסת צפויה לאשר איכון אוטומטי של שיחות למוקד 100",
...
[file_length 676 | avg_line_length 26.08 | max_line_length 60 | extension_type py]

**spaCy | spaCy-master/spacy/lang/he/lex_attrs.py**
from ...attrs import LIKE_NUM
_num_words = [
"אפס",
"אחד",
"אחת",
"שתיים",
"שתים",
"שניים",
"שנים",
"שלוש",
"שלושה",
"ארבע",
"ארבעה",
"חמש",
"חמישה",
"שש",
"שישה",
"שבע",
"שבעה",
"שמונה",
"תשע",
"תשעה",
"עשר",
"עשרה",
"אחד עשר"... | 1,426 | 13.864583 | 49 | py |
spaCy | spaCy-master/spacy/lang/he/stop_words.py | STOP_WORDS = set(
"""
אני
את
אתה
אנחנו
אתן
אתם
הם
הן
היא
הוא
שלי
שלו
שלך
שלה
שלנו
שלכם
שלכן
שלהם
שלהן
לי
לו
לה
לנו
לכם
לכן
להם
להן
אותה
אותו
זה
זאת
אלה
אלו
תחת
מתחת
מעל
בין
עם
עד
על
אל
מול
של
אצל
כמו
אחר
אותו
בלי
לפני
אחרי
מאחורי
עלי
עליו
עליה
עליך
עלינו
עליכם
עליכן
עליהם
עליהן
כל
כולם
כולן
כך
ככה
כזה
כזאת
זה
אותי
...
[file_length 1,061 | avg_line_length 3.762332 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hi/__init__.py**
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class HindiDefaults(BaseDefaults):
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Hindi(Language):
lang = "hi"
Defaults = HindiDefaults
__all__ = ["Hindi"]
[file_length 305 | avg_line_length 17 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hi/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"एप्पल 1 अरब डॉलर के लिए यू.के. स्टार्टअप खरीदने पर विचार कर रहा है।",
"स्वायत्त कारें निर्माताओं की ओर बीमा दायित्व रखतीं हैं।",
"सैन फ्रांसिस्को... | 726 | 33.619048 | 107 | py |
spaCy | spaCy-master/spacy/lang/hi/lex_attrs.py | from ...attrs import LIKE_NUM, NORM
from ..norm_exceptions import BASE_NORMS
# fmt: off
_stem_suffixes = [
["ो", "े", "ू", "ु", "ी", "ि", "ा"],
["कर", "ाओ", "िए", "ाई", "ाए", "ने", "नी", "ना", "ते", "ीं", "ती", "ता", "ाँ", "ां", "ों", "ें"],
["ाकर", "ाइए", "ाईं", "ाया", "ेगी", "ेगा", "ोगी", "ोगे", "ाने", "... | 4,095 | 20.671958 | 149 | py |
spaCy | spaCy-master/spacy/lang/hi/stop_words.py | # Source: https://github.com/taranjeet/hindi-tokenizer/blob/master/stopwords.txt, https://data.mendeley.com/datasets/bsr3frvvjc/1#file-a21d5092-99d7-45d8-b044-3ae9edd391c6
STOP_WORDS = set(
"""
अंदर
अत
अदि
अप
अपना
अपनि
अपनी
अपने
अभि
अभी
अंदर
आदि
आप
अगर
इंहिं
इंहें
इंहों
इतयादि
इत्यादि
इन
इनका
इन्हीं
इन्हें
इन्हों
...
[file_length 1,289 | avg_line_length 4.375 | max_line_length 171 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hr/__init__.py**
from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class CroatianDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Croatian(Language):
lang = "hr"
Defaults = CroatianDefaults
__all__ = ["Croatian"]
[file_length 251 | avg_line_length 15.8 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hr/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Ovo je rečenica.",
"Kako se popravlja auto?",
"Zagreb je udaljen od Ljubljane svega 150 km.",
"Nećete vjerovati što se dogodilo na ovogodišnje... | 483 | 29.25 | 82 | py |
spaCy | spaCy-master/spacy/lang/hr/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-hr
STOP_WORDS = set(
"""
a
ah
aha
aj
ako
al
ali
arh
au
avaj
bar
baš
bez
bi
bih
bijah
bijahu
bijaše
bijasmo
bijaste
bila
bili
bilo
bio
bismo
biste
biti
brr
buć
budavši
bude
budimo
budite
budu
budući
bum
bumo
će
ćemo
ćeš
ćete
čijem
čijim
čijima
ću
da
daj
dakle
de
d...
[file_length 1,936 | avg_line_length 4.614493 | max_line_length 55 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hsb/__init__.py**
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class UpperSorbianDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
    tokenizer_exceptions = TOKENIZER_EXCEPTIO...
[file_length 437 | avg_line_length 22.052632 | max_line_length 54 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hsb/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hsb.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"To běšo wjelgin raźone a jo se wót luźi derje pśiwzeło. Tak som dožywiła wjelgin",
"Jogo pśewóźowarce stej groniłej, až how w serbskich stronach nja... | 608 | 37.0625 | 114 | py |
spaCy | spaCy-master/spacy/lang/hsb/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nul",
"jedyn",
"jedna",
"jedne",
"dwaj",
"dwě",
"tři",
"třo",
"štyri",
"štyrjo",
"pjeć",
"šěsć",
"sydom",
"wosom",
"dźewjeć",
"dźesać",
"jědnaće",
"dwanaće",
"třinaće",
"štyrnaće",
"pjatnać... | 1,716 | 15.046729 | 49 | py |
spaCy | spaCy-master/spacy/lang/hsb/stop_words.py | STOP_WORDS = set(
"""
a abo ale ani
dokelž
hdyž
jeli jelizo
kaž
pak potom
tež tohodla
zo zoby
""".split()
)
[file_length 119 | avg_line_length 5 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hsb/tokenizer_exceptions.py**
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = dict()
for exc_data in [
{ORTH: "mil.", NORM: "milion"},
{ORTH: "wob.", NORM: "wobydler"},
]:
_exc[exc_data[ORTH]] = [exc_data]
for orth in [
"resp.",
]:
    _exc[orth] = [{ORTH: ...
[file_length 386 | avg_line_length 19.368421 | max_line_length 56 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hu/__init__.py**
from ...language import BaseDefaults, Language
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS
class HungarianDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTION...
[file_length 584 | avg_line_length 25.590909 | max_line_length 82 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hu/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hu.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Az Apple egy brit startup vásárlását tervezi 1 milliárd dollár értékben.",
"San Francisco vezetése mérlegeli a járdát használó szállító robotok betil... | 384 | 26.5 | 86 | py |
spaCy | spaCy-master/spacy/lang/hu/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_ICONS,
CONCAT_QUOTES,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
# removing ° from the special icons to keep e.g. 99° as one token
_concat_icons = CONCAT_ICONS.replace("\u00B0", "")
_currency = r"\$¢£€¥฿"
_q...
[file_length 1,494 | avg_line_length 23.112903 | max_line_length 75 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hu/stop_words.py**
STOP_WORDS = set(
"""
a abban ahhoz ahogy ahol aki akik akkor akár alatt amely amelyek amelyekben
amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az
azok azon azonban azt aztán azután azzal azért
be belül benne bár
cikk cikkek cikkeket csak
de
e ebben eddig egy egyes egyetlen egyik egyr...
[file_length 1,309 | avg_line_length 19.793651 | max_line_length 77 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hu/tokenizer_exceptions.py**
import re
from ...symbols import ORTH
from ...util import update_exc
from ..punctuation import ALPHA_LOWER, CURRENCY
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for orth in [
"-e",
"A.",
"AG.",
"AkH.",
"Aö.",
"B.",
"B.CS.",
"B.S.",
"B.Sc.",
"B.ú.é.k.",
"BE... | 8,299 | 11.671756 | 78 | py |
spaCy | spaCy-master/spacy/lang/hy/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class ArmenianDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Armenian(Language):
lang = "hy"
Defaults = ArmenianDefaults
__all__ = ["Armenian"]
[file_length 317 | avg_line_length 17.705882 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/hy/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.hy.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Լոնդոնը Միացյալ Թագավորության մեծ քաղաք է։",
"Ո՞վ է Ֆրանսիայի նախագահը։",
"Ո՞րն է Միացյալ Նահանգների մայրաքաղաքը։",
"Ե՞րբ է ծնվել Բարաք Օբաման... | 326 | 22.357143 | 56 | py |
spaCy | spaCy-master/spacy/lang/hy/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"զրո",
"մեկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"վեց",
"յոթ",
"ութ",
"ինը",
"տասը",
"տասնմեկ",
"տասներկու",
"տասներեք",
"տասնչորս",
"տասնհինգ",
"տասնվեց",
"տասնյոթ",
"տասնութ",
"տասնինը",
"քս... | 953 | 15.736842 | 49 | py |
spaCy | spaCy-master/spacy/lang/hy/stop_words.py | STOP_WORDS = set(
"""
նա
ողջը
այստեղ
ենք
նա
էիր
որպես
ուրիշ
բոլորը
այն
այլ
նույնչափ
էի
մի
և
ողջ
ես
ոմն
հետ
նրանք
ամենքը
ըստ
ինչ-ինչ
այսպես
համայն
մի
նաև
նույնքան
դա
ովևէ
համար
այնտեղ
էին
որոնք
սույն
ինչ-որ
ամենը
նույնպիսի
ու
իր
որոշ
միևնույն
ի
այնպիսի
մենք
ամեն ոք
նույն
երբևէ
այն
որևէ
ին
այդպես
նրա
որը
վրա
դու
էինք...
[file_length 607 | avg_line_length 4.62963 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/id/__init__.py**
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class IndonesianDe...
[file_length 698 | avg_line_length 26.96 | max_line_length 82 | extension_type py]

**spaCy | spaCy-master/spacy/lang/id/_tokenizer_exceptions_list.py**
ID_BASE_EXCEPTIONS = set(
"""
aba-aba
abah-abah
abal-abal
abang-abang
abar-abar
abong-abong
abrit-abrit
abrit-abritan
abu-abu
abuh-abuhan
abuk-abuk
abun-abun
acak-acak
acak-acakan
acang-acang
acap-acap
aci-aci
aci-acian
aci-acinya
aco-acoan
ad-blocker
ad-interim
ada-ada
ada-adanya
ada-adanyakah
adang-adang
adap-ada...
[file_length 53,599 | avg_line_length 12.733026 | max_line_length 25 | extension_type py]

**spaCy | spaCy-master/spacy/lang/id/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.id.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Indonesia merupakan negara kepulauan yang kaya akan budaya.",
"Berapa banyak warga yang dibutuhkan saat kerja bakti?",
"Penyaluran pupuk berasal ... | 726 | 37.263158 | 127 | py |
spaCy | spaCy-master/spacy/lang/id/lex_attrs.py | import unicodedata
from ...attrs import IS_CURRENCY, LIKE_NUM
from .punctuation import LIST_CURRENCY
_num_words = [
"nol",
"satu",
"dua",
"tiga",
"empat",
"lima",
"enam",
"tujuh",
"delapan",
"sembilan",
"sepuluh",
"sebelas",
"belas",
"puluh",
"ratus",
"r... | 1,275 | 18.044776 | 58 | py |
spaCy | spaCy-master/spacy/lang/id/punctuation.py | from ..char_classes import ALPHA, _currency, _units, merge_chars, split_chars
from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
_units = (
_units + "s bit Gbps Mbps mbps Kbps kbps ƒ ppi px "
"Hz kHz MHz GHz mAh "
"ratus rb ribu ribuan "
"juta jt jutaan mill?iar million... | 2,131 | 33.95082 | 83 | py |
spaCy | spaCy-master/spacy/lang/id/stop_words.py | STOP_WORDS = set(
"""
ada adalah adanya adapun agak agaknya agar akan akankah akhir akhiri akhirnya
aku akulah amat amatlah anda andalah antar antara antaranya apa apaan apabila
apakah apalagi apatah artinya asal asalkan atas atau ataukah ataupun awal
awalnya
bagai bagaikan bagaimana bagaimanakah bagaimanapun bagi...
[file_length 6,507 | avg_line_length 53.689076 | max_line_length 79 | extension_type py]

**spaCy | spaCy-master/spacy/lang/id/syntax_iterators.py**
from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 1,538 | 35.642857 | 89 | py |
spaCy | spaCy-master/spacy/lang/id/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS
# Daftar singkatan dan Akronim dari:
# https://id.wiktionary.org/wiki/Wiktionary:Daftar_singkatan_dan_akronim_bahasa_Indonesia#A
_exc = {}
for...
[file_length 4,204 | avg_line_length 18.027149 | max_line_length 91 | extension_type py]

**spaCy | spaCy-master/spacy/lang/is/__init__.py**
from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class IcelandicDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Icelandic(Language):
lang = "is"
Defaults = IcelandicDefaults
__all__ = ["Icelandic"]
[file_length 255 | avg_line_length 16.066667 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/is/stop_words.py**
# Source: https://github.com/Xangis/extra-stopwords
STOP_WORDS = set(
"""
afhverju
aftan
aftur
afþví
aldrei
allir
allt
alveg
annað
annars
bara
dag
eða
eftir
eiga
einhver
einhverjir
einhvers
eins
einu
eitthvað
ekkert
ekki
ennþá
eru
fara
fer
finna
fjöldi
fólk
framan
frá
frekar
fyrir
gegnum
geta
getur
gmg
gott
hann
h...
[file_length 938 | avg_line_length 4.90566 | max_line_length 51 | extension_type py]

**spaCy | spaCy-master/spacy/lang/it/__init__.py**
from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import ItalianLemmatizer
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_ex...
[file_length 1,230 | avg_line_length 23.137255 | max_line_length 77 | extension_type py]

**spaCy | spaCy-master/spacy/lang/it/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.it.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple vuole comprare una startup del Regno Unito per un miliardo di dollari",
"Le automobili a guida autonoma spostano la responsabilità assicurativa... | 468 | 30.266667 | 96 | py |
spaCy | spaCy-master/spacy/lang/it/lemmatizer.py | from typing import Dict, List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class ItalianLemmatizer(Lemmatizer):
"""This lemmatizer was adapted from the Polish one (version of April 2021).
It implements lookup lemmatization based on the morphological lexicon
    morph-it (Baroni and Z...
[file_length 4,615 | avg_line_length 33.706767 | max_line_length 79 | extension_type py]

**spaCy | spaCy-master/spacy/lang/it/punctuation.py**
from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
ELISION = "'’"
_prefixes = [r"'[0-9][0-9]", r"[0-9]+°"] + BASE_TOKENIZER_PREFIXES
_infixes = (
    L...
[file_length 850 | avg_line_length 23.314286 | max_line_length 82 | extension_type py]

**spaCy | spaCy-master/spacy/lang/it/stop_words.py**
STOP_WORDS = set(
"""
a abbastanza abbia abbiamo abbiano abbiate accidenti ad adesso affinche agl
agli ahime ahimè ai al alcuna alcuni alcuno all alla alle allo allora altri
altrimenti altro altrove altrui anche ancora anni anno ansa anticipo assai
attesa attraverso avanti avemmo avendo avente aver avere averlo ave...
[file_length 4,094 | avg_line_length 47.75 | max_line_length 90 | extension_type py]

**spaCy | spaCy-master/spacy/lang/it/syntax_iterators.py**
from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 3,137 | 35.068966 | 85 | py |
spaCy | spaCy-master/spacy/lang/it/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"all'art.": [{ORTH: "all'"}, {ORTH: "art."}],
"dall'art.": [{ORTH: "dall'"}, {ORTH: "art."}],
"dell'art.": [{ORTH: "dell'"}, {ORTH: "art."}],
"L'art.": [{ORTH: "L'"}, {ORTH: "art."}],
    ...
[file_length 1,159 | avg_line_length 16.846154 | max_line_length 56 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/__init__.py**
import re
from collections import namedtuple
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import srsly
from thinc.api import Model
from ... import util
from ...errors import Errors
from ...language import BaseDefaults, Language
from ...pipeline import Morphologizer
from ...pipeline...
[file_length 12,609 | avg_line_length 35.763848 | max_line_length 99 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ja.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"アップルがイギリスの新興企業を10億ドルで購入を検討",
"自動運転車の損害賠償責任、自動車メーカーに一定の負担を求める",
"歩道を走る自動配達ロボ、サンフランシスコ市が走行禁止を検討",
"ロンドンはイギリスの大都市です。",
]
[file_length 297 | avg_line_length 18.866667 | max_line_length 56 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/stop_words.py**
# This list was created by taking the top 2000 words from a Wikipedia dump and
# filtering out everything that wasn't hiragana. ー (one) was also added.
# Considered keeping some non-hiragana words but too many place names were
# present.
STOP_WORDS = set(
"""
あ あっ あまり あり ある あるいは あれ
い いい いう いく いずれ いっ いつ いる いわ
うち
え
お...
[file_length 730 | avg_line_length 13.918367 | max_line_length 78 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/syntax_iterators.py**
from typing import Iterator, Set, Tuple, Union
from ...symbols import NOUN, PRON, PROPN, VERB
from ...tokens import Doc, Span
# TODO: this can probably be pruned a bit
# fmt: off
labels = ["nsubj", "nmod", "ddoclike", "nsubjpass", "pcomp", "pdoclike", "doclike", "obl", "dative", "appos", "attr", "ROOT"]
# fmt: on
d...
[file_length 1,638 | avg_line_length 38.02381 | max_line_length 125 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/tag_bigram_map.py**
from ...symbols import ADJ, AUX, NOUN, PART, VERB
# mapping from tag bi-gram to pos of previous token
TAG_BIGRAM_MAP = {
# This covers only small part of AUX.
("形容詞-非自立可能", "助詞-終助詞"): (AUX, None),
("名詞-普通名詞-形状詞可能", "助動詞"): (ADJ, None),
# ("副詞", "名詞-普通名詞-形状詞可能"): (None, ADJ),
    # This covers acl, advc...
[file_length 1,137 | avg_line_length 38.241379 | max_line_length 77 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/tag_map.py**
from ...symbols import (
ADJ,
ADP,
ADV,
AUX,
CCONJ,
DET,
INTJ,
NOUN,
NUM,
PART,
POS,
PRON,
PROPN,
PUNCT,
SCONJ,
SPACE,
SYM,
VERB,
)
TAG_MAP = {
# Explanation of Unidic tags:
    # https://www.gavo.t.u-tokyo.ac.jp/~mine/japanese/nlp+slp/UNIDIC_...
[file_length 3,001 | avg_line_length 33.906977 | max_line_length 113 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ja/tag_orth_map.py**
from ...symbols import DET, PART, PRON, SPACE, X
# mapping from tag bi-gram to pos of previous token
TAG_ORTH_MAP = {
"空白": {" ": SPACE, " ": X},
"助詞-副助詞": {"たり": PART},
"連体詞": {
"あの": DET,
"かの": DET,
"この": DET,
"その": DET,
"どの": DET,
"彼の": DET,
"此の": ... | 458 | 18.956522 | 51 | py |
spaCy | spaCy-master/spacy/lang/kn/__init__.py | from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class KannadaDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Kannada(Language):
lang = "kn"
Defaults = KannadaDefaults
__all__ = ["Kannada"]
[file_length 247 | avg_line_length 15.533333 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/kn/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.en.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"ಆಪಲ್ ಒಂದು ಯು.ಕೆ. ಸ್ಟಾರ್ಟ್ಅಪ್ ಅನ್ನು ೧ ಶತಕೋಟಿ ಡಾಲರ್ಗಳಿಗೆ ಖರೀದಿಸಲು ನೋಡುತ್ತಿದೆ.",
"ಸ್ವಾಯತ್ತ ಕಾರುಗಳು ವಿಮಾ ಹೊಣೆಗಾರಿಕೆಯನ್ನು ತಯಾರಕರ ಕಡೆಗೆ ಬದಲಾಯಿಸುತ್ತವೆ.",
    ...
[file_length 585 | avg_line_length 29.842105 | max_line_length 89 | extension_type py]

**spaCy | spaCy-master/spacy/lang/kn/stop_words.py**
STOP_WORDS = set(
"""
ಹಲವು
ಮೂಲಕ
ಹಾಗೂ
ಅದು
ನೀಡಿದ್ದಾರೆ
ಯಾವ
ಎಂದರು
ಅವರು
ಈಗ
ಎಂಬ
ಹಾಗಾಗಿ
ಅಷ್ಟೇ
ನಾವು
ಇದೇ
ಹೇಳಿ
ತಮ್ಮ
ಹೀಗೆ
ನಮ್ಮ
ಬೇರೆ
ನೀಡಿದರು
ಮತ್ತೆ
ಇದು
ಈ
ನೀವು
ನಾನು
ಇತ್ತು
ಎಲ್ಲಾ
ಯಾವುದೇ
ನಡೆದ
ಅದನ್ನು
ಎಂದರೆ
ನೀಡಿದೆ
ಹೀಗಾಗಿ
ಜೊತೆಗೆ
ಇದರಿಂದ
ನನಗೆ
ಅಲ್ಲದೆ
ಎಷ್ಟು
ಇದರ
ಇಲ್ಲ
ಕಳೆದ
ತುಂಬಾ
ಈಗಾಗಲೇ
ಮಾಡಿ
ಅದಕ್ಕೆ
ಬಗ್ಗೆ
ಅವರ
ಇದನ್ನು
ಆ
ಇದೆ
ಹೆಚ್ಚು
ಇನ್ನು
ಎಲ್ಲ
ಇ...
[file_length 499 | avg_line_length 4.747126 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ko/__init__.py**
from typing import Any, Dict, Iterator
from ...language import BaseDefaults, Language
from ...scorer import Scorer
from ...symbols import POS, X
from ...tokens import Doc
from ...training import validate_examples
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_at...
[file_length 4,230 | avg_line_length 32.314961 | max_line_length 88 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ko/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ko.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"애플이 영국의 스타트업을 10억 달러에 인수하는 것을 알아보고 있다.",
"자율주행 자동차의 손해 배상 책임이 제조 업체로 옮겨 가다",
"샌프란시스코 시가 자동 배달 로봇의 보도 주행 금지를 검토 중이라고 합니다.",
"런던은 영국의 수도이자 가장 큰 ... | 331 | 22.714286 | 56 | py |
spaCy | spaCy-master/spacy/lang/ko/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"영",
"공",
# Native Korean number system
"하나",
"둘",
"셋",
"넷",
"다섯",
"여섯",
"일곱",
"여덟",
"아홉",
"열",
"스물",
"서른",
"마흔",
"쉰",
"예순",
"일흔",
"여든",
"아흔",
# Sino-Korean number system
"일",
"이... | 934 | 13.609375 | 56 | py |
spaCy | spaCy-master/spacy/lang/ko/punctuation.py | from ..char_classes import LIST_QUOTES
from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES
_infixes = (
["·", "ㆍ", r"\(", r"\)"]
+ [r"(?<=[0-9])~(?=[0-9-])"]
+ LIST_QUOTES
+ BASE_TOKENIZER_INFIXES
)
TOKENIZER_INFIXES = _infixes
[file_length 264 | avg_line_length 21.083333 | max_line_length 69 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ko/stop_words.py**
STOP_WORDS = set(
"""
이
있
하
것
들
그
되
수
이
보
않
없
나
주
아니
등
같
때
년
가
한
지
오
말
일
그렇
위하
때문
그것
두
말하
알
그러나
받
못하
일
그런
또
더
많
그리고
좋
크
시키
그러
하나
살
데
안
어떤
번
나
다른
어떻
들
이렇
점
싶
말
좀
원
잘
놓
""".split()
)
[file_length 185 | avg_line_length 1.735294 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ko/tag_map.py**
from ...symbols import (
ADJ,
ADP,
ADV,
AUX,
CONJ,
DET,
INTJ,
NOUN,
NUM,
POS,
PRON,
PROPN,
PUNCT,
SYM,
VERB,
X,
)
# 은전한닢(mecab-ko-dic)의 품사 태그를 universal pos tag로 대응시킴
# https://docs.google.com/spreadsheets/d/1-9blXKjtjeKZqsf4NzHeYJCrr49-nXeRF6D80udfcwY/ed...
[file_length 1,751 | avg_line_length 23 | max_line_length 104 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ky/__init__.py**
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class KyrgyzDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = TOKENIZ...
[file_length 487 | avg_line_length 22.238095 | max_line_length 54 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ky/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ky.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple Улуу Британия стартабын $1 миллиардга сатып алууну көздөөдө.",
"Автоном автомобилдерди камсыздоо жоопкерчилиги өндүрүүчүлөргө артылды.",
"Сан... | 599 | 34.294118 | 86 | py |
spaCy | spaCy-master/spacy/lang/ky/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нөл",
"ноль",
"бир",
"эки",
"үч",
"төрт",
"беш",
"алты",
"жети",
"сегиз",
"тогуз",
"он",
"жыйырма",
"отуз",
"кырк",
"элүү",
"алтымыш",
"жетмиш",
"сексен",
"токсон",
"жүз",
"миң",
"м... | 800 | 15.346939 | 49 | py |
spaCy | spaCy-master/spacy/lang/ky/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=AL... | 855 | 28.517241 | 81 | py |
spaCy | spaCy-master/spacy/lang/ky/stop_words.py | STOP_WORDS = set(
"""
ага адам айтты айтымында айтып ал алар
алардын алган алуу алып анда андан аны
анын ар
бар басма баш башка башкы башчысы берген
биз билдирген билдирди бир биринчи бирок
бишкек болгон болот болсо болуп боюнча
буга бул
гана
да дагы деген деди деп
жана жатат жаткан жаңы же жогорку жок жол
жолу...
[file_length 607 | avg_line_length 13.139535 | max_line_length 41 | extension_type py]

**spaCy | spaCy-master/spacy/lang/ky/tokenizer_exceptions.py**
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
_abbrev_exc = [
# Weekdays abbreviations
{ORTH: "дүй", NORM: "дүйшөмбү"},
{ORTH: "шей", NORM: "шейшемби"},
{ORTH: "шар", NORM: "шаршемби"},
{ORTH: "бей", NORM: "бейшемби"},... | 1,736 | 31.166667 | 74 | py |
spaCy | spaCy-master/spacy/lang/la/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class LatinDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    stop_words = S...
[file_length 495 | avg_line_length 22.619048 | max_line_length 54 | extension_type py]

**spaCy | spaCy-master/spacy/lang/la/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.la.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# > Caes. BG 1.1
# > Cic. De Amic. 1
# > V. Georg. 1.1-5
# > Gen. 1:1
# > Galileo, Sid. Nunc.
# > van Schurman, Opusc. arg. 1
sentences = [
"Gallia est omnis divisa in ... | 1,146 | 48.869565 | 209 | py |
spaCy | spaCy-master/spacy/lang/la/lex_attrs.py | import re
from ...attrs import LIKE_NUM
# cf. Goyvaerts/Levithan 2009; case-insensitive, allow 4
roman_numerals_compile = re.compile(
r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)
_num_words = """unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem undecim du... | 2,372 | 66.8 | 1,111 | py |
spaCy | spaCy-master/spacy/lang/la/stop_words.py | # Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin
STOP_WORDS = set(
"""
ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
cum cur
de deinde dum
ego enim ergo es est et etiam etsi ex
fio
haud hic
iam idem igitur ille in infra inter interim ipse is...
[file_length 619 | avg_line_length 15.315789 | max_line_length 102 | extension_type py]

**spaCy | spaCy-master/spacy/lang/la/syntax_iterators.py**
from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import AUX, NOUN, PRON, PROPN, VERB
from ...tokens import Doc, Span
# NB: Modified from da on suggestion from https://github.com/explosion/spaCy/issues/7457#issuecomment-800349751 [PJB]
def noun_chunks(doclike: Union[Doc, Span]) ...
[file_length 2,392 | avg_line_length 26.505747 | max_line_length 117 | extension_type py]

**spaCy | spaCy-master/spacy/lang/la/tokenizer_exceptions.py**
from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
## TODO: Look into systematically handling u/v
_exc = {
"mecum": [{ORTH: "me"}, {ORTH: "cum"}],
"tecum": [{ORTH: "te"}, {ORTH: "cum"}],
"nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
"vobiscum":... | 1,235 | 46.538462 | 489 | py |
spaCy | spaCy-master/spacy/lang/lb/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class LuxembourgishDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = ...
[file_length 515 | avg_line_length 23.571429 | max_line_length 54 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lb/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lb.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"An der Zäit hunn sech den Nordwand an d’Sonn gestridden, wie vun hinnen zwee wuel méi staark wier, wéi e Wanderer, deen an ee waarme Mantel agepak war, iw... | 880 | 54.0625 | 178 | py |
spaCy | spaCy-master/spacy/lang/lb/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = set(
"""
null eent zwee dräi véier fënnef sechs ziwen aacht néng zéng eelef zwielef dräizéng
véierzéng foffzéng siechzéng siwwenzéng uechtzeng uechzeng nonnzéng nongzéng zwanzeg drësseg véierzeg foffzeg sechzeg siechzeg siwenzeg achtzeg achzeg uechtzeg uechzeg nonnzeg
hon...
[file_length 1,308 | avg_line_length 30.926829 | max_line_length 175 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lb/punctuation.py**
from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, LIST_ELLIPSES, LIST_ICONS
ELISION = " ' ’ ".strip().replace(" ", "")
abbrev = ("d", "D")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=^[{ab}][{el}])(?=[{a}])".format(ab=abbrev, a=ALPHA, el=ELISION),
r"(?<=[{al}])\.(?=[{au}])... | 639 | 28.090909 | 85 | py |
spaCy | spaCy-master/spacy/lang/lb/stop_words.py | STOP_WORDS = set(
"""
a
à
äis
är
ärt
äert
ären
all
allem
alles
alleguer
als
also
am
an
anerefalls
ass
aus
awer
bei
beim
bis
bis
d'
dach
datt
däin
där
dat
de
dee
den
deel
deem
deen
deene
déi
den
deng
denger
dem
der
dësem
di
dir
do
da
dann
domat
dozou
drop
du
duerch
duerno
e
ee
em
een
eent
ë
en
ënner
ëm
ech
eis
eise
...
[file_length 1,088 | avg_line_length 4.136792 | max_line_length 17 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lb/tokenizer_exceptions.py**
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
# TODO
# treat other apostrophes within words as part of the word: [op d'mannst], [fir d'éischt] (= exceptions)
_exc = {}
# translate / delete what is not necessary
for exc_data in [
{ORTH: "’t", N... | 1,160 | 20.90566 | 104 | py |
spaCy | spaCy-master/spacy/lang/lg/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
class LugandaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
class Luganda(Language):
...
[file_length 388 | avg_line_length 19.473684 | max_line_length 46 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lg/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lg.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Mpa ebyafaayo ku byalo Nakatu ne Nkajja",
"Okuyita Ttembo kitegeeza kugwa ddalu",
"Ekifumu kino kyali kya mulimu ki?",
"Ekkovu we liyise wayit... | 520 | 27.944444 | 56 | py |
spaCy | spaCy-master/spacy/lang/lg/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nnooti", # Zero
"zeero", # zero
"emu", # one
"bbiri", # two
"ssatu", # three
"nnya", # four
"ttaano", # five
"mukaaga", # six
"musanvu", # seven
"munaana", # eight
"mwenda", # nine
"kkumi", # ten
"kkumi n'emu"... | 2,679 | 26.916667 | 49 | py |
spaCy | spaCy-master/spacy/lang/lg/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au...
[file_length 576 | avg_line_length 20.37037 | max_line_length 68 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lg/stop_words.py**
STOP_WORDS = set(
"""
abadde abalala abamu abangi abava ajja ali alina ani anti ateekeddwa atewamu
atya awamu aweebwa ayinza ba baali babadde babalina bajja
bajjanewankubade bali balina bandi bangi bano bateekeddwa baweebwa bayina bebombi beera bibye
bimu bingi bino bo bokka bonna buli bulijjo bulungi bwabwe bwaffe...
[file_length 1,361 | avg_line_length 67.1 | max_line_length 99 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lij/__init__.py**
from ...language import BaseDefaults, Language
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class LigurianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZER_INFIXES
    stop_words = STO...
[file_length 430 | avg_line_length 21.684211 | max_line_length 54 | extension_type py]

**spaCy | spaCy-master/spacy/lang/lij/examples.py**
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lij.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Sciusciâ e sciorbî no se peu.",
"Graçie di çetroin, che me son arrivæ.",
"Vegnime apreuvo, che ve fasso pescâ di òmmi.",
"Bella pe sempre l'... | 386 | 24.8 | 86 | py |