Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364
values |
|---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/lang/lij/punctuation.py | from ..char_classes import ALPHA
from ..punctuation import TOKENIZER_INFIXES
ELISION = " ' ’ ".strip().replace(" ", "").replace("\n", "")
_infixes = TOKENIZER_INFIXES + [
r"(?<=[{a}][{el}])(?=[{a}])".format(a=ALPHA, el=ELISION)
]
TOKENIZER_INFIXES = _infixes
| 267 | 21.333333 | 60 | py |
spaCy | spaCy-master/spacy/lang/lij/stop_words.py | STOP_WORDS = set(
"""
a à â a-a a-e a-i a-o aiva aloa an ancheu ancon apreuvo ascì atra atre atri atro avanti avei
bella belle belli bello ben
ch' che chì chi ciù co-a co-e co-i co-o comm' comme con cösa coscì cöse
d' da da-a da-e da-i da-o dapeu de delongo derê di do doe doî donde dòppo
é e ê ea ean emmo en ës... | 815 | 19.4 | 100 | py |
spaCy | spaCy-master/spacy/lang/lij/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for raw in [
"a-e",
"a-o",
"a-i",
"a-a",
"co-a",
"co-e",
"co-i",
"co-o",
"da-a",
"da-e",
"da-i",
"da-o",
"pe-a",
"pe-e",
"pe-i",
"pe-o... | 861 | 16.24 | 74 | py |
spaCy | spaCy-master/spacy/lang/lt/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class LithuanianDefaults(BaseDefaults):
infixes = TOKENIZER_INFIXES
suffixes ... | 557 | 24.363636 | 62 | py |
spaCy | spaCy-master/spacy/lang/lt/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lt.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Jaunikis pirmąją vestuvinę naktį iškeitė į areštinės gultą",
"Bepiločiai automobiliai išnaikins vairavimo mokyklas, autoservisus ir eismo nelaimes",
... | 579 | 29.526316 | 91 | py |
spaCy | spaCy-master/spacy/lang/lt/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = {
"antra",
"antrai",
"antrais",
"antram",
"antrame",
"antras",
"antri",
"antriems",
"antro",
"antroje",
"antromis",
"antroms",
"antros",
"antrose",
"antru",
"antruose",
"antrus",
"antrą",
"antrų",
... | 21,536 | 17.711555 | 49 | py |
spaCy | spaCy-master/spacy/lang/lt/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_SUFFIXES
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q... | 696 | 20.78125 | 65 | py |
spaCy | spaCy-master/spacy/lang/lt/stop_words.py | STOP_WORDS = {
"a",
"abejais",
"abejas",
"abejetam",
"abejetame",
"abejetas",
"abejeto",
"abejetu",
"abejetą",
"abeji",
"abejiems",
"abejomis",
"abejoms",
"abejos",
"abejose",
"abejuose",
"abejus",
"abejų",
"abi",
"abidvi",
"abiejose",
... | 19,027 | 13.447988 | 21 | py |
spaCy | spaCy-master/spacy/lang/lt/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for orth in ["n-tosios", "?!"]:
_exc[orth] = [{ORTH: orth}]
mod_base_exceptions = {
exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".")
}
del mod_base_exceptions["8)"]... | 382 | 24.533333 | 77 | py |
spaCy | spaCy-master/spacy/lang/lv/__init__.py | from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class LatvianDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Latvian(Language):
lang = "lv"
Defaults = LatvianDefaults
__all__ = ["Latvian"]
| 247 | 15.533333 | 46 | py |
spaCy | spaCy-master/spacy/lang/lv/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-lv
STOP_WORDS = set(
"""
aiz
ap
apakš
apakšpus
ar
arī
augšpus
bet
bez
bija
biji
biju
bijām
bijāt
būs
būsi
būsiet
būsim
būt
būšu
caur
diemžēl
diezin
droši
dēļ
esam
esat
esi
esmu
gan
gar
iekam
iekams
iekām
iekāms
iekš
iekšpus
ik
ir
it
itin
iz
ja
jau
jeb
jebšu
jel
... | 986 | 4.875 | 55 | py |
spaCy | spaCy-master/spacy/lang/mk/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...attrs import LANG
from ...language import BaseDefaults, Language
from ...lookups import Lookups
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lemmatizer import MacedonianLemmatizer
from .lex_attrs impor... | 1,690 | 25.421875 | 77 | py |
spaCy | spaCy-master/spacy/lang/mk/lemmatizer.py | from collections import OrderedDict
from typing import List
from ...pipeline import Lemmatizer
from ...tokens import Token
class MacedonianLemmatizer(Lemmatizer):
def rule_lemmatize(self, token: Token) -> List[str]:
string = token.text
univ_pos = token.pos_.lower()
if univ_pos in ("", "e... | 1,715 | 28.084746 | 63 | py |
spaCy | spaCy-master/spacy/lang/mk/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нула",
"еден",
"една",
"едно",
"два",
"две",
"три",
"четири",
"пет",
"шест",
"седум",
"осум",
"девет",
"десет",
"единаесет",
"дванаесет",
"тринаесет",
"четиринаесет",
"петнаесет",
"шеснаесет",
... | 2,612 | 17.798561 | 64 | py |
spaCy | spaCy-master/spacy/lang/mk/stop_words.py | STOP_WORDS = set(
"""
а
абре
aв
аи
ако
алало
ам
ама
аман
ами
амин
априли-ли-ли
ау
аух
ауч
ах
аха
аха-ха
аш
ашколсум
ашколсун
ај
ајде
ајс
аџаба
бавно
бам
бам-бум
бап
бар
баре
барем
бау
бау-бау
баш
бај
бе
беа
бев
бевме
бевте
без
безбели
бездруго
белки
беше
би
бидејќи
бим
бис
бла
блазе
богами
божем
боц
браво
бравос
бр... | 4,993 | 5.120098 | 17 | py |
spaCy | spaCy-master/spacy/lang/mk/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
_exc = {}
_abbr_exc = [
{ORTH: "м", NORM: "метар"},
{ORTH: "мм", NORM: "милиметар"},
{ORTH: "цм", NORM: "центиметар"},
{ORTH: "см", NORM: "сантиметар"},
{ORTH: "дм", NORM: "дециметар"},
{ORTH: "км", NORM: "километар"},
{ORTH: "кг", NORM: "килограм"},
... | 2,878 | 29.62766 | 48 | py |
spaCy | spaCy-master/spacy/lang/ml/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class MalayalamDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Malayalam(Language):
lang = "ml"
Defaults = MalayalamDefaults
__all__ = ["Malayalam"]... | 321 | 17.941176 | 46 | py |
spaCy | spaCy-master/spacy/lang/ml/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ml.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"അനാവശ്യമായി കണ്ണിലും മൂക്കിലും വായിലും സ്പർശിക്കാതിരിക്കുക",
"പൊതുരംഗത്ത് മലയാള ഭാഷയുടെ സമഗ്രപുരോഗതി ലക്ഷ്യമാക്കി പ്രവർത്തിക്കുന്ന സംഘടനയായ മലയാളഐക്യ... | 625 | 38.125 | 151 | py |
spaCy | spaCy-master/spacy/lang/ml/lex_attrs.py | from ...attrs import LIKE_NUM
# reference 2: https://www.omniglot.com/language/numbers/malayalam.htm
_num_words = [
"പൂജ്യം ",
"ഒന്ന് ",
"രണ്ട് ",
"മൂന്ന് ",
"നാല് ",
"അഞ്ച് ",
"ആറ് ",
"ഏഴ് ",
"എട്ട് ",
"ഒന്പത് ",
"പത്ത് ",
"പതിനൊന്ന്",
"പന്ത്രണ്ട്",
"പതി മൂന്... | 1,451 | 17.857143 | 70 | py |
spaCy | spaCy-master/spacy/lang/ml/stop_words.py | STOP_WORDS = set(
"""
അത്
ഇത്
ആയിരുന്നു
ആകുന്നു
വരെ
അന്നേരം
അന്ന്
ഇന്ന്
ആണ്
""".split()
)
| 94 | 5.785714 | 17 | py |
spaCy | spaCy-master/spacy/lang/mr/__init__.py | from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class MarathiDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Marathi(Language):
lang = "mr"
Defaults = MarathiDefaults
__all__ = ["Marathi"]
| 247 | 15.533333 | 46 | py |
spaCy | spaCy-master/spacy/lang/mr/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-mr/blob/master/stopwords-mr.txt, https://github.com/6/stopwords-json/edit/master/dist/mr.json
STOP_WORDS = set(
"""
न
अतरी
तो
हें
तें
कां
आणि
जें
जे
मग
ते
मी
जो
परी
गा
हे
ऐसें
आतां
नाहीं
तेथ
हा
तया
असे
म्हणे
काय
कीं
जैसें
तंव
तूं
होय
जैसा
आहे
पैं
तैसा
जरी
म्हणोनि... | 1,068 | 4.53886 | 146 | py |
spaCy | spaCy-master/spacy/lang/ms/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class MalayDefault... | 678 | 26.16 | 82 | py |
spaCy | spaCy-master/spacy/lang/ms/_tokenizer_exceptions_list.py | # from https://prpm.dbp.gov.my/cari1?keyword=
# dbp https://en.wikipedia.org/wiki/Dewan_Bahasa_dan_Pustaka
MS_BASE_EXCEPTIONS = set(
"""
aba-aba
abah-abah
abar-abar
abrit-abritan
abu-abu
abuk-abuk
abun-abun
acak-acak
acak-acakan
acang-acang
aci-aci
aci-acian
aci-acinya
adang-adang
adap-adapan
adik-beradik
aduk-aduk... | 27,445 | 13.118313 | 60 | py |
spaCy | spaCy-master/spacy/lang/ms/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ms.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Malaysia ialah sebuah negara yang terletak di Asia Tenggara.",
"Berapa banyak pelajar yang akan menghadiri majlis perpisahan sekolah?",
"Pengelua... | 680 | 36.833333 | 109 | py |
spaCy | spaCy-master/spacy/lang/ms/lex_attrs.py | import unicodedata
from ...attrs import IS_CURRENCY, LIKE_NUM
from .punctuation import LIST_CURRENCY
_num_words = [
"kosong",
"satu",
"dua",
"tiga",
"empat",
"lima",
"enam",
"tujuh",
"lapan",
"sembilan",
"sepuluh",
"sebelas",
"belas",
"puluh",
"ratus",
"... | 1,264 | 18.166667 | 58 | py |
spaCy | spaCy-master/spacy/lang/ms/punctuation.py | from ..char_classes import ALPHA, _currency, _units, merge_chars, split_chars
from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
_units = (
_units + "s bit Gbps Mbps mbps Kbps kbps ƒ ppi px "
"Hz kHz MHz GHz mAh "
"ratus rb ribu ribuan "
"juta jt jutaan mill?iar million... | 2,127 | 33.885246 | 83 | py |
spaCy | spaCy-master/spacy/lang/ms/stop_words.py | STOP_WORDS = set(
"""
ada adalah adanya adapun agak agaknya agar akan akankah akhir akhiri akhirnya
aku akulah amat amatlah anda andalah antar antara antaranya apa apaan apabila
apakah apalagi apatah artinya asal asalkan atas atau ataukah ataupun awal
awalnya
bagai bagaikan bagaimana bagaimanakah bagaimanapun bagi... | 6,507 | 53.689076 | 79 | py |
spaCy | spaCy-master/spacy/lang/ms/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 1,538 | 35.642857 | 89 | py |
spaCy | spaCy-master/spacy/lang/ms/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ._tokenizer_exceptions_list import MS_BASE_EXCEPTIONS
# Daftar singkatan dan Akronim dari:
# https://ms.wiktionary.org/wiki/Wiktionary:Senarai_akronim_dan_singkatan
_exc = {}
for orth in MS_BASE_E... | 19,109 | 11.465753 | 73 | py |
spaCy | spaCy-master/spacy/lang/nb/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ...pipeline import Lemmatizer
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from ... | 1,274 | 23.519231 | 82 | py |
spaCy | spaCy-master/spacy/lang/nb/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.nb.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple vurderer å kjøpe britisk oppstartfirma for en milliard dollar.",
"Selvkjørende biler flytter forsikringsansvaret over på produsentene.",
"S... | 422 | 27.2 | 75 | py |
spaCy | spaCy-master/spacy/lang/nb/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
UNITS,
)
# Punctuation adapted from Danish
_quotes = CONCAT_QUOTES.replace("'", "")
_list_punct = [x for x in ... | 1,651 | 22.6 | 74 | py |
spaCy | spaCy-master/spacy/lang/nb/stop_words.py | STOP_WORDS = set(
"""
alle allerede alt and andre annen annet at av
bak bare bedre beste blant ble bli blir blitt bris by både
da dag de del dem den denne der dermed det dette disse du
eller en enn er et ett etter
fem fikk fire fjor flere folk for fortsatt fra fram
funnet få får fått før først første
gang gi g... | 1,131 | 21.196078 | 76 | py |
spaCy | spaCy-master/spacy/lang/nb/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# fmt: o... | 1,523 | 37.1 | 89 | py |
spaCy | spaCy-master/spacy/lang/nb/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for exc_data in [
{ORTH: "jan.", NORM: "januar"},
{ORTH: "feb.", NORM: "februar"},
{ORTH: "mar.", NORM: "mars"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "jun.", NORM: "juni"... | 3,058 | 12.717489 | 56 | py |
spaCy | spaCy-master/spacy/lang/ne/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class NepaliDefaults(BaseDefaults):
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Nepali(Language):
lang = "ne"
Defaults = NepaliDefaults
__all__ = ["Nepali"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/ne/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ne.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"एप्पलले अमेरिकी स्टार्टअप १ अर्ब डलरमा किन्ने सोच्दै छ",
"स्वायत्त कारहरूले बीमा दायित्व निर्माताहरु तिर बदल्छन्",
"स्यान फ्रांसिस्कोले फुटपाथ वि... | 552 | 28.105263 | 77 | py |
spaCy | spaCy-master/spacy/lang/ne/lex_attrs.py | from ...attrs import LIKE_NUM, NORM
from ..norm_exceptions import BASE_NORMS
# fmt: off
_stem_suffixes = [
["ा", "ि", "ी", "ु", "ू", "ृ", "े", "ै", "ो", "ौ"],
["ँ", "ं", "्", "ः"],
["लाई", "ले", "बाट", "को", "मा", "हरू"],
["हरूलाई", "हरूले", "हरूबाट", "हरूको", "हरूमा"],
["इलो", "िलो", "नु", "ाउनु",... | 2,476 | 25.073684 | 84 | py |
spaCy | spaCy-master/spacy/lang/ne/stop_words.py | # Source: https://github.com/sanjaalcorps/NepaliStopWords/blob/master/NepaliStopWords.txt
STOP_WORDS = set(
"""
अक्सर
अगाडि
अगाडी
अघि
अझै
अठार
अथवा
अनि
अनुसार
अन्तर्गत
अन्य
अन्यत्र
अन्यथा
अब
अरु
अरुलाई
अरू
अर्को
अर्थात
अर्थात्
अलग
अलि
अवस्था
अहिले
आए
आएका
आएको
आज
आजको
आठ
आत्म
आदि
आदिलाई
आफनो
आफू
आफूलाई
आफै
आफैँ
आफ... | 2,791 | 4.640404 | 89 | py |
spaCy | spaCy-master/spacy/lang/nl/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import DutchLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_i... | 1,330 | 23.648148 | 82 | py |
spaCy | spaCy-master/spacy/lang/nl/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.nl.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple overweegt om voor 1 miljard een U.K. startup te kopen",
"Autonome auto's verschuiven de verzekeringverantwoordelijkheid naar producenten",
... | 441 | 28.466667 | 86 | py |
spaCy | spaCy-master/spacy/lang/nl/lemmatizer.py | from typing import List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class DutchLemmatizer(Lemmatizer):
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "rule":
required = ["lemma_lookup", "lemma_rules", "lemma_exc", "... | 4,618 | 36.552846 | 82 | py |
spaCy | spaCy-master/spacy/lang/nl/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = set(
"""
nul een één twee drie vier vijf zes zeven acht negen tien elf twaalf dertien
veertien twintig dertig veertig vijftig zestig zeventig tachtig negentig honderd
duizend miljoen miljard biljoen biljard triljoen triljard
""".split()
)
_ordinal_words = set(
"""
ee... | 1,299 | 30.707317 | 80 | py |
spaCy | spaCy-master/spacy/lang/nl/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
LIST_UNITS,
PUNCT,
merge_chars,
)
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
_prefixes = [",,"] + BASE_... | 1,532 | 22.584615 | 74 | py |
spaCy | spaCy-master/spacy/lang/nl/stop_words.py | # The original stop words list (added in f46ffe3) was taken from
# http://www.damienvanholten.com/downloads/dutch-stop-words.txt
# and consisted of about 100 tokens.
# In order to achieve parity with some of the better-supported
# languages, e.g., English, French, and German, this original list has been
# extended with... | 3,086 | 41.287671 | 120 | py |
spaCy | spaCy-master/spacy/lang/nl/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on Doc and Span.
The definitio... | 2,868 | 36.75 | 97 | py |
spaCy | spaCy-master/spacy/lang/nl/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
# Extensive list of both common and uncommon dutch abbreviations copied from
# github.com/diasks2/pragmatic_segmenter, a Ruby library for rule-based
# sentence boundary detection (MIT, Copyright 2015 Kevin S. ... | 24,278 | 14.098881 | 116 | py |
spaCy | spaCy-master/spacy/lang/pl/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lemmatizer import PolishLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIX... | 1,358 | 23.267857 | 82 | py |
spaCy | spaCy-master/spacy/lang/pl/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.pl.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Poczuł przyjemną woń mocnej kawy.",
"Istnieje wiele dróg oddziaływania substancji psychoaktywnej na układ nerwowy.",
"Powitał mnie biało-czarny k... | 654 | 37.529412 | 148 | py |
spaCy | spaCy-master/spacy/lang/pl/lemmatizer.py | from typing import Dict, List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class PolishLemmatizer(Lemmatizer):
# This lemmatizer implements lookup lemmatization based on the Morfeusz
# dictionary (morfeusz.sgjp.pl/en) by Institute of Computer Science PAS.
# It utilizes some prefi... | 3,566 | 40 | 79 | py |
spaCy | spaCy-master/spacy/lang/pl/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"zero",
"jeden",
"dwa",
"trzy",
"cztery",
"pięć",
"sześć",
"siedem",
"osiem",
"dziewięć",
"dziesięć",
"jedenaście",
"dwanaście",
"trzynaście",
"czternaście",
"pietnaście",
"szesnaście",
"siedemnaście",
... | 1,152 | 16.469697 | 49 | py |
spaCy | spaCy-master/spacy/lang/pl/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_HYPHENS,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
UNITS,
)
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
_quotes = CONCAT_QUOTES.replac... | 1,353 | 22.754386 | 75 | py |
spaCy | spaCy-master/spacy/lang/pl/stop_words.py | # sources: https://github.com/bieli/stopwords/blob/master/polish.stopwords.txt and https://github.com/stopwords-iso/stopwords-pl
STOP_WORDS = set(
"""
a aby ach acz aczkolwiek aj albo ale alez
ależ ani az aż
bardziej bardzo beda bede bedzie bez bo bowiem by
byc byl byla byli bylo byly bym bynajmniej być był
była ... | 2,268 | 27.721519 | 128 | py |
spaCy | spaCy-master/spacy/lang/pt/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class PortugueseDefaults(BaseDefaults)... | 644 | 25.875 | 62 | py |
spaCy | spaCy-master/spacy/lang/pt/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.pt.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple está querendo comprar uma startup do Reino Unido por 100 milhões de dólares",
"Carros autônomos empurram a responsabilidade do seguro para os f... | 464 | 30 | 88 | py |
spaCy | spaCy-master/spacy/lang/pt/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"zero",
"um",
"dois",
"três",
"tres",
"quatro",
"cinco",
"seis",
"sete",
"oito",
"nove",
"dez",
"onze",
"doze",
"dúzia",
"dúzias",
"duzia",
"duzias",
"treze",
"catorze",
"quinze",
"dezas... | 2,001 | 15.823529 | 83 | py |
spaCy | spaCy-master/spacy/lang/pt/punctuation.py | from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
from ..punctuation import TOKENIZER_SUFFIXES as BASE_TOKENIZER_SUFFIXES
_prefixes = [r"\w{1,3}\$"] + BASE_TOKENIZER_PREFIXES
_suffixes = BASE_TOKENIZER_SUFFIXES
_infixes = [r"... | 456 | 31.642857 | 71 | py |
spaCy | spaCy-master/spacy/lang/pt/stop_words.py | STOP_WORDS = set(
"""
a à às área acerca ademais adeus agora ainda algo algumas alguns ali além ambas ambos antes
ao aos apenas apoia apoio apontar após aquela aquelas aquele aqueles aqui aquilo
as assim através atrás até aí
baixo bastante bem boa bom breve
cada caminho catorze cedo cento certamente certeza cima ... | 2,503 | 36.373134 | 91 | py |
spaCy | spaCy-master/spacy/lang/pt/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 3,088 | 34.918605 | 85 | py |
spaCy | spaCy-master/spacy/lang/pt/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for orth in [
"Adm.",
"Art.",
"art.",
"Av.",
"av.",
"Cia.",
"dom.",
"Dr.",
"dr.",
"e.g.",
"E.g.",
"E.G.",
"e/ou",
"ed.",
"eng.",
"etc... | 714 | 12 | 56 | py |
spaCy | spaCy-master/spacy/lang/ro/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
# Lemma data note:
# Original pairs downloaded from http://www.lex... | 776 | 27.777778 | 83 | py |
spaCy | spaCy-master/spacy/lang/ro/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ro import Romanian
>>> from spacy.lang.ro.examples import sentences
>>> nlp = Romanian()
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple plănuiește să cumpere o companie britanică pentru un miliard de dolari",
"Municipal... | 588 | 28.45 | 95 | py |
spaCy | spaCy-master/spacy/lang/ro/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = set(
"""
zero unu doi două trei patru cinci șase șapte opt nouă zece
unsprezece doisprezece douăsprezece treisprezece patrusprezece cincisprezece șaisprezece șaptesprezece optsprezece nouăsprezece
douăzeci treizeci patruzeci cincizeci șaizeci șaptezeci optzeci nouăzeci
su... | 1,642 | 37.209302 | 141 | py |
spaCy | spaCy-master/spacy/lang/ro/punctuation.py | import itertools
from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
)
_list_icons = [x for x in LIST_ICONS if x != "°"]
_list_icons = [x.replace("\\u00B0", "") for ... | 3,113 | 17.210526 | 86 | py |
spaCy | spaCy-master/spacy/lang/ro/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-ro
STOP_WORDS = set(
"""
a
abia
acea
aceasta
această
aceea
aceeasi
aceeași
acei
aceia
acel
acela
acelasi
același
acele
acelea
acest
acesta
aceste
acestea
acestei
acestia
acestui
aceşti
aceştia
acolo
acord
acum
adica
adică
ai
aia
aibă
aici
aiurea
al
ala
alaturi
al... | 2,777 | 4.556 | 55 | py |
spaCy | spaCy-master/spacy/lang/ro/tokenizer_exceptions.py | from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .punctuation import _make_ro_variants
_exc = {}
# Source: https://en.wiktionary.org/wiki/Category:Romanian_abbreviations
for orth in [
"1-a",
"2-a",
"3-a",
"4-a",
"5-a",
"6-a",
... | 1,452 | 14.135417 | 72 | py |
spaCy | spaCy-master/spacy/lang/ru/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ..punctuation import (
COMBINING_DIACRITICS_TOKENIZER_INFIXES,
COMBINING_DIACRITICS_TOKENIZER_SUFFIXES,
)
from .lemmatizer import RussianLemmatizer
from .lex_attrs import LEX_ATTRS
from .stop_... | 1,306 | 23.203704 | 77 | py |
spaCy | spaCy-master/spacy/lang/ru/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ru.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
# Translations from English:
"Apple рассматривает возможность покупки стартапа из Соединённого Королевства за $1 млрд",
"Беспилотные автомобили пе... | 2,840 | 68.292683 | 280 | py |
spaCy | spaCy-master/spacy/lang/ru/lemmatizer.py | from typing import Callable, Dict, List, Optional, Tuple
from thinc.api import Model
from ...pipeline import Lemmatizer
from ...pipeline.lemmatizer import lemmatizer_score
from ...symbols import POS
from ...tokens import Token
from ...vocab import Vocab
PUNCT_RULES = {"«": '"', "»": '"'}
class RussianLemmatizer(Le... | 7,973 | 35.577982 | 88 | py |
spaCy | spaCy-master/spacy/lang/ru/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = list(
set(
"""
ноль ноля нолю нолём ноле нулевой нулевого нулевому нулевым нулевом нулевая нулевую нулевое нулевые нулевых нулевыми
четверть четверти четвертью четвертей четвертям четвертями четвертях
треть трети третью третей третям третями третях
половина ... | 10,247 | 43.751092 | 128 | py |
spaCy | spaCy-master/spacy/lang/ru/stop_words.py | STOP_WORDS = set(
"""
а авось ага агу аж ай али алло ау ах ая
б будем будет будете будешь буду будут будучи будь будьте бы был была были было
быть бац без безусловно бишь благо благодаря ближайшие близко более больше
будто бывает бывала бывали бываю бывают бытует
в вам вами вас весь во вот все всё всего всей всем... | 4,596 | 40.044643 | 87 | py |
spaCy | spaCy-master/spacy/lang/ru/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
_abbrev_exc = [
# Weekdays abbreviations
{ORTH: "пн", NORM: "понедельник"},
{ORTH: "вт", NORM: "вторник"},
{ORTH: "ср", NORM: "среда"},
{ORTH: "чт", NORM: "четверг"},
{... | 18,289 | 43.828431 | 88 | py |
spaCy | spaCy-master/spacy/lang/sa/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class SanskritDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Sanskrit(Language):
lang = "sa"
Defaults = SanskritDefaults
__all__ = ["Sanskrit"]
| 317 | 17.705882 | 46 | py |
spaCy | spaCy-master/spacy/lang/sa/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sa.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"अभ्यावहति कल्याणं विविधं वाक् सुभाषिता ।",
"मनसि व्याकुले चक्षुः पश्यन्नपि न पश्यति ।",
"यस्य बुद्धिर्बलं तस्य निर्बुद्धेस्तु कुतो बलम्?",
"प... | 417 | 25.125 | 56 | py |
spaCy | spaCy-master/spacy/lang/sa/lex_attrs.py | from ...attrs import LIKE_NUM
# reference 1: https://en.wikibooks.org/wiki/Sanskrit/Numbers
_num_words = [
"एकः",
"द्वौ",
"त्रयः",
"चत्वारः",
"पञ्च",
"षट्",
"सप्त",
"अष्ट",
"नव",
"दश",
"एकादश",
"द्वादश",
"त्रयोदश",
"चतुर्दश",
"पञ्चदश",
"षोडश",
"सप्तद... | 2,326 | 17.179688 | 61 | py |
spaCy | spaCy-master/spacy/lang/sa/stop_words.py | # Source: https://gist.github.com/Akhilesh28/fe8b8e180f64b72e64751bc31cb6d323
STOP_WORDS = set(
"""
अहम्
आवाम्
वयम्
माम् मा
आवाम्
अस्मान् नः
मया
आवाभ्याम्
अस्माभिस्
मह्यम् मे
आवाभ्याम् नौ
अस्मभ्यम् नः
मत्
आवाभ्याम्
अस्मत्
मम मे
आवयोः
अस्माकम् नः
मयि
आवयोः
अस्मासु
त्वम्
युवाम्
यूयम्
त्वाम् त्वा
युवाम् वाम्... | 3,598 | 5.974806 | 77 | py |
spaCy | spaCy-master/spacy/lang/si/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class SinhalaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Sinhala(Language):
lang = "si"
Defaults = SinhalaDefaults
__all__ = ["Sinhala"]
| 313 | 17.470588 | 46 | py |
spaCy | spaCy-master/spacy/lang/si/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.si.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"මෙය වාක්යයකි.",
"ඔබ කවුද?",
"ගූගල් සමාගම ඩොලර් මිලියන 500 කට එම ආයතනය මිලදී ගන්නා ලදී.",
"කොළඹ ශ්රී ලංකාවේ ප්රධානතම නගරය යි.",
"ප්රංශ... | 512 | 24.65 | 64 | py |
spaCy | spaCy-master/spacy/lang/si/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"බින්දුව",
"බිංදුව",
"එක",
"දෙක",
"තුන",
"හතර",
"පහ",
"හය",
"හත",
"අට",
"නවය",
"නමය",
"දහය",
"එකොළහ",
"දොළහ",
"දහතුන",
"දහහතර",
"දාහතර",
"පහළව",
"පහළොව",
"දහසය",
"දහහත",
"දාහත",
... | 907 | 13.645161 | 49 | py |
spaCy | spaCy-master/spacy/lang/si/stop_words.py | STOP_WORDS = set(
"""
සහ
සමග
සමඟ
අහා
ආහ්
ආ
ඕහෝ
අනේ
අඳෝ
අපොයි
අපෝ
අයියෝ
ආයි
ඌයි
චී
චිහ්
චික්
හෝ
දෝ
දෝහෝ
මෙන්
සේ
වැනි
බඳු
වන්
අයුරු
අයුරින්
ලෙස
වැඩි
ශ්රී
හා
ය
නිසා
නිසාවෙන්
බවට
බව
බවෙන්
නම්
වැඩි
සිට
දී
මහා
මහ
පමණ
පමණින්
පමන
වන
විට
විටින්
මේ
මෙලෙස
මෙයින්
ඇති
ලෙස
සිදු
වශයෙන්
යන
සඳහා
මගින්
හෝ
ඉතා
ඒ
එම
ද
අතර
විසින්
සම... | 957 | 3.887755 | 17 | py |
spaCy | spaCy-master/spacy/lang/sk/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class SlovakDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Slovak(Language):
    """spaCy :class:`Language` subclass for Slovak (ISO 639-1 code ``sk``)."""

    Defaults = SlovakDefaults
    lang = "sk"
__all__ = ["Slovak"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/sk/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sk.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Ardevop, s.r.o. je malá startup firma na území SR.",
"Samojazdiace autá presúvajú poistnú zodpovednosť na výrobcov automobilov.",
"Košice sú na v... | 704 | 28.375 | 80 | py |
spaCy | spaCy-master/spacy/lang/sk/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nula",
"jeden",
"dva",
"tri",
"štyri",
"päť",
"šesť",
"sedem",
"osem",
"deväť",
"desať",
"jedenásť",
"dvanásť",
"trinásť",
"štrnásť",
"pätnásť",
"šestnásť",
"sedemnásť",
"osemnásť",
"devätnásť"... | 1,024 | 16.083333 | 49 | py |
spaCy | spaCy-master/spacy/lang/sk/stop_words.py | # Source: https://github.com/Ardevop-sk/stopwords-sk
STOP_WORDS = set(
"""
a
aby
aj
ak
akej
akejže
ako
akom
akomže
akou
akouže
akože
aká
akáže
aké
akého
akéhože
akému
akémuže
akéže
akú
akúže
aký
akých
akýchže
akým
akými
akýmiže
akýmže
akýže
ale
alebo
ani
asi
avšak
až
ba
bez
bezo
bol
bola
boli
bolo
bude
budem
budem... | 2,346 | 4.522353 | 52 | py |
spaCy | spaCy-master/spacy/lang/sl/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class SlovenianDefaults(BaseDefaults):
stop_words = STOP_WORD... | 607 | 25.434783 | 82 | py |
spaCy | spaCy-master/spacy/lang/sl/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sl.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple načrtuje nakup britanskega startupa za 1 bilijon dolarjev",
"France Prešeren je umrl 8. februarja 1849 v Kranju",
"Staro ljubljansko letali... | 564 | 28.736842 | 70 | py |
spaCy | spaCy-master/spacy/lang/sl/lex_attrs.py | import unicodedata
from ...attrs import IS_CURRENCY, LIKE_NUM
_num_words = set(
"""
nula ničla nič ena dva tri štiri pet šest sedem osem
devet deset enajst dvanajst trinajst štirinajst petnajst
šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
petdeset šestdest sedemdeset osemdeset devedes... | 6,474 | 43.655172 | 89 | py |
spaCy | spaCy-master/spacy/lang/sl/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
HYPHENS,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
UNITS,
merge_chars,
)
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
... | 3,216 | 36.406977 | 107 | py |
spaCy | spaCy-master/spacy/lang/sl/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-sl
STOP_WORDS = set(
"""
a ali
b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
boste bova boš brez
c cel cela celi celo
č če često četrta četrtek četrti četrto čez čigav
d da daleč dan danes datum deset deseta deseti deseto devet
deveta ... | 2,440 | 27.717647 | 67 | py |
spaCy | spaCy-master/spacy/lang/sl/tokenizer_exceptions.py | from typing import Dict, List
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc: Dict[str, List[Dict]] = {}
_other_exc = {
"t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
"t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j... | 13,288 | 47.5 | 117 | py |
spaCy | spaCy-master/spacy/lang/sq/__init__.py | from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class AlbanianDefaults(BaseDefaults):
    """Language-data defaults for Albanian; currently only stop words."""

    stop_words = STOP_WORDS
class Albanian(Language):
    """spaCy :class:`Language` subclass for Albanian (ISO 639-1 code ``sq``)."""

    Defaults = AlbanianDefaults
    lang = "sq"
__all__ = ["Albanian"]
| 251 | 15.8 | 46 | py |
spaCy | spaCy-master/spacy/lang/sq/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sq.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple po shqyrton blerjen e nje shoqërie të U.K. për 1 miliard dollarë",
"Makinat autonome ndryshojnë përgjegjësinë e sigurimit ndaj prodhuesve",
... | 450 | 29.066667 | 77 | py |
spaCy | spaCy-master/spacy/lang/sq/stop_words.py | # Source: https://github.com/andrixh/index-albanian
STOP_WORDS = set(
"""
a
afert
ai
ajo
andej
anes
aq
as
asaj
ashtu
ata
ate
atij
atje
ato
aty
atyre
b
be
behem
behet
bej
beje
bejne
ben
bene
bere
beri
bie
c
ca
cdo
cfare
cila
cilat
cilave
cilen
ciles
cilet
cili
cilin
cilit
deri
dhe
dic
dicka
dickaje
dike
dikujt
diku... | 1,210 | 4.265217 | 51 | py |
spaCy | spaCy-master/spacy/lang/sr/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class SerbianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
... | 545 | 23.818182 | 62 | py |
spaCy | spaCy-master/spacy/lang/sr/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
# Translations from English
"Apple планира куповину америчког стартапа за $1 милијарду.",
"Беспилотни аутомобили пребацују одговорност осигурања н... | 635 | 27.909091 | 76 | py |
spaCy | spaCy-master/spacy/lang/sr/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нула",
"један",
"два",
"три",
"четири",
"пет",
"шест",
"седам",
"осам",
"девет",
"десет",
"једанаест",
"дванаест",
"тринаест",
"четрнаест",
"петнаест",
"шеснаест",
"седамнаест",
"осамнаест",
"д... | 1,122 | 16.015152 | 49 | py |
spaCy | spaCy-master/spacy/lang/sr/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
UNITS,
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}]... | 958 | 19.847826 | 59 | py |
spaCy | spaCy-master/spacy/lang/sr/stop_words.py | STOP_WORDS = set(
"""
а
авај
ако
ал
али
арх
ау
ах
аха
ај
бар
би
била
били
било
бисмо
бисте
бих
бијасмо
бијасте
бијах
бијаху
бијаше
биће
близу
број
брр
буде
будимо
будите
буду
будући
бум
бућ
вам
вама
вас
ваша
ваше
вашим
вашима
ваљда
веома
вероватно
већ
већина
ви
видео
више
врло
врх
га
где
гиц
год
горе
гђекоје
да
дак... | 2,162 | 4.489848 | 17 | py |
spaCy | spaCy-master/spacy/lang/sr/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
_abbrev_exc = [
# Weekdays abbreviations
{ORTH: "пoн", NORM: "понедељак"},
{ORTH: "уто", NORM: "уторак"},
{ORTH: "сре", NORM: "среда"},
{ORTH: "чет", NORM: "четвртак"},
... | 2,922 | 30.430108 | 72 | py |