| repo (string, 2–152 chars, nullable) | file (string, 15–239 chars) | code (string, up to 58.4M chars) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (364 classes) |
|---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/lang/sv/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ...pipeline import Lemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_IT... | 1,276 | 23.09434 | 77 | py |
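The truncated `lang/*/__init__.py` rows in this dump all follow the same spaCy convention. A minimal runnable sketch of that pattern, assuming only that the language data hangs off a `BaseDefaults` subclass (the class names and placeholder stop words here are illustrative, not spaCy's):

```python
# Illustrative sketch of the lang/*/__init__.py pattern, not the real module.
from spacy.language import BaseDefaults, Language


class MySwedishDefaults(BaseDefaults):
    stop_words = {"och", "att", "det"}  # the real module imports STOP_WORDS


class MySwedish(Language):
    lang = "sv"
    Defaults = MySwedishDefaults


nlp = MySwedish()
print([t.text for t in nlp("Det regnar.")])
```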
spaCy | spaCy-master/spacy/lang/sv/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sv.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple överväger att köpa brittisk startup för 1 miljard dollar.",
"Självkörande bilar förskjuter försäkringsansvar mot tillverkare.",
"San Fransi... | 427 | 27.533333 | 72 | py |
spaCy | spaCy-master/spacy/lang/sv/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"noll",
"en",
"ett",
"två",
"tre",
"fyra",
"fem",
"sex",
"sju",
"åtta",
"nio",
"tio",
"elva",
"tolv",
"tretton",
"fjorton",
"femton",
"sexton",
"sjutton",
"arton",
"nitton",
"tjugo",
... | 949 | 15.101695 | 49 | py |
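The `lex_attrs.py` rows share one pattern: a `like_num` predicate checked against a word list and registered under `LIKE_NUM`. A condensed sketch of that pattern (the word list is a placeholder subset; the real predicates also vary in how they handle fractions and signs):

```python
from spacy.attrs import LIKE_NUM

_num_words = ["noll", "en", "ett", "två", "tre"]  # placeholder subset


def like_num(text: str) -> bool:
    if text and text[0] in ("+", "-", "±", "~"):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    return text.lower() in _num_words


LEX_ATTRS = {LIKE_NUM: like_num}
```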
spaCy | spaCy-master/spacy/lang/sv/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
LIST_ELLIPSES,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_SUFFIXES
_quotes = CONCAT_QUOTES.replace("'", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=A... | 1,021 | 25.205128 | 74 | py |
spaCy | spaCy-master/spacy/lang/sv/stop_words.py | STOP_WORDS = set(
"""
aderton adertonde adjö aldrig alla allas allt alltid alltså än andra andras
annan annat ännu artonde arton åtminstone att åtta åttio åttionde åttonde av
även
båda bådas bakom bara bäst bättre behöva behövas behövde behövt beslut beslutat
beslutit bland blev bli blir blivit bort borta bra
då ... | 2,428 | 35.253731 | 79 | py |
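Stop words declared this way surface on tokens as `is_stop`; a quick check, assuming spaCy with the Swedish data above is installed:

```python
import spacy

nlp = spacy.blank("sv")
doc = nlp("Det är bra.")
print([(t.text, t.is_stop) for t in doc])
```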
spaCy | spaCy-master/spacy/lang/sv/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# fmt: o... | 1,531 | 37.3 | 97 | py |
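Syntax iterators implement one contract: given a parsed `Doc` or `Span`, yield `(start, end, label)` offsets for base noun phrases. A toy version under that contract (one-token chunks per nominal head; the real Swedish rules walk dependency labels):

```python
from typing import Iterator, Tuple, Union

from spacy.symbols import NOUN, PRON, PROPN
from spacy.tokens import Doc, Span


def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    doc = doclike.doc
    if not doc.has_annotation("DEP"):
        raise ValueError("noun_chunks requires a dependency parse")
    np_label = doc.vocab.strings.add("NP")
    for token in doclike:
        # Toy rule: every nominal head spans from its left edge to itself.
        if token.pos in (NOUN, PROPN, PRON):
            yield token.left_edge.i, token.i + 1, np_label
```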
spaCy | spaCy-master/spacy/lang/sv/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
# Verbs
for verb_data in [
{ORTH: "driver"},
{ORTH: "kör"},
{ORTH: "hörr"},
{ORTH: "fattar"},
{ORTH: "hajar"},
{ORTH: "lever"},
{ORTH: "serr"},
{ORTH: "fixar"... | 3,636 | 22.165605 | 114 | py |
spaCy | spaCy-master/spacy/lang/ta/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class TamilDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Tamil(Language):
lang = "ta"
Defaults = TamilDefaults
__all__ = ["Tamil"]
| 305 | 17 | 46 | py |
spaCy | spaCy-master/spacy/lang/ta/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ta.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"கிறிஸ்துமஸ் மற்றும் இனிய புத்தாண்டு வாழ்த்துக்கள்",
"எனக்கு என் குழந்தைப் பருவம் நினைவிருக்கிறது",
"உங்கள் பெயர் என்ன?",
"ஏறத்தாழ இலங்கைத் தம... | 1,155 | 45.24 | 149 | py |
spaCy | spaCy-master/spacy/lang/ta/lex_attrs.py | from ...attrs import LIKE_NUM
_numeral_suffixes = {"பத்து": "பது", "ற்று": "று", "ரத்து": "ரம்", "சத்து": "சம்"}
_num_words = [
"பூச்சியம்",
"ஒரு",
"ஒன்று",
"இரண்டு",
"மூன்று",
"நான்கு",
"ஐந்து",
"ஆறு",
"ஏழு",
"எட்டு",
"ஒன்பது",
"பத்து",
"பதினொன்று",
"பன்னிரண்டு"... | 1,601 | 18.777778 | 82 | py |
spaCy | spaCy-master/spacy/lang/ta/stop_words.py | # Stop words
STOP_WORDS = set(
"""
ஒரு
என்று
மற்றும்
இந்த
இது
என்ற
கொண்டு
என்பது
பல
ஆகும்
அல்லது
அவர்
நான்
உள்ள
அந்த
இவர்
என
முதல்
என்ன
இருந்து
சில
என்
போன்ற
வேண்டும்
வந்து
இதன்
அது
அவன்
தான்
பலரும்
என்னும்
மேலும்
பின்னர்
கொண்ட
இருக்கும்
தனது
உள்ளது
போது
என்றும்
அதன்
தன்
பிறகு
அவர்கள்
வரை
அவள்
நீ
ஆகிய
இருந்தது
உள்... | 792 | 5.007576 | 17 | py |
spaCy | spaCy-master/spacy/lang/te/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class TeluguDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Telugu(Language):
lang = "te"
Defaults = TeluguDefaults
__all__ = ["Telugu"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/te/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.te import Telugu
>>> nlp = Telugu()
>>> from spacy.lang.te.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"ఆపిల్ 1 బిలియన్ డాలర్స్ కి యూ.కె. స్టార్ట్అప్ ని కొనాలని అనుకుంటుంది.",
"ఆటోనోమోస్ కార్లు భీమ... | 635 | 29.285714 | 79 | py |
spaCy | spaCy-master/spacy/lang/te/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"సున్నా",
"శూన్యం",
"ఒకటి",
"రెండు",
"మూడు",
"నాలుగు",
"ఐదు",
"ఆరు",
"ఏడు",
"ఎనిమిది",
"తొమ్మిది",
"పది",
"పదకొండు",
"పన్నెండు",
"పదమూడు",
"పద్నాలుగు",
"పదిహేను",
"పదహారు",
"పదిహేడు",
"పద్దెనిమి... | 873 | 14.890909 | 49 | py |
spaCy | spaCy-master/spacy/lang/te/stop_words.py | # Source: https://github.com/Xangis/extra-stopwords (MIT License)
STOP_WORDS = set(
"""
అందరూ
అందుబాటులో
అడగండి
అడగడం
అడ్డంగా
అనుగుణంగా
అనుమతించు
అనుమతిస్తుంది
అయితే
ఇప్పటికే
ఉన్నారు
ఎక్కడైనా
ఎప్పుడు
ఎవరైనా
ఎవరో ఒకరు
ఏ
ఏదైనా
ఏమైనప్పటికి
ఏమైనప్పటికి
ఒక
ఒక ప్రక్కన
కనిపిస్తాయి
కాదు
కాదు
కూడా
గా
గురించి
చుట్టూ
చేయగలిగ... | 475 | 7.350877 | 65 | py |
spaCy | spaCy-master/spacy/lang/th/__init__.py | from ...language import BaseDefaults, Language
from ...tokens import Doc
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.th.ThaiTokenizer"
"... | 1,371 | 24.407407 | 69 | py |
spaCy | spaCy-master/spacy/lang/th/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"ศูนย์",
"หนึ่ง",
"สอง",
"สาม",
"สี่",
"ห้า",
"หก",
"เจ็ด",
"แปด",
"เก้า",
"สิบ",
"สิบเอ็ด",
"ยี่สิบ",
"ยี่สิบเอ็ด",
"สามสิบ",
"สามสิบเอ็ด",
"สี่สิบ",
"สี่สิบเอ็ด",
"ห้าสิบ",
"ห้าสิบเอ็ด",
"... | 996 | 15.898305 | 49 | py |
spaCy | spaCy-master/spacy/lang/th/stop_words.py | STOP_WORDS = set(
"""
ทั้งนี้ ดัง ขอ รวม หลังจาก เป็น หลัง หรือ ๆ เกี่ยวกับ ซึ่งได้แก่ ด้วยเพราะ ด้วยว่า ด้วยเหตุเพราะ
ด้วยเหตุว่า สุดๆ เสร็จแล้ว เช่น เข้า ถ้า ถูก ถึง ต่างๆ ใคร เปิดเผย ครา รือ ตาม ใน ได้แก่ ได้แต่
ได้ที่ ตลอดถึง นอกจากว่า นอกนั้น จริง อย่างดี ส่วน เพียงเพื่อ เดียว จัด ทั้งที ทั้งคน ทั้งตัว ไกลๆ
ถึ... | 7,187 | 93.578947 | 125 | py |
spaCy | spaCy-master/spacy/lang/th/tokenizer_exceptions.py | from ...symbols import ORTH
_exc = {
# หน่วยงานรัฐ / government agency
"กกต.": [{ORTH: "กกต."}],
"กทท.": [{ORTH: "กทท."}],
"กทพ.": [{ORTH: "กทพ."}],
"กบข.": [{ORTH: "กบข."}],
"กบว.": [{ORTH: "กบว."}],
"กปน.": [{ORTH: "กปน."}],
"กปภ.": [{ORTH: "กปภ."}],
"กปส.": [{ORTH: "กปส."}],
... | 13,343 | 29.396355 | 52 | py |
spaCy | spaCy-master/spacy/lang/ti/__init__.py | from ...attrs import LANG
from ...language import BaseDefaults, Language
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTION... | 834 | 29.925926 | 81 | py |
spaCy | spaCy-master/spacy/lang/ti/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ti.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"አፕል ብዩኬ ትርከብ ንግድ ብ1 ቢሊዮን ዶላር ንምግዛዕ ሐሲባ።",
"ፈላማይ ክታበት ኮቪድ 19 ተጀሚሩ፤ሓዱሽ ተስፋ ሂቡ ኣሎ",
"ቻንስለር ጀርመን ኣንገላ መርከል ዝርግሓ ቫይረስ ኮሮና ንምክልካል ጽኑዕ እገዳ ክግበር ጸዊዓ",
... | 457 | 23.105263 | 65 | py |
spaCy | spaCy-master/spacy/lang/ti/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"ዜሮ",
"ሓደ",
"ክልተ",
"ሰለስተ",
"ኣርባዕተ",
"ሓሙሽተ",
"ሽድሽተ",
"ሸውዓተ",
"ሽሞንተ",
"ትሽዓተ",
"ዓሰርተ",
"ዕስራ",
"ሰላሳ",
"ኣርብዓ",
"ሓምሳ",
"ሱሳ",
"ሰብዓ",
"ሰማንያ",
"ቴስዓ",
"ሚእቲ",
"ሺሕ",
"ሚልዮን",
"ቢልዮን",
"ትሪልዮን",... | 1,221 | 15.513514 | 77 | py |
spaCy | spaCy-master/spacy/lang/ti/punctuation.py | from ..char_classes import (
ALPHA_UPPER,
CURRENCY,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
_list_punct = LIST_PUNCT + "፡ ። ፣ ፤ ፥ ፦ ፧ ፠ ፨".strip().split()
_suffixes = (
_list_punct
+ LIST_ELLIPSES
+ LIST_QUOTES
+ [
r"(?<=[0-9])\+",
# Tigrinya is written... | 530 | 19.423077 | 62 | py |
spaCy | spaCy-master/spacy/lang/ti/stop_words.py | # Stop words from Tigrinya Wordcount: https://github.com/fgaim/Tigrinya-WordCount/blob/main/ti_stop_words.txt
# Stop words
STOP_WORDS = set(
"""
'ምበር 'ሞ 'ቲ 'ታ 'ኳ 'ውን 'ዚ 'የ 'ዩ 'ያ 'ዮም 'ዮን
ልዕሊ ሒዙ ሒዛ ሕጂ መበል መን መንጎ መጠን ማለት ምስ ምባል
ምእንቲ ምኽንያቱ ምኽንያት ምዃኑ ምዃንና ምዃኖም
ስለ ስለዚ ስለዝበላ ሽዑ ቅድሚ በለ በቲ በዚ ብምባል ብተወሳኺ ብኸመይ
ብዘይ ብዘይካ ብዙሕ ብ... | 899 | 31.142857 | 109 | py |
spaCy | spaCy-master/spacy/lang/ti/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
_exc = {}
for exc_data in [
{ORTH: "ት/ቤት"},
{ORTH: "ወ/ሮ", NORM: "ወይዘሮ"},
{ORTH: "ወ/ሪ", NORM: "ወይዘሪት"},
]:
_exc[exc_data[ORTH]] = [exc_data]
for orth in [
"ዓ.ም.",
"ኪ.ሜ.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = _exc
| 298 | 12.590909 | 37 | py |
spaCy | spaCy-master/spacy/lang/tl/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class TagalogDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
... | 416 | 20.947368 | 54 | py |
spaCy | spaCy-master/spacy/lang/tl/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"sero",
"isa",
"dalawa",
"tatlo",
"apat",
"lima",
"anim",
"pito",
"walo",
"siyam",
"sampu",
"labing-isa",
"labindalawa",
"labintatlo",
"labing-apat",
"labinlima",
"labing-anim",
"labimpito",
"labing... | 942 | 15.54386 | 49 | py |
spaCy | spaCy-master/spacy/lang/tl/stop_words.py | STOP_WORDS = set(
"""
akin
aking
ako
alin
am
amin
aming
ang
ano
anumang
apat
at
atin
ating
ay
bababa
bago
bakit
bawat
bilang
dahil
dalawa
dapat
din
dito
doon
gagawin
gayunman
ginagawa
ginawa
ginawang
gumawa
gusto
habang
hanggang
hindi
huwag
iba
ibaba
ibabaw
ibig
ikaw
ilagay
ilalim
ilan
inyong
isa
isang
itaas
ito
iy... | 965 | 5.355263 | 17 | py |
spaCy | spaCy-master/spacy/lang/tl/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"tayo'y": [{ORTH: "tayo"}, {ORTH: "'y", NORM: "ay"}],
"isa'y": [{ORTH: "isa"}, {ORTH: "'y", NORM: "ay"}],
"baya'y": [{ORTH: "baya"}, {ORTH: "'y", NORM: "ay"}],
"sa'yo": [{ORTH: "... | 687 | 35.210526 | 58 | py |
spaCy | spaCy-master/spacy/lang/tn/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
class SetswanaDefaults(BaseDefaults):
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Setswana(Language):
... | 392 | 19.684211 | 46 | py |
spaCy | spaCy-master/spacy/lang/tn/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tn.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple e nyaka go reka JSE ka tlhwatlhwa ta R1 billion",
"Johannesburg ke toropo e kgolo mo Afrika Borwa.",
"O ko kae?",
"ke mang presidente ya... | 421 | 25.375 | 60 | py |
spaCy | spaCy-master/spacy/lang/tn/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"lefela",
"nngwe",
"pedi",
"tharo",
"nne",
"tlhano",
"thataro",
"supa",
"robedi",
"robongwe",
"lesome",
"lesomenngwe",
"lesomepedi",
"sometharo",
"somenne",
"sometlhano",
"somethataro",
"somesupa",
... | 1,942 | 16.990741 | 49 | py |
spaCy | spaCy-master/spacy/lang/tn/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au... | 576 | 20.37037 | 68 | py |
spaCy | spaCy-master/spacy/lang/tn/stop_words.py | # Stop words
STOP_WORDS = set(
"""
ke gareng ga selekanyo tlhwatlhwa yo mongwe se
sengwe fa go le jalo gongwe ba na mo tikologong
jaaka kwa morago nna gonne ka sa pele nako teng
tlase fela ntle magareng tsona feta bobedi kgabaganya
moo gape kgatlhanong botlhe tsotlhe bokana e esi
setseng mororo dinako golo kgolo nn... | 796 | 36.952381 | 65 | py |
spaCy | spaCy-master/spacy/lang/tr/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS
class TurkishDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
... | 546 | 23.863636 | 67 | py |
spaCy | spaCy-master/spacy/lang/tr/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Neredesin?",
"Neredesiniz?",
"Bu bir cümledir.",
"Sürücüsüz araçlar sigorta yükümlülüğünü üreticilere kaydırıyor.",
"San Francisco kaldırı... | 574 | 29.263158 | 79 | py |
spaCy | spaCy-master/spacy/lang/tr/lex_attrs.py | from ...attrs import LIKE_NUM
# Thirteen, fifteen etc. are written separate: on üç
_num_words = [
"bir",
"iki",
"üç",
"dört",
"beş",
"altı",
"yedi",
"sekiz",
"dokuz",
"on",
"yirmi",
"otuz",
"kırk",
"elli",
"altmış",
"yetmiş",
"seksen",
"doksan",
... | 1,630 | 17.325843 | 66 | py |
spaCy | spaCy-master/spacy/lang/tr/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-tr
STOP_WORDS = set(
"""
acaba
acep
adamakıllı
adeta
ait
ama
amma
anca
ancak
arada
artık
aslında
aynen
ayrıca
az
açıkça
açıkçası
bana
bari
bazen
bazı
bazısı
bazısına
bazısında
bazısından
bazısını
bazısının
başkası
başkasına
başkasında
başkasından
başkasını
başkas... | 4,187 | 6.505376 | 55 | py |
spaCy | spaCy-master/spacy/lang/tr/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 1,854 | 30.440678 | 83 | py |
spaCy | spaCy-master/spacy/lang/tr/tokenizer_exceptions.py | import re
from ...symbols import NORM, ORTH
from ..punctuation import ALPHA, ALPHA_LOWER
_exc = {}
_abbr_period_exc = [
{ORTH: "A.B.D.", NORM: "Amerika"},
{ORTH: "Alb.", NORM: "albay"},
{ORTH: "Ank.", NORM: "Ankara"},
{ORTH: "Ar.Gör."},
{ORTH: "Arş.Gör."},
{ORTH: "Asb.", NORM: "astsubay"},
... | 5,945 | 30.13089 | 75 | py |
spaCy | spaCy-master/spacy/lang/tt/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class TatarDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZE... | 483 | 22.047619 | 54 | py |
spaCy | spaCy-master/spacy/lang/tt/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tt.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple Бөекбритания стартабын $1 миллиард өчен сатып алыун исәпли.",
"Автоном автомобильләр иминият җаваплылыкны җитештерүчеләргә күчерә.",
"Сан-Фра... | 589 | 33.705882 | 88 | py |
spaCy | spaCy-master/spacy/lang/tt/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нуль",
"ноль",
"бер",
"ике",
"өч",
"дүрт",
"биш",
"алты",
"җиде",
"сигез",
"тугыз",
"ун",
"унбер",
"унике",
"унөч",
"ундүрт",
"унбиш",
"уналты",
"унҗиде",
"унсигез",
"унтугыз",
"егерме"... | 936 | 14.881356 | 49 | py |
spaCy | spaCy-master/spacy/lang/tt/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=AL... | 806 | 27.821429 | 81 | py |
spaCy | spaCy-master/spacy/lang/tt/stop_words.py | # Tatar stopwords are from https://github.com/aliiae/stopwords-tt
STOP_WORDS = set(
"""алай алайса алар аларга аларда алардан аларны аларның аларча
алары аларын аларынга аларында аларыннан аларының алтмыш алтмышынчы алтмышынчыга
алтмышынчыда алтмышынчыдан алтмышынчылар алтмышынчыларга алтмышынчыларда
алтмышынчылар... | 9,852 | 55.626437 | 81 | py |
spaCy | spaCy-master/spacy/lang/tt/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
_abbrev_exc = [
# Weekdays abbreviations
{ORTH: "дш", NORM: "дүшәмбе"},
{ORTH: "сш", NORM: "сишәмбе"},
{ORTH: "чш", NORM: "чәршәмбе"},
{ORTH: "пш", NORM: "пәнҗешәмбе"},
... | 1,509 | 30.458333 | 74 | py |
spaCy | spaCy-master/spacy/lang/uk/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ..punctuation import (
COMBINING_DIACRITICS_TOKENIZER_INFIXES,
COMBINING_DIACRITICS_TOKENIZER_SUFFIXES,
)
from .lemmatizer import UkrainianLemmatizer
from .lex_attrs import LEX_ATTRS
from .sto... | 1,320 | 23.462963 | 77 | py |
spaCy | spaCy-master/spacy/lang/uk/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.uk.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Ніч на середу буде морозною.",
"Чим кращі книги ти читав, тим гірше спиш.", # Serhiy Zhadan
"Найстаріші ґудзики, відомі людству, археологи знайш... | 1,144 | 56.25 | 261 | py |
spaCy | spaCy-master/spacy/lang/uk/lemmatizer.py | from typing import Callable, Optional
from thinc.api import Model
from ...pipeline.lemmatizer import lemmatizer_score
from ...vocab import Vocab
from ..ru.lemmatizer import RussianLemmatizer
class UkrainianLemmatizer(RussianLemmatizer):
def __init__(
self,
vocab: Vocab,
model: Optional[M... | 1,716 | 36.326087 | 77 | py |
spaCy | spaCy-master/spacy/lang/uk/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"більйон",
"вісім",
"вісімдесят",
"вісімнадцять",
"вісімсот",
"восьмий",
"два",
"двадцять",
"дванадцять",
"двісті",
"дев'яносто",
"дев'ятнадцять",
"дев'ятсот",
"дев'ять",
"десять",
"децильйон",
"квадрильйон... | 1,191 | 15.788732 | 49 | py |
spaCy | spaCy-master/spacy/lang/uk/stop_words.py | STOP_WORDS = set(
"""а
або
адже
аж
але
алло
б
багато
без
безперервно
би
більш
більше
біля
близько
бо
був
буває
буде
будемо
будете
будеш
буду
будуть
будь
була
були
було
бути
в
вам
вами
вас
ваш
ваша
ваше
вашим
вашими
ваших
ваші
вашій
вашого
вашої
вашому
вашою
вашу
вгорі
вгору
вдалині
весь
вже
ви
від
відсотків
він
віс... | 2,700 | 4.746809 | 17 | py |
spaCy | spaCy-master/spacy/lang/uk/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for exc_data in [
{ORTH: "обл.", NORM: "область"},
{ORTH: "р-н.", NORM: "район"},
{ORTH: "р-н", NORM: "район"},
{ORTH: "м.", NORM: "місто"},
{ORTH: "вул.", NORM: "вулиця"},... | 1,143 | 29.918919 | 56 | py |
spaCy | spaCy-master/spacy/lang/ur/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
class UrduDefaults(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"directi... | 461 | 22.1 | 81 | py |
spaCy | spaCy-master/spacy/lang/ur/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ur.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"اردو ہے جس کا نام ہم جانتے ہیں داغ",
"سارے جہاں میں دھوم ہماری زباں کی ہے",
]
| 249 | 18.230769 | 56 | py |
spaCy | spaCy-master/spacy/lang/ur/lex_attrs.py | from ...attrs import LIKE_NUM
# Source https://quizlet.com/4271889/1-100-urdu-number-wordsurdu-numerals-flash-cards/
# http://www.urduword.com/lessons.php?lesson=numbers
# https://en.wikibooks.org/wiki/Urdu/Vocabulary/Numbers
# https://www.urdu-english.com/lessons/beginner/numbers
_num_words = """ایک دو تین چار پانچ ... | 1,617 | 34.173913 | 113 | py |
spaCy | spaCy-master/spacy/lang/ur/punctuation.py | from ..punctuation import TOKENIZER_SUFFIXES
_suffixes = TOKENIZER_SUFFIXES
| 77 | 18.5 | 44 | py |
spaCy | spaCy-master/spacy/lang/ur/stop_words.py | # Source: collected from different resource on internet
STOP_WORDS = set(
"""
ثھی
خو
گی
اپٌے
گئے
ثہت
طرف
ہوبری
پبئے
اپٌب
دوضری
گیب
کت
گب
ثھی
ضے
ہر
پر
اش
دی
گے
لگیں
ہے
ثعذ
ضکتے
تھی
اى
دیب
لئے
والے
یہ
ثدبئے
ضکتی
تھب
اًذر
رریعے
لگی
ہوبرا
ہوًے
ثبہر
ضکتب
ًہیں
تو
اور
رہب
لگے
ہوضکتب
ہوں
کب
ہوبرے
توبم
کیب
ایطے
رہی
هگر
ہوضک... | 2,665 | 4.18677 | 55 | py |
spaCy | spaCy-master/spacy/lang/vi/__init__.py | import re
import string
from pathlib import Path
from typing import Any, Dict, Union
import srsly
from ... import util
from ...language import BaseDefaults, Language
from ...tokens import Doc
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_attrs import LEX_ATTRS... | 5,574 | 31.988166 | 83 | py |
spaCy | spaCy-master/spacy/lang/vi/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.vi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Đây là đâu, tôi là ai?",
"Căn phòng có nhiều cửa sổ nên nó khá sáng",
"Đại dịch COVID vừa qua đã gây ảnh hưởng rất lớn tới nhiều doanh nghiệp lớn ... | 625 | 33.777778 | 86 | py |
spaCy | spaCy-master/spacy/lang/vi/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"không", # Zero
"một", # One
"mốt", # Also one, irreplacable in niché cases for unit digit such as "51"="năm mươi mốt"
"hai", # Two
"ba", # Three
"bốn", # Four
"tư", # Also four, used in certain cases for unit digit such as "54"="năm mươi ... | 1,340 | 28.152174 | 95 | py |
spaCy | spaCy-master/spacy/lang/vi/stop_words.py | # Source: https://github.com/stopwords/vietnamese-stopwords
STOP_WORDS = set(
"""
a_lô
a_ha
ai
ai_ai
ai_nấy
ai_đó
alô
amen
anh
anh_ấy
ba
ba_bau
ba_bản
ba_cùng
ba_họ
ba_ngày
ba_ngôi
ba_tăng
bao_giờ
bao_lâu
bao_nhiêu
bao_nả
bay_biến
biết
biết_bao
biết_bao_nhiêu
biết_chắc
biết_chừng_nào
biết_mình
biết_mấy
biết_thế
biế... | 15,387 | 6.891282 | 59 | py |
spaCy | spaCy-master/spacy/lang/xx/__init__.py | from ...language import Language
class MultiLanguage(Language):
"""Language class to be used for models that support multiple languages.
This module allows models to specify their language ID as 'xx'.
"""
lang = "xx"
__all__ = ["MultiLanguage"]
| 266 | 19.538462 | 76 | py |
spaCy | spaCy-master/spacy/lang/xx/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.xx.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# combined examples from de/en/es/fr/it/nl/pl/pt/ru
sentences = [
"Die ganze Stadt ist ein Startup: Shenzhen ist das Silicon Valley für Hardware-Firmen",
"Wie deuts... | 6,350 | 65.15625 | 280 | py |
spaCy | spaCy-master/spacy/lang/yo/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class YorubaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Yoruba(Language):
lang = "yo"
Defaults = YorubaDefaults
__all__ = ["Yoruba"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/yo/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.yo.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# 1. https://yo.wikipedia.org/wiki/Wikipedia:%C3%80y%E1%BB%8Dk%C3%A0_p%C3%A0t%C3%A0k%C3%AC
# 2.https://yo.wikipedia.org/wiki/Oj%C3%BAew%C3%A9_%C3%80k%E1%BB%8D%CC%81k%E1%BB%8... | 1,061 | 45.173913 | 196 | py |
spaCy | spaCy-master/spacy/lang/yo/lex_attrs.py | import unicodedata
from ...attrs import LIKE_NUM
_num_words = [
"ení",
"oókàn",
"ọ̀kanlá",
"ẹ́ẹdọ́gbọ̀n",
"àádọ́fà",
"ẹ̀walélúɡba",
"egbèje",
"ẹgbàárin",
"èjì",
"eéjì",
"èjìlá",
"ọgbọ̀n,",
"ọgọ́fà",
"ọ̀ọ́dúrún",
"ẹgbẹ̀jọ",
"ẹ̀ẹ́dẹ́ɡbàárùn",
"ẹ̀ta",
... | 2,103 | 17.785714 | 88 | py |
spaCy | spaCy-master/spacy/lang/yo/stop_words.py | # stop words as whitespace-separated list.
# Source: https://raw.githubusercontent.com/dohliam/more-stoplists/master/yo/yo.txt
STOP_WORDS = set(
"a an b bá bí bẹ̀rẹ̀ d e f fún fẹ́ g gbogbo i inú j jù jẹ jẹ́ k kan kì kí kò "
"l láti lè lọ m mi mo máa mọ̀ n ni náà ní nígbà nítorí nǹkan o p padà pé "
"púpọ̀ p... | 483 | 47.4 | 88 | py |
spaCy | spaCy-master/spacy/lang/zh/__init__.py | import tempfile
import warnings
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional
import srsly
from ... import util
from ...errors import Errors, Warnings
from ...language import BaseDefaults, Language
from ...scorer import Scorer
from ...tokens import Doc... | 12,734 | 36.677515 | 161 | py |
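The Chinese module makes the word segmenter configurable. The documented way to pick a non-default segmenter (jieba and pkuseg are optional dependencies, assumed installed here):

```python
from spacy.lang.zh import Chinese

cfg = {"segmenter": "jieba"}  # default is per-character segmentation
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
print([t.text for t in nlp("作为语言而言,为世界使用人数最多的语言")])
```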
spaCy | spaCy-master/spacy/lang/zh/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.zh.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# from https://zh.wikipedia.org/wiki/汉语
sentences = [
"作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",
"汉语有多种分支,当中官话最为流行,为中华人民共和国的国家通用语言(又称为普通话)、以及中华民国的国语。",
"此外,中文还是联合国正... | 430 | 24.352941 | 58 | py |
spaCy | spaCy-master/spacy/lang/zh/lex_attrs.py | import re
from ...attrs import LIKE_NUM
_single_num_words = [
"〇",
"一",
"二",
"三",
"四",
"五",
"六",
"七",
"八",
"九",
"十",
"十一",
"十二",
"十三",
"十四",
"十五",
"十六",
"十七",
"十八",
"十九",
"廿",
"卅",
"卌",
"皕",
"零",
"壹",
"贰",
... | 1,429 | 13.591837 | 83 | py |
spaCy | spaCy-master/spacy/lang/zh/stop_words.py | # stop words as whitespace-separated list
# Chinese stop words; the list may not be complete
STOP_WORDS = set(
"""
!
"
#
$
%
&
'
(
)
*
+
,
-
--
.
..
...
......
...................
./
.一
.数
.日
/
//
0
1
2
3
4
5
6
7
8
9
:
://
::
;
<
=
>
>>
?
@
A
Lex
[
\
]
^
_
`
exp
sub
sup
|
}
~
~~~~
·
×
×××
Δ
Ψ
γ
μ
φ
φ.
В
—
——
———
‘
’
’‘
“
”
”,
…... | 5,897 | 2.104211 | 41 | py |
spaCy | spaCy-master/spacy/matcher/__init__.py | from .dependencymatcher import DependencyMatcher
from .levenshtein import levenshtein
from .matcher import Matcher
from .phrasematcher import PhraseMatcher
__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"]
| 232 | 32.285714 | 74 | py |
spaCy | spaCy-master/spacy/matcher/polyleven.c | /*
* Adapted from Polyleven (https://ceptord.net/)
*
* Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c
*
* Copyright (c) 2021 Fujimoto Seiji <fujimoto@ceptord.net>
* Copyright (c) 2021 Max Bachmann <kontakt@maxbachmann.de>
* Copyright (c) 2022 Nick Mazuk
... | 9,571 | 23.862338 | 107 | c |
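This C file backs the `levenshtein` helper re-exported from `spacy.matcher` (see the `__init__.py` row above), which is used for fuzzy matching. Basic usage:

```python
from spacy.matcher import levenshtein

print(levenshtein("kitten", "sitting"))  # classic example: edit distance 3
```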
spaCy | spaCy-master/spacy/ml/__init__.py | from .callbacks import create_models_with_nvtx_range # noqa: F401
from .models import * # noqa: F401, F403
| 109 | 35.666667 | 66 | py |
spaCy | spaCy-master/spacy/ml/_character_embed.py | from typing import List
from thinc.api import Model
from thinc.types import Floats2d
from ..tokens import Doc
from ..util import registry
@registry.layers("spacy.CharEmbed.v1")
def CharacterEmbed(nM: int, nC: int) -> Model[List[Doc], List[Floats2d]]:
# nM: Number of dimensions per character. nC: Number of chara... | 1,994 | 31.704918 | 91 | py |
spaCy | spaCy-master/spacy/ml/_precomputable_affine.py | from thinc.api import Model, normal_init
from ..util import registry
@registry.layers("spacy.PrecomputableAffine.v1")
def PrecomputableAffine(nO, nI, nF, nP, dropout=0.1):
model = Model(
"precomputable_affine",
forward,
init=init,
dims={"nO": nO, "nI": nI, "nF": nF, "nP": nP},
... | 5,834 | 34.363636 | 85 | py |
spaCy | spaCy-master/spacy/ml/callbacks.py | import functools
import inspect
import types
import warnings
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Type
from thinc.layers import with_nvtx_range
from thinc.model import Model, wrap_model_recursive
from thinc.util import use_nvtx_range
from ..errors import Warnings
from ..util import r... | 3,787 | 29.304 | 87 | py |
spaCy | spaCy-master/spacy/ml/extract_ngrams.py | from thinc.api import Model
from ..attrs import LOWER
from ..util import registry
@registry.layers("spacy.extract_ngrams.v1")
def extract_ngrams(ngram_size: int, attr: int = LOWER) -> Model:
model: Model = Model("extract_ngrams", forward)
model.attrs["ngram_size"] = ngram_size
model.attrs["attr"] = attr
... | 1,191 | 33.057143 | 84 | py |
spaCy | spaCy-master/spacy/ml/extract_spans.py | from typing import Callable, List, Tuple
from thinc.api import Model, to_numpy
from thinc.types import Ints1d, Ragged
from ..util import registry
@registry.layers("spacy.extract_spans.v1")
def extract_spans() -> Model[Tuple[Ragged, Ragged], Ragged]:
"""Extract spans from a sequence of source arrays, as specifie... | 2,304 | 32.897059 | 111 | py |
spaCy | spaCy-master/spacy/ml/featureextractor.py | from typing import Callable, List, Tuple, Union
from thinc.api import Model, registry
from thinc.types import Ints2d
from ..tokens import Doc
@registry.layers("spacy.FeatureExtractor.v1")
def FeatureExtractor(columns: List[Union[int, str]]) -> Model[List[Doc], List[Ints2d]]:
return Model("extract_features", for... | 970 | 31.366667 | 87 | py |
spaCy | spaCy-master/spacy/ml/staticvectors.py | import warnings
from typing import Callable, List, Optional, Sequence, Tuple, cast
from thinc.api import Model, Ops, registry
from thinc.initializers import glorot_uniform_init
from thinc.types import Floats1d, Floats2d, Ints1d, Ragged
from thinc.util import partial
from ..attrs import ORTH
from ..errors import Error... | 4,000 | 31.528455 | 85 | py |
spaCy | spaCy-master/spacy/ml/tb_framework.py | from thinc.api import Model, noop
from ..util import registry
from .parser_model import ParserStepModel
@registry.layers("spacy.TransitionModel.v1")
def TransitionModel(
tok2vec, lower, upper, resize_output, dropout=0.2, unseen_classes=set()
):
"""Set up a stepwise transition-based model"""
if upper is N... | 1,465 | 27.192308 | 77 | py |
spaCy | spaCy-master/spacy/ml/models/__init__.py | from .entity_linker import * # noqa
from .multi_task import * # noqa
from .parser import * # noqa
from .span_finder import * # noqa
from .spancat import * # noqa
from .tagger import * # noqa
from .textcat import * # noqa
from .tok2vec import * # noqa
| 259 | 27.888889 | 36 | py |
spaCy | spaCy-master/spacy/ml/models/entity_linker.py | from pathlib import Path
from typing import Callable, Iterable, List, Optional, Tuple
from thinc.api import (
Linear,
Maxout,
Model,
Ragged,
chain,
list2ragged,
reduce_mean,
residual,
tuplify,
)
from thinc.types import Floats2d
from ...errors import Errors
from ...kb import (
C... | 4,291 | 30.792593 | 98 | py |
spaCy | spaCy-master/spacy/ml/models/multi_task.py | from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, cast
import numpy
from thinc.api import (
CosineDistance,
L2Distance,
LayerNorm,
Linear,
Maxout,
Model,
MultiSoftmax,
Softmax,
chain,
list2array,
to_categorical,
... | 9,271 | 32.96337 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/parser.py | from typing import List, Optional, cast
from thinc.api import Linear, Model, chain, list2array, use_ops, zero_init
from thinc.types import Floats2d
from ...compat import Literal
from ...errors import Errors
from ...tokens import Doc
from ...util import registry
from .._precomputable_affine import PrecomputableAffine
... | 6,897 | 38.193182 | 85 | py |
spaCy | spaCy-master/spacy/ml/models/span_finder.py | from typing import Callable, List, Tuple
from thinc.api import Model, chain, with_array
from thinc.types import Floats1d, Floats2d
from ...tokens import Doc
from ...util import registry
InT = List[Doc]
OutT = Floats2d
@registry.architectures("spacy.SpanFinder.v1")
def build_finder_model(
tok2vec: Model[InT, Li... | 1,230 | 28.309524 | 78 | py |
spaCy | spaCy-master/spacy/ml/models/spancat.py | from typing import List, Tuple, cast
from thinc.api import (
Linear,
Logistic,
Maxout,
Model,
chain,
concatenate,
glorot_uniform_init,
list2ragged,
reduce_first,
reduce_last,
reduce_max,
reduce_mean,
with_getitem,
)
from thinc.types import Floats2d, Ragged
from ...t... | 2,386 | 29.602564 | 85 | py |
spaCy | spaCy-master/spacy/ml/models/tagger.py | from typing import List, Optional
from thinc.api import Model, Softmax_v2, chain, with_array, zero_init
from thinc.types import Floats2d
from ...tokens import Doc
from ...util import registry
@registry.architectures("spacy.Tagger.v2")
def build_tagger_model(
tok2vec: Model[List[Doc], List[Floats2d]], nO: Option... | 1,253 | 38.1875 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/textcat.py | from functools import partial
from typing import List, Optional, cast
from thinc.api import (
Dropout,
LayerNorm,
Linear,
Logistic,
Maxout,
Model,
ParametricAttention,
Relu,
Softmax,
SparseLinear,
chain,
clone,
concatenate,
list2ragged,
reduce_mean,
reduc... | 6,769 | 34.07772 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/tok2vec.py | from typing import List, Optional, Union, cast
from thinc.api import (
HashEmbed,
Maxout,
Mish,
Model,
PyTorchLSTM,
chain,
clone,
concatenate,
expand_window,
list2ragged,
noop,
ragged2list,
residual,
with_array,
with_padded,
)
from thinc.types import Floats2d... | 14,046 | 38.90625 | 89 | py |
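Tok2vec embed/encode layers like those imported above are normally composed through the config system. A sketch that resolves one such composition in Python (the architecture names are spaCy's registered ones; the dimensions are illustrative):

```python
from thinc.api import Config

from spacy.util import registry

CFG = """
[model]
@architectures = "spacy.Tok2Vec.v2"

[model.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 96
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
rows = [5000, 1000, 1000, 1000]
include_static_vectors = false

[model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3
"""

model = registry.resolve(Config().from_str(CFG))["model"]
print(model.name)
```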
spaCy | spaCy-master/spacy/pipeline/__init__.py | from .attributeruler import AttributeRuler
from .dep_parser import DependencyParser
from .edit_tree_lemmatizer import EditTreeLemmatizer
from .entity_linker import EntityLinker
from .entityruler import EntityRuler
from .functions import merge_entities, merge_noun_chunks, merge_subtokens
from .lemmatizer import Lemmatiz... | 1,253 | 26.866667 | 73 | py |
spaCy | spaCy-master/spacy/pipeline/attributeruler.py | from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import srsly
from .. import util
from ..errors import Errors
from ..language import Language
from ..matcher import Matcher
from ..scorer import Scorer
from ..symbols import IDS
from ..tokens import Doc, Span
from .... | 13,675 | 37.201117 | 97 | py |
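The AttributeRuler maps Matcher patterns to token attributes. A minimal example against the documented `add()` signature (whether "Dr." survives as one token depends on the tokenizer's exceptions):

```python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("attribute_ruler")
ruler.add(patterns=[[{"ORTH": "Dr."}]], attrs={"LEMMA": "doctor"})
doc = nlp("Dr. Smith arrived.")
print(doc[0].lemma_)  # expected "doctor" if "Dr." is a single token
```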
spaCy | spaCy-master/spacy/pipeline/edit_tree_lemmatizer.py | from collections import Counter
from itertools import islice
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, cast
import numpy as np
import srsly
from thinc.api import Config, Model, NumpyOps, SequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints2d
from .. import util
from ..... | 15,329 | 35.15566 | 91 | py |
spaCy | spaCy-master/spacy/pipeline/entity_linker.py | import random
from itertools import islice
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import srsly
from thinc.api import Config, CosineDistance, Model, Optimizer, set_dropout_rate
from thinc.types import Floats2d
from .. import util
from ..errors import Errors
fro... | 27,412 | 40.284639 | 120 | py |
spaCy | spaCy-master/spacy/pipeline/entityruler.py | import warnings
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import srsly
from ..errors import Errors, Warnings
from ..language import Language
from ..matcher import Matcher, PhraseMatcher
from ..matcher.levenshte... | 21,071 | 37.878229 | 107 | py |
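Standard EntityRuler usage, per the documented API: add it as a pipe and feed it phrase- or token-based patterns:

```python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns([
    {"label": "ORG", "pattern": "Apple"},
    {"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]},
])
doc = nlp("Apple opened an office in San Francisco")
print([(ent.text, ent.label_) for ent in doc.ents])
```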
spaCy | spaCy-master/spacy/pipeline/functions.py | import warnings
from typing import Any, Dict
import srsly
from .. import util
from ..errors import Warnings
from ..language import Language
from ..matcher import Matcher
from ..tokens import Doc
@Language.component(
"merge_noun_chunks",
requires=["token.dep", "token.tag", "token.pos"],
retokenizes=True,... | 6,704 | 31.867647 | 114 | py |
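`merge_noun_chunks`, `merge_entities`, and `merge_subtokens` are registered components; they need a pipeline that already produces the annotations they retokenize over. An example, assuming the `en_core_web_sm` model is installed:

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed installed (provides parser + NER)
nlp.add_pipe("merge_entities")
doc = nlp("San Francisco is foggy")
print([t.text for t in doc])  # "San Francisco" becomes a single token
```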
spaCy | spaCy-master/spacy/pipeline/lemmatizer.py | import warnings
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from thinc.api import Model
from .. import util
from ..errors import Errors, Warnings
from ..language import Language
from ..lookups import Lookups, load_lookups
from ..scorer import Scorer
from ..t... | 12,178 | 35.139466 | 90 | py |
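The rule/lookup lemmatizer is added as a pipe; lookup mode pulls its tables from `spacy-lookups-data`, which is assumed installed here:

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()  # loads the lookup tables
doc = nlp("leaves falling")
print([(t.text, t.lemma_) for t in doc])
```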
spaCy | spaCy-master/spacy/pipeline/span_finder.py | from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from thinc.api import Config, Model, Optimizer, set_dropout_rate
from thinc.types import Floats2d
from ..errors import Errors
from ..language import Language
from ..scorer import Scorer
from ..tokens import Doc, Span
from ..training import Exampl... | 12,254 | 35.473214 | 85 | py |
spaCy | spaCy-master/spacy/pipeline/span_ruler.py | import warnings
from functools import partial
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import srsly
from .. import util
from ..errors import Errors, Warnings
from ..language import Lang... | 21,554 | 35.045151 | 88 | py |
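SpanRuler is the span-group counterpart to the EntityRuler; matches land in `doc.spans` under its key (default `"ruler"`):

```python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
doc = nlp("Apple is hiring")
print([(span.text, span.label_) for span in doc.spans["ruler"]])
```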