| repo (string, 2-152 chars) | file (string, 15-239 chars) | code (string, 0-58.4M chars) | file_length (int64, 0-58.4M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 364 classes) |
|---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/tests/lang/ru/test_text.py | import pytest
from spacy.lang.ru.lex_attrs import like_num
@pytest.mark.parametrize("word", ["одиннадцать"])
def test_ru_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 210 | 20.1 | 49 | py |
spaCy | spaCy-master/spacy/tests/lang/ru/test_tokenizer.py | from string import punctuation
import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_ru_tokenizer_handles_only_punct(ru_tokenizer, text):
tokens = ru_tokenizer... | 5,403 | 32.987421 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sa/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sa/test_text.py | import pytest
def test_sa_tokenizer_handles_long_text(sa_tokenizer):
text = """नानाविधानि दिव्यानि नानावर्णाकृतीनि च।।"""
tokens = sa_tokenizer(text)
assert len(tokens) == 6
@pytest.mark.parametrize(
"text,length",
[
("श्री भगवानुवाच पश्य मे पार्थ रूपाणि शतशोऽथ सहस्रशः।", 9),
("ग... | 1,015 | 22.627907 | 67 | py |
spaCy | spaCy-master/spacy/tests/lang/sk/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sk/test_text.py | import pytest
def test_long_text(sk_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
majúc na zreteli, že cieľom tejto deklarácie je zabezpečiť všeobecné
a účinné uznávanie a dodržiavanie práv v nej vyhlásených;
majúc na zreteli, že cieľom Rady Európy je dosiahnutie väčšej
jednoty medzi j... | 1,413 | 27.857143 | 68 | py |
spaCy | spaCy-master/spacy/tests/lang/sk/test_tokenizer.py | import pytest
SK_BASIC_TOKENIZATION_TESTS = [
(
"Kedy sa narodil Andrej Kiska?",
["Kedy", "sa", "narodil", "Andrej", "Kiska", "?"],
),
]
@pytest.mark.parametrize("text,expected_tokens", SK_BASIC_TOKENIZATION_TESTS)
def test_sk_tokenizer_basic(sk_tokenizer, text, expected_tokens):
tokens =... | 453 | 27.375 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sl/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sl/test_text.py | import pytest
def test_long_text(sl_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
upoštevajoč, da si ta deklaracija prizadeva zagotoviti splošno in
učinkovito priznavanje in spoštovanje v njej razglašenih pravic,
upoštevajoč, da je cilj Sveta Evrope doseči večjo enotnost med
njegovimi ... | 968 | 34.888889 | 73 | py |
spaCy | spaCy-master/spacy/tests/lang/sl/test_tokenizer.py | import pytest
SL_BASIC_TOKENIZATION_TESTS = [
(
"Vsakdo ima pravico do spoštovanja njegovega zasebnega in "
"družinskega življenja, doma in dopisovanja.",
[
"Vsakdo",
"ima",
"pravico",
"do",
"spoštovanja",
"njegovega",
... | 825 | 24.030303 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sq/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sq/test_text.py | import pytest
def test_long_text(sq_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
Qeveritë nënshkruese, anëtare të Këshillit të Evropës,
Duke pasur parasysh Deklaratën Universale të të Drejtave të
Njeriut, të shpallur nga Asambleja e Përgjithshme e Kombeve të
Bashkuara më 10 dhjetor 19... | 1,100 | 41.346154 | 65 | py |
spaCy | spaCy-master/spacy/tests/lang/sq/test_tokenizer.py | import pytest
SQ_BASIC_TOKENIZATION_TESTS = [
(
"Askush nuk mund t’i nënshtrohet torturës ose dënimeve ose "
"trajtimeve çnjerëzore ose poshtëruese.",
[
"Askush",
"nuk",
"mund",
"t’i",
"nënshtrohet",
"torturës",
... | 801 | 24.0625 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sr/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sr/test_exceptions.py | import pytest
@pytest.mark.parametrize(
"text,norms,lemmas",
[
("о.г.", ["ове године"], ["ова година"]),
("чет.", ["четвртак"], ["четвртак"]),
("гђа", ["госпођа"], ["госпођа"]),
("ил'", ["или"], ["или"]),
],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norm... | 446 | 25.294118 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/sr/test_tokenizer.py | import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_sr_tokenizer_handles_only_punct(sr_tokenizer, text):
tokens = sr_tokenizer(text)
assert len(tokens) ==... | 4,226 | 33.08871 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/sv/test_exceptions.py | import pytest
SV_TOKEN_EXCEPTION_TESTS = [
(
"Smörsåsen används bl.a. till fisk",
["Smörsåsen", "används", "bl.a.", "till", "fisk"],
),
(
"Jag kommer först kl. 13 p.g.a. diverse förseningar",
["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
),... | 2,425 | 30.102564 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_lex_attrs.py | import pytest
from spacy.lang.sv.lex_attrs import like_num
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10.000", True),
("10.00", True),
("999,0", True),
("en", True),
("två", True),
("miljard", True),
("hund", ... | 683 | 20.375 | 58 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
def test_noun_chunks_is_parsed_sv(sv_tokenizer):
"""Test that noun_chunks raises Value Error for 'sv' language if Doc is not parsed."""
doc = sv_tokenizer("Studenten läste den bästa boken")
with pytest.raises(ValueError):
list(doc.noun_chunks)
SV_NP_TE... | 1,844 | 35.9 | 148 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["(under)"])
def test_tokenizer_splits_no_special(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["gitta'r", "Björn's", "Lars'"])
def test_tokenizer_handles_no_punct(sv_tokenizer, text):
tokens = sv... | 1,261 | 29.047619 | 66 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_text.py | def test_sv_tokenizer_handles_long_text(sv_tokenizer):
text = """Det var så härligt ute på landet. Det var sommar, majsen var gul, havren grön,
höet var uppställt i stackar nere vid den gröna ängen, och där gick storken på sina långa,
röda ben och snackade engelska, för det språket hade han lärt sig av sin mor.
Ru... | 703 | 45.933333 | 129 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_tokenizer.py | import pytest
SV_TOKEN_EXCEPTION_TESTS = [
(
"Smörsåsen används bl.a. till fisk",
["Smörsåsen", "används", "bl.a.", "till", "fisk"],
),
(
"Jag kommer först kl. 13 p.g.a. diverse förseningar",
["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
),... | 994 | 31.096774 | 84 | py |
spaCy | spaCy-master/spacy/tests/lang/ta/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/ta/test_text.py | import pytest
from spacy.lang.ta import Tamil
# Wikipedia excerpt: https://en.wikipedia.org/wiki/Chennai (Tamil Language)
TAMIL_BASIC_TOKENIZER_SENTENCIZER_TEST_TEXT = """சென்னை (Chennai) தமிழ்நாட்டின் தலைநகரமும், இந்தியாவின் நான்காவது பெரிய நகரமும் ஆகும். 1996 ஆம் ஆண்டுக்கு முன்னர் இந்நகரம், மதராசு பட்டினம், மெட்ராஸ... | 1,474 | 53.62963 | 828 | py |
spaCy | spaCy-master/spacy/tests/lang/ta/test_tokenizer.py | import pytest
from spacy.lang.ta import Tamil
from spacy.symbols import ORTH
TA_BASIC_TOKENIZATION_TESTS = [
(
"கிறிஸ்துமஸ் மற்றும் இனிய புத்தாண்டு வாழ்த்துக்கள்",
["கிறிஸ்துமஸ்", "மற்றும்", "இனிய", "புத்தாண்டு", "வாழ்த்துக்கள்"],
),
(
"எனக்கு என் குழந்தைப் பருவம் நினைவிருக்கிறது",... | 4,710 | 23.794737 | 152 | py |
spaCy | spaCy-master/spacy/tests/lang/th/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/th/test_serialize.py | import pickle
from spacy.lang.th import Thai
from ...util import make_tempdir
def test_th_tokenizer_serialize(th_tokenizer):
tokenizer_bytes = th_tokenizer.to_bytes()
nlp = Thai()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
with make_tempdir() as... | 707 | 26.230769 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/th/test_tokenizer.py | import pytest
@pytest.mark.parametrize(
"text,expected_tokens", [("คุณรักผมไหม", ["คุณ", "รัก", "ผม", "ไหม"])]
)
def test_th_tokenizer(th_tokenizer, text, expected_tokens):
tokens = [token.text for token in th_tokenizer(text)]
assert tokens == expected_tokens
| 274 | 26.5 | 74 | py |
spaCy | spaCy-master/spacy/tests/lang/ti/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/ti/test_exception.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/ti/test_text.py | import pytest
def test_ti_tokenizer_handles_long_text(ti_tokenizer):
text = """ቻንስለር ጀርመን ኣንገላ መርከል ኣብታ ሃገር ቁጽሪ መትሓዝቲ ኮቪድ መዓልታዊ ክብረ መዝገብ ድሕሪ ምህራሙ- ጽኑዕ እገዳ ክግበር ጸዊዓ።
መርከል ሎሚ ንታሕታዋይ ባይቶ ሃገራ ክትገልጽ ከላ፡ ኣብ ወሳኒ ምዕራፍ ቃልሲ ኢና ዘለና-ዳሕራዋይ ማዕበል ካብቲ ቀዳማይ ክገድድ ይኽእል`ዩ ኢላ።
ትካል ምክልኻል ተላገብቲ ሕማማት ጀርመን፡ ኣብ ዝሓለፈ 24 ሰዓታት ኣብ ምልእቲ ጀርመር... | 1,378 | 25.519231 | 100 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/tl/test_indices.py | def test_tl_simple_punct(tl_tokenizer):
text = "Sige, punta ka dito"
tokens = tl_tokenizer(text)
assert tokens[0].idx == 0
assert tokens[1].idx == 4
assert tokens[2].idx == 6
assert tokens[3].idx == 12
assert tokens[4].idx == 15
| 257 | 27.666667 | 39 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/test_punct.py | import pytest
from spacy.lang.punctuation import TOKENIZER_PREFIXES
from spacy.util import compile_prefix_regex
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_tl_tokenize... | 4,420 | 33.539063 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/test_text.py | import pytest
from spacy.lang.tl.lex_attrs import like_num
# https://github.com/explosion/spaCy/blob/master/spacy/tests/lang/en/test_text.py
def test_tl_tokenizer_handles_long_text(tl_tokenizer):
# Excerpt: "Sapagkat ang Pilosopiya ay Ginagawa" by Padre Roque Ferriols
text = """
Tingin tayo nang tingin.... | 2,480 | 32.08 | 83 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/tr/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed(tr_tokenizer):
"""Test that noun_chunks raises Value Error for 'tr' language if Doc is not parsed.
To check this test, we're constructing a Doc
with a new Vocab here and forcing is_parsed to 'False'
to make sure the noun chunks don't run.
"""
doc = ... | 419 | 31.307692 | 87 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_parser.py | from spacy.tokens import Doc
def test_tr_noun_chunks_amod_simple(tr_tokenizer):
text = "sarı kedi"
heads = [1, 1]
deps = ["amod", "ROOT"]
pos = ["ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
... | 19,594 | 33.019097 | 89 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_text.py | import pytest
from spacy.lang.tr.lex_attrs import like_num
def test_tr_tokenizer_handles_long_text(tr_tokenizer):
text = """Pamuk nasıl ipliğe dönüştürülür?
Sıkıştırılmış balyalar halindeki pamuk, iplik fabrikasına getirildiğinde hem
lifleri birbirine dolaşmıştır, hem de tarladan toplanırken araya bitkinin
parç... | 1,694 | 33.591837 | 107 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_tokenizer.py | import pytest
ABBREV_TESTS = [
("Dr. Murat Bey ile görüştüm.", ["Dr.", "Murat", "Bey", "ile", "görüştüm", "."]),
("Dr.la görüştüm.", ["Dr.la", "görüştüm", "."]),
("Dr.'la görüştüm.", ["Dr.'la", "görüştüm", "."]),
("TBMM'de çalışıyormuş.", ["TBMM'de", "çalışıyormuş", "."]),
(
"Hem İst. hem A... | 18,944 | 26.180775 | 180 | py |
spaCy | spaCy-master/spacy/tests/lang/tt/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/tt/test_tokenizer.py | import pytest
INFIX_HYPHEN_TESTS = [
("Явым-төшем күләме.", "Явым-төшем күләме .".split()),
("Хатын-кыз киеме.", "Хатын-кыз киеме .".split()),
]
PUNC_INSIDE_WORDS_TESTS = [
(
"Пассаҗир саны - 2,13 млн — кеше/көндә (2010), 783,9 млн. кеше/елда.",
"Пассаҗир саны - 2,13 млн — кеше / көндә ( 2... | 2,758 | 31.845238 | 81 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/uk/test_lemmatizer.py | import pytest
from spacy.tokens import Doc
pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_uk_lemmatizer(uk_lemmatizer):
"""Check that the default uk lemmatizer runs."""
doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
assert uk_lemmatizer.mode == "pymorphy3"
uk_l... | 783 | 27 | 69 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/test_tokenizer.py | import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_uk_tokenizer_handles_only_punct(uk_tokenizer, text):
tokens = uk_tokenizer(text)
assert len(tokens) ==... | 4,997 | 32.543624 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/test_tokenizer_exc.py | import pytest
@pytest.mark.parametrize(
"text,norms,lemmas",
[("ім.", ["імені"], ["ім'я"]), ("проф.", ["професор"], ["професор"])],
)
def test_uk_tokenizer_abbrev_exceptions(uk_tokenizer, text, norms, lemmas):
tokens = uk_tokenizer(text)
assert len(tokens) == 1
assert [token.norm_ for token in tok... | 334 | 26.916667 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/ur/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/ur/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["ہےں۔", "کیا۔"])
def test_contractions(ur_tokenizer, text):
"""Test specific Urdu punctuation character"""
tokens = ur_tokenizer(text)
assert len(tokens) == 2
| 221 | 23.666667 | 50 | py |
spaCy | spaCy-master/spacy/tests/lang/ur/test_text.py | import pytest
def test_ur_tokenizer_handles_long_text(ur_tokenizer):
text = """اصل میں، رسوا ہونے کی ہمیں کچھ عادت سی ہو گئی ہے۔"""
tokens = ur_tokenizer(text)
assert len(tokens) == 14
@pytest.mark.parametrize("text,length", [("تحریر باسط حبیب", 3), ("میرا پاکستان", 2)])
def test_ur_tokenizer_handles_cn... | 417 | 28.857143 | 86 | py |
spaCy | spaCy-master/spacy/tests/lang/vi/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/vi/test_serialize.py | import pickle
from spacy.lang.vi import Vietnamese
from ...util import make_tempdir
def test_vi_tokenizer_serialize(vi_tokenizer):
tokenizer_bytes = vi_tokenizer.to_bytes()
nlp = Vietnamese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.... | 1,309 | 29.465116 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/vi/test_tokenizer.py | import pytest
from spacy.lang.vi import Vietnamese
from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS
# fmt: off
TOKENIZER_TESTS = [
("Đây là một văn bản bằng tiếng Việt Sau đó, đây là một văn bản khác bằng ngôn ngữ này", ['Đây', 'là', 'một', 'văn bản', 'bằng', 'tiếng', 'Việt', 'Sau', 'đó', ',', 'đâ... | 1,584 | 32.020833 | 237 | py |
spaCy | spaCy-master/spacy/tests/lang/xx/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/xx/test_text.py | import pytest
def test_long_text(xx_tokenizer):
# Excerpt: Text in Skolt Sami taken from https://www.samediggi.fi
text = """
Säʹmmla lie Euroopp unioon oʹdinakai alggmeer. Säʹmmlai alggmeerstatus lij raʹvvjum Lääʹddjânnam vuâđđlääʹjjest.
Alggmeer kriteeʹr vuâđđâʹvve meeraikõskksaž tuâjjorganisaatio, ILO, su... | 1,482 | 58.32 | 127 | py |
spaCy | spaCy-master/spacy/tests/lang/xx/test_tokenizer.py | import pytest
XX_BASIC_TOKENIZATION_TESTS = [
(
"Lääʹddjânnmest lie nuʹtt 10 000 säʹmmliʹžžed. Seeʹst pâʹjjel",
[
"Lääʹddjânnmest",
"lie",
"nuʹtt",
"10",
"000",
"säʹmmliʹžžed",
".",
"Seeʹst",
... | 643 | 23.769231 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/yo/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/yo/test_text.py | import pytest
from spacy.lang.yo.lex_attrs import like_num
def test_yo_tokenizer_handles_long_text(yo_tokenizer):
text = """Àwọn ọmọ ìlú tí wọ́n ń ṣàmúlò ayélujára ti bẹ̀rẹ̀ ìkọkúkọ sórí àwòrán ààrẹ Nkurunziza nínú ìfẹ̀hónúhàn pẹ̀lú àmì ìdámọ̀: Nkurunziza àti Burundi:
Ọmọ ilé ẹ̀kọ́ gíga ní ẹ̀wọ̀n fún kí... | 1,244 | 39.16129 | 159 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/lang/zh/test_serialize.py | import pytest
from spacy.lang.zh import Chinese
from ...util import make_tempdir
def zh_tokenizer_serialize(zh_tokenizer):
tokenizer_bytes = zh_tokenizer.to_bytes()
nlp = Chinese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
with make_tempdir() a... | 1,247 | 25 | 76 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/test_text.py | import pytest
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("999.0", True),
("一", True),
("二", True),
("〇", True),
("十一", True),
("狗", False),
(",", False),
],
)
def test_lex_attrs_like_number(zh_tokenizer_jieb... | 442 | 19.136364 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/test_tokenizer.py | import pytest
from thinc.api import ConfigValidationError
from spacy.lang.zh import Chinese, _get_pkuseg_trie_data
# fmt: off
TEXTS = ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",)
JIEBA_TOKENIZER_TESTS = [
(TEXTS[0],
['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
'的', '语言', ',', '目前', '世界', '有'... | 2,603 | 31.962025 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/matcher/test_dependency_matcher.py | import copy
import pickle
import re
import pytest
from mock import Mock
from spacy.matcher import DependencyMatcher
from spacy.tokens import Doc, Token
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def doc(en_vocab):
words = ["The", "quick", "brown", "fox", "jumped", "over", ... | 14,872 | 29.415133 | 88 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_levenshtein.py | import pytest
from spacy.matcher import levenshtein
from spacy.matcher.levenshtein import levenshtein_compare
# empty string plus 10 random ASCII, 10 random unicode, and 2 random long tests
# from polyleven
@pytest.mark.parametrize(
"dist,a,b",
[
(0, "", ""),
(4, "bbcb", "caba"),
(3, ... | 2,662 | 34.506667 | 263 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_matcher_api.py | import pytest
from mock import Mock
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span, Token
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def matcher(en_vocab):
rules = {
"JS": [[{"ORTH": "JavaScript"}]],
"GoogleNow": [[{"ORTH": "Google"}, {... | 29,890 | 31.847253 | 88 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_matcher_logic.py | import re
import pytest
from spacy.attrs import IS_PUNCT, LOWER, ORTH
from spacy.errors import MatchPatternError
from spacy.lang.en import English
from spacy.lang.lex_attrs import LEX_ATTRS
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span, Token
from spacy.vocab import Vocab
pattern1 = [{"ORTH": ... | 27,183 | 33.453739 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_pattern_validation.py | import pytest
from spacy.errors import MatchPatternError
from spacy.matcher import Matcher
from spacy.schemas import validate_token_pattern
# (pattern, num errors with validation, num errors identified with minimal
# checks)
TEST_PATTERNS = [
# Bad patterns flagged in all cases
([{"XX": "foo"}], 1, 1),
(... | 3,367 | 37.712644 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_phrase_matcher.py | import warnings
import pytest
import srsly
from mock import Mock
from spacy.lang.en import English
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Doc, Span
from spacy.vocab import Vocab
from ..util import make_tempdir
@pytest.mark.issue(3248)
def test_issue3248_1():
"""Test that the ... | 17,967 | 34.231373 | 107 | py |
spaCy | spaCy-master/spacy/tests/morphology/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/morphology/test_morph_converters.py | from spacy.morphology import Morphology
def test_feats_converters():
feats = "Case=dat,gen|Number=sing"
feats_dict = {"Case": "dat,gen", "Number": "sing"}
# simple conversions
assert Morphology.dict_to_feats(feats_dict) == feats
assert Morphology.feats_to_dict(feats) == feats_dict
# roundtri... | 856 | 37.954545 | 87 | py |
spaCy | spaCy-master/spacy/tests/morphology/test_morph_features.py | import pytest
from spacy.morphology import Morphology
from spacy.strings import StringStore, get_string_id
@pytest.fixture
def morphology():
return Morphology(StringStore())
def test_init(morphology):
pass
def test_add_morphology_with_string_names(morphology):
morphology.add({"Case": "gen", "Number":... | 1,349 | 25.470588 | 77 | py |
spaCy | spaCy-master/spacy/tests/morphology/test_morph_pickle.py | import pickle
import pytest
from spacy.morphology import Morphology
from spacy.strings import StringStore
@pytest.fixture
def morphology():
morphology = Morphology(StringStore())
morphology.add("Feat1=Val1|Feat2=Val2")
morphology.add("Feat3=Val3|Feat4=Val4")
return morphology
def test_morphology_p... | 670 | 26.958333 | 79 | py |
spaCy | spaCy-master/spacy/tests/package/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/package/test_requirements.py | import re
from pathlib import Path
def test_build_dependencies():
# Check that library requirements are pinned exactly the same across different setup files.
# TODO: correct checks for numpy rather than ignoring
libs_ignore_requirements = [
"pytest",
"pytest-timeout",
"mock",
... | 3,208 | 32.778947 | 95 | py |
spaCy | spaCy-master/spacy/tests/parser/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/parser/test_add_label.py | import pytest
from thinc.api import Adam, fix_random_seed
from spacy import registry
from spacy.attrs import NORM
from spacy.language import Language
from spacy.pipeline import DependencyParser, EntityRecognizer
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.ner import DEFAULT_NER_MODEL... | 4,920 | 29.75625 | 81 | py |
spaCy | spaCy-master/spacy/tests/parser/test_arc_eager_oracle.py | import pytest
from spacy import registry
from spacy.pipeline import DependencyParser
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline._parser_internals.nonproj import projectivize
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.tokens import Doc
from spacy.train... | 10,086 | 33.19322 | 88 | py |
spaCy | spaCy-master/spacy/tests/parser/test_ner.py | import logging
import random
import pytest
from numpy.testing import assert_equal
from spacy import registry, util
from spacy.attrs import ENT_IOB
from spacy.lang.en import English
from spacy.lang.it import Italian
from spacy.language import Language
from spacy.lookups import Lookups
from spacy.pipeline import Entity... | 29,176 | 34.195416 | 469 | py |
spaCy | spaCy-master/spacy/tests/parser/test_neural_parser.py | import pytest
from thinc.api import Model
from spacy import registry
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.pipeline.transition_parser import Parser
from spacy.tokens.... | 2,859 | 24.765766 | 81 | py |
spaCy | spaCy-master/spacy/tests/parser/test_nn_beam.py | import hypothesis
import hypothesis.strategies
import numpy
import pytest
from thinc.tests.strategies import ndarrays_of_shape
from spacy.language import Language
from spacy.pipeline._parser_internals._beam_utils import BeamBatch
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline._pars... | 3,601 | 24.013889 | 84 | py |
spaCy | spaCy-master/spacy/tests/parser/test_nonproj.py | import pytest
from spacy.pipeline._parser_internals import nonproj
from spacy.pipeline._parser_internals.nonproj import (
ancestors,
contains_cycle,
is_nonproj_arc,
is_nonproj_tree,
)
from spacy.tokens import Doc
@pytest.fixture
def tree():
return [1, 2, 2, 4, 5, 2, 2]
@pytest.fixture
def cycli... | 6,259 | 39.387097 | 137 | py |
spaCy | spaCy-master/spacy/tests/parser/test_parse.py | import pytest
from numpy.testing import assert_equal
from thinc.api import Adam
from spacy import registry, util
from spacy.attrs import DEP, NORM
from spacy.lang.en import English
from spacy.pipeline import DependencyParser
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tok2vec import ... | 20,155 | 35.983486 | 338 | py |
spaCy | spaCy-master/spacy/tests/parser/test_parse_navigate.py | import pytest
from spacy.tokens import Doc
@pytest.fixture
def words():
# fmt: off
return [
"\n", "It", "was", "a", "bright", "cold", "day", "in", "April", ",",
"and", "the", "clocks", "were", "striking", "thirteen", ".", "\n",
"Winston", "Smith", ",", "his", "chin", "nuzzled", "into"... | 6,216 | 47.952756 | 80 | py |
spaCy | spaCy-master/spacy/tests/parser/test_preset_sbd.py | import pytest
from thinc.api import Adam
from spacy import registry
from spacy.attrs import NORM
from spacy.pipeline import DependencyParser
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
@pytest.fixture
def voc... | 2,561 | 27.786517 | 82 | py |
spaCy | spaCy-master/spacy/tests/parser/test_space_attachment.py | import pytest
from spacy.tokens import Doc
from ..util import apply_transition_sequence
def test_parser_space_attachment(en_vocab):
# fmt: off
words = ["This", "is", "a", "test", ".", "\n", "To", "ensure", " ", "spaces", "are", "attached", "well", "."]
heads = [1, 1, 3, 1, 1, 4, 7, 11, 7, 11, 11, 11, 11... | 2,928 | 35.6125 | 157 | py |
spaCy | spaCy-master/spacy/tests/parser/test_state.py | import pytest
from spacy.pipeline._parser_internals.stateclass import StateClass
from spacy.tokens.doc import Doc
from spacy.vocab import Vocab
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=["a", "b", "c", "d"])
def test_init_state(doc):
state = S... | 1,894 | 22.6875 | 66 | py |
spaCy | spaCy-master/spacy/tests/pipeline/__init__.py | 0 | 0 | 0 | py | |
spaCy | spaCy-master/spacy/tests/pipeline/test_analysis.py | import pytest
from mock import Mock
from spacy.language import Language
from spacy.pipe_analysis import get_attr_info, validate_attrs
def test_component_decorator_assigns():
@Language.component("c1", assigns=["token.tag", "doc.tensor"])
def test_component1(doc):
return doc
@Language.component(
... | 3,457 | 28.810345 | 88 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_annotates_on_update.py | from typing import Callable, Iterable, Iterator
import pytest
from thinc.api import Config
from spacy.lang.en import English
from spacy.language import Language
from spacy.training import Example
from spacy.training.loop import train
from spacy.util import load_model_from_config, registry
@pytest.fixture
def config... | 3,193 | 27.017544 | 79 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_attributeruler.py | import numpy
import pytest
from spacy import registry, util
from spacy.lang.en import English
from spacy.pipeline import AttributeRuler
from spacy.tokens import Doc
from spacy.training import Example
from ..util import make_tempdir
@pytest.fixture
def nlp():
return English()
@pytest.fixture
def pattern_dicts(... | 9,536 | 31.328814 | 88 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_edit_tree_lemmatizer.py | import pickle
import hypothesis.strategies as st
import pytest
from hypothesis import given
from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees
from spacy.strings import StringStore
from spacy.training import... | 10,449 | 30.287425 | 90 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_entity_linker.py | from typing import Any, Callable, Dict, Iterable, Tuple
import pytest
from numpy.testing import assert_equal
from spacy import Language, registry, util
from spacy.attrs import ENT_KB_ID
from spacy.compat import pickle
from spacy.kb import Candidate, InMemoryLookupKB, KnowledgeBase, get_candidates
from spacy.lang.en i... | 45,209 | 36.363636 | 115 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_entity_ruler.py | import pytest
from thinc.api import NumpyOps, get_current_ops
from spacy import registry
from spacy.errors import MatchPatternError
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline import EntityRecognizer, EntityRuler, SpanRuler, merge_entities
from spacy.pipeline.ner import DE... | 25,896 | 36.916545 | 111 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_functions.py | import pytest
from spacy.language import Language
from spacy.pipeline.functions import merge_subtokens
from spacy.tokens import Doc, Span
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def doc(en_vocab):
# fmt: off
words = ["This", "is", "a", "sentence", ".", "This", "is", ... | 3,189 | 29.970874 | 125 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_initialize.py | import pytest
from pydantic import StrictBool
from thinc.api import ConfigValidationError
from spacy.lang.en import English
from spacy.language import Language
from spacy.training import Example
def test_initialize_arguments():
name = "test_initialize_arguments"
class CustomTokenizer:
def __init__(s... | 2,351 | 32.126761 | 78 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_lemmatizer.py | import pickle
import pytest
from spacy import registry, util
from spacy.lang.en import English
from spacy.lookups import Lookups
from ..util import make_tempdir
@pytest.fixture
def nlp():
@registry.misc("cope_lookups")
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup"... | 3,727 | 31.137931 | 76 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_models.py | from typing import List
import numpy
import pytest
from numpy.testing import assert_almost_equal
from thinc.api import Model, data_validation, get_current_ops
from thinc.types import Array2d, Ragged
from spacy.lang.en import English
from spacy.ml import FeatureExtractor, StaticVectors
from spacy.ml._character_embed i... | 3,842 | 34.256881 | 87 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_morphologizer.py | import pytest
from numpy.testing import assert_almost_equal, assert_equal
from thinc.api import get_current_ops
from spacy import util
from spacy.attrs import MORPH
from spacy.lang.en import English
from spacy.language import Language
from spacy.morphology import Morphology
from spacy.tests.util import make_tempdir
fr... | 8,094 | 34.660793 | 102 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_pipe_factories.py | import pytest
from pydantic import StrictInt, StrictStr
from thinc.api import ConfigValidationError, Linear, Model
import spacy
from spacy.lang.de import German
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.tokens import Doc
fr... | 19,895 | 32.270903 | 88 | py |
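
The numeric columns in each row are derived from the `code` field: `file_length` is the file's character count, `avg_line_length` the mean line width, and `max_line_length` the widest single line. Below is a minimal sketch of recomputing those statistics while streaming the dataset with the Hugging Face `datasets` library; the dataset path `"user/spacy-code"` and the helper name `file_stats` are hypothetical, and the exact averaging convention (whether newline characters are counted, how values are rounded) is an assumption inferred from the sample rows above.

```python
# Minimal sketch: recompute the per-row statistics from the raw `code` field.
# Assumptions: the dataset path "user/spacy-code" is hypothetical, and the
# precise convention behind avg_line_length (newline handling, rounding) is
# guessed from the sample rows, not taken from the dataset's build script.
from datasets import load_dataset


def file_stats(code: str) -> dict:
    """Derive the table's numeric columns from a file's source text."""
    lines = code.splitlines() or [""]  # empty files yield one zero-width line
    widths = [len(line) for line in lines]
    return {
        "file_length": len(code),                      # total characters
        "avg_line_length": sum(widths) / len(widths),  # mean line width
        "max_line_length": max(widths),                # widest single line
    }


# Stream rather than download: per the header, the code column tops out
# around 58.4M characters, so full rows should not be held in memory at once.
ds = load_dataset("user/spacy-code", split="train", streaming=True)
for row in ds:
    stats = file_stats(row["code"])
    print(row["repo"], row["file"], stats)
    break
```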