Each row of this dataset holds one source file plus a few size statistics. Schema:

| Column | Type | Values |
|---|---|---|
| `repo` | string | lengths 2–152 (⌀: nullable) |
| `file` | string | lengths 15–239 |
| `code` | string | lengths 0–58.4M |
| `file_length` | int64 | 0–58.4M |
| `avg_line_length` | float64 | 0–1.81M |
| `max_line_length` | int64 | 0–12.7M |
| `extension_type` | string | 364 classes |

All rows excerpted below come from the `spaCy` repo (`spaCy-master`) with extension type `py`; long `code` values are truncated with `...`.
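The three numeric columns are simple functions of `code`. A minimal sketch of how they could be recomputed (an assumption about the dataset's tooling, not its actual extraction script):

    def file_stats(code: str) -> dict:
        """Recompute file_length, avg_line_length and max_line_length for a row."""
        lines = code.splitlines() or [""]
        lengths = [len(line) for line in lines]
        return {
            "file_length": len(code),                        # total characters
            "avg_line_length": sum(lengths) / len(lengths),  # mean characters per line
            "max_line_length": max(lengths),                 # longest single line
        }

For example, the `eu/test_text.py` row below reports 488 characters, an average line length of 23.45, and a maximum line length of 63.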
**spaCy-master/spacy/tests/lang/eu/test_text.py** (py, 488 chars, avg line length 23.45, max line length 63)

    import pytest

    def test_eu_tokenizer_handles_long_text(eu_tokenizer):
        text = """ta nere guitarra estrenatu ondoren"""
        tokens = eu_tokenizer(text)
        assert len(tokens) == 5

    @pytest.mark.parametrize(
        "text,length",
        [
            ("milesker ederra joan zen hitzaldia plazer hutsa", 7),
            ("astelehen ...
**spaCy-master/spacy/tests/lang/fa/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/fa/test_noun_chunks.py** (py, 276 chars, avg line length 26.7, max line length 90)

    import pytest

    def test_noun_chunks_is_parsed_fa(fa_tokenizer):
        """Test that noun_chunks raises Value Error for 'fa' language if Doc is not parsed."""
        doc = fa_tokenizer("این یک جمله نمونه می باشد.")
        with pytest.raises(ValueError):
            list(doc.noun_chunks)
**spaCy-master/spacy/tests/lang/fi/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/fi/test_noun_chunks.py** (py, 7,179 chars, avg line length 36.789474, max line length 96)

    import pytest

    from spacy.tokens import Doc

    FI_NP_TEST_EXAMPLES = [
        (
            "Kaksi tyttöä potkii punaista palloa",
            ["NUM", "NOUN", "VERB", "ADJ", "NOUN"],
            ["nummod", "nsubj", "ROOT", "amod", "obj"],
            [1, 1, 0, 1, -2],
            ["Kaksi tyttöä", "punaista palloa"],
        ),
        (
            "Eritt...

**spaCy-master/spacy/tests/lang/fi/test_text.py** (py, 541 chars, avg line length 20.68, max line length 61)

    import pytest

    @pytest.mark.parametrize(
        "text,match",
        [
            ("10", True),
            ("1", True),
            ("10000", True),
            ("10,00", True),
            ("-999,0", True),
            ("yksi", True),
            ("kolmetoista", True),
            ("viisikymmentä", True),
            ("tuhat", True),
            ("1/2", True),...

**spaCy-master/spacy/tests/lang/fi/test_tokenizer.py** (py, 2,875 chars, avg line length 32.835294, max line length 88)

    import pytest

    ABBREVIATION_TESTS = [
        (
            "Hyvää uutta vuotta t. siht. Niemelä!",
            ["Hyvää", "uutta", "vuotta", "t.", "siht.", "Niemelä", "!"],
        ),
        ("Paino on n. 2.2 kg", ["Paino", "on", "n.", "2.2", "kg"]),
        (
            "Vuonna 1 eaa. tapahtui kauheita.",
            ["Vuonna", "1", "eaa.", "tapah...
**spaCy-master/spacy/tests/lang/fr/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/fr/test_exceptions.py** (py, 2,198 chars, avg line length 25.493976, max line length 78)

    import pytest

    @pytest.mark.parametrize(
        "text",
        [
            "aujourd'hui",
            "Aujourd'hui",
            "prud'hommes",
            "prud’hommal",
            "audio-numérique",
            "Audio-numérique",
            "entr'amis",
            "entr'abat",
            "rentr'ouvertes",
            "grand'hamien",
            "Châteauneuf-l...

**spaCy-master/spacy/tests/lang/fr/test_noun_chunks.py** (py, 8,316 chars, avg line length 34.849138, max line length 149)

    import pytest

    from spacy.tokens import Doc

    # fmt: off
    @pytest.mark.parametrize(
        "words,heads,deps,pos,chunk_offsets",
        [
            # determiner + noun
            # un nom -> un nom
            (
                ["un", "nom"],
                [1, 1],
                ["det", "ROOT"],
                ["DET", "NOUN"],
                [(0, 2)...

**spaCy-master/spacy/tests/lang/fr/test_prefix_suffix_infix.py** (py, 773 chars, avg line length 31.25, max line length 82)

    import pytest

    from spacy.lang.char_classes import ALPHA
    from spacy.lang.punctuation import TOKENIZER_INFIXES
    from spacy.language import BaseDefaults, Language

    @pytest.mark.issue(768)
    @pytest.mark.parametrize(
        "text,expected_tokens", [("l'avion", ["l'", "avion"]), ("j'ai", ["j'", "ai"])]
    )
    def test_issue768(text,...

**spaCy-master/spacy/tests/lang/fr/test_text.py** (py, 990 chars, avg line length 40.291667, max line length 85)

    import pytest

    from spacy.lang.fr.lex_attrs import like_num

    def test_tokenizer_handles_long_text(fr_tokenizer):
        text = """L'histoire du TAL commence dans les années 1950, bien que l'on puisse \
    trouver des travaux antérieurs. En 1950, Alan Turing éditait un article \
    célèbre sous le titre « Computing machinery an...
**spaCy-master/spacy/tests/lang/ga/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ga/test_tokenizer.py** (py, 680 chars, avg line length 42.5625, max line length 165)

    import pytest

    # fmt: off
    GA_TOKEN_EXCEPTION_TESTS = [
        ("Niall Ó Domhnaill, Rialtas na hÉireann 1977 (lch. 600).", ["Niall", "Ó", "Domhnaill", ",", "Rialtas", "na", "hÉireann", "1977", "(", "lch.", "600", ")", "."]),
        ("Daoine a bhfuil Gaeilge acu, m.sh. tusa agus mise", ["Daoine", "a", "bhfuil", "Gaeilge", "acu...
**spaCy-master/spacy/tests/lang/grc/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/grc/test_text.py** (py, 499 chars, avg line length 19.833333, max line length 59)

    import pytest

    @pytest.mark.parametrize(
        "text,match",
        [
            ("ι", True),
            ("α", True),
            ("ϟα", True),
            ("ἑκατόν", True),
            ("ἐνακόσια", True),
            ("δισχίλια", True),
            ("μύρια", True),
            ("εἷς", True),
            ("λόγος", False),
            (",", False),
            ("λβ...

**spaCy-master/spacy/tests/lang/grc/test_tokenizer.py** (py, 934 chars, avg line length 50.944444, max line length 229)

    import pytest

    # fmt: off
    GRC_TOKEN_EXCEPTION_TESTS = [
        ("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "〈", "τῆς", "〉", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "⟦", "βαρβάρων", "⟧", "ἄρξαι", "."]),
        ("τὴν δὲ τῶν Αἰγυπτίων φιλοσοφίαν εἶναι τοιαύτην περί τε †θεῶν† καὶ ὑπὲρ δικαιοσύν...
**spaCy-master/spacy/tests/lang/gu/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/gu/test_text.py** (py, 475 chars, avg line length 27, max line length 86)

    import pytest

    def test_gu_tokenizer_handlers_long_text(gu_tokenizer):
        text = """પશ્ચિમ ભારતમાં આવેલું ગુજરાત રાજ્ય જે વ્યક્તિઓની માતૃભૂમિ છે"""
        tokens = gu_tokenizer(text)
        assert len(tokens) == 9

    @pytest.mark.parametrize(
        "text,length",
        [("ગુજરાતીઓ ખાવાના શોખીન માનવામાં આવે છે", 6), ("ખેતરની ખેડ...
**spaCy-master/spacy/tests/lang/he/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/he/test_tokenizer.py** (py, 1,947 chars, avg line length 26.43662, max line length 86)

    import pytest

    from spacy.lang.he.lex_attrs import like_num

    @pytest.mark.parametrize(
        "text,expected_tokens",
        [("פייתון היא שפת תכנות דינמית", ["פייתון", "היא", "שפת", "תכנות", "דינמית"])],
    )
    def test_he_tokenizer_handles_abbreviation(he_tokenizer, text, expected_tokens):
        tokens = he_tokenizer(text)
        t...
**spaCy-master/spacy/tests/lang/hi/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/hi/test_lex_attrs.py** (py, 1,138 chars, avg line length 24.311111, max line length 84)

    import pytest

    from spacy.lang.hi.lex_attrs import like_num, norm

    def test_hi_tokenizer_handles_long_text(hi_tokenizer):
        text = """
    ये कहानी 1900 के दशक की है। कौशल्या (स्मिता जयकर) को पता चलता है कि उसका
    छोटा बेटा, देवदास (शाहरुख खान) वापस घर आ रहा है। देवदास 10 साल पहले कानून की
    पढ़ाई करने के लिए इंग्लैंड गया थ...

**spaCy-master/spacy/tests/lang/hi/test_text.py** (py, 357 chars, avg line length 26.538462, max line length 81)

    import pytest

    from spacy.lang.hi import Hindi

    @pytest.mark.issue(3625)
    def test_issue3625():
        """Test that default punctuation rules applies to hindi unicode characters"""
        nlp = Hindi()
        doc = nlp("hi. how हुए. होटल, होटल")
        expected = ["hi", ".", "how", "हुए", ".", "होटल", ",", "होटल"]
        assert [toke...
**spaCy-master/spacy/tests/lang/hr/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/hr/test_text.py** (py, 936 chars, avg line length 33.703704, max line length 72)

    import pytest

    def test_long_text(hr_tokenizer):
        # Excerpt: European Convention on Human Rights
        text = """
    uzimajući u obzir da ta deklaracija nastoji osigurati opće i djelotvorno
    priznanje i poštovanje u njoj proglašenih prava;
    uzimajući u obzir da je cilj Vijeća Europe postizanje većeg jedinstva
    njegovih čla...

**spaCy-master/spacy/tests/lang/hr/test_tokenizer.py** (py, 791 chars, avg line length 23.75, max line length 77)

    import pytest

    HR_BASIC_TOKENIZATION_TESTS = [
        (
            "Nitko se ne smije podvrgnuti mučenju ni nečovječnom ili "
            "ponižavajućem postupanju ili kazni.",
            [
                "Nitko",
                "se",
                "ne",
                "smije",
                "podvrgnuti",
                "mučenju",
                "ni"...
**spaCy-master/spacy/tests/lang/hsb/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/hsb/test_text.py** (py, 562 chars, avg line length 20.653846, max line length 59)

    import pytest

    @pytest.mark.parametrize(
        "text,match",
        [
            ("10", True),
            ("1", True),
            ("10,000", True),
            ("10,00", True),
            ("jedne", True),
            ("dwanaće", True),
            ("milion", True),
            ("sto", True),
            ("załožene", False),
            ("wona", False),
            ...

**spaCy-master/spacy/tests/lang/hsb/test_tokenizer.py** (py, 852 chars, avg line length 24.848485, max line length 120)

    import pytest

    HSB_BASIC_TOKENIZATION_TESTS = [
        (
            "Hornjoserbšćina wobsteji resp. wobsteješe z wjacorych dialektow, kotrež so zdźěla chětro wot so rozeznawachu.",
            [
                "Hornjoserbšćina",
                "wobsteji",
                "resp.",
                "wobsteješe",
                "z",
                "wja...
**spaCy-master/spacy/tests/lang/hu/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/hu/test_tokenizer.py** (py, 14,261 chars, avg line length 43.56875, max line length 109)

    import pytest

    DEFAULT_TESTS = [
        ("N. kormányzósági\nszékhely.", ["N.", "kormányzósági", "székhely", "."]),
        pytest.param(
            "A .hu egy tld.", ["A", ".hu", "egy", "tld", "."], marks=pytest.mark.xfail()
        ),
        ("Az egy.ketto pelda.", ["Az", "egy.ketto", "pelda", "."]),
        ("A pl. rovidites.", ["A", "pl...
**spaCy-master/spacy/tests/lang/hy/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/hy/test_text.py** (py, 205 chars, avg line length 19.6, max line length 44)

    import pytest

    from spacy.lang.hy.lex_attrs import like_num

    @pytest.mark.parametrize("word", ["հիսուն"])
    def test_hy_lex_attrs_capitals(word):
        assert like_num(word)
        assert like_num(word.upper())

**spaCy-master/spacy/tests/lang/hy/test_tokenizer.py** (py, 1,102 chars, avg line length 23.511111, max line length 90)

    import pytest

    # TODO add test cases with valid punctuation signs.

    hy_tokenize_text_test = [
        (
            "Մետաղագիտությունը պայմանականորեն բաժանվում է տեսականի և կիրառականի (տեխնիկական)",
            [
                "Մետաղագիտությունը",
                "պայմանականորեն",
                "բաժանվում",
                "է",
                "տ...
**spaCy-master/spacy/tests/lang/id/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/id/test_noun_chunks.py** (py, 256 chars, avg line length 27.555556, max line length 90)

    import pytest

    def test_noun_chunks_is_parsed_id(id_tokenizer):
        """Test that noun_chunks raises Value Error for 'id' language if Doc is not parsed."""
        doc = id_tokenizer("sebelas")
        with pytest.raises(ValueError):
            list(doc.noun_chunks)

**spaCy-master/spacy/tests/lang/id/test_prefix_suffix_infix.py** (py, 3,492 chars, avg line length 30.1875, max line length 87)

    import pytest

    @pytest.mark.parametrize("text", ["(Ma'arif)"])
    def test_id_tokenizer_splits_no_special(id_tokenizer, text):
        tokens = id_tokenizer(text)
        assert len(tokens) == 3

    @pytest.mark.parametrize("text", ["Ma'arif"])
    def test_id_tokenizer_splits_no_punct(id_tokenizer, text):
        tokens = id_tokenizer(te...

**spaCy-master/spacy/tests/lang/id/test_text.py** (py, 206 chars, avg line length 19.7, max line length 45)

    import pytest

    from spacy.lang.id.lex_attrs import like_num

    @pytest.mark.parametrize("word", ["sebelas"])
    def test_id_lex_attrs_capitals(word):
        assert like_num(word)
        assert like_num(word.upper())
**spaCy-master/spacy/tests/lang/is/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/is/test_text.py** (py, 920 chars, avg line length 33.111111, max line length 67)

    import pytest

    def test_long_text(is_tokenizer):
        # Excerpt: European Convention on Human Rights
        text = """
    hafa í huga, að yfirlýsing þessi hefur það markmið að tryggja
    almenna og raunhæfa viðurkenningu og vernd þeirra réttinda,
    sem þar er lýst;
    hafa í huga, að markmið Evrópuráðs er að koma á nánari einingu
    að...

**spaCy-master/spacy/tests/lang/is/test_tokenizer.py** (py, 765 chars, avg line length 23.709677, max line length 77)

    import pytest

    IS_BASIC_TOKENIZATION_TESTS = [
        (
            "Enginn maður skal sæta pyndingum eða ómannlegri eða "
            "vanvirðandi meðferð eða refsingu. ",
            [
                "Enginn",
                "maður",
                "skal",
                "sæta",
                "pyndingum",
                "eða",
                "ómannleg...
**spaCy-master/spacy/tests/lang/it/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/it/test_noun_chunks.py** (py, 8,630 chars, avg line length 37.704036, max line length 220)

    import pytest

    from spacy.tokens import Doc

    # fmt: off
    @pytest.mark.parametrize(
        "words,heads,deps,pos,chunk_offsets",
        [
            # determiner + noun
            # un pollo -> un pollo
            (
                ["un", "pollo"],
                [1, 1],
                ["det", "ROOT"],
                ["DET", "NOUN"],
                [...

**spaCy-master/spacy/tests/lang/it/test_prefix_suffix_infix.py** (py, 357 chars, avg line length 28.833333, max line length 74)

    import pytest

    @pytest.mark.parametrize(
        "text,expected_tokens", [("c'è", ["c'", "è"]), ("l'ha", ["l'", "ha"])]
    )
    def test_contractions(it_tokenizer, text, expected_tokens):
        """Test that the contractions are split into two tokens"""
        tokens = it_tokenizer(text)
        assert len(tokens) == 2
        assert [t.text...

**spaCy-master/spacy/tests/lang/it/test_stopwords.py** (py, 449 chars, avg line length 24, max line length 85)

    import pytest

    @pytest.mark.parametrize(
        "word", ["un", "lo", "dell", "dall", "si", "ti", "mi", "quest", "quel", "quello"]
    )
    def test_stopwords_basic(it_tokenizer, word):
        tok = it_tokenizer(word)[0]
        assert tok.is_stop

    @pytest.mark.parametrize(
        "word", ["quest'uomo", "l'ho", "un'amica", "dell'olio", "...

**spaCy-master/spacy/tests/lang/it/test_text.py** (py, 411 chars, avg line length 26.466667, max line length 65)

    import pytest

    @pytest.mark.issue(2822)
    def test_issue2822(it_tokenizer):
        """Test that the abbreviation of poco is kept as one word."""
        doc = it_tokenizer("Vuoi un po' di zucchero?")
        assert len(doc) == 6
        assert doc[0].text == "Vuoi"
        assert doc[1].text == "un"
        assert doc[2].text == "po'"
        ass...
**spaCy-master/spacy/tests/lang/ja/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ja/test_lemmatization.py** (py, 578 chars, avg line length 22.16, max line length 87)

    import pytest

    @pytest.mark.parametrize(
        "word,lemma",
        [("新しく", "新しい"), ("赤く", "赤い"), ("すごく", "すごい"), ("いただきました", "いただく"), ("なった", "なる")],
    )
    def test_ja_lemmatizer_assigns(ja_tokenizer, word, lemma):
        test_lemma = ja_tokenizer(word)[0].lemma_
        assert test_lemma == lemma

    @pytest.mark.parametrize(
        "w...

**spaCy-master/spacy/tests/lang/ja/test_morphologizer_factory.py** (py, 244 chars, avg line length 21.272727, max line length 49)

    import pytest

    from spacy.lang.ja import Japanese

    def test_ja_morphologizer_factory():
        pytest.importorskip("sudachipy")
        nlp = Japanese()
        morphologizer = nlp.add_pipe("morphologizer")
        assert morphologizer.cfg["extend"] is True

**spaCy-master/spacy/tests/lang/ja/test_serialize.py** (py, 1,307 chars, avg line length 29.418605, max line length 75)

    import pickle

    from spacy.lang.ja import Japanese

    from ...util import make_tempdir

    def test_ja_tokenizer_serialize(ja_tokenizer):
        tokenizer_bytes = ja_tokenizer.to_bytes()
        nlp = Japanese()
        nlp.tokenizer.from_bytes(tokenizer_bytes)
        assert tokenizer_bytes == nlp.tokenizer.to_bytes()
        assert nlp.toke...

**spaCy-master/spacy/tests/lang/ja/test_tokenizer.py** (py, 6,443 chars, avg line length 37.130178, max line length 144)

    import pytest

    from spacy.lang.ja import DetailedToken, Japanese

    from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS

    # fmt: off
    TOKENIZER_TESTS = [
        ("日本語だよ", ['日本', '語', 'だ', 'よ']),
        ("東京タワーの近くに住んでいます。", ['東京', 'タワー', 'の', '近く', 'に', '住ん', 'で', 'い', 'ます', '。']),
        ("吾輩は猫である。", ['吾輩', 'は', '猫', 'で',...
**spaCy-master/spacy/tests/lang/ko/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ko/test_lemmatization.py** (py, 269 chars, avg line length 26, max line length 88)

    import pytest

    @pytest.mark.parametrize(
        "word,lemma", [("새로운", "새롭"), ("빨간", "빨갛"), ("클수록", "크"), ("뭡니까", "뭣"), ("됐다", "되")]
    )
    def test_ko_lemmatizer_assigns(ko_tokenizer, word, lemma):
        test_lemma = ko_tokenizer(word)[0].lemma_
        assert test_lemma == lemma

**spaCy-master/spacy/tests/lang/ko/test_serialize.py** (py, 713 chars, avg line length 26.461538, max line length 64)

    import pickle

    from spacy.lang.ko import Korean

    from ...util import make_tempdir

    def test_ko_tokenizer_serialize(ko_tokenizer):
        tokenizer_bytes = ko_tokenizer.to_bytes()
        nlp = Korean()
        nlp.tokenizer.from_bytes(tokenizer_bytes)
        assert tokenizer_bytes == nlp.tokenizer.to_bytes()
        with make_tempdir(...

**spaCy-master/spacy/tests/lang/ko/test_tokenizer.py** (py, 2,439 chars, avg line length 32.105263, max line length 75)

    import pytest

    # fmt: off
    TOKENIZER_TESTS = [("서울 타워 근처에 살고 있습니다.", "서울 타워 근처 에 살 고 있 습니다 ."),
                       ("영등포구에 있는 맛집 좀 알려주세요.", "영등포구 에 있 는 맛집 좀 알려 주 세요 ."),
                       ("10$ 할인코드를 적용할까요?", "10 $ 할인 코드 를 적용 할까요 ?")]

    TAG_TESTS = [("서울 타워 근처에 살고 있습니다.",
                  "NNP NNG NNG JKB VV EC VX EF SF"),...
**spaCy-master/spacy/tests/lang/ky/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ky/test_tokenizer.py** (py, 2,969 chars, avg line length 33.534884, max line length 84)

    import pytest

    INFIX_HYPHEN_TESTS = [
        ("Бала-чака жакшыбы?", "Бала-чака жакшыбы ?".split()),
        ("Кыз-келиндер кийими.", "Кыз-келиндер кийими .".split()),
    ]

    PUNC_INSIDE_WORDS_TESTS = [
        (
            "Пассажир саны - 2,13 млн — киши/күнүнө (2010), 783,9 млн. киши/жылына.",
            "Пассажир саны - 2,13 млн — киши ...
**spaCy-master/spacy/tests/lang/la/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/la/test_exception.py** (py, 236 chars, avg line length 25.333333, max line length 64)

    import pytest

    def test_la_tokenizer_handles_exc_in_text(la_tokenizer):
        text = "scio te omnia facturum, ut nobiscum quam primum sis"
        tokens = la_tokenizer(text)
        assert len(tokens) == 11
        assert tokens[6].text == "nobis"

**spaCy-master/spacy/tests/lang/la/test_noun_chunks.py** (py, 1,628 chars, avg line length 29.166667, max line length 87)

    import pytest

    from spacy.tokens import Doc

    def test_noun_chunks_is_parsed(la_tokenizer):
        """Test that noun_chunks raises Value Error for 'la' language if Doc is not parsed.
        To check this test, we're constructing a Doc
        with a new Vocab here and forcing is_parsed to 'False'
        to make sure the noun chunks...

**spaCy-master/spacy/tests/lang/la/test_text.py** (py, 804 chars, avg line length 20.756757, max line length 58)

    import pytest

    from spacy.lang.la.lex_attrs import like_num

    @pytest.mark.parametrize(
        "text,match",
        [
            ("IIII", True),
            ("VI", True),
            ("vi", True),
            ("IV", True),
            ("iv", True),
            ("IX", True),
            ("ix", True),
            ("MMXXII", True),
            ("0", True),
            ...
**spaCy-master/spacy/tests/lang/lb/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/lb/test_exceptions.py** (py, 586 chars, avg line length 26.952381, max line length 77)

    import pytest

    @pytest.mark.parametrize("text", ["z.B.", "Jan."])
    def test_lb_tokenizer_handles_abbr(lb_tokenizer, text):
        tokens = lb_tokenizer(text)
        assert len(tokens) == 1

    @pytest.mark.parametrize("text", ["d'Saach", "d'Kanner", "d’Welt", "d’Suen"])
    def test_lb_tokenizer_splits_contractions(lb_tokenizer, t...

**spaCy-master/spacy/tests/lang/lb/test_prefix_suffix_infix.py** (py, 584 chars, avg line length 28.25, max line length 80)

    import pytest

    @pytest.mark.parametrize("text,length", [("z.B.", 1), ("zb.", 2), ("(z.B.", 2)])
    def test_lb_tokenizer_splits_prefix_interact(lb_tokenizer, text, length):
        tokens = lb_tokenizer(text)
        assert len(tokens) == length

    @pytest.mark.parametrize("text", ["z.B.)"])
    def test_lb_tokenizer_splits_suffix_in...

**spaCy-master/spacy/tests/lang/lb/test_text.py** (py, 1,270 chars, avg line length 56.772727, max line length 714)

    import pytest

    def test_lb_tokenizer_handles_long_text(lb_tokenizer):
        text = """Den Nordwand an d'Sonn An der Zäit hunn sech den Nordwand an d'Sonn gestridden, wie vun hinnen zwee wuel méi staark wier, wéi e Wanderer, deen an ee waarme Mantel agepak war, iwwert de Wee koum. Si goufen sech eens, dass deejéinege fir...
**spaCy-master/spacy/tests/lang/lg/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/lg/test_tokenizer.py** (py, 445 chars, avg line length 26.875, max line length 77)

    import pytest

    LG_BASIC_TOKENIZATION_TESTS = [
        (
            "Abooluganda ab’emmamba ababiri",
            ["Abooluganda", "ab’emmamba", "ababiri"],
        ),
    ]

    @pytest.mark.parametrize("text,expected_tokens", LG_BASIC_TOKENIZATION_TESTS)
    def test_lg_tokenizer_basic(lg_tokenizer, text, expected_tokens):
        tokens = lg_toke...
**spaCy-master/spacy/tests/lang/lt/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/lt/test_text.py** (py, 1,619 chars, avg line length 29, max line length 326)

    import pytest

    def test_lt_tokenizer_handles_long_text(lt_tokenizer):
        text = """Tokios sausros kriterijus atitinka pirmadienį atlikti skaičiavimai, palyginus faktinį ir žemiausią vidutinį daugiametį vandens lygį. Nustatyta, kad iš 48 šalies vandens matavimo stočių 28-iose stotyse vandens lygis yra žemesnis arba ly...
**spaCy-master/spacy/tests/lang/lv/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/lv/test_text.py** (py, 951 chars, avg line length 33, max line length 67)

    import pytest

    def test_long_text(lv_tokenizer):
        # Excerpt: European Convention on Human Rights
        text = """
    Ievērodamas, ka šī deklarācija paredz nodrošināt vispārēju un
    efektīvu tajā pasludināto tiesību atzīšanu un ievērošanu;
    Ievērodamas, ka Eiropas Padomes mērķis ir panākt lielāku vienotību
    tās dalībvalstu s...

**spaCy-master/spacy/tests/lang/lv/test_tokenizer.py** (py, 764 chars, avg line length 23.677419, max line length 77)

    import pytest

    LV_BASIC_TOKENIZATION_TESTS = [
        (
            "Nevienu nedrīkst spīdzināt vai cietsirdīgi vai pazemojoši ar viņu "
            "apieties vai sodīt.",
            [
                "Nevienu",
                "nedrīkst",
                "spīdzināt",
                "vai",
                "cietsirdīgi",
                "vai",
                ...
**spaCy-master/spacy/tests/lang/mk/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/mk/test_text.py** (py, 2,948 chars, avg line length 36.329114, max line length 116)

    import pytest

    from spacy.lang.mk.lex_attrs import like_num

    def test_tokenizer_handles_long_text(mk_tokenizer):
        text = """
    Во организациските работи или на нашите собранија со членството, никој од нас не зборуваше за
    организацијата и идеологијата. Работна беше нашата работа, а не идеолошка. Што се однесув...
**spaCy-master/spacy/tests/lang/ml/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ml/test_text.py** (py, 643 chars, avg line length 27, max line length 133)

    import pytest

    def test_ml_tokenizer_handles_long_text(ml_tokenizer):
        text = """അനാവശ്യമായി കണ്ണിലും മൂക്കിലും വായിലും സ്പർശിക്കാതിരിക്കുക"""
        tokens = ml_tokenizer(text)
        assert len(tokens) == 5

    @pytest.mark.parametrize(
        "text,length",
        [
            (
                "എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാ...
**spaCy-master/spacy/tests/lang/ms/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ms/test_noun_chunks.py** (py, 256 chars, avg line length 27.555556, max line length 90)

    import pytest

    def test_noun_chunks_is_parsed_ms(ms_tokenizer):
        """Test that noun_chunks raises Value Error for 'ms' language if Doc is not parsed."""
        doc = ms_tokenizer("sebelas")
        with pytest.raises(ValueError):
            list(doc.noun_chunks)

**spaCy-master/spacy/tests/lang/ms/test_prefix_suffix_infix.py** (py, 3,512 chars, avg line length 30.088496, max line length 86)

    import pytest

    @pytest.mark.parametrize("text", ["(Ma'arif)"])
    def test_ms_tokenizer_splits_no_special(id_tokenizer, text):
        tokens = id_tokenizer(text)
        assert len(tokens) == 3

    @pytest.mark.parametrize("text", ["Ma'arif"])
    def test_ms_tokenizer_splits_no_punct(id_tokenizer, text):
        tokens = id_tokenizer(te...

**spaCy-master/spacy/tests/lang/ms/test_text.py** (py, 206 chars, avg line length 19.7, max line length 45)

    import pytest

    from spacy.lang.ms.lex_attrs import like_num

    @pytest.mark.parametrize("word", ["sebelas"])
    def test_ms_lex_attrs_capitals(word):
        assert like_num(word)
        assert like_num(word.upper())
**spaCy-master/spacy/tests/lang/nb/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/nb/test_noun_chunks.py** (py, 276 chars, avg line length 29.777778, max line length 90)

    import pytest

    def test_noun_chunks_is_parsed_nb(nb_tokenizer):
        """Test that noun_chunks raises Value Error for 'nb' language if Doc is not parsed."""
        doc = nb_tokenizer("Smørsausen brukes bl.a. til")
        with pytest.raises(ValueError):
            list(doc.noun_chunks)

**spaCy-master/spacy/tests/lang/nb/test_tokenizer.py** (py, 625 chars, avg line length 30.3, max line length 83)

    import pytest

    NB_TOKEN_EXCEPTION_TESTS = [
        (
            "Smørsausen brukes bl.a. til fisk",
            ["Smørsausen", "brukes", "bl.a.", "til", "fisk"],
        ),
        (
            "Jeg kommer først kl. 13 pga. diverse forsinkelser",
            ["Jeg", "kommer", "først", "kl.", "13", "pga.", "diverse", "forsinkelser"],
        ),
    ]
    ...
**spaCy-master/spacy/tests/lang/ne/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ne/test_text.py** (py, 510 chars, avg line length 30.9375, max line length 131)

    import pytest

    def test_ne_tokenizer_handlers_long_text(ne_tokenizer):
        text = """मैले पाएको सर्टिफिकेटलाई म त बोक्रो सम्झन्छु र अभ्यास तब सुरु भयो, जब मैले कलेज पार गरेँ र जीवनको पढाइ सुरु गरेँ ।"""
        tokens = ne_tokenizer(text)
        assert len(tokens) == 24

    @pytest.mark.parametrize(
        "text,length", [("समय ज...
**spaCy-master/spacy/tests/lang/nl/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/nl/test_noun_chunks.py** (py, 4,304 chars, avg line length 17.964758, max line length 112)

    import pytest

    from spacy.tokens import Doc
    from spacy.util import filter_spans

    @pytest.fixture
    def nl_sample(nl_vocab):
        # TEXT :
        # Haar vriend lacht luid. We kregen alweer ruzie toen we de supermarkt ingingen.
        # Aan het begin van de supermarkt is al het fruit en de groentes. Uiteindelijk hebben we dan oo...

**spaCy-master/spacy/tests/lang/nl/test_text.py** (py, 694 chars, avg line length 25.730769, max line length 125)

    import pytest

    from spacy.lang.nl.lex_attrs import like_num

    @pytest.mark.parametrize("word", ["elf", "elfde"])
    def test_nl_lex_attrs_capitals(word):
        assert like_num(word)
        assert like_num(word.upper())

    @pytest.mark.parametrize(
        "text,num_tokens",
        [
            (
                "De aftredende minister-presid...
**spaCy-master/spacy/tests/lang/pl/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/pl/test_text.py** (py, 522 chars, avg line length 20.791667, max line length 58)

    """Words like numbers are recognized correctly."""

    import pytest

    @pytest.mark.parametrize(
        "text,match",
        [
            ("10", True),
            ("1", True),
            ("10,000", True),
            ("10,00", True),
            ("jeden", True),
            ("dwa", True),
            ("milion", True),
            ("pies", False),
            ("...

**spaCy-master/spacy/tests/lang/pl/test_tokenizer.py** (py, 553 chars, avg line length 22.083333, max line length 74)

    import pytest

    DOT_TESTS = [
        ("tel.", ["tel", "."]),
        ("0 zł 99 gr", ["0", "zł", "99", "gr"]),
    ]

    HYPHEN_TESTS = [
        ("cztero-", ["cztero-"]),
        ("jedno-", ["jedno-"]),
        ("dwu-", ["dwu-"]),
        ("trzy-", ["trzy-"]),
    ]

    TESTCASES = DOT_TESTS + HYPHEN_TESTS

    @pytest.mark.parametrize("text,expected_tokens"...
**spaCy-master/spacy/tests/lang/pt/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/pt/test_noun_chunks.py** (py, 7,715 chars, avg line length 33.600897, max line length 130)

    import pytest

    from spacy.tokens import Doc

    # fmt: off
    @pytest.mark.parametrize(
        "words,heads,deps,pos,chunk_offsets",
        [
            # determiner + noun
            # um cachorro -> um cachorro
            (
                ["um", "cachorro"],
                [1, 1],
                ["det", "ROOT"],
                ["DET", "NOUN"],
    ...

**spaCy-master/spacy/tests/lang/pt/test_text.py** (py, 219 chars, avg line length 21, max line length 58)

    import pytest

    from spacy.lang.pt.lex_attrs import like_num

    @pytest.mark.parametrize("word", ["onze", "quadragésimo"])
    def test_pt_lex_attrs_capitals(word):
        assert like_num(word)
        assert like_num(word.upper())
**spaCy-master/spacy/tests/lang/ro/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ro/test_tokenizer.py** (py, 727 chars, avg line length 32.090909, max line length 77)

    import pytest

    TEST_CASES = [
        (
            "Adresa este str. Principală nr. 5.",
            ["Adresa", "este", "str.", "Principală", "nr.", "5", "."],
        ),
        ("Teste, etc.", ["Teste", ",", "etc."]),
        ("Lista, ș.a.m.d.", ["Lista", ",", "ș.a.m.d."]),
        ("Și d.p.d.v. al...", ["Și", "d.p.d.v.", "al", "..."]),
        # ...
**spaCy-master/spacy/tests/lang/ru/__init__.py** (py, empty)

**spaCy-master/spacy/tests/lang/ru/test_exceptions.py** (py, 321 chars, avg line length 25.833333, max line length 76)

    import pytest

    @pytest.mark.parametrize(
        "text,norms",
        [("пн.", ["понедельник"]), ("пт.", ["пятница"]), ("дек.", ["декабрь"])],
    )
    def test_ru_tokenizer_abbrev_exceptions(ru_tokenizer, text, norms):
        tokens = ru_tokenizer(text)
        assert len(tokens) == 1
        assert [token.norm_ for token in tokens] == norms...

**spaCy-master/spacy/tests/lang/ru/test_lemmatizer.py** (py, 3,676 chars, avg line length 32.427273, max line length 87)

    import pytest

    from spacy.tokens import Doc

    pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")

    def test_ru_doc_lemmatization(ru_lemmatizer):
        words = ["мама", "мыла", "раму"]
        pos = ["NOUN", "VERB", "NOUN"]
        morphs = [
            "Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing",
            "Aspec...