| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/lang/ar/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ar.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"نال الكاتب خالد توفيق جائزة الرواية العربية في معرض الشارقة الدولي للكتاب",
"أين تقع دمشق ؟",
"كيف حالك ؟",
"هل يمكن ان نلتقي على الساعة الثا... | 590 | 31.833333 | 89 | py |
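The docstring in this row shows how every per-language `examples.py` module is meant to be used: pipe the bundled sentences through a pipeline. A minimal sketch of that usage (not part of the dataset row), assuming only that spaCy is installed — a blank pipeline suffices because the examples exercise just the tokenizer:

```python
import spacy
from spacy.lang.ar.examples import sentences

# A blank Arabic pipeline is enough to tokenize the bundled example sentences.
nlp = spacy.blank("ar")
for doc in nlp.pipe(sentences):
    print([token.text for token in doc])
```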
spaCy | spaCy-master/spacy/lang/ar/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = set(
"""
صفر
واحد
إثنان
اثنان
ثلاثة
ثلاثه
أربعة
أربعه
خمسة
خمسه
ستة
سته
سبعة
سبعه
ثمانية
ثمانيه
تسعة
تسعه
عشرة
عشره
عشرون
عشرين
ثلاثون
ثلاثين
اربعون
اربعين
أربعون
أربعين
خمسون
خمسين
ستون
ستين
سبعون
سبعين
ثمانون
ثمانين
تسعون
تسعين
مائتين
مائتان
ثلاثمائة
خمسمائة
سبعمائة
الف... | 1,002 | 9.234694 | 49 | py |
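Each `lex_attrs.py` module maps the `LIKE_NUM` attribute to a getter that consults a set like `_num_words`. A minimal sketch of the usual pattern — the exact normalization steps vary by language, so treat this as illustrative rather than the file's elided tail:

```python
from spacy.attrs import LIKE_NUM

_num_words = {"صفر", "واحد", "اثنان"}  # abbreviated; the full set is in the row above

def like_num(text: str) -> bool:
    # Strip digit separators, then accept plain digits, simple fractions,
    # or spelled-out number words.
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    return text in _num_words

LEX_ATTRS = {LIKE_NUM: like_num}
```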
spaCy | spaCy-master/spacy/lang/ar/punctuation.py | from ..char_classes import (
ALPHA_UPPER,
CURRENCY,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
_suffixes = (
LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ [
r"(?<=[0-9])\+",
# Arabic is written from Right-To-Left
r"(?<=[0-9])(?:{c})".format(c=CURRENCY)... | 463 | 18.333333 | 54 | py |
spaCy | spaCy-master/spacy/lang/ar/stop_words.py | STOP_WORDS = set(
"""
من
نحو
لعل
بما
بين
وبين
ايضا
وبينما
تحت
مثلا
لدي
عنه
مع
هي
وهذا
واذا
هذان
انه
بينما
أمسى
وسوف
ولم
لذلك
إلى
منه
منها
كما
ظل
هنا
به
كذلك
اما
هما
بعد
بينهم
التي
أبو
اذا
بدلا
لها
أمام
يلي
حين
ضد
الذي
قد
صار
إذا
مابرح
قبل
كل
وليست
الذين
لهذا
وثي
انهم
باللتي
مافتئ
ولا
بهذه
بحيث
كيف
وله
علي
بات
لاسيم... | 1,803 | 3.613811 | 17 | py |
spaCy | spaCy-master/spacy/lang/ar/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
# Time
for exc_data in [
{NORM: "قبل الميلاد", ORTH: "ق.م"},
{NORM: "بعد الميلاد", ORTH: "ب. م"},
{NORM: "ميلادي", ORTH: ".م"},
{NORM: "هجري", ORTH: ".هـ"},
{NORM: "توفي",... | 1,289 | 25.875 | 81 | py |
spaCy | spaCy-master/spacy/lang/az/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class AzerbaijaniDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Azerbaijani(Language):
lang = "az"
Defaults = AzerbaijaniDefaults
__all__ = ["Azerb... | 329 | 18.411765 | 46 | py |
spaCy | spaCy-master/spacy/lang/az/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.az.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Bu bir cümlədir.",
"Necəsən?",
"Qarabağ ordeni vətən müharibəsində qələbə münasibəti ilə təsis edilmişdir.",
"Məktəbimizə Bakıdan bir tarix mü... | 675 | 34.578947 | 86 | py |
spaCy | spaCy-master/spacy/lang/az/lex_attrs.py | from ...attrs import LIKE_NUM
# Eleven, twelve etc. are written separate: on bir, on iki
_num_words = [
"bir",
"iki",
"üç",
"dörd",
"beş",
"altı",
"yeddi",
"səkkiz",
"doqquz",
"on",
"iyirmi",
"otuz",
"qırx",
"əlli",
"altmış",
"yetmiş",
"səksən",
... | 1,647 | 17.516854 | 66 | py |
spaCy | spaCy-master/spacy/lang/az/stop_words.py | # Source: https://github.com/eliasdabbas/advertools/blob/master/advertools/stopwords.py
STOP_WORDS = set(
"""
amma
arasında
artıq
ay
az
bax
belə
beş
bilər
bir
biraz
biri
birşey
biz
bizim
bizlər
bu
buna
bundan
bunların
bunu
bunun
buradan
bütün
bəli
bəlkə
bəy
bəzi
bəzən
daha
dedi
deyil
dir
düz
də
dək
dən
dəqiqə
edir
... | 869 | 4.958904 | 87 | py |
spaCy | spaCy-master/spacy/lang/bg/__init__.py | from ...attrs import LANG
from ...language import BaseDefaults, Language
from ...util import update_exc
from ..punctuation import (
COMBINING_DIACRITICS_TOKENIZER_INFIXES,
COMBINING_DIACRITICS_TOKENIZER_SUFFIXES,
)
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .stop_wo... | 907 | 27.375 | 76 | py |
spaCy | spaCy-master/spacy/lang/bg/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.bg.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Епъл иска да купи английски стартъп за 1 милиард долара."
"Автономните коли прехвърлят застрахователната отговорност към производителите."
"Сан Фр... | 431 | 29.857143 | 84 | py |
spaCy | spaCy-master/spacy/lang/bg/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нула",
"едно",
"един",
"една",
"две",
"три",
"четири",
"пет",
"шест",
"седем",
"осем",
"девет",
"десет",
"единадесет",
"единайсет",
"дванадесет",
"дванайсет",
"тринадесет",
"тринайсет",
"четири... | 1,523 | 16.318182 | 49 | py |
spaCy | spaCy-master/spacy/lang/bg/stop_words.py | """
References:
https://github.com/Alir3z4/stop-words - Original list, serves as a base.
https://postvai.com/books/stop-dumi.pdf - Additions to the original list in order to improve it.
"""
STOP_WORDS = set(
"""
а автентичен аз ако ала
бе без беше би бивш бивша бившо бивши бил била били било благодаря близ... | 2,718 | 32.9875 | 100 | py |
spaCy | spaCy-master/spacy/lang/bg/tokenizer_exceptions.py | """
References:
https://slovored.com/bg/abbr/grammar/ - Additional refs for abbreviations
(countries, occupations, fields of studies and more).
"""
from ...symbols import NORM, ORTH
_exc = {}
# measurements
for abbr in [
{ORTH: "м", NORM: "метър"},
{ORTH: "мм", NORM: "милиметър"},
{ORTH: "см", NO... | 7,184 | 32.732394 | 77 | py |
spaCy | spaCy-master/spacy/lang/bn/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ...pipeline import Lemmatizer
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIO... | 1,177 | 22.56 | 82 | py |
spaCy | spaCy-master/spacy/lang/bn/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.bn.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = ["তুই খুব ভালো", "আজ আমরা ডাক্তার দেখতে যাবো", "আমি জানি না "]
| 223 | 21.4 | 74 | py |
spaCy | spaCy-master/spacy/lang/bn/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
_currency = r"\$¢£€¥฿৳"
_quotes = CONCAT_QUOTES.replace("'", "")
_list_punct = LIST_PUNCT + "। ॥".strip().split()
_prefixes = [r"\+"] + _list_punct... | 1,263 | 22.407407 | 76 | py |
spaCy | spaCy-master/spacy/lang/bn/stop_words.py | STOP_WORDS = set(
"""
অতএব অথচ অথবা অনুযায়ী অনেক অনেকে অনেকেই অন্তত অবধি অবশ্য অর্থাৎ অন্য অনুযায়ী অর্ধভাগে
আগামী আগে আগেই আছে আজ আদ্যভাগে আপনার আপনি আবার আমরা আমাকে আমাদের আমার আমি আর আরও
ইত্যাদি ইহা
উচিত উনি উপর উপরে উত্তর
এ এঁদের এঁরা এই এক একই একজন একটা একটি একবার একে এখন এখনও এখানে এখানেই এটা এসো
এটাই এটি ... | 2,384 | 54.465116 | 132 | py |
spaCy | spaCy-master/spacy/lang/bn/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for exc_data in [
{ORTH: "ডঃ", NORM: "ডক্টর"},
{ORTH: "ডাঃ", NORM: "ডাক্তার"},
{ORTH: "ড.", NORM: "ডক্টর"},
{ORTH: "ডা.", NORM: "ডাক্তার"},
{ORTH: "মোঃ", NORM: "মোহাম্মদ"}... | 704 | 26.115385 | 56 | py |
spaCy | spaCy-master/spacy/lang/ca/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import CatalanLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax... | 1,344 | 23.907407 | 82 | py |
spaCy | spaCy-master/spacy/lang/ca/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ca.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple està buscant comprar una startup del Regne Unit per mil milions de dòlars",
"Els cotxes autònoms deleguen la responsabilitat de l'assegurança a... | 588 | 30 | 91 | py |
spaCy | spaCy-master/spacy/lang/ca/lemmatizer.py | from typing import List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class CatalanLemmatizer(Lemmatizer):
"""
Copied from French Lemmatizer
Catalan language lemmatizer applies the default rule based lemmatization
procedure with some modifications for better Catalan language s... | 2,843 | 33.682927 | 82 | py |
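`CatalanLemmatizer` customizes the rule-based mode of spaCy's shared `Lemmatizer` component. A minimal sketch of wiring it up, assuming the `spacy-lookups-data` package is installed so the rule tables can be loaded; in practice a tagger or morphologizer should run upstream so tokens carry the POS tags the rules key on:

```python
import spacy

# "rule" mode resolves to CatalanLemmatizer for a Catalan pipeline.
nlp = spacy.blank("ca")
nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()  # loads the rule tables (requires spacy-lookups-data)
```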
spaCy | spaCy-master/spacy/lang/ca/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"zero",
"un",
"dos",
"tres",
"quatre",
"cinc",
"sis",
"set",
"vuit",
"nou",
"deu",
"onze",
"dotze",
"tretze",
"catorze",
"quinze",
"setze",
"disset",
"divuit",
"dinou",
"vint",
"trenta",... | 948 | 15.084746 | 49 | py |
spaCy | spaCy-master/spacy/lang/ca/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
_units,
merge_chars,
)
ELISION = " ' ’ ".strip().replace(" ", "").replace("\n", "")
_prefixes = (
["§... | 1,589 | 22.731343 | 119 | py |
spaCy | spaCy-master/spacy/lang/ca/stop_words.py | STOP_WORDS = set(
"""
a abans ací ah així això al aleshores algun alguna algunes alguns alhora allà allí allò
als altra altre altres amb ambdues ambdós anar ans apa aquell aquella aquelles aquells
aquest aquesta aquestes aquests aquí
baix bastant bé
cada cadascuna cadascunes cadascuns cadascú com consegueixo cons... | 1,589 | 29 | 90 | py |
spaCy | spaCy-master/spacy/lang/ca/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# fmt: off
... | 1,995 | 38.92 | 96 | py |
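`noun_chunks` iterators like the one in this row are exposed through `Doc.noun_chunks` once a dependency parse is present. A minimal sketch, assuming a trained Catalan pipeline such as `ca_core_news_sm` is installed (the sentence is adapted from the examples row above):

```python
import spacy

nlp = spacy.load("ca_core_news_sm")  # any Catalan pipeline with a parser
doc = nlp("Els cotxes autònoms deleguen la responsabilitat de l'assegurança.")
for chunk in doc.noun_chunks:  # dispatches to the syntax_iterators module above
    print(chunk.text, chunk.root.dep_)
```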
spaCy | spaCy-master/spacy/lang/ca/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for exc_data in [
{ORTH: "aprox.", NORM: "aproximadament"},
{ORTH: "pàg.", NORM: "pàgina"},
{ORTH: "p.ex.", NORM: "per exemple"},
{ORTH: "gen.", NORM: "gener"},
{ORTH: "feb... | 1,949 | 27.676471 | 77 | py |
spaCy | spaCy-master/spacy/lang/cs/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class CzechDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Czech(Language):
lang = "cs"
Defaults = CzechDefaults
__all__ = ["Czech"]
| 305 | 17 | 46 | py |
spaCy | spaCy-master/spacy/lang/cs/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.cs.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Máma mele maso.",
"Příliš žluťoučký kůň úpěl ďábelské ódy.",
"ArcGIS je geografický informační systém určený pro práci s prostorovými daty.",
... | 1,408 | 35.128205 | 129 | py |
spaCy | spaCy-master/spacy/lang/cs/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nula",
"jedna",
"dva",
"tři",
"čtyři",
"pět",
"šest",
"sedm",
"osm",
"devět",
"deset",
"jedenáct",
"dvanáct",
"třináct",
"čtrnáct",
"patnáct",
"šestnáct",
"sedmnáct",
"osmnáct",
"devatenáct",
... | 1,045 | 15.870968 | 49 | py |
spaCy | spaCy-master/spacy/lang/cs/stop_words.py | # Source: https://github.com/Alir3z4/stop-words
# Source: https://github.com/stopwords-iso/stopwords-cs/blob/master/stopwords-cs.txt
STOP_WORDS = set(
"""
a
aby
ahoj
ačkoli
ale
alespoň
anebo
ani
aniž
ano
atd.
atp.
asi
aspoň
až
během
bez
beze
blízko
bohužel
brzo
bude
budeme
budeš
budete
budou
budu
by
byl
byla
byli
... | 2,045 | 4.590164 | 84 | py |
spaCy | spaCy-master/spacy/lang/da/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class DanishDefaults(BaseDefaults):
... | 628 | 25.208333 | 62 | py |
spaCy | spaCy-master/spacy/lang/da/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.da.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple overvejer at købe et britisk startup for 1 milliard dollar.",
"Selvkørende biler flytter forsikringsansvaret over på producenterne.",
"San F... | 562 | 30.277778 | 75 | py |
spaCy | spaCy-master/spacy/lang/da/lex_attrs.py | from ...attrs import LIKE_NUM
# Source http://fjern-uv.dk/tal.php
_num_words = """nul
en et to tre fire fem seks syv otte ni ti
elleve tolv tretten fjorten femten seksten sytten atten nitten tyve
enogtyve toogtyve treogtyve fireogtyve femogtyve seksogtyve syvogtyve otteogtyve niogtyve tredive
enogtredive toogtredive t... | 3,572 | 69.058824 | 264 | py |
spaCy | spaCy-master/spacy/lang/da/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
LIST_ELLIPSES,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_SUFFIXES
_quotes = CONCAT_QUOTES.replace("'", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=A... | 910 | 23.621622 | 74 | py |
spaCy | spaCy-master/spacy/lang/da/stop_words.py | # Source: Handpicked by Jens Dahl Møllerhøj.
STOP_WORDS = set(
"""
af aldrig alene alle allerede alligevel alt altid anden andet andre at
bag begge blandt blev blive bliver burde bør
da de dem den denne dens der derefter deres derfor derfra deri dermed derpå derved det dette dig din dine disse dog du
efter egen... | 1,318 | 27.673913 | 219 | py |
spaCy | spaCy-master/spacy/lang/da/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import AUX, NOUN, PRON, PROPN, VERB
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
def is_verb_token(tok):
return tok.pos in [VERB, AUX]
def get_left_... | 2,189 | 28.2 | 88 | py |
spaCy | spaCy-master/spacy/lang/da/tokenizer_exceptions.py | """
Tokenizer Exceptions.
Source: https://forkortelse.dk/ and various others.
"""
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
# Abbreviations for weekdays "søn." (for "søndag") as well as "Tor." and "Tors."
# (for "torsdag") are left ou... | 8,931 | 14.373494 | 80 | py |
spaCy | spaCy-master/spacy/lang/de/__init__.py | from ...language import BaseDefaults, Language
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class GermanDefaults(BaseDefaults):
tokenizer_e... | 616 | 25.826087 | 82 | py |
spaCy | spaCy-master/spacy/lang/de/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.de.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Die ganze Stadt ist ein Startup: Shenzhen ist das Silicon Valley für Hardware-Firmen",
"Wie deutsche Startups die Technologie vorantreiben wollen: Kü... | 675 | 34.578947 | 91 | py |
spaCy | spaCy-master/spacy/lang/de/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
PUNCT,
UNITS,
)
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
_prefixes = ["``"] + BASE_TOKENIZER_PREFIXES
_s... | 1,407 | 23.701754 | 74 | py |
spaCy | spaCy-master/spacy/lang/de/stop_words.py | STOP_WORDS = set(
"""
á a ab aber ach acht achte achten achter achtes ag alle allein allem allen
aller allerdings alles allgemeinen als also am an andere anderen anderem andern
anders auch auf aus ausser außer ausserdem außerdem
bald bei beide beiden beim beispiel bekannt bereits besonders besser besten bin
bis bi... | 3,609 | 44.696203 | 80 | py |
spaCy | spaCy-master/spacy/lang/de/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# this i... | 1,850 | 41.068182 | 90 | py |
spaCy | spaCy-master/spacy/lang/de/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"auf'm": [{ORTH: "auf"}, {ORTH: "'m", NORM: "dem"}],
"du's": [{ORTH: "du"}, {ORTH: "'s", NORM: "es"}],
"er's": [{ORTH: "er"}, {ORTH: "'s", NORM: "es"}],
"hinter'm": [{ORTH: "hint... | 5,866 | 23.965957 | 62 | py |
spaCy | spaCy-master/spacy/lang/dsb/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class LowerSorbianDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class LowerSorbian(Language):
lang = "dsb"
Defaults = LowerSorbianDefaults
__all__ = ["L... | 334 | 18.705882 | 46 | py |
spaCy | spaCy-master/spacy/lang/dsb/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.dsb.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Z tym stwori so wuměnjenje a zakład za dalše wobdźěłanje přez analyzu tekstoweje struktury a semantisku anotaciju a z tym tež za tu předstajenu digitaln... | 527 | 32 | 176 | py |
spaCy | spaCy-master/spacy/lang/dsb/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nul",
"jaden",
"jadna",
"jadno",
"dwa",
"dwě",
"tśi",
"tśo",
"styri",
"styrjo",
"pěś",
"pěśo",
"šesć",
"šesćo",
"sedym",
"sedymjo",
"wósym",
"wósymjo",
"źewjeś",
"źewjeśo",
"źaseś",
"źa... | 1,821 | 14.982456 | 49 | py |
spaCy | spaCy-master/spacy/lang/dsb/stop_words.py | STOP_WORDS = set(
"""
a abo aby ako ale až
daniž dokulaž
gaž
jolic
pak pótom
teke togodla
""".split()
)
| 113 | 6.125 | 20 | py |
spaCy | spaCy-master/spacy/lang/el/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import GreekLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_i... | 1,330 | 23.648148 | 82 | py |
spaCy | spaCy-master/spacy/lang/el/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.el.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"""Η άνιση κατανομή του πλούτου και του εισοδήματος, η οποία έχει λάβει
τρομερές διαστάσεις, δεν δείχνει τάσεις βελτίωσης.""",
"""Ο στόχος της σύντο... | 1,198 | 46.96 | 78 | py |
spaCy | spaCy-master/spacy/lang/el/get_pos_from_wiktionary.py | def get_pos_from_wiktionary():
import re
from gensim.corpora.wikicorpus import extract_pages
regex = re.compile(r"==={{(\w+)\|el}}===")
regex2 = re.compile(r"==={{(\w+ \w+)\|el}}===")
# get words based on the Wiktionary dump
# check only for specific parts
# ==={{κύριο όνομα|el}}===
... | 1,977 | 29.430769 | 87 | py |
spaCy | spaCy-master/spacy/lang/el/lemmatizer.py | from typing import List
from ...pipeline import Lemmatizer
from ...tokens import Token
class GreekLemmatizer(Lemmatizer):
"""
Greek language lemmatizer applies the default rule based lemmatization
procedure with some modifications for better Greek language support.
The first modification is that it ... | 2,195 | 33.857143 | 78 | py |
spaCy | spaCy-master/spacy/lang/el/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"μηδέν",
"ένας",
"δυο",
"δυό",
"τρεις",
"τέσσερις",
"πέντε",
"έξι",
"εφτά",
"επτά",
"οκτώ",
"οχτώ",
"εννιά",
"εννέα",
"δέκα",
"έντεκα",
"ένδεκα",
"δώδεκα",
"δεκατρείς",
"δεκατέσσερις",
"δεκα... | 1,795 | 16.96 | 78 | py |
spaCy | spaCy-master/spacy/lang/el/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
HYPHENS,
LIST_CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
)
_units = (
"km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft "
"kg g mg µg t lb o... | 3,137 | 28.603774 | 85 | py |
spaCy | spaCy-master/spacy/lang/el/stop_words.py | # Stop words
# Link to greek stop words: https://www.translatum.gr/forum/index.php?topic=3550.0?topic=3550.0
STOP_WORDS = set(
"""
αδιάκοπα αι ακόμα ακόμη ακριβώς άλλα αλλά αλλαχού άλλες άλλη άλλην
άλλης αλλιώς αλλιώτικα άλλο άλλοι αλλοιώς αλλοιώτικα άλλον άλλος άλλοτε αλλού
άλλους άλλων άμα άμεσα αμέσως αν ανά ανά... | 4,482 | 49.943182 | 96 | py |
spaCy | spaCy-master/spacy/lang/el/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# It fol... | 2,250 | 40.685185 | 82 | py |
spaCy | spaCy-master/spacy/lang/el/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for token in ["Απ'", "ΑΠ'", "αφ'", "Αφ'"]:
_exc[token] = [{ORTH: token, NORM: "από"}]
for token in ["Αλλ'", "αλλ'"]:
_exc[token] = [{ORTH: token, NORM: "αλλά"}]
for token in ["παρ'",... | 8,384 | 20.227848 | 77 | py |
spaCy | spaCy-master/spacy/lang/en/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import EnglishLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from ... | 1,236 | 22.788462 | 77 | py |
spaCy | spaCy-master/spacy/lang/en/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.en.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple is looking at buying U.K. startup for $1 billion",
"Autonomous cars shift insurance liability toward manufacturers",
"San Francisco conside... | 556 | 28.315789 | 69 | py |
spaCy | spaCy-master/spacy/lang/en/lemmatizer.py | from ...pipeline import Lemmatizer
from ...tokens import Token
class EnglishLemmatizer(Lemmatizer):
"""English lemmatizer. Only overrides is_base_form."""
def is_base_form(self, token: Token) -> bool:
"""
Check whether we're dealing with an uninflected paradigm, so we can
avoid lemmat... | 1,480 | 35.121951 | 75 | py |
spaCy | spaCy-master/spacy/lang/en/lex_attrs.py | from ...attrs import LIKE_NUM
# fmt: off
_num_words = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen", "twenty", "thirty", "forty",
"fifty", "sixty", "seventy",... | 1,585 | 34.244444 | 82 | py |
spaCy | spaCy-master/spacy/lang/en/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au... | 576 | 20.37037 | 68 | py |
spaCy | spaCy-master/spacy/lang/en/stop_words.py | # Stop words
STOP_WORDS = set(
"""
a about above across after afterwards again against all almost alone along
already also although always am among amongst amount an and another any anyhow
anyone anything anyway anywhere are around as at
back be became because become becomes becoming been before beforehand behind
... | 2,144 | 27.986486 | 79 | py |
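`STOP_WORDS` sets like this one feed the default `IS_STOP` lexical attribute, so `token.is_stop` works even in a blank pipeline. A minimal sketch:

```python
import spacy
from spacy.lang.en.stop_words import STOP_WORDS

nlp = spacy.blank("en")
doc = nlp("This is about all of the stop words")
# is_stop is a lexeme attribute backed by the language's STOP_WORDS set.
print([(t.text, t.is_stop) for t in doc])
assert "about" in STOP_WORDS
```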
spaCy | spaCy-master/spacy/lang/en/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 1,570 | 29.803922 | 81 | py |
spaCy | spaCy-master/spacy/lang/en/tokenizer_exceptions.py | from typing import Dict, List
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc: Dict[str, List[Dict]] = {}
_exclude = [
"Ill",
"ill",
"Its",
"its",
"Hell",
"hell",
"Shell",
"shell",
"Shed",
"shed",
"wer... | 14,252 | 26.045541 | 85 | py |
spaCy | spaCy-master/spacy/lang/es/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import SpanishLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SY... | 1,290 | 23.358491 | 77 | py |
spaCy | spaCy-master/spacy/lang/es/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.es.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple está buscando comprar una startup del Reino Unido por mil millones de dólares.",
"Los coches autónomos delegan la responsabilidad del seguro en... | 760 | 32.086957 | 91 | py |
spaCy | spaCy-master/spacy/lang/es/lemmatizer.py | import re
from typing import List, Optional, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class SpanishLemmatizer(Lemmatizer):
"""
Spanish rule-based lemmatizer with morph-based rule selection.
"""
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], Li... | 16,014 | 36.331002 | 114 | py |
spaCy | spaCy-master/spacy/lang/es/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"cero",
"uno",
"dos",
"tres",
"cuatro",
"cinco",
"seis",
"siete",
"ocho",
"nueve",
"diez",
"once",
"doce",
"trece",
"catorce",
"quince",
"dieciséis",
"diecisiete",
"dieciocho",
"diecinueve",
... | 1,770 | 16.028846 | 49 | py |
spaCy | spaCy-master/spacy/lang/es/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
CURRENCY,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
LIST_UNITS,
PUNCT,
merge_chars,
)
_list_units = [u for u in LIST_UNITS if u != "%"]
_units = merge_chars(" ".join(_list_units))
_con... | 1,198 | 21.203704 | 66 | py |
spaCy | spaCy-master/spacy/lang/es/stop_words.py | STOP_WORDS = set(
"""
a acuerdo adelante ademas además afirmó agregó ahi ahora ahí al algo alguna
algunas alguno algunos algún alli allí alrededor ambos ante anterior antes
apenas aproximadamente aquel aquella aquellas aquello aquellos aqui aquél
aquélla aquéllas aquéllos aquí arriba aseguró asi así atras aun aunqu... | 3,292 | 39.654321 | 79 | py |
spaCy | spaCy-master/spacy/lang/es/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 2,714 | 34.25974 | 85 | py |
spaCy | spaCy-master/spacy/lang/es/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"pal": [{ORTH: "pa"}, {ORTH: "l", NORM: "el"}],
}
for exc_data in [
{ORTH: "n°"},
{ORTH: "°C"},
{ORTH: "aprox."},
{ORTH: "dna."},
{ORTH: "dpto."},
{ORTH: "ej."},
... | 1,451 | 16.493976 | 63 | py |
spaCy | spaCy-master/spacy/lang/et/__init__.py | from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS
class EstonianDefaults(BaseDefaults):
stop_words = STOP_WORDS
class Estonian(Language):
lang = "et"
Defaults = EstonianDefaults
__all__ = ["Estonian"]
| 251 | 15.8 | 46 | py |
spaCy | spaCy-master/spacy/lang/et/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-et
STOP_WORDS = set(
"""
aga
ei
et
ja
jah
kas
kui
kõik
ma
me
mida
midagi
mind
minu
mis
mu
mul
mulle
nad
nii
oled
olen
oli
oma
on
pole
sa
seda
see
selle
siin
siis
ta
te
ära
""".split()
)
| 244 | 4.833333 | 55 | py |
spaCy | spaCy-master/spacy/lang/eu/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
class BasqueDefaults(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Basque(Language):
... | 387 | 19.421053 | 46 | py |
spaCy | spaCy-master/spacy/lang/eu/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.eu.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"bilbon ko castinga egin da eta nik jakin ez zuetako inork egin al du edota parte hartu duen ezagunik ba al du",
"gaur telebistan entzunda denok martet... | 419 | 34 | 138 | py |
spaCy | spaCy-master/spacy/lang/eu/lex_attrs.py | from ...attrs import LIKE_NUM
# Source http://mylanguages.org/basque_numbers.php
_num_words = """
bat
bi
hiru
lau
bost
sei
zazpi
zortzi
bederatzi
hamar
hamaika
hamabi
hamahiru
hamalau
hamabost
hamasei
hamazazpi
Hemezortzi
hemeretzi
hogei
ehun
mila
milioi
""".split()
# source https://www.google.com/intl/ur/inputtool... | 1,092 | 13.194805 | 55 | py |
spaCy | spaCy-master/spacy/lang/eu/punctuation.py | from ..punctuation import TOKENIZER_SUFFIXES
_suffixes = TOKENIZER_SUFFIXES
| 77 | 18.5 | 44 | py |
spaCy | spaCy-master/spacy/lang/eu/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-eu
# https://www.ranks.nl/stopwords/basque
# https://www.mustgo.com/worldlanguages/basque/
STOP_WORDS = set(
"""
al
anitz
arabera
asko
baina
bat
batean
batek
bati
batzuei
batzuek
batzuetan
batzuk
bera
beraiek
berau
berauek
bere
berori
beroriek
beste
bezala
da
dag... | 760 | 6.179245 | 55 | py |
spaCy | spaCy-master/spacy/lang/fa/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ...pipeline import Lemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .token... | 1,307 | 23.679245 | 81 | py |
spaCy | spaCy-master/spacy/lang/fa/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.fa.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"این یک جمله نمونه می باشد.",
"قرار ما، امروز ساعت ۲:۳۰ بعدازظهر هست!",
"دیروز علی به من ۲۰۰۰.۱﷼ پول نقد داد.",
"چطور میتوان از تهران به کاشا... | 377 | 22.625 | 56 | py |
spaCy | spaCy-master/spacy/lang/fa/generate_verbs_exc.py | verb_roots = """
#هست
آخت#آهنج
آراست#آرا
آراماند#آرامان
آرامید#آرام
آرمید#آرام
آزرد#آزار
آزمود#آزما
آسود#آسا
آشامید#آشام
آشفت#آشوب
آشوبید#آشوب
آغازید#آغاز
آغشت#آمیز
آفرید#آفرین
آلود#آلا
آمد#آ
آمرزید#آمرز
آموخت#آموز
آموزاند#آموزان
آمیخت#آمیز
آورد#آر
آورد#آور
آویخت#آویز
آکند#آکن
آگاهانید#آگاهان
ارزید#ارز
افتاد#افت
افراخت... | 8,945 | 12.720859 | 83 | py |
spaCy | spaCy-master/spacy/lang/fa/lex_attrs.py | from ...attrs import LIKE_NUM
MIM = "م"
ZWNJ_O_MIM = "ام"
YE_NUN = "ین"
_num_words = set(
"""
صفر
یک
دو
سه
چهار
پنج
شش
شیش
هفت
هشت
نه
ده
یازده
دوازده
سیزده
چهارده
پانزده
پونزده
شانزده
شونزده
هفده
هجده
هیجده
نوزده
بیست
سی
چهل
پنجاه
شصت
هفتاد
هشتاد
نود
صد
یکصد
یکصد
دویست
سیصد
چهارصد
پانصد
پونصد
ششصد
شیشصد
هفتصد
... | 1,100 | 9.794118 | 63 | py |
spaCy | spaCy-master/spacy/lang/fa/punctuation.py | from ..char_classes import (
ALPHA_UPPER,
CURRENCY,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
_suffixes = (
LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ [
r"(?<=[0-9])\+",
r"(?<=[0-9])%", # 4% -> ["4", "%"]
# Persian is written from Right-To-Left
... | 508 | 19.36 | 54 | py |
spaCy | spaCy-master/spacy/lang/fa/stop_words.py | # Stop words from HAZM package
STOP_WORDS = set(
"""
و
در
به
از
که
این
را
با
است
برای
آن
یک
خود
تا
کرد
بر
هم
نیز
گفت
میشود
وی
شد
دارد
ما
اما
یا
شده
باید
هر
آنها
بود
او
دیگر
دو
مورد
میکند
شود
کند
وجود
بین
پیش
شدهاست
پس
نظر
اگر
همه
یکی
حال
هستند
من
کنند
نیست
باشد
چه
بی
می
بخش
میکنند
همین
افزود
هایی
دارند
راه
همچن... | 2,093 | 4.314721 | 30 | py |
spaCy | spaCy-master/spacy/lang/fa/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"... | 1,556 | 28.942308 | 81 | py |
spaCy | spaCy-master/spacy/lang/fa/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
TOKENIZER_EXCEPTIONS = {
".ق ": [{ORTH: ".ق "}],
".م": [{ORTH: ".م"}],
".هـ": [{ORTH: ".هـ"}],
"ب.م": [{ORTH: "ب.م"}],
"ق.م": [{ORTH: "ق.م"}],
"آبرویت": [{ORTH: "آبروی", NORM: "آبروی"}, {ORTH: "ت", NORM: "ت"}],
"آبنباتش": [{ORTH: "آبنبات", NORM: "آبنبات"... | 50,867 | 67.096386 | 86 | py |
spaCy | spaCy-master/spacy/lang/fi/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class FinnishDefaults(BaseDefaults):
... | 632 | 25.375 | 62 | py |
spaCy | spaCy-master/spacy/lang/fi/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.fi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Itseajavat autot siirtävät vakuutusvastuun autojen valmistajille",
"San Francisco harkitsee toimitusrobottien liikkumisen kieltämistä jalkakäytävillä",... | 531 | 32.25 | 88 | py |
spaCy | spaCy-master/spacy/lang/fi/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"nolla",
"yksi",
"kaksi",
"kolme",
"neljä",
"viisi",
"kuusi",
"seitsemän",
"kahdeksan",
"yhdeksän",
"kymmenen",
"yksitoista",
"kaksitoista",
"kolmetoista",
"neljätoista",
"viisitoista",
"kuusitoista",
"... | 1,058 | 17.910714 | 49 | py |
spaCy | spaCy-master/spacy/lang/fi/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
LIST_ELLIPSES,
LIST_HYPHENS,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_SUFFIXES
_quotes = CONCAT_QUOTES.replace("'", "")
DASHES = "|".join(x for x in LIST_HYPHENS if x != "-")
_infixes = (
LIST_ELLIPSE... | 859 | 22.888889 | 74 | py |
spaCy | spaCy-master/spacy/lang/fi/stop_words.py | # Source https://github.com/stopwords-iso/stopwords-fi/blob/master/stopwords-fi.txt
# Reformatted with some minor corrections
STOP_WORDS = set(
"""
aiemmin aika aikaa aikaan aikaisemmin aikaisin aikana aikoina aikoo aikovat
aina ainakaan ainakin ainoa ainoat aiomme aion aiotte aivan ajan alas alemmas
alkuisin alkuu... | 6,161 | 54.513514 | 83 | py |
spaCy | spaCy-master/spacy/lang/fi/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on both Doc and Span."""
lab... | 2,380 | 28.395062 | 87 | py |
spaCy | spaCy-master/spacy/lang/fi/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
# Source https://www.cs.tut.fi/~jkorpela/kielenopas/5.5.html
for exc_data in [
{ORTH: "aik."},
{ORTH: "alk."},
{ORTH: "alv."},
{ORTH: "ark."},
{ORTH: "as."},
{ORTH: "e... | 2,460 | 20.973214 | 64 | py |
spaCy | spaCy-master/spacy/lang/fr/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from .lemmatizer import FrenchLemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_... | 1,380 | 24.109091 | 82 | py |
spaCy | spaCy-master/spacy/lang/fr/_tokenizer_exceptions_list.py | FR_BASE_EXCEPTIONS = [
"(+)-amphétamine",
"(5R,6S)-7,8-didehydro-4,5-époxy-3-méthoxy-N-méthylmorphinan-6-ol",
"(R)-amphétamine",
"(S)-amphétamine",
"(−)-amphétamine",
"0-day",
"0-days",
"1,1-diméthylhydrazine",
"1,2,3-tris-nitrooxy-propane",
"1,2-diazine",
"1,2-dichloropropan... | 354,363 | 21.67059 | 74 | py |
spaCy | spaCy-master/spacy/lang/fr/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.fr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple cherche à acheter une start-up anglaise pour 1 milliard de dollars",
"Les voitures autonomes déplacent la responsabilité de l'assurance vers le... | 919 | 39 | 95 | py |
spaCy | spaCy-master/spacy/lang/fr/lemmatizer.py | from typing import List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class FrenchLemmatizer(Lemmatizer):
"""
French language lemmatizer applies the default rule based lemmatization
procedure with some modifications for better French language support.
The parts of speech 'ADV... | 3,016 | 33.284091 | 82 | py |
spaCy | spaCy-master/spacy/lang/fr/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = set(
"""
zero un une deux trois quatre cinq six sept huit neuf dix
onze douze treize quatorze quinze seize dix-sept dix-huit dix-neuf
vingt trente quarante cinquante soixante soixante-dix septante quatre-vingt huitante quatre-vingt-dix nonante
cent mille mil million milli... | 1,583 | 35 | 146 | py |