code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
langdata = Language.get(tag, normalize=True) if macro: langdata = langdata.prefer_macrolanguage() return langdata.simplify_script().to_tag()
def standardize_tag(tag: {str, Language}, macro: bool=False) -> str
Standardize a language tag: - Replace deprecated values with their updated versions (if those exist) - Remove script tags that are redundant with the language - If *macro* is True, use a macrolanguage to represent the most common standardized language within that macrolanguage. For example, 'cmn' (Mandarin) becomes 'zh' (Chinese), and 'arb' (Modern Standard Arabic) becomes 'ar' (Arabic). - Format the result according to the conventions of BCP 47 Macrolanguage replacement is not required by BCP 47, but it is required by the Unicode CLDR. >>> standardize_tag('en_US') 'en-US' >>> standardize_tag('en-Latn') 'en' >>> standardize_tag('en-uk') 'en-GB' >>> standardize_tag('eng') 'en' >>> standardize_tag('arb-Arab', macro=True) 'ar' >>> standardize_tag('sh-QU') 'sr-Latn-EU' >>> standardize_tag('sgn-US') 'ase' >>> standardize_tag('zh-cmn-hans-cn') 'cmn-Hans-CN' >>> standardize_tag('zh-cmn-hans-cn', macro=True) 'zh-Hans-CN' >>> standardize_tag('zsm', macro=True) 'ms' >>> standardize_tag('ja-latn-hepburn') 'ja-Latn-hepburn' >>> standardize_tag('spa-latn-mx') 'es-MX' If the tag can't be parsed according to BCP 47, this will raise a LanguageTagError (a subclass of ValueError): >>> standardize_tag('spa-mx-latn') Traceback (most recent call last): ... langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
10.948833
9.682935
1.130735
# Quickly return if the desired language is directly supported if desired_language in supported_languages: return desired_language, 100 # Reduce the desired language to a standard form that could also match desired_language = standardize_tag(desired_language) if desired_language in supported_languages: return desired_language, 100 match_scores = [ (supported, tag_match_score(desired_language, supported)) for supported in supported_languages ] match_scores = [ (supported, score) for (supported, score) in match_scores if score >= min_score ] + [('und', 0)] match_scores.sort(key=lambda item: -item[1]) return match_scores[0]
def best_match(desired_language: {str, Language}, supported_languages: list, min_score: int=75) -> (str, int)
You have software that supports any of the `supported_languages`. You want to use `desired_language`. This function lets you choose the right language, even if there isn't an exact match. Returns: - The best-matching language code, which will be one of the `supported_languages` or 'und' - The score of the match, from 0 to 100 `min_score` sets the minimum match score. If all languages match with a lower score than that, the result will be 'und' with a score of 0. When there is a tie for the best matching language, the first one in the tie will be used. Setting `min_score` lower will enable more things to match, at the cost of possibly mis-handling data or upsetting users. Read the documentation for :func:`tag_match_score` to understand what the numbers mean. >>> best_match('fr', ['de', 'en', 'fr']) ('fr', 100) >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl']) ('sr-Latn', 100) >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan']) ('zh-Hans', 100) >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan']) ('cmn-Hans', 100) >>> best_match('pt', ['pt-BR', 'pt-PT']) ('pt-BR', 100) >>> best_match('en-AU', ['en-GB', 'en-US']) ('en-GB', 96) >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US']) ('es-419', 96) >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY']) ('es-PU', 95) >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY']) ('es-AR', 95) >>> best_match('zsm', ['id', 'mhp']) ('id', 86) >>> best_match('eu', ['el', 'en', 'es']) ('es', 90) >>> best_match('eu', ['el', 'en', 'es'], min_score=92) ('und', 0)
2.900664
3.011845
0.963085
values = (language, tuple(extlangs or ()), script, region, tuple(variants or ()), tuple(extensions or ()), private) if values in cls._INSTANCES: return cls._INSTANCES[values] instance = cls( language=language, extlangs=extlangs, script=script, region=region, variants=variants, extensions=extensions, private=private ) cls._INSTANCES[values] = instance return instance
def make(cls, language=None, extlangs=None, script=None, region=None, variants=None, extensions=None, private=None)
Create a Language object by giving any subset of its attributes. If this value has been created before, return the existing value.
2.126853
2.251668
0.944568
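A minimal sketch of the caching behaviour described above, assuming the Language class exported by this module; the attribute values are only illustrative.
# hedged sketch: repeated calls with the same attributes should return the cached instance
a = Language.make(language='en', region='GB')
b = Language.make(language='en', region='GB')
a is b   # expected: True, because the values tuple is reused as a cache key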
if self._str_tag is not None: return self._str_tag subtags = ['und'] if self.language: subtags[0] = self.language if self.extlangs: for extlang in sorted(self.extlangs): subtags.append(extlang) if self.script: subtags.append(self.script) if self.region: subtags.append(self.region) if self.variants: for variant in sorted(self.variants): subtags.append(variant) if self.extensions: for ext in self.extensions: subtags.append(ext) if self.private: subtags.append(self.private) self._str_tag = '-'.join(subtags) return self._str_tag
def to_tag(self) -> str
Convert a Language back to a standard language tag, as a string. This is also the str() representation of a Language object. >>> Language.make(language='en', region='GB').to_tag() 'en-GB' >>> Language.make(language='yue', script='Hant', region='HK').to_tag() 'yue-Hant-HK' >>> Language.make(script='Arab').to_tag() 'und-Arab' >>> str(Language.make(region='IN')) 'und-IN'
1.999247
1.888512
1.058636
if self._simplified is not None: return self._simplified if self.language and self.script: if DEFAULT_SCRIPTS.get(self.language) == self.script: result = self.update_dict({'script': None}) self._simplified = result return self._simplified self._simplified = self return self._simplified
def simplify_script(self) -> 'Language'
Remove the script from some parsed language data, if the script is redundant with the language. >>> Language.make(language='en', script='Latn').simplify_script() Language.make(language='en') >>> Language.make(language='yi', script='Latn').simplify_script() Language.make(language='yi', script='Latn') >>> Language.make(language='yi', script='Hebr').simplify_script() Language.make(language='yi')
3.567907
3.602669
0.990351
if self._assumed is not None: return self._assumed if self.language and not self.script: try: self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]}) except KeyError: self._assumed = self else: self._assumed = self return self._assumed
def assume_script(self) -> 'Language'
Fill in the script if it's missing, and if it can be assumed from the language subtag. This is the opposite of `simplify_script`. >>> Language.make(language='en').assume_script() Language.make(language='en', script='Latn') >>> Language.make(language='yi').assume_script() Language.make(language='yi', script='Hebr') >>> Language.make(language='yi', script='Latn').assume_script() Language.make(language='yi', script='Latn') This fills in nothing when the script cannot be assumed -- such as when the language has multiple scripts, or it has no standard orthography: >>> Language.make(language='sr').assume_script() Language.make(language='sr') >>> Language.make(language='eee').assume_script() Language.make(language='eee') It also doesn't fill anything in when the language is unspecified. >>> Language.make(region='US').assume_script() Language.make(region='US')
3.11505
3.511604
0.887073
if self._macrolanguage is not None: return self._macrolanguage language = self.language or 'und' if language in NORMALIZED_MACROLANGUAGES: self._macrolanguage = self.update_dict({ 'language': NORMALIZED_MACROLANGUAGES[language] }) else: self._macrolanguage = self return self._macrolanguage
def prefer_macrolanguage(self) -> 'Language'
BCP 47 doesn't specify what to do with macrolanguages and the languages they contain. The Unicode CLDR, on the other hand, says that when a macrolanguage has a dominant standardized language, the macrolanguage code should be used for that language. For example, Mandarin Chinese is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'. This isn't a rule you'd want to follow in all cases -- for example, you may want to be able to specifically say that 'ms' (the Malay macrolanguage) contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying this rule helps when interoperating with the Unicode CLDR. So, applying `prefer_macrolanguage` to a Language object will return a new object, replacing the language with the macrolanguage if it is the dominant language within that macrolanguage. It will leave non-dominant languages that have macrolanguages alone. >>> Language.get('arb').prefer_macrolanguage() Language.make(language='ar') >>> Language.get('cmn-Hant').prefer_macrolanguage() Language.make(language='zh', script='Hant') >>> Language.get('yue-Hant').prefer_macrolanguage() Language.make(language='yue', script='Hant')
2.67565
3.533335
0.757259
if self._broader is not None: return self._broader self._broader = [self] seen = set(self.to_tag()) for keyset in self.BROADER_KEYSETS: filtered = self._filter_attributes(keyset) tag = filtered.to_tag() if tag not in seen: self._broader.append(filtered) seen.add(tag) return self._broader
def broaden(self) -> 'List[Language]'
Iterate through increasingly general versions of this parsed language tag. This isn't actually that useful for matching two arbitrary language tags against each other, but it is useful for matching them against a known standardized form, such as in the CLDR data. The list of broader versions to try appears in UTR 35, section 4.3, "Likely Subtags". >>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden(): ... print(langdata) nn-Latn-NO-x-thingy nn-Latn-NO nn-NO nn-Latn nn und-Latn und
3.873652
3.800209
1.019326
if self._filled is not None: return self._filled for broader in self.broaden(): tag = broader.to_tag() if tag in LIKELY_SUBTAGS: result = Language.get(LIKELY_SUBTAGS[tag], normalize=False) result = result.update(self) self._filled = result return result raise RuntimeError( "Couldn't fill in likely values. This represents a problem with " "the LIKELY_SUBTAGS data." )
def maximize(self) -> 'Language'
The Unicode CLDR contains a "likelySubtags" data file, which can guess reasonable values for fields that are missing from a language tag. This is particularly useful for comparing, for example, "zh-Hant" and "zh-TW", two common language tags that say approximately the same thing via rather different information. (Using traditional Han characters is not the same as being in Taiwan, but each implies that the other is likely.) These implications are provided in the CLDR supplemental data, and are based on the likelihood of people using the language to transmit information on the Internet. (This is why the overall default is English, not Chinese.) >>> str(Language.get('zh-Hant').maximize()) 'zh-Hant-TW' >>> str(Language.get('zh-TW').maximize()) 'zh-Hant-TW' >>> str(Language.get('ja').maximize()) 'ja-Jpan-JP' >>> str(Language.get('pt').maximize()) 'pt-Latn-BR' >>> str(Language.get('und-Arab').maximize()) 'ar-Arab-EG' >>> str(Language.get('und-CH').maximize()) 'de-Latn-CH' >>> str(Language.make().maximize()) # 'MURICA. 'en-Latn-US' >>> str(Language.get('und-ibe').maximize()) 'en-ibe-Latn-US'
7.446811
6.793438
1.096177
if supported == self: return 100 desired_complete = self.prefer_macrolanguage().maximize() supported_complete = supported.prefer_macrolanguage().maximize() desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region) supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region) return 100 - raw_distance(desired_triple, supported_triple)
def match_score(self, supported: 'Language') -> int
Suppose that `self` is the language that the user desires, and `supported` is a language that is actually supported. This method returns a number from 0 to 100 indicating how similar the supported language is (higher numbers are better). This is not a symmetric relation. The algorithm here is described (badly) in a Unicode technical report at http://unicode.org/reports/tr35/#LanguageMatching. If you find these results bothersome, take it up with Unicode, unless it's particular tweaks we implemented such as macrolanguage matching. See :func:`tag_match_score` for a function that works on strings, instead of requiring you to instantiate Language objects first. Further documentation and examples appear with that function.
3.803367
3.225587
1.179124
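A short usage sketch for match_score; the expected score of 96 is taken from the best_match doctest earlier in this file ('en-AU' against 'en-GB'), so treat it as an expectation rather than a guarantee.
desired = Language.get('en-AU')
supported = Language.get('en-GB')
desired.match_score(supported)    # expected: 96, per the best_match doctest
desired.match_score(desired)      # an exact match short-circuits to 100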
return self._get_name('language', language, min_score)
def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str
Give the name of the language (not the entire tag, just the language part) in a natural language. The target language can be given as a string or another Language object. By default, things are named in English: >>> Language.get('fr').language_name() 'French' >>> Language.get('el').language_name() 'Greek' But you can ask for language names in numerous other languages: >>> Language.get('fr').language_name('fr') 'français' >>> Language.get('el').language_name('fr') 'grec' Why does everyone get Slovak and Slovenian confused? Let's ask them. >>> Language.get('sl').language_name('sl') 'slovenščina' >>> Language.get('sk').language_name('sk') 'slovenčina' >>> Language.get('sl').language_name('sk') 'slovinčina' >>> Language.get('sk').language_name('sl') 'slovaščina'
9.031861
10.137666
0.890921
return self.language_name(language=self, min_score=min_score)
def autonym(self, min_score: int=95) -> str
Give the name of this language *in* this language. >>> Language.get('fr').autonym() 'français' >>> Language.get('es').autonym() 'español' >>> Language.get('ja').autonym() '日本語' This doesn't give the name of the region or script, but in some cases the language can name itself in multiple scripts: >>> Language.get('sr-Latn').autonym() 'srpski' >>> Language.get('sr-Cyrl').autonym() 'српски' >>> Language.get('pa').autonym() 'ਪੰਜਾਬੀ' >>> Language.get('pa-Arab').autonym() 'پنجابی' This only works for language codes that CLDR has locale data for. You can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
10.458135
9.068421
1.153248
return self._get_name('script', language, min_score)
def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str
Describe the script part of the language tag in a natural language.
8.756913
5.020573
1.744206
return self._get_name('region', language, min_score)
def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str
Describe the region part of the language tag in a natural language.
8.191306
4.772407
1.716389
names = [] for variant in self.variants: var_names = code_to_names('variant', variant) names.append(self._best_name(var_names, language, min_score)) return names
def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list
Describe each of the variant parts of the language tag in a natural language.
5.451348
4.510021
1.208719
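The naming methods above (script_name, region_name, variant_names) have no doctests of their own; here is a hedged sketch for the first two, with names taken from the describe() doctests that appear a little further down (the Shavian example).
tag = Language.get('en-Shaw-GB')
tag.script_name()        # expected: 'Shavian'
tag.region_name()        # expected: 'United Kingdom'
tag.script_name('fr')    # expected: 'shavien', per the French describe() example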
names = {} if self.language: names['language'] = self.language_name(language, min_score) if self.script: names['script'] = self.script_name(language, min_score) if self.region: names['region'] = self.region_name(language, min_score) if self.variants: names['variants'] = self.variant_names(language, min_score) return names
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict
Return a dictionary that describes a given language tag in a specified natural language. See `language_name` and related methods for more specific versions of this. The desired `language` will in fact be matched against the available options using the matching technique that this module provides. We can illustrate many aspects of this by asking for a description of Shavian script (a script devised by author George Bernard Shaw), and where you might find it, in various languages. >>> from pprint import pprint >>> shaw = Language.make(script='Shaw').maximize() >>> pprint(shaw.describe('en')) {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'} >>> pprint(shaw.describe('fr')) {'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'} >>> pprint(shaw.describe('es')) {'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'} >>> pprint(shaw.describe('pt')) {'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'} >>> pprint(shaw.describe('uk')) {'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'} >>> pprint(shaw.describe('arb')) {'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'} >>> pprint(shaw.describe('th')) {'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'} >>> pprint(shaw.describe('zh-Hans')) {'language': '英语', 'region': '英国', 'script': '萧伯纳式文'} >>> pprint(shaw.describe('zh-Hant')) {'language': '英文', 'region': '英國', 'script': '簫柏納字符'} >>> pprint(shaw.describe('ja')) {'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'} When we don't have a localization for the language, we fall back on 'und', which just shows the language codes. >>> pprint(shaw.describe('lol')) {'language': 'en', 'region': 'GB', 'script': 'Shaw'} Wait, is that a real language? >>> pprint(Language.get('lol').maximize().describe()) {'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
2.282089
2.121309
1.075793
# No matter what form of language we got, normalize it to a single # language subtag if isinstance(language, Language): language = language.language elif isinstance(language, str): language = get(language).language if language is None: language = 'und' code = name_to_code(tagtype, name, language) if code is None: raise LookupError("Can't find any %s named %r" % (tagtype, name)) if '-' in code: return Language.get(code) else: data = {tagtype: code} return Language.make(**data)
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None)
Find the subtag of a particular `tagtype` that has the given `name`. The default language, "und", will allow matching names in any language, so you can get the code 'fr' by looking up "French", "Français", or "francés". Occasionally, names are ambiguous in a way that can be resolved by specifying what name the language is supposed to be in. For example, there is a language named 'Malayo' in English, but it's different from the language named 'Malayo' in Spanish (which is Malay). Specifying the language will look up the name in a trie that is only in that language. In a previous version, we thought we were going to deprecate the `language` parameter, as there weren't significant cases of conflicts in names of things between languages. Well, we got more data, and conflicts in names are everywhere. Specifying the language that the name should be in is still not required, but it will help to make sure that names can be round-tripped. >>> Language.find_name('language', 'francés') Language.make(language='fr') >>> Language.find_name('region', 'United Kingdom') Language.make(region='GB') >>> Language.find_name('script', 'Arabic') Language.make(script='Arab') >>> Language.find_name('language', 'norsk bokmål') Language.make(language='nb') >>> Language.find_name('language', 'norsk') Language.make(language='no') >>> Language.find_name('language', 'norsk', 'en') Traceback (most recent call last): ... LookupError: Can't find any language named 'norsk' >>> Language.find_name('language', 'norsk', 'no') Language.make(language='no') >>> Language.find_name('language', 'malayo', 'en') Language.make(language='mbp') >>> Language.find_name('language', 'malayo', 'es') Language.make(language='ms') Some language names resolve to more than a language. For example, the name 'Brazilian Portuguese' resolves to a language and a region, and 'Simplified Chinese' resolves to a language and a script. In these cases, a Language object with multiple subtags will be returned. >>> Language.find_name('language', 'Brazilian Portuguese', 'en') Language.make(language='pt', region='BR') >>> Language.find_name('language', 'Simplified Chinese', 'en') Language.make(language='zh', script='Hans') A small amount of fuzzy matching is supported: if the name can be shortened to match a single language name, you get that language. This allows, for example, "Hakka dialect" to match "Hakka". >>> Language.find_name('language', 'Hakka dialect') Language.make(language='hak')
4.702014
3.999284
1.175714
if self._dict is not None: return self._dict result = {} for key in self.ATTRIBUTES: value = getattr(self, key) if value: result[key] = value self._dict = result return result
def to_dict(self)
Get a dictionary of the attributes of this Language object, which can be useful for constructing a similar object.
2.768637
2.405929
1.150756
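A small sketch of to_dict in the doctest style used elsewhere in this file; only attributes with truthy values should appear in the result.
Language.get('en-GB').to_dict()
# expected: {'language': 'en', 'region': 'GB'}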
return Language.make( language=other.language or self.language, extlangs=other.extlangs or self.extlangs, script=other.script or self.script, region=other.region or self.region, variants=other.variants or self.variants, extensions=other.extensions or self.extensions, private=other.private or self.private )
def update(self, other: 'Language') -> 'Language'
Update this Language with the fields of another Language.
2.261539
2.221699
1.017933
return Language.make( language=newdata.get('language', self.language), extlangs=newdata.get('extlangs', self.extlangs), script=newdata.get('script', self.script), region=newdata.get('region', self.region), variants=newdata.get('variants', self.variants), extensions=newdata.get('extensions', self.extensions), private=newdata.get('private', self.private) )
def update_dict(self, newdata: dict) -> 'Language'
Update the attributes of this Language from a dictionary.
1.961825
1.855807
1.057127
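A hedged sketch covering update and update_dict together; the attribute values are illustrative and follow from the "other wins when both are set" logic in the code above.
base = Language.make(language='en', region='GB')
base.update(Language.make(region='US'))
# expected: Language.make(language='en', region='US')
base.update_dict({'script': 'Latn'})
# expected: Language.make(language='en', script='Latn', region='GB')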
return {key: d[key] for key in keys if key in d}
def _filter_keys(d: dict, keys: set) -> dict
Select a subset of keys from a dictionary.
6.476462
3.20564
2.020334
filtered = self._filter_keys(self.to_dict(), keyset) return Language.make(**filtered)
def _filter_attributes(self, keyset)
Return a copy of this object with a subset of its attributes set.
12.675337
11.129164
1.13893
if self._searchable is not None: return self._searchable self._searchable = self._filter_attributes( {'language', 'script', 'region'} ).simplify_script().prefer_macrolanguage() return self._searchable
def _searchable_form(self) -> 'Language'
Convert a parsed language tag so that the information it contains is in the best form for looking up information in the CLDR.
6.62807
5.7231
1.158126
max_priority = max([val[2] for val in vals]) val_count = Counter([val[1] for val in vals if val[2] == max_priority]) if len(val_count) == 1: unanimous = val_count.most_common(1) return unanimous[0][0] for pkey in val_count: if pkey in AMBIGUOUS_PREFERENCES: others = set(val_count) others.remove(pkey) if others == others & AMBIGUOUS_PREFERENCES[pkey]: if debug: print("Resolved: {} -> {}".format(key, pkey)) return pkey # In debug mode, show which languages vote for which name if debug and max_priority >= 0: votes = defaultdict(list) for voter, val, prio in vals: if prio == max_priority: votes[val].append(voter) print("{}:".format(key)) for val, voters in sorted(votes.items()): print("\t{}: {}".format(val, ' '.join(voters))) # Don't use names that remain ambiguous return None
def resolve_name(key, vals, debug=False)
Given a name, and a number of possible values it could resolve to, find the single value it should resolve to, in the following way: - Apply the priority order - If names with the highest priority all agree, use that name - If there is disagreement that can be resolved by AMBIGUOUS_PREFERENCES, use that - Otherwise, don't resolve the name (and possibly show a debugging message when building the data)
3.470011
3.318758
1.045575
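An illustrative sketch of the resolution logic; the (voter, value, priority) tuple shape is read off the code above, and the voter names and priorities here are made up.
vals = [('cldr', 'fr', 1), ('wiktionary', 'frm', 0)]
resolve_name('french', vals)
# expected: 'fr' -- only the highest-priority entries vote, and they are unanimous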
filename = data_filename('{}/{}/{}.json'.format(path, language, category)) fulldata = json.load(open(filename, encoding='utf-8')) data = fulldata['main'][language]['localeDisplayNames'][category] return data
def read_cldr_names(path, language, category)
Read CLDR's names for things in a particular language.
4.461915
4.398011
1.01453
lines = [] for line in file: line = line.rstrip('\n') if line == '%%': # This is a separator between items. Parse the data we've # collected and yield the result. yield from parse_item(lines) lines.clear() elif line.startswith(' '): # This is a continuation line. Concatenate it to the previous # line, including one of the spaces. lines[-1] += line[1:] else: lines.append(line) yield from parse_item(lines)
def parse_file(file)
Take an open file containing the IANA subtag registry, and yield a dictionary of information for each subtag it describes.
3.827983
3.608769
1.060745
info = {} for line in lines: key, value = line.split(': ', 1) if key in LIST_KEYS: info.setdefault(key, []).append(value) else: assert key not in info info[key] = value if 'Subtag' in info or 'Tag' in info: yield info
def parse_item(lines)
Given the lines that form a subtag entry (after joining wrapped lines back together), parse the data they contain. Returns a generator that yields once if there was any data there (and an empty generator if this was just the header).
3.460901
3.221556
1.074295
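A hedged sketch of the IANA registry entry format that parse_file and parse_item consume; whether a key such as 'Description' is accumulated into a list depends on LIST_KEYS, which is assumed here.
lines = [
    'Type: language',
    'Subtag: fr',
    'Description: French',
    'Added: 2005-10-16',
]
list(parse_item(lines))
# expected, assuming 'Description' is in LIST_KEYS:
# [{'Type': 'language', 'Subtag': 'fr', 'Description': ['French'], 'Added': '2005-10-16'}]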
name = name.casefold() name = name.replace("’", "'") name = name.replace("-", " ") name = name.replace("(", "") name = name.replace(")", "") name = name.replace(",", "") return name.strip()
def normalize_name(name)
When looking up a language-code component by name, we would rather ignore distinctions of case and certain punctuation. "Chinese (Traditional)" should be matched by "Chinese Traditional" and "chinese traditional".
2.270945
2.26746
1.001537
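Two illustrative calls, derived directly from the replacements listed in the code above.
normalize_name('Chinese (Traditional)')   # -> 'chinese traditional'
normalize_name('Haut-Sorabe')             # -> 'haut sorabe'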
trie = marisa_trie.BytesTrie() # marisa_trie raises warnings that make no sense. Ignore them. with warnings.catch_warnings(): warnings.simplefilter("ignore") trie.load(filename) return trie
def load_trie(filename)
Load a BytesTrie from the marisa_trie on-disk format.
4.434244
3.801477
1.166453
assert '/' not in language, "Language codes cannot contain slashes" assert '-' not in language, "This code should be reduced to a language subtag only" trie_name = '{}/name_to_{}'.format(language, category) if trie_name not in TRIES: TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name))) trie = TRIES[trie_name] lookup = normalize_name(name) if lookup in trie: return get_trie_value(trie, lookup) else: # Is this a language plus extra junk? Maybe it has "...isch", "... language", # or "... Chinese" attached to it, for example. prefixes = trie.prefixes(lookup) if prefixes and len(prefixes[-1]) >= 4: return get_trie_value(trie, prefixes[-1]) else: return None
def name_to_code(category, name, language: str='und')
Get a language, script, or region by its name in some language. The language here must be a string representing a language subtag only. The `Language.find` method can handle other representations of a language and normalize them to this form. The default language, "und", will allow matching names in any language, so you can get the code 'fr' by looking up "French", "Français", or "francés". A small amount of fuzzy matching is supported: if the name can be shortened or lengthened to match a single language name, you get that language. This allows, for example, "Hakka Chinese" to match "Hakka". Occasionally, names are ambiguous in a way that can be resolved by specifying what name the language is supposed to be in. For example, there is a language named 'Malayo' in English, but it's different from the language named 'Malayo' in Spanish (which is Malay). Specifying the language will look up the name in a trie that is only in that language.
6.09873
5.971415
1.021321
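A hedged sketch mirroring the find_name doctests earlier in this file; name_to_code returns None where find_name would raise LookupError.
name_to_code('language', 'Français')       # expected: 'fr'
name_to_code('language', 'norsk', 'en')    # expected: None (no English name matches)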
trie_name = '{}_to_name'.format(category) if trie_name not in TRIES: TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name))) trie = TRIES[trie_name] lookup = code.lower() + '@' possible_keys = trie.keys(lookup) names = {} for key in possible_keys: target_language = key.split('@')[1] names[target_language] = get_trie_value(trie, key) return names
def code_to_names(category, code)
Given the code for a language, script, or region, get a dictionary of its names in various languages.
4.015194
3.72208
1.07875
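A partial sketch of the returned dictionary; only two of its many entries are shown, with values taken from the language_name doctests earlier in this file.
names = code_to_names('language', 'fr')
names['en']   # expected: 'French'
names['fr']   # expected: 'français'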
def unicode2encode(text, charmap): ''' charmap : dictionary which has both encode as key, unicode as value ''' if isinstance(text, (list, tuple)): unitxt = '' for line in text: for val,key in charmap.items(): if key in line: line = line.replace(key, val) # end of if val in text: unitxt += line # end of for line in text: return unitxt elif isinstance(text, str): for val,key in charmap.items(): if key in text: text = text.replace(key, val) return text
charmap : dictionary which has the encoding's character as key and the unicode character as value
null
null
null
def unicode2auto(unicode_text, encode_text): _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes() # get unique word which falls under any one of available encodes from # user passed text lines unique_chars = _get_unique_ch(encode_text, _all_common_encodes_) # count common encode chars clen = len(_all_common_encodes_) msg = "Sorry, couldn't find encode :-(\n" msg += 'Need more words to find unique encode out side of %d ' % clen msg += 'common compound characters' if not unique_chars: print(msg) return '' # end of if not unique_chars: for encode_name, encode_keys in _all_unique_encodes_: if not len(encode_keys): continue for ch in encode_keys: # check whether encode char is present in word if ch in unique_chars: # found encode print(("Found encode : ", encode_name)) encode = _all_encodes_[encode_name] return unicode2encode(unicode_text, encode) # end of if ch in unique_chars: # end of for ch in encode_keys: else: print(msg) return ''
This function converts the unicode text (first argument) into another encoding by automatically detecting that encoding (from the available encodings) using the sample encoded text in the second argument. unicode_text : unicode string that has to be converted into the other encoding. encode_text : sample encoded string used to identify a suitable encoding for it. This function tries to identify the encoding among the available encodings. If it finds one, it converts unicode_text into that encoding's string. Author : Arulalan.T 08.08.2014
null
null
null
cwl = 0.0 for k,_v in code.items(): print(u"%s -> %s"%(k,_v)) cwl += p[v.index(k)]*len(_v) print(u"cwl = %g"%cwl) return cwl,code.values()
def print_huffman_code_cwl(code,p,v)
code - dictionary with symbol -> code map; p - list of symbol probabilities; v - list of symbols, aligned with p
4.254151
4.252687
1.000344
if not type(wordA) is list: lettersA = tamil.utf8.get_letters(wordA) else: lettersA = wordA if not type(wordB) is list: lettersB = tamil.utf8.get_letters(wordB) else: lettersB = wordB n_A = len(lettersA) n_B = len(lettersB) dist_AB = [[0 for i in range(0,n_B+1)] for i in range(0,(n_A+1))] # Target prefix reached by insertion for j in range(1,n_B+1): dist_AB[0][j] = j for i in range(1,n_A+1): dist_AB[i][0] = i for j in range(1,n_B+1): for i in range(1,n_A+1): if (lettersA[i-1] == lettersB[j-1]): new_dist = dist_AB[i-1][j-1] else: new_dist = min( [dist_AB[i-1][j]+1, dist_AB[i][j-1]+1, dist_AB[i-1][j-1]+1] ) #del, ins, or sub dist_AB[i][j] = new_dist return dist_AB[-1][-1]
def edit_distance(wordA,wordB)
Implements the Levenshtein edit distance algorithm: Ref: https://en.wikipedia.org/wiki/Edit_distance Ref: https://en.wikipedia.org/wiki/Levenshtein_distance
1.930292
1.957734
0.985983
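A quick sanity-check sketch: non-Tamil strings are split per character by tamil.utf8.get_letters, so the classic Levenshtein example applies unchanged.
edit_distance('kitten', 'sitting')   # expected: 3 (two substitutions, one insertion)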
if not type(wordA) is list: lettersA = tamil.utf8.get_letters(wordA) else: lettersA = wordA if not type(wordB) is list: lettersB = tamil.utf8.get_letters(wordB) else: lettersB = wordB n_A = len(lettersA) n_B = len(lettersB) # OK only if unique - set(lettersA).intersection(set(lettersB)) n_AB = len( list( filter( lambda cmnL: cmnL in lettersB, lettersA) ) ) return (2.0*n_AB)/(n_A+n_B)
def Dice_coeff(wordA,wordB)
Implements the Dice coefficient - Ref: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient - the result is between 0 and 1.0 and can be used as a similarity measure (it is not an edit distance)
2.994525
2.977358
1.005766
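Two boundary-case sketches; note that this implementation counts shared letters rather than bigrams.
Dice_coeff('abc', 'abc')   # expected: 1.0 (identical words)
Dice_coeff('abc', 'xyz')   # expected: 0.0 (no letters in common)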
L = len(self.letters)-1 for idx,letter in enumerate(self.letters): if not( letter in tamil.utf8.grantha_uyirmei_letters): continue muthal = idx == 0 and u"" or u"".join(self.letters[0:idx]) meethi = idx == L and u"" or u"".join(self.letters[idx+1:]) mei,uyir = tamil.utf8.splitMeiUyir(letter) muthal = muthal + mei meethi = uyir + meethi self.results.append([muthal,meethi]) return len(self.results) > 0
def generate_splits(self)
யாரிகழ்ந்து = [['ய்', 'ஆரிகழ்ந்து'], ['யார்', 'இகழ்ந்து'], ['யாரிக்', 'அழ்ந்து'], ['யாரிகழ்ந்த்', 'உ']]
4.761854
4.516872
1.054237
alternates = cm.get(word_in[pos],[]) if not candidates: candidates = [] assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]' if (pos >len(word_in)) or ed == 0: return candidates pfx = '' sfx = '' curr_candidates = [] for p in range(0,pos): pfx = pfx + word_in[p] for p in range(pos+1,len(word_in)): sfx = sfx + word_in[p] for alt in alternates: word_alt = pfx + alt + sfx if not (word_alt in candidates): candidates.append( word_alt ) curr_candidates.append( word_alt ) for n_pos in range(pos,len(word_in)): # already what we have ' candidates ' of this round are edit-distance 1 for word in curr_candidates: oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates) if level == 0: #candidates.append(word_in) for n_pos in range(pos,len(word_in)): oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates) return candidates
def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None)
ed = 1 by default, pos - internal variable for algorithm
3.677373
3.666588
1.002941
# Python 2-3 compatible return u"u'"+ u"".join( [ u"\\u%04x"%ord(l) for l in _letter ] ) + u"'"
def to_unicode_repr( _letter )
helpful in situations where browser/app may recognize Unicode encoding in the \u0b8e type syntax but not actual unicode glyph/code-point
4.933465
4.959419
0.994767
idx,idy = mei_idx,uyir_idx assert ( idy >= 0 and idy < uyir_len() ) assert ( idx >= 0 and idx < 6+mei_len() ) return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx]
def uyirmei_constructed( mei_idx, uyir_idx)
construct an uyirmei letter given a mei index and an uyir index
6.262912
5.534863
1.131539
return not all_tamil(word_in) and len(word_in) > 0 and any([l in word_in for l in string.ascii_letters])
def has_english( word_in )
return True if word_in has any English letters in the string
5.431149
4.715079
1.151868
if isinstance(word_in,list): word = word_in else: word = get_letters( word_in ) return all( [(letter in tamil_letters) for letter in word] )
def all_tamil( word_in )
predicate checks if all letters of the input word are Tamil letters
3.844249
3.347986
1.148227
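A hedged sketch for the two predicates above (has_english and all_tamil); the sample strings are illustrative.
all_tamil(u"தமிழ்")           # expected: True, every letter is Tamil
has_english(u"தமிழ் word")    # expected: True, mixed input containing ASCII letters
has_english(u"தமிழ்")         # expected: False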
op = get_letters( word ) op.reverse() return u"".join(op)
def reverse_word( word )
reverse a Tamil word according to letters not unicode-points
9.633941
7.024571
1.371463
ta_letters = list() not_empty = False WLEN,idx = len(word),0 while (idx < WLEN): c = word[idx] #print(idx,hex(ord(c)),len(ta_letters)) if c in uyir_letter_set or c == ayudha_letter: ta_letters.append(c) not_empty = True elif c in grantha_agaram_set: ta_letters.append(c) not_empty = True elif c in accent_symbol_set: if not not_empty: # odd situation ta_letters.append(c) not_empty = True else: #print("Merge/accent") ta_letters[-1] += c else: if ord(c) < 256 or not (is_tamil_unicode(c)): ta_letters.append( c ) else: if not_empty: #print("Merge/??") ta_letters[-1]+= c else: ta_letters.append(c) not_empty = True idx = idx + 1 return ta_letters
def get_letters( word )
splits the word into a character-list of tamil/english characters present in the stream
3.974315
3.73079
1.065274
WLEN,idx = len(word),0 while (idx < WLEN): c = word[idx] #print(idx,hex(ord(c)),len(ta_letters)) if c in uyir_letter_set or c == ayudha_letter: idx = idx + 1 yield c elif c in grantha_agaram_set: if idx + 1 < WLEN and word[idx+1] in all_symbol_set: c2 = word[idx+1] idx = idx + 2 yield (c + c2) else: idx = idx + 1 yield c else: idx = idx + 1 yield c return
def get_letters_iterable( word )
splits the word into a character-list of tamil/english characters present in the stream
4.163143
3.752952
1.109298
# correct algorithm for get-tamil-words buf = [] for idx,let in enumerate(letters): if not let.isspace(): if istamil(let) or (not tamil_only): buf.append( let ) else: if len(buf) > 0: yield u"".join( buf ) buf = [] if len(buf) > 0: yield u"".join(buf)
def get_words_iterable( letters, tamil_only=False )
given a list of UTF-8 letters section them into words, grouping them at spaces
4.094004
3.827525
1.069622
if not isinstance(letters,list): raise Exception("method needs to be used with list generated from 'tamil.utf8.get_letters(...)'") return [word for word in get_words_iterable( letters, tamil_only = True )]
def get_tamil_words( letters )
given a list of letters produced by tamil.utf8.get_letters, return the Tamil words found in it
8.987369
8.403151
1.069524
# sanity check for words to be all Tamil if ( not all_tamil(word_a) ) or (not all_tamil(word_b)) : #print("## ") #print(word_a) #print(word_b) #print("Both operands need to be Tamil words") pass La = len(word_a) Lb = len(word_b) all_TA_letters = u"".join(tamil_letters) for itr in range(0,min(La,Lb)): pos1 = all_TA_letters.find( word_a[itr] ) pos2 = all_TA_letters.find( word_b[itr] ) if pos1 != pos2 : #print not( pos1 > pos2), pos1, pos2 return cmp(pos1, pos2) # result depends on if La is shorter than Lb, or 0 if La == Lb i.e. cmp return cmp(La,Lb)
def compare_words_lexicographic( word_a, word_b )
compare words in Tamil lexicographic order
4.56667
4.307432
1.060184
positions = [] word_a_letters = get_letters( word_a ) word_b_letters = get_letters( word_b ) for idx,wa in enumerate(word_a_letters): for idy,wb in enumerate(word_b_letters): if ( wa == wb ): positions.append( (idx, idy) ) return positions
def word_intersection( word_a, word_b )
return a list of tuples where word_a, word_b intersect
2.421363
2.288036
1.058271
if not isinstance(uyirmei_char, PYTHON3 and str or unicode): raise ValueError("Passed input letter '%s' must be unicode, \ not just string" % uyirmei_char) if uyirmei_char in mei_letters or uyirmei_char in uyir_letters or uyirmei_char in ayudha_letter: return uyirmei_char if uyirmei_char not in grantha_uyirmei_letters: if not is_normalized( uyirmei_char ): norm_char = unicode_normalize(uyirmei_char) rval = splitMeiUyir( norm_char ) return rval raise ValueError("Passed input letter '%s' is not tamil letter" % uyirmei_char) idx = grantha_uyirmei_letters.index(uyirmei_char) uyiridx = idx % 12 meiidx = int((idx - uyiridx)/ 12) return (grantha_mei_letters[meiidx], uyir_letters[uyiridx])
def splitMeiUyir(uyirmei_char)
This function splits an uyirmei compound character into mei + uyir characters and returns them as a tuple. Input : It must be a unicode tamil char. Written By : Arulalan.T Date : 22.09.2014
3.085161
3.01453
1.02343
if not mei_char: return uyir_char if not uyir_char: return mei_char if not isinstance(mei_char, PYTHON3 and str or unicode): raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char) if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None: raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char) if mei_char not in grantha_mei_letters: raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char) if uyir_char not in uyir_letters: raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char) if uyir_char: uyiridx = uyir_letters.index(uyir_char) else: return mei_char meiidx = grantha_mei_letters.index(mei_char) # calculate uyirmei index uyirmeiidx = meiidx*12 + uyiridx return grantha_uyirmei_letters[uyirmeiidx]
def joinMeiUyir(mei_char, uyir_char)
This function joins a mei character and an uyir character, and returns the compound uyirmei unicode character. Inputs: mei_char : It must be a unicode tamil mei char. uyir_char : It must be a unicode tamil uyir char. Written By : Arulalan.T Date : 22.09.2014
2.256778
2.172142
1.038964
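A hedged round-trip sketch for splitMeiUyir and joinMeiUyir, assuming the standard letter tables shipped with the library; க should be the compound of the mei க் and the uyir அ.
splitMeiUyir(u"க")          # expected: (u"க்", u"அ")
joinMeiUyir(u"க்", u"அ")    # expected: u"க"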
# few sequences for seq in [utf8.uyir_letters, utf8.grantha_mei_letters, \ utf8.grantha_agaram_letters]: if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) # all Tamil letters seq = utf8.grantha_uyirmei_letters if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) raise Exception("Cannot understand sequence [%s-%s]"%(start,end))
def expand_tamil(start,end)
expand uyir or mei-letter range etc. i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc.
5.339605
5.206688
1.025528
# print('input',len(patt)) patt_letters = utf8.get_letters( patt ) patt_out = list() idx = 0 # print('output',len(patt_letters)) patt = [None,None] prev = None LEN_PATT = len(patt_letters) while( idx < LEN_PATT ): if utf8.istamil( patt_letters[idx] ) and ( prev == '-' or ((idx+1) < LEN_PATT and patt_letters[idx+1] == u'-') ): if (idx+1) < LEN_PATT and patt_letters[idx+1] == u'-': patt[0] = patt_letters[idx] idx = idx + 2 prev = "-" elif prev == '-': patt[1] = patt_letters[idx] patt_out.extend( expand_tamil( patt[0], patt[1]) ) idx = idx + 1 prev = patt_letters[idx] continue patt_out.extend( patt_letters[idx] ) prev = patt_letters[idx] idx = idx + 1 opattern = u"".join( patt_out ) compile_regexp = re.compile( opattern, flags ) return (compile_regexp,opattern)
def make_pattern( patt, flags=0 )
returns a compiled regular expression object along with the expanded pattern string
2.986464
2.941338
1.015342
prev_letter = None # use a generator in corpus prev_letter = list(self.corpus.next_tamil_letter())[0] for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus key = prev_letter+next_letter val = self.bigram.get(key,None) prev_letter = next_letter if not val: self.bigram[key] = 0 self.bigram[key] += 1 return
def frequency_model( self )
build a letter bigram frequency model for Tamil letters from a corpus
5.218426
4.157481
1.25519
output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in ISCII_DIRECT_LOOKUP: if ( prev in ISCII_PRE_MODIFIER ): curr_char = [ISCII[ord(char)],ISCII[prev]] else: # we are direct lookup char curr_char = [ISCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in ISCII_POST_MODIFIER) ): if ( (prev in ISCII_DIRECT_LOOKUP) and (prev2x in ISCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [ISCII[prev], ISCII[prev2x]] ) else: print("Warning: malformed ISCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
def convert_to_unicode( tscii_input )
convert an ISCII byte-encoded string into the equivalent Unicode string.
5.220342
5.208921
1.002193
def compute( self ): # compute the intersection graph into @xsections dictionary wordlist = self.wordlist xsections = {} for i in range(len(wordlist)): word_i = wordlist[i] for j in range(len(wordlist)): word_j = wordlist[j] if i == j: # force self-intersection to be 0 if not xsections.get(word_i,None): xsections[word_i] = [''] else: xsections[word_i].extend(['']) continue # optimize for, i > j, info is calculated already if i > j: xsec_counts = xsections[word_j][i] else: xsec_counts = tamil.utf8.word_intersection( word_i, word_j ) if not xsections.get(word_i,None): xsections[word_i] = [xsec_counts] else: xsections[word_i].extend( [ xsec_counts ] ) self.xsections = xsections
build a dictionary of words, and their intersections
null
null
null
chars = get_letters(word) flag = True #no error assumed reason = None #no reason freq_count = 0 for char in chars: if char in ref_set: freq_count += 1 if freq_count >= freq_threshold: flag = False break continue freq_count = 0 # continue loop if not flag: reason = ref_reason return flag,reason
def in_sequence( word, ref_set, ref_reason, freq_threshold = 2 )
ignore ctx information right now. If the repetition/match length is >= @freq_threshold then we flag it
4.37766
4.240361
1.032379
return Sequential.in_sequence(word,AdjacentVowels.uyir_letters,AdjacentVowels.reason)
def apply(self, word, ctx=None)
ignore ctx information right now
68.343071
59.76107
1.143605
flag,reason = Sequential.in_sequence(word,AdjacentConsonants.mei_letters,AdjacentConsonants.reason,self.freq_threshold) if flag: flag,reason = Sequential.in_sequence(word,AdjacentConsonants.agaram_letters,AdjacentConsonants.reason,self.freq_threshold) return flag,reason
def apply(self, word, ctx=None)
ignore ctx information right now
8.015859
7.661751
1.046218
chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_letter = None for char in chars: if prev_letter == char: flag = False break prev_letter = char # continue loop if not flag: reason = RepeatedLetters.reason return flag,reason
def apply(self,word,ctx=None)
ignore ctx information right now
7.274476
6.791118
1.071175
chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_char = None for char in chars: rule1,rule2,rule3 = False,False,False # rule 1 : uyir followed by kombugal rule1 = (char[-1] in utf8.accent_symbols) and (char[0] in utf8.uyir_letters) if not rule1: # rule 2 : two pullis adjacent to each other rule2 = len(char) >= 2 and (char[-1] == utf8.pulli_symbols[0]) and (char[-2] == char[-1] ) if not rule2: # rule 3 : none of the accent symbols repeat # exclusions to rule 3 : non-standard Unicode encoding of periya kombu / siriya kombu with thunai kaal rule3 = len(char) >= 2 and (char[-1] in utf8.accent_symbols) and (char[-2] in utf8.accent_symbols) \ and not( char[-1] == u"ா" and char[-2] in [u"ெ",u"ே"]) if rule1 or rule2 or rule3: flag = False reason = BadIME.reason break prev_char = char # continue loop #print([flag,reason]) return flag,reason
def apply(self, word, ctx=None)
ignore ctx information right now
6.765388
6.619525
1.022035
''' Set the self.current string. ''' self.current = value self.cursor = 0 self.limit = len(self.current) self.limit_backward = 0 self.bra = self.cursor self.ket = self.limit
def set_current(self, value)
Set the self.current string.
5.004023
3.348295
1.494499
''' to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string ''' adjustment = len(s) - (c_ket - c_bra) self.current = self.current[0:c_bra] + s + self.current[c_ket:] self.limit += adjustment if self.cursor >= c_ket: self.cursor += adjustment elif self.cursor > c_bra: self.cursor = c_bra return adjustment
def replace_s(self, c_bra, c_ket, s)
to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string
3.391994
2.154635
1.574278
''' Copy the slice into the supplied StringBuffer @type s: string ''' result = '' if self.slice_check(): result = self.current[self.bra:self.ket] return result
def slice_to(self, s)
Copy the slice into the supplied StringBuffer @type s: string
12.349206
6.115079
2.019468
for c in word: if unicodedata.name(c).split()[0] != u'TAMIL' : return False return True
def isTamilPredicate(word)
is Tamil word : boolean True/False
5.335407
5.042918
1.058
tweet = ''.join( map( lambda c: (unicodedata.name(c).split()[0] in [u'TAMIL',u'LATIN']) and c or u' ', tweet) ) return tweet
def cleanupPunct( tweet )
replace any character that is neither Tamil nor Latin with a space
6.937409
6.524056
1.063358
tweet = TamilTweetParser.cleanupPunct( tweet ); nonETwords = filter( lambda x: len(x) > 0 , re.split(r'\s+',tweet) );#|"+|\'+|#+ tamilWords = filter( TamilTweetParser.isTamilPredicate, nonETwords ); return tamilWords
def getTamilWords( tweet )
return the words of the tweet whose letters are all Tamil
9.717454
9.746806
0.996989
output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in TSCII_DIRECT_LOOKUP: if ( prev in TSCII_PRE_MODIFIER ): curr_char = [TSCII[ord(char)],TSCII[prev]] else: # we are direct lookup char curr_char = [TSCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in TSCII_POST_MODIFIER) ): if ( (prev in TSCII_DIRECT_LOOKUP) and (prev2x in TSCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [TSCII[prev], TSCII[prev2x]] ) else: print("Warning: malformed TSCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
def convert_to_unicode( tscii_input )
convert a TSCII byte-encoded string into the equivalent Unicode string.
5.137244
5.131618
1.001096
def _get_unique_ch(text, all_common_encodes): unique_chars = '' if isinstance(text, str): text = text.split("\n") elif isinstance(text, (list, tuple)): pass special_chars = ['.', ',', ';', ':','', ' ', '\r', '\t', '=', '\n'] for line in text: for word in line.split(' '): if ( not PYTHON3 ): word = word.decode( 'utf-8') for ch in all_common_encodes: if ch in word: word = word.replace(ch, '') # end of for ch in _all_common_encodes_: # if len of word is zero, then go for another word if not word: continue for ch in word: if ch.isdigit() or ch in special_chars: # remove special common chars word = word.replace(ch, '') continue # end of if ch.isdigit() or ...: # Whola, got unique chars from user passed text return word # end of for ch in word: # end of for word in line.split(' '): # end of for line in text: return ''
text : sample strings in the unknown encoding. Returns a unique word / characters from the input encoded text.
null
null
null
def _get_unique_common_encodes(): _all_unique_encodes_ = [] _all_unicode_encodes_ = {} _all_common_encodes_ = set([]) _all_common_encodes_single_char_ = set([]) for name, encode in _all_encodes_.items(): encode_utf8 = set([PYTHON3 and ch or ch.decode( 'utf-8') for ch in encode.keys()]) _all_unicode_encodes_[name] = encode_utf8 _all_unique_encodes_full_ =_all_unicode_encodes_.copy() for supname, super_encode in _all_unicode_encodes_.items(): for subname, sub_encode in _all_unicode_encodes_.items(): if supname == subname: continue # get unique of super_encode among other encodings super_encode = super_encode - sub_encode # get common for all over encodings common = _all_unique_encodes_full_[supname] - super_encode # merge common to all encodings common _all_common_encodes_ = _all_common_encodes_.union(common) # store super_encode's unique keys with its name _all_unique_encodes_.append((supname, super_encode)) for ch in _all_common_encodes_: # collect single common chars if len(ch) == 1: _all_common_encodes_single_char_.add(ch) # end of for ch in _all_common_encodes_: # remove single common char from compound common chars _all_common_encodes_ -= _all_common_encodes_single_char_ if __WRITE_CHARS_TXT: # write common compound characters of all encodes f = open('all.encodes.common.chars.txt', 'w') for ch in _all_common_encodes_: ch = ch.encode('utf-8') for encode_keys in _all_encodes_.values(): if ch in encode_keys: uni = encode_keys[ch] break # end of if ch in encode_keys: # end of for encode_keys in _all_encodes_.values(): f.write(ch + ' => ' + uni + '\n') # end of for ch in _all_common_encodes_: f.close() # write unique compound characters of all encodes for encode_name, encode_keys in _all_unique_encodes_: f = open(encode_name + '.unique.chars.txt', 'w') for ch in encode_keys: ch = ch.encode('utf-8') uni = _all_encodes_[encode_name][ch] f.write(ch + ' => ' + uni + '\n') # end of for ch in encode_keys: f.close() # end of for encode_name, encode_keys in _all_unique_encodes_: # end of if __WRITE_CHARS_TXT: return (_all_unique_encodes_, _all_common_encodes_)
This function returns both unique_encodes and common_encodes as a tuple. unique_encodes : a list of (encode name, unique characters) pairs, where the characters are unique to that encode among the other available encodes. common_encodes : a set which has all compound characters common to all available encodes, i.e. with the common single characters removed. Author : Arulalan.T 04.08.2014
null
null
null
def auto2unicode(text): _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes() # get unique word which falls under any one of available encodes from # user passed text lines unique_chars = _get_unique_ch(text, _all_common_encodes_) # count common encode chars clen = len(_all_common_encodes_) msg = "Sorry, couldn't find encode :-(\n" msg += 'Need more words to find unique encode out side of %d ' % clen msg += 'common compound characters' if not unique_chars: print(msg) return '' # end of if not unique_chars: for encode_name, encode_keys in _all_unique_encodes_: if not len(encode_keys): continue for ch in encode_keys: # check whether encode char is present in word if ch in unique_chars: # found encode print(("Found encode : ", encode_name)) encode = _all_encodes_[encode_name] return encode2unicode(text, encode) # end of if ch in unique_chars: # end of for ch in encode_keys: else: print(msg) return ''
This function tries to identify the text's encoding among the available encodings. If it finds one, it converts the text into a unicode string. Author : Arulalan.T 04.08.2014
null
null
null
# use a generator in corpus for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus self.letter[next_letter] = self.letter[next_letter] + 1
def frequency_model( self )
build a letter frequency model for Tamil letters from a corpus
11.557938
6.750366
1.712194
# use a generator in corpus prev = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if prev: self.letter2[prev][next_letter] += 1 if ( verbose ) : print(prev) print(next_letter) print( self.letter2[prev][next_letter] ) prev = next_letter #update always return
def language_model(self,verbose=True)
builds a Tamil bigram letter model
8.04031
7.068974
1.137408
# use a generator in corpus p2 = None p1 = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if p2: trig = p2+p1+next_letter self.letter3[trig] = 1 + self.letter3.get(trig,0) p2 = p1 p1 = next_letter #update always return
def language_model(self,verbose=True)
builds a Tamil trigram letter model
9.86764
8.661056
1.139311
#punctuations = u'-,+,/,*,>,<,_,],[,{,},(,)'.split(',')+[','] #isspace_or_tamil = lambda x: not x in punctuations and tamil.utf8.istamil(x) # correct algorithm for get-tamil-words buf = [] for idx,let in enumerate(letters): if tamil.utf8.istamil( let ): buf.append( let ) else: if len(buf) > 0: yield u"".join( buf ) buf = [] if len(buf) > 0: yield u"".join(buf)
def get_tamil_words_iterable( letters )
given a list of UTF-8 letters, section them into words by grouping runs of consecutive Tamil letters
5.666115
5.488483
1.032364
''' Organization function -setups logging -gets inputdata -calls plotting function ''' args = get_args() try: utils.make_output_dir(args.outdir) utils.init_logs(args, tool="NanoComp") args.format = nanoplotter.check_valid_format(args.format) settings = vars(args) settings["path"] = path.join(args.outdir, args.prefix) sources = [args.fastq, args.bam, args.summary, args.fasta] sourcename = ["fastq", "bam", "summary", "fasta"] if args.split_runs: split_dict = validate_split_runs_file(args.split_runs) datadf = nanoget.get_input( source=[n for n, s in zip(sourcename, sources) if s][0], files=[f for f in sources if f][0], threads=args.threads, readtype=args.readtype, names=args.names, barcoded=args.barcoded, combine="track") datadf, settings = filter_and_transform_data(datadf, vars(args)) if args.raw: datadf.to_csv("NanoComp-data.tsv.gz", sep="\t", index=False, compression="gzip") if args.store: pickle.dump( obj=datadf, file=open(settings["path"] + "NanoComp-data.pickle", 'wb')) if args.split_runs: change_identifiers(datadf, split_dict) if args.barcoded: datadf["dataset"] = datadf["barcode"] identifiers = list(datadf["dataset"].unique()) write_stats( datadfs=[datadf[datadf["dataset"] == i] for i in identifiers], outputfile=settings["path"] + "NanoStats.txt", names=identifiers) if args.plot != 'false': plots = make_plots(datadf, settings) make_report(plots, path.join(args.outdir, args.prefix)) logging.info("Succesfully processed all input.") except Exception as e: logging.error(e, exc_info=True) raise
def main()
Organization function - sets up logging - gets input data - calls the plotting function
5.062328
4.481463
1.129615
try:
    content = [l.strip() for l in split_runs_file.readlines()]
    if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
        return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
    else:
        # log before exiting so the message also reaches the log file
        logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
        sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
except IndexError:
    logging.error("ERROR: Format of --split_runs tab separated file not as expected")
    sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
def validate_split_runs_file(split_runs_file)
Check if structure of file is as expected and return dictionary linking names to run_IDs.
3.205727
2.878418
1.113711
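A usage sketch for the validator above. The file content is a hypothetical example of the expected tab-separated --split_runs format; io.StringIO stands in for an open file handle.

import io

sample = io.StringIO("NAME\tRUN_ID\ncontrol\trun_abc\ntreated\trun_def\n")
mapping = validate_split_runs_file(sample)
# mapping == {'run_abc': 'control', 'run_def': 'treated'}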
for rid, name in split_dict.items():
    datadf.loc[datadf["runIDs"] == rid, "dataset"] = name
def change_identifiers(datadf, split_dict)
Change the dataset identifiers based on the names in the dictionary.
5.728153
5.177966
1.106255
'''
Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy)
'''
logging.info("Writing html report.")
# minimal head; the full head (doctype, styling) is not preserved in this snippet
html_head = "<!DOCTYPE html>\n<html>\n<head>\n<title>NanoComp report</title>\n</head>"
html_content = ["\n<body>\n<h1>NanoComp report</h1>"]
html_content.append("<h2>Summary statistics</h2>")
html_content.append(utils.stats2html(path + "NanoStats.txt"))
html_content.append('\n<br>\n<br>\n<br>\n<br>')
html_content.append("<h2>Plots</h2>")
for plot in plots:
    html_content.append("\n<h3>" + plot.title + "</h3>\n" + plot.encode())
    html_content.append('\n<br>\n<br>\n<br>\n<br>')
html_body = '\n'.join(html_content) + "</body></html>"
html_str = html_head + html_body
with open(path + "NanoComp-report.html", "w") as html_file:
    html_file.write(html_str)
return path + "NanoComp-report.html"
def make_report(plots, path)
Creates a fat html report based on the previously created files
plots is a list of Plot objects defined by a path and title
statsfile is the file to which the stats have been saved,
which is parsed to a table (rather dodgy)
4.047388
2.353196
1.719954
trailing = True
while 1:
    where = self.file.tell()
    line = self.file.readline()
    if line:
        if trailing and line in self.line_terminators:
            # This is just the line terminator added to the end of the file
            # before a new line, ignore.
            trailing = False
            continue
        if line[-1] in self.line_terminators:
            line = line[:-1]
            if line[-1:] == '\r' and '\r\n' in self.line_terminators:
                # found crlf: the '\n' was stripped above, strip the remaining '\r'
                line = line[:-1]
        trailing = False
        yield line
    else:
        trailing = True
        self.seek(where)
        time.sleep(delay)
def follow(self, delay=1.0)
\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
3.72578
3.706471
1.00521
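A short usage sketch, assuming the method above belongs to the 'tailer' package, which exposes a module-level follow() helper; the log file path is hypothetical.

import tailer   # assumption: the follow() method above is part of the tailer package

# behaves like `tail -f`: blocks and yields new lines as they are appended
for line in tailer.follow(open('/var/log/syslog')):
    print(line)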
header = {'content-type': 'application/json'}

if data:
    data = json.dumps(data)

path = url.format(**kwargs)
logger.debug("%s %s", method.__name__.upper(), path)
response = method(self.host + path,
                  data=data, headers=header, **self.method_kwargs)
logger.debug("%s %s", response.status_code, response.reason)
response.raise_for_status()
return response
def _http_response(self, url, method, data=None, **kwargs)
url -> full target url
method -> method from requests
data -> request body
kwargs -> url formatting args
2.816389
2.797451
1.00677
response = self._http_response(url, method, data=data, **kwargs)
if not response.content:
    return {}
return response.json()
def _http_call(self, url, method, data=None, **kwargs)
url -> full target url
method -> method from requests
data -> request body
kwargs -> url formatting args
3.80864
3.807411
1.000323
if q:
    q = '?q=' + q
return self._http_call('/v1/search' + q, get)
def search(self, q='')
GET /v1/search
7.473645
5.632633
1.326847
return self._http_call(self.IMAGE_LAYER, get, image_id=image_id)
def get_images_layer(self, image_id)
GET /v1/images/{image_id}/layer
8.489321
7.172321
1.183623
return self._http_call(self.IMAGE_LAYER, put, image_id=image_id, data=data)
def put_images_layer(self, image_id, data)
PUT /v1/images/(image_id)/layer
6.34995
6.550349
0.969406
return self._http_call(self.IMAGE_JSON, put, data=data, image_id=image_id)
def put_image_layer(self, image_id, data)
PUT /v1/images/(image_id)/json
8.290183
8.126082
1.020194
return self._http_call(self.IMAGE_JSON, get, image_id=image_id)
def get_image_layer(self, image_id)
GET /v1/images/(image_id)/json
10.990499
8.524636
1.289263
return self._http_call(self.IMAGE_ANCESTRY, get, image_id=image_id)
def get_image_ancestry(self, image_id)
GET /v1/images/(image_id)/ancestry
7.216264
6.480217
1.113584
return self._http_call(self.TAGS, get, namespace=namespace, repository=repository)
def get_repository_tags(self, namespace, repository)
GET /v1/repositories/(namespace)/(repository)/tags
12.412434
11.565237
1.073254
return self._http_call(self.TAGS + '/' + tag, get, namespace=namespace, repository=repository)
def get_image_id(self, namespace, repository, tag)
GET /v1/repositories/(namespace)/(repository)/tags/(tag*)
10.060489
9.185327
1.095278
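A hedged sketch chaining a few of the v1 registry calls above. 'client' is assumed to be an already-constructed instance of the wrapper these methods belong to; how it is built (host, auth) is not part of these records, and the namespace/repository/tag values are hypothetical.

# 'client' is an assumed, already-constructed instance of the v1 registry wrapper above
results = client.search('ubuntu')                              # GET /v1/search?q=ubuntu
tags = client.get_repository_tags('library', 'ubuntu')         # GET /v1/repositories/library/ubuntu/tags
image_id = client.get_image_id('library', 'ubuntu', 'latest')  # GET .../tags/latest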
return self._http_call(self.TAGS + '/' + tag + '/json', get, namespace=namespace, repository=repository)
def get_tag_json(self, namespace, repository, tag)
GET /v1/repositories/(namespace)/(repository)/tags/(tag*)/json
8.148648
8.010675
1.017224
return self._http_call(self.TAGS + '/' + tag, delete, namespace=namespace, repository=repository)
def delete_repository_tag(self, namespace, repository, tag)
DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)
10.641983
10.960274
0.97096
return self._http_call(self.TAGS + '/' + tag, put, data=image_id, namespace=namespace, repository=repository)
def set_tag(self, namespace, repository, tag, image_id)
PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)
6.555748
7.009186
0.935308
return self._http_call(self.REPO, delete, namespace=namespace, repository=repository)
def delete_repository(self, namespace, repository)
DELETE /v1/repositories/(namespace)/(repository)/
12.639989
13.953176
0.905886
if schema is None:
    schema = self.schema_2

header = {
    'content-type': content_type or 'application/json',
    'Accept': schema,
}

# Token specific part. We add the token in the header if necessary
auth = self.auth
token_required = auth.token_required
token = auth.token
desired_scope = auth.desired_scope
scope = auth.scope

if token_required:
    if not token or desired_scope != scope:
        logger.debug("Getting new token for scope: %s", desired_scope)
        auth.get_new_token()

    header['Authorization'] = 'Bearer %s' % self.auth.token

if data and not content_type:
    data = json.dumps(data)

path = url.format(**kwargs)
logger.debug("%s %s", method.__name__.upper(), path)
response = method(self.host + path,
                  data=data, headers=header, **self.method_kwargs)
logger.debug("%s %s", response.status_code, response.reason)
response.raise_for_status()
return response
def _http_response(self, url, method, data=None, content_type=None, schema=None, **kwargs)
url -> full target url
method -> method from requests
data -> request body
kwargs -> url formatting args
3.127872
3.113527
1.004608
# Check manufacturer and device ID match expected values.
mid = self._device.readU16BE(MCP9808_REG_MANUF_ID)
did = self._device.readU16BE(MCP9808_REG_DEVICE_ID)
self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid))
self._logger.debug('Read device ID: {0:04X}'.format(did))
return mid == 0x0054 and did == 0x0400
def begin(self)
Start taking temperature measurements. Returns True if the device is initialized, False otherwise.
3.134336
2.983654
1.050503
# Read temperature register value.
t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP)
self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF))
# Scale and convert to signed value.
temp = (t & 0x0FFF) / 16.0
if t & 0x1000:
    temp -= 256.0
return temp
def readTempC(self)
Read sensor and return its value in degrees celsius.
3.199493
3.214386
0.995367
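A usage sketch covering the two MCP9808 methods above. The import path follows the Adafruit_MCP9808 Python package layout, which is an assumption; I2C bus and address use the library defaults.

# Hedged sketch: package layout assumed from the Adafruit_MCP9808 library
import Adafruit_MCP9808.MCP9808 as MCP9808

sensor = MCP9808.MCP9808()
if sensor.begin():   # verifies manufacturer (0x0054) and device (0x0400) IDs
    print('Temp: {0:.2f} C'.format(sensor.readTempC()))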
if prefix is None:
    prefix = ""
app_label = model._meta.app_label
model_lower = model.__name__.lower()
return '%s%s_%s_%s' % (prefix, app_label, model_lower, action)
def crud_url_name(model, action, prefix=None)
Returns url name for given model and action.
2.332433
2.089588
1.116217
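A short usage sketch for crud_url_name, assuming a hypothetical Author model inside an app labelled 'testapp' (neither appears in the record itself).

# Hypothetical: Author is a Django model in an app labelled 'testapp'
crud_url_name(Author, 'update')                  # -> 'testapp_author_update'
crud_url_name(Author, 'list', prefix='admin_')   # -> 'admin_testapp_author_list'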
fields = OrderedDict()
info = model._meta
if include:
    selected = [info.get_field(name) for name in include]
else:
    selected = [field for field in info.fields if field.editable]
for field in selected:
    fields[field.name] = field.verbose_name
return fields
def get_fields(model, include=None)
Returns ordered dict in format 'field': 'verbose_name'
2.555241
2.183226
1.170397
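An illustrative call for get_fields, again assuming a hypothetical Author model with 'name' and 'birth_date' fields and Django's default verbose names.

# Hypothetical Author model with 'name' and 'birth_date' editable fields
get_fields(Author)
# -> OrderedDict([('name', 'name'), ('birth_date', 'birth date')])
get_fields(Author, include=['name'])
# -> OrderedDict([('name', 'name')])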
if additional_kwargs is None:
    additional_kwargs = {}

if isinstance(instance_or_model, Model):
    additional_kwargs['pk'] = instance_or_model.pk
    model_name = instance_or_model._meta.model
else:
    model_name = instance_or_model

return reverse(
    crud_url_name(model_name, action, prefix),
    kwargs=additional_kwargs
)
def crud_url(instance_or_model, action, prefix=None, additional_kwargs=None)
Shortcut function returns URL for instance or model and action.

Example::

    crud_url(author, 'update')

Is same as:

    reverse('testapp_author_update', kwargs={'pk': author.pk})

Example::

    crud_url(Author, 'update')

Is same as:

    reverse('testapp_author_list')
1.774122
2.128448
0.833529