Fields: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
return _regex.compile(_apply_search_backrefs(pattern, flags), flags, **kwargs)
def compile_search(pattern, flags=0, **kwargs)
Compile with extended search references.
12.84441
9.54975
1.345
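A minimal usage sketch for the record above (assuming this is the backrefs library's `bregex` module, where `\Q...\E` is one of the extended search references that `compile_search` pre-processes before handing the pattern to `regex`):

from backrefs import bregex

pattern = bregex.compile_search(r'\Q(lit)\E\w+')  # \Q...\E quotes the literal span
print(pattern.match('(lit)eral') is not None)  # True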
def expandf(m, format):  # noqa A002
    _assert_expandable(format, True)
    return _apply_replace_backrefs(m, format, flags=FORMAT)
Expand the string using the format replace pattern or function.
null
null
null
raise ValueError("Compiled replace is not a format object!") pattern = compile_search(pattern, flags) rflags = FORMAT if is_string else 0 return _regex.subn( pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string, *args, **kwargs )
def subfn(pattern, format, string, *args, **kwargs): # noqa A002 flags = args[4] if len(args) > 4 else kwargs.get('flags', 0) is_replace = _is_replace(format) is_string = isinstance(format, (str, bytes)) if is_replace and not format.use_format
Wrapper for `subfn`.
6.936629
6.317356
1.098027
is_replace = _is_replace(template)
is_string = isinstance(template, (str, bytes))
if is_replace and use_format != template.use_format:
    raise ValueError("Compiled replace cannot be a format object!")
if is_replace or (is_string and self.auto_compile):
    return self.compile(template, (FORMAT if use_format and not is_replace else 0))
elif is_string and use_format:
    # Reject an attempt to run format replace when auto-compiling
    # of template strings has been disabled and we are using a
    # template string.
    raise AttributeError('Format replaces cannot be called without compiling replace template!')
else:
    return template
def _auto_compile(self, template, use_format=False)
Compile replacements.
7.186239
6.792421
1.057979
return self._pattern.search(string, *args, **kwargs)
def search(self, string, *args, **kwargs)
Apply `search`.
6.903197
6.336977
1.089352
return self._pattern.match(string, *args, **kwargs)
def match(self, string, *args, **kwargs)
Apply `match`.
5.787516
4.857582
1.19144
return self._pattern.fullmatch(string, *args, **kwargs)
def fullmatch(self, string, *args, **kwargs)
Apply `fullmatch`.
5.608656
4.247522
1.320454
return self._pattern.split(string, *args, **kwargs)
def split(self, string, *args, **kwargs)
Apply `split`.
6.619279
6.155097
1.075414
return self._pattern.splititer(string, *args, **kwargs)
def splititer(self, string, *args, **kwargs)
Apply `splititer`.
5.896883
5.211369
1.131542
return self._pattern.findall(string, *args, **kwargs)
def findall(self, string, *args, **kwargs)
Apply `findall`.
5.973597
4.936106
1.210184
return self._pattern.finditer(string, *args, **kwargs)
def finditer(self, string, *args, **kwargs)
Apply `finditer`.
5.169825
4.568673
1.131581
return self._pattern.sub(self._auto_compile(repl), string, *args, **kwargs)
def sub(self, repl, string, *args, **kwargs)
Apply `sub`.
9.051354
8.801588
1.028377
global_retry = False
if (self.version == _regex.V1 or scoped) and '-x' in text and self.verbose:
    self.verbose = False
elif 'x' in text and not self.verbose:
    self.verbose = True
    if not scoped and self.version == _regex.V0:
        self.temp_global_flag_swap['verbose'] = True
        global_retry = True
if 'V0' in text and self.version == _regex.V1:  # pragma: no cover
    # Default is V0 if none is selected,
    # so it is unlikely that this will be selected.
    self.temp_global_flag_swap['version'] = True
    self.version = _regex.V0
    global_retry = True
elif "V1" in text and self.version == _regex.V0:
    self.temp_global_flag_swap['version'] = True
    self.version = _regex.V1
    global_retry = True
if global_retry:
    raise GlobalRetryException('Global Retry')
def flags(self, text, scoped=False)
Analyze flags.
3.982018
3.879079
1.026537
current = []
if not in_group and t == "R":
    current.append(self._re_line_break)
elif t == 'e':
    current.extend(self._re_escape)
else:
    current.extend(["\\", t])
return current
def reference(self, t, i, in_group=False)
Handle references.
6.548376
6.30073
1.039304
index = i.index
value = ['[']
try:
    c = next(i)
    if c != ':':
        raise ValueError('Not a valid property!')
    else:
        value.append(c)
        c = next(i)
        if c == '^':
            value.append(c)
            c = next(i)
        while c != ':':
            if c not in _PROPERTY:
                raise ValueError('Not a valid property!')
            if c not in _PROPERTY_STRIP:
                value.append(c)
            c = next(i)
        value.append(c)
        c = next(i)
        if c != ']' or not value:
            raise ValueError('Unmatched ]')
        value.append(c)
except Exception:
    i.rewind(i.index - index)
    value = []
return ''.join(value) if value else None
def get_posix(self, i)
Get POSIX.
3.38493
3.371401
1.004013
index = i.index
value = ['(']
version = False
toggle = False
end = ':' if scoped else ')'
try:
    c = next(i)
    if c != '?':
        i.rewind(1)
        return None
    value.append(c)
    c = next(i)
    while c != end:
        if toggle:
            if c not in _SCOPED_FLAGS:
                raise ValueError('Bad scope')
            toggle = False
        elif (not version0 or scoped) and c == '-':
            toggle = True
        elif version:
            if c not in _VERSIONS:
                raise ValueError('Bad version')
            version = False
        elif c == 'V':
            version = True
        elif c not in _GLOBAL_FLAGS and c not in _SCOPED_FLAGS:
            raise ValueError("Bad flag")
        value.append(c)
        c = next(i)
    value.append(c)
except Exception:
    i.rewind(i.index - index)
    value = []
return ''.join(value) if value else None
def get_flags(self, i, version0, scoped=False)
Get flags.
3.425781
3.363781
1.018432
# (?flags)
flags = self.get_flags(i, self.version == _regex.V0)
if flags:
    self.flags(flags[2:-1])
    return [flags]
# (?#comment)
comments = self.get_comments(i)
if comments:
    return [comments]
verbose = self.verbose
# (?flags:pattern)
flags = self.get_flags(i, (self.version == _regex.V0), True)
if flags:
    t = flags
    self.flags(flags[2:-1], scoped=True)
current = []
try:
    while t != ')':
        if not current:
            current.append(t)
        else:
            current.extend(self.normal(t, i))
        t = next(i)
except StopIteration:
    pass
self.verbose = verbose
if t == ")":
    current.append(t)
return current
def subgroup(self, t, i)
Handle parenthesis.
4.795382
4.627288
1.036327
current = []
pos = i.index - 1
found = 0
sub_first = None
escaped = False
first = None
try:
    while True:
        if not escaped and t == "\\":
            escaped = True
        elif escaped:
            escaped = False
            current.extend(self.reference(t, i, True))
        elif t == "[" and not found:
            found += 1
            first = pos
            current.append(t)
        elif t == "[" and found and self.version == _regex.V1:
            # Start of sub char set found
            posix = None if self.is_bytes else self.get_posix(i)
            if posix:
                current.append(posix)
                pos = i.index - 2
            else:
                found += 1
                sub_first = pos
                current.append(t)
        elif t == "[":
            posix = None if self.is_bytes else self.get_posix(i)
            if posix:
                current.append(posix)
                pos = i.index - 2
            else:
                current.append(t)
        elif t == "^" and found == 1 and (pos == first + 1):
            # Found ^ at start of first char set; adjust 1st char position
            current.append(t)
            first = pos
        elif self.version == _regex.V1 and t == "^" and found > 1 and (pos == sub_first + 1):
            # Found ^ at start of sub char set; adjust 1st char sub position
            current.append(t)
            sub_first = pos
        elif t == "]" and found == 1 and (pos != first + 1):
            # First char set closed; log range
            current.append(t)
            found = 0
            break
        elif self.version == _regex.V1 and t == "]" and found > 1 and (pos != sub_first + 1):
            # Sub char set closed; decrement depth counter
            found -= 1
            current.append(t)
        else:
            current.append(t)
        pos += 1
        t = next(i)
except StopIteration:
    pass
if escaped:
    current.append(t)
return current
def char_groups(self, t, i)
Handle character groups.
2.979681
2.97039
1.003128
self.verbose = bool(self.re_verbose)
self.version = self.re_version if self.re_version else _regex.DEFAULT_VERSION
self.global_flag_swap = {
    "version": self.re_version != 0,
    "verbose": False
}
self.temp_global_flag_swap = {
    "version": False,
    "verbose": False
}
new_pattern = []
text = self.process_quotes(self.search.decode('latin-1') if self.is_bytes else self.search)
i = _util.StringIter(text)
iter(i)
retry = True
while retry:
    retry = False
    try:
        new_pattern = self.main_group(i)
    except GlobalRetryException:
        # Prevent a loop of retry over and over for a pattern like ((?V0)(?V1))
        # or on V0 (?-x:(?x))
        if self.temp_global_flag_swap['version']:
            if self.global_flag_swap['version']:
                raise LoopException('Global version flag recursion.')
            else:
                self.global_flag_swap["version"] = True
        if self.temp_global_flag_swap['verbose']:
            if self.global_flag_swap['verbose']:
                raise LoopException('Global verbose flag recursion.')
            else:
                self.global_flag_swap['verbose'] = True
        self.temp_global_flag_swap = {
            "version": False,
            "verbose": False
        }
        i.rewind(i.index)
        retry = True
return "".join(new_pattern).encode('latin-1') if self.is_bytes else "".join(new_pattern)
def parse(self)
Apply search template.
4.335455
4.18644
1.035595
groups = []
literals = []
replacements = _compile_replacement_helper(pattern, template)
count = 0
for part in replacements:
    if isinstance(part, int):
        literals.append(None)
        groups.append((count, part))
    else:
        literals.append(part)
    count += 1
return groups, literals
def regex_parse_template(self, template, pattern)
Parse template for the regex module. Do NOT edit the literal list returned by _compile_replacement_helper as you will edit the original cached value. Copy the values instead.
4.456841
3.149389
1.415145
i = _util.StringIter((self._original.decode('latin-1') if self.is_bytes else self._original))
iter(i)
self.result = [""]
while True:
    try:
        t = next(i)
        if self.use_format and t in _CURLY_BRACKETS:
            self.handle_format(t, i)
        elif t == '\\':
            try:
                t = next(i)
                self.reference(t, i)
            except StopIteration:
                self.result.append(t)
                raise
        else:
            self.result.append(t)
    except StopIteration:
        break
if len(self.result) > 1:
    self.literal_slots.append("".join(self.result))
    del self.result[:]
    self.result.append("")
    self.slot += 1
if self.is_bytes:
    self._template = "".join(self.literal_slots).encode('latin-1')
else:
    self._template = "".join(self.literal_slots)
self.groups, self.literals = self.regex_parse_template(self._template, pattern)
def parse_template(self, pattern)
Parse template.
3.667202
3.610326
1.015754
if m is None:
    raise ValueError("Match is None!")
sep = m.string[:0]
if isinstance(sep, bytes) != self._bytes:
    raise TypeError('Match string type does not match expander string type!')
text = []
# Expand string
for x in range(0, len(self.literals)):
    index = x
    l = self.literals[x]
    if l is None:
        g_index = self._get_group_index(index)
        span_case, single_case, capture = self._get_group_attributes(index)
        if not self.use_format:
            # Non format replace
            try:
                l = m.group(g_index)
            except IndexError:  # pragma: no cover
                raise IndexError("'%d' is out of range!" % capture)
        else:
            # String format replace
            try:
                obj = m.captures(g_index)
            except IndexError:  # pragma: no cover
                raise IndexError("'%d' is out of range!" % g_index)
            l = _util.format_string(m, obj, capture, self._bytes)
        if span_case is not None:
            if span_case == _LOWER:
                l = l.lower()
            else:
                l = l.upper()
        if single_case is not None:
            if single_case == _LOWER:
                l = l[0:1].lower() + l[1:]
            else:
                l = l[0:1].upper() + l[1:]
    text.append(l)
return sep.join(text)
def expand(self, m)
Using the template, expand the string.
3.883737
3.749861
1.035702
if value in GROUP_ESCAPES:
    # Escape characters that are (or will be in the future) problematic
    c = "\\x%02x\\x%02x" % (0x5c, value)
elif value <= 0xFF:
    c = "\\x%02x" % value
elif value <= 0xFFFF:
    c = "\\u%04x" % value
else:
    c = "\\U%08x" % value
return c
def uniformat(value)
Convert a Unicode char.
3.891886
3.635343
1.070569
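Worked examples of the escape tiers above (assuming the sample code points are not in the module's GROUP_ESCAPES set):

uniformat(0x41)     # returns '\\x41'        (value <= 0xFF)
uniformat(0x2603)   # returns '\\u2603'      (value <= 0xFFFF)
uniformat(0x1F600)  # returns '\\U0001f600'  (astral plane)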
if len(unirange) < 2:
    unirange.append(unirange[0])
if is_bytes:
    if unirange[0] > MAXASCII:
        return None
    if unirange[1] > MAXASCII:
        unirange[1] = MAXASCII
return [x for x in range(unirange[0], unirange[1] + 1)]
def create_span(unirange, is_bytes=False)
Clamp the Unicode range.
2.504945
2.335704
1.072458
all_chars = ALL_ASCII if is_bytes else ALL_CHARS
s = set()
for k, v in table.items():
    s.update(v)
if name in table:
    table[name] = list(set(table[name]) | (all_chars - s))
else:
    table[name] = list(all_chars - s)
def not_explicitly_defined(table, name, is_bytes=False)
Compose a table entry, under the specified name, of the values not explicitly defined elsewhere in the table.
3.380039
3.204119
1.054904
fmt = bytesformat if is_bytes else uniformat
maxrange = MAXASCII if is_bytes else MAXUNICODE
for k1 in sorted(d.keys()):
    v1 = d[k1]
    if not isinstance(v1, list):
        char2range(v1, is_bytes=is_bytes, invert=invert)
    else:
        inverted = k1.startswith('^')
        v1.sort()
        last = None
        first = None
        ilast = None
        ifirst = None
        v2 = []
        iv2 = []
        if v1 and v1[0] != 0:
            ifirst = 0
        for i in v1:
            if first is None:
                first = i
                last = i
            elif i == last + 1:
                last = i
            elif first is not None:
                if first == last:
                    v2.append(fmt(first))
                else:
                    v2.append("%s-%s" % (fmt(first), fmt(last)))
                if invert and ifirst is not None:
                    ilast = first - 1
                    if ifirst == ilast:
                        iv2.append(fmt(ifirst))
                    else:
                        iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
                ifirst = last + 1
                first = i
                last = i
        if not v1:
            iv2 = ["%s-%s" % (fmt(0), fmt(maxrange))]
        elif first is not None:
            if first == last:
                v2.append(fmt(first))
            else:
                v2.append("%s-%s" % (fmt(first), fmt(last)))
            if invert and ifirst is not None:
                ilast = first - 1
                if ifirst == ilast:
                    iv2.append(fmt(ifirst))
                else:
                    iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
            ifirst = last + 1
            if invert and ifirst <= maxrange:
                ilast = maxrange
                if ifirst == ilast:
                    iv2.append(fmt(ifirst))
                else:
                    iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
        d[k1] = ''.join(v2)
        if invert:
            d[k1[1:] if inverted else '^' + k1] = ''.join(iv2)
def char2range(d, is_bytes=False, invert=True)
Convert the characters in the dict to a range in string form.
2.216773
2.197866
1.008602
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    f.write('%s_blocks = {' % prefix)
    no_block = []
    last = -1
    max_range = MAXASCII if ascii_props else MAXUNICODE
    formatter = bytesformat if ascii_props else uniformat
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'Blocks.txt'), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split(';')
                if len(data) < 2:
                    continue
                block = [int(i, 16) for i in data[0].strip().split('..')]
                if block[0] > last + 1:
                    if (last + 1) <= max_range:
                        endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
                        no_block.append((last + 1, endval))
                last = block[1]
                name = format_name(data[1])
                inverse_range = []
                if block[0] > max_range:
                    if ascii_props:
                        f.write('\n "%s": "",' % name)
                        f.write('\n "^%s": "%s-%s",' % (name, formatter(0), formatter(max_range)))
                    continue
                if block[0] > 0:
                    inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
                if block[1] < max_range:
                    inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
                f.write('\n "%s": "%s-%s",' % (name, formatter(block[0]), formatter(block[1])))
                f.write('\n "^%s": "%s",' % (name, ''.join(inverse_range)))
    if last < max_range:
        if (last + 1) <= max_range:
            no_block.append((last + 1, max_range))
    last = -1
    no_block_inverse = []
    if not no_block:
        no_block_inverse.append((0, max_range))
    else:
        for piece in no_block:
            if piece[0] > last + 1:
                no_block_inverse.append((last + 1, piece[0] - 1))
            last = piece[1]
    for block, name in ((no_block, 'noblock'), (no_block_inverse, '^noblock')):
        f.write('\n "%s": "' % name)
        for piece in block:
            if piece[0] == piece[1]:
                f.write(formatter(piece[0]))
            else:
                f.write("%s-%s" % (formatter(piece[0]), formatter(piece[1])))
        f.write('",')
    f.write('\n}\n')
def gen_blocks(output, ascii_props=False, append=False, prefix="")
Generate Unicode blocks.
2.307734
2.280556
1.011917
obj = {}
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedCombiningClass.txt'), 'r', 'utf-8') as uf:
    for line in uf:
        if not line.startswith('#'):
            data = line.split('#')[0].split(';')
            if len(data) < 2:
                continue
            span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
            if span is None:
                continue
            name = format_name(data[1])
            if name not in obj:
                obj[name] = []
            obj[name].extend(span)
for x in range(0, 256):
    key = str(x)
    if key not in obj:
        obj[key] = []
for name in list(obj.keys()):
    s = set(obj[name])
    obj[name] = sorted(s)
not_explicitly_defined(obj, '0', is_bytes=ascii_props)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    # Write out the Unicode properties
    f.write('%s_canonical_combining_class = {\n' % prefix)
    count = len(obj) - 1
    i = 0
    for k1, v1 in sorted(obj.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
def gen_ccc(output, ascii_props=False, append=False, prefix="")
Generate `canonical combining class` property.
3.091195
2.976253
1.03862
obj = {}
obj2 = {}
aliases = {}
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'PropertyValueAliases.txt'), 'r', 'utf-8') as uf:
    for line in uf:
        if line.startswith('sc ;'):
            values = line.split(';')
            aliases[format_name(values[1].strip())] = format_name(values[2].strip())
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, file_name_ext), 'r', 'utf-8') as uf:
    for line in uf:
        if not line.startswith('#'):
            data = line.split('#')[0].split(';')
            if len(data) < 2:
                continue
            exts = [aliases[format_name(n)] for n in data[1].strip().split(' ')]
            span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
            for ext in exts:
                if ext not in obj2:
                    obj2[ext] = []
                if span is None:
                    continue
                obj2[ext].extend(span)
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, file_name), 'r', 'utf-8') as uf:
    for line in uf:
        if not line.startswith('#'):
            data = line.split('#')[0].split(';')
            if len(data) < 2:
                continue
            span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
            name = format_name(data[1])
            if name not in obj:
                obj[name] = []
            if name not in obj2:
                obj2[name] = []
            if span is None:
                continue
            obj[name].extend(span)
            obj2[name].extend(span)
for name in list(obj.keys()):
    s = set(obj[name])
    obj[name] = sorted(s)
for name in list(obj2.keys()):
    s = set(obj2[name])
    obj2[name] = sorted(s)
if notexplicit:
    not_explicitly_defined(obj, notexplicit, is_bytes=ascii_props)
    not_explicitly_defined(obj2, notexplicit, is_bytes=ascii_props)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
char2range(obj2, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    # Write out the Unicode properties
    f.write('%s_%s = {\n' % (prefix, obj_name))
    count = len(obj) - 1
    i = 0
    for k1, v1 in sorted(obj.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
with codecs.open(output_ext, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    # Write out the Unicode properties
    f.write('%s_%s = {\n' % (prefix, obj_ext_name))
    count = len(obj2) - 1
    i = 0
    for k1, v1 in sorted(obj2.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
def gen_scripts(file_name, file_name_ext, obj_name, obj_ext_name, output, output_ext, field=1, notexplicit=None, ascii_props=False, append=False, prefix="")
Generate `script` property.
1.884866
1.888406
0.998125
obj = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
    for line in uf:
        if not line.startswith('#'):
            data = line.split('#')[0].split(';')
            if len(data) < 2:
                continue
            span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
            name = format_name(data[1])
            if name not in obj:
                obj[name] = []
            if span is None:
                continue
            obj[name].extend(span)
unassigned = set()
for x in obj.values():
    unassigned |= set(x)
obj['na'] = list(all_chars - unassigned)
for name in list(obj.keys()):
    s = set(obj[name])
    obj[name] = sorted(s)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    # Write out the Unicode properties
    f.write('%s_age = {\n' % prefix)
    count = len(obj) - 1
    i = 0
    for k1, v1 in sorted(obj.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
def gen_age(output, ascii_props=False, append=False, prefix="")
Generate `age` property.
3.201768
3.169651
1.010133
categories = []
nf = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
file_name = os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedNormalizationProps.txt')
with codecs.open(file_name, 'r', 'utf-8') as uf:
    for line in uf:
        if not line.startswith('#'):
            data = line.split('#')[0].split(';')
            if len(data) < 2:
                continue
            if not data[1].strip().lower().endswith('_qc'):
                continue
            span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
            if span is None:
                continue
            name = format_name(data[1][:-3] + 'quickcheck')
            subvalue = format_name(data[2])
            if name not in nf:
                nf[name] = {}
                categories.append(name)
            if subvalue not in nf[name]:
                nf[name][subvalue] = []
            nf[name][subvalue].extend(span)
for k1, v1 in nf.items():
    temp = set()
    for k2 in list(v1.keys()):
        temp |= set(v1[k2])
    v1['y'] = list(all_chars - temp)
for k1, v1 in nf.items():
    for name in list(v1.keys()):
        s = set(nf[k1][name])
        nf[k1][name] = sorted(s)
# Convert characters values to ranges
char2range(nf, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    for key, value in sorted(nf.items()):
        # Write out the Unicode properties
        f.write('%s_%s = {\n' % (prefix, key.replace('quickcheck', '_quick_check')))
        count = len(value) - 1
        i = 0
        for k1, v1 in sorted(value.items()):
            f.write(' "%s": "%s"' % (k1, v1))
            if i == count:
                f.write('\n}\n')
            else:
                f.write(',\n')
            i += 1
return categories
def gen_nf_quick_check(output, ascii_props=False, append=False, prefix="")
Generate quick check properties.
3.022563
2.996472
1.008707
categories = []
binary_props = (
    ('DerivedCoreProperties.txt', None),
    ('PropList.txt', None),
    ('DerivedNormalizationProps.txt', ('Changes_When_NFKC_Casefolded', 'Full_Composition_Exclusion'))
)
binary = {}
for filename, include in binary_props:
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, filename), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split('#')[0].split(';')
                if len(data) < 2:
                    continue
                if include and data[1].strip() not in include:
                    continue
                span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
                name = format_name(data[1])
                if name not in binary:
                    binary[name] = []
                    categories.append(name)
                if span is None:
                    continue
                binary[name].extend(span)
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'CompositionExclusions.txt'), 'r', 'utf-8') as uf:
    name = 'compositionexclusion'
    for line in uf:
        if not line.startswith('#'):
            data = [x.strip() for x in line.split('#')[0] if x.strip()]
            if not data:
                continue
            span = create_span([int(data[0], 16)], is_bytes=ascii_props)
            if span is None:
                continue
            if name not in binary:
                binary[name] = []
                categories.append(name)
            binary[name].extend(span)
            binary['full' + name].extend(span)
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'UnicodeData.txt'), 'r', 'utf-8') as uf:
    name = 'bidimirrored'
    for line in uf:
        data = line.strip().split(';')
        if data:
            if data[9].strip().lower() != 'y':
                continue
            span = create_span([int(data[0].strip(), 16)], is_bytes=ascii_props)
            if span is None:
                continue
            if name not in binary:
                binary[name] = []
                categories.append(name)
            binary[name].extend(span)
for name in list(binary.keys()):
    s = set(binary[name])
    binary[name] = sorted(s)
gen_uposix(table, binary)
# Convert characters values to ranges
char2range(binary, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    # Write out the Unicode properties
    f.write('%s_binary = {\n' % prefix)
    count = len(binary) - 1
    i = 0
    for k1, v1 in sorted(binary.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
return categories[:]
def gen_binary(table, output, ascii_props=False, append=False, prefix="")
Generate binary properties.
2.789795
2.763453
1.009532
bidi_class = {}
max_range = MAXASCII if ascii_props else MAXUNICODE
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'UnicodeData.txt'), 'r', 'utf-8') as uf:
    for line in uf:
        data = line.strip().split(';')
        if data:
            bidi = data[4].strip().lower()
            if not bidi:
                continue
            value = int(data[0].strip(), 16)
            if bidi not in bidi_class:
                bidi_class[bidi] = []
            if value > max_range:
                continue
            bidi_class[bidi].append(value)
for name in list(bidi_class.keys()):
    s = set(bidi_class[name])
    bidi_class[name] = sorted(s)
# Convert characters values to ranges
char2range(bidi_class, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
    if not append:
        f.write(HEADER)
    f.write('%s_bidi_classes = {\n' % prefix)
    count = len(bidi_class) - 1
    i = 0
    for k1, v1 in sorted(bidi_class.items()):
        f.write(' "%s": "%s"' % (k1, v1))
        if i == count:
            f.write('\n}\n')
        else:
            f.write(',\n')
        i += 1
def gen_bidi(output, ascii_props=False, append=False, prefix="")
Generate `bidi class` property.
2.822849
2.816285
1.002331
# `Alnum: [\p{L&}\p{Nd}]`
s = set(table['l']['c'] + table['n']['d'])
posix_table["posixalnum"] = list(s)
# `Alpha: [\p{L&}]`
s = set(table['l']['c'])
posix_table["posixalpha"] = list(s)
# `ASCII: [\x00-\x7F]`
s = set([x for x in range(0, 0x7F + 1)])
posix_table["posixascii"] = list(s)
# `Blank: [\p{Zs}\t]`
s = set(table['z']['s'] + [0x09])
posix_table["posixblank"] = list(s)
# `Cntrl: [\p{Cc}]`
s = set(table['c']['c'])
posix_table["posixcntrl"] = list(s)
# `Digit: [\p{Nd}]`
s = set(table['n']['d'])
posix_table["posixdigit"] = list(s)
# `Graph: [^\p{Z}\p{C}]`
s = set()
for table_name in ('z', 'c'):
    for sub_table_name in table[table_name]:
        if not sub_table_name.startswith('^'):
            s |= set(table[table_name][sub_table_name])
posix_table["^posixgraph"] = list(s)
# `Lower: [\p{Ll}]`
s = set(table['l']['l'])
posix_table["posixlower"] = list(s)
# `Print: [\P{C}]`
s = set()
for table_name in ('c',):
    for sub_table_name in table[table_name]:
        if not sub_table_name.startswith('^'):
            s |= set(table[table_name][sub_table_name])
posix_table["^posixprint"] = list(s)
# `Punct: [\p{P}\p{S}]`
s = set()
for table_name in ('p', 's'):
    for sub_table_name in table[table_name]:
        if not sub_table_name.startswith('^'):
            s |= set(table[table_name][sub_table_name])
posix_table["posixpunct"] = list(s)
# `Space: [\p{Z}\t\r\n\v\f]`
s = set()
for table_name in ('z',):
    for sub_table_name in table[table_name]:
        if not sub_table_name.startswith('^'):
            s |= set(table[table_name][sub_table_name])
s |= set([x for x in range(0x09, 0x0e)])
posix_table["posixspace"] = list(s)
# `Upper: [\p{Lu}]`
s = set(table['l']['u'])
posix_table["posixupper"] = list(s)
# `XDigit: [A-Fa-f0-9]`
s = set([x for x in range(0x30, 0x39 + 1)])
s |= set([x for x in range(0x41, 0x46 + 1)])
s |= set([x for x in range(0x61, 0x66 + 1)])
posix_table["posixxdigit"] = list(s)
def gen_uposix(table, posix_table)
Generate the posix table.
1.614278
1.622628
0.994854
if not os.path.exists(output):
    os.mkdir(output)
gen_properties(output)
def build_unicode_property_table(output)
Build and write out Unicode property table.
4.940593
5.035666
0.98112
if not os.path.exists(output):
    os.mkdir(output)
gen_properties(output, ascii_props=True, append=True)
def build_ascii_property_table(output)
Build and write out ASCII property table.
6.420902
6.397655
1.003634
global UNIVERSION
global UNIVERSION_INFO
if version is None:
    version = unicodedata.unidata_version
UNIVERSION = version
UNIVERSION_INFO = tuple([int(x) for x in UNIVERSION.split('.')])
def set_version(version)
Set version.
5.058498
4.866297
1.039496
if mode == POSIX_BYTES:
    return unidata.ascii_posix_properties[value]
elif mode == POSIX_UNICODE:
    return unidata.unicode_binary[
        ('^posix' + value[1:]) if value.startswith('^') else ('posix' + value)
    ]
else:
    return unidata.unicode_posix_properties[value]
def get_posix_property(value, mode=POSIX)
Retrieve the posix category.
6.634873
6.164749
1.07626
obj = unidata.ascii_properties if is_bytes else unidata.unicode_properties
if value.startswith('^'):
    negate = True
    value = value[1:]
else:
    negate = False
value = unidata.unicode_alias['generalcategory'].get(value, value)
assert 1 <= len(value) <= 2, 'Invalid property!'
if not negate:
    p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], None)
    value = ''.join(
        [v for k, v in obj.get(p1, {}).items() if not k.startswith('^')]
    ) if p2 is None else obj.get(p1, {}).get(p2, '')
else:
    p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], '')
    value = obj.get(p1, {}).get('^' + p2, '')
assert value, 'Invalid property!'
return value
def get_gc_property(value, is_bytes=False)
Get `GC` property.
2.863403
2.801531
1.022085
obj = unidata.ascii_binary if is_bytes else unidata.unicode_binary
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['binary'].get(negated, negated)
else:
    value = unidata.unicode_alias['binary'].get(value, value)
return obj[value]
def get_binary_property(value, is_bytes=False)
Get `BINARY` property.
4.439167
4.152202
1.069112
obj = unidata.ascii_canonical_combining_class if is_bytes else unidata.unicode_canonical_combining_class
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['canonicalcombiningclass'].get(negated, negated)
else:
    value = unidata.unicode_alias['canonicalcombiningclass'].get(value, value)
return obj[value]
def get_canonical_combining_class_property(value, is_bytes=False)
Get `CANONICAL COMBINING CLASS` property.
3.650891
3.436565
1.062366
obj = unidata.ascii_east_asian_width if is_bytes else unidata.unicode_east_asian_width
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['eastasianwidth'].get(negated, negated)
else:
    value = unidata.unicode_alias['eastasianwidth'].get(value, value)
return obj[value]
def get_east_asian_width_property(value, is_bytes=False)
Get `EAST ASIAN WIDTH` property.
3.847687
3.578709
1.075161
obj = unidata.ascii_grapheme_cluster_break if is_bytes else unidata.unicode_grapheme_cluster_break
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['graphemeclusterbreak'].get(negated, negated)
else:
    value = unidata.unicode_alias['graphemeclusterbreak'].get(value, value)
return obj[value]
def get_grapheme_cluster_break_property(value, is_bytes=False)
Get `GRAPHEME CLUSTER BREAK` property.
3.597194
3.361757
1.070034
obj = unidata.ascii_line_break if is_bytes else unidata.unicode_line_break
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['linebreak'].get(negated, negated)
else:
    value = unidata.unicode_alias['linebreak'].get(value, value)
return obj[value]
def get_line_break_property(value, is_bytes=False)
Get `LINE BREAK` property.
4.329075
4.080825
1.060833
obj = unidata.ascii_sentence_break if is_bytes else unidata.unicode_sentence_break
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['sentencebreak'].get(negated, negated)
else:
    value = unidata.unicode_alias['sentencebreak'].get(value, value)
return obj[value]
def get_sentence_break_property(value, is_bytes=False)
Get `SENTENCE BREAK` property.
4.455896
4.077447
1.092815
obj = unidata.ascii_word_break if is_bytes else unidata.unicode_word_break
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['wordbreak'].get(negated, negated)
else:
    value = unidata.unicode_alias['wordbreak'].get(value, value)
return obj[value]
def get_word_break_property(value, is_bytes=False)
Get `WORD BREAK` property.
4.225402
3.968977
1.064607
obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated)
else:
    value = unidata.unicode_alias['hangulsyllabletype'].get(value, value)
return obj[value]
def get_hangul_syllable_type_property(value, is_bytes=False)
Get `HANGUL SYLLABLE TYPE` property.
3.663822
3.414356
1.073064
if PY35:
    obj = unidata.ascii_indic_positional_category if is_bytes else unidata.unicode_indic_positional_category
    alias_key = 'indicpositionalcategory'
else:
    obj = unidata.ascii_indic_matra_category if is_bytes else unidata.unicode_indic_matra_category
    alias_key = 'indicmatracategory'
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias[alias_key].get(negated, negated)
else:
    value = unidata.unicode_alias[alias_key].get(value, value)
return obj[value]
def get_indic_positional_category_property(value, is_bytes=False)
Get `INDIC POSITIONAL/MATRA CATEGORY` property.
3.281983
2.806243
1.169529
obj = unidata.ascii_indic_syllabic_category if is_bytes else unidata.unicode_indic_syllabic_category
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['indicsyllabiccategory'].get(negated, negated)
else:
    value = unidata.unicode_alias['indicsyllabiccategory'].get(value, value)
return obj[value]
def get_indic_syllabic_category_property(value, is_bytes=False)
Get `INDIC SYLLABIC CATEGORY` property.
3.698018
3.390888
1.090575
obj = unidata.ascii_decomposition_type if is_bytes else unidata.unicode_decomposition_type
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['decompositiontype'].get(negated, negated)
else:
    value = unidata.unicode_alias['decompositiontype'].get(value, value)
return obj[value]
def get_decomposition_type_property(value, is_bytes=False)
Get `DECOMPOSITION TYPE` property.
3.968244
3.780611
1.04963
obj = unidata.ascii_nfc_quick_check if is_bytes else unidata.unicode_nfc_quick_check
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['nfcquickcheck'].get(negated, negated)
else:
    value = unidata.unicode_alias['nfcquickcheck'].get(value, value)
return obj[value]
def get_nfc_quick_check_property(value, is_bytes=False)
Get `NFC QUICK CHECK` property.
3.873863
3.641648
1.063766
obj = unidata.ascii_nfd_quick_check if is_bytes else unidata.unicode_nfd_quick_check
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['nfdquickcheck'].get(negated, negated)
else:
    value = unidata.unicode_alias['nfdquickcheck'].get(value, value)
return obj[value]
def get_nfd_quick_check_property(value, is_bytes=False)
Get `NFD QUICK CHECK` property.
3.85773
3.524709
1.094482
obj = unidata.ascii_nfkc_quick_check if is_bytes else unidata.unicode_nfkc_quick_check
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['nfkcquickcheck'].get(negated, negated)
else:
    value = unidata.unicode_alias['nfkcquickcheck'].get(value, value)
return obj[value]
def get_nfkc_quick_check_property(value, is_bytes=False)
Get `NFKC QUICK CHECK` property.
4.105381
3.743401
1.096698
obj = unidata.ascii_nfkd_quick_check if is_bytes else unidata.unicode_nfkd_quick_check
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['nfkdquickcheck'].get(negated, negated)
else:
    value = unidata.unicode_alias['nfkdquickcheck'].get(value, value)
return obj[value]
def get_nfkd_quick_check_property(value, is_bytes=False)
Get `NFKD QUICK CHECK` property.
4.012335
3.68324
1.089349
obj = unidata.ascii_numeric_type if is_bytes else unidata.unicode_numeric_type
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['numerictype'].get(negated, negated)
else:
    value = unidata.unicode_alias['numerictype'].get(value, value)
return obj[value]
def get_numeric_type_property(value, is_bytes=False)
Get `NUMERIC TYPE` property.
4.279537
3.910387
1.094402
obj = unidata.ascii_numeric_values if is_bytes else unidata.unicode_numeric_values
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['numericvalue'].get(negated, negated)
else:
    value = unidata.unicode_alias['numericvalue'].get(value, value)
return obj[value]
def get_numeric_value_property(value, is_bytes=False)
Get `NUMERIC VALUE` property.
4.285814
4.003378
1.07055
obj = unidata.ascii_age if is_bytes else unidata.unicode_age
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['age'].get(negated, negated)
else:
    value = unidata.unicode_alias['age'].get(value, value)
return obj[value]
def get_age_property(value, is_bytes=False)
Get `AGE` property.
4.316661
4.079336
1.058177
obj = unidata.ascii_joining_type if is_bytes else unidata.unicode_joining_type
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['joiningtype'].get(negated, negated)
else:
    value = unidata.unicode_alias['joiningtype'].get(value, value)
return obj[value]
def get_joining_type_property(value, is_bytes=False)
Get `JOINING TYPE` property.
4.09099
3.832009
1.067583
obj = unidata.ascii_joining_group if is_bytes else unidata.unicode_joining_group
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['joininggroup'].get(negated, negated)
else:
    value = unidata.unicode_alias['joininggroup'].get(value, value)
return obj[value]
def get_joining_group_property(value, is_bytes=False)
Get `JOINING GROUP` property.
4.05068
3.807119
1.063975
obj = unidata.ascii_scripts if is_bytes else unidata.unicode_scripts
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['script'].get(negated, negated)
else:
    value = unidata.unicode_alias['script'].get(value, value)
return obj[value]
def get_script_property(value, is_bytes=False)
Get `SC` property.
4.447216
4.195654
1.059958
obj = unidata.ascii_script_extensions if is_bytes else unidata.unicode_script_extensions
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['script'].get(negated, negated)
else:
    value = unidata.unicode_alias['script'].get(value, value)
return obj[value]
def get_script_extension_property(value, is_bytes=False)
Get `SCX` property.
4.004144
3.904598
1.025495
obj = unidata.ascii_blocks if is_bytes else unidata.unicode_blocks
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['block'].get(negated, negated)
else:
    value = unidata.unicode_alias['block'].get(value, value)
return obj[value]
def get_block_property(value, is_bytes=False)
Get `BLK` property.
4.365977
4.123405
1.058828
obj = unidata.ascii_bidi_classes if is_bytes else unidata.unicode_bidi_classes
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['bidiclass'].get(negated, negated)
else:
    value = unidata.unicode_alias['bidiclass'].get(value, value)
return obj[value]
def get_bidi_property(value, is_bytes=False)
Get `BC` property.
4.355541
4.185316
1.040672
obj = unidata.ascii_bidi_paired_bracket_type if is_bytes else unidata.unicode_bidi_paired_bracket_type
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['bidipairedbrackettype'].get(negated, negated)
else:
    value = unidata.unicode_alias['bidipairedbrackettype'].get(value, value)
return obj[value]
def get_bidi_paired_bracket_type_property(value, is_bytes=False)
Get `BPT` property.
3.816132
3.661289
1.042292
obj = unidata.ascii_vertical_orientation if is_bytes else unidata.unicode_vertical_orientation
if value.startswith('^'):
    negated = value[1:]
    value = '^' + unidata.unicode_alias['verticalorientation'].get(negated, negated)
else:
    value = unidata.unicode_alias['verticalorientation'].get(value, value)
return obj[value]
def get_vertical_orientation_property(value, is_bytes=False)
Get `VO` property.
4.178218
4.026853
1.037589
if value.startswith('^'):
    prefix = value[1:3]
    temp = value[3:]
    negate = '^'
else:
    prefix = value[:2]
    temp = value[2:]
    negate = ''
if prefix != 'is':
    raise ValueError("Does not start with 'is'!")
script_obj = unidata.ascii_script_extensions if is_bytes else unidata.unicode_script_extensions
bin_obj = unidata.ascii_binary if is_bytes else unidata.unicode_binary
value = negate + unidata.unicode_alias['script'].get(temp, temp)
if value not in script_obj:
    value = negate + unidata.unicode_alias['binary'].get(temp, temp)
    obj = bin_obj
else:
    obj = script_obj
return obj[value]
def get_is_property(value, is_bytes=False)
Get shortcut for `SC` or `Binary` property.
3.552449
3.407614
1.042503
if value.startswith('^'):
    prefix = value[1:3]
    temp = value[3:]
    negate = '^'
else:
    prefix = value[:2]
    temp = value[2:]
    negate = ''
if prefix != 'in':
    raise ValueError("Does not start with 'in'!")
value = negate + unidata.unicode_alias['block'].get(temp, temp)
obj = unidata.ascii_blocks if is_bytes else unidata.unicode_blocks
return obj[value]
def get_in_property(value, is_bytes=False)
Get shortcut for `Block` property.
5.26272
4.925929
1.068371
if prop is not None:
    prop = unidata.unicode_alias['_'].get(prop, prop)
    try:
        if prop == 'generalcategory':
            return get_gc_property(value, is_bytes)
        elif prop == 'script':
            return get_script_property(value, is_bytes)
        elif prop == 'scriptextensions':
            return get_script_extension_property(value, is_bytes)
        elif prop == 'block':
            return get_block_property(value, is_bytes)
        elif prop == 'binary':
            return get_binary_property(value, is_bytes)
        elif prop == 'bidiclass':
            return get_bidi_property(value, is_bytes)
        elif prop == 'bidipairedbrackettype':
            return get_bidi_paired_bracket_type_property(value, is_bytes)
        elif prop == 'age':
            return get_age_property(value, is_bytes)
        elif prop == 'eastasianwidth':
            return get_east_asian_width_property(value, is_bytes)
        elif PY35 and prop == 'indicpositionalcategory':
            return get_indic_positional_category_property(value, is_bytes)
        elif not PY35 and prop == 'indicmatracategory':
            return get_indic_positional_category_property(value, is_bytes)
        elif prop == 'indicsyllabiccategory':
            return get_indic_syllabic_category_property(value, is_bytes)
        elif prop == 'hangulsyllabletype':
            return get_hangul_syllable_type_property(value, is_bytes)
        elif prop == 'decompositiontype':
            return get_decomposition_type_property(value, is_bytes)
        elif prop == 'canonicalcombiningclass':
            return get_canonical_combining_class_property(value, is_bytes)
        elif prop == 'numerictype':
            return get_numeric_type_property(value, is_bytes)
        elif prop == 'numericvalue':
            return get_numeric_value_property(value, is_bytes)
        elif prop == 'joiningtype':
            return get_joining_type_property(value, is_bytes)
        elif prop == 'joininggroup':
            return get_joining_group_property(value, is_bytes)
        elif prop == 'graphemeclusterbreak':
            return get_grapheme_cluster_break_property(value, is_bytes)
        elif prop == 'linebreak':
            return get_line_break_property(value, is_bytes)
        elif prop == 'sentencebreak':
            return get_sentence_break_property(value, is_bytes)
        elif prop == 'wordbreak':
            return get_word_break_property(value, is_bytes)
        elif prop == 'nfcquickcheck':
            return get_nfc_quick_check_property(value, is_bytes)
        elif prop == 'nfdquickcheck':
            return get_nfd_quick_check_property(value, is_bytes)
        elif prop == 'nfkcquickcheck':
            return get_nfkc_quick_check_property(value, is_bytes)
        elif prop == 'nfkdquickcheck':
            return get_nfkd_quick_check_property(value, is_bytes)
        elif PY37 and prop == 'verticalorientation':
            return get_vertical_orientation_property(value, is_bytes)
        else:
            raise ValueError('Invalid Unicode property!')
    except Exception:
        raise ValueError('Invalid Unicode property!')
try:
    return get_gc_property(value, is_bytes)
except Exception:
    pass
try:
    return get_script_extension_property(value, is_bytes)
except Exception:
    pass
try:
    return get_block_property(value, is_bytes)
except Exception:
    pass
try:
    return get_binary_property(value, is_bytes)
except Exception:
    pass
try:
    return get_is_property(value, is_bytes)
except Exception:
    pass
try:
    return get_in_property(value, is_bytes)
except Exception:
    pass
raise ValueError('Invalid Unicode property!')
def get_unicode_property(value, prop=None, is_bytes=False)
Retrieve the Unicode category from the table.
1.580006
1.5708
1.00586
return _bre_parse._SearchParser(pattern, re_verbose, re_version).parse()
def _cached_search_compile(pattern, re_verbose, re_version, pattern_type)
Cached search compile.
18.272804
16.584366
1.101809
if isinstance(pattern, (str, bytes)):
    re_verbose = bool(VERBOSE & flags)
    re_unicode = None
    if bool((ASCII | LOCALE) & flags):
        re_unicode = False
    elif bool(UNICODE & flags):
        re_unicode = True
    if not (flags & DEBUG):
        pattern = _cached_search_compile(pattern, re_verbose, re_unicode, type(pattern))
    else:  # pragma: no cover
        pattern = _bre_parse._SearchParser(pattern, re_verbose, re_unicode).parse()
elif isinstance(pattern, Bre):
    if flags:
        raise ValueError("Cannot process flags argument with a compiled pattern")
    pattern = pattern._pattern
elif isinstance(pattern, (_RE_TYPE, Bre)):
    if flags:
        raise ValueError("Cannot process flags argument with a compiled pattern!")
else:
    raise TypeError("Not a string or compiled pattern!")
return pattern
def _apply_search_backrefs(pattern, flags=0)
Apply the search backrefs to the search pattern.
4.854352
4.76933
1.017827
if isinstance(pattern, Bre):
    if auto_compile is not None:
        raise ValueError("Cannot compile Bre with a different auto_compile!")
    elif flags != 0:
        raise ValueError("Cannot process flags argument with a compiled pattern")
    return pattern
else:
    if auto_compile is None:
        auto_compile = True
    return Bre(compile_search(pattern, flags), auto_compile)
def compile(pattern, flags=0, auto_compile=None): # noqa A001
Compile the search, or the search and replace, into one object.
4.911209
4.584625
1.071235
call = None
if pattern is not None and isinstance(pattern, _RE_TYPE):
    if isinstance(repl, (str, bytes)):
        if not (pattern.flags & DEBUG):
            call = _cached_replace_compile(pattern, repl, flags, type(repl))
        else:  # pragma: no cover
            call = _bre_parse._ReplaceParser().parse(pattern, repl, bool(flags & FORMAT))
    elif isinstance(repl, ReplaceTemplate):
        if flags:
            raise ValueError("Cannot process flags argument with a ReplaceTemplate!")
        if repl.pattern_hash != hash(pattern):
            raise ValueError("Pattern hash doesn't match hash in compiled replace!")
        call = repl
    else:
        raise TypeError("Not a valid type!")
else:
    raise TypeError("Pattern must be a compiled regular expression!")
return call
def compile_replace(pattern, repl, flags=0)
Construct a method that can be used as a replace method for `sub`, `subn`, etc.
5.86433
5.719442
1.025333
flags = args[2] if len(args) > 2 else kwargs.get('flags', 0)
return _re.findall(_apply_search_backrefs(pattern, flags), string, *args, **kwargs)
def findall(pattern, string, *args, **kwargs)
Apply `findall` after applying backrefs.
4.553741
3.950102
1.152816
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(repl)
is_string = isinstance(repl, (str, bytes))
if is_replace and repl.use_format:
    raise ValueError("Compiled replace cannot be a format object!")
pattern = compile_search(pattern, flags)
return _re.sub(
    pattern,
    (compile_replace(pattern, repl) if is_replace or is_string else repl),
    string, *args, **kwargs
)
def sub(pattern, repl, string, *args, **kwargs)
Apply `sub` after applying backrefs.
4.876283
4.838879
1.00773
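A usage sketch for the record above (assuming this is the backrefs `bre` module, whose replace templates support case backrefs such as \C...\E for an uppercase span):

from backrefs import bre

print(bre.sub(r'(\w+)', r'\C\1\E', 'hello'))  # 'HELLO'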
request.session.setdefault(_key_name(kind), []).append({
    "method": method,
    "args": args
})
def add(request, kind, method, *args)
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"}) add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
5.795342
6.18499
0.937001
here = os.path.abspath(os.path.dirname(__file__))
jbxapi_file = os.path.join(here, "jbxapi.py")
with open(jbxapi_file) as f:
    content = f.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
if not match:
    raise RuntimeError("Unable to find version string.")
return match.group(1)
def get_version()
Extract the version number from the code.
2.158199
2.068778
1.043224
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_list(self)
Fetch a list of all analyses.
7.343985
6.343024
1.157805
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
    files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={})
Submit a sample and return the submission id.

Parameters:
    sample: The sample to submit. Needs to be a file-like object or a tuple in the shape (filename, file-like object).
    cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a tuple in the shape (filename, file-like object).
    params: Customize the sandbox parameters. They are described in more detail in the default submission parameters.

Example:
    import jbxapi
    joe = jbxapi.JoeSandbox()
    with open("sample.exe", "rb") as f:
        joe.submit_sample(f, params={"systems": ["w7"]})

Example:
    import io, jbxapi
    joe = jbxapi.JoeSandbox()
    cookbook = io.BytesIO(b"cookbook content")
    with open("sample.exe", "rb") as f:
        joe.submit_sample(f, cookbook=cookbook)
3.630152
4.292452
0.845706
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={})
Submit a sample at a given URL for analysis.
4.198229
3.817522
1.099726
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={})
Submit a website for analysis.
3.912293
3.538078
1.105768
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={})
Submit a cookbook.
4.614827
4.235722
1.089502
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id)
Delete a submission.
5.161387
5.085139
1.014994
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_online(self)
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
8.142751
7.593064
1.072393
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_info(self, webid)
Show the status and most important attributes of an analysis.
5.664567
5.471344
1.035316
# when no file is specified, we create our own
if file is None:
    _file = io.BytesIO()
else:
    _file = file
data = {
    'apikey': self.apikey,
    'webid': webid,
    'type': type,
    'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
    filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
    filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
    self._raise_or_extract(response)
    raise RuntimeError("Unreachable because statement above should raise.")
try:
    for chunk in response.iter_content(1024):
        _file.write(chunk)
except requests.exceptions.RequestException as e:
    raise ConnectionError(e)
# no user file means we return the content
if file is None:
    return (filename, _file.getvalue())
else:
    return filename
def analysis_download(self, webid, type, run=None, file=None)
Download a resource for an analysis, e.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.

When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).

Parameters:
    webid: the webid of the analysis
    type: the report type, e.g. 'html', 'bins'
    run: specify the run. If it is None, let Joe Sandbox pick one
    file: a writeable file-like object (When omitted, the method returns the data as a bytes object.)

Example:
    json_report, name = joe.analysis_download(123456, 'jsonfixed')

Example:
    with open("full_report.html", "wb") as f:
        name = joe.analysis_download(123456, "html", file=f)
3.832824
3.725628
1.028773
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def analysis_search(self, query)
Lists the webids of the analyses that match the given query. Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
5.819678
6.812768
0.854231
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_systems(self)
Retrieve a list of available systems.
8.029946
6.341443
1.266265
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self)
Show information about the account. Only available on Joe Sandbox Cloud.
7.303793
7.08716
1.030567
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self)
Query information about the server.
7.817175
6.973025
1.121059
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self)
Show the available localized internet anonymization countries.
7.730523
6.45551
1.197508
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self)
Show the available languages and locales.
6.83076
6.295459
1.08503
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
    acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
                       "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
    for param_name, fp in kwargs["files"].items():
        if isinstance(fp, (tuple, list)):
            filename, fp = fp
        else:
            filename = requests.utils.guess_filename(fp) or param_name

        def encode(char):
            try:
                if char in acceptable_chars:
                    return char
            except UnicodeDecodeError:
                pass
            return "x{:02x}".format(ord(char))

        filename = "".join(encode(x) for x in filename)
        kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
    try:
        return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
    except requests.exceptions.Timeout as e:
        # exhausted all retries
        if i >= self.retries:
            raise ConnectionError(e)
    except requests.exceptions.RequestException as e:
        raise ConnectionError(e)
    # exponential backoff
    max_backoff = 4 ** i / 10  # .4, 1.6, 6.4, 25.6, ...
    time.sleep(random.uniform(0, max_backoff))
def _post(self, url, data=None, **kwargs)
Wrapper around requests.post which (a) always inserts a timeout (b) converts errors to ConnectionError (c) re-tries a few times (d) converts file names to ASCII
3.417774
3.330909
1.026079
if not user_parameters:
    return
# sanity check against typos
for key in user_parameters:
    if key not in submission_defaults:
        raise ValueError("Unknown parameter {0}".format(key))
def _check_user_parameters(self, user_parameters)
Verifies that the parameter dict given by the user only contains known keys. This ensures that the user detects typos faster.
6.019161
5.037597
1.194848
try:
    data = response.json()
except ValueError:
    raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?".format(response.status_code))
try:
    if response.ok:
        return data['data']
    else:
        error = data['errors'][0]
        raise ApiError(error)
except (KeyError, TypeError):
    raise JoeException("Unexpected data ({}). Is the API url correct?".format(response.status_code))
def _raise_or_extract(self, response)
Raises an exception if the response indicates an API error. Otherwise returns the object at the 'data' key of the API response.
3.479961
3.198484
1.088003
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
    lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
    # At this point, all shell-like parsing has been done (i.e.
    # comments processed, quotes and backslash escape sequences
    # processed, multi-line values assembled, trailing newlines
    # stripped, etc.), so the tokens are now either:
    # * variable assignments: var=value
    # * commands or their arguments (not allowed in os-release)
    if '=' in token:
        k, v = token.split('=', 1)
        if isinstance(v, bytes):
            v = v.decode('utf-8')
        props[k.lower()] = v
    else:
        # Ignore any tokens that are not variable assignments
        pass
if 'version_codename' in props:
    # os-release added a version_codename field. Use that in
    # preference to anything else. Note that some distros purposefully
    # do not have code names. They should be setting
    # version_codename=""
    props['codename'] = props['version_codename']
elif 'ubuntu_codename' in props:
    # Same as above but a non-standard field name used on older Ubuntus
    props['codename'] = props['ubuntu_codename']
elif 'version' in props:
    # If there is no version_codename, parse it from the version
    codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
    if codename:
        codename = codename.group()
        codename = codename.strip('()')
        codename = codename.strip(',')
        codename = codename.strip()
        # codename appears within parentheses.
        props['codename'] = codename
return props
def _parse_os_release_content(lines)
Parse the lines of an os-release file.

Parameters:
    * lines: Iterable through the lines in the os-release file.
             Each line must be a unicode string or a UTF-8 encoded byte string.

Returns:
    A dictionary containing all information items.
4.639105
4.754706
0.975687
if type(n) != int:
    return n
ret = []
n = str(n)
for i in range(len(n) - 1, -1, -1):
    ret.append(n[i])
    if (len(n) - i) % 3 == 0:
        ret.append(',')
ret.reverse()
return ''.join(ret[1:]) if ret[0] == ',' else ''.join(ret)
def pretty_print(n)
Pretty print function for very big integers
2.283321
2.147134
1.063427
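Worked examples of the comma-grouping logic above (non-int input is returned unchanged):

pretty_print(1234567)  # '1,234,567'
pretty_print(123456)   # '123,456'
pretty_print('n/a')    # 'n/a'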
my_plurals = resource_helper.build_plural_mappings({}, RESOURCE_ATTRIBUTE_MAP)
attributes.PLURALS.update(my_plurals)
attr_map = RESOURCE_ATTRIBUTE_MAP
ext_resources = resource_helper.build_resource_info(my_plurals, attr_map, constants.A10_CERTIFICATE)
return ext_resources
def get_resources(cls)
Returns external resources.
5.737481
5.358781
1.070669
LOG.debug("A10DeviceInstancePlugin.create(): a10_device_instance=%s", a10_device_instance) config = a10_config.A10Config() vthunder_defaults = config.get_vthunder_config() imgr = instance_manager.InstanceManager.from_config(config, context) dev_instance = common_resources.remove_attributes_not_specified( a10_device_instance.get(resources.RESOURCE)) # Create the instance with specified defaults. vthunder_config = vthunder_defaults.copy() vthunder_config.update(_convert(dev_instance, _API, _VTHUNDER_CONFIG)) instance = imgr.create_device_instance(vthunder_config, dev_instance.get("name")) db_record = {} db_record.update(_convert(vthunder_config, _VTHUNDER_CONFIG, _DB)) db_record.update(_convert(dev_instance, _API, _DB)) db_record.update(_convert(instance, _INSTANCE, _DB)) # If success, return the created DB record # Else, raise an exception because that's what we would do anyway db_instance = super(A10DeviceInstancePlugin, self).create_a10_device_instance( context, {resources.RESOURCE: db_record}) return _make_api_dict(db_instance)
def create_a10_device_instance(self, context, a10_device_instance)
Attempt to create instance using neutron context
3.742503
3.726472
1.004302
vport_meta = self.meta(vip, 'vport', None)
if vport_meta is None:
    vport_meta = self.meta(vip, 'port', {})
return vport_meta
def vport_meta(self, vip)
Get the vport meta, no matter which name was used
2.983594
2.729002
1.093291
if six.callable(template):
    return template(*args, **kw)
if isinstance(template, six.string_types):
    return template
if isinstance(template, collections.Mapping):
    return template.__class__((k, apply_template(v, *args, **kw)) for k, v in template.items())
if isinstance(template, collections.Iterable):
    return template.__class__(apply_template(v, *args, **kw) for v in template)
return template
def apply_template(template, *args, **kw)
Applies every callable in any Mapping or Iterable
1.821243
1.743779
1.044423
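A small usage sketch for the record above; note that on modern Python the abstract base classes live in collections.abc rather than collections, so the original imports suggest an older codebase:

result = apply_template({'a': lambda x: x * 2, 'b': [lambda x: x + 1, 5]}, 3)
# result == {'a': 6, 'b': [4, 5]}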