| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q13000
|
new_deploy
|
train
|
def new_deploy(py_ver: PyVer, release_target: ReleaseTarget):
"""Job for deploying package to pypi"""
cache_file = f'app_{py_ver.name}.tar'
template = yaml.safe_load(f"""
machine:
image: circleci/classic:201710-02
steps:
- attach_workspace:
at: {cache_dir}
- checkout
- run:
name: Install prerequisites
command: sudo pip install awscli
- run:
name: Load docker image layer cache
command: docker load -i {cache_dir}/{cache_file}
- run:
name: Start a named container
command: docker run --name=SDK {py_ver.tag}
- run:
name: Extract the documentation
command: 'docker cp SDK:/build/built_docs ./built_docs'
- run:
name: Upload the documentation
command: >-
aws s3 sync --delete --cache-control
max-age=3600 built_docs s3://mbed-cloud-sdk-python
- run:
name: Tag and release
command: >-
docker run --env-file=scripts/templates/envvars.env
-e TWINE_REPOSITORY={release_target.twine_repo}
{py_ver.tag}
sh -c "source .venv/bin/activate && python scripts/tag_and_release.py --mode={release_target.mode}"
- run:
name: Start the release party!
command: >-
docker run --env-file=scripts/templates/envvars.env
{py_ver.tag}
sh -c "source .venv/bin/activate && python scripts/notify.py"
""")
return deploy_name(py_ver, release_target), template
|
python
|
{
"resource": ""
}
|
q13001
|
generate_docker_file
|
train
|
def generate_docker_file(py_ver: PyVer):
"""Templated docker files"""
with open(os.path.join(script_templates_root, 'Dockerfile')) as fh:
return fh.read().format(py_ver=py_ver, author=author_file)
|
python
|
{
"resource": ""
}
|
q13002
|
generate_docker_targets
|
train
|
def generate_docker_targets():
"""Write all templated container engine files"""
output = {}
for py_ver in python_versions.values():
filepath = os.path.join(container_config_root, py_ver.docker_file)
output[filepath] = generate_docker_file(py_ver)
filepath = os.path.join(container_config_root, py_ver.compose_file)
output[filepath] = generate_compose_file(py_ver)
return output
|
python
|
{
"resource": ""
}
|
q13003
|
main
|
train
|
def main(output_path=None):
"""Writes out new python build system
This is needed because CircleCI does not support build matrices
nor parameterisation of cache paths or other aspects of their
config
There's also the added bonus of validating the yaml as we go.
Additionally, we template and write Docker and docker-compose files
for multiple python versions, as Docker `FROM` statements are also
un-templatable using environment variables or similar.
The bulk of the config structure is parsed templated yaml, which
seems the most succinct way of building deeply nested dictionaries and lists,
and also cleanly maps to the appearance of config.yml before & after templating.
The main job blocks (build, test, deploy) are expanded as the product of python versions
and mbed cloud environments, before being recombined into the job listing.
Jobs are chained into a CircleCI workflow using a graph
(in which nodes are job identifiers, and edges describe the dependencies
and any additional parameters)
"""
config_output_file = output_path or os.path.join(PROJECT_ROOT, '.circleci', 'config.yml')
yaml_structure = generate_circle_output()
with open(config_output_file, 'w') as fh:
yaml_content = yaml.safe_dump(data=yaml_structure, default_flow_style=False)
fh.write(
f'#\n'
f'# This file is autogenerated, do not modify manually. '
f'See {author_file} for instructions.\n'
f'#\n'
f'{yaml_content}'
)
for path, content in generate_docker_targets().items():
LOG.info('writing %s', path)
with open(path, 'w') as fh:
fh.write(content)
|
python
|
{
"resource": ""
}
|
q13004
|
GlobalSignCredentials.passphrase
|
train
|
def passphrase(self, passphrase):
"""
Sets the passphrase of this GlobalSignCredentials.
The passphrase to decrypt the private key in case it is encrypted. Empty if the private key is not encrypted.
:param passphrase: The passphrase of this GlobalSignCredentials.
:type: str
"""
if passphrase is not None and len(passphrase) > 1000:
raise ValueError("Invalid value for `passphrase`, length must be less than or equal to `1000`")
self._passphrase = passphrase
|
python
|
{
"resource": ""
}
|
q13005
|
git_url_ssh_to_https
|
train
|
def git_url_ssh_to_https(url):
"""Convert a git url
url will look like
https://github.com/ARMmbed/mbed-cloud-sdk-python.git
or
git@github.com:ARMmbed/mbed-cloud-sdk-python.git
we want:
https://${GITHUB_TOKEN}@github.com/ARMmbed/mbed-cloud-sdk-python-private.git
"""
path = url.split('github.com', 1)[1][1:].strip()
new = 'https://{GITHUB_TOKEN}@github.com/%s' % path
print('rewriting git url to: %s' % new)
return new.format(GITHUB_TOKEN=os.getenv('GITHUB_TOKEN'))
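# Illustrative (assuming GITHUB_TOKEN is set in the environment): both the
# https and the git@ forms above rewrite to
# 'https://<token>@github.com/ARMmbed/mbed-cloud-sdk-python.git'.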
|
python
|
{
"resource": ""
}
|
q13006
|
zip_unicode
|
train
|
def zip_unicode(output, version):
"""Zip the Unicode files."""
zipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version), 'w', zipfile.ZIP_DEFLATED)
target = os.path.join(output, 'unicodedata', version)
print('Zipping %s.zip...' % version)
for root, dirs, files in os.walk(target):
for file in files:
if file.endswith('.txt'):
zipper.write(os.path.join(root, file), arcname=file)
|
python
|
{
"resource": ""
}
|
q13007
|
unzip_unicode
|
train
|
def unzip_unicode(output, version):
"""Unzip the Unicode files."""
unzipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version))
target = os.path.join(output, 'unicodedata', version)
print('Unzipping %s.zip...' % version)
os.makedirs(target)
for f in unzipper.namelist():
        # Zip member names always use forward slashes, and `extract()` converts
        # them to OS-specific paths, so this works on Windows as well.
unzipper.extract(f, target)
|
python
|
{
"resource": ""
}
|
q13008
|
download_unicodedata
|
train
|
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt'
]
files.append('ScriptExtensions.txt')
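    # Which data files exist depends on the UCD version tied to each Python
    # release, hence the version gates below.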
if PY35:
files.append('IndicPositionalCategory.txt')
else:
files.append('IndicMatraCategory.txt')
files.append('IndicSyllabicCategory.txt')
if PY34:
files.append('BidiBrackets.txt')
if PY37:
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with open(file_location, 'w') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version)
|
python
|
{
"resource": ""
}
|
q13009
|
get_unicodedata
|
train
|
def get_unicodedata(version, output=HOME, no_zip=False):
"""Ensure we have Unicode data to generate Unicode tables."""
target = os.path.join(output, 'unicodedata', version)
zip_target = os.path.join(output, 'unicodedata', '%s.zip' % version)
if not os.path.exists(target) and os.path.exists(zip_target):
unzip_unicode(output, version)
# Download missing files if any. Zip if required.
download_unicodedata(version, output, no_zip)
|
python
|
{
"resource": ""
}
|
q13010
|
_SearchParser.process_quotes
|
train
|
def process_quotes(self, text):
"""Process quotes."""
escaped = False
in_quotes = False
current = []
quoted = []
i = _util.StringIter(text)
iter(i)
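    # Scan for \Q...\E spans: characters inside are buffered in `quoted` and
    # emitted through `_re.escape()` so they match literally.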
for t in i:
if not escaped and t == "\\":
escaped = True
elif escaped:
escaped = False
if t == "E":
if in_quotes:
current.append(_re.escape("".join(quoted)))
quoted = []
in_quotes = False
elif t == "Q" and not in_quotes:
in_quotes = True
elif in_quotes:
quoted.extend(["\\", t])
else:
current.extend(["\\", t])
elif in_quotes:
quoted.extend(t)
else:
current.append(t)
if in_quotes and escaped:
quoted.append("\\")
elif escaped:
current.append("\\")
if quoted:
current.append(_re.escape("".join(quoted)))
return "".join(current)
|
python
|
{
"resource": ""
}
|
q13011
|
_SearchParser.verbose_comment
|
train
|
def verbose_comment(self, t, i):
"""Handle verbose comments."""
current = []
escaped = False
try:
while t != "\n":
if not escaped and t == "\\":
escaped = True
current.append(t)
elif escaped:
escaped = False
if t in self._new_refs:
current.append("\\")
current.append(t)
else:
current.append(t)
t = next(i)
except StopIteration:
pass
if t == "\n":
current.append(t)
return current
|
python
|
{
"resource": ""
}
|
q13012
|
_SearchParser.get_unicode_property
|
train
|
def get_unicode_property(self, i):
"""Get Unicode property."""
index = i.index
prop = []
value = []
try:
c = next(i)
if c.upper() in _ASCII_LETTERS:
prop.append(c)
elif c != '{':
raise SyntaxError("Unicode property missing '{' at %d!" % (i.index - 1))
else:
c = next(i)
if c == '^':
prop.append(c)
c = next(i)
while c not in (':', '=', '}'):
if c not in _PROPERTY:
raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
if c not in _PROPERTY_STRIP:
prop.append(c)
c = next(i)
if c in (':', '='):
c = next(i)
while c != '}':
if c not in _PROPERTY:
raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
if not value:
raise SyntaxError('Invalid Unicode property!')
except StopIteration:
raise SyntaxError("Missing or unmatched '{' at %d!" % index)
return ''.join(prop).lower(), ''.join(value).lower()
|
python
|
{
"resource": ""
}
|
q13013
|
_SearchParser.get_named_unicode
|
train
|
def get_named_unicode(self, i):
"""Get Unicode name."""
index = i.index
value = []
try:
if next(i) != '{':
raise ValueError("Named Unicode missing '{' %d!" % (i.index - 1))
c = next(i)
while c != '}':
value.append(c)
c = next(i)
except Exception:
raise SyntaxError("Unmatched '{' at %d!" % index)
return ''.join(value)
|
python
|
{
"resource": ""
}
|
q13014
|
_SearchParser.normal
|
train
|
def normal(self, t, i):
"""Handle normal chars."""
current = []
if t == "\\":
try:
t = next(i)
current.extend(self.reference(t, i))
except StopIteration:
current.append(t)
elif t == "(":
current.extend(self.subgroup(t, i))
elif self.verbose and t == "#":
current.extend(self.verbose_comment(t, i))
elif t == "[":
current.extend(self.char_groups(t, i))
else:
current.append(t)
return current
|
python
|
{
"resource": ""
}
|
q13015
|
_SearchParser.posix_props
|
train
|
def posix_props(self, prop, in_group=False):
"""
Insert POSIX properties.
    POSIX-style properties are not as forgiving
    as Unicode properties: case matters,
    and whitespace, '-', and '_' are not tolerated.
"""
try:
if self.is_bytes or not self.unicode:
pattern = _uniprops.get_posix_property(
prop, (_uniprops.POSIX_BYTES if self.is_bytes else _uniprops.POSIX)
)
else:
pattern = _uniprops.get_posix_property(prop, _uniprops.POSIX_UNICODE)
except Exception:
raise ValueError('Invalid POSIX property!')
if not in_group and not pattern: # pragma: no cover
pattern = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
return [pattern]
|
python
|
{
"resource": ""
}
|
q13016
|
_SearchParser.unicode_name
|
train
|
def unicode_name(self, name, in_group=False):
"""Insert Unicode value by its name."""
value = ord(_unicodedata.lookup(name))
if (self.is_bytes and value > 0xFF):
value = ""
if not in_group and value == "":
return '[^%s]' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
elif value == "":
return value
else:
return ['\\%03o' % value if value <= 0xFF else chr(value)]
|
python
|
{
"resource": ""
}
|
q13017
|
_SearchParser.unicode_props
|
train
|
def unicode_props(self, props, value, in_group=False, negate=False):
"""
Insert Unicode properties.
Unicode properties are very forgiving.
Case doesn't matter and `[ -_]` will be stripped out.
"""
# `'GC = Some_Unpredictable-Category Name' -> 'gc=someunpredictablecategoryname'`
category = None
# `\p{^negated}` Strip off the caret after evaluation.
    if props.startswith("^"):
        negate = not negate
        props = props[1:]
# Get the property and value.
# If a property is present and not block,
# we can assume `GC` as that is all we support.
# If we are wrong it will fail.
if value:
if _uniprops.is_enum(props):
category = props
props = value
elif value in ('y', 'yes', 't', 'true'):
category = 'binary'
elif value in ('n', 'no', 'f', 'false'):
category = 'binary'
negate = not negate
else:
raise ValueError('Invalid Unicode property!')
v = _uniprops.get_unicode_property(("^" if negate else "") + props, category, self.is_bytes)
if not in_group:
if not v:
v = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
v = "[%s]" % v
properties = [v]
return properties
|
python
|
{
"resource": ""
}
|
q13018
|
_ReplaceParser.parse_format_index
|
train
|
def parse_format_index(self, text):
"""Parse format index."""
base = 10
prefix = text[1:3] if text[0] == "-" else text[:2]
if prefix[0:1] == "0":
char = prefix[-1]
if char == "b":
base = 2
elif char == "o":
base = 8
elif char == "x":
base = 16
try:
text = int(text, base)
except Exception:
pass
return text
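# Illustrative: '0x1f' -> 31, '0o17' -> 15, '0b101' -> 5, '12' -> 12;
# text that fails the int() conversion is returned unchanged (a named field).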
|
python
|
{
"resource": ""
}
|
q13019
|
_ReplaceParser.handle_format
|
train
|
def handle_format(self, t, i):
"""Handle format."""
if t == '{':
t = self.format_next(i)
if t == '{':
self.get_single_stack()
self.result.append(t)
else:
field, text = self.get_format(t, i)
self.handle_format_group(field, text)
else:
t = self.format_next(i)
if t == '}':
self.get_single_stack()
self.result.append(t)
else:
raise SyntaxError("Unmatched '}' at %d!" % (i.index - 2))
|
python
|
{
"resource": ""
}
|
q13020
|
_ReplaceParser.get_octal
|
train
|
def get_octal(self, c, i):
"""Get octal."""
index = i.index
value = []
zero_count = 0
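    # Consume up to three leading zeros, then fill the remaining of three
    # digit slots with octal digits; validity is judged after counting.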
try:
if c == '0':
for x in range(3):
if c != '0':
break
value.append(c)
c = next(i)
zero_count = len(value)
if zero_count < 3:
for x in range(3 - zero_count):
if c not in _OCTAL:
break
value.append(c)
c = next(i)
i.rewind(1)
except StopIteration:
pass
octal_count = len(value)
if not (self.use_format and octal_count) and not (zero_count and octal_count < 3) and octal_count != 3:
i.rewind(i.index - index)
value = []
return ''.join(value) if value else None
|
python
|
{
"resource": ""
}
|
q13021
|
_ReplaceParser.parse_octal
|
train
|
def parse_octal(self, text, i):
"""Parse octal value."""
value = int(text, 8)
if value > 0xFF and self.is_bytes:
# Re fails on octal greater than `0o377` or `0xFF`
raise ValueError("octal escape value outside of range 0-0o377!")
else:
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
elif value <= 0xFF:
self.result.append('\\%03o' % value)
else:
self.result.append(chr(value))
|
python
|
{
"resource": ""
}
|
q13022
|
_ReplaceParser.get_named_unicode
|
train
|
def get_named_unicode(self, i):
"""Get named Unicode."""
index = i.index
value = []
try:
if next(i) != '{':
raise SyntaxError("Named Unicode missing '{'' at %d!" % (i.index - 1))
c = next(i)
while c != '}':
value.append(c)
c = next(i)
except StopIteration:
raise SyntaxError("Unmatched '}' at %d!" % index)
return ''.join(value)
|
python
|
{
"resource": ""
}
|
q13023
|
_ReplaceParser.parse_named_unicode
|
train
|
def parse_named_unicode(self, i):
"""Parse named Unicode."""
value = ord(_unicodedata.lookup(self.get_named_unicode(i)))
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
elif value <= 0xFF:
self.result.append('\\%03o' % value)
else:
self.result.append(chr(value))
|
python
|
{
"resource": ""
}
|
q13024
|
_ReplaceParser.get_wide_unicode
|
train
|
def get_wide_unicode(self, i):
"""Get narrow Unicode."""
value = []
for x in range(3):
c = next(i)
if c == '0':
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
c = next(i)
if c in ('0', '1'):
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
for x in range(4):
c = next(i)
if c.lower() in _HEX:
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
return ''.join(value)
|
python
|
{
"resource": ""
}
|
q13025
|
_ReplaceParser.parse_unicode
|
train
|
def parse_unicode(self, i, wide=False):
"""Parse Unicode."""
text = self.get_wide_unicode(i) if wide else self.get_narrow_unicode(i)
value = int(text, 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
elif value <= 0xFF:
self.result.append('\\%03o' % value)
else:
self.result.append(chr(value))
|
python
|
{
"resource": ""
}
|
q13026
|
_ReplaceParser.get_byte
|
train
|
def get_byte(self, i):
"""Get byte."""
value = []
for x in range(2):
c = next(i)
if c.lower() in _HEX:
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid byte character at %d!' % (i.index - 1))
return ''.join(value)
|
python
|
{
"resource": ""
}
|
q13027
|
_ReplaceParser.parse_bytes
|
train
|
def parse_bytes(self, i):
"""Parse byte."""
value = int(self.get_byte(i), 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
else:
self.result.append('\\%03o' % value)
|
python
|
{
"resource": ""
}
|
q13028
|
_ReplaceParser.format_next
|
train
|
def format_next(self, i):
"""Get next format char."""
c = next(i)
return self.format_references(next(i), i) if c == '\\' else c
|
python
|
{
"resource": ""
}
|
q13029
|
_ReplaceParser.format_references
|
train
|
def format_references(self, t, i):
"""Handle format references."""
octal = self.get_octal(t, i)
if octal:
value = int(octal, 8)
if value > 0xFF and self.is_bytes:
# Re fails on octal greater than `0o377` or `0xFF`
raise ValueError("octal escape value outside of range 0-0o377!")
value = chr(value)
elif t in _STANDARD_ESCAPES or t == '\\':
value = _BACK_SLASH_TRANSLATION['\\' + t]
elif not self.is_bytes and t == "U":
value = chr(int(self.get_wide_unicode(i), 16))
elif not self.is_bytes and t == "u":
value = chr(int(self.get_narrow_unicode(i), 16))
elif not self.is_bytes and t == "N":
value = _unicodedata.lookup(self.get_named_unicode(i))
elif t == "x":
value = chr(int(self.get_byte(i), 16))
else:
i.rewind(1)
value = '\\'
return value
|
python
|
{
"resource": ""
}
|
q13030
|
_ReplaceParser.span_case
|
train
|
def span_case(self, i, case):
"""Uppercase or lowercase the next range of characters until end marker is found."""
# A new \L, \C or \E should pop the last in the stack.
if self.span_stack:
self.span_stack.pop()
if self.single_stack:
self.single_stack.pop()
self.span_stack.append(case)
count = len(self.span_stack)
self.end_found = False
try:
while not self.end_found:
t = next(i)
if self.use_format and t in _CURLY_BRACKETS:
self.handle_format(t, i)
elif t == '\\':
try:
t = next(i)
self.reference(t, i)
except StopIteration:
self.result.append(t)
raise
else:
self.result.append(self.convert_case(t, case))
if self.end_found or count > len(self.span_stack):
self.end_found = False
break
except StopIteration:
pass
if count == len(self.span_stack):
self.span_stack.pop()
|
python
|
{
"resource": ""
}
|
q13031
|
_ReplaceParser.convert_case
|
train
|
def convert_case(self, value, case):
"""Convert case."""
if self.is_bytes:
cased = []
for c in value:
if c in _ASCII_LETTERS:
cased.append(c.lower() if case == _LOWER else c.upper())
else:
cased.append(c)
return "".join(cased)
else:
return value.lower() if case == _LOWER else value.upper()
|
python
|
{
"resource": ""
}
|
q13032
|
_ReplaceParser.single_case
|
train
|
def single_case(self, i, case):
"""Uppercase or lowercase the next character."""
# Pop a previous case if we have consecutive ones.
if self.single_stack:
self.single_stack.pop()
self.single_stack.append(case)
try:
t = next(i)
if self.use_format and t in _CURLY_BRACKETS:
self.handle_format(t, i)
elif t == '\\':
try:
t = next(i)
self.reference(t, i)
except StopIteration:
self.result.append(t)
raise
elif self.single_stack:
self.result.append(self.convert_case(t, self.get_single_stack()))
except StopIteration:
pass
|
python
|
{
"resource": ""
}
|
q13033
|
_ReplaceParser.get_single_stack
|
train
|
def get_single_stack(self):
"""Get the correct single stack item to use."""
single = None
while self.single_stack:
single = self.single_stack.pop()
return single
|
python
|
{
"resource": ""
}
|
q13034
|
_ReplaceParser.handle_format_group
|
train
|
def handle_format_group(self, field, text):
"""Handle format group."""
# Handle auto incrementing group indexes
if field == '':
if self.auto:
field = str(self.auto_index)
text[0] = (_util.FMT_FIELD, field)
self.auto_index += 1
elif not self.manual and not self.auto:
self.auto = True
field = str(self.auto_index)
text[0] = (_util.FMT_FIELD, field)
self.auto_index += 1
else:
raise ValueError("Cannot switch to auto format during manual format!")
elif not self.manual and not self.auto:
self.manual = True
elif not self.manual:
raise ValueError("Cannot switch to manual format during auto format!")
self.handle_group(field, tuple(text), True)
|
python
|
{
"resource": ""
}
|
q13035
|
_ReplaceParser.handle_group
|
train
|
def handle_group(self, text, capture=None, is_format=False):
"""Handle groups."""
if capture is None:
capture = tuple() if self.is_bytes else ''
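    # Flush any accumulated literal text into its own slot first, so literal
    # slots and group slots stay aligned by index.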
if len(self.result) > 1:
self.literal_slots.append("".join(self.result))
if is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
del self.result[:]
self.result.append("")
self.slot += 1
elif is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
self.group_slots.append(
(
self.slot,
(
(self.span_stack[-1] if self.span_stack else None),
self.get_single_stack(),
capture
)
)
)
self.slot += 1
|
python
|
{
"resource": ""
}
|
q13036
|
ReplaceTemplate._get_group_index
|
train
|
def _get_group_index(self, index):
"""Find and return the appropriate group index."""
g_index = None
for group in self.groups:
if group[0] == index:
g_index = group[1]
break
return g_index
|
python
|
{
"resource": ""
}
|
q13037
|
ReplaceTemplate._get_group_attributes
|
train
|
def _get_group_attributes(self, index):
"""Find and return the appropriate group case."""
g_case = (None, None, -1)
for group in self.group_slots:
if group[0] == index:
g_case = group[1]
break
return g_case
|
python
|
{
"resource": ""
}
|
q13038
|
_to_bstr
|
train
|
def _to_bstr(l):
"""Convert to byte string."""
if isinstance(l, str):
l = l.encode('ascii', 'backslashreplace')
elif not isinstance(l, bytes):
l = str(l).encode('ascii', 'backslashreplace')
return l
|
python
|
{
"resource": ""
}
|
q13039
|
format_string
|
train
|
def format_string(m, l, capture, is_bytes):
"""Perform a string format."""
for fmt_type, value in capture[1:]:
if fmt_type == FMT_ATTR:
# Attribute
l = getattr(l, value)
elif fmt_type == FMT_INDEX:
# Index
l = l[value]
elif fmt_type == FMT_CONV:
if is_bytes:
# Conversion
if value in ('r', 'a'):
l = repr(l).encode('ascii', 'backslashreplace')
elif value == 's':
# If the object is not string or byte string already
l = _to_bstr(l)
else:
# Conversion
if value == 'a':
l = ascii(l)
elif value == 'r':
l = repr(l)
elif value == 's':
# If the object is not string or byte string already
l = str(l)
elif fmt_type == FMT_SPEC:
# Integers and floats don't have an explicit 's' format type.
if value[3] and value[3] == 's':
if isinstance(l, int): # pragma: no cover
raise ValueError("Unknown format code 's' for object of type 'int'")
if isinstance(l, float): # pragma: no cover
raise ValueError("Unknown format code 's' for object of type 'float'")
# Ensure object is a byte string
l = _to_bstr(l) if is_bytes else str(l)
spec_type = value[1]
if spec_type == '^':
l = l.center(value[2], value[0])
elif spec_type == ">":
l = l.rjust(value[2], value[0])
else:
l = l.ljust(value[2], value[0])
# Make sure the final object is a byte string
return _to_bstr(l) if is_bytes else str(l)
|
python
|
{
"resource": ""
}
|
q13040
|
StringIter.rewind
|
train
|
def rewind(self, count):
"""Rewind index."""
if count > self._index: # pragma: no cover
raise ValueError("Can't rewind past beginning!")
self._index -= count
|
python
|
{
"resource": ""
}
|
q13041
|
get_requirements
|
train
|
def get_requirements():
"""Load list of dependencies."""
install_requires = []
with open("requirements/project.txt") as f:
for line in f:
if not line.startswith("#"):
install_requires.append(line.strip())
return install_requires
|
python
|
{
"resource": ""
}
|
q13042
|
get_unicodedata
|
train
|
def get_unicodedata():
"""Download the `unicodedata` version for the given Python version."""
import unicodedata
fail = False
uver = unicodedata.unidata_version
path = os.path.join(os.path.dirname(__file__), 'tools')
fp, pathname, desc = imp.find_module('unidatadownload', [path])
try:
unidatadownload = imp.load_module('unidatadownload', fp, pathname, desc)
unidatadownload.get_unicodedata(uver, no_zip=True)
except Exception:
print(traceback.format_exc())
fail = True
finally:
fp.close()
assert not fail, "Failed to obtain unicodedata!"
return uver
|
python
|
{
"resource": ""
}
|
q13043
|
generate_unicode_table
|
train
|
def generate_unicode_table():
"""Generate the Unicode table for the given Python version."""
uver = get_unicodedata()
fail = False
path = os.path.join(os.path.dirname(__file__), 'tools')
fp, pathname, desc = imp.find_module('unipropgen', [path])
try:
unipropgen = imp.load_module('unipropgen', fp, pathname, desc)
unipropgen.build_tables(
os.path.join(
os.path.dirname(__file__),
'backrefs', 'uniprops', 'unidata'
),
uver
)
except Exception:
print(traceback.format_exc())
fail = True
finally:
fp.close()
assert not fail, "Failed uniprops.py generation!"
|
python
|
{
"resource": ""
}
|
q13044
|
_cached_replace_compile
|
train
|
def _cached_replace_compile(pattern, repl, flags, pattern_type):
"""Cached replace compile."""
return _bregex_parse._ReplaceParser().parse(pattern, repl, bool(flags & FORMAT))
|
python
|
{
"resource": ""
}
|
q13045
|
_get_cache_size
|
train
|
def _get_cache_size(replace=False):
"""Get size of cache."""
if not replace:
size = _cached_search_compile.cache_info().currsize
else:
size = _cached_replace_compile.cache_info().currsize
return size
|
python
|
{
"resource": ""
}
|
q13046
|
_apply_replace_backrefs
|
train
|
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m)
|
python
|
{
"resource": ""
}
|
q13047
|
_assert_expandable
|
train
|
def _assert_expandable(repl, use_format=False):
"""Check if replace template is expandable."""
if isinstance(repl, ReplaceTemplate):
if repl.use_format != use_format:
if use_format:
raise ValueError("Replace not compiled as a format replace")
else:
raise ValueError("Replace should not be compiled as a format replace!")
elif not isinstance(repl, (str, bytes)):
raise TypeError("Expected string, buffer, or compiled replace!")
|
python
|
{
"resource": ""
}
|
q13048
|
compile_search
|
train
|
def compile_search(pattern, flags=0, **kwargs):
"""Compile with extended search references."""
return _regex.compile(_apply_search_backrefs(pattern, flags), flags, **kwargs)
|
python
|
{
"resource": ""
}
|
q13049
|
expandf
|
train
|
def expandf(m, format): # noqa A002
"""Expand the string using the format replace pattern or function."""
_assert_expandable(format, True)
return _apply_replace_backrefs(m, format, flags=FORMAT)
|
python
|
{
"resource": ""
}
|
q13050
|
subfn
|
train
|
def subfn(pattern, format, string, *args, **kwargs): # noqa A002
"""Wrapper for `subfn`."""
flags = args[4] if len(args) > 4 else kwargs.get('flags', 0)
is_replace = _is_replace(format)
is_string = isinstance(format, (str, bytes))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return _regex.subn(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string,
*args, **kwargs
)
|
python
|
{
"resource": ""
}
|
q13051
|
Bregex._auto_compile
|
train
|
def _auto_compile(self, template, use_format=False):
"""Compile replacements."""
is_replace = _is_replace(template)
is_string = isinstance(template, (str, bytes))
if is_replace and use_format != template.use_format:
raise ValueError("Compiled replace cannot be a format object!")
if is_replace or (is_string and self.auto_compile):
return self.compile(template, (FORMAT if use_format and not is_replace else 0))
elif is_string and use_format:
# Reject an attempt to run format replace when auto-compiling
# of template strings has been disabled and we are using a
# template string.
raise AttributeError('Format replaces cannot be called without compiling replace template!')
else:
return template
|
python
|
{
"resource": ""
}
|
q13052
|
Bregex.search
|
train
|
def search(self, string, *args, **kwargs):
"""Apply `search`."""
return self._pattern.search(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13053
|
Bregex.match
|
train
|
def match(self, string, *args, **kwargs):
"""Apply `match`."""
return self._pattern.match(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13054
|
Bregex.fullmatch
|
train
|
def fullmatch(self, string, *args, **kwargs):
"""Apply `fullmatch`."""
return self._pattern.fullmatch(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13055
|
Bregex.split
|
train
|
def split(self, string, *args, **kwargs):
"""Apply `split`."""
return self._pattern.split(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13056
|
Bregex.splititer
|
train
|
def splititer(self, string, *args, **kwargs):
"""Apply `splititer`."""
return self._pattern.splititer(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13057
|
Bregex.findall
|
train
|
def findall(self, string, *args, **kwargs):
"""Apply `findall`."""
return self._pattern.findall(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13058
|
Bregex.finditer
|
train
|
def finditer(self, string, *args, **kwargs):
"""Apply `finditer`."""
return self._pattern.finditer(string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13059
|
Bregex.sub
|
train
|
def sub(self, repl, string, *args, **kwargs):
"""Apply `sub`."""
return self._pattern.sub(self._auto_compile(repl), string, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13060
|
_SearchParser.get_posix
|
train
|
def get_posix(self, i):
"""Get POSIX."""
index = i.index
value = ['[']
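    # Parse a POSIX bracket class of the form [:name:] or [:^name:]; on any
    # failure, rewind the iterator and return None.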
try:
c = next(i)
if c != ':':
raise ValueError('Not a valid property!')
else:
value.append(c)
c = next(i)
if c == '^':
value.append(c)
c = next(i)
while c != ':':
if c not in _PROPERTY:
raise ValueError('Not a valid property!')
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
value.append(c)
c = next(i)
if c != ']' or not value:
raise ValueError('Unmatched ]')
value.append(c)
except Exception:
i.rewind(i.index - index)
value = []
return ''.join(value) if value else None
|
python
|
{
"resource": ""
}
|
q13061
|
_ReplaceParser.regex_parse_template
|
train
|
def regex_parse_template(self, template, pattern):
"""
Parse template for the regex module.
Do NOT edit the literal list returned by
_compile_replacement_helper as you will edit
the original cached value. Copy the values
instead.
"""
groups = []
literals = []
replacements = _compile_replacement_helper(pattern, template)
count = 0
for part in replacements:
if isinstance(part, int):
literals.append(None)
groups.append((count, part))
else:
literals.append(part)
count += 1
return groups, literals
|
python
|
{
"resource": ""
}
|
q13062
|
ReplaceTemplate.expand
|
train
|
def expand(self, m):
"""Using the template, expand the string."""
if m is None:
raise ValueError("Match is None!")
sep = m.string[:0]
if isinstance(sep, bytes) != self._bytes:
raise TypeError('Match string type does not match expander string type!')
text = []
# Expand string
for x in range(0, len(self.literals)):
index = x
l = self.literals[x]
if l is None:
g_index = self._get_group_index(index)
span_case, single_case, capture = self._get_group_attributes(index)
if not self.use_format:
# Non format replace
try:
l = m.group(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % capture)
else:
# String format replace
try:
obj = m.captures(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % g_index)
l = _util.format_string(m, obj, capture, self._bytes)
if span_case is not None:
if span_case == _LOWER:
l = l.lower()
else:
l = l.upper()
if single_case is not None:
if single_case == _LOWER:
l = l[0:1].lower() + l[1:]
else:
l = l[0:1].upper() + l[1:]
text.append(l)
return sep.join(text)
|
python
|
{
"resource": ""
}
|
q13063
|
uniformat
|
train
|
def uniformat(value):
"""Convert a Unicode char."""
if value in GROUP_ESCAPES:
# Escape characters that are (or will be in the future) problematic
c = "\\x%02x\\x%02x" % (0x5c, value)
elif value <= 0xFF:
c = "\\x%02x" % value
elif value <= 0xFFFF:
c = "\\u%04x" % value
else:
c = "\\U%08x" % value
return c
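# Illustrative: uniformat(0x41) -> '\\x41', uniformat(0x2460) -> '\\u2460',
# uniformat(0x1F600) -> '\\U0001f600'; members of GROUP_ESCAPES are prefixed
# with a literal backslash ('\\x5c').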
|
python
|
{
"resource": ""
}
|
q13064
|
create_span
|
train
|
def create_span(unirange, is_bytes=False):
"""Clamp the Unicode range."""
if len(unirange) < 2:
unirange.append(unirange[0])
if is_bytes:
if unirange[0] > MAXASCII:
return None
if unirange[1] > MAXASCII:
unirange[1] = MAXASCII
return [x for x in range(unirange[0], unirange[1] + 1)]
|
python
|
{
"resource": ""
}
|
q13065
|
not_explicitly_defined
|
train
|
def not_explicitly_defined(table, name, is_bytes=False):
"""Compose a table with the specified entry name of values not explicitly defined."""
all_chars = ALL_ASCII if is_bytes else ALL_CHARS
s = set()
for k, v in table.items():
s.update(v)
if name in table:
table[name] = list(set(table[name]) | (all_chars - s))
else:
table[name] = list(all_chars - s)
|
python
|
{
"resource": ""
}
|
q13066
|
char2range
|
train
|
def char2range(d, is_bytes=False, invert=True):
"""Convert the characters in the dict to a range in string form."""
fmt = bytesformat if is_bytes else uniformat
maxrange = MAXASCII if is_bytes else MAXUNICODE
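    # Walk each sorted codepoint list, collapsing consecutive runs into
    # 'first-last' range strings; when invert is set, the gaps between runs
    # are collected into the complementary '^'-prefixed entry.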
for k1 in sorted(d.keys()):
v1 = d[k1]
if not isinstance(v1, list):
char2range(v1, is_bytes=is_bytes, invert=invert)
else:
inverted = k1.startswith('^')
v1.sort()
last = None
first = None
ilast = None
ifirst = None
v2 = []
iv2 = []
if v1 and v1[0] != 0:
ifirst = 0
for i in v1:
if first is None:
first = i
last = i
elif i == last + 1:
last = i
elif first is not None:
if first == last:
v2.append(fmt(first))
else:
v2.append("%s-%s" % (fmt(first), fmt(last)))
if invert and ifirst is not None:
ilast = first - 1
if ifirst == ilast:
iv2.append(fmt(ifirst))
else:
iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
ifirst = last + 1
first = i
last = i
if not v1:
iv2 = ["%s-%s" % (fmt(0), fmt(maxrange))]
elif first is not None:
if first == last:
v2.append(fmt(first))
else:
v2.append("%s-%s" % (fmt(first), fmt(last)))
if invert and ifirst is not None:
ilast = first - 1
if ifirst == ilast:
iv2.append(fmt(ifirst))
else:
iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
ifirst = last + 1
if invert and ifirst <= maxrange:
ilast = maxrange
if ifirst == ilast:
iv2.append(fmt(ifirst))
else:
iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
d[k1] = ''.join(v2)
if invert:
d[k1[1:] if inverted else '^' + k1] = ''.join(iv2)
|
python
|
{
"resource": ""
}
|
q13067
|
gen_ccc
|
train
|
def gen_ccc(output, ascii_props=False, append=False, prefix=""):
"""Generate `canonical combining class` property."""
obj = {}
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedCombiningClass.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
if span is None:
continue
name = format_name(data[1])
if name not in obj:
obj[name] = []
obj[name].extend(span)
for x in range(0, 256):
key = str(x)
if key not in obj:
obj[key] = []
for name in list(obj.keys()):
s = set(obj[name])
obj[name] = sorted(s)
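    # The default canonical combining class is 0, so characters not listed in
    # the data file are folded into the '0' bucket.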
not_explicitly_defined(obj, '0', is_bytes=ascii_props)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
# Write out the Unicode properties
f.write('%s_canonical_combining_class = {\n' % prefix)
count = len(obj) - 1
i = 0
for k1, v1 in sorted(obj.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
|
python
|
{
"resource": ""
}
|
q13068
|
gen_age
|
train
|
def gen_age(output, ascii_props=False, append=False, prefix=""):
"""Generate `age` property."""
obj = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
name = format_name(data[1])
if name not in obj:
obj[name] = []
if span is None:
continue
obj[name].extend(span)
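    # Characters not covered by any DerivedAge entry are bucketed under 'na'
    # (unassigned).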
unassigned = set()
for x in obj.values():
unassigned |= set(x)
obj['na'] = list(all_chars - unassigned)
for name in list(obj.keys()):
s = set(obj[name])
obj[name] = sorted(s)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
# Write out the Unicode properties
f.write('%s_age = {\n' % prefix)
count = len(obj) - 1
i = 0
for k1, v1 in sorted(obj.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
|
python
|
{
"resource": ""
}
|
q13069
|
gen_nf_quick_check
|
train
|
def gen_nf_quick_check(output, ascii_props=False, append=False, prefix=""):
"""Generate quick check properties."""
categories = []
nf = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
file_name = os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedNormalizationProps.txt')
with codecs.open(file_name, 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
if not data[1].strip().lower().endswith('_qc'):
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
if span is None:
continue
name = format_name(data[1][:-3] + 'quickcheck')
subvalue = format_name(data[2])
if name not in nf:
nf[name] = {}
categories.append(name)
if subvalue not in nf[name]:
nf[name][subvalue] = []
nf[name][subvalue].extend(span)
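    # The data file only lists 'no' and 'maybe' codepoints; 'y' (yes) is
    # derived as the complement of everything listed.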
for k1, v1 in nf.items():
temp = set()
for k2 in list(v1.keys()):
temp |= set(v1[k2])
v1['y'] = list(all_chars - temp)
for k1, v1 in nf.items():
for name in list(v1.keys()):
s = set(nf[k1][name])
nf[k1][name] = sorted(s)
# Convert characters values to ranges
char2range(nf, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
for key, value in sorted(nf.items()):
# Write out the Unicode properties
f.write('%s_%s = {\n' % (prefix, key.replace('quickcheck', '_quick_check')))
count = len(value) - 1
i = 0
for k1, v1 in sorted(value.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
return categories
|
python
|
{
"resource": ""
}
|
q13070
|
gen_bidi
|
train
|
def gen_bidi(output, ascii_props=False, append=False, prefix=""):
"""Generate `bidi class` property."""
bidi_class = {}
max_range = MAXASCII if ascii_props else MAXUNICODE
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'UnicodeData.txt'), 'r', 'utf-8') as uf:
for line in uf:
data = line.strip().split(';')
if data:
bidi = data[4].strip().lower()
if not bidi:
continue
value = int(data[0].strip(), 16)
if bidi not in bidi_class:
bidi_class[bidi] = []
if value > max_range:
continue
bidi_class[bidi].append(value)
for name in list(bidi_class.keys()):
s = set(bidi_class[name])
bidi_class[name] = sorted(s)
# Convert characters values to ranges
char2range(bidi_class, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
f.write('%s_bidi_classes = {\n' % prefix)
count = len(bidi_class) - 1
i = 0
for k1, v1 in sorted(bidi_class.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
|
python
|
{
"resource": ""
}
|
q13071
|
gen_uposix
|
train
|
def gen_uposix(table, posix_table):
"""Generate the posix table and write out to file."""
# `Alnum: [\p{L&}\p{Nd}]`
s = set(table['l']['c'] + table['n']['d'])
posix_table["posixalnum"] = list(s)
# `Alpha: [\p{L&}]`
s = set(table['l']['c'])
posix_table["posixalpha"] = list(s)
# `ASCII: [\x00-\x7F]`
s = set([x for x in range(0, 0x7F + 1)])
posix_table["posixascii"] = list(s)
# `Blank: [\p{Zs}\t]`
s = set(table['z']['s'] + [0x09])
posix_table["posixblank"] = list(s)
# `Cntrl: [\p{Cc}]`
s = set(table['c']['c'])
posix_table["posixcntrl"] = list(s)
# `Digit: [\p{Nd}]`
s = set(table['n']['d'])
posix_table["posixdigit"] = list(s)
# `Graph: [^\p{Z}\p{C}]`
s = set()
for table_name in ('z', 'c'):
for sub_table_name in table[table_name]:
if not sub_table_name.startswith('^'):
s |= set(table[table_name][sub_table_name])
posix_table["^posixgraph"] = list(s)
# `Lower: [\p{Ll}]`
s = set(table['l']['l'])
posix_table["posixlower"] = list(s)
# `Print: [\P{C}]`
s = set()
for table_name in ('c',):
for sub_table_name in table[table_name]:
if not sub_table_name.startswith('^'):
s |= set(table[table_name][sub_table_name])
posix_table["^posixprint"] = list(s)
# `Punct: [\p{P}\p{S}]`
s = set()
for table_name in ('p', 's'):
for sub_table_name in table[table_name]:
if not sub_table_name.startswith('^'):
s |= set(table[table_name][sub_table_name])
posix_table["posixpunct"] = list(s)
# `Space: [\p{Z}\t\r\n\v\f]`
s = set()
for table_name in ('z',):
for sub_table_name in table[table_name]:
if not sub_table_name.startswith('^'):
s |= set(table[table_name][sub_table_name])
s |= set([x for x in range(0x09, 0x0e)])
posix_table["posixspace"] = list(s)
# `Upper: [\p{Lu}]`
s = set(table['l']['u'])
posix_table["posixupper"] = list(s)
# `XDigit: [A-Fa-f0-9]`
s = set([x for x in range(0x30, 0x39 + 1)])
s |= set([x for x in range(0x41, 0x46 + 1)])
s |= set([x for x in range(0x61, 0x66 + 1)])
posix_table["posixxdigit"] = list(s)
|
python
|
{
"resource": ""
}
|
q13072
|
set_version
|
train
|
def set_version(version):
"""Set version."""
global UNIVERSION
global UNIVERSION_INFO
if version is None:
version = unicodedata.unidata_version
UNIVERSION = version
UNIVERSION_INFO = tuple([int(x) for x in UNIVERSION.split('.')])
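# Illustrative: set_version('12.1.0') leaves UNIVERSION_INFO == (12, 1, 0);
# passing None falls back to the stdlib's unicodedata.unidata_version.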
|
python
|
{
"resource": ""
}
|
q13073
|
get_posix_property
|
train
|
def get_posix_property(value, mode=POSIX):
"""Retrieve the posix category."""
if mode == POSIX_BYTES:
return unidata.ascii_posix_properties[value]
elif mode == POSIX_UNICODE:
return unidata.unicode_binary[
('^posix' + value[1:]) if value.startswith('^') else ('posix' + value)
]
else:
return unidata.unicode_posix_properties[value]
|
python
|
{
"resource": ""
}
|
q13074
|
get_gc_property
|
train
|
def get_gc_property(value, is_bytes=False):
"""Get `GC` property."""
obj = unidata.ascii_properties if is_bytes else unidata.unicode_properties
if value.startswith('^'):
negate = True
value = value[1:]
else:
negate = False
value = unidata.unicode_alias['generalcategory'].get(value, value)
assert 1 <= len(value) <= 2, 'Invalid property!'
if not negate:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], None)
value = ''.join(
[v for k, v in obj.get(p1, {}).items() if not k.startswith('^')]
) if p2 is None else obj.get(p1, {}).get(p2, '')
else:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], '')
value = obj.get(p1, {}).get('^' + p2, '')
assert value, 'Invalid property!'
return value
|
python
|
{
"resource": ""
}
|
q13075
|
get_binary_property
|
train
|
def get_binary_property(value, is_bytes=False):
"""Get `BINARY` property."""
obj = unidata.ascii_binary if is_bytes else unidata.unicode_binary
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['binary'].get(negated, negated)
else:
value = unidata.unicode_alias['binary'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13076
|
get_canonical_combining_class_property
|
train
|
def get_canonical_combining_class_property(value, is_bytes=False):
"""Get `CANONICAL COMBINING CLASS` property."""
obj = unidata.ascii_canonical_combining_class if is_bytes else unidata.unicode_canonical_combining_class
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['canonicalcombiningclass'].get(negated, negated)
else:
value = unidata.unicode_alias['canonicalcombiningclass'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13077
|
get_east_asian_width_property
|
train
|
def get_east_asian_width_property(value, is_bytes=False):
"""Get `EAST ASIAN WIDTH` property."""
obj = unidata.ascii_east_asian_width if is_bytes else unidata.unicode_east_asian_width
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['eastasianwidth'].get(negated, negated)
else:
value = unidata.unicode_alias['eastasianwidth'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13078
|
get_grapheme_cluster_break_property
|
train
|
def get_grapheme_cluster_break_property(value, is_bytes=False):
"""Get `GRAPHEME CLUSTER BREAK` property."""
obj = unidata.ascii_grapheme_cluster_break if is_bytes else unidata.unicode_grapheme_cluster_break
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['graphemeclusterbreak'].get(negated, negated)
else:
value = unidata.unicode_alias['graphemeclusterbreak'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13079
|
get_line_break_property
|
train
|
def get_line_break_property(value, is_bytes=False):
"""Get `LINE BREAK` property."""
obj = unidata.ascii_line_break if is_bytes else unidata.unicode_line_break
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['linebreak'].get(negated, negated)
else:
value = unidata.unicode_alias['linebreak'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13080
|
get_sentence_break_property
|
train
|
def get_sentence_break_property(value, is_bytes=False):
"""Get `SENTENCE BREAK` property."""
obj = unidata.ascii_sentence_break if is_bytes else unidata.unicode_sentence_break
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['sentencebreak'].get(negated, negated)
else:
value = unidata.unicode_alias['sentencebreak'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13081
|
get_word_break_property
|
train
|
def get_word_break_property(value, is_bytes=False):
"""Get `WORD BREAK` property."""
obj = unidata.ascii_word_break if is_bytes else unidata.unicode_word_break
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['wordbreak'].get(negated, negated)
else:
value = unidata.unicode_alias['wordbreak'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13082
|
get_hangul_syllable_type_property
|
train
|
def get_hangul_syllable_type_property(value, is_bytes=False):
"""Get `HANGUL SYLLABLE TYPE` property."""
obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated)
else:
value = unidata.unicode_alias['hangulsyllabletype'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13083
|
get_indic_syllabic_category_property
|
train
|
def get_indic_syllabic_category_property(value, is_bytes=False):
"""Get `INDIC SYLLABIC CATEGORY` property."""
obj = unidata.ascii_indic_syllabic_category if is_bytes else unidata.unicode_indic_syllabic_category
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['indicsyllabiccategory'].get(negated, negated)
else:
value = unidata.unicode_alias['indicsyllabiccategory'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13084
|
get_decomposition_type_property
|
train
|
def get_decomposition_type_property(value, is_bytes=False):
"""Get `DECOMPOSITION TYPE` property."""
obj = unidata.ascii_decomposition_type if is_bytes else unidata.unicode_decomposition_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['decompositiontype'].get(negated, negated)
else:
value = unidata.unicode_alias['decompositiontype'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13085
|
get_nfc_quick_check_property
|
train
|
def get_nfc_quick_check_property(value, is_bytes=False):
"""Get `NFC QUICK CHECK` property."""
obj = unidata.ascii_nfc_quick_check if is_bytes else unidata.unicode_nfc_quick_check
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['nfcquickcheck'].get(negated, negated)
else:
value = unidata.unicode_alias['nfcquickcheck'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13086
|
get_nfd_quick_check_property
|
train
|
def get_nfd_quick_check_property(value, is_bytes=False):
"""Get `NFD QUICK CHECK` property."""
obj = unidata.ascii_nfd_quick_check if is_bytes else unidata.unicode_nfd_quick_check
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['nfdquickcheck'].get(negated, negated)
else:
value = unidata.unicode_alias['nfdquickcheck'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13087
|
get_nfkc_quick_check_property
|
train
|
def get_nfkc_quick_check_property(value, is_bytes=False):
"""Get `NFKC QUICK CHECK` property."""
obj = unidata.ascii_nfkc_quick_check if is_bytes else unidata.unicode_nfkc_quick_check
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['nfkcquickcheck'].get(negated, negated)
else:
value = unidata.unicode_alias['nfkcquickcheck'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13088
|
get_nfkd_quick_check_property
|
train
|
def get_nfkd_quick_check_property(value, is_bytes=False):
"""Get `NFKD QUICK CHECK` property."""
obj = unidata.ascii_nfkd_quick_check if is_bytes else unidata.unicode_nfkd_quick_check
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['nfkdquickcheck'].get(negated, negated)
else:
value = unidata.unicode_alias['nfkdquickcheck'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13089
|
get_numeric_type_property
|
train
|
def get_numeric_type_property(value, is_bytes=False):
"""Get `NUMERIC TYPE` property."""
obj = unidata.ascii_numeric_type if is_bytes else unidata.unicode_numeric_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['numerictype'].get(negated, negated)
else:
value = unidata.unicode_alias['numerictype'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13090
|
get_numeric_value_property
|
train
|
def get_numeric_value_property(value, is_bytes=False):
"""Get `NUMERIC VALUE` property."""
obj = unidata.ascii_numeric_values if is_bytes else unidata.unicode_numeric_values
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['numericvalue'].get(negated, negated)
else:
value = unidata.unicode_alias['numericvalue'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13091
|
get_age_property
|
train
|
def get_age_property(value, is_bytes=False):
"""Get `AGE` property."""
obj = unidata.ascii_age if is_bytes else unidata.unicode_age
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['age'].get(negated, negated)
else:
value = unidata.unicode_alias['age'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13092
|
get_joining_type_property
|
train
|
def get_joining_type_property(value, is_bytes=False):
"""Get `JOINING TYPE` property."""
obj = unidata.ascii_joining_type if is_bytes else unidata.unicode_joining_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['joiningtype'].get(negated, negated)
else:
value = unidata.unicode_alias['joiningtype'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13093
|
get_joining_group_property
|
train
|
def get_joining_group_property(value, is_bytes=False):
"""Get `JOINING GROUP` property."""
obj = unidata.ascii_joining_group if is_bytes else unidata.unicode_joining_group
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['joininggroup'].get(negated, negated)
else:
value = unidata.unicode_alias['joininggroup'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13094
|
get_script_property
|
train
|
def get_script_property(value, is_bytes=False):
"""Get `SC` property."""
obj = unidata.ascii_scripts if is_bytes else unidata.unicode_scripts
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['script'].get(negated, negated)
else:
value = unidata.unicode_alias['script'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13095
|
get_script_extension_property
|
train
|
def get_script_extension_property(value, is_bytes=False):
"""Get `SCX` property."""
obj = unidata.ascii_script_extensions if is_bytes else unidata.unicode_script_extensions
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['script'].get(negated, negated)
else:
value = unidata.unicode_alias['script'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13096
|
get_block_property
|
train
|
def get_block_property(value, is_bytes=False):
"""Get `BLK` property."""
obj = unidata.ascii_blocks if is_bytes else unidata.unicode_blocks
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['block'].get(negated, negated)
else:
value = unidata.unicode_alias['block'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13097
|
get_bidi_property
|
train
|
def get_bidi_property(value, is_bytes=False):
"""Get `BC` property."""
obj = unidata.ascii_bidi_classes if is_bytes else unidata.unicode_bidi_classes
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['bidiclass'].get(negated, negated)
else:
value = unidata.unicode_alias['bidiclass'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13098
|
get_bidi_paired_bracket_type_property
|
train
|
def get_bidi_paired_bracket_type_property(value, is_bytes=False):
"""Get `BPT` property."""
obj = unidata.ascii_bidi_paired_bracket_type if is_bytes else unidata.unicode_bidi_paired_bracket_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['bidipairedbrackettype'].get(negated, negated)
else:
value = unidata.unicode_alias['bidipairedbrackettype'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|
q13099
|
get_vertical_orientation_property
|
train
|
def get_vertical_orientation_property(value, is_bytes=False):
"""Get `VO` property."""
obj = unidata.ascii_vertical_orientation if is_bytes else unidata.unicode_vertical_orientation
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['verticalorientation'].get(negated, negated)
else:
value = unidata.unicode_alias['verticalorientation'].get(value, value)
return obj[value]
|
python
|
{
"resource": ""
}
|