_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q34600 | _reldiff | train | def _reldiff(a, b):
"""
Computes the relative difference of two floating-point numbers
rel = abs(a-b)/min(abs(a), abs(b))
If a == 0 and b == 0, then 0.0 is returned
Otherwise if a or b is 0.0, inf is returned.
"""
a = float(a)
b = float(b)
aa = abs(a)
ba = abs(b)
if a == 0.0 and b == 0.0:
return 0.0
elif a == 0 or b == 0.0:
return float('inf')
return abs(a - b) / min(aa, ba) | python | {
"resource": ""
} |
q34601 | _compare_keys | train | def _compare_keys(element1, element2, key, compare_func, *args):
"""
Compares a specific key between two elements of a basis set
If the key exists in one element but not the other, False is returned.
If the key exists in neither element, True is returned.
Parameters
----------
element1 : dict
Basis info for an element
element2 : dict
Basis info for another element
key : string
Key to compare in the two elements
compare_func : function
Function that returns True if the data under the key is equivalent
in both elements
args
Additional arguments to be passed to compare_Func
"""
if key in element1 and key in element2:
if not compare_func(element1[key], element2[key], *args):
return False
elif key in element1 or key in element2:
return False
return True | python | {
"resource": ""
} |
def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
    '''
    Determine if a list of electron shells is a subset of another.

    Returns True when every shell in `subset` has an (approximately)
    equivalent shell in `superset`. Exponents/coefficients are compared
    within the given relative tolerance.

    If compare_meta is True, the metadata is also compared for exact equality.
    '''
    # Every candidate shell must match at least one shell in the superset
    return all(
        any(compare_electron_shells(candidate, reference, compare_meta, rel_tol)
            for reference in superset)
        for candidate in subset)
"resource": ""
} |
def ecp_pots_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
    '''
    Determine if a list of ECP potentials is a subset of another.

    Returns True when every potential in `subset` has an (approximately)
    equivalent potential in `superset`. Exponents/coefficients are
    compared within the given relative tolerance.

    If compare_meta is True, the metadata is also compared for exact equality.
    '''
    # Every candidate potential must match at least one potential in the superset
    return all(
        any(compare_ecp_pots(candidate, reference, compare_meta, rel_tol)
            for reference in superset)
        for candidate in subset)
"resource": ""
} |
def compare_elements(element1,
                     element2,
                     compare_electron_shells_meta=False,
                     compare_ecp_pots_meta=False,
                     compare_meta=False,
                     rel_tol=0.0):
    '''
    Determine if the basis information for two elements is the same.

    Exponents/coefficients are compared using a tolerance.

    Parameters
    ----------
    element1 : dict
        Basis information for an element
    element2 : dict
        Basis information for another element
    compare_electron_shells_meta : bool
        Compare the metadata of electron shells
    compare_ecp_pots_meta : bool
        Compare the metadata of ECP potentials
    compare_meta : bool
        Compare the overall element metadata
    rel_tol : float
        Maximum relative error that is considered equal
    '''
    # (key, comparison function, extra arguments) for every key to check
    checks = [
        ('electron_shells', electron_shells_are_equal, (compare_electron_shells_meta, rel_tol)),
        ('ecp_potentials', ecp_pots_are_equal, (compare_ecp_pots_meta, rel_tol)),
        ('ecp_electrons', operator.eq, ()),
    ]
    if compare_meta:
        checks.append(('references', operator.eq, ()))

    for key, func, extra in checks:
        if not _compare_keys(element1, element2, key, func, *extra):
            return False
    return True
"resource": ""
} |
def compare_basis(bs1,
                  bs2,
                  compare_electron_shells_meta=False,
                  compare_ecp_pots_meta=False,
                  compare_elements_meta=False,
                  compare_meta=False,
                  rel_tol=0.0):
    '''
    Determine if two basis set dictionaries are the same.

    bs1 : dict
        Full basis information
    bs2 : dict
        Full basis information
    compare_electron_shells_meta : bool
        Compare the metadata of electron shells
    compare_ecp_pots_meta : bool
        Compare the metadata of ECP potentials
    compare_elements_meta : bool
        Compare the overall element metadata
    compare_meta: bool
        Compare the metadata for the basis set (name, description, etc)
    rel_tol : float
        Maximum relative error that is considered equal
    '''
    # Both basis sets must cover exactly the same elements
    elements1 = sorted(bs1['elements'].keys())
    elements2 = sorted(bs2['elements'].keys())
    if elements1 != elements2:
        return False

    # Compare the data for each element
    for el in elements1:
        same = compare_elements(bs1['elements'][el],
                                bs2['elements'][el],
                                compare_electron_shells_meta=compare_electron_shells_meta,
                                compare_ecp_pots_meta=compare_ecp_pots_meta,
                                compare_meta=compare_elements_meta,
                                rel_tol=rel_tol)
        if not same:
            print("Element failed:", el)
            return False

    if compare_meta:
        meta_keys = ('name', 'family', 'description', 'revision_description', 'role', 'auxiliaries')
        return all(_compare_keys(bs1, bs2, k, operator.eq) for k in meta_keys)
    return True
"resource": ""
} |
def create_metadata_file(output_path, data_dir):
    '''Creates a METADATA.json file from a data directory

    The file is written to output_path

    Parameters
    ----------
    output_path : str
        Path of the METADATA.json file to write (overwritten if it exists)
    data_dir : str
        Directory containing the basis set data files
    '''
    # Relative path to all (BASIS).metadata.json files
    meta_filelist, table_filelist, _, _ = get_all_filelist(data_dir)

    metadata = {}
    for meta_file_relpath in meta_filelist:
        # Read in the metadata for a single basis set
        meta_file_path = os.path.join(data_dir, meta_file_relpath)
        bs_metadata = read_json_basis(meta_file_path)

        # Base of the filename for table basis sets
        # Basename is something like '6-31G.', including the last period
        base_relpath, meta_filename = os.path.split(meta_file_relpath)
        base_filename = meta_filename.split('.')[0] + '.'

        # All the table files that correspond to this metadata file
        # (relative to data_dir)
        this_filelist = [
            x for x in table_filelist
            if os.path.dirname(x) == base_relpath and os.path.basename(x).startswith(base_filename)
        ]

        # The 'versions' dict that will go into the metadata
        version_info = {}

        # Make sure function types are the same
        function_types = None

        # For each table basis, compose it
        for table_file in this_filelist:
            # Obtain just the filename of the table basis
            table_filename = os.path.basename(table_file)

            # Obtain the base filename and version from the filename
            # The base filename is the part before the first period
            # (filebase.ver.table.json)
            table_filebase, ver, _, _ = table_filename.split('.')

            # Fully compose the basis set from components
            bs = compose_table_basis(table_file, data_dir)

            # Elements for which this basis is defined
            defined_elements = sorted(list(bs['elements'].keys()), key=lambda x: int(x))

            # Determine the types of functions contained in the basis
            # (gto, ecp, etc)
            if function_types is None:
                function_types = bs['function_types']
            elif function_types != bs['function_types']:
                raise RuntimeError("Differing function types across versions for " + base_filename)

            # Create the metadata for this specific version
            # yapf: disable
            version_info[ver] = { 'file_relpath': table_file,
                                  'revdesc': bs['revision_description'],
                                  'elements': defined_elements
                                }
            # yapf: enable

        # Sort the version dicts
        version_info = dict(sorted(version_info.items()))

        # Find the maximum version for this basis
        # NOTE(review): versions are compared as strings here; confirm that
        # version keys sort correctly as strings (e.g. '10' vs '9')
        latest_ver = max(version_info.keys())

        # Create the common metadata for this basis set
        # display_name and other_names are placeholders to keep order
        # NOTE: 'bs' is whatever was composed in the last loop iteration;
        # description/family/role are presumably version-independent - TODO confirm
        # yapf: disable
        common_md = { 'display_name': None,
                      'other_names': None,
                      'description': bs['description'],
                      'latest_version': latest_ver,
                      'basename': base_filename[:-1],  # Strip off that trailing period
                      'relpath': base_relpath,
                      'family': bs['family'],
                      'role': bs['role'],
                      'functiontypes': function_types,
                      'auxiliaries': bs['auxiliaries'],
                      'versions': version_info }
        # yapf: enable

        # Loop through all the common names, translate them, and then add the data
        for bs_name in bs_metadata['names']:
            tr_name = transform_basis_name(bs_name)
            if tr_name in metadata:
                raise RuntimeError("Duplicate basis set name: " + tr_name)

            # Create a new entry, with all the common metadata
            # Also, store the other names for this basis
            other_names = bs_metadata['names'].copy()
            other_names.remove(bs_name)

            metadata[tr_name] = common_md.copy()
            metadata[tr_name]['display_name'] = bs_name
            metadata[tr_name]['other_names'] = other_names

    # Write out the metadata (sorted by transformed basis name)
    metadata = dict(sorted(metadata.items()))
    _write_plain_json(output_path, metadata)
"resource": ""
} |
def write_txt(refs):
    '''Converts references to plain text format

    Parameters
    ----------
    refs : list
        Reference blocks; each entry holds 'elements' and a list of
        'reference_info' dicts.

    Returns
    -------
    str
        The library citation followed by the basis set references,
        formatted as plain text.
    '''
    full_str = '\n'
    lib_citation_desc, lib_citations = get_library_citation()

    # Add the refs for the library at the top
    full_str += '*' * 80 + '\n'
    full_str += lib_citation_desc
    full_str += '*' * 80 + '\n'

    for r in lib_citations.values():
        ref_txt = reference_text(r)
        ref_txt = textwrap.indent(ref_txt, ' ' * 4)
        full_str += '{}\n\n'.format(ref_txt)

    full_str += '*' * 80 + '\n'
    full_str += "References for the basis set\n"
    full_str += '*' * 80 + '\n'

    for ref in refs:
        # Header line: compact list of elements this reference covers
        full_str += '{}\n'.format(compact_elements(ref['elements']))

        for ri in ref['reference_info']:
            full_str += ' ## {}\n'.format(ri['reference_description'])

            refdata = ri['reference_data']

            if len(refdata) == 0:
                full_str += ' (...no reference...)\n\n'

            # NOTE(review): this unpacks two values per entry while iterating
            # refdata directly - if refdata is a dict this should likely be
            # refdata.items(); verify against the type of 'reference_data'
            for k, r in refdata:
                ref_txt = reference_text(r)
                ref_txt = textwrap.indent(ref_txt, ' ' * 4)
                full_str += '{}\n\n'.format(ref_txt)

    return full_str
"resource": ""
} |
def diff_basis_dict(left_list, right_list):
    '''
    Compute the difference between two sets of basis set dictionaries.

    The result is a list of dictionaries that correspond to each dictionary in
    `left_list`. Each resulting dictionary will contain only the elements/shells
    that exist in that entry and not in any of the dictionaries in `right_list`.

    This only works on the shell level, and will only subtract entire shells
    that are identical. ECP potentials are not affected.

    The return value contains deep copies of the input data.

    Parameters
    ----------
    left_list : list of dict
        Dictionaries to use as the base
    right_list : list of dict
        Dictionaries of basis data to subtract from each dictionary of `left_list`

    Returns
    ----------
    list
        Each object in `left_list` containing data that does not appear in `right_list`
    '''
    ret = []
    for bs1 in left_list:
        # Deep copy so the caller's data is never mutated
        res = copy.deepcopy(bs1)
        for bs2 in right_list:
            for el in res['elements'].keys():
                if el not in bs2['elements']:
                    continue  # Element only exists in left

                eldata1 = res['elements'][el]
                eldata2 = bs2['elements'][el]
                s1 = eldata1['electron_shells']
                s2 = eldata2['electron_shells']
                eldata1['electron_shells'] = subtract_electron_shells(s1, s2)

        # Remove any elements left with no shells at all
        res['elements'] = {k: v for k, v in res['elements'].items() if len(v['electron_shells']) > 0}
        ret.append(res)

    return ret
"resource": ""
} |
def diff_json_files(left_files, right_files):
    '''
    Compute the difference between two sets of basis set JSON files.

    For every file in `left_files`, a new file with the same name plus a
    `.diff` suffix is written containing only the elements/shells that do
    not appear in any of `right_files`. Existing `.diff` files are
    overwritten.

    This only works on the shell level, and will only subtract entire shells
    that are identical. ECP potentials are not affected.

    Parameters
    ----------
    left_files : list of str
        Paths to JSON files to use as the base
    right_files : list of str
        Paths to JSON files to subtract from each file of `left_files`

    Returns
    ----------
    None
    '''
    minuend = [fileio.read_json_basis(p) for p in left_files]
    subtrahend = [fileio.read_json_basis(p) for p in right_files]

    # Write each difference next to its source file, with a '.diff' suffix
    for src_path, diff_data in zip(left_files, diff_basis_dict(minuend, subtrahend)):
        fileio.write_json_basis(src_path + '.diff', diff_data)
"resource": ""
} |
def shells_difference(s1, s2):
    """
    Computes and prints the differences between two lists of shells

    If the shells contain a different number of primitives,
    or the lists are of different length, inf is returned.
    Otherwise, the maximum relative difference is returned.

    Parameters
    ----------
    s1 : list
        List of electron shell dicts
    s2 : list
        List of electron shell dicts to compare against

    Returns
    -------
    float
        Maximum relative difference found, or inf on structural mismatch
    """
    max_rdiff = 0.0
    nsh = len(s1)
    if len(s2) != nsh:
        print("Different number of shells: {} vs {}".format(len(s1), len(s2)))
        return float('inf')

    # Sort both lists so shells are compared in a canonical order
    shells1 = sort_shells(s1)
    shells2 = sort_shells(s2)

    for n in range(nsh):
        sh1 = shells1[n]
        sh2 = shells2[n]

        # Any structural mismatch short-circuits with inf
        if sh1['angular_momentum'] != sh2['angular_momentum']:
            print("Different angular momentum for shell {}".format(n))
            return float('inf')

        nprim = len(sh1['exponents'])
        if len(sh2['exponents']) != nprim:
            print("Different number of primitives for shell {}".format(n))
            return float('inf')

        ngen = len(sh1['coefficients'])
        if len(sh2['coefficients']) != ngen:
            print("Different number of general contractions for shell {}".format(n))
            return float('inf')

        for p in range(nprim):
            e1 = sh1['exponents'][p]
            e2 = sh2['exponents'][p]
            r = _reldiff(e1, e2)
            if r > 0.0:
                print(" Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
            max_rdiff = max(max_rdiff, r)

            # Compare the coefficient of each general contraction for this primitive
            for g in range(ngen):
                c1 = sh1['coefficients'][g][p]
                c2 = sh2['coefficients'][g][p]
                r = _reldiff(c1, c2)
                if r > 0.0:
                    print("Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
                max_rdiff = max(max_rdiff, r)

    print()
    print("Max relative difference for these shells: {}".format(max_rdiff))
    return max_rdiff
"resource": ""
} |
def potentials_difference(p1, p2):
    """
    Computes and prints the differences between two lists of potentials

    If the potentials contain a different number of primitives,
    or the lists are of different length, inf is returned.
    Otherwise, the maximum relative difference is returned.

    Parameters
    ----------
    p1 : list
        List of ECP potential dicts
    p2 : list
        List of ECP potential dicts to compare against

    Returns
    -------
    float
        Maximum relative difference found, or inf on structural mismatch
    """
    max_rdiff = 0.0
    # 'np' here is just the potential count (not numpy)
    np = len(p1)
    if len(p2) != np:
        print("Different number of potentials")
        return float('inf')

    # Sort both lists so potentials are compared in a canonical order
    pots1 = sort_potentials(p1)
    pots2 = sort_potentials(p2)

    for n in range(np):
        pot1 = pots1[n]
        pot2 = pots2[n]

        # Any structural mismatch short-circuits with inf
        if pot1['angular_momentum'] != pot2['angular_momentum']:
            print("Different angular momentum for potential {}".format(n))
            return float('inf')

        nprim = len(pot1['gaussian_exponents'])
        if len(pot2['gaussian_exponents']) != nprim:
            print("Different number of primitives for potential {}".format(n))
            return float('inf')

        ngen = len(pot1['coefficients'])
        if len(pot2['coefficients']) != ngen:
            print("Different number of general contractions for potential {}".format(n))
            return float('inf')

        for p in range(nprim):
            # Gaussian exponents
            e1 = pot1['gaussian_exponents'][p]
            e2 = pot2['gaussian_exponents'][p]
            r = _reldiff(e1, e2)
            if r > 0.0:
                print(" Gaussian Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
            max_rdiff = max(max_rdiff, r)

            # Powers of r
            e1 = pot1['r_exponents'][p]
            e2 = pot2['r_exponents'][p]
            r = _reldiff(e1, e2)
            if r > 0.0:
                print(" R Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
            max_rdiff = max(max_rdiff, r)

            # Coefficients of each general contraction for this primitive
            for g in range(ngen):
                c1 = pot1['coefficients'][g][p]
                c2 = pot2['coefficients'][g][p]
                r = _reldiff(c1, c2)
                if r > 0.0:
                    print(" Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
                max_rdiff = max(max_rdiff, r)

    print()
    print("Max relative difference for these potentials: {}".format(max_rdiff))
    return max_rdiff
"resource": ""
} |
def basis_comparison_report(bs1, bs2, uncontract_general=False):
    '''
    Compares two basis set dictionaries and prints a report about their differences

    Parameters
    ----------
    bs1 : dict
        Full basis information
    bs2 : dict
        Full basis information to compare against
    uncontract_general : bool
        If True, uncontract general contractions in both basis sets
        before comparing

    Returns
    -------
    bool
        True if both basis sets cover the same elements and no
        differences were found
    '''
    all_bs1 = list(bs1['elements'].keys())

    if uncontract_general:
        bs1 = manip.uncontract_general(bs1)
        bs2 = manip.uncontract_general(bs2)

    not_in_bs1 = []  # Found in bs2, not in bs1
    not_in_bs2 = all_bs1.copy()  # Found in bs1, not in bs2
    no_diff = []  # Elements for which there is no difference
    some_diff = []  # Elements that are different
    big_diff = []  # Elements that are substantially different

    for k, v in bs2['elements'].items():
        if k not in all_bs1:
            not_in_bs1.append(k)
            continue

        print()
        print("-------------------------------------")
        print(" Element ", k)
        bs1_el = bs1['elements'][k]

        max_rdiff_el = 0.0
        max_rdiff_ecp = 0.0

        # Check to make sure that neither or both have ecp/electron shells
        if 'electron_shells' in v and 'electron_shells' not in bs1_el:
            print("bs2 has electron_shells, but bs1 does not")
            max_rdiff_el = float('inf')
        if 'electron_shells' in bs1_el and 'electron_shells' not in v:
            print("bs1 has electron_shells, but bs2 does not")
            max_rdiff_el = float('inf')
        if 'ecp_potentials' in v and 'ecp_potentials' not in bs1_el:
            print("bs2 has ecp_potentials, but bs1 does not")
            max_rdiff_ecp = float('inf')
        if 'ecp_potentials' in bs1_el and 'ecp_potentials' not in v:
            print("bs1 has ecp_potentials, but bs2 does not")
            max_rdiff_ecp = float('inf')

        # Compare shell data when present in both
        if 'electron_shells' in v and 'electron_shells' in bs1_el:
            max_rdiff_el = max(max_rdiff_el, shells_difference(v['electron_shells'], bs1_el['electron_shells']))
        if 'ecp_potentials' in v and 'ecp_potentials' in bs1_el:
            nel1 = v['ecp_electrons']
            nel2 = bs1_el['ecp_electrons']
            if int(nel1) != int(nel2):
                print('Different number of electrons replaced by ECP ({} vs {})'.format(nel1, nel2))
                max_rdiff_ecp = float('inf')
            else:
                max_rdiff_ecp = max(max_rdiff_ecp, potentials_difference(v['ecp_potentials'],
                                                                         bs1_el['ecp_potentials']))

        max_rdiff = max(max_rdiff_el, max_rdiff_ecp)

        # Classify the element by how different it is
        if max_rdiff == float('inf'):
            big_diff.append(k)
        elif max_rdiff == 0.0:
            no_diff.append(k)
        else:
            some_diff.append(k)

        # Element was found in bs2, so remove it from the "missing" list
        not_in_bs2.remove(k)

    print()
    print(" Not in bs1: ", _print_list(not_in_bs1))
    print(" Not in bs2: ", _print_list(not_in_bs2))
    print(" No difference: ", _print_list(no_diff))
    print("Some difference: ", _print_list(some_diff))
    print(" BIG difference: ", _print_list(big_diff))
    print()

    return (len(not_in_bs1) == 0 and len(not_in_bs2) == 0 and len(some_diff) == 0 and len(big_diff) == 0)
"resource": ""
} |
def compare_basis_against_file(basis_name,
                               src_filepath,
                               file_type=None,
                               version=None,
                               uncontract_general=False,
                               data_dir=None):
    '''Compare a basis set in the BSE against a reference file

    Reads the reference file, fetches the named basis set from the BSE
    data, and prints a comparison report of the two.
    '''
    reference = read_formatted_basis(src_filepath, file_type)
    bse_basis = get_basis(basis_name, version=version, data_dir=data_dir)
    return basis_comparison_report(reference, bse_basis, uncontract_general=uncontract_general)
"resource": ""
} |
def _bse_cli_list_basis_sets(args):
    '''Handles the list-basis-sets subcommand

    Lists basis sets matching the substring/family/role filters,
    optionally with their descriptions.
    '''
    metadata = api.filter_basis_sets(args.substr, args.family, args.role, args.data_dir)
    if args.no_description:
        return '\n'.join(metadata.keys())
    rows = [(name, info['description']) for name, info in metadata.items()]
    return '\n'.join(format_columns(rows))
"resource": ""
} |
def _bse_cli_list_formats(args):
    '''Handles the list-formats subcommand

    Lists the available basis set output formats, optionally with
    their descriptions.
    '''
    fmts = api.get_formats()
    if args.no_description:
        return '\n'.join(fmts.keys())
    return '\n'.join(format_columns(fmts.items()))
"resource": ""
} |
def _bse_cli_list_ref_formats(args):
    '''Handles the list-ref-formats subcommand

    Lists the available reference output formats, optionally with
    their descriptions.
    '''
    ref_fmts = api.get_reference_formats()
    if args.no_description:
        return '\n'.join(ref_fmts.keys())
    return '\n'.join(format_columns(ref_fmts.items()))
"resource": ""
} |
def _bse_cli_list_roles(args):
    '''Handles the list-roles subcommand

    Lists the recognized basis set roles, optionally with their
    descriptions.
    '''
    roles = api.get_roles()
    if args.no_description:
        return '\n'.join(roles.keys())
    return '\n'.join(format_columns(roles.items()))
"resource": ""
} |
def _bse_cli_lookup_by_role(args):
    '''Handles the lookup-by-role subcommand

    Looks up the auxiliary basis set that fills the given role for the
    given orbital basis set.
    '''
    aux_name = api.lookup_basis_by_role(args.basis, args.role, args.data_dir)
    return aux_name
"resource": ""
} |
def _bse_cli_get_basis(args):
    '''Handles the get-basis subcommand

    Fetches a basis set in the requested output format, applying any
    uncontraction/generalization options from the command line.
    '''
    options = {
        'name': args.basis,
        'elements': args.elements,
        'version': args.version,
        'fmt': args.fmt,
        'uncontract_general': args.unc_gen,
        'uncontract_spdf': args.unc_spdf,
        'uncontract_segmented': args.unc_seg,
        'make_general': args.make_gen,
        'optimize_general': args.opt_gen,
        'data_dir': args.data_dir,
        'header': not args.noheader,
    }
    return api.get_basis(**options)
"resource": ""
} |
def _bse_cli_get_refs(args):
    '''Handles the get-refs subcommand

    Fetches the references for a basis set in the requested format.
    '''
    options = {
        'basis_name': args.basis,
        'elements': args.elements,
        'version': args.version,
        'fmt': args.reffmt,
        'data_dir': args.data_dir,
    }
    return api.get_references(**options)
"resource": ""
} |
def _bse_cli_get_info(args):
    '''Handles the get-info subcommand

    Prints a human-readable summary of one basis set: display name,
    description, role, family, function types, latest version,
    auxiliary basis sets, and a per-version element/revision table.
    '''
    # KeyError propagates if args.basis is not a known basis set
    bs_meta = api.get_metadata(args.data_dir)[args.basis]
    ret = []
    ret.append('-' * 80)
    ret.append(args.basis)
    ret.append('-' * 80)
    ret.append(' Display Name: ' + bs_meta['display_name'])
    ret.append(' Description: ' + bs_meta['description'])
    ret.append(' Role: ' + bs_meta['role'])
    ret.append(' Family: ' + bs_meta['family'])
    ret.append(' Function Types: ' + ','.join(bs_meta['functiontypes']))
    ret.append(' Latest Version: ' + bs_meta['latest_version'])
    ret.append('')

    # Auxiliary basis sets associated with this basis (may be empty)
    aux = bs_meta['auxiliaries']
    if len(aux) == 0:
        ret.append('Auxiliary Basis Sets: None')
    else:
        ret.append('Auxiliary Basis Sets:')
        ret.extend(format_columns(list(aux.items()), ' '))

    ver = bs_meta['versions']
    ret.append('')
    ret.append('Versions:')

    # Print 3 columns - version, elements, revision description
    version_lines = format_columns([(k, compact_elements(v['elements']), v['revdesc']) for k, v in ver.items()],
                                   ' ')
    ret.extend(version_lines)

    return '\n'.join(ret)
"resource": ""
} |
def _bse_cli_get_versions(args):
    '''Handles the get-versions subcommand

    Lists the available versions of a basis set, optionally with each
    version's revision description.

    Raises
    ------
    KeyError
        If the basis set name is not found in the metadata
    '''
    name = args.basis.lower()
    metadata = api.get_metadata(args.data_dir)
    # Idiomatic membership test ('name not in' rather than 'not name in')
    if name not in metadata:
        raise KeyError(
            "Basis set {} does not exist. For a complete list of basis sets, use the 'list-basis-sets' command".format(
                name))

    version_data = {k: v['revdesc'] for k, v in metadata[name]['versions'].items()}

    if args.no_description:
        liststr = version_data.keys()
    else:
        liststr = format_columns(version_data.items())

    return '\n'.join(liststr)
"resource": ""
} |
def _bse_cli_create_bundle(args):
    '''Handles the create-bundle subcommand

    Creates an archive of all basis sets in the requested formats and
    reports the path of the file that was written.
    '''
    out_path = args.bundle_file
    bundle.create_bundle(out_path, args.fmt, args.reffmt, args.archive_type, args.data_dir)
    return "Created " + out_path
"resource": ""
} |
def wait_ready(self, timeout=120):
    """
    Wait until WDA is back to normal.

    Polls ``self.status()`` every 2 seconds until it succeeds or the
    timeout expires.

    Args:
        timeout (float): maximum number of seconds to wait (default 120)

    Returns:
        bool: True if WDA responded within the timeout, False otherwise
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            self.status()
            return True
        # Was a bare 'except:' - that would also swallow KeyboardInterrupt
        # and SystemExit; catch only real errors from the status call
        except Exception:
            time.sleep(2)
    return False
"resource": ""
} |
def screenshot(self, png_filename=None, format='raw'):
    """
    Screenshot with PNG format

    Args:
        png_filename(string): optional, save file name
        format(string): return format, pillow or raw(default)

    Returns:
        raw data or PIL.Image

    Raises:
        WDAError: if saving was requested and the data is not valid PNG
        ValueError: if format is neither 'raw' nor 'pillow'
    """
    value = self.http.get('screenshot').value
    raw_value = base64.b64decode(value)

    # NOTE(review): the PNG magic-number check only fires when png_filename
    # is given; with no filename, non-PNG data is returned unchecked -
    # confirm this is intended
    png_header = b"\x89PNG\r\n\x1a\n"
    if not raw_value.startswith(png_header) and png_filename:
        raise WDAError(-1, "screenshot png format error")

    if png_filename:
        with open(png_filename, 'wb') as f:
            f.write(raw_value)

    if format == 'raw':
        return raw_value
    elif format == 'pillow':
        # Import here so Pillow is only required when requested
        from PIL import Image
        buff = io.BytesIO(raw_value)
        return Image.open(buff)
    else:
        raise ValueError("unknown format")
"resource": ""
} |
def tap_hold(self, x, y, duration=1.0):
    """
    Tap the screen at (x, y) and hold for a moment.

    Args:
        - x, y(int): position
        - duration(float): seconds of hold time

    [[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
    """
    payload = {'x': x, 'y': y, 'duration': duration}
    return self.http.post('/wda/touchAndHold', data=payload)
"resource": ""
} |
def screenshot(self):
    """
    Take screenshot with session check

    Returns:
        PIL.Image
    """
    # Pillow is imported lazily so it is only required by this method
    from PIL import Image
    b64_png = self.http.get('/screenshot').value
    return Image.open(io.BytesIO(base64.b64decode(b64_png)))
"resource": ""
} |
def send_keys(self, value):
    """
    Post key input to the /wda/keys endpoint.

    A plain string is converted to a list of single characters before
    being sent; any other value is passed through unchanged.
    """
    keys = list(value) if isinstance(value, six.string_types) else value
    return self.http.post('/wda/keys', data={'value': keys})
"resource": ""
} |
def click_exists(self, timeout=0):
    """
    Click the element if it appears within the timeout.

    Args:
        timeout (float): seconds to wait for the element

    Returns:
        bool: True if the element was found and clicked, False otherwise
    """
    element = self.get(timeout=timeout, raise_error=False)
    if element is None:
        return False
    element.click()
    return True
"resource": ""
} |
def get_graph_data(self, graph, benchmark):
    """
    Iterator over graph data sets

    Yields
    ------
    param_idx
        Flat index to parameter permutations for parameterized benchmarks.
        None if benchmark is not parameterized.
    entry_name
        Name for the data set. If benchmark is non-parameterized, this is the
        benchmark name.
    steps
        Steps to consider in regression detection.
    threshold
        User-specified threshold for regression detection.
    """
    # Parameterized benchmarks yield one data set per parameter
    # combination; plain benchmarks yield a single (unnamed) data set
    if benchmark.get('params'):
        param_iter = enumerate(zip(itertools.product(*benchmark['params']),
                                   graph.get_steps()))
    else:
        param_iter = [(None, (None, graph.get_steps()))]

    for j, (param, steps) in param_iter:
        if param is None:
            entry_name = benchmark['name']
        else:
            # e.g. "bench_name(param1, param2)"
            entry_name = benchmark['name'] + '({0})'.format(', '.join(param))

        start_revision = self._get_start_revision(graph, benchmark, entry_name)
        threshold = self._get_threshold(graph, benchmark, entry_name)

        if start_revision is None:
            # Skip detection
            continue

        # Drop steps before the configured starting revision
        steps = [step for step in steps if step[1] >= start_revision]

        yield j, entry_name, steps, threshold
"resource": ""
} |
def _get_start_revision(self, graph, benchmark, entry_name):
    """
    Compute the first revision allowed by asv.conf.json.

    Revisions correspond to linearized commit history and the
    regression detection runs on this order --- the starting commit
    thus corresponds to a specific starting revision.

    Returns
    -------
    int or None
        First revision to consider, or None when regression detection
        is disabled for this entry.
    """
    # Default: earliest known revision
    start_revision = min(six.itervalues(self.revisions))

    if graph.params.get('branch'):
        branch_suffix = '@' + graph.params.get('branch')
    else:
        branch_suffix = ''

    for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
        if re.match(regex, entry_name + branch_suffix):
            if start_commit is None:
                # Disable regression detection completely
                return None

            if self.conf.branches == [None]:
                key = (start_commit, None)
            else:
                key = (start_commit, graph.params.get('branch'))

            # Resolve (and memoize) the revision for this commit/branch pair
            if key not in self._start_revisions:
                spec = self.repo.get_new_range_spec(*key)
                start_hash = self.repo.get_hash_from_name(start_commit)
                for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
                    rev = self.revisions.get(commit)
                    if rev is not None:
                        self._start_revisions[key] = rev
                        break
                else:
                    # Commit not found in the branch --- warn and ignore.
                    log.warning(("Commit {0} specified in `regressions_first_commits` "
                                 "not found in branch").format(start_commit))
                    self._start_revisions[key] = -1

            # Multiple matching patterns: the latest start wins
            start_revision = max(start_revision, self._start_revisions[key] + 1)

    return start_revision
"resource": ""
} |
def _get_threshold(self, graph, benchmark, entry_name):
    """
    Compute the regression threshold configured in asv.conf.json.

    All patterns in ``regressions_thresholds`` matching this entry are
    considered and the largest threshold wins; 0.05 is the fallback
    when nothing matches.
    """
    branch = graph.params.get('branch')
    full_name = entry_name + ('@' + branch if branch else '')

    max_threshold = None
    for pattern, raw_value in six.iteritems(self.conf.regressions_thresholds):
        if not re.match(pattern, full_name):
            continue
        try:
            value = float(raw_value)
        except ValueError:
            raise util.UserError("Non-float threshold in asv.conf.json: {!r}".format(raw_value))

        max_threshold = value if max_threshold is None else max(value, max_threshold)

    if max_threshold is None:
        max_threshold = 0.05

    return max_threshold
"resource": ""
} |
def __check_submodules(self):
    """
    Verify that the git submodules are checked out and clean.

    Raises ValueError when a submodule path listed in .gitmodules is
    missing, or when `git submodule status` reports an uninitialized
    ('-') or out-of-sync ('+') submodule.
    """
    # Nothing to verify outside a git checkout
    if not os.path.exists('.git'):
        return

    with open('.gitmodules') as modules_file:
        for line in modules_file:
            if 'path' in line:
                sub_path = line.split('=')[-1].strip()
                if not os.path.exists(sub_path):
                    raise ValueError('Submodule %s missing' % sub_path)

    proc = subprocess.Popen(['git', 'submodule', 'status'],
                            stdout=subprocess.PIPE)
    status, _ = proc.communicate()
    status = status.decode("ascii", "replace")
    for line in status.splitlines():
        # '-' = not initialized, '+' = checked-out commit differs
        if line.startswith(('-', '+')):
            raise ValueError('Submodule not clean: %s' % line)
"resource": ""
} |
def solve_potts_autogamma(y, w, beta=None, **kw):
    """Solve Potts problem with automatically determined gamma.

    The optimal value is determined by minimizing the information measure::

        f(gamma) = beta J(x(gamma)) + log sum(abs(x(gamma) - y)**p)

    where x(gamma) is the solution to the Potts problem for a fixed
    gamma. The minimization is only performed rather roughly.

    Parameters
    ----------
    beta : float or 'bic'
        Penalty parameter. Default is 4*ln(n)/n, similar to Bayesian
        information criterion for gaussian model with unknown variance
        assuming 4 DOF per breakpoint.

    Returns
    -------
    (right, values, dists, gamma)
        Best solution found and the gamma that produced it; all None/empty
        for empty input.
    """
    n = len(y)
    if n == 0:
        return [], [], [], None

    mu_dist = get_mu_dist(y, w)
    # (mu is not used below; dist(l, r) gives the fit error of segment [l, r])
    mu, dist = mu_dist.mu, mu_dist.dist

    if beta is None:
        beta = 4 * math.log(n) / n

    # Scale reference for gamma: error of fitting a single segment
    gamma_0 = dist(0, n-1)

    if gamma_0 == 0:
        # Zero variance
        gamma_0 = 1.0

    # Single-element lists so the nested f() can mutate the best solution
    # found so far (pre-'nonlocal' closure idiom)
    best_r = [None]
    best_v = [None]
    best_d = [None]
    best_obj = [float('inf')]
    best_gamma = [None]

    def f(x):
        # Evaluate the objective at gamma = gamma_0 * e**x
        gamma = gamma_0 * math.exp(x)
        r, v, d = solve_potts_approx(y, w, gamma=gamma, mu_dist=mu_dist, **kw)

        # MLE fit noise correlation
        def sigma_star(rights, values, rho):
            """
            |E_0| + sum_{j>0} |E_j - rho E_{j-1}|
            """
            l = 1
            E_prev = y[0] - values[0]
            s = abs(E_prev)
            for r, v in zip(rights, values):
                for yv in y[l:r]:
                    E = yv - v
                    s += abs(E - rho*E_prev)
                    E_prev = E
                l = r
            return s

        rho_best = golden_search(lambda rho: sigma_star(r, v, rho), -1, 1,
                                 xatol=0.05, expand_bounds=True)

        # Measurement noise floor
        if len(v) > 2:
            absdiff = [abs(v[j+1] - v[j]) for j in range(len(v) - 1)]
            sigma_0 = 0.1 * min(absdiff)
        else:
            absv = [abs(z) for z in v]
            sigma_0 = 0.001 * min(absv)
        sigma_0 = max(1e-300, sigma_0)

        # Objective function
        s = sigma_star(r, v, rho_best)
        obj = beta*len(r) + math.log(sigma_0 + s)

        # Done
        if obj < best_obj[0]:
            best_r[0] = r
            best_v[0] = v
            best_d[0] = d
            best_gamma[0] = gamma
            best_obj[0] = obj

        return obj

    # Try to find best gamma (golden section search on log-scale); we
    # don't need an accurate value for it however
    a = math.log(0.1/n)
    b = 0.0
    golden_search(f, a, b, xatol=abs(a)*0.1, ftol=0, expand_bounds=True)

    return best_r[0], best_v[0], best_d[0], best_gamma[0]
"resource": ""
} |
def merge_pieces(gamma, right, values, dists, mu_dist, max_size):
    """Combine consecutive intervals in a Potts model solution when that
    reduces the cost function, then rebuild the per-interval values and
    distances.  Returns the updated ``(right, values, dists)`` triple.
    """
    mu, dist = mu_dist.mu, mu_dist.dist
    right = list(right)

    # Greedily merge adjacent intervals while any merge lowers the cost.
    while True:
        best_change = 0
        best_j = len(right)

        left = 0
        for j in range(1, len(right)):
            if best_j < j - 2:
                break
            # Cost delta of fusing intervals j-1 and j into one.
            change = dist(left, right[j] - 1) - (
                dist(left, right[j - 1] - 1)
                + dist(right[j - 1], right[j] - 1)
                + gamma)
            if change <= best_change:
                best_change = change
                best_j = j - 1
            left = right[j - 1]

        if best_j < len(right):
            del right[best_j]
        else:
            break

    # Nudge boundary positions: the interval-size-restricted solver can
    # return sub-optimal boundaries.
    left = 0
    for j in range(1, len(right)):
        best_score = dist(left, right[j - 1] - 1) + dist(right[j - 1], right[j] - 1)
        shift = 0
        for off in range(-max_size, max_size + 1):
            if right[j - 1] + off - 1 <= left or right[j - 1] + off >= right[j] - 1 or off == 0:
                continue
            score = dist(left, right[j - 1] + off - 1) + dist(right[j - 1] + off, right[j] - 1)
            if score < best_score:
                shift = off
                best_score = score
        if shift != 0:
            right[j - 1] += shift
        left = right[j - 1]

    # Recompute per-interval statistics for the final boundaries.
    left = 0
    values = []
    dists = []
    for bound in right:
        dists.append(dist(left, bound - 1))
        values.append(mu(left, bound - 1))
        left = bound

    return right, values, dists
"resource": ""
} |
def weighted_median(y, w):
    """Compute the weighted median of `y` with weights `w`.

    When the cumulative weight hits the midpoint exactly, the result is
    the average of the straddling values; if the midpoint is never
    crossed (degenerate weights) the plain mean of `y` is returned.
    """
    ranked = sorted(zip(y, w))
    half = sum(w) / 2

    picked = []
    running = 0
    for value, weight in ranked:
        running += weight
        if running > half:
            picked.append(value)
            break
        if running == half:
            # Exactly at the midpoint: keep this value and the next one.
            picked.append(value)
    else:
        # Midpoint never exceeded -- fall back to averaging everything.
        picked = y

    return sum(picked) / len(picked)
"resource": ""
} |
def _find_python(python):
    """Find the Python executable for the given Python version specifier.

    Accepts either 'X[.Y]' (CPython) or 'pypy[X[.Y]]'.  Returns the
    path found on PATH, the currently running interpreter if it
    matches, or None.
    """
    is_pypy = python.startswith("pypy")

    # Split the specifier into an executable name and a version string.
    if is_pypy:
        executable = python
        python_version = '2' if python == 'pypy' else python[4:]
    else:
        python_version = python
        executable = "python{0}".format(python_version)

    # First preference: an executable of that name on PATH.
    try:
        return util.which(executable)
    except IOError:
        pass

    # Second preference: the interpreter we are running under.
    current_is_pypy = hasattr(sys, 'pypy_version_info')
    current_versions = ['{0[0]}'.format(sys.version_info),
                        '{0[0]}.{0[1]}'.format(sys.version_info)]
    if is_pypy == current_is_pypy and python_version in current_versions:
        return sys.executable

    return None
"resource": ""
} |
def name(self):
    """Get a name that uniquely identifies this environment."""
    interp = self._python
    if self._python.startswith('pypy'):
        # get_env_name adds the 'py' prefix itself, so strip it here.
        interp = interp[2:]
    return environment.get_env_name(self.tool_name, interp, self._requirements)
"resource": ""
} |
def _setup(self):
    """Create the virtualenv on disk and install the requirements.

    Runs ``virtualenv`` with the configured interpreter, then installs
    all requirements into it with ``pip install``.
    """
    log.info("Creating virtualenv for {0}".format(self.name))
    util.check_call([
        sys.executable,
        "-mvirtualenv",
        '--no-site-packages',
        "-p",
        self._executable,
        self._path])

    log.info("Installing requirements for {0}".format(self.name))
    self._install_requirements()
"resource": ""
} |
def _basicsize(t, base=0, heap=False, obj=None):
    '''Return the non-zero basic size of type *t*, including the
    garbage-collector header and reference-counter overhead.
    '''
    size = max(getattr(t, '__basicsize__', 0), base)
    # Decide whether a GC header must be accounted for.
    if t != _Type_type:
        gc_hdr = getattr(t, '__flags__', 0) & _Py_TPFLAGS_HAVE_GC
    elif heap:  # a type object allocated on the heap
        gc_hdr = True
    else:  # None has no __flags__ attribute
        gc_hdr = getattr(obj, '__flags__', 0) & _Py_TPFLAGS_HEAPTYPE
    if gc_hdr:
        size += _sizeof_CPyGC_Head
    # Always include the reference counters.
    return size + _sizeof_Crefcounts
"resource": ""
} |
def _derive_typedef(typ):
    '''Return the single existing super-type typedef of *typ*, or None
    when there is no unique one.
    '''
    matches = [td for td in _values(_typedefs) if _issubclass(typ, td.type)]
    return matches[0] if len(matches) == 1 else None
"resource": ""
} |
def _infer_dict(obj):
    '''Return True when *obj* quacks like a dict (has the usual mapping
    methods, in either their Python 2 or iterator spellings).
    '''
    for probe in (('__len__', 'get', 'has_key', 'items', 'keys', 'values'),
                  ('__len__', 'get', 'has_key', 'iteritems', 'iterkeys', 'itervalues')):
        for method in probe:
            if not _callable(getattr(obj, method, None)):
                break
        else:
            # Every probed attribute was callable.
            return True
    return False
"resource": ""
} |
def _isdictclass(obj):
    '''Return a truthy value for instances of registered dict classes.'''
    cls = getattr(obj, '__class__', None)
    return cls and cls.__name__ in _dict_classes.get(cls.__module__, ())
"resource": ""
} |
def _lengstr(obj):
    '''Format the (extended) length of *obj* as a string; empty when the
    object has no length.
    '''
    n = leng(obj)
    if n is None:  # no len
        return ''
    if n > _len(obj):  # extended (over-allocated) length
        return ' leng %d!' % n
    return ' leng %d' % n
"resource": ""
} |
q34645 | _objs_opts | train | def _objs_opts(objs, all=None, **opts):
'''Return given or 'all' objects
and the remaining options.
'''
if objs: # given objects
t = objs
elif all in (False, None):
t = ()
elif all is True: # 'all' objects ...
# ... modules first, globals and stack
# (may contain duplicate objects)
t = tuple(_values(sys.modules)) + (
globals(), stack(sys.getrecursionlimit())[2:])
else:
raise ValueError('invalid option: %s=%r' % ('all', all))
return t, opts | python | {
"resource": ""
} |
q34646 | _p100 | train | def _p100(part, total, prec=1):
'''Return percentage as string.
'''
r = float(total)
if r:
r = part * 100.0 / r
return '%.*f%%' % (prec, r)
return 'n/a' | python | {
"resource": ""
} |
q34647 | _printf | train | def _printf(fmt, *args, **print3opts):
'''Formatted print.
'''
if print3opts: # like Python 3.0
f = print3opts.get('file', None) or sys.stdout
if args:
f.write(fmt % args)
else:
f.write(fmt)
f.write(print3opts.get('end', linesep))
elif args:
print(fmt % args)
else:
print(fmt) | python | {
"resource": ""
} |
q34648 | _refs | train | def _refs(obj, named, *ats, **kwds):
'''Return specific attribute objects of an object.
'''
if named:
for a in ats: # cf. inspect.getmembers()
if hasattr(obj, a):
yield _NamedRef(a, getattr(obj, a))
if kwds: # kwds are _dir2() args
for a, o in _dir2(obj, **kwds):
yield _NamedRef(a, o)
else:
for a in ats: # cf. inspect.getmembers()
if hasattr(obj, a):
yield getattr(obj, a)
if kwds: # kwds are _dir2() args
for _, o in _dir2(obj, **kwds):
yield o | python | {
"resource": ""
} |
q34649 | _SI | train | def _SI(size, K=1024, i='i'):
'''Return size as SI string.
'''
if 1 < K < size:
f = float(size)
for si in iter('KMGPTE'):
f /= K
if f < K:
return ' or %.1f %s%sB' % (f, si, i)
return '' | python | {
"resource": ""
} |
def _module_refs(obj, named):
    '''Return the referents of a module object (its __dict__), skipping
    this very module to avoid self-sizing.
    '''
    if obj.__name__ == __name__:
        return ()
    # A module is essentially its dict.
    return _dict_refs(obj.__dict__, named)
"resource": ""
} |
q34651 | _len_frame | train | def _len_frame(obj):
'''Length of a frame object.
'''
c = getattr(obj, 'f_code', None)
if c:
n = _len_code(c)
else:
n = 0
return n | python | {
"resource": ""
} |
q34652 | _len_slice | train | def _len_slice(obj):
'''Slice length.
'''
try:
return ((obj.stop - obj.start + 1) // obj.step)
except (AttributeError, TypeError):
return 0 | python | {
"resource": ""
} |
def _claskey(obj, style):
    '''Return the cached _Claskey wrapper for an old- or new-style class
    object, creating it on first use.
    '''
    key = id(obj)
    wrapper = _claskeys.get(key, None)
    if not wrapper:
        _claskeys[key] = wrapper = _Claskey(obj, style)
    return wrapper
"resource": ""
} |
def _typedef_both(t, base=0, item=0, leng=None, refs=None, kind=_kind_static, heap=False):
    '''Register and return a new typedef covering both data and code.'''
    td = _Typedef(base=_basicsize(t, base=base), item=_itemsize(t, item),
                  refs=refs, leng=leng,
                  both=True, kind=kind, type=t)
    td.save(t, base=base, heap=heap)
    return td
"resource": ""
} |
def _typedef_code(t, base=0, refs=None, kind=_kind_static, heap=False):
    '''Register and return a new typedef covering code only.'''
    td = _Typedef(base=_basicsize(t, base=base),
                  refs=refs,
                  both=False, kind=kind, type=t)
    td.save(t, base=base, heap=heap)
    return td
"resource": ""
} |
def _typedef(obj, derive=False, infer=False):
    '''Create a new typedef describing how to size *obj*.

    Dispatches on the kind of object (module, frame, code, callable,
    dict-like, instance) and optionally derives from a super type or
    infers dict-ness from the object's attributes.
    '''
    t = type(obj)
    td = _Typedef(base=_basicsize(t, obj=obj),
                  kind=_kind_dynamic, type=t)

    if ismodule(obj):
        # A module is sized like a dict plus the module object itself.
        td.dup(item=_dict_typedef.item + _sizeof_CPyModuleObject,
               leng=_len_module,
               refs=_module_refs)
    elif isframe(obj):
        td.set(base=_basicsize(t, base=_sizeof_CPyFrameObject, obj=obj),
               item=_itemsize(t),
               leng=_len_frame,
               refs=_frame_refs)
    elif iscode(obj):
        td.set(base=_basicsize(t, base=_sizeof_CPyCodeObject, obj=obj),
               item=_sizeof_Cvoidp,
               leng=_len_code,
               refs=_co_refs,
               both=False)  # code only
    elif _callable(obj):
        if isclass(obj):  # class or type
            td.set(refs=_class_refs,
                   both=False)  # code only
            if obj.__module__ in _builtin_modules:
                td.set(kind=_kind_ignored)
        elif isbuiltin(obj):  # built-in function or method
            td.set(both=False,  # code only
                   kind=_kind_ignored)
        elif isfunction(obj):
            td.set(refs=_func_refs,
                   both=False)  # code only
        elif ismethod(obj):
            td.set(refs=_im_refs,
                   both=False)  # code only
        elif isclass(t):
            # Callable instance (e.g. SCons); sized like any instance.
            td.set(item=_itemsize(t), safe_len=True,
                   refs=_inst_refs)  # not code only!
        else:
            td.set(both=False)  # code only
    elif _issubclass(t, dict):
        td.dup(kind=_kind_derived)
    elif _isdictclass(obj) or (infer and _infer_dict(obj)):
        td.dup(kind=_kind_inferred)
    elif getattr(obj, '__module__', None) in _builtin_modules:
        td.set(kind=_kind_ignored)
    else:  # assume an instance of some class
        if derive:
            parent = _derive_typedef(t)
            if parent:  # duplicate the (single) parent typedef
                td.dup(other=parent, kind=_kind_derived)
                return td
        if _issubclass(t, Exception):
            td.set(item=_itemsize(t), safe_len=True,
                   refs=_exc_refs,
                   kind=_kind_derived)
        elif isinstance(obj, Exception):
            td.set(item=_itemsize(t), safe_len=True,
                   refs=_exc_refs)
        else:
            td.set(item=_itemsize(t), safe_len=True,
                   refs=_inst_refs)
    return td
"resource": ""
} |
def adict(*classes):
    '''Register one or more classes to be handled as dict.

    Returns True when every argument was a dict-like class; False when
    at least one argument was rejected.
    '''
    accepted = True
    for cls in classes:
        # Record dict-like classes by name under their module.
        if isclass(cls) and _infer_dict(cls):
            known = _dict_classes.get(cls.__module__, ())
            if cls.__name__ not in known:  # extend the tuple
                _dict_classes[cls.__module__] = known + (cls.__name__,)
        else:  # not a dict-like class
            accepted = False
    return accepted
"resource": ""
} |
def asizeof(*objs, **opts):
    '''Return the combined size in bytes of all objects passed as
    positional arguments.

    The available options and defaults are:

         *align=8*       -- size alignment
         *all=False*     -- all current objects
         *clip=80*       -- clip ``repr()`` strings
         *code=False*    -- incl. (byte)code size
         *derive=False*  -- derive from super type
         *ignored=True*  -- ignore certain types
         *infer=False*   -- try to infer types
         *limit=100*     -- recursion limit
         *stats=0.0*     -- print statistics

    Set *align* to a power of 2 to align sizes; values below 2 disable
    alignment.  With *all=True* and no positional arguments, all current
    module, global and stack objects are sized.  A positive *clip*
    truncates repr() strings.  *code=True* includes (byte)code sizes of
    callables.  *derive=True* handles new types like a unique existing
    super type.  *ignored=False* also sizes normally-ignored base types.
    *infer=True* infers dict types from callable mapping attributes.
    *limit* bounds the recursion into referents (0 gives the sum of flat
    sizes only; very high values may cause runtime errors).  A positive
    *stats* prints up to 8 statistics tables; its fractional part (x100)
    is the cutoff percentage for simple profiles.

    See the module documentation for the definition of flat size.
    '''
    targets, remaining = _objs_opts(objs, **opts)
    if targets:
        _asizer.reset(**remaining)
        total = _asizer.asizeof(*targets)
        _asizer.print_stats(objs=targets, opts=opts)  # show opts as _kwdstr
        _asizer._clear()
    else:
        total = 0
    return total
"resource": ""
} |
def asizesof(*objs, **opts):
    '''Return a tuple with the individual size in bytes of each object
    passed as a positional argument.

    Supported options (see **asizeof** for their meaning):

         *align=8*, *clip=80*, *code=False*, *derive=False*,
         *ignored=True*, *infer=False*, *limit=100*, *stats=0.0*

    The 'all' option is not valid here.  The length of the returned
    tuple equals the number of given objects.
    '''
    if 'all' in opts:
        raise KeyError('invalid option: %s=%r' % ('all', opts['all']))

    if objs:  # size the given objects individually
        _asizer.reset(**opts)
        sizes = _asizer.asizesof(*objs)
        _asizer.print_stats(objs, opts=opts, sizes=sizes)  # show opts as _kwdstr
        _asizer._clear()
    else:
        sizes = ()
    return sizes
"resource": ""
} |
def _typedefof(obj, save=False, **opts):
    '''Return the typedef for *obj*, creating (and optionally caching)
    a new one when none is registered yet.
    '''
    key = _objkey(obj)
    td = _typedefs.get(key, None)
    if not td:  # build a fresh typedef
        td = _typedef(obj, **opts)
        if save:
            _typedefs[key] = td
    return td
"resource": ""
} |
def args(self):  # as args tuple
    '''Return all typedef attributes as a positional-arguments tuple.'''
    return tuple(getattr(self, attr) for attr in
                 ('base', 'item', 'leng', 'refs', 'both', 'kind', 'type'))
"resource": ""
} |
def dup(self, other=None, **kwds):
    '''Duplicate the attributes of the dict typedef (or of *other*),
    overridden by any explicit keyword values.
    '''
    merged = (_dict_typedef if other is None else other).kwds()
    merged.update(kwds)
    self.reset(**merged)
"resource": ""
} |
def flat(self, obj, mask=0):
    '''Return the aligned flat size of *obj* for this typedef.'''
    size = self.base
    if self.leng and self.item > 0:  # account for contained items
        size += self.leng(obj) * self.item
    if _getsizeof:  # sys.getsizeof() prevails when available
        size = _getsizeof(obj, size)
    if mask:  # round up to the alignment boundary
        size = (size + mask) & ~mask
    return size
"resource": ""
} |
def kwds(self):
    '''Return all typedef attributes as a keywords dict.'''
    # no dict(refs=..., kind=...) literal in Python 2.0, hence _kwds()
    return _kwds(base=self.base, item=self.item,
                 leng=self.leng, refs=self.refs,
                 both=self.both, kind=self.kind, type=self.type)
"resource": ""
} |
def save(self, t, base=0, heap=False):
    '''Register this typedef (and a derived class typedef) for type *t*.

    Raises KeyError when *t* cannot be keyed or is already registered
    inconsistently.
    '''
    cls_key, inst_key = _keytuple(t)
    if inst_key and inst_key not in _typedefs:  # instance key
        _typedefs[inst_key] = self
        if cls_key and cls_key not in _typedefs:  # class key
            if t.__module__ in _builtin_modules:
                kind = _kind_ignored  # default for builtins
            else:
                kind = self.kind
            _typedefs[cls_key] = _Typedef(base=_basicsize(type(t), base=base, heap=heap),
                                          refs=_type_refs,
                                          both=False, kind=kind, type=t)
    elif isbuiltin(t) and t not in _typedefs:
        # e.g. array, range, xrange in Python 2.x
        _typedefs[t] = _Typedef(base=_basicsize(t, base=base),
                                both=False, kind=_kind_ignored, type=t)
    else:
        raise KeyError('asizeof typedef %r bad: %r %r' % (self, (cls_key, inst_key), self.both))
"resource": ""
} |
def set(self, safe_len=False, **kwds):
    '''Update one or more typedef attributes (validated via reset).'''
    if kwds:  # merge over the current values and re-validate
        merged = self.kwds()
        merged.update(kwds)
        self.reset(**merged)
    if safe_len and self.item:
        # fall back to the safe length function
        self.leng = _len
"resource": ""
} |
def reset(self, base=0, item=0, leng=None, refs=None,
          both=True, kind=None, type=None):
    '''Reset all specified typedef attributes, validating each value and
    raising ValueError for anything out of range or unrecognized.
    '''
    if base < 0:
        raise ValueError('invalid option: %s=%r' % ('base', base))
    else:
        self.base = base
    if item < 0:
        raise ValueError('invalid option: %s=%r' % ('item', item))
    else:
        self.item = item
    if leng in _all_lengs:  # XXX or _callable(leng)
        self.leng = leng
    else:
        raise ValueError('invalid option: %s=%r' % ('leng', leng))
    if refs in _all_refs:  # XXX or _callable(refs)
        self.refs = refs
    else:
        raise ValueError('invalid option: %s=%r' % ('refs', refs))
    if both in (False, True):
        self.both = both
    else:
        raise ValueError('invalid option: %s=%r' % ('both', both))
    if kind in _all_kinds:
        self.kind = kind
    else:
        raise ValueError('invalid option: %s=%r' % ('kind', kind))
    self.type = type
"resource": ""
} |
def update(self, obj, size):
    '''Update this profile with one sized object: bump the counters,
    track the largest size, and keep a (preferably weak) reference to
    the object.
    '''
    self.number += 1
    self.total += size
    if self.high < size:  # new largest object
        self.high = size
    try:
        # Prefer a weak reference so the profile does not pin the object.
        self.objref, self.weak = Weakref.ref(obj), True
    except TypeError:
        # Not weakly referenceable: keep a strong reference instead.
        self.objref, self.weak = obj, False
"resource": ""
} |
def _printf(self, *args, **kwargs):
    '''Print via the module-level _printf, routing output to the
    configured stream unless this call already specifies a file.
    '''
    if self._stream and not kwargs.get('file'):
        kwargs['file'] = self._stream
    _printf(*args, **kwargs)
"resource": ""
} |
def _clear(self):
    '''Reset all mutable sizing state (totals, caches, counters) while
    keeping the configured options and the exclusion keys.
    '''
    self._depth = 0  # deepest recursion reached
    self._duplicate = 0
    self._incl = ''  # or ' (incl. code)'
    self._missed = 0  # objects missed due to errors
    self._profile = False
    self._profs = {}
    self._seen = {}
    self._total = 0  # accumulated total size
    # Keep the exclusion keys but zero their hit counters.
    for key in _keys(self._excl_d):
        self._excl_d[key] = 0
self._excl_d[k] = 0 | python | {
"resource": ""
} |
q34671 | Asizer._prof | train | def _prof(self, key):
'''Get _Prof object.
'''
p = self._profs.get(key, None)
if not p:
self._profs[key] = p = _Prof()
return p | python | {
"resource": ""
} |
def _sizer(self, obj, deep, sized):
    '''Size an object, recursing into its referents up to the limit.

    *deep* is the current recursion depth; *sized* is an optional
    factory producing detailed per-object results.
    '''
    s, f, i = 0, 0, id(obj)
    # Skip objects already seen (or refs of explicitly given objects).
    if i in self._seen:
        if deep:
            self._seen[i] += 1
            if sized:
                s = sized(s, f, name=self._nameof(obj))
            return s
    else:
        self._seen[i] = 0
    try:
        k, rs = _objkey(obj), []
        if k in self._excl_d:
            self._excl_d[k] += 1
        else:
            v = _typedefs.get(k, None)
            if not v:  # register a new typedef for this type
                _typedefs[k] = v = _typedef(obj, derive=self._derive_,
                                            infer=self._infer_)
            if (v.both or self._code_) and v.kind is not self._ign_d:
                s = f = v.flat(obj, self._mask)  # flat size
                if self._profile:  # per-type profiling
                    self._prof(k).update(obj, s)
                # Recurse into referents, but not into nested modules.
                if v.refs and deep < self._limit_ and not (deep and ismodule(obj)):
                    r, z, d = v.refs, self._sizer, deep + 1
                    if sized and deep < self._detail_:
                        # detailed sizing with named referents
                        for o in r(obj, True):
                            if isinstance(o, _NamedRef):
                                t = z(o.ref, d, sized)
                                t.name = o.name
                            else:
                                t = z(o, d, sized)
                                t.name = self._nameof(o)
                            rs.append(t)
                            s += t.size
                    else:  # plain accumulation of referent sizes
                        for o in r(obj, False):
                            s += z(o, d, None)
                    # Track the deepest recursion reached.
                    if self._depth < d:
                        self._depth = d
        self._seen[i] += 1
    except RuntimeError:  # XXX RecursionLimitExceeded:
        self._missed += 1
    if sized:
        s = sized(s, f, name=self._nameof(obj), refs=rs)
    return s
"resource": ""
} |
def exclude_refs(self, *objs):
    '''Exclude any references to the specified objects from sizing.

    The objects themselves are still sized when passed as positional
    arguments to subsequent **asizeof** / **asizesof** calls; only
    references to them encountered during recursion are skipped.
    '''
    for obj in objs:
        self._seen.setdefault(id(obj), 0)
"resource": ""
} |
def exclude_types(self, *objs):
    '''Exclude the given object instances and types from sizing.

    Unlike exclude_refs, this also excludes the objects themselves in
    later **asizeof** / **asizesof** calls.
    '''
    for obj in objs:
        for key in _keytuple(obj):
            if key and key not in self._excl_d:
                self._excl_d[key] = 0
"resource": ""
} |
def print_summary(self, w=0, objs=(), **print3opts):
    '''Print the summary statistics.

         *w=0*           -- indentation for each line
         *objs=()*       -- optional, list of objects
         *print3options* -- print options, as in Python 3.0
    '''
    self._printf('%*d bytes%s%s', w, self._total, _SI(self._total), self._incl, **print3opts)
    if self._mask:
        self._printf('%*d byte aligned', w, self._mask + 1, **print3opts)
    self._printf('%*d byte sizeof(void*)', w, _sizeof_Cvoidp, **print3opts)
    given = len(objs or ())
    if given > 0:
        dup_note = self._duplicate or ''
        if dup_note:
            dup_note = ', %d duplicate' % self._duplicate
        self._printf('%*d object%s given%s', w, given, _plural(given), dup_note, **print3opts)
    count = _sum([1 for t in _values(self._seen) if t != 0])  # [] for Python 2.2
    self._printf('%*d object%s sized', w, count, _plural(count), **print3opts)
    if self._excl_d:
        count = _sum(_values(self._excl_d))
        self._printf('%*d object%s excluded', w, count, _plural(count), **print3opts)
    count = _sum(_values(self._seen))
    self._printf('%*d object%s seen', w, count, _plural(count), **print3opts)
    if self._missed > 0:
        self._printf('%*d object%s missed', w, self._missed, _plural(self._missed), **print3opts)
    if self._depth > 0:
        self._printf('%*d recursion depth', w, self._depth, **print3opts)
"resource": ""
} |
def print_typedefs(self, w=0, **print3opts):
    '''Print the types and dict tables.

         *w=0*           -- indentation for each line
         *print3options* -- print options, as in Python 3.0
    '''
    for kind in _all_kinds:
        # XXX Python 3.0 doesn't sort type objects
        rows = [(self._prepr(a), v) for a, v in _items(_typedefs)
                if v.kind == kind and (v.both or self._code_)]
        if rows:
            self._printf('%s%*d %s type%s: basicsize, itemsize, _len_(), _refs()',
                         linesep, w, len(rows), kind, _plural(len(rows)), **print3opts)
            for name, td in _sorted(rows):
                self._printf('%*s %s: %s', w, '', name, td, **print3opts)
    # dict and dict-like classes
    total = _sum([len(v) for v in _values(_dict_classes)])  # [] for Python 2.2
    if total:
        self._printf('%s%*d dict/-like classes:', linesep, w, total, **print3opts)
        for mod, names in _items(_dict_classes):
            self._printf('%*s %s: %s', w, '', mod, self._prepr(names), **print3opts)
"resource": ""
} |
def reset(self, align=8, clip=80, code=False, derive=False,
          detail=0, ignored=True, infer=False, limit=100, stats=0,
          stream=None):
    '''Reset options and state.

    The available options and default values are:

         *align=8*       -- size alignment
         *clip=80*       -- clip repr() strings
         *code=False*    -- incl. (byte)code size
         *derive=False*  -- derive from super type
         *detail=0*      -- Asized refs level
         *ignored=True*  -- ignore certain types
         *infer=False*   -- try to infer types
         *limit=100*     -- recursion limit
         *stats=0.0*     -- print statistics, see function **asizeof**
         *stream=None*   -- output stream for printing

    See function **asizeof** for a description of the options.
    '''
    # Store the options.
    self._align_ = align
    self._clip_ = clip
    self._code_ = code
    self._derive_ = derive
    self._detail_ = detail  # for Asized only
    self._infer_ = infer
    self._limit_ = limit
    self._stats_ = stats
    self._stream = stream
    self._ign_d = _kind_ignored if ignored else None
    # Wipe the mutable state, then derive the mask etc. from options.
    self._clear()
    self.set(align=align, code=code, stats=stats)
"resource": ""
} |
q34678 | _find_conda | train | def _find_conda():
"""Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
"""
if 'CONDA_EXE' in os.environ:
conda = os.environ['CONDA_EXE']
else:
conda = util.which('conda')
return conda | python | {
"resource": ""
} |
def recvall(sock, size):
    """
    Receive data of given size from a socket connection
    """
    data = b""
    while len(data) < size:
        chunk = sock.recv(size - len(data))
        data += chunk
        if not chunk:
            # Connection closed before the full payload arrived.
            raise RuntimeError("did not receive data from socket "
                               "(size {}, got only {!r})".format(size, data))
    return data
"resource": ""
} |
def get_source_code(items):
    """
    Extract source code of given items, and concatenate and dedent it.

    Methods are wrapped in a synthetic ``class X:`` header (emitted once
    per run of consecutive methods from the same class).  Items whose
    source cannot be retrieved are skipped.
    """
    sources = []
    prev_class_name = None

    for func in items:
        try:
            lines, lineno = inspect.getsourcelines(func)
        except TypeError:
            continue
        if not lines:
            continue

        src = "\n".join(line.rstrip() for line in lines)
        src = textwrap.dedent(src)

        # Work out the owning class name, if this is a method.
        class_name = None
        if inspect.ismethod(func):
            if hasattr(func, 'im_class'):
                class_name = func.im_class.__name__
            elif hasattr(func, '__qualname__'):
                names = func.__qualname__.split('.')
                if len(names) > 1:
                    class_name = names[-2]

        if class_name and prev_class_name != class_name:
            # First method of a new class: emit the class header.
            src = "class {0}:\n    {1}".format(
                class_name, src.replace("\n", "\n    "))
        elif class_name:
            # Subsequent method of the same class: indent only.
            src = "    {1}".format(
                class_name, src.replace("\n", "\n    "))

        sources.append(src)
        prev_class_name = class_name

    return "\n\n".join(sources).rstrip()
"resource": ""
} |
def disc_modules(module_name, ignore_import_errors=False):
    """
    Recursively import a module and all sub-modules in the package

    Yields
    ------
    module
        Imported module in the package tree
    """
    if not ignore_import_errors:
        module = import_module(module_name)
    else:
        try:
            module = import_module(module_name)
        except BaseException:
            # Report but keep discovering the rest of the tree.
            traceback.print_exc()
            return

    yield module

    # Packages expose __path__; walk their immediate sub-modules.
    if getattr(module, '__path__', None):
        for _, child, _ in pkgutil.iter_modules(module.__path__, module_name + '.'):
            for item in disc_modules(child, ignore_import_errors=ignore_import_errors):
                yield item
"resource": ""
} |
def disc_benchmarks(root, ignore_import_errors=False):
    """
    Discover all benchmarks in a given directory tree, yielding Benchmark
    objects

    For each class definition, looks for any methods with a special
    name.  For each free function, yields all functions with a special
    name.
    """
    root_name = os.path.basename(root)

    for module in disc_modules(root_name, ignore_import_errors=ignore_import_errors):
        # Public module attributes only.
        public_attrs = ((k, v) for k, v in module.__dict__.items()
                        if not k.startswith('_'))
        for attr_name, module_attr in public_attrs:
            if inspect.isclass(module_attr):
                for name, class_attr in inspect.getmembers(module_attr):
                    if (inspect.isfunction(class_attr) or
                            inspect.ismethod(class_attr)):
                        benchmark = _get_benchmark(name, module, module_attr,
                                                   class_attr)
                        if benchmark is not None:
                            yield benchmark
            elif inspect.isfunction(module_attr):
                benchmark = _get_benchmark(attr_name, module, None, module_attr)
                if benchmark is not None:
                    yield benchmark
"resource": ""
} |
def get_benchmark_from_name(root, name, extra_params=None):
    """
    Create a benchmark from a fully-qualified benchmark name.

    Parameters
    ----------
    root : str
        Path to the root of a benchmark suite.

    name : str
        Fully-qualified name to a specific benchmark.  May carry a
        '-<param_idx>' suffix selecting a parameter combination.
    """
    if '-' in name:
        try:
            name, param_idx = name.split('-', 1)
            param_idx = int(param_idx)
        except ValueError:
            raise ValueError("Benchmark id %r is invalid" % (name,))
    else:
        param_idx = None

    update_sys_path(root)
    benchmark = None

    # Fast path: guess the module path from the dotted name and import
    # the benchmark function (i=1) or method (i=2) directly.
    parts = name.split('.')
    for i in [1, 2]:
        path = os.path.join(root, *parts[:-i]) + '.py'
        if not os.path.isfile(path):
            continue
        modname = '.'.join([os.path.basename(root)] + parts[:-i])
        module = import_module(modname)
        try:
            module_attr = getattr(module, parts[-i])
        except AttributeError:
            break
        if i == 1 and inspect.isfunction(module_attr):
            benchmark = _get_benchmark(parts[-i], module, None, module_attr)
            break
        elif i == 2 and inspect.isclass(module_attr):
            try:
                class_attr = getattr(module_attr, parts[-1])
            except AttributeError:
                break
            if (inspect.isfunction(class_attr) or
                    inspect.ismethod(class_attr)):
                benchmark = _get_benchmark(parts[-1], module, module_attr,
                                           class_attr)
                break

    # Slow path: full discovery over the suite.
    if benchmark is None:
        for benchmark in disc_benchmarks(root):
            if benchmark.name == name:
                break
        else:
            raise ValueError(
                "Could not find benchmark '{0}'".format(name))

    if param_idx is not None:
        benchmark.set_param_idx(param_idx)

    if extra_params:
        # Inject the extra parameters as the highest-priority attribute
        # source.
        class ExtraBenchmarkAttrs:
            pass
        for key, value in extra_params.items():
            setattr(ExtraBenchmarkAttrs, key, value)
        benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs)

    return benchmark
"resource": ""
} |
def list_benchmarks(root, fp):
    """
    List all of the discovered benchmarks to fp as JSON.

    Writes a streaming JSON array so the master process can start
    consuming before discovery finishes.
    """
    update_sys_path(root)

    fp.write('[')
    first = True
    for benchmark in disc_benchmarks(root):
        if not first:
            fp.write(', ')
        # Keep only plain, JSON-serializable, public attributes.
        clean = dict(
            (k, v) for (k, v) in benchmark.__dict__.items()
            if isinstance(v, (str, int, float, list, dict, bool)) and not
            k.startswith('_'))
        json.dump(clean, fp, skipkeys=True)
        first = False
    fp.write(']')
"resource": ""
} |
def insert_param(self, param):
    """
    Insert a parameter at the front of the parameter list.
    """
    self._current_params = (param,) + tuple(self._current_params)
"resource": ""
} |
q34686 | BuildCache._get_cache_dir | train | def _get_cache_dir(self, commit_hash):
"""
Get the cache dir and timestamp file corresponding to a given commit hash.
"""
path = os.path.join(self._path, commit_hash)
stamp = path + ".timestamp"
return path, stamp | python | {
"resource": ""
} |
def write_atom(dest, entries, author, title, address, updated=None, link=None,
               language="en"):
    """
    Write an atom feed to a file.

    Parameters
    ----------
    dest : str
        Destination file path, or a file-like object
    entries : list of FeedEntry
        Feed entries.
    author : str
        Author of the feed.
    title : str
        Title for the feed.
    address : str
        Address (domain name or email) to be used in building unique IDs.
    updated : datetime, optional
        Time stamp for the feed. If not given, take from the newest entry.
    link : str, optional
        Link for the feed.
    language : str, optional
        Language of the feed. Default is 'en'.
    """
    if updated is None:
        if entries:
            updated = max(entry.updated for entry in entries)
        else:
            updated = datetime.datetime.utcnow()

    root = etree.Element(ATOM_NS + 'feed')

    # id (obligatory)
    node = etree.Element(ATOM_NS + 'id')
    node.text = _get_id(address, None, ["feed", author, title])
    root.append(node)

    # author (obligatory)
    node = etree.Element(ATOM_NS + 'author')
    name_node = etree.Element(ATOM_NS + 'name')
    name_node.text = author
    node.append(name_node)
    root.append(node)

    # title (obligatory)
    node = etree.Element(ATOM_NS + 'title')
    node.attrib[XML_NS + 'lang'] = language
    node.text = title
    root.append(node)

    # updated (obligatory)
    node = etree.Element(ATOM_NS + 'updated')
    node.text = updated.strftime('%Y-%m-%dT%H:%M:%SZ')
    root.append(node)

    # link (optional)
    if link is not None:
        node = etree.Element(ATOM_NS + 'link')
        node.attrib[ATOM_NS + 'href'] = link
        root.append(node)

    # entries
    for entry in entries:
        root.append(entry.get_atom(address, language))

    tree = etree.ElementTree(root)

    def write(f):
        if sys.version_info[:2] < (2, 7):
            # Old ElementTree lacks default_namespace support.
            _etree_py26_write(f, tree)
        else:
            tree.write(f, xml_declaration=True, default_namespace=ATOM_NS[1:-1],
                       encoding=str('utf-8'))

    if hasattr(dest, 'write'):
        write(dest)
    else:
        with util.long_path_open(dest, 'wb') as f:
            write(f)
"resource": ""
} |
def _etree_py26_write(f, tree):
    """
    Compatibility workaround for ElementTree shipped with py2.6
    """
    # py2.6's ElementTree cannot emit an XML declaration itself, so write it
    # by hand before serializing the tree.
    f.write("<?xml version='1.0' encoding='utf-8'?>\n".encode('utf-8'))
    if etree.VERSION[:3] == '1.2':
        # ElementTree 1.2 has no default-namespace support: monkeypatch its
        # fixtag() helper so the Atom namespace becomes the default xmlns on
        # the root <feed> element and xml:lang passes through untouched.
        def fixtag(tag, namespaces):
            if tag == XML_NS + 'lang':
                return 'xml:lang', ""
            if '}' in tag:
                # Strip the "{uri}" prefix from Clark-notation tags.
                j = tag.index('}') + 1
                tag = tag[j:]
            xmlns = ''
            if tag == 'feed':
                xmlns = ('xmlns', str('http://www.w3.org/2005/Atom'))
                namespaces['http://www.w3.org/2005/Atom'] = 'xmlns'
            return tag, xmlns
    else:
        # Newer ElementTree versions: keep the stock implementation.
        fixtag = etree.fixtag
    old_fixtag = etree.fixtag
    etree.fixtag = fixtag
    try:
        tree.write(f, encoding=str('utf-8'))
    finally:
        # Always restore the original helper, even if serialization fails.
        etree.fixtag = old_fixtag
q34689 | _get_id | train | def _get_id(owner, date, content):
"""
Generate an unique Atom id for the given content
"""
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update("github.com/spacetelescope/asv".encode('utf-8'))
for x in content:
if x is None:
h.update(",".encode('utf-8'))
else:
h.update(x.encode('utf-8'))
h.update(",".encode('utf-8'))
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) | python | {
"resource": ""
} |
def InitializeDebuggeeLabels(self, flags):
    """Initialize debuggee labels from environment variables and flags.

    The caller passes all the flags that the debuglet got; only the flags
    used to label the debuggee are consumed here. Flags take precedence
    over environment variables: the environment is scanned first and any
    matching flag value then overwrites it.

    Args:
      flags: dictionary of debuglet command line flags.
    """
    self._debuggee_labels = {}
    for label, env_var_names in six.iteritems(_DEBUGGEE_LABELS):
        # Each label may come from several environment variables; the first
        # one that is set to a non-empty value wins.
        for env_var_name in env_var_names:
            env_value = os.environ.get(env_var_name)
            if not env_value:
                continue
            # Special case for module: omit the "default" module to stay
            # consistent with AppEngine.
            if not (label == labels.Debuggee.MODULE and
                    env_value == 'default'):
                self._debuggee_labels[label] = env_value
            break
    if flags:
        self._debuggee_labels.update(
            {name: value for (name, value) in six.iteritems(flags)
             if name in _DEBUGGEE_LABELS})
    self._debuggee_labels['projectid'] = self._project_id
def SetupAuth(self,
              project_id=None,
              project_number=None,
              service_account_json_file=None):
    """Sets up authentication with Google APIs.

    Credentials come from service_account_json_file when given, otherwise
    application default credentials are used.
    See https://cloud.google.com/docs/authentication/production.

    Args:
      project_id: GCP project ID (e.g. myproject). When omitted, it is
        recovered from the credentials if possible.
      project_number: GCP project number (e.g. 72386324623). Defaults to
        project_id when not provided.
      service_account_json_file: JSON file to use for credentials. When
        omitted, application default credentials are used.

    Raises:
      NoProjectIdError: If the project id cannot be determined.
    """
    if not service_account_json_file:
        # Application default credentials may carry a project id with them.
        self._credentials, default_project_id = google.auth.default(
            scopes=_CLOUD_PLATFORM_SCOPE)
        project_id = project_id or default_project_id
    else:
        self._credentials = (
            service_account.Credentials.from_service_account_file(
                service_account_json_file, scopes=_CLOUD_PLATFORM_SCOPE))
        if not project_id:
            # The service account file itself records its project id.
            with open(service_account_json_file) as f:
                project_id = json.load(f).get('project_id')
    if not project_id:
        raise NoProjectIdError(
            'Unable to determine the project id from the API credentials. '
            'Please specify the project id using the --project_id flag.')
    self._project_id = project_id
    self._project_number = project_number or project_id
def Start(self):
    """Starts the daemonized main worker thread."""
    self._shutdown = False
    worker = threading.Thread(target=self._MainThreadProc)
    worker.name = 'Cloud Debugger main worker thread'
    worker.daemon = True
    self._main_thread = worker
    worker.start()
def Stop(self):
    """Signals the worker threads to shut down and waits until they exit."""
    self._shutdown = True
    self._new_updates.set()  # Wake up the transmission thread.
    # Join and forget each worker thread, main thread first.
    for attr in ('_main_thread', '_transmission_thread'):
        worker = getattr(self, attr)
        if worker is not None:
            worker.join()
            setattr(self, attr, None)
def EnqueueBreakpointUpdate(self, breakpoint):
    """Asynchronously updates the specified breakpoint on the backend.

    This function returns immediately; the transmission worker thread does
    the actual sending and is responsible for retrying transient failures.

    Args:
      breakpoint: breakpoint in either final or non-final state.
    """
    with self._transmission_thread_startup_lock:
        # Lazily start the transmission thread on the first update.
        if self._transmission_thread is None:
            worker = threading.Thread(target=self._TransmissionThreadProc)
            worker.name = 'Cloud Debugger transmission thread'
            worker.daemon = True
            self._transmission_thread = worker
            worker.start()
    # Retry count starts at zero; the worker bumps it on transient failures.
    self._transmission_queue.append((breakpoint, 0))
    self._new_updates.set()
q34695 | GcpHubClient._MainThreadProc | train | def _MainThreadProc(self):
"""Entry point for the worker thread."""
registration_required = True
while not self._shutdown:
if registration_required:
service = self._BuildService()
registration_required, delay = self._RegisterDebuggee(service)
if not registration_required:
registration_required, delay = self._ListActiveBreakpoints(service)
if self.on_idle is not None:
self.on_idle()
if not self._shutdown:
time.sleep(delay) | python | {
"resource": ""
} |
q34696 | GcpHubClient._TransmissionThreadProc | train | def _TransmissionThreadProc(self):
"""Entry point for the transmission worker thread."""
reconnect = True
while not self._shutdown:
self._new_updates.clear()
if reconnect:
service = self._BuildService()
reconnect = False
reconnect, delay = self._TransmitBreakpointUpdates(service)
self._new_updates.wait(delay) | python | {
"resource": ""
} |
def _RegisterDebuggee(self, service):
    """Single attempt to register the debuggee.

    If the registration succeeds, sets self._debuggee_id to the registered
    debuggee ID.

    Args:
      service: client to use for API calls

    Returns:
      (registration_required, delay) tuple
    """
    try:
        request = {'debuggee': self._GetDebuggee()}
        try:
            response = service.debuggees().register(body=request).execute()
            # self._project_number will refer to the project id on
            # initialization if the project number is not available. The
            # project field in the debuggee will always refer to the project
            # number. Update so the server will not have to do id->number
            # translations in the future.
            project_number = response['debuggee'].get('project')
            self._project_number = project_number or self._project_number
            self._debuggee_id = response['debuggee']['id']
            native.LogInfo('Debuggee registered successfully, ID: %s' % (
                self._debuggee_id))
            self.register_backoff.Succeeded()
            return (False, 0)  # Proceed immediately to list active breakpoints.
        except BaseException:
            # Inner broad except: any failure of the register RPC itself is
            # logged and retried with backoff.
            native.LogInfo('Failed to register debuggee: %s, %s' %
                           (request, traceback.format_exc()))
    except BaseException:
        # Outer broad except: building the debuggee description failed.
        native.LogWarning('Debuggee information not available: ' +
                          traceback.format_exc())
    # Either failure path: tell the caller to try registering again after the
    # backoff delay.
    return (True, self.register_backoff.Failed())
def _ListActiveBreakpoints(self, service):
    """Single attempt to query the list of active breakpoints.

    Must not be called before the debuggee has been registered. If the
    request fails, self._debuggee_id is reset, which triggers repeated
    debuggee registration from the main loop.

    Args:
      service: client to use for API calls

    Returns:
      (registration_required, delay) tuple
    """
    try:
        response = service.debuggees().breakpoints().list(
            debuggeeId=self._debuggee_id, waitToken=self._wait_token,
            successOnTimeout=True).execute()
        # Only pick up a new wait token when the hanging-get did not merely
        # time out.
        if not response.get('waitExpired'):
            self._wait_token = response.get('nextWaitToken')
        active = response.get('breakpoints') or []
        if self._breakpoints != active:
            self._breakpoints = active
            native.LogInfo(
                'Breakpoints list changed, %d active, wait token: %s' % (
                    len(self._breakpoints), self._wait_token))
            # Hand the listener its own copy so it cannot mutate our state.
            self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints))
    except BaseException:
        native.LogInfo('Failed to query active breakpoints: ' +
                       traceback.format_exc())
        # Forget debuggee ID to trigger repeated debuggee registration. Once
        # the registration succeeds, the worker thread will retry this query.
        self._debuggee_id = None
        return (True, self.list_backoff.Failed())
    self.list_backoff.Succeeded()
    return (False, 0)
def _TransmitBreakpointUpdates(self, service):
    """Tries to send pending breakpoint updates to the backend.

    Sends all the pending breakpoint updates. In case of transient failures,
    the breakpoint is inserted back to the top of the queue. Application
    failures are not retried (for example updating breakpoint in a final
    state).

    Each pending breakpoint maintains a retry counter. After repeated
    transient failures the breakpoint is discarded and dropped from the
    queue.

    Args:
      service: client to use for API calls

    Returns:
      (reconnect, timeout) tuple. The first element ("reconnect") is set to
      true on unexpected HTTP responses. The caller should discard the HTTP
      connection and create a new one. The second element ("timeout") is
      set to None if all pending breakpoints were sent successfully.
      Otherwise returns time interval in seconds to stall before retrying.
    """
    reconnect = False
    retry_list = []
    # There is only one consumer, so two step pop is safe.
    while self._transmission_queue:
        breakpoint, retry_count = self._transmission_queue.popleft()
        try:
            service.debuggees().breakpoints().update(
                debuggeeId=self._debuggee_id, id=breakpoint['id'],
                body={'breakpoint': breakpoint}).execute()
            native.LogInfo('Breakpoint %s update transmitted successfully' % (
                breakpoint['id']))
        except apiclient.errors.HttpError as err:
            # Treat 400 error codes (except timeout) as application error
            # that will not be retried. All other errors are assumed to be
            # transient.
            status = err.resp.status
            is_transient = ((status >= 500) or (status == 408))
            if is_transient and retry_count < self.max_transmit_attempts - 1:
                # Transient failure with retry budget left: re-queue with an
                # incremented retry counter.
                native.LogInfo('Failed to send breakpoint %s update: %s' % (
                    breakpoint['id'], traceback.format_exc()))
                retry_list.append((breakpoint, retry_count + 1))
            elif is_transient:
                # Transient failure but out of retries: drop the update.
                native.LogWarning(
                    'Breakpoint %s retry count exceeded maximum' % breakpoint['id'])
            else:
                # This is very common if multiple instances are sending final
                # update simultaneously.
                native.LogInfo('%s, breakpoint: %s' % (err, breakpoint['id']))
        except BaseException:
            # Anything other than an HTTP error means the connection itself
            # is suspect; ask the caller to rebuild it.
            native.LogWarning(
                'Fatal error sending breakpoint %s update: %s' % (
                    breakpoint['id'], traceback.format_exc()))
            reconnect = True
    # Put transient failures back for the next round.
    self._transmission_queue.extend(retry_list)
    if not self._transmission_queue:
        self.update_backoff.Succeeded()
        # Nothing to send, wait until next breakpoint update.
        return (reconnect, None)
    else:
        return (reconnect, self.update_backoff.Failed())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.