content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def repos():
    """Display And Add Repos"""
    repos_page = Repos(ReposTable, dynamodb_table)
    return repos_page.display()
def lambda_handler(event, _context):
    """ Main Handler. """
    # Fall back to defaults when the thresholds are absent or falsy.
    threshold_value = event.get('FailureThresholdValue') or 0
    threshold_time = event.get('FailureThresholdTime') or 600
    return get_healthcheck_status(
        event.get('MicroserviceName'),
        event.get('EnvironmentName'),
        event.get('Sha'),
        threshold_value,
        threshold_time
    )
def run(hdf5_data):
    """
    Run the solver
    Args:
        hdf5_data: object, the hdf5 opened storage
    Returns:
        the output of the fortran function as string if successful
    """
    signature = __name__ + '.run(hdf5_data)'
    logger = logging.getLogger(__name__)
    utility.log_entrance(logger, signature,
                         {'hdf5_data': hdf5_data})
    data = init_data()
    data["log_level"] = logging.getLogger().getEffectiveLevel()
    # --- Body mesh (L10 group): counts, panel connectivity and geometry ---
    dset = utility.get_1d_array(logger, hdf5_data, "H5_L10_COUNT", expected_dim=4)
    offset = 1  # converts 0-based python indices to the 1-based fortran convention
    l10_i_sym = int(dset[0])
    n_points = int(dset[1])
    n_panels = int(dset[2])
    n_bodies = int(dset[3])
    mesh_cpanel = utility.get_dataset(hdf5_data, 'H5_L10_CPANEL')
    mesh_xm = utility.get_dataset(hdf5_data, 'H5_L10_XM')
    mesh_n = utility.get_dataset(hdf5_data, 'H5_L10_N')
    mesh_a = utility.get_dataset(hdf5_data, 'H5_L10_A')
    dset = utility.get_1d_array(logger, hdf5_data, "H5_L12_COUNT", expected_dim=2)
    i_sym = int(dset[1])
    # The L10 and L12 groups must agree on the xOz symmetry flag
    if l10_i_sym != i_sym or int(dset[0]) != 2:
        raise ValueError('Stopping because the mesh file format is not correct.'
                         'The symmetry about xoz axis is inconsistent')
    data["i_sym"] = i_sym
    # Fortran-ordered arrays (order='F') are required by the fortran solver bindings
    data["mesh_p"] = np.asarray(utility.get_dataset(hdf5_data, 'H5_L12_P'), order='F', dtype='i')
    data["mesh_x"] = np.asarray(utility.get_dataset(hdf5_data, 'H5_L12_X'), order='F', dtype='f')
    data["n_points"] = n_points
    data["n_panels"] = n_panels
    data["n_bodies"] = n_bodies
    data["mesh_cpanel"] = np.asarray(mesh_cpanel, order='F', dtype='i')
    data["mesh_xm"] = np.asarray(mesh_xm, order='F', dtype='f')
    data["mesh_n"] = np.asarray(mesh_n, order='F', dtype='f')
    data["mesh_a"] = np.asarray(mesh_a, order='F', dtype='f')
    # --- Boundary conditions: one row per radiation/diffraction problem ---
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_W')
    bc_omega = np.asarray(dset, order='F', dtype='f')
    n_problems = bc_omega.shape[0]
    data["bc_omega"] = bc_omega
    data["n_problems"] = n_problems
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_BETA')
    data["bc_switch_type"] = np.asarray(dset, order='F', dtype='i')
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_SWITCH_POTENTIAL')
    data["bc_switch_potential"] = np.asarray(dset, order='F', dtype='i')
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_SWITCH_FREE_SURFACE')
    data["bc_switch_freesurface"] = np.asarray(dset, order='F', dtype='i')
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_SWITCH_KOCHIN')
    data["bc_switch_kochin"] = np.asarray(dset, order='F', dtype='i')
    dset = utility.get_dataset(hdf5_data, 'H5_NORMAL_VELOCITY_VELOCITIES')
    # dtype 'F' is numpy complex64: the prescribed normal velocities are complex
    data["bc_normal_velocity"] = np.asarray(dset, order='F', dtype='F')
    data["nbc_panels"] = data["bc_normal_velocity"].shape[0]
    # --- Physical environment ---
    data["rho"] = utility.get_1d_array(logger, hdf5_data, "H5_ENV_VOLUME", expected_dim=1)[0]
    data["g"] = utility.get_1d_array(logger, hdf5_data, "H5_ENV_GRAVITY", expected_dim=1)[0]
    data["depth"] = utility.get_1d_array(logger, hdf5_data, "H5_ENV_DEPTH", expected_dim=1)[0]
    dset = utility.get_1d_array(logger, hdf5_data, "H5_ENV_WAVE_POINT", expected_dim=2)
    data["xeff"] = dset[0]
    data["y_eff"] = dset[1]
    # --- Linear-solver configuration (direct or GMRES) ---
    data["indiq_solver"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_TYPE", expected_dim=1)[0]
    data["max_iterations"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_GMRES_MAX_ITERATIONS", 1)[0]
    data["restart_param"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_GMRES_RESTART", expected_dim=1)[0]
    data["tol_gmres"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_GMRES_STOPPING", expected_dim=1)[0]
    data["nds"] = np.asarray(utility.get_dataset(hdf5_data, 'H5_MESH_INTEGRATION'), order='F', dtype='f')
    data["n_integration"] = data["nds"].shape[0]
    data["use_higher_order"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_USE_HIGHER_ORDER", 1)[0]
    data["num_panel_higher_order"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_NUM_PANEL_HIGHER_ORDER", 1)[0]
    data["b_spline_order"] = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_B_SPLINE_ORDER", expected_dim=1)[0]
    # --- Kochin function directions and free-surface mesh ---
    data["theta"] = np.asarray(utility.get_dataset(hdf5_data, 'H5_MESH_KOCHIN'), order='F', dtype='f')
    data["n_theta"] = data["theta"].shape[0]
    data["meshfs_x"] = np.asarray(utility.get_2d_array(logger, hdf5_data, "H5_MESH_FREE_SURFACE_VECTORS"), dtype='f')
    data["nfs_points"] = data["meshfs_x"].shape[1]
    data["meshfs_p"] = np.asarray(utility.get_2d_array(logger, hdf5_data, "H5_MESH_FREE_SURFACE_INDEX"),
                                  order='F', dtype='i') + offset
    data["nfs_panels"] = data["meshfs_p"].shape[1]
    # --- Output buffers, preallocated fortran-ordered for the solver to fill in ---
    data["out_phi"] = np.zeros((n_problems, 1+data["nfs_points"]), dtype='F', order="F")
    data["out_pressure"] = np.zeros((n_problems, data["nbc_panels"]), dtype='F', order="F")
    data["out_hkochin"] = np.zeros((n_problems, data["n_theta"]), dtype='F', order="F")
    data["line"] = np.zeros((data["n_integration"], n_problems*2), order="F", dtype='f')
    data["drift_forces"] = np.zeros((n_problems, data["n_theta"], 2), order="F", dtype='f')
    data["yaw_moment"] = np.zeros((n_problems, data["n_theta"]), order="F", dtype='f')
    data["center_buoyancy"] = np.zeros((n_bodies, 3), order="F", dtype='f')
    data["displacement"] = np.zeros((n_bodies), order="F", dtype='f')
    data["waterplane_area"] = np.zeros((n_bodies), order="F", dtype='f')
    # NOTE(review): key spelled "stifness" (sic) -- kept, downstream code reads this key
    data["stifness"] = np.zeros((n_bodies, 6, 6), order="F", dtype='f')
    # Storage is doubled when the xOz symmetry is active (i_sym == 1)
    n_potentials = 5*n_points*(1 + (i_sym == 1)) +9*n_panels*(1 + (i_sym == 1))
    data["out_potential"] = np.zeros((n_problems, n_potentials), dtype='f', order="F")
    data["n_potentials"] = n_potentials
    # --- Green-function tabulation parameters ---
    data["n_tabulatedx"] = int(utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_GREEN_TABULATION_NUMX", 1)[0])
    data["n_tabulatedz"] = int(utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_GREEN_TABULATION_NUMZ", 1)[0])
    data["n_points_simpson"] = int(utility.get_1d_array(logger, hdf5_data,
                                                        "H5_SOLVER_GREEN_TABULATION_SIMPSON_NPOINTS", 1)[0])
    dset = utility.get_dataset(hdf5_data, 'H5_SOLVER_SWITCH_ODE_INFLUENCE')
    data["fast_influence_switch"] = np.asarray(dset, order='F', dtype='i')
    data["is_interior_domain"] = np.zeros((n_panels), dtype='i', order="F")
    remove_irregular_frequencies = utility.get_1d_array(logger, hdf5_data,
                                                        "H5_SOLVER_REMOVE_IRREGULAR_FREQUENCIES", 1)[0]
    if remove_irregular_frequencies:
        # Bug??? Previous code used dset = hdf5_data.get(structure.H5_SOLVER_REMOVE_IRREGULAR_FREQUENCIES)
        dset = utility.get_dataset(hdf5_data, 'H5_SOLVER_IS_INTERIOR_DOMAIN')
        data["is_interior_domain"] = np.asarray(dset, order='F', dtype='i')
    # --- Result cases: wave headings and radiation cases ---
    dset = utility.get_dataset(hdf5_data, 'H5_RESULTS_CASE_BETA')
    data["beta"] = np.asarray(dset, order='F', dtype='f')
    data["n_beta"] = data["beta"].shape[0]
    dset = utility.get_dataset(hdf5_data, 'H5_RESULTS_CASE_RADIATION')
    data["rad_case"] = np.asarray(dset, order='F', dtype='f')
    data["n_radiation"] = data["rad_case"].shape[0]
    dset = utility.get_dataset(hdf5_data, 'H5_SOLVER_THIN_PANELS')
    data["is_thin_body"] = np.asarray(dset, order='F', dtype='i')
    dset = utility.get_dataset(hdf5_data, 'H5_SOLVER_USE_DIPOLES_IMPLEMENTATION')
    data["use_dipoles_implementation"] = dset[0]
    data["remove_irregular_frequencies"] = utility.get_dataset(hdf5_data, 'H5_SOLVER_REMOVE_IRREGULAR_FREQUENCIES')[0]
    dset = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_COMPUTE_YAW_MOMENT", 1)
    data["compute_yaw_moment"] = dset[0]
    dset = utility.get_1d_array(logger, hdf5_data, "H5_SOLVER_COMPUTE_DRIFT_FORCES", 1)
    data["compute_drift_forces"] = dset[0]
    # Disable kochin, yaw moments and drift forces
    if data["use_higher_order"] == 1 or data["use_dipoles_implementation"] == 1:
        # Zeroing n_theta makes the solver skip the Kochin-dependent outputs
        data["n_theta"] = 0
        logger.info('Disabling koching, yaw monment and drift forces computation as '
                    'not supported when higher order panel or dipoles implementation is '
                    'enabled')
    #with CaptureOutput() as capturer:
    solver_fortran.run_solver(data)
    write_result(hdf5_data, data)
def pytest_addoption(parser):
    """ Facilitate command-line test behavior adjustment. """
    option_kwargs = {
        "default": "WARN",
        "help": "Project root logger level to use for tests",
    }
    parser.addoption("--logging-level", **option_kwargs)
def test_export_as_csl():
    """
    CSL export can be tested via curl:
    ```
    curl \
    --header "Content-Type: application/json" \
    --data '[{"key": "IN22XN53", "itemType": "webpage", "date": "2016-02-09T20:12:00"}]' \
    'https://translate.manubot.org/export?format=csljson'
    ```
    """
    record = {
        "key": "IN22XN53",
        "version": 0,
        "itemType": "webpage",
        "creators": [],
        "tags": [],
        "title": "Meet the Robin Hood of Science",
        "websiteTitle": "Big Think",
        "date": "2016-02-09T20:12:00",
        "url": "https://bigthink.com/neurobonkers/a-pirate-bay-for-science",
        "abstractNote": "How one researcher created a pirate bay for science more powerful than even libraries at top universities.",
        "language": "en",
        "accessDate": "2018-12-06T20:10:14Z",
    }
    first_item = export_as_csl([record])[0]
    assert first_item["title"] == "Meet the Robin Hood of Science"
    assert first_item["container-title"] == "Big Think"
def send_multipart_json(sock, idents, reply):
    """Serialize *reply* as JSON and send it after the identity frames."""
    payload = ut.to_json(reply).encode('utf-8')
    reply = None  # drop the reference before the (possibly blocking) send
    sock.send_multipart(idents + [payload])
def delete_file(path):
    """Deletes the file at the given path and recursively deletes any empty
    directories from the resulting directory tree.

    Args:
        path: the filepath

    Raises:
        OSError if the deletion failed
    """
    os.remove(path)
    dirname = os.path.dirname(path)
    if not dirname:
        # Bare filename (relative to cwd): no directory tree to prune.
        return
    try:
        # Removes the leaf directory, then each empty ancestor in turn.
        os.removedirs(dirname)
    except OSError:
        # found a non-empty directory or directory with no write access
        pass
def masked_crc32c(data):
    """Copied from
    https://github.com/TeamHG-Memex/tensorboard_logger/blob/master/tensorboard_logger/tensorboard_logger.py"""
    # Rotate the 32-bit CRC right by 15 bits, then add the masking constant.
    checksum = u32(crc32c(data))  # pylint: disable=invalid-name
    return u32(((checksum >> 15) | u32(checksum << 17)) + 0xa282ead8)
def _printResults(opts, logger, header, content, filename=None):
    """Print header string and content string to file of given
    name. If filename is none, then log to info.
    If --tostdout option, then instead of logging, print to STDOUT.
    """
    # cstart marks where the "real" content begins (just past any
    # <?xml ... ?> prolog); text before cstart is treated as a preamble.
    cstart = 0
    # If the content is a single quote quoted XML doc then just drop those single quotes
    if content is not None and content.startswith("'<?xml") and content.endswith("'"):
        content = content[1:-1]
        # NOTE(review): str.find returns -1 when the substring is absent,
        # which is truthy -- this condition is only False when ">\n" sits at
        # index 0. The intent was probably `content.find(">\n") > -1`.
        if content.find(">\n"):
            content = content.replace("\\n", "\n")
    # if content starts with <?xml ..... ?> then put the header after that bit
    elif content is not None and content.find("<?xml") > -1 and content.find("'<?xml") < 0:
        cstart = content.find("?>", content.find("<?xml") + len("<?xml"))+2
        # push past any trailing \n
        if content[cstart:cstart+2] == "\\n":
            cstart += 2
    # used by listresources
    if filename is None:
        # No file given: emit everything via the logger, or to STDOUT when
        # --tostdout was requested.
        if header is not None:
            if cstart > 0:
                # Emit the XML prolog before the header
                if not opts.tostdout:
                    logger.info(content[:cstart])
                else:
                    print content[:cstart] + "\n"
            if not opts.tostdout:
                # indent header a bit if there was something first
                pre = ""
                if cstart > 0:
                    pre = " "
                logger.info(pre + header)
            else:
                # If cstart is 0 maybe still log the header so it
                # isn't written to STDOUT and non-machine-parsable
                if cstart == 0:
                    logger.info(header)
                else:
                    print header + "\n"
        elif content is not None:
            # No header: emit just the prolog here; the body follows below
            if not opts.tostdout:
                if cstart > 0 and content[:cstart].strip() != "":
                    logger.info(content[:cstart])
            else:
                print content[:cstart] + "\n"
        if content is not None:
            if not opts.tostdout:
                # indent a bit if there was something first
                pre = ""
                if cstart > 0:
                    pre += " "
                logger.info(pre + content[cstart:])
            else:
                print content[cstart:] + "\n"
    else:
        # Writing to a file: create the parent directory first if needed.
        fdir = os.path.dirname(filename)
        if fdir and fdir != "":
            if not os.path.exists(fdir):
                os.makedirs(fdir)
        with open(filename,'w') as file:
            logger.info( "Writing to '%s'"%(filename))
            if header is not None:
                if cstart > 0:
                    file.write (content[:cstart] + '\n')
                # this will fail for JSON output.
                # only write header to file if have xml like
                # above, else do log thing per above
                # FIXME: XML file without the <?xml also ends up logging the header this way
                if cstart > 0:
                    file.write(" " + header )
                    file.write( "\n" )
                else:
                    logger.info(header)
            elif cstart > 0:
                file.write(content[:cstart] + '\n')
            if content is not None:
                pre = ""
                if cstart > 0:
                    pre += " "
                file.write( pre + content[cstart:] )
                file.write( "\n" )
def yaml_dictionary(gra, one_indexed=True):
    """ generate a YAML dictionary representing a given graph
    """
    if one_indexed:
        # shift to one-indexing when we print
        shift_dct = {key: key + 1 for key in atom_keys(gra)}
        gra = relabel(gra, shift_dct)
    # prepare the atom dictionary: sorted keys, named property fields
    atm_dct = dict(sorted(atoms(gra).items()))
    atm_dct = dict_.transform_values(
        atm_dct, lambda vals: dict(zip(ATM_PROP_NAMES, vals)))
    # prepare the bond dictionary: canonical "i-j" keys, named property fields
    bnd_dct = dict_.transform_keys(
        bonds(gra), lambda bnd: tuple(sorted(bnd)))
    bnd_dct = dict(sorted(bnd_dct.items()))
    bnd_dct = dict_.transform_keys(
        bnd_dct, lambda bnd: '-'.join(map(str, bnd)))
    bnd_dct = dict_.transform_values(
        bnd_dct, lambda vals: dict(zip(BND_PROP_NAMES, vals)))
    return {'atoms': atm_dct, 'bonds': bnd_dct}
def CreateCloudsWeights( weights = None, names = None, n_clusters = None,
                         save = 1, dirCreate = 1, filename = 'WC',
                         dirName = 'WCC', number = 50 ):
    """Same as CreateClouds, but builds the clouds from a list of
    per-class weights instead of raw counts: one positive and one
    negative cloud per cluster."""
    dictP, dictN = CalculateWeights(weights=weights, names=names,
                                    n_clusters=n_clusters, number=number)
    for cluster in np.arange(n_clusters):
        # One cloud for the positive class, one for the negative class
        clouds(counts=dictP[cluster], filename=filename + 'Pos' + str(cluster),
               dirName=dirName, dirCreate=dirCreate)
        clouds(counts=dictN[cluster], filename=filename + 'Neg' + str(cluster),
               dirName=dirName, dirCreate=dirCreate)
    return {'dictp': dictP, 'dictN': dictN}
def Seqslicer (Sequ, dictORF):
    """Slice ORF subsequences out of a fasta file according to dictORF.

    Each dict value is (start, end, strand); minus-strand slices are
    reverse-complemented. One fasta file is written per ORF unless it
    already exists.
    """
    record = SeqIO.read(Sequ, "fasta")
    for orf_key, coords in dictORF.items():
        nameORF = "ORF" + orf_key
        # Coordinates are 1-based inclusive; convert to a python slice
        subseq = record.seq[(int(coords[0])-1):int(coords[1]) ]
        if coords[2] == "-":
            subseq = inversComplement(str(subseq))
        if not os.path.isfile(str(nameORF)+ ".fasta"):
            print("\n writing of " + str(nameORF)+ ".fasta ... \n")
            with open(str(nameORF)+ ".fasta", "a+") as f:
                f.writelines(">" + nameORF + "_"+ coords[2] + "\n")
                f.writelines(str(subseq))
        else :
            print("\n file {} already exist.. \n".format(str(nameORF)+ ".fasta"))
def sigmoid(x, deri=False):
    """Sigmoid activation function.

    Parameters:
        x (array)     : A numpy array (or scalar)
        deri (boolean): If True, return the derivative computed from the
                        already-activated values x, i.e. x * (1 - x)
    Returns:
        x (array): Numpy array after applying the appropriate function
    """
    if deri:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
def migrate(env, dry_run=False):
    """
    User-friendly frontend to run database migrations.
    """
    registry = env['registry']
    settings = registry.settings
    readonly_mode = asbool(settings.get('readonly', False))
    # Storage and permission backends must not be migrated in readonly mode.
    readonly_backends = ('storage', 'permission')
    for backend_name in ('cache', 'storage', 'permission'):
        if not hasattr(registry, backend_name):
            continue
        if readonly_mode and backend_name in readonly_backends:
            message = ('Cannot migrate the %s backend while '
                       'in readonly mode.' % backend_name)
            logger.error(message)
        else:
            getattr(registry, backend_name).initialize_schema(dry_run=dry_run)
def main(args):
    """Calibrate aperture photometry against a reference catalog.

    Steps: read the aperture table and FITS image, guess the passband if
    not given, convert image x/y to ra/dec, match sources against the
    reference catalog on the sky, derive a zeropoint, apply it, and
    optionally write the calibrated table to ``args.outfile``.
    """
    image_name = os.path.splitext(args.image)[0][0:-2]
    names = "id ximage yimage 3.5p 3.5err 5.0p 5.0err 7.0p 7.0err 9.0p 9.0err 1.0fh 1.0err 1.5fh 1.5err 2.0fh 2.0err".split()
    print(names)
    apt_path = '{}_apt.txt'.format(image_name)
    try:
        image = Table.read(apt_path, format='ascii', names=names)
    except FileNotFoundError:
        image = Table.read(apt_path)
    image_fits = fits.open(args.image)
    reference = Table.read(args.reference, format='ascii')
    passband = args.passband or guess_band(args.image)
    print(image)
    print(reference)
    image_radec = xy2rd(image_fits, image['ximage'], image['yimage'])
    image_catalog = SkyCoord(ra=image_radec['RA'], dec=image_radec['DEC'])
    reference_catalog = SkyCoord(ra=(reference['RA'] * u.deg),
                                 dec=(reference['DEC'] * u.deg))
    # Match image catalog to reference catalog and get zeropoint
    match_id, sep2d, dist3d = image_catalog.match_to_catalog_sky(
        reference_catalog)
    reference = reference[match_id]
    zeropoint = zeropoint_mean(image, reference, mag='3.5p', passband=passband)
    # Apply zeropoint
    apply_zeropoint(image, zeropoint)
    # Save calibrated aperture data
    if args.outfile:
        ascii.write(image, output=args.outfile, delimiter='\t',
                    overwrite=True)
        print('Write to {}: Success'.format(args.outfile))
    return image
def palindrome_permutation(string):
    """Return True if some permutation of *string* is a palindrome.

    All palindromes follow the same rule: at most one character has an
    odd count, that character being the "pivot" of the palindrome. The
    characters with even counts can always be permuted to mirror each
    other across the pivot.

    Note: the input is lowercased and stripped of leading/trailing
    whitespace only; interior spaces and punctuation still count.
    """
    counts = Counter(string.strip().lower())
    # Sum with a generator instead of materializing a list named `l`
    # (which shadowed a builtin-style single-letter name).
    odd_counts = sum(1 for count in counts.values() if count % 2 == 1)
    return odd_counts < 2
def generate_pfm_v2(pfm_header_instance, toc_header_instance, toc_element_list, toc_elements_hash_list,
                    platform_id_header_instance, flash_device_instance, allowable_fw_list, fw_id_list, hash_type):
    """
    Create a PFM V2 object from all the different PFM components

    :param pfm_header_instance: Instance of a PFM header
    :param toc_header_instance: Instance of a TOC header
    :param toc_element_list: List of TOC elements to be included in PFM
    :param toc_elements_hash_list: List of TOC hashes to be included in PFM
    :param platform_id_header_instance: Instance of a PFM platform header
    :param flash_device_instance: Instance of a PFM flash device header
    :param allowable_fw_list: List of all allowable FWs to be included in PFM
    :param fw_id_list: List of all FW ID instances
    :param hash_type: Hashing algorithm to be used for hashing TOC elements
        (0 = SHA256, 1 = SHA384, 2 = SHA512)
    :return: Instance of a PFM object
    :raises ValueError: if hash_type is not 0, 1 or 2
    """
    hash_algo = None
    if hash_type == 2:
        hash_algo = SHA512
    elif hash_type == 1:
        hash_algo = SHA384
    elif hash_type == 0:
        hash_algo = SHA256
    else:
        raise ValueError ("Invalid manifest hash type: {0}".format (hash_type))
    toc_elements_size = ctypes.sizeof(toc_element_list[0]) * len (toc_element_list)
    toc_hash_size = ctypes.sizeof(toc_elements_hash_list[0]) * len (toc_elements_hash_list)
    # Table hash: digest over the TOC header, then TOC elements, then hashes.
    # Bug fix: the previous code referenced an undefined name `toc_header`
    # instead of the `toc_header_instance` parameter.
    table_hash_buf = (ctypes.c_ubyte * ctypes.sizeof(toc_header_instance))()
    ctypes.memmove(ctypes.addressof(table_hash_buf), ctypes.addressof(toc_header_instance),
                   ctypes.sizeof(toc_header_instance))
    table_hash_object = hash_algo.new(table_hash_buf)
    # Serialize the TOC elements into one contiguous byte buffer.
    # Bug fix: the previous loop iterated over undefined `toc_elements_list`.
    offset = 0
    toc_elements_buf = (ctypes.c_ubyte * toc_elements_size)()
    for toc_element in toc_element_list:
        ctypes.memmove(ctypes.addressof(toc_elements_buf) + offset, ctypes.addressof(toc_element),
                       ctypes.sizeof(toc_element))
        offset += ctypes.sizeof(toc_element)
    # Update table hash with TOC elements
    table_hash_object.update(toc_elements_buf)
    # Serialize the per-element TOC hashes the same way.
    toc_hash_buf = (ctypes.c_ubyte * toc_hash_size)()
    offset = 0
    for toc_hash in toc_elements_hash_list:
        ctypes.memmove(ctypes.addressof(toc_hash_buf) + offset, ctypes.addressof(toc_hash),
                       ctypes.sizeof(toc_hash))
        offset += ctypes.sizeof(toc_hash)
    # Update table hash with TOC
    table_hash_object.update(toc_hash_buf)
    table_hash_buf = (ctypes.c_ubyte * table_hash_object.digest_size).from_buffer_copy(table_hash_object.digest())
    table_hash_buf_size = ctypes.sizeof(table_hash_buf)
    platform_id_size = ctypes.sizeof(platform_id_header_instance)
    platform_id_buf = (ctypes.c_ubyte * platform_id_size)()
    ctypes.memmove(ctypes.addressof(platform_id_buf), ctypes.addressof(platform_id_header_instance), platform_id_size)
    # Total size of the allowable-FW section: all FW IDs plus their FW entries.
    allowable_fw_size = 0
    for fw_id in fw_id_list.values():
        allowable_fw_size += ctypes.sizeof(fw_id)
    for fw_list in allowable_fw_list.values():
        for allowable_fw in fw_list:
            allowable_fw_size += ctypes.sizeof(allowable_fw)
    # Flash device header is optional.
    flash_device_size = 0
    if flash_device_instance is not None:
        flash_device_size = ctypes.sizeof(flash_device_instance)
    flash_device_buf = (ctypes.c_ubyte * flash_device_size)()
    if flash_device_size:
        ctypes.memmove(ctypes.addressof(flash_device_buf), ctypes.addressof(flash_device_instance), flash_device_size)
    class pfm_v2(ctypes.LittleEndianStructure):
        # Packed on-flash layout of the complete PFM v2 manifest.
        _pack_ = 1
        _fields_ = [('manifest_header', manifest_common.manifest_header),
                    ('toc_header', manifest_common.manifest_toc_header),
                    ('toc_elements', ctypes.c_ubyte * toc_elements_size),
                    ('toc_hash', ctypes.c_ubyte * toc_hash_size),
                    ('table_hash', ctypes.c_ubyte * table_hash_buf_size),
                    ('platform_id', ctypes.c_ubyte * platform_id_size),
                    ('flash_device', ctypes.c_ubyte * flash_device_size),
                    ('allowable_fw', ctypes.c_ubyte * allowable_fw_size)]
    # Serialize each FW ID followed by its allowable FW entries.
    offset = 0
    fw_buf = (ctypes.c_ubyte * allowable_fw_size)()
    for fw_type, fw_id in fw_id_list.items():
        ctypes.memmove(ctypes.addressof(fw_buf) + offset, ctypes.addressof(fw_id), ctypes.sizeof(fw_id))
        offset += ctypes.sizeof(fw_id)
        fw_list = allowable_fw_list.get(fw_type)
        for allowed_fw in fw_list:
            ctypes.memmove(ctypes.addressof(fw_buf) + offset, ctypes.addressof(allowed_fw),
                           ctypes.sizeof(allowed_fw))
            offset += ctypes.sizeof(allowed_fw)
    return pfm_v2(pfm_header_instance, toc_header_instance, toc_elements_buf, toc_hash_buf, table_hash_buf,
                  platform_id_buf, flash_device_buf, fw_buf)
async def handle(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    """Handle one client connection: read commands, dispatch, reply.

    Each message has the form "<func_name> [args...]"; the function is
    looked up in the dispatch table and its result (or an error string)
    is written back to the client.
    """
    dispatch = get_dispatch()
    while True:
        data = await reader.read(100)
        if not data:
            # Bug fix: read() returns b'' at EOF (peer closed the
            # connection). Without this check the loop would spin forever
            # dispatching empty messages.
            break
        message = data.decode().rstrip(' \n')
        address = writer.get_extra_info('peername')
        logging.info(f"Received {message!r} from {address!r}")
        func_name, *args = message.split(' ')
        logging.debug(f"Received following function name and arguments: {func_name} and {args}")
        res_func = dispatch.get(func_name.lower())
        try:
            # If the name is unknown, res_func is None and calling it raises
            # TypeError, handled below together with arity errors.
            result = res_func(*args)
        except TypeError as t:
            logging.exception(t)
            if res_func is not None:
                logging.error("User did not provide correct number of arguments")
                result = "Incorrect number of arguments provided, please try again"
            else:
                logging.error("User requested unknown function")
                result = f"Function '{func_name}' not available, provide 'list' to see which functions are available"
        except ValueError as v:
            logging.exception(v)
            result = "Incorrect argument type(s) provided, please try again"
        writer.write(f"{result}\n".encode('UTF-8'))
        logging.info(f"Sent: {result!r}")
        await writer.drain()
def symmetrise_AP(AP):
    """
    Symmetrise each matrix in the batch over its last two axes.

    No checks on this since this is a deep-inside-module helper routine.
    AP must be a batch of matrices (n, 1, N, N).
    """
    swapped = AP.transpose(2, 3)
    return AP + swapped
def index():
    """News front page."""
    # ---------------------- 1. Query basic user info for display ----------------------
    # The user-info lookup turned out to be needed in several views, so the
    # duplicated code was moved into a decorator for reuse.
    # # 1. Get the user_id from the session
    # user_id = session.get("user_id")
    #
    # user = None
    # # Define before use, otherwise: local variable 'user_dict' referenced before assignment
    # user_dict = None
    # if user_id:
    #     # 2. Query the user object by user_id
    #     try:
    #         user = User.query.get(user_id)
    #     except Exception as e:
    #         current_app.logger.error(e)
    #         return "查询用户对象异常"
    # Read the user object from the g object (populated by the decorator)
    user = g.user
    # 3. Convert the user object to a dict
    """
    if user:
        user_dict = user.to_dict()
    """
    user_dict = user.to_dict() if user else None
    # ---------------------- 2. Query the news click-ranking list ----------------------
    # order_by: sort the news by click count in descending order
    try:
        rank_news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="查询点击排行数据异常")
    """
    rank_news_list:是一个对象列表 [news_obj1, news_obj2, .....]
    rank_dict_list = []
    if rank_news_list:
        for news in rank_news_list:
            news_dict = news.to_dict()
            rank_dict_list.append(news_dict)
    """
    # Convert the list of News objects into a list of dicts
    rank_dict_list = []
    for news in rank_news_list if rank_news_list else []:
        # Convert each object to a dict and append it to the list
        rank_dict_list.append(news.to_dict())
    # ---------------------- 3. Query the news category list ----------------------
    # 1. Query all category rows
    try:
        categories = Category.query.all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="查询新闻分类对象异常")
    # 2. Convert the category object list into a list of dicts
    category_dict_list = []
    for category in categories if categories else []:
        # Convert each category object to a dict and append it to the list
        category_dict_list.append(category.to_dict())
    # Render the template and hand the queried data back along with it
    """
    数据格式:
    data = {
        "user_info": {
            "id": self.id,
            "nick_name": self.nick_name,
        }
    }
    使用: data.user_info.nick_name
          data.rank_news_list -- 字典列表
    """
    # Assemble the response payload
    data = {
        "user_info": user_dict,
        "click_news_list": rank_dict_list,
        "categories": category_dict_list
    }
    return render_template("news/index.html", data=data)
def formatter_message(message, use_color = True):
    """ Format the pattern in which the log messages will be
    displayed.

    @param message: message log to be displayed
    @param use_color: Flag to indicates the use of colors or not
    @type message: str
    @type use_color: boolean
    @return: the new formatted message
    @rtype: str
    """
    if use_color:
        replacements = (('$RESET', RESET_SEQ), ('$BOLD', BOLD_SEQ))
    else:
        replacements = (('$RESET', ''), ('$BOLD', ''))
    for token, sequence in replacements:
        message = message.replace(token, sequence)
    return message
def find_offsets(head_mapping):
    """Find the time offsets that align the series in head_mapping

    Finds the set of time offsets that minimize the sum of squared
    differences in times at which each series crosses a particular
    head. Input is a mapping of head id (a hashable value
    corresponding to a head, normally an integer) to a sequence of
    (series_id, time) pairs wherein series_id is an identifier for a
    sequence and time is the time at which the series crossed the
    corresponding head value.

    The series with the series_id that is largest (last in sort order)
    is treated as the reference and given an offset of zero; all other
    offsets are relative to that one.

    Returns series_ids, offsets where series_ids are the identifiers
    """
    # Eliminate all heads with only one series, these are
    # uninformative
    for head_id, seq in list(head_mapping.items()):
        # Don't use "assert seq" here, this is an ndarray
        assert len(seq) > 0  # pylint: disable=len-as-condition
        if len(seq) == 1:
            del head_mapping[head_id]
    # Assemble mapping of series ids to row numbers for the
    # least-squares problem
    series_ids = ((series_id for series_id, t_mean in seq)
                  for seq in list(head_mapping.values()))
    series_ids = sorted(set().union(*series_ids))
    series_indices = dict(zip(series_ids,
                              range(len(series_ids))))
    # Reference series corresponds to the highest series id; it
    # has the largest initial head, because we sorted them
    reference_index = max(series_ids)
    LOG.info('Reference index: %s', reference_index)
    # One equation per (head, series) observation; one unknown offset per
    # series except the reference, which is pinned at zero.
    number_of_equations = sum(len(series_at_head) for series_at_head
                              in list(head_mapping.values()))
    number_of_unknowns = len(series_indices) - 1
    LOG.info('%s equations, %s unknowns',
             number_of_equations, number_of_unknowns)
    A = np.zeros((number_of_equations, number_of_unknowns))
    b = np.zeros((number_of_equations,))
    row_template = np.zeros((number_of_unknowns,))
    row_index = 0
    for head_id, series_at_head in list(head_mapping.items()):
        # Each equation compares one series' crossing time against the mean
        # crossing time of all series at this head.
        row_template[:] = 0
        sids, times = list(zip(*series_at_head))
        number_of_series_at_head = len(sids)
        indices = [series_indices[index] for index in sids
                   if index != reference_index]
        row_template[indices] = 1. / number_of_series_at_head
        mean_time = np.mean(times)
        for series_id, t in series_at_head:
            A[row_index] = row_template
            # !!! some redundancy here
            if series_id != reference_index:
                series_index = series_indices[series_id]
                A[row_index, series_index] -= 1
            b[row_index] = t - mean_time
            row_index += 1
    assert row_index == number_of_equations, row_index
    # Solve the normal equations (A^T A) x = A^T b
    ATA = np.dot(A.transpose(), A)
    assert ATA.shape == (number_of_unknowns,
                         number_of_unknowns), ATA.shape
    ATd = np.dot(A.transpose(), b)
    offsets = linalg_mod.solve(ATA, ATd)  # pylint: disable=E1101
    # this was the boundary condition, zero offset for
    # reference (last) id
    offsets = np.concatenate((offsets, [0]))
    # offsets are by index, but reverse mapping is trivial
    # because series ids are sorted
    assert len(series_ids) == len(offsets), \
        '{} != {}'.format(len(series_ids), len(offsets))
    return (series_ids, offsets)
def whoami():
    """
    Displays the username.

    USAGE
    - sniper whoami
    """
    try:
        with open(TOKEN_FILE) as t:
            token = t.read()
    except OSError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        raise SniperError('Error reading the credentials file.')
    if token == '':
        raise SniperError('You are currently not logged in.')
    token = token.split('\n')
    if len(token) < 2:
        raise SniperError('Credentials file is corrupt. Please report this issue on Github.')
    # Second line of the credentials file holds the username.
    click.echo(token[1])
def get_argument(value, arg):
    """Look up *arg* in the mapping *value*, returning None when absent."""
    return value.get(arg)
def update_user_distances(user, start, end, update_only=True):
    """Update travelled_distances for given user, based on changes to data
    between given start and end. If update_only, disallow writing stats on a
    new day, do update global stats."""
    # Snap to whole days
    start = start.replace(hour=0, minute=0, second=0, microsecond=0)
    end += timedelta(days=1, microseconds=-1)
    end = end.replace(hour=0, minute=0, second=0, microsecond=0)
    data_rows = get_filtered_device_data_points(user, start, end)
    # discard suspiciously sharp movement from bogus location jumps
    data_rows = trace_discard_sidesteps(data_rows, BAD_LOCATION_RADIUS)
    dists = db.metadata.tables["travelled_distances"]
    # Manual upsert keyed on (user_id, time): update an existing row, or
    # insert a fresh one only when update_only is False.
    for rating in get_ratings_from_rows(data_rows, user):
        where = and_(*(dists.c[x] == rating[x] for x in ["user_id", "time"]))
        ex = db.engine.execute(dists.select(where)).first()  # no upsert yet
        if ex:
            db.engine.execute(dists.update(where, rating))
        elif not update_only:
            # Refrain from writing partial stats for today that daily batch
            # then wouldn't update
            db.engine.execute(dists.insert([rating]))
    # Batch updates may want to defer generating derived sums and rankings
    if not update_only:
        return
    # Update unused weekly rankings based on ratings; regenerate for every
    # distinct day touched in a window extending 6 days past `end`.
    query = text("""
        SELECT DISTINCT time FROM travelled_distances
        WHERE time >= :start AND time < :end + interval '6 days'
        AND total_distance IS NOT NULL""")
    for row in db.engine.execute(query, start=start, end=end):
        generate_rankings(row[0])
    # Update unused global distance, co2 average, active users in last 13 days
    update_global_statistics(start, end)
def tokenize(docs, word_tokenize_flag=1):
    """Split documents into sentences and, optionally, into words.

    :param docs: iterable of document strings
    :param word_tokenize_flag: 1 -> return one list of word tokens per
        sentence; 0 -> return the flat list of sentences. Any other
        value returns None (behaviour preserved from the original).
    :return: list of token lists, list of sentences, or None
    """
    sentences = []
    for doc in docs:
        sentences.extend(sent_tokenize(doc))
    if word_tokenize_flag == 1:
        return [word_tokenize(sentence) for sentence in sentences]
    if word_tokenize_flag == 0:
        return sentences
    # Previously an implicit fall-through; made explicit for readability.
    return None
def is_literal(expr):
    """
    Returns True if expr is a literal, else False.

    Examples
    ========
    >>> is_literal(a)
    True
    >>> is_literal(~a)
    True
    >>> is_literal(a + b)
    True
    >>> is_literal(Or(a, b))
    False
    """
    # A negation is a literal when its operand is not a boolean function.
    target = expr.args[0] if isinstance(expr, Not) else expr
    return not isinstance(target, BooleanFunction)
def get_target_proportions_of_current_trial(individuals, target):
    """Count finished patients and how many waited less than the target.

    Only a patient's last data record is considered, and only when it
    comes from node 2. Patients with a single record are treated as
    "other" (class 0); patients with two records as "ambulance"
    (class 1).

    Parameters
    ----------
    individuals : object
        A ciw object that contains all individuals records
    target : float
        Waiting-time target.

    Returns
    -------
    tuple of int
        (ambulance finished, ambulance within target,
         other finished, other within target)
    """
    ambulance_waits = ambulance_target_waits = 0
    other_waits = other_target_waits = 0
    for person in individuals:
        patient_class = len(person.data_records) - 1
        record = person.data_records[-1]
        if record.node != 2:
            continue
        hit = 1 if record.waiting_time < target else 0
        if patient_class == 0:
            other_waits += 1
            other_target_waits += hit
        elif patient_class == 1:
            ambulance_waits += 1
            ambulance_target_waits += hit
    return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits
def how_many(aDict):
    """
    aDict: A dictionary, where all the values are lists.
    returns: int, how many values are in the dictionary.
    """
    total = 0
    for entries in aDict.values():
        total += len(entries)
    return total
def build_model(images, num_classes):
    """Build the model
    :param images: Input image placeholder
    :param num_classes: Nbr of final output classes
    :return: Output of final fc-layer
    """
    #####Insert your code here for subtask 1e#####
    # It might be useful to define helper functions which add a layer of type needed
    # If you define such as function, remember that multiple variables with the same name will result in an error
    # To this end you may want to use with tf.variable_scope(name) to define a named scope for each layer
    # This way, you get a less cluttered visualization of the graph in tensorboard and debugging may be easier in tfdbg
    #####Insert your code here for subtask 1f#####
    # Add fc-classifictaion-layers
    # NOTE(review): exercise skeleton — `softmax_logits` is never assigned
    # above, so calling this stub as-is raises NameError until the
    # subtasks are implemented.
    return softmax_logits
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
    """Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the
    required height and width of the crop.

    The keypoint is translated into the crop's coordinate frame; its
    angle and scale are unaffected by the translation.
    """
    kp_x, kp_y, angle, scale = keypoint
    crop_x1, crop_y1 = crop_coords[0], crop_coords[1]
    return [kp_x - crop_x1, kp_y - crop_y1, angle, scale]
def get_tcoeff(epd_model, dF):
    """Compute transmission coefficients beta, gamma and delta directly
    from the time-series data.

    No reference to any compartmental model is needed; the coefficients
    are estimated from day-over-day differences of the
    confirmed/recovered/deaths columns.
    """
    frame = dF.copy()
    coeffs = pd.DataFrame(columns=['date', 'beta', 'gamma', 'delta'])
    frame['infected'] = frame['confirmed'] - frame['recovered'] - frame['deaths']
    infected = frame['infected']
    # Daily changes; the first row has no predecessor and is dropped.
    d_infected = infected.diff(periods=1).iloc[1:]
    d_recovered = frame['recovered'].diff(periods=1).iloc[1:]
    d_deaths = frame['deaths'].diff(periods=1).iloc[1:]
    coeffs['beta'] = (d_infected + d_recovered + d_deaths) / infected
    if epd_model == 'SIR':
        coeffs['gamma'] = (d_recovered + d_deaths) / infected
    if epd_model == 'SIRD':
        coeffs['gamma'] = d_recovered / infected
        coeffs['delta'] = d_deaths / infected
    dates = frame['date'].to_list()
    coeffs['date'] = dates
    coeffs.index = dates
    return coeffs
def run(conf: AgentConfig):
    """
    Main entry point: start this machine's agent and its workers.

    Registers an AgentNode in Redis (kept alive by a heartbeat thread),
    then runs ``conf.workers_n`` workers listening on cluster-prefixed
    queues, and unregisters the node when the workers finish.

    Queue names are prepended with the name of the cluster, for instance:
    qnames = ["default", "control"]; cluster = "gpu"
    -> cluster queues = ["gpu.default", "gpu.control"]

    :param conf: agent configuration; relevant fields include
        ``redis_dsn`` (redis url like redis://localhost:6379/0),
        ``qnames`` (queues to listen to), ``agent_name`` / ``machine_id``
        (used to derive the agent name), ``ip_address`` (advertised to
        Redis) and ``workers_n`` (number of workers to run).
    """
    # Fall back to the last path component of machine_id as the name.
    name = conf.agent_name or conf.machine_id.rsplit("/", maxsplit=1)[1]
    rdb = redis.from_url(conf.redis_dsn, decode_responses=True)
    # The heartbeat refreshes this agent's liveness in Redis.
    heart = HeartbeatThread(
        rdb,
        name,
        ttl_secs=conf.heartbeat_ttl,
        check_every_secs=conf.heartbeat_check_every,
    )
    # NOTE(review): Thread.setDaemon() is deprecated; the `daemon=True`
    # attribute is the modern spelling.
    heart.setDaemon(True)
    heart.start()
    workers_names = [f"{name}.{x}" for x in range(conf.workers_n)]
    cluster_queues = [f"{conf.cluster}.{q}" for q in conf.qnames]
    _now = int(datetime.utcnow().timestamp())
    pid = os.getpid()
    node = AgentNode(
        ip_address=conf.ip_address,
        name=name,
        machine_id=conf.machine_id,
        cluster=conf.cluster,
        pid=pid,
        qnames=conf.qnames,
        workers=workers_names,
        birthday=_now,
    )
    ag = AgentRegister(rdb, cluster=conf.cluster)
    ag.register(node)
    if conf.workers_n > 1:
        # Fan the workers out into separate processes.
        _executor = get_reusable_executor(max_workers=conf.workers_n, kill_workers=True)
        _results = [
            _executor.submit(
                start_worker, conf.redis_dsn, cluster_queues, conf.ip_address, name_i
            )
            for name_i in workers_names
        ]
    else:
        # Single worker runs in this process and blocks until done.
        start_worker(
            conf.redis_dsn,
            cluster_queues,
            name=workers_names[0],
            ip_address=conf.ip_address,
        )
    ag.unregister(node)
    heart.unregister()
async def test_setup_fails_without_config(hass):
    """MQTT component setup must report failure when no config is given."""
    setup_ok = await async_setup_component(hass, mqtt.DOMAIN, {})
    assert not setup_ok
def rx_filter(observable: Observable, predicate: PredicateOperator) -> Observable:
    """Create an observable whose events are filtered by a predicate.

    Args:
        observable (Observable): observable source
        predicate (Operator): one-argument function returning a truthy
            value for items that should be forwarded; may be a coroutine
            function.

    Returns:
        (Observable): observable instance
    """
    predicate_is_async = iscoroutinefunction(predicate)

    async def _subscribe(an_observer: Observer) -> Subscription:
        async def _forward_if_match(item: Any):
            keep = await predicate(item) if predicate_is_async else predicate(item)  # type: ignore
            if keep:
                await an_observer.on_next(item)

        filtering = rx_observer_from(observer=an_observer, on_next=_forward_if_match)
        return await observable.subscribe(an_observer=filtering)

    return rx_create(subscribe=_subscribe)
def remove_tasks_in_namespace(obj, namespace):
    """Remove all scheduled tasks in given namespace."""
    api = lib.get_api(**obj)
    task_kwargs = dict(namespace=namespace)
    lib.run_plugins_task(
        api, "remove_scheduled_tasks_in_namespace", task_kwargs, "Removing scheduled tasks"
    )
def map_time_program(raw_time_program, key: Optional[str] = None) -> TimeProgram:
    """Map *time program*: build one mapped entry per weekday."""
    result = {}
    if raw_time_program:
        for day in ("monday", "tuesday", "wednesday", "thursday",
                    "friday", "saturday", "sunday"):
            result[day] = map_time_program_day(raw_time_program.get(day), key)
    return TimeProgram(result)
def evaluate_expression(pipelineId=None, objectId=None, expression=None):
    """
    Task runners call EvaluateExpression to evaluate a string in the context of the specified object. For example, a task runner can evaluate SQL queries stored in Amazon S3.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.evaluate_expression(
    pipelineId='string',
    objectId='string',
    expression='string'
    )
    :type pipelineId: string
    :param pipelineId: [REQUIRED]\nThe ID of the pipeline.\n
    :type objectId: string
    :param objectId: [REQUIRED]\nThe ID of the object.\n
    :type expression: string
    :param expression: [REQUIRED]\nThe expression to evaluate.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'evaluatedExpression': 'string'
    }
    Response Structure
    (dict) --
    Contains the output of EvaluateExpression.
    evaluatedExpression (string) --
    The evaluated expression.
    Exceptions
    DataPipeline.Client.exceptions.InternalServiceError
    DataPipeline.Client.exceptions.TaskNotFoundException
    DataPipeline.Client.exceptions.InvalidRequestException
    DataPipeline.Client.exceptions.PipelineNotFoundException
    DataPipeline.Client.exceptions.PipelineDeletedException
    :return: {
        'evaluatedExpression': 'string'
    }
    :returns:
    DataPipeline.Client.exceptions.InternalServiceError
    DataPipeline.Client.exceptions.TaskNotFoundException
    DataPipeline.Client.exceptions.InvalidRequestException
    DataPipeline.Client.exceptions.PipelineNotFoundException
    DataPipeline.Client.exceptions.PipelineDeletedException
    """
    # Appears to be an auto-generated documentation stub for the AWS Data
    # Pipeline client method; the body is intentionally empty.
    pass
def _find_tols(equipment_id, start, end):
    """Return existing TransportOrderLines matching the given arguments.

    A line matches only when its order's load_in (either the order's own
    destination location or its destination convention) falls between
    ``start`` and ``end``.

    :param equipment_id: id of the Equipment to filter on.
    :param start: start of the load_in window.
    :param end: end of the load_in window.
    :return: a Django QuerySet of matching TransportOrderLine rows.
    """
    # Dead commented-out debug logging and the abandoned variant of the
    # query (which also matched load_out) have been removed.
    return TransportOrderLine.objects.filter(
        equipment__id=equipment_id
    ).filter(
        Q(transport_order__to_loc_load_in__range=(start, end))
        | Q(transport_order__to_convention__load_in__range=(start, end))
    )
def MC1(N, g1, x):
    """Monte Carlo estimate of the agent's value of policy 1.

    Args:
        N (int): Number of iterations/draws
        g1 (callable): Agent's value function for policy 1
        x (float): unused; kept for interface compatibility

    Returns:
        float: estimated value of policy 1
    """
    # Draw N samples from Beta(2, 7); par2 and par3 are module-level
    # globals defined elsewhere in the file.
    draws = np.random.beta(2, 7, size=N)
    return np.mean(g1(draws, par2, par3))
async def fry(message):
    """For the .fry command: deep-fries stickers or creates new ones.

    Downloads the media attached to the command message (or to the
    message it replies to), runs it through the deepfry API and sends
    the result back to the chat.
    """
    reply_message = await message.get_reply_message()
    photo = BytesIO()
    if message.media:
        await message.edit("Frying...")
        await message.download_media(photo)
    elif reply_message and reply_message.media:
        # BUG FIX: reply_message is None when the command is sent without
        # a reply; the old code dereferenced it and raised AttributeError.
        await message.edit("Frying...")
        await reply_message.download_media(photo)
    else:
        await message.edit("Can't deepfry nothing")
        return
    image = await resize_photo(photo)
    image = await deepfry(image, token=FACE_API_KEY, api_url=FACE_API_URL)
    # Persist to a named temp file because send_file needs a path.
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
    temp.close()
    image.save(temp.name)
    await message.delete()
    await message.client.send_file(message.chat.id, file=temp.name, reply_to=reply_message)
def string_between(string, start, end):
    """
    Returns a new string between the start and end range.
    Args:
        string (str): the string to split.
        start (str): string to start the split at.
        end (str): string to stop the split at.
    Returns:
        new string between start and end ("" when start is absent).
    """
    text, start_mark, end_mark = str(string), str(start), str(end)
    _, found, tail = text.partition(start_mark)
    if not found:
        # Start marker absent -> nothing lies between the markers.
        return ""
    return tail.split(end_mark)[0]
def _parse_line(line: str):
"""
行解析,逗号隔开,目前支持3个字段,第一个是展示的名称,第二个是xml中保存的名称,第三个是附带值value的正则标的形式
:param line:
:return:
"""
line = line.strip()
config = line.split(",")
if len(config) == 2:
return config[0], config[1], ""
elif len(config) == 3:
return config[0] + INTER_FLAG, config[1], config[2]
elif len(config) == 1:
return config[0], config[0], ""
else:
raise Exception("配置{}错误".format(line)) | 27,242 |
def harvest_zmat(zmat: str) -> Molecule:
    """Parses the contents of the Cfour ZMAT file into array and
    coordinate information. The coordinate info is converted into a
    rather dinky Molecule (no fragment, but does read charge, mult,
    unit). Return qcdb.Molecule. Written for findif zmat* where
    geometry always Cartesian and Bohr.
    """
    zmat = zmat.splitlines()[1:]  # skip comment line
    Nat = 0
    readCoord = True
    isBohr = ""
    charge = 0
    mult = 1
    molxyz = ""
    for line in zmat:
        # A blank line separates the geometry block from the keyword block.
        if line.strip() == "":
            readCoord = False
        elif readCoord:
            # Geometry block: accumulate coordinate lines and count atoms.
            molxyz += line + "\n"
            Nat += 1
        else:
            # Keyword block: extract charge / multiplicity / units from
            # the comma-separated options.
            if line.find("CHARGE") > -1:
                idx = line.find("CHARGE")
                charge = line[idx + 7 :]  # skip "CHARGE=" (7 chars)
                idxc = charge.find(",")
                if idxc > -1:
                    charge = charge[:idxc]
                charge = int(charge)
            if line.find("MULTIPLICITY") > -1:
                idx = line.find("MULTIPLICITY")
                mult = line[idx + 13 :]  # skip "MULTIPLICITY=" (13 chars)
                idxc = mult.find(",")
                if idxc > -1:
                    mult = mult[:idxc]
                mult = int(mult)
            if line.find("UNITS=BOHR") > -1:
                isBohr = " bohr"
    # Prepend the XYZ header (atom count + units, then charge/mult) and
    # build a fixed-orientation Molecule via qcelemental's parser.
    molxyz = f"{Nat}{isBohr}\n{charge} {mult}\n" + molxyz
    mol = Molecule(
        validate=False,
        **qcel.molparse.to_schema(
            qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"], dtype=2
        ),
    )
    return mol
def test_ensure_env_decorator_sets_gdal_data_wheel(gdalenv, monkeypatch, tmpdir):
    """fiona.env.ensure_env finds GDAL data in a wheel"""
    @ensure_env
    def wrapped():
        return getenv()['GDAL_DATA']
    tmpdir.ensure("gdal_data/pcs.csv")
    monkeypatch.delenv('GDAL_DATA', raising=False)
    # Pretend the _env module lives inside tmpdir so its sibling
    # gdal_data directory is discovered.
    fake_module_file = str(tmpdir.join(os.path.basename(_env.__file__)))
    monkeypatch.setattr(_env, '__file__', fake_module_file)
    assert wrapped() == str(tmpdir.join("gdal_data"))
def then_wait(msg_type, criteria_func, context, timeout=None):
    """Wait for a message of ``msg_type`` that fulfils ``criteria_func``.

    Uses an event-handler instead of repeatedly looping.

    Args:
        msg_type: message type to watch
        criteria_func: function deciding whether a message fulfils the
            test case
        context: behave context
        timeout: time allowance for a matching message; overrides the
            normal step timeout when provided

    Returns:
        (result (bool), debug (str)) tuple with status and debug message.
    """
    return CriteriaWaiter(msg_type, criteria_func, context).wait(timeout)
def interaction_fingerprint_list(interactions, residue_dict, interaction_dict):
    """
    Create list of fingerprints for all given structures.

    :param interactions: mapping of structure -> {site_name: site interactions}.
    :param residue_dict: residue lookup passed to interaction_fingerprint.
    :param interaction_dict: interaction lookup passed to interaction_fingerprint.
    :return: list of fingerprints, one per (LIG site, interaction type)
        with a non-None dataframe.
    """
    fp_list = []
    # BUG FIX: the old code iterated ``interactions.items()`` and then
    # called ``.items()`` on each (key, value) tuple, which raised
    # AttributeError. Iterate the values (the per-structure site dicts).
    for sites in interactions.values():
        for site_name, site_interactions in sites.items():
            if not site_name.startswith("LIG"):
                continue  # fragments are labeled as LIG; other "sites" detected by PLIP are XRC artefacts
            for interaction_type, dataframe in site_to_dataframes(site_interactions).items():
                if dataframe is None:
                    continue
                residue_nos = dataframe["RESNR"].tolist()
                fp_list.append(
                    interaction_fingerprint(
                        residue_dict, interaction_dict, residue_nos, interaction_type
                    )
                )
    return fp_list
def process_funding_records(max_rows=20, record_id=None):
    """Process uploaded funding records.

    For each active, unprocessed funding record: if the invitee already
    has an ORCID token with the activities/update scope, create/update
    the funding entry; otherwise send the invitee an invitation email.
    Finally, mark fully-processed records and completed tasks and email
    a per-task summary to the task creator.

    :param max_rows: maximum number of joined task rows fetched per run.
    :param record_id: when given, restrict processing to that one record.
    """
    set_server_name()
    task_ids = set()
    funding_ids = set()
    """This query is to retrieve Tasks associated with funding records, which are not processed but are active"""
    tasks = (Task.select(
        Task, FundingRecord, FundingInvitee, User,
        UserInvitation.id.alias("invitation_id"), OrcidToken).where(
            FundingRecord.processed_at.is_null(), FundingInvitee.processed_at.is_null(),
            FundingRecord.is_active,
            (OrcidToken.id.is_null(False)
             | ((FundingInvitee.status.is_null())
                | (FundingInvitee.status.contains("sent").__invert__())))).join(
                    FundingRecord, on=(Task.id == FundingRecord.task_id).alias("record")).join(
                        FundingInvitee,
                        on=(FundingRecord.id == FundingInvitee.record_id).alias("invitee")).join(
                            User,
                            JOIN.LEFT_OUTER,
                            on=((User.email == FundingInvitee.email)
                                | ((User.orcid == FundingInvitee.orcid)
                                   & (User.organisation_id == Task.org_id)))).join(
                                       Organisation,
                                       JOIN.LEFT_OUTER,
                                       on=(Organisation.id == Task.org_id)).join(
                                           UserOrg,
                                           JOIN.LEFT_OUTER,
                                           on=((UserOrg.user_id == User.id)
                                               & (UserOrg.org_id == Organisation.id))).
             join(
                 UserInvitation,
                 JOIN.LEFT_OUTER,
                 on=((UserInvitation.email == FundingInvitee.email)
                     & (UserInvitation.task_id == Task.id))).join(
                         OrcidToken,
                         JOIN.LEFT_OUTER,
                         on=((OrcidToken.user_id == User.id)
                             & (OrcidToken.org_id == Organisation.id)
                             & (OrcidToken.scopes.contains("/activities/update")))).limit(max_rows))
    if record_id:
        tasks = tasks.where(FundingRecord.id == record_id)
    # Group the joined rows so each (task, org, record, user) combination
    # is handled exactly once.
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(tasks, lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            t.record.invitee.user,)):
        """If we have the token associated to the user then update the funding record, otherwise send him an invite"""
        if (user.id is None or user.orcid is None or not OrcidToken.select().where(
            (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))).exists()):  # noqa: E127, E129
            for k, tasks in groupby(
                    tasks_by_user,
                    lambda t: (
                        t.created_by,
                        t.org,
                        t.record.invitee.email,
                        t.record.invitee.first_name,
                        t.record.invitee.last_name, )
            ):  # noqa: E501
                email = k[2]
                token_expiry_in_sec = 2600000
                status = "The invitation sent at " + datetime.utcnow().isoformat(
                    timespec="seconds")
                try:
                    # Use a shorter token expiry when this invitee has
                    # previously had an invitation reset.
                    if FundingInvitee.select().where(
                            FundingInvitee.email == email,
                            FundingInvitee.status ** "%reset%").count() != 0:
                        token_expiry_in_sec = 1300000
                    send_user_invitation(
                        *k,
                        task_id=task_id,
                        token_expiry_in_sec=token_expiry_in_sec)
                    (FundingInvitee.update(status=FundingInvitee.status + "\n" + status).where(
                        FundingInvitee.status.is_null(False),
                        FundingInvitee.email == email).execute())
                    (FundingInvitee.update(status=status).where(
                        FundingInvitee.status.is_null(),
                        FundingInvitee.email == email).execute())
                except Exception as ex:
                    (FundingInvitee.update(
                        processed_at=datetime.utcnow(),
                        status=f"Failed to send an invitation: {ex}.").where(
                            FundingInvitee.email == email,
                            FundingInvitee.processed_at.is_null())).execute()
        else:
            create_or_update_funding(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        funding_ids.add(record_id)
    for record in FundingRecord.select().where(FundingRecord.id << funding_ids):
        # The funding record is processed for all invitees
        if not (FundingInvitee.select().where(
                FundingInvitee.record_id == record.id,
                FundingInvitee.processed_at.is_null()).exists()):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Funding record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (FundingRecord.select().where(FundingRecord.task_id == task.id,
                                             FundingRecord.processed_at.is_null()).exists()):
            task.completed_at = datetime.utcnow()
            task.save()
            error_count = FundingRecord.select().where(
                FundingRecord.task_id == task.id, FundingRecord.status**"%error%").count()
            row_count = task.record_count
            # Email a completion summary (with an export link) to the
            # user who created the task.
            with app.app_context():
                export_url = flask.url_for(
                    "fundingrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True)
                send_email(
                    "email/funding_task_completed.html",
                    subject="Funding Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    filename=task.filename)
def get_dynamic_client(
    access_token: str, project_id: str, cluster_id: str, use_cache: bool = True
) -> CoreDynamicClient:
    """
    Build a client object for accessing the given Kubernetes cluster.

    :param access_token: bcs access_token
    :param project_id: project ID
    :param cluster_id: cluster ID
    :param use_cache: whether the cached client may be reused
    :return: the CoreDynamicClient for the specified cluster
    """
    if not use_cache:
        # Bypass the cache and always construct a fresh instance.
        return generate_core_dynamic_client(access_token, project_id, cluster_id)
    return _get_dynamic_client(access_token, project_id, cluster_id)
def _launch(url, runtime, **kwargs):
    """Attempt to launch the url with the runtime given by its name.

    :param url: the url to open.
    :param runtime: runtime name; should end with "-app" or "-browser",
        or start with "selenium-".
    :return: (runtime_object, is_launched, error_object) — error_object
        is None on success, otherwise a string with the error and the
        last traceback frame.
    """
    rt = None
    launched = False
    try:
        if runtime.endswith('-app'):
            # Desktop-like app runtime
            runtime = runtime.split('-')[0]
            Runtime = _runtimes.get(runtime, None)
            if Runtime is None:
                # logger.warn() is deprecated; use warning().
                logger.warning('Unknown app runtime %r.' % runtime)
            else:
                rt = Runtime(**kwargs)
                if rt.is_available():
                    rt.launch_app(url)
                    launched = True
        elif runtime.startswith('selenium-'):
            # Selenium runtime - always try or fail
            if '-' in runtime:
                kwargs['type'] = runtime.split('-', 1)[1]
            rt = SeleniumRuntime(**kwargs)
            rt.launch_tab(url)
            launched = True
        elif runtime.endswith('-browser'):
            # Browser runtime
            runtime = runtime.split('-')[0]
            # Try using our own runtimes to open in tab, because
            # the webbrowser module is not that good at opening specific browsers.
            Runtime = _runtimes.get(runtime, None)
            if Runtime is not None:
                rt = Runtime(**kwargs)
                if rt.is_available():
                    rt.launch_tab(url)
                    launched = True
            # BUG FIX: fall back to the webbrowser-based runtime only when
            # the specific runtime did not launch; previously this ran
            # unconditionally and could open the url a second time.
            if not launched:
                kwargs['type'] = runtime
                rt = BrowserRuntime(**kwargs)
                if rt.is_available():
                    rt.launch_tab(url)
                    launched = True
        else:
            logger.warning('Runtime names should be "app", "browser" or '
                           'end with "-app" or "-browser", not %r' % runtime)
    except Exception as err:
        # Report the error together with the innermost traceback frame.
        type_, value, tb = sys.exc_info()
        trace = traceback.format_list(traceback.extract_tb(tb))
        del tb
        return rt, False, str(err) + '\n' + ''.join(trace[-1:])
    return rt, launched, None
def a2b_base64(*args, **kwargs): # real signature unknown
    """ Decode a line of base64 data. """
    # Appears to be an auto-generated skeleton for the C-implemented
    # binascii.a2b_base64; the Python body is intentionally empty.
    pass
def bucket(db, dummy_location):
    """File system location."""
    created_bucket = Bucket.create()
    db.session.commit()
    return created_bucket
def standardize_str(string):
    """Returns a standardized form of the string-like argument.
    This will convert from a `unicode` object to a `str` object.
    """
    standardized = str(string)
    return standardized
def lambda_handler(event, context):
    """
    Return a staff member's per-day availability.

    Parameters
    ----------
    event : dict
        Request parameters from the front end.
    context : dict
        Lambda context object.

    Returns
    -------
    return_calendar : dict
        Per-day availability for the staff member (availability is only
        returned for days that have reservations).
    """
    # Log and validate the request parameters.
    logger.info(event)
    req_param = event['queryStringParameters']
    if req_param is None:
        error_msg_display = common_const.const.MSG_ERROR_NOPARAM
        return utils.create_error_response(error_msg_display, 400)
    param_checker = validation.HairSalonParamCheck(req_param)  # noqa 501
    if error_msg := param_checker.check_api_staff_calendar_get():
        error_msg_display = ('\n').join(error_msg)
        logger.error(error_msg_display)
        return utils.create_error_response(error_msg_display, 400)
    try:
        # Fetch the staff member's availability for the requested month.
        staff_calendar = get_staff_calendar(req_param)
    except Exception as e:
        logger.exception('Occur Exception: %s', e)
        return utils.create_error_response('Error')
    body = json.dumps(
        staff_calendar,
        default=utils.decimal_to_int,
        ensure_ascii=False)
    return utils.create_success_response(body)
def init_process_group_and_set_device(world_size, process_id, device_id, config):
    """
    Initialise this spawned process for DistributedDataParallel training.

    Records world size / rank / device on ``config``, pins the process
    to a single GPU, and (when world_size > 1) initialises the NCCL
    process group.
    """
    config.world_size = world_size
    config.rank = process_id
    torch.cuda.set_device(device_id)
    device = torch.device(f'cuda:{device_id}')
    config.device = device
    if world_size <= 1:
        config.distributed = False
        return device
    config.distributed = True
    torch.distributed.init_process_group(
        torch.distributed.Backend.NCCL,
        world_size=world_size,
        rank=process_id,
    )
    # Wait for all ranks to join, then silence printing on non-masters.
    torch.distributed.barrier(device_ids=[device_id])
    utils.setup_for_distributed(config.rank == 0)
    return device
def yes_no(
        question : str = '',
        options : Tuple[str, str] = ('y', 'n'),
        default : str = 'y',
        wrappers : Tuple[str, str] = ('[', ']'),
        icon : bool = True,
        yes : bool = False,
        noask : bool = False,
        interactive : bool = False,
        **kw : Any
    ) -> bool:
    """
    Print a question and prompt the user with a yes / no input.
    Returns True for 'yes', False for 'no'.
    :param question:
        The question to print to the user.
    :param options:
        The y/n options. The first is always considered `True`, and all options must be lower case.
        This behavior may be modifiable change in the future.
    :param default:
        The default option. Is represented with a capital to distinguish that it's the default.\
        E.g. [y/N] would return False by default.
    :param wrappers:
        Text to print around the '[y/n]' options.
        Defaults to ('[', ']').
    :param icon:
        If True, prepend the configured question icon.
    :param yes:
        If True, use the first option as the default and skip asking.
    :param noask:
        If True (or if ``yes`` is True), passed through to ``prompt``,
        presumably suppressing the interactive wait — confirm in prompt().
    :param interactive:
        Not implemented. Was planning on using prompt_toolkit, but for some reason
        I can't figure out how to make the default selection 'No'.
    """
    from meerschaum.utils.warnings import error, warn
    from meerschaum.utils.formatting import ANSI, UNICODE
    from meerschaum.utils.packages import attempt_import
    default = options[0] if yes else default
    noask = yes or noask
    # Render e.g. " [y/N]" with the default option capitalized.
    ending = f" {wrappers[0]}" + "/".join(
        [
            o.upper() if o.lower() == default.lower()
            else o.lower() for o in options
        ]
    ) + f"{wrappers[1]}"
    while True:
        try:
            answer = prompt(question + ending, icon=icon, detect_password=False, noask=noask)
            success = True
        except KeyboardInterrupt:
            success = False
        # NOTE(review): error() is assumed to raise; if it returned,
        # `answer` could be unbound below — confirm.
        if not success:
            error(f"Error getting response. Aborting...", stack=False)
        if answer == "":
            answer = default
        if answer.lower() in options:
            break
        warn('Please enter a valid reponse.', stack=False)
    return answer.lower() == options[0].lower()
def load(filename, instrument=None, **kw):
    """
    Return a probe for NCNR data.
    """
    header, data = parse_file(filename)
    geometry = Polychromatic()
    return _make_probe(geometry=geometry, header=header, data=data, **kw)
def set_mem_lock_xml(params, env):
    """Unimplemented stub — currently a no-op.

    The name suggests it should set memory-lock settings in the domain
    XML; confirm the intended behavior before implementing.
    """
    pass
def get_repeat():
    """ get_repeat() -> (delay, interval)
    see how held keys are repeated
    """
    check_video()
    # SDL writes the values through these two int out-parameters.
    delay_ptr = ffi.new('int*')
    interval_ptr = ffi.new('int*')
    sdl.SDL_GetKeyRepeat(delay_ptr, interval_ptr)
    return (delay_ptr[0], interval_ptr[0])
def growth(params, ns, rho=None, theta=1.0, gamma=None, h=0.5, sel_params=None):
    """
    Exponential growth or decay model.

    params = (nu, T)
        nu - final size
        T  - time in past size changes begin

    :param ns: sample size passed through to ``equilibrium``.
    :param rho: defaults to 0.0 with a printed warning when not given.
    :param theta: mutation parameter passed through.
    :param gamma: defaults to 0.0 when not given.
    :param h: dominance parameter passed through.
    :param sel_params: optional selection parameters passed through.
    :return: the equilibrium spectrum integrated forward for time T.
    """
    nu, T = params
    # Idiom fix: compare to None with `is`, not `==`.
    if rho is None:
        print("Warning: no rho value set. Simulating with rho = 0.")
        rho = 0.0
    if gamma is None:
        gamma = 0.0
    gamma = make_floats(gamma)
    rho = make_floats(rho)
    theta = make_floats(theta)
    sel_params = make_floats(sel_params)
    F = equilibrium(ns, rho=rho, theta=theta, gamma=gamma, h=h, sel_params=sel_params)
    # Exponential size trajectory from 1 at t=0 to nu at t=T.
    nu_func = lambda t: np.exp(np.log(nu) * t / T)
    F.integrate(nu_func, T, rho=rho, theta=theta, gamma=gamma, h=h, sel_params=sel_params)
    return F
def mse(y_true, y_pred):
    """ Mean Squared Error """
    squared_errors = K.square(_error(y_true, y_pred))
    return K.mean(squared_errors)
def knn_name_matching(
        A: Iterable[str], B: Iterable[str],
        vectorizer_kws: dict = None, nn_kws: dict = None,
        max_distance: float = None, return_B=True) -> list:
    """
    Nearest-neighbor name matching of sentences in B to A.

    :param A: reference sentences; matches are drawn from here.
    :param B: query sentences, each matched to its nearest A entry.
    :param vectorizer_kws: extra kwargs for TfidfVectorizer.
    :param nn_kws: extra kwargs for NearestNeighbors.
    :param max_distance: when given, drop matches farther than this.
    :param return_B: if True return (b, matched_a) pairs, else just the
        matched A entries.
    """
    from sklearn.neighbors import NearestNeighbors
    from sklearn.feature_extraction.text import TfidfVectorizer
    # Avoid the mutable-default-argument pitfall.
    vectorizer_kws = {} if vectorizer_kws is None else vectorizer_kws
    nn_kws = {} if nn_kws is None else nn_kws
    A = list(A)
    B = list(B)
    # vectorize the B documents after fitting on A
    vectorizer = TfidfVectorizer(**vectorizer_kws)
    Xa = vectorizer.fit_transform(A)
    Xb = vectorizer.transform(B)
    # find nearest neighbor matching
    neigh = NearestNeighbors(n_neighbors=1, **nn_kws)
    neigh.fit(Xa)
    if max_distance is None:
        indices = neigh.kneighbors(Xb, return_distance=False).flatten()
        kept_b = B
    else:
        # BUG FIX: kneighbors returns (distances, indices) in that order;
        # the old code unpacked them swapped. Also track which B entries
        # survive the distance cut so (b, a) pairs stay aligned.
        distances, indices = neigh.kneighbors(Xb)
        distances, indices = distances.flatten(), indices.flatten()
        keep = distances <= max_distance
        indices = indices[keep]
        kept_b = [b for b, ok in zip(B, keep) if ok]
    if return_B:
        return [(b, A[idx]) for b, idx in zip(kept_b, indices)]
    return [A[idx] for idx in indices]
def context(name: str) -> Generator[str, None, None]:
    """Attach *name* to all log messages emitted inside this block.

    The name is pushed onto the context stack for the duration of the
    block and popped (with a sanity check) on exit.
    """
    try:
        stack = CTX_STACK.get()
    except LookupError:
        stack = []
    CTX_STACK.set(stack)
    stack.append(name)
    try:
        yield name
    finally:
        top = stack.pop()
        assert top is name, f'Popped incorrect value: pop({top!r}) != ctx({name!r})!'
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the ISY994 light platform (dimmable nodes only)."""
    logger = logging.getLogger(__name__)
    # Verify the controller connection before doing anything else.
    if ISY is None or not ISY.connected:
        logger.error('A connection has not been made to the ISY controller.')
        return False
    devices = []
    for path, node in ISY.nodes:
        # Only dimmable, non-sensor nodes become light devices.
        if not node.dimmable or SENSOR_STRING in node.name:
            continue
        if HIDDEN_STRING in path:
            node.name += HIDDEN_STRING
        devices.append(ISYLightDevice(node))
    add_devices(devices)
def frames_downsample(arFrames: np.array, nFramesTarget: int) -> np.array:
    """Resample a stack of frames along the first axis to nFramesTarget.

    Works for down- and up-sampling (frames are repeated when the input
    has fewer frames than requested), and — unlike the previous
    4-D-only version — for arrays of any rank >= 1.

    :param arFrames: array of frames; axis 0 is the frame index.
    :param nFramesTarget: desired number of frames.
    :return: the input array unchanged if the count already matches,
        otherwise a new array with nFramesTarget frames.
    """
    nSamples = arFrames.shape[0]
    if nSamples == nFramesTarget:
        return arFrames
    # Evenly spaced source indices (floor); repeats frames on upsampling.
    fraction = nSamples / nFramesTarget
    index = [int(fraction * i) for i in range(nFramesTarget)]
    # Fancy indexing replaces the old list-of-slices + np.array round-trip.
    return arFrames[index]
def icon_dir():
    """pathname of the directory from which to load custom icons"""
    return "{}/icons".format(module_dir())
def search_pkgs(db, project_type, pkg_list):
    """
    Search packages in our vulnerability database.

    :param db: DB instance
    :param project_type: Project type
    :param pkg_list: List of packages to search
    :return: (deduplicated raw results, dealiased package alias map)
    """
    expanded = []
    aliases = {}
    for pkg in pkg_list:
        variations = normalize.create_pkg_variations(pkg)
        expanded.extend(variations)
        vendor, name = get_pkg_vendor_name(pkg)
        # TODO: Use purl here
        aliases["{}:{}".format(vendor, name)] = [
            "{}:{}".format(vari.get("vendor"), vari.get("name")) for vari in variations
        ]
    quick_res = dbLib.bulk_index_search(expanded)
    results = dbLib.pkg_bulk_search(db, quick_res)
    results = normalize.dedup(project_type, results, pkg_aliases=aliases)
    aliases = normalize.dealias_packages(project_type, results, pkg_aliases=aliases)
    return results, aliases
def test_update_account_without_data_returning_400_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN the '/account' URL is requested (PUT) without data
    THEN check the response HTTP 400 response
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    auth_header = {'Authorization': 'Bearer ' + tokens['access']['enconded']}
    response = client.put('/account',
                          content_type='application/json',
                          headers=auth_header)
    payload = response.json
    assert response.status_code == 400
    assert payload['status'] == 'fail'
    assert payload['message'] == 'bad request'
def post_question():
    """Post a question.

    Validates the JSON payload, checks that the referenced meetup
    exists, then stores the question for the authenticated user.
    """
    q_data = request.get_json()
    if not q_data:
        # Reject requests with no JSON body at all.
        abort(make_response(jsonify({'status': 400, 'message': 'No data sent'}), 400))
    try:
        data = QuestionSchema().load(q_data)
        if not MeetupModel().exists('id', data['meetup_id']):
            abort(make_response(jsonify({'status': 404, 'message': 'Meetup not found'}), 404))
        data['user_id'] = get_jwt_identity()
        question = QuestionModel().save(data)
        result = QuestionSchema().dump(question)
        return jsonify({'status': 201, 'message': 'Question posted successfully', 'data': result}), 201
    except ValidationError as errors:
        # Report validation errors alongside any valid data.
        valid_data = errors.valid_data
        abort(make_response(jsonify({'status': 400, 'message': 'Invalid data.', 'errors': errors.messages, 'valid_data': valid_data}), 400))
def infinitegenerator(generatorfunction):
    """Decorator that makes a generator replay indefinitely.

    An "infinite" keyword argument is added to the generator: when it is
    truthy, the underlying generator is restarted every time it is
    exhausted, so iteration never ends.

    :param generatorfunction: the generator function to wrap.
    :return: the wrapped generator function.
    """
    import functools

    @functools.wraps(generatorfunction)
    def infgenerator(*args, **kwargs):
        # Pop our extra flag so the wrapped generator never sees it.
        infinite = kwargs.pop("infinite", False)
        if infinite:
            while True:
                yield from generatorfunction(*args, **kwargs)
        else:
            yield from generatorfunction(*args, **kwargs)
    return infgenerator
def clear_punctuation(document):
    """Remove all punctuation characters from ``document``.

    :param document: any object; it is coerced with ``str()`` first.
    :return: the string with every ``string.punctuation`` char removed.
    """
    # The old Python 2 branch (str.translate(None, ...)) is dead code on
    # any supported interpreter, so only the Python 3 form remains.
    table = str.maketrans("", "", string.punctuation)
    return str(document).translate(table)
def test_time_of_use_summer_off_peak_usage():
    """Test Time of Use for summer Off Peak Usage for kwh."""
    with patch(PATCH_GET) as session_get, patch(PATCH_POST) as session_post:
        # Stub out login and data requests with canned responses.
        session_post.return_value = MOCK_LOGIN_RESPONSE
        session_get.side_effect = get_mock_requests(ROUTES)
        client = SrpEnergyClient(TEST_ACCOUNT_TOU_ID, TEST_USER_NAME, TEST_PASSWORD)
        usage = client.usage(datetime(2020, 6, 24, 0, 0, 0),
                             datetime(2020, 6, 24, 23, 0, 0),
                             True)
        assert len(usage) == 3
        _date, _hour, _isodate, first_kwh, first_cost = usage[0]
        assert first_kwh == 6.1
        assert first_cost == 0.44
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Credit: http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python"""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def _update_objective(C_obj, Q, QN, R, R0, xr, z_init, u_init, const_offset, u_prev, N, nx, nu):
    """
    Construct the linear (gradient) part of the MPC objective function.

    Stacks three pieces into one flat vector:
      1. state-tracking terms for stages 0..N-1 (weighted by Q),
      2. the terminal state-tracking term (weighted by QN),
      3. the input terms R @ (u_init + const_offset).

    NOTE(review): assumes z_init carries N+1 state columns and u_init N
    input columns, with C_obj mapping the lifted state z to the tracked
    output -- confirm against the caller.

    :return: 1-D numpy array of linear objective coefficients
    """
    res = np.hstack(
        ((C_obj.T @ Q @ (C_obj @ z_init[:, :-1] - xr[:, :-1])).T.flatten(),
         C_obj.T @ QN @ (C_obj @ z_init[:, -1] - xr[:, -1]),
         (R @ (u_init + const_offset)).T.flatten()))
    # Jitter regularization linear objective:
    # penalizes deviation of the first input block from the previous input.
    res[(N+1)*nx:(N+1)*nx + nu] -= R0 @ u_prev
    return res
def absolute_time(arg):
    """Validate a user-provided absolute time string such as "23:59".

    :param arg: colon-separated time string (hour[:minute[:second]])
    :return: the unchanged string when valid
    :raises argparse.ArgumentTypeError: on non-digit parts or out-of-range
        fields (e.g. hour 24)
    """
    # Split once instead of twice; generator avoids building a throwaway list.
    parts = arg.split(':')
    if not all(part.isdigit() for part in parts):
        raise argparse.ArgumentTypeError("Invalid time format: {}".format(arg))
    # datetime.time rejects out-of-range fields (hour must be 0..23, etc.)
    try:
        datetime.time(*map(int, parts))
    except ValueError as e:
        raise argparse.ArgumentTypeError("Invalid time format: {}".format(e))
    return arg
def convert(M: any) -> torch.Tensor:
    """
    Convert a Scipy sparse matrix to a pytorch sparse COO tensor.

    Parameters
    ----------
    M : any
        Scipy sparse matrix.

    Returns
    -------
    Ms : torch.Tensor
        pytorch sparse tensor.
    """
    coo = M.tocoo()
    # Stack row/col indices into the (2, nnz) layout torch expects.
    row_col = np.vstack((coo.row, coo.col))
    idx = torch.from_numpy(row_col).long()
    vals = torch.from_numpy(coo.data)
    return torch.sparse_coo_tensor(idx, vals, torch.Size(coo.shape))
def process_lines(lines):
    """
    Classify Hough line segments into left/right lane lines and combine them.

    :param lines: np.array of detected lines (output of a HoughLinesP call)
    :return: np.array with 2 combined lines (left and right)
    """
    left = CombinedLine()
    right = CombinedLine()
    for segment in lines[:, 0]:
        angle = math.atan2(segment[3] - segment[1], segment[2] - segment[0])
        # Discard nearly horizontal segments.
        if not filter_by_slope(angle):
            continue
        # Positive slope -> right lane line, negative -> left lane line.
        if angle > 0:
            right.add(segment)
        else:
            left.add(segment)
    # The lowest combined point approximates the bottom of the image.
    bottom_y = max(left.point_bottom[1], right.point_bottom[1])
    # The intersection of both lane lines approximates the horizon.
    horizon = CombinedLine.intersection(left, right)
    p_horizon = 1.1  # cut slightly below the computed horizon
    top_y = horizon[1] * p_horizon
    # Build the two output lines from the horizon and bottom y coordinates.
    return np.array(
        [[[left.x(top_y), top_y, left.x(bottom_y), bottom_y],
          [right.x(top_y), top_y, right.x(bottom_y), bottom_y]]],
        dtype=np.int16)
def get_balances_with_token(token: str):
    """Return all cbalancehistory rows involving the given token.

    :param token: token symbol; matched case-insensitively (lower-cased)
    :return: list of (date, balance_btc, balance_<fiat>) rows
    """
    token = token.lower()
    conn = create_connection()
    with conn:
        cursor = conn.cursor()
        fiat = confighandler.get_fiat_currency().lower()
        # The fiat value only selects a column name and comes from local
        # config, but the token is caller-supplied: bind it as a query
        # parameter instead of interpolating it (SQL-injection fix).
        # NOTE(review): assumes a DB-API driver using '?' placeholders
        # (sqlite3) -- confirm against create_connection().
        cursor.execute(
            f"SELECT date,balance_btc,balance_{fiat} FROM cbalancehistory "
            "WHERE token = ?",
            (token,))
        return cursor.fetchall()
def relative_cumulative_gain_curve(df: pd.DataFrame,
                                   treatment: str,
                                   outcome: str,
                                   prediction: str,
                                   min_rows: int = 30,
                                   steps: int = 100,
                                   effect_fn: EffectFnType = linear_effect) -> np.ndarray:
    """
    Order the dataset by prediction and compute the relative cumulative gain
    curve under that ordering.

    The relative gain at each step is the cumulative effect minus the Average
    Treatment Effect (ATE) scaled by the relative sample size.

    Parameters
    ----------
    df : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
    treatment : Strings
        The name of the treatment column in `df`.
    outcome : Strings
        The name of the outcome column in `df`.
    prediction : Strings
        The name of the prediction column in `df`.
    min_rows : Integer
        Minimum number of observations needed to have a valid result.
    steps : Integer
        The number of cumulative steps to iterate when accumulating the effect.
    effect_fn : function (df: pandas.DataFrame, treatment: str, outcome: str) -> int or Array of int
        A function computing the treatment effect given a dataframe and the
        treatment and outcome column names.

    Returns
    ----------
    relative cumulative gain curve: float
        The relative cumulative gain according to the predictions ordering.
    """
    avg_effect = effect_fn(df, treatment, outcome)
    total = df.shape[0]
    # Sample sizes at which the cumulative effect is evaluated.
    row_counts = list(range(min_rows, total, total // steps)) + [total]
    effects = cumulative_effect_curve(df=df, treatment=treatment, outcome=outcome,
                                      prediction=prediction, min_rows=min_rows,
                                      steps=steps, effect_fn=effect_fn)
    gains = [(eff - avg_effect) * (rows / total)
             for rows, eff in zip(row_counts, effects)]
    return np.array(gains)
def setup():
    """
    Install project user, structure, env, source, dependencies and providers
    """
    from .deploy import (install_project, install_virtualenv,
                         install_requirements, install_providers)
    from .project import requirements_txt, use_virtualenv

    install_project()
    # Only build a virtualenv when the project is configured to use one.
    if use_virtualenv():
        install_virtualenv()
    install_requirements(requirements_txt())
    install_providers()
    configure_providers()
def add_curve_scatter(axis, analysis_spot, color_idx):
    """Add one or more scatter curves that spot events.

    Arguments:
        axis : a pyplot x-y axis
        analysis_spot : a dictionary { 'name': [<datetime>, ...], ... }
        color_idx : index of the first color to use; one color per curve
    Returns:
        list of the scatter artists created (one per dictionary entry)
    """
    curves = []
    # Each spot analysis gets its own constant y value so curves stack.
    spot_value = 0
    axis.set_ylim(-1, len(analysis_spot) + 1)
    axis.get_yaxis().set_visible(False)
    # Iterate items() directly instead of key lookups.
    for name, data in analysis_spot.items():
        color = get_color(color_idx)
        data_spot = [spot_value for _x in data]
        curves.append(axis.scatter(data, data_spot, color=color, label=name))
        spot_value += 1
        color_idx += 1
    return curves
def detect_faces(path):
    """Detects faces in an image and prints their likelihoods and bounds."""
    with io.open(file=path, mode='rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.face_detection(image=image)
    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    print('Faces:')
    for face in response.face_annotations:
        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
        corners = ['({},{})'.format(vertex.x, vertex.y)
                   for vertex in face.bounding_poly.vertices]
        print('face bounds: {}'.format(','.join(corners)))
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
def get_character_journal(character_ccp_id, page = 1, page_limit=5):
    """Fetch and verify a character's wallet journal, following pagination.

    :param character_ccp_id: CCP id of the character whose journal to read
    :param page: ESI page to start from (1-based)
    :param page_limit: maximum number of pages to fetch recursively
    :return: list of verified journal entries, or None when the character
        lacks the required ESI wallet scope
    """
    character = EVEPlayerCharacter.get_object(character_ccp_id)
    if not character.has_esi_scope('esi-wallet.read_character_wallet.v1'):
        return None
    client = EsiClient(authenticating_character=character)
    journal_entries, _ = client.get("/v4/characters/%s/wallet/journal/?page=%s" % (character_ccp_id,page))

    formatted_entries = []
    for entry in journal_entries:
        e = verify_journal_entry(entry, character)
        formatted_entries.append(e)

    # pagination logic: keep recursing while pages still return data
    if formatted_entries and page <= page_limit:
        older_entries = get_character_journal(
            character_ccp_id = character_ccp_id,
            page = page + 1,
            page_limit = page_limit
        )
    else:
        older_entries = []
    # BUG FIX: previously returned the raw journal_entries, silently
    # discarding the verified/formatted entries built above.
    return formatted_entries + older_entries
def get_entropy(labels):
    """Calculate the Shannon entropy of a 1-D array of class labels.

    Uses ``-sum(p(c) * log2(p(c)))`` over the classes present in *labels*.
    """
    assert len(labels.shape) == 1
    _, count = get_unique_classes_count(labels)
    # Divide by the sample count labels.shape[0]; the old code divided by
    # the shape *tuple*, relying on numpy coercing it to a 1-element array.
    probabilities = count / labels.shape[0]
    return -np.sum(probabilities * np.log2(probabilities))
def prev_attached_usb():
    """
    Yields information about the previously connected
    usb drives.

    Each yielded item is a tuple:
    (device_name, windows_time, friendly_name, container_id, class_guid,
     disk_id, device_class_guid, mfg, driver, extra)
    """
    for sub_key in enum_usb():
        # Additional information of the connected USB storage device.
        extra = list()
        with reg.OpenKeyEx(reg.HKEY_LOCAL_MACHINE, sub_key) as usb:
            # The instance id is the last path component of the subkey.
            instance_id_inx = sub_key.rfind("\\") + 1
            # NOTE(review): an '&' in the id appears to mean Windows
            # generated it (device reported no serial number) -- confirm.
            sys_gen_inx = sub_key.rfind("&")
            instance_id = sub_key[instance_id_inx:sys_gen_inx]
            if sys_gen_inx > 0:
                extra.append("Device doesn't have a serial number")
            # Names.
            friendly_name = reg.QueryValueEx(usb, "FriendlyName")[0]
            device_name = get_device_name(instance_id)
            # IDs.
            container_id = reg.QueryValueEx(usb, "ContainerID")[0]
            class_guid = reg.QueryValueEx(usb, "ClassGUID")[0]
            disk_id = get_disk_id(usb)
            device_class_guid = get_device_class_guid(instance_id)
            # Extras.
            mfg = reg.QueryValueEx(usb, "Mfg")[0]
            driver = reg.QueryValueEx(usb, "Driver")[0]
            windows_time = get_first_attached_date(device_class_guid, instance_id)
            yield device_name, windows_time, friendly_name, container_id,\
                class_guid, disk_id, device_class_guid, mfg, driver, extra
def store():
    """Database storage fixture backed by a fresh in-memory database."""
    database = Database.in_memory(echo=False)
    database.create_tables()
    return DBResultStorage(database)
def report_tests(test_suite,
label="",
filter=None,
filter_i=None,
first_only=False,
spacer_lines=10):
"""Report tests of a test suite using either:
single line per fail/error, or
stack trace for first error
verbose
0 = single line, only print fails/errors
1 = single line, all tests (fails/errors/success)
2 = print stack
"""
# print spacer lines
[print("") for _ in range(spacer_lines)]
# run tests
res = unittest.TestResult()
test_suite.run(res)
# if successful, report and be done
if res.wasSuccessful():
print("SUCCESS " + label)
return
i = 0
# errors
if filter in [None, 'errors']:
for test_case, stack_trace in res.errors:
i += 1
if not filter_i or filter_i == i:
print(f"ERROR {i:2d} " + test_case.id())
if first_only or filter_i == i:
print("")
print(stack_trace)
if first_only:
return
# failures
if filter in [None, 'failures']:
for test_case, stack_trace in res.failures:
i += 1
if not filter_i or filter_i == i:
print(f"FAIL {i:2d} " + test_case.id())
if first_only or filter_i == i:
print("")
print(stack_trace)
if first_only:
return | 27,286 |
def test_homology(dash_threaded):
    """Test the display of a basic homology"""
    prop_type = 'dict'
    prop_val = {
        "chrOne": {
            "organism": "9606",
            "start": [10001, 105101383],
            "stop": [27814790, 156030895],
        },
        "chrTwo": {
            "organism": "9606",
            "start": [3000000, 125101383],
            "stop": [9000000, 196130895],
        },
    }

    def assert_callback(prop_value, nclicks, input_value):
        # PASS once the prop round-trips; FAIL after a click that didn't.
        answer = ''
        if nclicks is not None:
            answer = FAIL
        if PROP_TYPES[prop_type](input_value) == prop_value:
            answer = PASS
        return answer

    template_test_component(
        dash_threaded,
        APP_NAME,
        assert_callback,
        ideogram_test_props_callback,
        'homology',
        json.dumps(prop_val),
        prop_type=prop_type,
        component_base=COMPONENT_REACT_BASE,
        perspective="comparative",
        chromosomes=["1", "2"],
        **BASIC_PROPS
    )

    browser = dash_threaded.driver
    # No homology region should be rendered before the prop change.
    assert len(browser.find_elements_by_class_name('syntenicRegion')) == 0
    # Trigger a change of the component prop via the test button.
    button = wait_for_element_by_css_selector(browser, '#test-{}-btn'.format(APP_NAME))
    button.click()
    # The homology region must now be present.
    assert len(wait_for_elements_by_css_selector(browser, '.syntenicRegion')) > 0
def coverage():
    """
    View a report on test coverage.

    Call py.test with coverage turned on.
    """
    print("\ncoverage")
    command = [
        PYTEST,
        "--cov-config", ".coveragerc",
        "--cov-report", "term-missing",
        "--cov=circup",
        "tests/",
    ]
    completed = subprocess.run(command)
    return completed.returncode
def cmd_session_create(cmd_ctx):
    """Create an HMC session."""
    session = cmd_ctx.session
    try:
        # Log off first so that logon really creates a new session; otherwise
        # the session from the ZHMC_SESSION_ID env var would be reused.
        session.logoff(verify=True)
        session.logon(verify=True)
    except zhmcclient.Error as exc:
        raise click_exception(exc, cmd_ctx.error_format)
    cmd_ctx.spinner.stop()

    # Derive the two env-var values from the verify_cert setting:
    # False -> disable verification; True -> defaults; anything else is a
    # CA-certs path/bundle.
    no_verify = None
    ca_certs = None
    if session.verify_cert is False:
        no_verify = 'TRUE'
    elif session.verify_cert is not True:
        ca_certs = session.verify_cert

    click.echo("export ZHMC_HOST={h}".format(h=session.host))
    click.echo("export ZHMC_USERID={u}".format(u=session.userid))
    click.echo("export ZHMC_SESSION_ID={s}".format(s=session.session_id))
    if no_verify is None:
        click.echo("unset ZHMC_NO_VERIFY")
    else:
        click.echo("export ZHMC_NO_VERIFY={nv}".format(nv=no_verify))
    if ca_certs is None:
        click.echo("unset ZHMC_CA_CERTS")
    else:
        click.echo("export ZHMC_CA_CERTS={cc}".format(cc=ca_certs))
def main():
    """ Main entry point for AnsibleModule """
    module = AnsibleModule(argument_spec=FactsArgs.argument_spec,
                           supports_check_mode=True)
    ansible_facts, additional_warnings = Facts(module).get_facts()
    warnings = list(additional_warnings)
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
def get_files(directory):
    """Gets full path of all files within directory, including subdirectories
    Returns a list of paths"""
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in names
    ]
def setup_add_ldif(ldb, ldif_path, subst_vars=None, controls=["relax:0"]):
    """Setup a ldb in the private dir.

    :param ldb: LDB file to import data into
    :param ldif_path: Path of the LDIF file to load
    :param subst_vars: Optional variables to substitute in LDIF.
    :param controls: Optional list of LDB controls; can be None for no
        controls (the docstring previously described a nonexistent
        ``nocontrols`` parameter).

    NOTE: the mutable default list is shared across calls; it is safe only
    as long as callers and ldb.add_ldif never mutate it.
    """
    assert isinstance(ldif_path, str)
    data = read_and_sub_file(ldif_path, subst_vars)
    ldb.add_ldif(data, controls)
def log_sheets_with_index(bk: xw.Book):
    """Logs the index and name of every sheet in a workbook.

    Arguments:
        bk {xw.Book} -- The book to log.
    """
    position = 0
    for sheet in bk.sheets:
        print(position, sheet.name)
        position += 1
def load_vocab(filename):
    """Loads vocab from a file

    Args:
        filename: (string) the format of the file must be one word per line.

    Returns:
        word2id: dict[word] = index
        id2word: dict[index] = word
    """
    word2id = {}
    with open(filename, 'r', encoding='utf-8') as f:
        for idx, line in enumerate(f):
            word2id[line.strip()] = idx
    id2word = {index: word for word, index in word2id.items()}
    # A mismatch would indicate duplicate words in the vocab file.
    assert len(word2id) == len(id2word)
    return word2id, id2word
def _log_and_cleanup(sesh: session.Session):
    """
    When done, write some logs and drop the db
    """
    storage = sesh.storage
    # Write the markdown log, then a JSON log next to it (.json suffix).
    md_path = _log_path(storage.script_path, storage.description)
    md_path.write_text(format_markdown(sesh.script, storage))
    md_path.with_suffix(".json").write_text(format_json(sesh.script, storage))
    print(f"{storage.script_path}: Cleaning up. Log: {md_path}")
    storage.drop()
def select_class_for_slot(class_name, slot_number):
    """
    Select a class_name for a certain slot_number.
    Class name is selected from one of get_potential_classes_for_slot(slot_number)

    Narrows the global ``valid_schedules`` down to the schedules that offer
    this class in this slot.
    """
    global valid_schedules
    valid_schedules_new = []
    class_ID = class_name_to_class_ID[class_name]
    # Keep only schedules whose slot actually contains the chosen class.
    for valid_schedule in valid_schedules:
        slot = valid_schedule[slot_number]
        if slot[class_ID] == 1:
            valid_schedules_new.append(valid_schedule)
    # Deep copy so later mutation of the surviving schedules cannot alias
    # the previous list's entries.
    valid_schedules = deepcopy(valid_schedules_new)
    return
def load_labels(label_path):
    """
    Load label definitions for VOC2012 from a text file.

    Each line of the file must be ``name R G B``.  The label path can be
    changed when running the training code with ``--label_path``.

    Returns:
        label: {label name: [R, G, B]}
        index: [[B, G, R], ...] -- channels reversed per entry
                (presumably for BGR consumers such as OpenCV -- confirm)
    """
    label = {}
    index = []
    with open(label_path, "r") as f:
        # Stream the file instead of reading all lines up front.
        for line in f:
            sp = line.split()
            rgb = [int(sp[1]), int(sp[2]), int(sp[3])]
            label[sp[0]] = rgb
            # Reversed channel order for the index list.
            index.append(rgb[::-1])
    return label, index
def _make_function_ptr_ctype(restype, argtypes):
    """Return a function pointer ctype for the given return type and argument types.
    This ctype can for example be used to cast an existing function to a different signature.
    """
    # Array types have no .kind attribute and cannot be returned by value.
    if restype != void and not hasattr(restype, "kind"):
        raise TypeError("restype ({}) has no kind attribute. This usually means that restype is an array type, which is not a valid return type.".format(restype))
    argdecls = []
    last = len(argtypes) - 1
    for i, argtype in enumerate(argtypes):
        if argtype is ...:
            # "..." (varargs) is only legal in the final position.
            if i != last:
                raise ValueError("... can only be the last argtype")
            argdecls.append("...")
        else:
            argdecls.append(ffi.getctype(argtype))
    return ffi.getctype(restype, "(*)({})".format(",".join(argdecls)))
def parse_matl_results(output):
    """Convert MATL output to a custom data structure.

    Splits the raw output on section markers ([IMAGE...], [AUDIO],
    [STDERR], [STDOUT]) and returns a list of typed items to pass back
    to the client.
    """
    items = []
    # Capturing split keeps the marker-prefixed sections as their own chunks.
    for chunk in re.split(r'(\[.*?\][^\n].*\n?)', output):
        if chunk == '':
            continue
        # Strip trailing newlines left by the split.
        chunk = chunk.rstrip('\n')
        if chunk.startswith('[IMAGE'):
            entry = process_image(re.sub(r'\[IMAGE.*?\]', '', chunk),
                                  chunk.startswith('[IMAGE]'))
        elif chunk.startswith('[AUDIO]'):
            entry = process_audio(chunk.replace('[AUDIO]', ''))
        elif chunk.startswith('[STDERR]'):
            entry = {'type': 'stderr', 'value': chunk.replace('[STDERR]', '')}
        elif chunk.startswith('[STDOUT]'):
            entry = {'type': 'stdout2', 'value': chunk.replace('[STDOUT]', '')}
        else:
            entry = {'type': 'stdout', 'value': chunk}
        if entry:
            items.append(entry)
    return items
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.