| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def plot_target_measure(config_list, target_list, measure_list_mm, ax):
    """Plot target geometry and measured points on *ax*.

    :param config_list: test configuration; index 0 is the test type
        ("line test" or "point test"), indices 4/5 are error thresholds.
    :param target_list: per-target parameters; for a line test each entry
        holds (start_x, start_y, end_x, end_y), for a point test (x, y).
    :param measure_list_mm: measured coordinates in mm, one list per target.
    :param ax: matplotlib axes to draw on.
    """
    test_type = config_list[0]
    # Thresholds are read for interface consistency; drawing itself does not
    # use them.
    boundary_error_threshold = config_list[4]
    center_error_threshold = config_list[5]
    for i in range(len(target_list)):
        if test_type == "line test":
            start_x = target_list[i][0]
            start_y = target_list[i][1]
            end_x = target_list[i][2]
            end_y = target_list[i][3]
            # A straight target line only needs its two endpoints.
            target_x = [start_x, end_x]
            target_y = [start_y, end_y]
            measure_x = [pt[0] for pt in measure_list_mm[i]]
            measure_y = [pt[1] for pt in measure_list_mm[i]]
            # Removed leftover debug print of points with y > 150 and the
            # dead commented-out interpolation code.
            target_line, = ax.plot(target_x, target_y, 'g-', linewidth=0.8)
            measure_line, = ax.plot(measure_x, measure_y, 'b.', linewidth=0.5)
        elif test_type == "point test":
            # Draw the nominal target as a hollow circle of radius 0.35 mm.
            target_point = plt.Circle((target_list[i][0], target_list[i][1]),
                                      0.35, color='green', fill=False)
            ax.add_artist(target_point)
            for repeat in measure_list_mm[i]:
                measure_x = [pt[0] for pt in repeat]
                measure_y = [pt[1] for pt in repeat]
                measure_point, = ax.plot(measure_x, measure_y, 'b.')
| 25,200
|
def process_temp_config(configs, verbose=True):
    """
    Temporarily set the sysctl configs (lost on reboot) after confirmation.
    Given configs must follow format:
    Key=variable; Value=variable_value when evaluated as a string
    :param configs: mapping of sysctl variable name -> value
    :param verbose: print a success message once all variables are set
    """
    assert isinstance(configs, dict)
    assert isinstance(verbose, bool)
    print("\nVariables to set:\n")
    # Width of the widest variable name, used for column alignment.
    max_char_count = max(len(key) for key in configs)
    formatted_row = f"{{:<{max_char_count}}}\t{{:<30}}"
    print(formatted_row.format("Variable", "Value"))
    print(formatted_row.format("--------", "-----"))
    for key, value in configs.items():
        print(formatted_row.format(key, value))
    print("")
    prompt = "Temporarily set the sysctl variables (until system reboot)? [Y/n]\n>"
    if input(prompt).lower() not in {'yes', 'y'}:
        return
    for key, value in configs.items():
        # f-string coerces value to str; no explicit str() needed.
        subprocess.check_call(["sysctl", "-w", f"{key}={value}"])
    if verbose:
        print("Successfully set sysctl variables!")
| 25,201
|
def main():
    """
    Main entry point for module execution
    :returns: the result form module invocation
    """
    # Build the Ansible module wrapper, then run the resource module and
    # hand its result straight back to Ansible.
    module = AnsibleModule(
        argument_spec=Bgp_globalArgs.argument_spec,
        mutually_exclusive=[],
        required_if=[],
        supports_check_mode=False,
    )
    outcome = Bgp_global(module).execute_module()
    module.exit_json(**outcome)
| 25,202
|
def write_curies(filepaths: dict, ontoid: str, prefix_map: dict, pref_prefix_map: dict) -> bool:
    """
    Update node id field in a nodefile
    and each corresponding subject/object
    node in the corresponding edges
    to have a CURIE, where the prefix is
    the ontology ID and the class is
    inferred from the IRI.
    :param filepaths: dict with "nodelist" and "edgelist" paths
    :param ontoid: the Bioportal ID of the ontology
    :param prefix_map: ontology ID -> {"prefixes": [(iri_prefix, split_char), ...]}
    :param pref_prefix_map: ontology ID -> preferred CURIE prefix
    :return: True if complete, False otherwise
    """
    success = False
    nodepath = filepaths["nodelist"]
    edgepath = filepaths["edgelist"]
    outnodepath = nodepath + ".tmp"
    outedgepath = edgepath + ".tmp"
    update_these_nodes = {}
    try:
        with open(nodepath, 'r') as innodefile, \
                open(edgepath, 'r') as inedgefile:
            with open(outnodepath, 'w') as outnodefile, \
                    open(outedgepath, 'w') as outedgefile:
                for line in innodefile:
                    updated_node = False
                    line_split = (line.rstrip()).split("\t")
                    node_iri = line_split[0]
                    if ontoid in prefix_map:
                        for prefix in prefix_map[ontoid]["prefixes"]:
                            if node_iri.startswith(prefix[0]):
                                split_iri = node_iri.rsplit(prefix[1], 1)
                                # BUG FIX: use a local name for the preferred
                                # prefix instead of reassigning `ontoid` - the
                                # original clobbered `ontoid`, breaking the
                                # `ontoid in prefix_map` lookups for every
                                # subsequent line.
                                curie_prefix = pref_prefix_map.get(ontoid, ontoid)
                                if len(split_iri) == 2:
                                    new_curie = f"{curie_prefix}:{split_iri[1]}"
                                else:
                                    new_curie = f"{curie_prefix}:"
                                line_split[0] = new_curie
                                update_these_nodes[node_iri] = new_curie
                                updated_node = True
                                continue
                    # If we don't have a native prefix OR this is a foreign prefix
                    # then look at other ontologies
                    if ontoid not in prefix_map or not updated_node:
                        for prefix_set in prefix_map:
                            for prefix in prefix_map[prefix_set]["prefixes"]:
                                if node_iri.startswith(prefix[0]):
                                    split_iri = node_iri.rsplit(prefix[1], 1)
                                    # Same fix: don't clobber the loop variable.
                                    curie_prefix = pref_prefix_map.get(prefix_set, prefix_set)
                                    if len(split_iri) == 2:
                                        new_curie = f"{curie_prefix}:{split_iri[1]}"
                                    else:
                                        new_curie = f"{curie_prefix}:"
                                    line_split[0] = new_curie
                                    update_these_nodes[node_iri] = new_curie
                                    continue
                    outnodefile.write("\t".join(line_split) + "\n")
                for line in inedgefile:
                    line_split = (line.rstrip()).split("\t")
                    # Check for edges containing nodes to be updated
                    # (column 1 = subject, column 3 = object).
                    if line_split[1] in update_these_nodes:
                        line_split[1] = update_these_nodes[line_split[1]]
                    if line_split[3] in update_these_nodes:
                        line_split[3] = update_these_nodes[line_split[3]]
                    outedgefile.write("\t".join(line_split) + "\n")
        # Atomically replace the originals with the rewritten files.
        os.replace(outnodepath, nodepath)
        os.replace(outedgepath, edgepath)
        success = True
    except (IOError, KeyError) as e:
        print(f"Failed to write CURIES for {nodepath} and/or {edgepath}: {e}")
        success = False
    return success
| 25,203
|
def split(time: list, value: list, step, group_hours, region=None, whole_group=False):
    """
    Split and group 'step' number of averaged values 'hours' apart
    :param time: time per value (hour apart)
    :param value: values corresponding to time
    :param step: number of group times set for each index; the sign selects
        the direction (positive = forward, negative = backward)
    :param group_hours: group times into 'hours' hours
    :param region: region of indices to be considered
    :param whole_group: include the aggregated value of
        whole time group for each of its members not just until that member
    :return: list (one entry per index in region) of 'step'-long lists of
        group averages, or -1 when time and value lengths differ
    """
    splits = list()  # step group times per index
    size = len(time)
    if size != len(value):
        # Length mismatch is signalled with -1 rather than an exception.
        return -1
    # direction is the sign of step
    direction = np.sign(step)
    # indices to be considered; a negative end means "up to the last index"
    region = (0, size - 1) if region is None else region
    region = (max(region[0], 0), size - 1 if region[1] < 0 else region[1])
    # Running group average of each index either forward (when step < 0)
    # or backward (when step > 0), when whole_group = False
    if not whole_group:
        run_average = running_average(time, value, group_hours=group_hours,
                                      direction=-np.sign(step), whole_group=False)
    else:
        run_average = []
    group_time, average, group_lookup, _ = group_average(time, value, group_hours=group_hours)
    group_size = len(group_time)
    # init first 'steps' (for forward)
    # or duplication of first (for backward) [whole/partial] group average as array of step values
    group_time = pre_group_time = round_hour(time[region[0]], group_hours)
    group_index = group_lookup[group_time]
    last_index = group_index + step - direction
    if step > 0:
        initial_values = average[group_index:min(last_index + 1, group_size)]
        if len(initial_values) != abs(step):  # duplicate the last group average to reach 'step' values
            # NOTE(review): this appends a single nested list
            # [[average[-1] * k]] rather than k repeated scalars as the
            # backward branch does - confirm this asymmetry is intended.
            initial_values += [[average[-1] * (group_size - last_index)]]
    else:
        initial_values = average[max(last_index, 0):group_index + 1]
        if len(initial_values) != abs(step):  # duplicate the first group average to reach 'step' values
            initial_values = ([average[0]] * (-last_index)) + initial_values
    step_values = deque(initial_values)
    cur_step = 0
    for i in range(region[0], region[1] + 1):
        group_time = round_hour(time[i], group_hours)
        if group_time != pre_group_time:
            # Entered a new time group: slide the window of group averages.
            group_index = group_lookup[group_time]
            last_index = group_index + step - direction
            cur_step = min(step, cur_step + 1)
            step_values.rotate(-1)  # shift right to go toward end of groups
            # duplicate the second to last value if group size is passed
            # otherwise set the last value from group averages
            if step > 0:
                step_values[-1] = average[last_index] if last_index < group_size else step_values[-2]
            else:
                step_values[-1] = average[group_index]
            pre_group_time = group_time
        # replace the group average with partial average if the whole group is not required
        if not whole_group:
            if cur_step == step or step > 0:
                step_values[0 if step > 0 else -1] = run_average[i]
            elif group_index == 0:
                # this branch is executed only for the first group for backward (few times)
                step_values = deque([run_average[i]] * abs(step))
        splits.append(list(step_values))
    return splits
| 25,204
|
def launch_ebs_affinity_process(instanceid, instance_infos, ebs_configs):
    """ Manage the ebs affinity process.
    :param instanceid string The instance id
    :param instance_infos dict Informations about the instance
    :param ebs_configs dict The EBS parameters
    :return bool True unless attaching the volume failed
    """
    # Guard clause: nothing to do when a disk already occupies the mount point.
    if check_if_ebs_already_attached(instanceid,
                                     ebs_configs['mount_point'],
                                     instance_infos):
        logger.info("A disk is already attached on the target mount point: {0}" .format(ebs_configs['mount_point']))
        return True
    if manage_ebs_volume(ebs_configs, instanceid, instance_infos):
        logger.info("EBS: {0} has been attached on the Instance-id: {1}" .format(ebs_configs['mount_point'], instanceid))
        return True
    logger.error("Error during the management of the EBS volume: {0}. Disk not attached to the instance: {1} " .format(ebs_configs['mount_point'], instanceid))
    return False
| 25,205
|
def forecast_plot(train, test, fitted_values, forecast_values, new_fig=False, plot_title="Forecast"):
    """
    Plot train data, test data, fitted values of the model and the predicted/forecast values
    Params:
        train : the train dataframe
        test : the test dataframe (may be None)
        fitted_values : fitted values of the model (may be None)
        forecast_values : predicted/forecast values
        new_fig : create a fresh 12x6 figure instead of drawing on the current one
        plot_title : title for the plot
    """
    test_window = 0
    if test is not None:
        # Never plot more test points than we have forecasts for.
        test_window = min(len(test), len(forecast_values))
    if new_fig:
        plt.figure(figsize=(12, 6))
    plt.title(plot_title)
    plt.plot(train, color=TRAIN_COLOR, label='Train')
    if test_window > 0:
        plt.plot(test[:test_window], color=TEST_COLOR,
                 ls=TEST_LS, label='Test')
    if fitted_values is not None:
        plt.plot(fitted_values, color=FITTED_COLOR, label="Fitted Values")
    plt.plot(forecast_values, color=FORECAST_COLOR, label="Predicted Values")
    plt.legend(loc="best")
| 25,206
|
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Retrieve a list of mapping rules.

    ``check_mode`` and ``force`` are accepted for interface compatibility
    but have no effect on this read-only call.
    """
    description = "Retrieve a list of mapping rules"
    uri = "/iam/access/v8/mapping-rules"
    return isamAppliance.invoke_get(description, uri)
| 25,207
|
def neg(program: "MipsProgram", rd: str, rs: str):
    """Negate Reg[rs] (two's complement) and store in Reg[rd].

    BUG FIX: the original masked to 32 bits *before* adding 1
    (``((~x) & 0xFFFF_FFFF) + 1``), so negating 0 produced 0x1_0000_0000,
    a 33-bit value. Masking after the +1 keeps the result in 32 bits.
    """
    program.registers[rd] = ((~program.registers[rs]) + 1) & 0xFFFF_FFFF
| 25,208
|
def get_oyente(test_subject=None, mutation=None):
    """
    Run the Oyente test suite on a provided script.

    When no subject is passed, the script is pulled from the current
    request's form data and the result is returned as a JSON response;
    otherwise a plain dict is returned.
    """
    is_request = not test_subject
    if is_request:
        test_subject = request.form.get('data')
    analyzer = Oyente(test_subject)
    info, errors = analyzer.oyente(test_subject)
    if len(errors) > 0:
        formatted = []
        for err in errors:
            message_lines = err[1].split('\n')
            formatted.append({
                'lineno': err[0].split(':')[1],
                'code': "\n".join(message_lines[1:]),
                'description': message_lines[0],
            })
        errors = formatted
    if len(info) > 0:
        info = [{entry[0]: entry[1] for entry in info}]
    output = {"info": info, "issues": errors, 'error': []}
    if mutation:
        output['mutation'] = mutation
    return jsonify(output) if is_request else output
| 25,209
|
def search_for_example(search_string: str) -> tuple:
    """Get the Example for a Particular Function.

    :param search_string: user text to resolve to a known function name.
    :return: (function_name, rows) on a match, otherwise (None, ()).
    """
    function = match_string(search_string)
    if not function:
        return None, ()
    function = function.strip()
    # SECURITY: `function` is interpolated directly into the SQL statement.
    # It comes from match_string(), which should restrict it to known
    # function names, but if `execute` supports parameter binding this
    # should be switched to a parameterized query to rule out injection.
    sql = f"SELECT example, comment FROM example WHERE function='{function}'"
    data = execute(sql)
    return function, data
| 25,210
|
def WildZumba(x, c1=20, c2=0.2, c3=2*np.pi):
    """A separable R**n==>R function, assumes a real-valued numpy vector as input."""
    # Ackley-style surface: RMS exponential well plus cosine modulation term.
    rms_term = -c1 * np.exp(-c2 * np.sqrt(np.mean(x ** 2)))
    cos_term = -np.exp(np.mean(np.cos(c3 * x)))
    return rms_term + cos_term + c1 + np.exp(1)
| 25,211
|
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
    """
    Parse arguments
    Args:
        parser (argparse.ArgumentParser)
        parsed_shared_args: shared options; only `_expert` is read here.
    """
    # Imports are local to avoid paying kgtk import cost at module load.
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
    from kgtk.utils.argparsehelpers import optional_bool
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions
    # Expert mode unlocks additional (advanced) options in the groups below.
    _expert: bool = parsed_shared_args._expert
    parser.add_input_file(who="The KGTK file(s) to validate.",
                          dest="input_files",
                          options=["-i", "--input-files"],
                          allow_list=True,
                          positional=True)
    parser.add_argument("--header-only", dest="header_only",
                        help="Process the only the header of the input file (default=%(default)s).",
                        type=optional_bool, nargs='?', const=True, default=False)
    # Attach the standard KGTK debug / reader / value option groups.
    KgtkReader.add_debug_arguments(parser, expert=_expert)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, validate_by_default=True, expert=_expert)
    KgtkValueOptions.add_arguments(parser, expert=_expert)
| 25,212
|
def import_xlsx(filename, skip_variation=False):
    """Import peak parameters, the chromatogram and, if present, the varied parameters.

    Parameters
    ----------
    filename : str
        Name of the xlsx file.
    skip_variation : bool, default = False
        Skip the Variation block even if it is present.

    Returns
    -------
    Tuple[List[Peak], Chromatogram, dict, int, np.ndarray]
        If the xlsx file contains a Variation block: a tuple of the list of
        Peak instances, the Chromatogram instance, a dict of varied parameters
        with lists of sigmas, the number of files, and an array of the
        fraction of files in which peaks will be missing.
    Tuple[List[Peak], Chromatogram]
        If the Variation block is absent: a tuple of just the list of Peak
        instances and the Chromatogram instance.
    """
    wb = ox.load_workbook(filename, data_only=True)
    sheet_ranges = wb['input']
    max_row = sheet_ranges.max_row
    rows = list(sheet_ranges.rows)
    wb.close()

    def get_row(row, key):
        # Values of one row restricted to the columns of the named merged block.
        return list(map(lambda x: x.value, rows[row][d_xl[key]['start_idx']:
                                                     d_xl[key]['end_idx']]))

    def get_col(col, start_row, nn):
        # Values of `nn` consecutive cells of one column starting at start_row.
        res = []
        for i_cell in range(start_row, start_row + nn):
            res.append(sheet_ranges.cell(i_cell, col).value)
        return res

    d_xl = {}
    # Read the first row: every non-empty cell names a block.
    for cell in rows[0]:
        cell_value = cell.value
        if cell_value is not None:
            d_xl.update({cell_value: {}})
    # Handle merged cells (Chromatogram, Peaks, Variation): record each
    # block's column span.
    mcr = sheet_ranges.merged_cells.ranges
    for cr in mcr:
        name = cr.start_cell.value
        if name in d_xl:
            start_idx = cr.start_cell.col_idx - 1
            cols = cr.size['columns']
            end_idx = start_idx + cols
            d_xl[name].update({'start_idx': start_idx, 'cols': cols, 'end_idx': end_idx})
    # Chromatogram: row 1 holds parameter names, row 2 their values.
    names, values = map(lambda x: get_row(x, 'Chromatogram'), (1, 2))
    d_xl['Chromatogram'].update(zip(names, values))
    chrom = Chromatogram(**d_xl['Chromatogram'])
    # Peaks: one peak per remaining row; 'mass_spect' cells hold
    # "m/z intensity" pairs separated by ';'.
    head_peaks = get_row(1, 'Peaks')
    params_peak = {}
    sep_mz_i = ';'
    sep_into_mz_i = ' '
    peak_list = []
    for i in range(2, max_row):
        params_peak.update(zip(head_peaks, get_row(i, 'Peaks')))
        mz_i = np.fromstring(params_peak['mass_spect'].replace('\n', '').
                             replace(sep_mz_i, ''), sep=sep_into_mz_i).reshape((-1, 2))
        del params_peak['mass_spect']
        mz_list = mz_i[:, 0].astype(np.int16)
        peak_list.append(Peak(mz_list=mz_list, intensity_list=mz_i[:, 1], **params_peak))
    # Variation (optional): per-parameter sigma lists, file count and
    # missing-peak fractions.
    if 'Variation' in d_xl and not skip_variation:
        head_variation = get_row(1, 'Variation')
        params_variation = {}
        for par in head_variation:
            params_variation.update({par: []})
        for i in range(2, max_row):
            for key, value in zip(head_variation, get_row(i, 'Variation')):
                params_variation[key].append(value)
        num_files = 0
        for n, i in enumerate(rows[0]):
            if i.value in ('Num_files', 'Num files'):
                num_files = rows[1][n].value
                break
        # Missing: defaults to zeros when no such column exists.
        miss = np.zeros(max_row)
        for n, i in enumerate(rows[0]):
            if i.value in ('Missing', 'missing', 'miss'):
                miss = np.array(get_col(n + 1, 3, len(peak_list)))
                break
        return peak_list, chrom, params_variation, num_files, miss
    return peak_list, chrom
| 25,213
|
def _get_dataset_builder(
        dataset: Union[str, tfds.core.DatasetBuilder],
        data_dir: Optional[str] = None) -> tfds.core.DatasetBuilder:
    """Returns a dataset builder for a dataset name or a prebuilt builder."""
    # Early returns instead of building up an intermediate variable.
    if isinstance(dataset, str):
        return tfds.builder(dataset, data_dir=data_dir)
    if isinstance(dataset, tfds.core.DatasetBuilder):
        return dataset
    raise ValueError("`dataset` must be a string or tfds.core.DatasetBuilder. "
                     f"Received {dataset} instead.")
| 25,214
|
def build_design(yamlfile, sources_dir=None, part=None):
    """Generate a complete project.

    :param yamlfile: file describing the top design
    :param sources_dir: directory to scan to include additional HDL files
        to core file
    :param part: FPGA part identifier forwarded to the build step
    """
    ipc = IPConnect()
    with open(yamlfile) as f:
        design = load(f, Loader=Loader)
    # Optional sections default to empty mappings.
    ports = design.get('ports', dict())
    interfaces = design.get('interfaces', dict())
    external = design.get('external', dict())
    for name, ip in design['ips'].items():
        ip_wrapper = IPWrapper(ip['file'], ip['module'], name)
        if 'parameters' in ip:
            _interpret_parameters(ip['parameters'])
            ip_wrapper.set_parameters(ip['parameters'])
        ipc.add_ip(ip_wrapper)
    ipc.make_connections(ports, interfaces)
    ipc.make_external_ports(external)
    ipc.build(sources_dir=sources_dir, part=part)
| 25,215
|
def write_bbox(scene_bbox, out_filename):
    """Export scene bbox to meshes
    Args:
        scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
        out_filename: (string) filename
    Note:
        To visualize the boxes in MeshLab.
        1. Select the objects (the boxes)
        2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
        3. Select Wireframe view.
    """
    def _box_to_mesh(box):
        # Homogeneous transform that translates a unit box to the box centre.
        transform = np.eye(4)
        transform[0:3, 3] = box[:3]
        transform[3, 3] = 1.0
        return trimesh.creation.box(box[3:], transform)

    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(_box_to_mesh(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
    return
| 25,216
|
def test_get_sr_no_results(client):
    """Assert that searching for a sr filing on a coop without one returns a 404."""
    response = client.get('/api/v1/businesses/CP0000000/filings/specialResolution')
    assert response.status_code == 404
| 25,217
|
def label_rotate(annot, rotate):
    """
    anti-clockwise rotate the occ order annotation by rotate*90 degrees
    :param annot: (H, W, 9) ; [-1, 0, 1]
    :param rotate: value in [0, 1, 2, 3]
    :return: rotated annotation (the input itself when rotate == 0)
    """
    # Permutation of the 8 orientation channels for each quarter turn.
    channel_maps = {
        1: [2, 4, 7, 1, 6, 0, 3, 5],
        2: [7, 6, 5, 4, 3, 2, 1, 0],
        3: [5, 3, 0, 6, 1, 7, 4, 2],
    }
    rotate = int(rotate)
    if rotate == 0:
        return annot
    annot_rot = np.rot90(annot, rotate)
    orientation = annot_rot[:, :, 1:].copy()
    annot_rot[:, :, 1:] = orientation[:, :, channel_maps[rotate]]
    return annot_rot
| 25,218
|
def allocate_available_excess(region):
    """
    Allocate available excess capital (if any).

    Sets 'available_cross_subsidy' to the surplus of revenue over cost and
    'deficit' to the shortfall; at most one of the two is non-zero.
    """
    surplus = region['total_revenue'] - region['total_cost']
    region['available_cross_subsidy'] = max(surplus, 0)
    region['deficit'] = max(-surplus, 0)
    return region
| 25,219
|
def items(dic):
    """Py 2/3 compatible way of getting the items of a dictionary."""
    # Prefer the Python 2 iterator method when it exists; otherwise fall
    # back to an iterator over the Python 3 items view.
    legacy_items = getattr(dic, 'iteritems', None)
    if legacy_items is not None:
        return legacy_items()
    return iter(dic.items())
| 25,220
|
def new_settingsresponse_message(loaded_json, origin):
    """
    takes in a request - executes search for settings and creates a response as bytes
    :param loaded_json:
    :param origin: is this a response of drone or groundstation
    :return: a complete response packet as bytes (JSON payload + CRC32 trailer)
    """
    complete_response = {
        'destination': 4,
        'type': DBCommProt.DB_TYPE_SETTINGS_RESPONSE.value,
        'response': loaded_json['request'],
        'origin': origin,
        'id': loaded_json['id'],
    }
    # Hoist the optional settings filter once for both request types.
    has_settings = 'settings' in loaded_json
    settings = loaded_json['settings'] if has_settings else None
    requested = loaded_json['request']
    if requested == DBCommProt.DB_REQUEST_TYPE_DB.value:
        complete_response = read_dronebridge_settings(complete_response, origin, has_settings, settings)
    elif requested == DBCommProt.DB_REQUEST_TYPE_WBC.value:
        complete_response = read_wbc_settings(complete_response, has_settings, settings)
    response = json.dumps(complete_response)
    crc32 = binascii.crc32(str.encode(response))
    return response.encode() + crc32.to_bytes(4, byteorder='little', signed=False)
| 25,221
|
def validate(config_dir_path: Parameter.REQUIRED, **kwargs):
    """
    Validate an Ambassador configuration. This is an extension of "config" that
    redirects output to devnull and always exits on error.
    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    """
    # Discard output: validation only cares about the exit behaviour.
    output_target = os.devnull
    config(config_dir_path, output_target, exit_on_error=True, **kwargs)
| 25,222
|
def list_icmp_block(zone, permanent=True):
    """
    List ICMP blocks on a zone
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' firewalld.list_icmp_block zone
    """
    # Assemble the firewall-cmd arguments, then join into one command string.
    args = ["--zone={0}".format(zone), "--list-icmp-blocks"]
    if permanent:
        args.append("--permanent")
    return __firewall_cmd(" ".join(args)).split()
| 25,223
|
def AddVirtualCurrencyTypes(request, callback, customData = None, extraHeaders = None):
    """
    Adds one or more virtual currencies to the set defined for the title. Virtual Currencies have a maximum value of
    2,147,483,647 when granted to a player. Any value over that will be discarded.
    https://docs.microsoft.com/rest/api/playfab/admin/title-wide-data-management/addvirtualcurrencytypes
    """
    secret_key = PlayFabSettings.DeveloperSecretKey
    if not secret_key:
        raise PlayFabErrors.PlayFabException("Must have DeveloperSecretKey set to call this method")

    def wrappedCallback(playFabResult, error):
        # Forward the result only when the caller supplied a callback.
        if callback:
            callback(playFabResult, error)

    PlayFabHTTP.DoPost("/Admin/AddVirtualCurrencyTypes", request, "X-SecretKey", secret_key,
                       wrappedCallback, customData, extraHeaders)
| 25,224
|
def dependencies_found(analysis_id, execution_id):
    """
    Installation data from buildbot.
    Requires a JSON list of objects with the following keys:
    * installer: The system used to install the dependency.
    * spec: The full specification used by the user to request the
    package.
    * source: Entity providing the artifact.
    * name: The real package name.
    * version: The installed version of the package.
    .. note:: Internal API
    """
    installations = bottle.request.json
    # Guard clause: nothing posted means nothing to scan.
    if not installations:
        return {'task_id': None, 'scanning': 0}
    # Create database objects returning a list of scanneable artifacts.
    artifacts = register_installations(analysis_id, execution_id,
                                       installations)
    analysis_needed = {a for a in artifacts if a.analysis_needed()}
    # Launch dependency scan and mark done when finished.
    analysis_task = (
        providers.analyze_artifacts(analysis_needed)  # <- group of tasks
        | tasks.mark_task_done.si(analysis_id)).delay()
    return {'task_id': analysis_task.id, 'scanning': len(analysis_needed)}
| 25,225
|
def dict_merge(dct, merge_dct):
    """ Taken from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
    Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    # BUG FIX: ``collections.Mapping`` was removed in Python 3.10; the ABC
    # lives in ``collections.abc``.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if k in dct and isinstance(dct[k], dict) and isinstance(v, Mapping):
            dict_merge(dct[k], v)
        else:
            dct[k] = v
| 25,226
|
def set_augmentor():
    """
    Set the augmentor.
    1. Select the operations and create the config dictionary
    2. Pass it to the Augmentor class with any other information that requires
    3. Return the instance of the class.
    :return:
    """
    # Build the operation config one entry at a time for readability.
    config = {}
    config['blur'] = {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3}
    config['brightness'] = {'values': (0.6, 1.0), 'prob': 0.1}
    config['brightness1'] = {'values': (1.0, 1.5), 'prob': 0.1}
    config['flip'] = {'values': ('hor',), 'prob': 0.5}
    config['grid_mask'] = {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4}
    config['illumination'] = {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2}
    config['noise'] = {'values': (2, 10), 'use_gray_noise': True, 'prob': 1}
    config['rotate'] = {'values': (-45, 45), 'prob': 0.4}
    config['translate'] = {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True}
    config['zoom'] = {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}
    return Augmentor(config, no_repetition=True)
| 25,227
|
def get_global_event_logger_instance():
    """Get an event logger with prefilled fields for the collection.
    This returns an options configured event logger (proxy) with prefilled
    fields. This is almost CERTAINLY the event logger that you want to use in
    zaza test functions.
    :returns: a configured LoggerInstance with prefilled collection and unit
        fields.
    :rtype: LoggerInstance
    """
    manager = get_global_events_logging_manager()
    return manager.get_event_logger_instance()
| 25,228
|
def plot_market_entry(cat_entry_and_exit_df, cat_entry_and_exit_df_2):
    """
    returns a plot with the entry and exit of firms per category

    The two dataframes (one per scenario) are drawn in two subplot columns
    sharing the same x-range so the scenarios are visually comparable.
    """
    def _add_scenario_traces(df, col, colour, dark_colour):
        # Add the entry, exit and net-entry bars for one scenario to
        # subplot column `col`. Exits are negated so they extend left.
        xs = df.index
        new_per_cat = df.entry.astype(int)
        dead_per_cat = df.exit.astype(int)
        cats = np.arange(len(new_per_cat))
        fig.append_trace(
            go.Bar(y=xs, x=new_per_cat, orientation='h', showlegend=False, hoverinfo='text',
                   hovertext=['{} entries in category {}'.format(x, y)
                              for x, y in zip(new_per_cat, cats)],
                   marker={'color': colour}), 1, col)
        fig.append_trace(
            go.Bar(y=xs, x=-dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
                   hovertext=['{} exits in category {}'.format(x, y)
                              for x, y in zip(dead_per_cat, cats)],
                   marker={'color': colour}), 1, col)
        fig.append_trace(
            go.Bar(y=xs, x=new_per_cat - dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
                   hovertext=['{} net entries in category {}'.format(x, y)
                              for x, y in zip(new_per_cat - dead_per_cat, cats)],
                   marker={'color': dark_colour}), 1, col)

    # get the limits so everything is on the same scale
    df = pd.concat([cat_entry_and_exit_df, cat_entry_and_exit_df_2])
    limits = [-df.exit.max() - 0.3, df.entry.max() + 0.3]
    fig = tools.make_subplots(rows=1, cols=2)
    _add_scenario_traces(cat_entry_and_exit_df, 1, scen_colours[0], dark_scen_colours[0])
    _add_scenario_traces(cat_entry_and_exit_df_2, 2, scen_colours[1], dark_scen_colours[1])
    fig['layout']['xaxis2'].update(title="Number of companies", range=limits)
    fig['layout']['xaxis1'].update(title="Number of companies", range=limits)
    fig['layout']['yaxis1'].update(title="Product category")
    fig['layout'].update(title='Market entry and exit per product category')
    fig['layout']['font'].update(family='HelveticaNeue')
    fig['layout'].update(barmode='overlay')
    return fig
| 25,229
|
def load_surface_file(file_name):
    """Load a CONVERGE surface data file into Tecplot 360.
    This will create a new dataset in the active frame or create a new frame
    if the active frame already has a dataset.
    """
    import os
    try:
        print("Loading ASCII file")
        nodes, verts = get_surface_data_ascii(file_name)
        # Nodes are flat [x, y, z, ...]; vertices are flat triangle indices.
        node_count = int(len(nodes) / 3)
        cell_count = int(len(verts) / 3)
        with tp.session.suspend():
            frame = tp.active_frame()
            if frame.has_dataset:
                frame = tp.active_page().add_frame()
            ds = tp.active_frame().dataset
            ds.add_variable("X")
            ds.add_variable("Y")
            ds.add_variable("Z")
            #ds.add_variable("BoundaryID")
            value_locations = [ValueLocation.Nodal, ValueLocation.Nodal,
                               ValueLocation.Nodal, ValueLocation.CellCentered]
            zone = ds.add_fe_zone(ZoneType.FETriangle, os.path.basename(file_name),
                                  node_count, cell_count, locations=value_locations)
            # De-interleave the flat coordinate array into X/Y/Z channels.
            xvals = nodes[0:node_count * 3:3]
            yvals = nodes[1:node_count * 3:3]
            zvals = nodes[2:node_count * 3:3]
            zone.values('X')[:] = xvals
            zone.values('Y')[:] = yvals
            zone.values('Z')[:] = zvals
            #zone.values('BoundaryID')[:] = components
            # CONVERGE indices are 1-based; Tecplot nodemaps are 0-based.
            zero_based_verts = [v - 1 for v in verts]
            zone.nodemap.array[:] = zero_based_verts
            tp.active_frame().plot_type = PlotType.Cartesian3D
            tp.active_frame().plot().fieldmap(zone).effects.lighting_effect = LightingEffect.Paneled
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt and hid the failure reason.
    except Exception as exc:
        print(f"ASCII failed: {exc}")
|
def burt2020_surrogates(name, scale):
    """
    Generates surrogates according to Burt et al., 2020, NeuroImage
    Parameters
    ----------
    name : {'atl-cammoun2012', 'atl-schaefer2018'}, str
        Name of atlas for which to load data
    scale : str
        Scale of atlas to use
    """
    # load data + distance matrix for given parcellation
    lh, rh, concepts = surrogates.load_data(NSDIR, name, scale)
    dlh, drh = surrogates.load_dist(DISTDIR, name, scale)
    outdir = SURRDIR / name / 'burt2020' / 'neurosynth'
    # Generate surrogates for each concept map in parallel; every job writes
    # its own CSV under the concept's directory with a fixed seed for
    # reproducibility.
    Parallel(n_jobs=N_PROC)(delayed(surrogates.burt2020_surrogates)(
        lh[:, i], rh[:, i], dlh, drh,
        fname=outdir / concepts[i] / f'{scale}_surrogates.csv',
        n_perm=N_PERM, seed=SEED
    ) for i in putils.trange(len(concepts), desc=f'Burt 2020 ({scale})'))
|
def find_elements_by_image(self, filename):
    """
    Locate all the occurence of an image in the webpage.
    :Args:
     - filename: The path to the image to search (image shall be in PNG format).
    :Returns:
        A list of ImageElement.
    """
    template = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
    template_height, template_width, _ = template.shape
    # Grab the current page as an image and convert its channel order for OpenCV.
    screenshot = Image.open(io.BytesIO(self.get_screenshot_as_png()))
    page = np.asarray(screenshot, dtype=np.float32).astype(np.uint8)
    page = cv2.cvtColor(page, cv2.COLOR_BGR2RGB)
    matches = match_template(page, template)
    return [ImageElement(self, match[0], match[1], template_width, template_height)
            for match in matches]
| 25,232
|
def compile_tf_signature_def_saved_model(
        saved_model_dir: str, saved_model_tags: Set[str], module_name: str,
        exported_name: str, input_names: Sequence[str],
        output_names: Sequence[str]) -> Modules:
    """Compiles a SignatureDef SavedModel to each backend that we test.

    Results are memoized in the module-level ``_global_modules``: only the
    first call compiles; later calls return the cached Modules.

    Args:
      saved_model_dir: Directory of the saved model.
      saved_model_tags: Optional set of tags to use when loading the model.
      module_name: A name for this compiled module.
      exported_name: A str representing the signature on the saved model to
        compile.
      input_names: A sequence of kwargs to feed to the saved model.
      output_names: A sequence of named outputs to extract from the saved model.
    Returns:
      A 'Modules' dataclass containing the reference module, target modules and
      artifacts directory.
    """
    global _global_modules
    # Return the memoized result if a previous call already compiled.
    if _global_modules is not None:
        return _global_modules
    # Setup the directory for saving compilation artifacts and traces.
    artifacts_dir = _setup_artifacts_dir(module_name)
    # Get the backend information for this test.
    ref_backend_info = module_utils.BackendInfo(FLAGS.reference_backend,
                                                f"{FLAGS.reference_backend}_ref")
    tar_backend_infos = get_target_backends()
    # Shared compile step, parameterized only by the backend.
    compile_backend = (
        lambda backend_info: backend_info.compile_signature_def_saved_model(
            saved_model_dir, saved_model_tags, module_name, exported_name,
            input_names, output_names, artifacts_dir))
    ref_module = compile_backend(ref_backend_info)
    tar_modules = [
        compile_backend(backend_info) for backend_info in tar_backend_infos
    ]
    _global_modules = Modules(ref_module, tar_modules, artifacts_dir)
    return _global_modules
| 25,233
|
def origtime2float(time):
    """Convert a datetime's time-of-day to a float of seconds via `time2float`.

    >>> import datetime
    >>> t = datetime.datetime(2010, 8, 5, 14, 45, 41, 778877)
    >>> origtime2float(t)
    53141.778876999997
    """
    # Render as H:M:S:microseconds, then delegate the numeric conversion.
    formatted = time.strftime("%H:%M:%S:%f")
    return time2float(formatted)
| 25,234
|
def test_commands(cam):
    """Short hand commands should work as intended."""

    def expected(extra):
        # Every command echoes back the CAM prefix followed by its arguments.
        return tuples_as_dict(cam.prefix + extra)

    # get_information
    should_be = expected([("cmd", "getinfo"), ("dev", "stage")])
    assert cam.get_information() == should_be

    # start_scan
    should_be = expected([("cmd", "startscan")])
    assert cam.start_scan() == should_be

    # stop_scan
    should_be = expected([("cmd", "stopscan")])
    assert cam.stop_scan() == should_be

    # autofocus_scan
    should_be = expected([("cmd", "autofocusscan")])
    assert cam.autofocus_scan() == should_be

    # pause_scan
    should_be = expected([("cmd", "pausescan")])
    assert cam.pause_scan() == should_be

    # enable / disable act on slide 0, well (1, 1), field (1, 1) by default
    coordinates = [
        ("slide", "0"),
        ("wellx", "1"),
        ("welly", "1"),
        ("fieldx", "1"),
        ("fieldy", "1"),
    ]
    should_be = expected([("cmd", "enable")] + coordinates + [("value", "true")])
    assert cam.enable() == should_be

    should_be = expected([("cmd", "enable")] + coordinates + [("value", "false")])
    assert cam.disable() == should_be

    # enable_all / disable_all
    should_be = expected([("cmd", "enableall"), ("value", "true")])
    assert cam.enable_all() == should_be

    should_be = expected([("cmd", "enableall"), ("value", "false")])
    assert cam.disable_all() == should_be

    # save_template
    should_be = expected([
        ("sys", "0"),
        ("cmd", "save"),
        ("fil", "{ScanningTemplate}leicacam.xml"),
    ])
    assert cam.save_template() == should_be
| 25,235
|
def set_level(lvl, log_to_syslog):
    """
    Set the module-wide log level and whether messages go to syslog.

    :param lvl: Log level as ERR/INFO/DEBUG; default: syslog.LOG_ERR
    :param log_to_syslog: True - write into syslog. False: skip
    :return: None
    """
    global report_level
    global write_to_syslog

    write_to_syslog = log_to_syslog

    # Any level other than INFO/DEBUG leaves report_level at its default.
    if lvl == Level.INFO:
        report_level = syslog.LOG_INFO
    elif lvl == Level.DEBUG:
        report_level = syslog.LOG_DEBUG
| 25,236
|
def test_setitem_downstream_doesnt_affect_upstream_backprop():
    """Test that upstream computational graph is not affected by downstream set-item"""
    x = Tensor([1.0, 2.0, 3.0, 4.0])
    y = Tensor([-1.0, -2.0, -3.0, -4.0])
    z = x * y
    # Overwrite y *after* z was built: z's graph must still see the old y.
    y[:] = 0
    z.backward()
    # dz/dz is all ones; dz/dx must be the *original* values of y.
    assert_allclose(np.ones_like(z.data), z.grad, err_msg=f"{type(z.grad)}")
    assert_allclose(np.array([-1.0, -2.0, -3.0, -4.0]), x.grad)
    # y's data was overwritten in place; y accumulates no gradient
    # (presumably the in-place write detaches it — per Tensor semantics).
    assert_allclose(np.array([0.0, 0.0, 0.0, 0.0]), y.data)
    assert y.grad is None
| 25,237
|
def discrete_fourier_transform1(freq, tvec, dvec, log=False):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2)

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param log: boolean, if True prints progress to standard output
                if False silent

    :return wfn: numpy array of complex numbers, spectral window function
                 (computed over the full frequency grid)
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform (computed over the first half of the grid)
    """
    # deal with logging
    if log:
        print('\n\t Calculating Discrete Fourier Transform...')
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    wfn = np.zeros(len(freq), dtype=complex)
    dft = np.zeros(int(len(freq)/2), dtype=complex)
    ndata = len(tvec)
    half = int(len(freq)/2)
    for i in __tqdmlog__(range(len(freq)), log):
        phase = -2*np.pi*freq[i]*tvec
        phvec = np.cos(phase) + 1j * np.sin(phase)
        # The window function spans the whole grid; the original computed this
        # identically in both branches of an if/else, so it is hoisted here.
        wfn[i] = np.sum(phvec)/ndata
        # The data transform only needs the first half of the frequency grid.
        if i < half:
            dft[i] = np.sum(dvec*phvec)/ndata
    return wfn, dft
| 25,238
|
def ngram_overlaps(a: List[str], b: List[str], threshold: int = 3) -> List[int]:
    """
    Compute the set of overlapping strings in each set based on n-gram
    overlap where 'n' is defined by the passed in threshold.

    For each string in ``a`` (in order), greedily picks the not-yet-matched
    index into ``b`` with the largest n-gram overlap (ties broken by set
    iteration order, as before).

    :param a: query strings
    :param b: candidate strings
    :param threshold: the 'n' of the n-grams
    :return: list of matched indices into ``b``
    """
    def get_ngrams(text):
        """
        Get a set of all the ngrams in the text
        """
        return set(" ".join(g) for g in grouper(text.split(), threshold))

    # Precompute ngrams for every string in b once, instead of recomputing
    # them for every (a, b) pairing inside the loop below.
    b_ngrams = [get_ngrams(text) for text in b]

    overlaps = []
    remaining = set(range(len(b)))
    for text in a:
        best_idx = -1
        best_overlap = 0
        ngrams = get_ngrams(text)
        for idx in remaining:
            ngram_overlap = len(ngrams & b_ngrams[idx])
            if ngram_overlap > best_overlap:
                best_idx = idx
                best_overlap = ngram_overlap
        if best_idx >= 0:
            overlaps.append(best_idx)
            remaining.remove(best_idx)
    return overlaps
| 25,239
|
def update_spec_cache(resources: Resources = None, spec_dir: str = None) -> None:
    """
    Allows users to update specified specs in cache.

    If nothing specified, all urls in "RESOURCES" are updated
    in the Tapipy folder.
    If a folder is specified, all urls specified are updated there.
    """
    # Fall back to Tapipy's base resource set when none were supplied.
    resources = resources or RESOURCES['tapipy']
    spec_dir = get_spec_dir(spec_dir)
    download_and_pickle_spec_dicts(resources, spec_dir=spec_dir,
                                   download_latest_specs=True)
| 25,240
|
def ssq_cwt(x, wavelet='morlet', scales='log', nv=None, fs=None, t=None,
            ssq_freqs=None, padtype='symmetric', squeezing='sum',
            difftype='direct', difforder=None, gamma=None):
    """Calculates the synchrosqueezed Continuous Wavelet Transform of `x`.
    Implements the algorithm described in Sec. III of [1].

    # Arguments:
        x: np.ndarray
            Vector of signal samples (e.g. x = np.cos(20 * np.pi * t))
        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain.
                - str: name of builtin wavelet. `ssqueezepy.wavs()`
                - tuple[str, dict]: name of builtin wavelet and its configs.
                  E.g. `('morlet', {'mu': 5})`.
                - `wavelets.Wavelet` instance. Can use for custom wavelet.
        scales: str['log', 'linear'] / np.ndarray
            CWT scales.
                - 'log': exponentially distributed scales, as pow of 2:
                         `[2^(1/nv), 2^(2/nv), ...]`
                - 'linear': linearly distributed scales.
                  !!! EXPERIMENTAL; default scheme for len(x)>2048 performs
                  poorly (and there may not be a good non-piecewise scheme).
        nv: int / None
            Number of voices (CWT only). Suggested >= 32 (default=32).
        fs: float / None
            Sampling frequency of `x`. Defaults to 1, which makes ssq
            frequencies range from 1/dT to 0.5, i.e. as fraction of reference
            sampling rate up to Nyquist limit; dT = total duration (N/fs).
            Overridden by `t`, if provided.
            Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
        t: np.ndarray / None
            Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
            Must be uniformly-spaced.
            Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
            Overrides `fs` if not None.
        ssq_freqs: str['log', 'linear'] / np.ndarray / None
            Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
            mapping is only approximate and wavelet-dependent.
            If None, will infer from and set to same distribution as `scales`.
        padtype: str
            Pad scheme to apply on input. One of:
                ('zero', 'symmetric', 'replicate').
            'zero' is most naive, while 'symmetric' (default) partly mitigates
            boundary effects. See `padsignal`.
        squeezing: str['sum', 'lebesgue']
                - 'sum' = standard synchrosqueezing using `Wx`.
                - 'lebesgue' = as in [4], setting `Wx=ones()/len(Wx)`, which is
                  not invertible but has better robustness properties in some cases.
                  Not recommended unless you know what you're doing.
        difftype: str['direct', 'phase', 'numerical']
            Method by which to differentiate Wx (default='direct') to obtain
            instantaneous frequencies:
                    w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
                - 'direct': use `dWx`, obtained via frequency-domain
                  differentiation (see `cwt`, `phase_cwt`).
                - 'phase': differentiate by taking forward finite-difference of
                  unwrapped angle of `Wx` (see `phase_cwt`).
                - 'numerical': first-, second-, or fourth-order (set by
                  `difforder`) numeric differentiation (see `phase_cwt_num`).
        difforder: int[1, 2, 4]
            Order of differentiation for difftype='numerical' (default=4).
        gamma: float / None
            CWT phase threshold. Sets `w=inf` for small values of `Wx` where
            phase computation is unstable and inaccurate (like in DFT):
                w[abs(Wx) < beta] = inf
            This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
            contributions from points with indeterminate phase.
            Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)

    # Returns:
        Tx: np.ndarray [nf x n]
            Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
            (nf = len(ssq_freqs); n = len(x))
            `nf = na` by default, where `na = len(scales)`.
        ssq_freqs: np.ndarray [nf]
            Frequencies associated with rows of `Tx`.
        Wx: np.ndarray [na x n]
            Continuous Wavelet Transform of `x` L2-normed (see `cwt`);
            to L1-norm, `Wx /= np.sqrt(scales)`
        scales: np.ndarray [na]
            Scales associated with rows of `Wx`.
        w: np.ndarray [na x n]
            Phase transform for each element of `Wx`.

    # References:
        1. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
        I. Daubechies, S. Maes.
        https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf

        3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
        Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        https://arxiv.org/pdf/0912.2437.pdf

        4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533

        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_cwt_fw.m
    """
    # Validates difftype/difforder/squeezing and resolves dt, fs, nv defaults.
    def _process_args(N, fs, t, nv, difftype, difforder, squeezing):
        if difftype not in ('direct', 'phase', 'numerical'):
            raise ValueError("`difftype` must be one of: direct, phase, numerical"
                             " (got %s)" % difftype)
        if difforder is not None:
            if difftype != 'numerical':
                WARN("`difforder` is ignored if `difftype != 'numerical'")
            elif difforder not in (1, 2, 4):
                raise ValueError("`difforder` must be one of: 1, 2, 4 "
                                 "(got %s)" % difforder)
        elif difftype == 'numerical':
            difforder = 4
        if squeezing not in ('sum', 'lebesgue'):
            raise ValueError("`squeezing` must be one of: sum, lebesgue "
                             "(got %s)" % squeezing)
        dt, fs, t = _process_fs_and_t(fs, t, N)
        nv = nv or 32
        return dt, fs, difforder, nv

    # Computes the phase transform `w` from `Wx` by the requested method;
    # may also trim `Wx` (numerical path works on the padded signal).
    def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
        if difftype == 'direct':
            # calculate instantaneous frequency directly from the
            # frequency-domain derivative
            w = phase_cwt(Wx, dWx, difftype, gamma)
        elif difftype == 'phase':
            # !!! bad; yields negatives, and forcing abs(w) doesn't help
            # calculate inst. freq. from unwrapped phase of CWT
            w = phase_cwt(Wx, None, difftype, gamma)
        elif difftype == 'numerical':
            # !!! tested to be very inaccurate for small `a`
            # calculate derivative numerically
            _, n1, _ = p2up(N)
            # keep 4 extra samples on each side for the finite-difference stencil
            Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
            w = phase_cwt_num(Wx, dt, difforder, gamma)
        return Wx, w

    N = len(x)
    dt, fs, difforder, nv = _process_args(N, fs, t, nv, difftype, difforder,
                                          squeezing)

    scales, cwt_scaletype, *_ = process_scales(scales, N, nv=nv, get_params=True)

    # l1_norm=False to spare a multiplication; for SSWT L1 & L2 are exactly same
    # anyway since we're inverting CWT over time-frequency plane
    rpadded = (difftype == 'numerical')
    Wx, scales, _, dWx = cwt(x, wavelet, scales=scales, fs=fs, l1_norm=False,
                             derivative=True, padtype=padtype, rpadded=rpadded)

    gamma = gamma or np.sqrt(EPS)
    Wx, w = _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder)

    if ssq_freqs is None:
        # default to same scheme used by `scales`
        ssq_freqs = cwt_scaletype

    Tx, ssq_freqs = ssqueeze(Wx, w, scales=scales, fs=fs, ssq_freqs=ssq_freqs,
                             transform='cwt', squeezing=squeezing)

    if difftype == 'numerical':
        # drop the stencil padding kept above so outputs match len(x)
        Wx = Wx[:, 4:-4]
        w  = w[:, 4:-4]
        Tx = Tx[:, 4:-4]

    return Tx, ssq_freqs, Wx, scales, w
| 25,241
|
def test_board_clear():
    """Test if the board resets whenever clear is called"""
    board = Board(size=(3, 3))
    # Populate the board with a lifeform so clear() has something to erase.
    board.add(lf.Blinker(length=3), loc=(0, 1))
    board.clear()
    # After clearing, the state must contain exactly one unique value: 0.
    assert len(np.unique(board.state)) == 1
    assert np.unique(board.state)[0].astype(int) == 0
| 25,242
|
def request_retry_decorator(fn_to_call, exc_handler):
    """A generic decorator for retrying cloud API operations with consistent repeatable failure
    patterns. This can be API rate limiting errors, connection timeouts, transient SSL errors, etc.

    Args:
        fn_to_call: the function to call and wrap around
        exc_handler: a bool return function to check if the passed in exception is retriable

    Returns:
        The wrapped callable. Non-retriable exceptions (or exhaustion of all
        attempts) propagate to the caller with their original traceback.
    """
    # Local import keeps this block self-contained within the file.
    import functools

    @functools.wraps(fn_to_call)
    def wrapper(*args, **kwargs):
        MAX_ATTEMPTS = 10
        SLEEP_SEC_MIN = 5
        SLEEP_SEC_MAX = 15
        for i in range(1, MAX_ATTEMPTS + 1):
            try:
                return fn_to_call(*args, **kwargs)
            except Exception as e:
                if i < MAX_ATTEMPTS and exc_handler(e):
                    # Jittered sleep to avoid thundering-herd retries.
                    sleep_duration_sec = \
                        SLEEP_SEC_MIN + random.random() * (SLEEP_SEC_MAX - SLEEP_SEC_MIN)
                    # logging.warn is deprecated; warning() is the supported API.
                    logging.warning(
                        "API call failed, waiting for {} seconds before re-trying (this was attempt"
                        " {} out of {}).".format(sleep_duration_sec, i, MAX_ATTEMPTS))
                    time.sleep(sleep_duration_sec)
                    continue
                # Bare `raise` preserves the original traceback.
                raise
    return wrapper
| 25,243
|
def mass_vulndata():
    """
    Add a vulndata to a lot of hosts

    TODO: This!
    """
    host_ids = []
    # 'in' replaces the Python 2-only dict.has_key() and works on web2py's
    # request.vars storage as well.
    if 'host_ids' in request.vars:
        for z in request.vars.host_ids.split('|'):
            # Compare by value: `z is not ''` checked object identity and only
            # appeared to work because of CPython string interning.
            if z != '':
                host_ids.append(z)

    form = SQLFORM.factory(
        Field('vulndata', 'reference t_vulndata', label=T('Vulnerability')),
        buttons=[], _id="mass_asset_form")

    if form.validate():
        pass
| 25,244
|
def lambda_handler(event, context):
    """
    1. Receive from data from the lambda event.
    2. DynamoDB: Stores incomming form data
    3. Discord: Posts notification to a channel
    4. Mailgun: sends notification

    args:
        - event: Event data that has trigged the lambda function
        - context:
    """
    logging.info(f'OS.environ: {os.environ}')
    logging.info(f'lambda_handler: event {event}')

    # store form data
    data = event["data"]
    for key, value in data.items():
        logging.info(f'lambda_handler: {key}: {value}')

    def fail_with(method, body):
        # Notify the channel about the failure, then build the 500 response.
        payload = create_payload(is_success=False, data=data, method=method)
        notification.post_message_to_channel(payload=payload)
        return {
            'statusCode': 500,
            'body': body,
        }

    if not dynamo_put(data=data):
        return fail_with(
            "DynamoDB",
            'There was a problem uploading your data to DynamoDB.')

    if not send_mailgun_message(data=data):
        return fail_with(
            "Mailgun",
            'There was a problem sending your email via the Mailgun API.')

    payload = create_payload(is_success=True, data=data, method="Lambda")
    notification.post_message_to_channel(payload=payload)
    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Allow-Origin': f'{settings.company_url}',
            'Access-Control-Allow-Methods': 'OPTIONS,POST'
        },
        'body': 'success',
    }
| 25,245
|
async def establish_async_connection(config: json, logger: AirbyteLogger) -> AsyncConnection:
    """
    Create an async connection to a Firebolt database using the parameters provided.

    The returned connection can be used for parallel operations.

    :param config: Json object containing db credentials.
    :param logger: AirbyteLogger instance to print logs.

    :return: PEP-249 compliant database Connection object.
    """
    logger.debug("Connecting to Firebolt.")
    credentials = parse_config(config, logger)
    connection = await async_connect(**credentials)
    logger.debug("Connection to Firebolt established.")
    return connection
| 25,246
|
def _wrap_with_before(action, responder, resource=None, is_method=False):
    """Execute the given action function before a responder method.

    Args:
        action: A function with a similar signature to a resource responder
            method, taking (req, resp, resource, params)
        responder: The responder method to wrap
        resource: The resource affected by `action` (default None). If None,
            `is_method` MUST BE True, so that the resource can be
            derived from the `self` param that is passed into the wrapper
        is_method: Whether or not `responder` is an unbound method
            (default False)

    Returns:
        The wrapped responder, which invokes `action` first and then
        delegates to the original `responder`.
    """
    # NOTE(swistakm): introspect action function to guess if it can handle
    # additional resource argument without breaking backwards compatibility
    action_spec = _get_argspec(action)

    # NOTE(swistakm): create shim before checking what will be actually
    # decorated. This allows to avoid excessive nesting
    # A 4-arg action (5 with self) already accepts the resource; otherwise
    # wrap it in a shim that drops the resource argument.
    if len(action_spec.args) == (5 if _has_self(action_spec) else 4):
        shim = action
    else:
        # TODO(kgriffs): This decorator does not work on callable
        # classes in Python vesions prior to 3.4.
        #
        # @wraps(action)
        def shim(req, resp, resource, kwargs):
            # NOTE(kgriffs): Don't have to pass "self" even if has_self,
            # since method is assumed to be bound.
            action(req, resp, kwargs)

    # NOTE(swistakm): method must be decorated differently than
    # normal function
    if is_method:
        @wraps(responder)
        def do_before(self, req, resp, **kwargs):
            # `self` here is the resource instance the method is bound to.
            shim(req, resp, self, kwargs)
            responder(self, req, resp, **kwargs)
    else:
        # A plain function has no `self`, so the resource must be explicit.
        assert resource is not None

        @wraps(responder)
        def do_before(req, resp, **kwargs):
            shim(req, resp, resource, kwargs)
            responder(req, resp, **kwargs)

    return do_before
| 25,247
|
def last_day_of_month(d):
    """Return *d* moved to the final day of its month.

    Based on: https://stackoverflow.com/a/43088/6929343
    """
    if d.month == 12:
        # December always ends on the 31st.
        return d.replace(day=31)
    # Step to the first day of the next month, then back one day.
    first_of_next_month = d.replace(month=d.month + 1, day=1)
    return first_of_next_month - datetime.timedelta(days=1)
| 25,248
|
def plot_landscape(landscape):
    """
    Plot all landscapes for a given (set of) diagrams

    Inputs:
    -------
    landscape (list): Output of one iteration of persim.to_landscape()

    Outputs:
    --------
    Plots for each landscape in the list

    Returns:
    --------
    None
    """
    # TODO: Integrate more complex plotting args and kwargs for more precise control
    for i, (pts, ls) in enumerate(landscape):
        plt.figure()
        for j in range(len(ls)):
            # rf-string: '\l' is an invalid escape sequence in a plain literal
            # (SyntaxWarning on modern Python); the rendered label is unchanged.
            plt.plot(pts, ls[j], label=rf'$\lambda_{{{j}}}$')
        plt.title(rf'$H_{{{i}}}$ Landscape')
        plt.legend()
        plt.show()
    return None
| 25,249
|
def get_access_token():
    """Return access token for use in API request.

    Uses Application Default Credentials with cloud-platform scopes and
    refreshes them to obtain a currently-valid OAuth2 token.

    Raises:
        requests.exceptions.ConnectionError.
    """
    credentials, _ = google.auth.default(scopes=[
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/cloud-platform.read-only'
    ])
    # Freshly-loaded credentials carry no token; refresh() performs the
    # network round-trip that fetches one.
    request = google.auth.transport.requests.Request()
    credentials.refresh(request)
    return credentials.token
| 25,250
|
def _singleInstrumentParametersToJson(instrument: InstrumentBase,
                                      get: bool = False,
                                      addPrefix: str = '',
                                      includeMeta: List[str] = None,
                                      excludeParameters: List[str] = None,
                                      simpleFormat: bool = True) -> Dict:
    """Create a dictionary that holds the parameters of an instrument.

    :param instrument: the instrument whose parameters (and submodules'
        parameters, recursively) are serialized.
    :param get: if True, update parameter values via ``snapshot(update=True)``.
    :param addPrefix: prefix prepended to every parameter name in the result.
    :param includeMeta: snapshot metadata keys to include alongside 'value'.
    :param excludeParameters: parameter names to skip; "IDN" is always skipped.
    :param simpleFormat: if True (and no meta requested), map name -> value
        directly instead of name -> dict.
    """
    # The original used mutable default arguments ([]) and appended "IDN" to
    # the caller's excludeParameters list — a shared-state bug. Copy instead.
    includeMeta = list(includeMeta) if includeMeta is not None else []
    excludeParameters = list(excludeParameters) if excludeParameters is not None else []
    if "IDN" not in excludeParameters:
        excludeParameters.append("IDN")

    ret = {}
    snap = instrument.snapshot(update=get)
    for name, param in instrument.parameters.items():
        if name not in excludeParameters:
            if len(includeMeta) == 0 and simpleFormat:
                ret[addPrefix + name] = snap['parameters'][name].get('value', None)
            else:
                ret[addPrefix + name] = dict()
                for k, v in snap['parameters'][name].items():
                    if k in (['value'] + includeMeta):
                        ret[addPrefix + name][k] = v
        else:
            logger.debug(f"excluded: {addPrefix + name}")

    # NOTE(review): excludeParameters is intentionally NOT forwarded to
    # submodules (matching the original behavior) — confirm this is desired.
    for name, submod in instrument.submodules.items():
        ret.update(_singleInstrumentParametersToJson(
            submod, get=get, addPrefix=f"{addPrefix + name}.",
            simpleFormat=simpleFormat, includeMeta=includeMeta))

    return ret
| 25,251
|
def check_type(value: Optional[object],
               info: QAPISourceInfo,
               source: str,
               allow_array: bool = False,
               allow_dict: Union[bool, str] = False) -> None:
    """
    Normalize and validate the QAPI type of ``value``.

    Python types of ``str`` or ``None`` are always allowed.

    :param value: The value to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing this ``value``.
    :param allow_array:
        Allow a ``List[str]`` of length 1, which indicates an array of
        the type named by the list element.
    :param allow_dict:
        Allow a dict.  Its members can be struct type members or union
        branches.  When the value of ``allow_dict`` is in pragma
        ``member-name-exceptions``, the dict's keys may violate the
        member naming rules.  The dict members are normalized in place.

    :raise QAPISemError: When ``value`` fails validation.
    :return: None, ``value`` is normalized in-place as needed.
    """
    if value is None:
        return

    # Type name
    if isinstance(value, str):
        return

    # Array type
    if isinstance(value, list):
        if not allow_array:
            raise QAPISemError(info, "%s cannot be an array" % source)
        # Only the single-element form ['TypeName'] denotes an array.
        if len(value) != 1 or not isinstance(value[0], str):
            raise QAPISemError(info,
                               "%s: array type must contain single type name" %
                               source)
        return

    # Anonymous type
    if not allow_dict:
        raise QAPISemError(info, "%s should be a type name" % source)

    if not isinstance(value, dict):
        raise QAPISemError(info,
                           "%s should be an object or type name" % source)

    # When allow_dict names a pragma exception, relax member-name rules.
    permissive = False
    if isinstance(allow_dict, str):
        permissive = allow_dict in info.pragma.member_name_exceptions

    # value is a dictionary, check that each member is okay
    for (key, arg) in value.items():
        key_source = "%s member '%s'" % (source, key)
        # A leading '*' marks the member optional; strip it before checks.
        if key.startswith('*'):
            key = key[1:]
        check_name_lower(key, info, key_source,
                         permit_upper=permissive,
                         permit_underscore=permissive)
        # 'u' and 'has_*' collide with names the C code generator reserves.
        if c_name(key, False) == 'u' or c_name(key, False).startswith('has_'):
            raise QAPISemError(info, "%s uses reserved name" % key_source)
        check_keys(arg, info, key_source, ['type'], ['if', 'features'])
        check_if(arg, info, key_source)
        check_features(arg.get('features'), info)
        check_type(arg['type'], info, key_source, allow_array=True)
| 25,252
|
def unknown_action(player: Player, table: dynamodb.Table) -> ActionResponse:
    """
    Fallback handler for actions that could not be resolved.

    Performs no changes; the message list tells the caller the action was bad.

    :return: Original inputs matching updated inputs, and a message
    """
    messages = ["Action could not be resolved, type better next time"]
    return player, player, {}, {}, messages
| 25,253
|
def loadThemes():
    """
    Load default and user themes (if exist)
    """

    def loadThemesFromDir(dname, isBuiltin=False):
        # Read every *.theme file in dname into the global `themes` dict.
        if not os.path.isdir(dname):
            return
        theme_files = [f for f in os.listdir(dname) if f.endswith(".theme")]
        for fname in theme_files:
            try:
                theme = ssdf.load(os.path.join(dname, fname))
                # The file name (sans extension) must agree with the theme name.
                assert (
                    theme.name.lower() == fname.lower().split(".")[0]
                ), "Theme name does not match filename"
                theme.data = {
                    key.replace("_", "."): val for key, val in theme.data.items()
                }
                theme["builtin"] = isBuiltin
                themes[theme.name.lower()] = theme
                print("Loaded theme %r" % theme.name)
            except Exception as ex:
                print("Warning ! Error while reading %s: %s" % (fname, ex))

    # Built-in themes are loaded first; user themes may then override them.
    loadThemesFromDir(os.path.join(pyzoDir, "resources", "themes"), True)
    loadThemesFromDir(os.path.join(appDataDir, "themes"))
| 25,254
|
def dump_garbage():
    """
    Show what the collector currently considers garbage.

    Forces a collection, then prints a (truncated) repr and type for every
    object in ``gc.garbage``. Python 2 print statements converted to the
    print() function for Python 3 compatibility.
    """
    # force collection
    print("\nGARBAGE:")
    gc.collect()

    print("\nGARBAGE OBJECTS:")
    for x in gc.garbage:
        s = str(x)
        # Truncate long reprs so the dump stays readable.
        if len(s) > 80:
            s = s[:80]
        print(type(x), "\n  ", s)
| 25,255
|
def forecast_marginal_bindglm(mod, n, k, X=None, nsamps=1, mean_only=False):
    """
    Marginal forecast function k steps ahead for a binomial DGLM

    :param mod: binomial DGLM model object
    :param n: passed through to ``mod.get_mean`` / ``mod.simulate``
        (presumably the binomial trial count — confirm against the DGLM API)
    :param k: forecast horizon, in steps ahead
    :param X: regression covariates at the forecast horizon, if any
    :param nsamps: number of forecast samples to draw
    :param mean_only: if True, return only the forecast mean
    :return: forecast samples, or the forecast mean when ``mean_only``
    """
    # Plug in the correct F values
    F = update_F(mod, X, F=mod.F.copy())
    # Evolve to the prior for time t + k
    a, R = forecast_aR(mod, k)
    # Mean and variance
    ft, qt = mod.get_mean_and_var(F, a, R)
    # Choose conjugate prior, match mean and variance
    param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
    if mean_only:
        return mod.get_mean(n, param1, param2)
    # Simulate from the forecast distribution
    return mod.simulate(n, param1, param2, nsamps)
| 25,256
|
def _list_redundant(namespace):
    """Generate a list of configured policies which match defaults.

    This checks all policies loaded from policy files and checks to see if they
    match registered policies. If so then it is redundant to have them defined
    in a policy file and operators should consider removing them.
    """
    enforcer = _get_enforcer(namespace)
    # Ensure that files have been parsed
    enforcer.load_rules()

    for name, file_rule in enforcer.file_rules.items():
        reg_rule = enforcer.registered_rules.get(name, None)
        # Only report rules defined in a file that exactly match a default.
        if reg_rule and file_rule == reg_rule:
            print(reg_rule)
| 25,257
|
def import_file(isamAppliance, id, filename, check_mode=False, force=False):
    """
    Importing a file in the runtime template files directory.

    :param isamAppliance: appliance connection object
    :param id: target file id within the template files directory
    :param filename: local file to upload
    :param check_mode: if True, report the would-be change without applying it
    :param force: if True, upload even when the file already exists
    :return: appliance return object (with warnings when the file exists)
    """
    warnings = []
    check_file = _check(isamAppliance, id)
    # Warn (without aborting) when the target file already exists.
    # `is not None` / `not force` replace the non-idiomatic `!= None` /
    # `== False` comparisons.
    if check_file is not None and not force:
        warnings.append("File {0} exist.".format(id))
    if force is True or _check_import(isamAppliance, id, filename, check_mode=check_mode):
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_post_files(
                "Importing a file in the runtime template files directory",
                "/mga/template_files/{0}".format(id),
                [
                    {
                        'file_formfield': 'file',
                        'filename': filename,
                        'mimetype': 'application/octet-stream'
                    }
                ],
                {
                    'type': 'file',
                    'force': force
                })

    return isamAppliance.create_return_object(warnings=warnings)
| 25,258
|
def createOverlayMap(*args, **kwargs):
    """Create overlay map.

    Placeholder stub: accepts any arguments and does nothing.
    """
    return None
| 25,259
|
def get_func_from_attrdict(func_name: str, attrdict: AttrDict) -> ObjectiveFunction1D:
    """
    Given a string func_name, attempts to find the corresponding entry from attrdict.

    :param func_name: name to search for
    :param attrdict: mapping whose values carry a ``name`` attribute
    :returns: the matching objective function, or ``None`` when absent
    """
    # First value whose .name matches, else None (same as the original's
    # implicit fall-through return).
    return next(
        (func for func in attrdict.values() if func.name == func_name),
        None,
    )
| 25,260
|
def event_handle(handle_code):
    """
    Performs HTTP request-response procedure

    Builds and returns an async Sanic-style request handler that delegates
    each request to the FDK runner.

    :param handle_code: customer's code
    :type handle_code: fdk.customer_code.Function
    :return: None
    """
    async def pure_handler(request):
        # Imported lazily so the runner is only loaded when a request arrives.
        from fdk import runner
        log.log("in pure_handler")
        headers = dict(request.headers)
        log_frame_header(headers)
        func_response = await runner.handle_request(
            handle_code, constants.HTTPSTREAM,
            headers=headers, data=io.BytesIO(request.body))
        log.log("request execution completed")

        headers = func_response.context().GetResponseHeaders()
        status = func_response.status()
        # Coerce any status the FN contract does not allow to the default.
        if status not in constants.FN_ENFORCED_RESPONSE_CODES:
            status = constants.FN_DEFAULT_RESPONSE_CODE
        return response.HTTPResponse(
            headers=headers,
            status=status,
            content_type=headers.get(constants.CONTENT_TYPE),
            body_bytes=func_response.body_bytes(),
        )

    return pure_handler
| 25,261
|
def load_precomputed_embeddings(det_df, seq_info_dict, embeddings_dir, use_cuda):
    """
    Given a sequence's detections, it loads from disk embeddings that have already been computed and stored for its
    detections
    Args:
        det_df: pd.DataFrame with detection coordinates
        seq_info_dict: dict with sequence meta info (we need frame dims)
        embeddings_dir: name of the directory where embeddings are stored
        use_cuda: bool; if True (and CUDA is available) the result is moved to the GPU

    Returns:
        torch.Tensor with shape (num_detects, embeddings_dim)
    """
    # Embeddings live one file per frame under:
    #   <seq_path>/processed_data/embeddings/<det_file_name>/<embeddings_dir>/<frame>.pt
    embeddings_path = osp.join(
        seq_info_dict["seq_path"],
        "processed_data",
        "embeddings",
        seq_info_dict["det_file_name"],
        embeddings_dir,
    )

    # Load frames in sorted order so rows stay aligned with detection ids.
    frames_to_retrieve = sorted(det_df.frame.unique())
    embeddings_list = [
        torch.load(osp.join(embeddings_path, f"{frame_num}.pt"))
        for frame_num in frames_to_retrieve
    ]
    embeddings = torch.cat(embeddings_list, dim=0)

    # First column in embeddings is the index. Drop the rows of those that are not present in det_df
    ixs_to_drop = list(
        set(embeddings[:, 0].int().numpy()) - set(det_df["detection_id"])
    )
    embeddings = embeddings[
        ~np.isin(embeddings[:, 0], ixs_to_drop)
    ]  # Not so clean, but faster than a join

    assert_str = "Problems loading embeddings. Indices between query and stored embeddings do not match. BOTH SHOULD BE SORTED!"
    assert (embeddings[:, 0].numpy() == det_df["detection_id"].values).all(), assert_str

    embeddings = embeddings[:, 1:]  # Get rid of the detection index
    return embeddings.to(
        torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
    )
| 25,262
|
def printerr(text, width=80, errtype=None):
    """
    Small utility to print custom errors with proper indentation and text wrapping. The only error
    types coded are `error` and `warning`.

    Parameters:
        text (str): String of text without the preceding error flag that will be formated
        width (int): The maximum length of wrapped lines
        errtype (str): Indicate which type of error to format the string as. The default value of
            `None` will only text wrap the string to the specified `width`.

    .. codeauthor:: pygeostat development team 2015-11-01
    """
    # Bug fixes vs. the original:
    #   * `errtype.lower()` discarded its result (strings are immutable), so
    #     'Error' was never recognized — assign it back.
    #   * `errtype is 'error'` compared identity, not equality, which only
    #     worked by CPython string-interning accident — use `==`.
    #   * Unused `import pygeostat as gs` removed.
    if isinstance(errtype, str):
        errtype = errtype.lower()
    if errtype == 'error':
        text = 'ERROR: ' + text
        # Continuation lines align under the text after the flag.
        subsequent_indent = "       "
    elif errtype == 'warning':
        text = 'WARNING: ' + text
        subsequent_indent = "         "
    else:
        subsequent_indent = ""
    print(textwrap.fill(text, width=width, subsequent_indent=subsequent_indent))
| 25,263
|
def _model_gpt(size=0, dropout_rate=0.0, attention_dropout_rate=0.0):
  """Configs for a variety of Transformer model sizes."""
  layer_counts = [1, 3, 6, 12, 24, 36, 48]
  dims = [64, 128, 512, 768, 1024, 1280, 1600]
  dim = dims[size]
  return _transformer(
      emb_dim=dim,
      num_heads=dim // 64,  # always 64 dims per attention head
      num_layers=layer_counts[size],
      qkv_dim=dim,
      mlp_dim=4 * dim,
      dropout_rate=dropout_rate,
      attention_dropout_rate=attention_dropout_rate)
| 25,264
|
def parse_local_alignments(input_stream: Iterable[str]) -> Iterable[dict]:
    """Parse DALIGNER LAdump local alignments.

    This function reads from an iterable `input_stream` the available local
    alignments encoded in DALIGNER LAdump format. In other words, you could
    pipe the output of LAdump to this function, and it yields each local
    alignment with all avaible information as a nice Python `dict`.

    :raise ValueError: when a trace-point line appears after the announced
        number of trace points has already been consumed.
    """
    current_la_data = {}
    num_tracepoints = 0
    current_tracepoint = 0
    for line in input_stream:
        parts = line.split()

        # 'P' starts a new overlap record: flush the previous one if complete.
        if line.startswith('P'):
            if (current_la_data and 'a' in current_la_data and 'b' in
                    current_la_data):
                yield current_la_data

            current_la_data = {}
            num_tracepoints = 0
            current_tracepoint = 0

            current_la_data['a'] = parts[1]
            current_la_data['b'] = parts[2]
            current_la_data['strand'] = (Strand.SAME if parts[3] == 'n' else
                                         Strand.OPPOSITE)

        # 'C' gives the alignment ranges on both reads.
        if line.startswith('C'):
            a_start, a_end, b_start, b_end = map(int, parts[1:])
            current_la_data['arange'] = (a_start, a_end)
            current_la_data['brange'] = (b_start, b_end)

        # 'T' announces how many trace-point lines follow.
        if line.startswith('T'):
            current_la_data['trace_points'] = []
            num_tracepoints = int(parts[1])
            current_tracepoint = 0

        if line.startswith(' '):
            if current_tracepoint >= num_tracepoints:
                raise ValueError(
                    "Received more tracepoints than expected (expected {} "
                    "tracepoints).".format(num_tracepoints)
                )
            current_la_data['trace_points'].append(tuple(map(int, parts)))
            # BUG FIX: the counter was never incremented, so the overflow
            # check above could never fire for announced counts > 0.
            current_tracepoint += 1

        if line.startswith('D'):
            current_la_data['differences'] = int(parts[1])

    # Flush the final record.
    if current_la_data and 'a' in current_la_data and 'b' in current_la_data:
        yield current_la_data
| 25,265
|
def create_cxr_test_dataset(path_to_test_dataset: Path,
                            num_encoder_images: int = 200,
                            num_labelled_images: int = 300) -> None:
    """
    Creates fake datasets dataframe and dicom images mimicking the expected structure of the datasets
    of NIHCXR and RSNAKaggleCXR
    :param path_to_test_dataset: folder to which we want to save the mock data.
    :param num_encoder_images: The number of unlabelled images that the dataset should contain (for encoder training)
    :param num_labelled_images: The number of labelled images that the dataset should contain (for the linear head).
    """
    # Treat an existing folder as already populated and do nothing.
    if path_to_test_dataset.is_dir():
        return
    path_to_test_dataset.mkdir(exist_ok=True, parents=True)

    # Unlabelled entries, all pointing at the single mock dicom file.
    encoder_df = pd.DataFrame({"Image Index": np.repeat("1.dcm", num_encoder_images)})
    encoder_df.to_csv(path_to_test_dataset / "Data_Entry_2017.csv", index=False)

    # Labelled entries with a reproducible binary label (fixed RNG seed).
    labels = np.random.RandomState(42).binomial(n=1, p=0.2, size=num_labelled_images)
    labelled_df = pd.DataFrame({"subject": np.repeat("1", num_labelled_images),
                                "label": labels})
    labelled_df.to_csv(path_to_test_dataset / "dataset.csv", index=False)

    write_test_dicom(array=np.ones([256, 256], dtype="uint16"), path=path_to_test_dataset / "1.dcm")
| 25,266
|
def test_module(client, demisto_args: dict):
    """
    Verify OMWS connectivity by querying profile data for a well-known
    username; return 'ok' on a non-empty response, raise otherwise.
    """
    response = client.query_profile_data("maneenus")
    if not response:
        raise DemistoException("Incorrect or empty API response")
    return 'ok'
| 25,267
|
def docs(open_browser=True):
    """
    Generate Sphinx HTML documentation, including API docs.

    Args:
        open_browser: Open browser automatically after building docs.
            Fabric passes task arguments as strings, so truthy/falsy
            strings ('false', '0', 'no', '') are handled as well.
    """
    # Remove stale generated sources so sphinx-apidoc regenerates them.
    local('rm -f docs/python_boilerplate.rst')
    local('rm -f docs/modules.rst')
    local('rm -f docs/python_boilerplate*')
    local('sphinx-apidoc -o docs/ python_boilerplate')
    with lcd('docs'):
        local('make clean')
        local('make html')
    local('cp -rf docs/_build/html/ public/')
    # Bug fix: the original called `true(open_browser)`, but no `true`
    # function exists (NameError). Interpret common falsy strings too,
    # since fabric CLI arguments arrive as strings.
    if str(open_browser).lower() not in ('false', '0', 'no', ''):
        local('open public/index.html')
| 25,268
|
def as_string(raw_data):
    """Strip the trailing NUL byte from *raw_data* and return it as a bytearray.

    NOTE(review): despite the name, this returns ``bytearray``, not ``str``;
    callers appear to rely on the bytes-like result -- confirm before changing.
    """
    trimmed = raw_data[:-1]
    return bytearray(trimmed)
| 25,269
|
def init_suffix_tree(tld_file=None):
    """Initialise the module-level public-suffix tree; call this before lookups.

    Args:
        tld_file: Path to a public-suffix list file. Defaults to the
            ``public_suffix_list.txt`` bundled next to this module.
    """
    if tld_file is None:
        tld_file = os.path.join(os.path.dirname(__file__), 'public_suffix_list.txt')
    # `with` guarantees the handle is closed even if reading fails
    # (the original leaked the file object on an exception).
    with open(tld_file) as fp:
        suffix_lines = fp.readlines()
    suffix_rules = _tokenize(suffix_lines)
    global suffixtree
    suffixtree = PrefixTree(suffix_rules)
| 25,270
|
def plot_series_statistics(observed=None,
                           expected=None,
                           total_stdev=None,
                           explained_stdev=None,
                           color_set='Set2',
                           xscale="linear",
                           yscale="linear",
                           xlabel="feature",
                           ylabel="value",
                           y_cutoff=None,
                           sort_by='expected',
                           sort_ascending=True,
                           despine=True,
                           legend_enable=True,
                           legend_title=None,
                           legend_loc='best',
                           alpha=None,
                           markersize=1.0,
                           linewdith=1.2,
                           fontsize=8,
                           ax=None,
                           title=None,
                           return_handles=False,
                           return_indices=False):
  """ This function can plot 2 comparable series, and the
  scale are represented in 2 y-axes (major axis - left) and
  the right one

  Parameters
  ----------
  xscale, yscale : {"linear", "log", "symlog", "logit", ...}
      text or instance in `matplotlib.scale`

  despine : bool (default: True)
      if True, remove the top and right spines from plot,
      otherwise, only remove the top spine

  y_cutoff : scalar or None
      truncate the y-axis at this value (upper bound for linear scale,
      lower bound for log scale)

  Example
  -------
  >>> import numpy as np
  >>> from matplotlib import pyplot as plt
  >>> np.random.seed(1234)
  >>> x = np.random.randn(8000)
  >>> y = np.random.randn(8000)
  ...
  >>> z = np.random.rand(8000) + 3
  >>> w = np.random.rand(8000) + 3
  ...
  >>> ax, handles1 = V.plot_series_statistics(observed=x, expected=y,
  ...                                         explained_stdev=np.std(x),
  ...                                         total_stdev=np.std(y),
  ...                                         color_set='Set1',
  ...                                         legend_enable=False, legend_title="Series_1",
  ...                                         return_handles=True)
  >>> _, handles2 = V.plot_series_statistics(observed=z, expected=w,
  ...                                        explained_stdev=np.std(z),
  ...                                        total_stdev=np.std(w),
  ...                                        color_set='Set2',
  ...                                        legend_enable=False, legend_title="Series_2",
  ...                                        return_handles=True,
  ...                                        ax=ax.twinx(), alpha=0.2)
  >>> plt.legend(handles=handles1 + handles2, loc='best', fontsize=8)
  """
  import seaborn
  # Import the submodule explicitly: `import matplotlib` alone does not
  # guarantee `matplotlib.patches` is loaded.
  from matplotlib import patches as mpl_patches
  ax = to_axis2D(ax)
  observed, expected, total_stdev, explained_stdev = _preprocess_series(
      observed, expected, total_stdev, explained_stdev)
  # ====== color palette ====== #
  if isinstance(color_set, (tuple, list)):
    observed_color, expected_color, \
    expected_total_standard_deviations_color, \
    expected_explained_standard_deviations_color = color_set
  else:
    standard_palette = seaborn.color_palette(color_set, 8)
    observed_color = standard_palette[0]
    expected_palette = seaborn.light_palette(standard_palette[1], 5)
    expected_color = expected_palette[-1]
    expected_total_standard_deviations_color = expected_palette[1]
    expected_explained_standard_deviations_color = expected_palette[3]
  # ====== prepare ====== #
  sort_indices = _get_sort_indices(observed, expected, sort_by, sort_ascending)
  # ====== plotting expected and observed ====== #
  indices = np.arange(
      len(observed) if observed is not None else len(expected)) + 1
  handles = []
  # ====== series title ====== #
  if legend_title is not None:
    # Invisible artist whose label acts as a legend section header.
    _, = ax.plot([],
                 marker='None',
                 linestyle='None',
                 label="$%s$" % legend_title)
    handles.append(_)
  # ====== plotting expected and observed ====== #
  if observed is not None:
    _, = ax.plot(indices,
                 observed[sort_indices],
                 label="Observations",
                 color=observed_color,
                 linestyle="",
                 marker="o",
                 zorder=2,
                 markersize=markersize)
    handles.append(_)
  if expected is not None:
    _, = ax.plot(indices,
                 expected[sort_indices],
                 label="Expectation",
                 color=expected_color,
                 linestyle="-",
                 marker="",
                 zorder=3,
                 linewidth=linewdith)
    handles.append(_)
  # ====== plotting stdev ====== #
  if total_stdev is not None:
    lower = expected - total_stdev
    upper = expected + total_stdev
    ax.fill_between(
        indices,
        lower[sort_indices],
        upper[sort_indices],
        color=expected_total_standard_deviations_color,
        zorder=0,
        alpha=alpha,
    )
    # Proxy artist: fill_between returns a PolyCollection that cannot be
    # used directly as a legend handle.
    _ = mpl_patches.Patch(label="Stdev(Total)",
                          color=expected_total_standard_deviations_color)
    handles.append(_)
  if explained_stdev is not None:
    lower = expected - explained_stdev
    upper = expected + explained_stdev
    ax.fill_between(
        indices,
        lower[sort_indices],
        upper[sort_indices],
        color=expected_explained_standard_deviations_color,
        zorder=1,
        alpha=alpha,
    )
    _ = mpl_patches.Patch(
        label="Stdev(Explained)",
        color=expected_explained_standard_deviations_color)
    handles.append(_)
  # ====== legend ====== #
  if legend_enable:
    ax.legend(handles=handles, loc=legend_loc, fontsize=fontsize)
  # ====== adjusting ====== #
  if bool(despine):
    seaborn.despine(top=True, right=True)
  else:
    seaborn.despine(top=True, right=False)
  # Bug fix: `nonposy` was renamed to `nonpositive` in matplotlib 3.3 and
  # later removed; also, only log-like scales accept the clipping kwarg.
  try:
    ax.set_yscale(yscale, nonpositive="clip")
  except (TypeError, ValueError):
    try:
      ax.set_yscale(yscale, nonposy="clip")
    except (TypeError, ValueError):
      ax.set_yscale(yscale)
  ax.set_ylabel('[%s]%s' % (yscale, ylabel), fontsize=fontsize)
  ax.set_xscale(xscale)
  ax.set_xlabel('[%s]%s%s' %
                (xscale, xlabel, ' (sorted by "%s")' %
                 str(sort_by).lower() if sort_by is not None else ''),
                fontsize=fontsize)
  # ====== set y-cutoff ====== #
  y_min, y_max = ax.get_ylim()
  if y_cutoff is not None:
    if yscale == "linear":
      y_max = y_cutoff
    elif yscale == "log":
      y_min = y_cutoff
  ax.set_ylim(y_min, y_max)
  ax.tick_params(axis='both', labelsize=fontsize)
  # ====== title ====== #
  if title is not None:
    ax.set_title(title, fontsize=fontsize, fontweight='bold')
  ret = [ax]
  if return_handles:
    ret.append(handles)
  if return_indices:
    ret.append(sort_indices)
  return ax if len(ret) == 1 else tuple(ret)
| 25,271
|
def generate_trapezoid_profile(max_v, time_to_max_v, dt, goal):
    """Create a trapezoidal motion profile with the given constraints.

    Keyword arguments:
    max_v -- maximum velocity of the profile
    time_to_max_v -- time from rest to maximum velocity
    dt -- timestep
    goal -- final position when the profile is at rest

    Returns:
    t_rec -- list of timestamps
    x_rec -- list of positions at each timestep
    v_rec -- list of velocities at each timestep
    a_rec -- list of accelerations at each timestep
    """
    accel = max_v / time_to_max_v
    cruise_duration = goal / max_v - time_to_max_v
    if max_v * time_to_max_v > goal:
        # Short (triangular) profile: max velocity is never reached.
        ramp_time = math.sqrt(goal / accel)
        decel_start = ramp_time
        total_time = 2.0 * ramp_time
        peak_v = accel * ramp_time
    else:
        ramp_time = time_to_max_v
        decel_start = ramp_time + cruise_duration
        total_time = decel_start + ramp_time
        peak_v = max_v
    t_rec, x_rec, v_rec, a_rec = [0.0], [0.0], [0.0], [0.0]
    while t_rec[-1] < total_time:
        t = t_rec[-1] + dt
        t_rec.append(t)
        if t < ramp_time:
            # Ramp up at constant acceleration.
            a_rec.append(accel)
            v_rec.append(accel * t)
        elif t < decel_start:
            # Cruise at peak velocity.
            a_rec.append(0.0)
            v_rec.append(peak_v)
        elif t < total_time:
            # Ramp down symmetrically.
            a_rec.append(-accel)
            v_rec.append(peak_v - accel * (t - decel_start))
        else:
            a_rec.append(0.0)
            v_rec.append(0.0)
        # Integrate position with the velocity just appended.
        x_rec.append(x_rec[-1] + v_rec[-1] * dt)
    return t_rec, x_rec, v_rec, a_rec
| 25,272
|
def read_trigger_config(filename):
    """
    filename: the name of a trigger configuration file

    Returns: a list of trigger objects specified by the trigger configuration
    file. (Trigger construction is still TODO; currently only parses and
    prints the relevant lines.)
    """
    # Read the file, dropping blank lines and '//' comment lines.
    # `with` ensures the handle is closed (the original never closed it).
    lines = []
    with open(filename, 'r') as trigger_file:
        for line in trigger_file:
            line = line.rstrip()
            if not (len(line) == 0 or line.startswith('//')):
                lines.append(line)
    # TODO: Problem 11
    # `lines` is the list of lines that you need to parse and for which you
    # need to build triggers.
    print(lines)
| 25,273
|
def list_graphs(NextToken=None, MaxResults=None):
    """
    Return the behavior graphs that the calling account is a master of.
    This operation can only be called by a master account; because an account
    can currently be the master of at most one behavior graph per Region, the
    result always contains a single graph.

    See also: AWS API Documentation

    :type NextToken: string
    :param NextToken: Pagination token returned with the previous page of
        results; omit on the initial request.

    :type MaxResults: integer
    :param MaxResults: Maximum number of graphs to return per page. The total
        must be less than the overall result limit, currently 200.

    :rtype: dict
    :return: ``{'GraphList': [{'Arn': 'string',
        'CreatedTime': datetime(2015, 1, 1)}, ...], 'NextToken': 'string'}``
        where ``Arn`` is the behavior graph's ARN, ``CreatedTime`` is its
        creation time (milliseconds since the epoch), and ``NextToken`` is
        present when more graphs remain.

    :raises: Detective.Client.exceptions.InternalServerException,
        Detective.Client.exceptions.ValidationException
    """
    # Generated documentation stub -- intentionally has no implementation.
    pass
| 25,274
|
def main(args):
    """
    Main function of PyGalGen generator

    Parameters
    ----------
    args : list of command line arguments:

    Returns
    -------
    Error code of the executed pipeline
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = define_default_params()
    pipeline = PipelineExecutor(parser)
    logging.info("Created pipeline executor")

    # Built-in plugins shipped with the package.
    path_to_default = res.files(pygalgen.generator.default_plugins)
    default_plugins = discover_plugins(path_to_default)
    logging.info(f"Discovered {len(default_plugins)} default"
                 f" plugin{'' if len(default_plugins) == 1 else 's'}")

    # User-supplied plugins from the path given on the command line.
    plugin_path = obtain_plugins_path(args)
    custom_plugins = discover_plugins(plugin_path)
    # Bug fix: pluralisation previously keyed off len(default_plugins).
    logging.info(f"Discovered {len(custom_plugins)} custom"
                 f" plugin{'' if len(custom_plugins) == 1 else 's'}")
    result = pipeline.execute_pipeline(default_plugins +
                                       custom_plugins)
    return result
| 25,275
|
def query_pypi(spec_pk):
    """Fetch PyPI metadata for one Spec and persist the result on the model.

    Returns the raw package data dict, or {} when PyPI returned nothing
    (in which case the spec is flagged with status='error').
    """
    spec = Spec.objects.get(pk=spec_pk)
    logger.debug('[PYPI] Fetching data for %s' % spec)
    info = PyPI().get_info(spec.name, spec.version)
    if not info:
        # Nothing came back: mark the spec as errored and bail out.
        logger.debug('[PYPI] Errored %s ' % spec)
        spec.status = 'error'
        spec.save(update_fields=['status', 'updated_at'])
        return {}
    # Persist only the fields this model tracks.
    spec.release_date = info['last_release_date']
    spec.python_versions = info['py3_versions']
    spec.save(update_fields=['release_date', 'python_versions', 'updated_at'])
    logger.debug('[PYPI] Finished %s ' % spec)
    return info
| 25,276
|
def plot_results(results, time_axis, filename, molecule_name):
    """
    Plot the results of the end-to-end-distance test, lever angle test and
    twist tests, when the corresponding arrays in `results` are non-empty,
    saving one pdf per test to the working directory.

    In: results, a dictionary containing a number of named 1-d arrays created
    in run_sequential_tests(), time_axis, the times of each framenumber (also
    created in run_sequential_tests), the string to prefix the filenames with,
    and a string containing the molecule name (shows up on the plots).
    Out: nothing -- side effects only.
    """
    def _save(series, title_suffix, ylabel, file_suffix):
        # One series per figure; cla() resets the axes for the next plot.
        plot.xlabel("Time (s)")
        plot.plot(time_axis, series)
        plot.title(molecule_name + title_suffix)
        plot.ylabel(ylabel)
        plot.savefig(filename + file_suffix, format="pdf")
        plot.cla()

    plot.xlabel("Time (s)")
    if results["dist_trajectory"].any():
        _save(results["dist_trajectory"], " End-to-end Distance",
              "End-to-end distance (m)", "_endtoend.pdf")
    if results["lever_angle_trajectory"].any():
        _save(results["lever_angle_trajectory"], " Lever Angle Evolution",
              "Lever angle (rads)", "_leverangle.pdf")
    # Twist amount takes precedence over raw twist angles.
    if results["twist_amount"].any():
        _save(results["twist_amount"], " Twist Amount",
              "Twist amount (rads/m)", "_twist_amount.pdf")
    elif results["twist_angles"].any():
        _save(results["twist_angles"], " Twist Angle",
              "Twist angle (rads)", "_twist_angles.pdf")
    plot.close()
    return
| 25,277
|
def get_logger(name: str,
               format_str: str = aps_logger_format,
               date_format: str = aps_time_format,
               file: bool = False) -> logging.Logger:
    """
    Get logger instance

    Args:
        name: logger name (also used as the log-file path when file=True)
        format_str|date_format: to configure logging format
        file: if true, treat name as the name of the logging file
    Returns:
        A logger writing to stdout, plus the file when requested.
    """

    def get_handler(handler):
        # Apply the shared level and formatter to any handler type.
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter(fmt=format_str, datefmt=date_format)
        handler.setFormatter(formatter)
        return handler

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Bug fix: the stream handler previously bypassed get_handler(), so
    # console output ignored format_str/date_format entirely.
    logger.addHandler(get_handler(logging.StreamHandler()))
    # both stdout & file
    if file:
        logger.addHandler(get_handler(logging.FileHandler(name)))
    return logger
| 25,278
|
def to_tensor(args, device=None):
    """Convert an argument or a sequence of arguments to torch Tensors.

    Tensors pass through unchanged; numeric values are wrapped, with float64
    ndarrays downcast to float32. Returns a single tensor when given a single
    (non-list/tuple) argument, otherwise a list of tensors.
    """
    is_single = not isinstance(args, (list, tuple))
    items = [args] if is_single else args
    converted = []
    for item in items:
        if isinstance(item, torch.Tensor):
            converted.append(item)
        elif _is_numeric(item):
            if isinstance(item, np.ndarray) and item.dtype == np.float64:
                # Downcast double-precision arrays to the float32 working dtype.
                converted.append(
                    torch.tensor(item, dtype=torch.float32, device=device)
                )
            else:
                converted.append(torch.tensor(item, device=device))
        else:
            raise ValueError("Received non-numeric argument ", item)
    return converted[0] if is_single else converted
| 25,279
|
def main(self):
    """
    to run:

    kosmos 'j.data.bcdb.test(name="meta_test")'

    Exercises BCDB schema/meta handling: registers a new schema, checks
    md5-based deduplication of identical schema text, and verifies that
    data written through one model instance is visible through another
    model for the same schema.

    NOTE(review): the expected url counts (7 base urls, 8 after adding one
    schema) reflect the base schemas shipped with BCDB -- confirm against
    the deployed set if these asserts start failing.
    """
    bcdb, _ = self._load_test_model()
    # Fresh test model: no objects, 7 pre-registered schema urls expected.
    assert len(bcdb.get_all()) == 0
    assert len(bcdb.meta._data["url"]) == 7
    s = list(j.data.schema._url_to_md5.keys())
    assert "despiegk.test" in s
    m = bcdb.model_get(url="despiegk.test")
    schema_text = """
    @url = jumpscale.schema.test.a
    category**= ""
    txt = ""
    i = 0
    """
    s = bcdb.schema_get(schema=schema_text)
    assert s.properties_unique == []
    # Registering the schema should add exactly one url to the meta data.
    bcdb.meta._schema_set(s)
    assert len(bcdb.meta._data["url"]) == 8
    assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
    assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
    schema = bcdb.model_get(url="jumpscale.schema.test.a")
    o = schema.new()
    # Instantiating an object must not change the registered urls.
    assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
    assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
    s0 = bcdb.schema_get(url="jumpscale.schema.test.a")
    s0md5 = s0._md5 + ""
    model = bcdb.model_get(schema=s0)
    assert bcdb.get_all() == []  # just to make sure its empty
    assert len(bcdb.meta._data["url"]) == 8
    # Save two objects through the first model instance.
    a = model.new()
    a.category = "acat"
    a.txt = "data1"
    a.i = 1
    a.save()
    a2 = model.new()
    a2.category = "acat2"
    a2.txt = "data2"
    a2.i = 2
    a2.save()
    assert len([i for i in model.index.model.find()]) == 2
    myid = a.id + 0
    assert a._model.schema._md5 == s0md5
    # Re-registering identical schema text must dedupe on md5 (no new url).
    s_temp = bcdb.schema_get(schema=schema_text)
    assert len(bcdb.meta._data["url"]) == 8  # should be same because is same schema, should be same md5
    assert s_temp._md5 == s0._md5
    # A second model built from the same schema text sees the same data.
    s2 = bcdb.schema_get(schema=schema_text)
    model2 = bcdb.model_get(schema=s2)
    assert len(bcdb.meta._data["url"]) == 8  # acl, user, circle, despiegktest and the 1 new one
    a3 = model2.new()
    a3.category = "acat3"
    a3.txt = "data3"
    a3.i = 3
    a3.save()
    assert a3.i == 3.0
    assert a2.i == 2  # int
    assert len(model2.find()) == 3  # needs to be 3 because model is for all of them
    assert len(model.find()) == 3  # needs to be 3 because model is for all of them
    all = model2.find()
    print(all)
    # Objects fetched via either model instance must compare equal.
    a4 = model2.get(all[0].id)
    a4_ = model.get(all[0].id)
    assert a4_ == a4
    a5 = model2.get(all[1].id)
    a6 = model.get(all[2].id)
    a6_ = model.get(all[2].id)
    assert a6_ == a6
    assert a6.id == a3.id
    assert a6.i == a3.i
    self._log_info("TEST META DONE")
    return "OK"
| 25,280
|
def test_parse_fails(py_c_token) -> None:
    """Test various forms of invalid syntax to ensure they indeed fail.

    Each t() call below feeds one malformed keyvalues document to the
    parser and requires it to raise KeyValError.
    """
    # py_c_token presumably parametrizes over the Python/C tokenizer
    # implementations (fixture defined elsewhere) -- confirm in conftest.
    def t(text):
        """Test a string to ensure it fails parsing with KeyValError."""
        try:
            result = Property.parse(text)
        except KeyValError:
            pass
        else:
            pytest.fail("Successfully parsed bad text ({!r}) to {!r}".format(
                text,
                result,
            ))
    # Bare text at end of file
    t('''\
    regular text. with sentences.
    ''')
    # Bare text in the middle
    t('''\
    regular text. with sentences.
    "blah" "value"
    ''')
    t('''\
    "Ok block"
    {
    "missing" //value
    }
    ''')
    # Test block without a block
    t('''\
    "block1"
    "no_block"
    ''')
    # Test block expecting a {
    t('''\
    "block"
    {
    "blsh" "Val"
    }
    "block1"
    ''')
    # Test characters before a keyvalue
    t('''\
    bbhf "text before"
    "key" "value
    ''')
    t('''
    "text" bl "between"
    "key" "value
    ''')
    # Test text after the keyvalue
    t('''\
    "text" "value" blah
    "key" "value
    ''')
    # Test quotes after the keyvalue
    t('''
    "text" "with extra" "
    ''')
    t('''
    "multi" "line
    text with
    multiple
    quotes" "
    ''')
    # Test a flag without ] at end
    t('''
    "Name" "value" [flag
    ''')
    # Test a flag with values after the bracket.
    t('''
    "Name" "value" [flag ] hi
    ''')
    # Test too many closing brackets
    t('''
    "Block"
    {
    "Opened"
    {
    "Closed" "value"
    }
    }
    }
    "More text" "value"
    ''')
    # Test property with a value and block
    t('''
    "Block" "value"
    {
    "Name" "value"
    }
    ''')
    # Test '/' in text by itself (not a comment!)
    t('''\
    "Block"
    {
    "Name" / "Value"
    {
    }
    }
    ''')
    # Test unterminated strings
    t('''\
    "Block"
    {
    "blah
    }
    ''')
    # Test unterminated string with '\' at the end
    t('''"Blah \\''')
    # Test too many open brackets
    t('''\
    "Block"
    {
    "Key" "Value"
    "Block"
    {
    {
    "Key" "value"
    }
    }
    ''')
    # Too many open blocks.
    t('''\
    "Block"
    {
    "Key" "Value"
    "Block2"
    {
    "Key" "Value"
    }
    ''')
    t('''\
    "Key" "value
    which is multi-line
    and no ending.
    ''')
    # Test a key and value split over a line.
    t('''\
    "block"
    {
    "key" "value"
    "key"
    "value"
    }
    ''')
| 25,281
|
def draw_grid(pygame_window: pygame.Surface, grid: List[List[Vertex]],
              num_rows: int, grid_width: int) -> None:
    """Render the complete grid (every node plus the gridlines) onto
    pygame_window for the given num_rows and grid_width, then refresh the
    display.
    """
    # Wipe the previous frame before drawing.
    pygame_window.fill(THECOLORS['white'])
    for vertex_row in grid:
        for vertex in vertex_row:
            vertex.draw_node(pygame_window)
    draw_gridlines(pygame_window, num_rows, grid_width)
    pygame.display.update()
| 25,282
|
def test_multiple_inputs():
    """
    Build a VectorSpacesDataset with two input sources (features0 and
    features1) and train an MLP consuming both for a single epoch.
    """
    # Two parallel linear layers, cross-wired via the routing dict.
    composite = CompositeLayer(
        'composite',
        [Linear(10, 'h0', 0.1),
         Linear(10, 'h1', 0.1)],
        {0: [1], 1: [0]}
    )
    mlp = MLP(
        layers=[FlattenerLayer(composite),
                Softmax(5, 'softmax', 0.1)],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1')
    )
    # Note: the dataset lists sources as (features1, features0, targets),
    # with sizes matched to the spaces below.
    data = (np.random.rand(20, 20).astype(theano.config.floatX),
            np.random.rand(20, 15).astype(theano.config.floatX),
            np.random.rand(20, 5).astype(theano.config.floatX))
    space = CompositeSpace([VectorSpace(20),
                            VectorSpace(15),
                            VectorSpace(5)])
    dataset = VectorSpacesDataset(
        data, (space, ('features1', 'features0', 'targets')))
    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
| 25,283
|
def snake_head_only():
    """Return a Snake whose body is a single head segment at (x=7, y=4).

    |===========|
    |···········|
    |···········|
    |···········|
    |···········|
    |···········|
    |···········|
    |·······o···|
    |···········|
    |···········|
    |···········|
    |···········|
    |===========|
    """
    head_segment = {"x": 7, "y": 4}
    return Snake.from_dict(body=[head_segment])
| 25,284
|
def test_post_new_org_empty_params():
    """ POSTing a new org with empty name and short name must be rejected as
    a bad request, reporting both fields as invalid. """
    payload = {'name': '', 'short_name': ''}
    res = requests.post(
        f'{env.AWG_BASE_URL}{ORG_URL}',
        headers=utils.BASE_HEADERS,
        json=payload
    )
    body = res.content.decode()
    assert res.status_code == 400
    # Both offending parameters must be named in the error details.
    assert 'name' in body
    assert 'short_name' in body
    assert len(json.loads(body)['details']) == 2
    response_contains_json(res, 'message', 'Parameters were invalid')
| 25,285
|
def str_to_bool(s):
    """Convert a string value to its corresponding boolean value.

    Booleans pass through unchanged. Recognised strings (case-insensitive):
    'true'/'on'/'1' -> True and 'false'/'off'/'0' -> False.

    Raises:
        TypeError: if *s* is neither a bool nor a string.
        ValueError: if *s* is a string but not a recognised boolean value.
    """
    if isinstance(s, bool):
        return s
    # `six.string_types` replaced with `str`: the file already uses
    # f-strings elsewhere, so it is Python-3 only.
    elif not isinstance(s, str):
        raise TypeError('argument must be a string')

    true_values = ('true', 'on', '1')
    false_values = ('false', 'off', '0')

    lowered = s.lower()
    if lowered in true_values:
        return True
    elif lowered in false_values:
        return False
    else:
        # Bug fix: the original mixed %-style with str.format
        # ('...%s'.format(s)), so the offending value never appeared.
        raise ValueError('not a recognized boolean value: %s' % s)
| 25,286
|
def id_feat_pred_mz_rt(cursor, mz, rt, ccs, tol_mz, tol_rt, tol_ccs, esi_mode, norm='l2'):
    """
id_feat_pred_mz_rt
    description:
        identifies a feature on the basis of predicted m/z and retention time
    parameters:
        cursor (sqlite3.Cursor) -- cursor for querying lipids.db
        mz (float) -- m/z to match
        rt (float) -- retention time to match
        ccs (float) -- CCS to match (NOTE(review): not used by this
                       implementation beyond the signature -- confirm intent)
        tol_mz (float) -- tolerance for m/z
        tol_rt (float) -- tolerance for retention time
        tol_ccs (float) -- tolerance for CCS (passed through to get_score)
        esi_mode (str) -- filter results by ionization mode: 'neg', 'pos', or None for unspecified
        [norm (str)] -- specify l1 or l2 norm for computing scores
                        (NOTE(review): currently unused here; presumably
                        consumed by get_score elsewhere -- confirm)
                        [optional, default='l2']
    returns:
        (list(str) or str), (str), (list(float)) -- putative identification(s)
            (or '' for no matches), identification level ('pred_mz_rt', or ''
            for no matches), and one score per identification
    """
    # Join predicted m/z and rt tables on theoretical compound id, windowed
    # on both dimensions; adduct polarity filter added per ESI mode.
    qry = 'SELECT name, adduct, mz, rt FROM predicted_mz JOIN predicted_rt ON ' \
        + 'predicted_mz.t_id=predicted_rt.t_id WHERE mz BETWEEN ? AND ? AND rt BETWEEN ? and ?'
    if esi_mode == 'pos':
        qry += ' AND adduct LIKE "%+"'
    elif esi_mode == 'neg':
        qry += ' AND adduct LIKE "%-"'

    mz_min = mz - tol_mz
    mz_max = mz + tol_mz
    rt_min = rt - tol_rt
    rt_max = rt + tol_rt

    putative_ids, putative_scores = [], []
    for name, adduct, mz_x, rt_x in cursor.execute(qry, (mz_min, mz_max, rt_min, rt_max)).fetchall():
        putative_ids.append('{}_{}'.format(name, adduct))
        putative_scores.append(get_score(tol_mz, tol_rt, tol_ccs, mz_q=mz, rt_q=rt, mz_x=mz_x, rt_x=rt_x))

    if putative_ids:
        return putative_ids, 'pred_mz_rt', putative_scores
    else:
        return '', '', []
| 25,287
|
def generate(env):
    """Add Builders and construction variables to the Environment."""
    # Locate the jal compiler so the $JAL command templates below resolve.
    env["JAL"] = _detect(env)
    env.SetDefault(
        # Additional command-line flags
        JAL_FLAGS=SCons.Util.CLVar("-quiet"),

        # Suffixes/prefixes
        JAL_SUFFIX=".jal",
        JAL_ASMSUFFIX=".asm",
        JAL_CODSUFFIX=".cod",
        JAL_HEXSUFFIX=".hex",

        # JAL commands: each variant disables the outputs it does not produce.
        JAL_COM="$JAL $JAL_FLAGS $SOURCES",
        JAL_COMSTR="",
        JAL_ASMCOM="$JAL $JAL_FLAGS -no-codfile -no-hex -asm $TARGET $SOURCE",
        JAL_ASMCOMSTR="",
        JAL_CODCOM="$JAL $JAL_FLAGS -no-asm -no-hex -codfile $TARGET $SOURCE",
        JAL_CODCOMSTR="",
        JAL_HEXCOM="$JAL $JAL_FLAGS -no-asm -no-codfile -hex $TARGET $SOURCE",
        JAL_HEXCOMSTR="",
    )
    # Expose env.Jal(); AddMethod only exists on SCons >= 0.98, so fall back
    # to patching the SConsEnvironment class on older versions.
    try:
        env.AddMethod(Jal, "Jal")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.Jal = Jal
    # Register the per-output-type builders.
    env["BUILDERS"]["JalAsm"] = __jal_asm_builder
    env["BUILDERS"]["JalCod"] = __jal_cod_builder
    env["BUILDERS"]["JalHex"] = __jal_hex_builder
| 25,288
|
def help(user_display_name, module_file_fullpath, module_name):
    """Generate the help message listing all actions usable in the job.

    Scans the sibling .py files of the given module, imports each one, and
    collects its `_help_desc` string into a formatted overview.

    Args:
        user_display_name: bot display name used in the "@name" usage hint;
            when falsy, the mention prefix is omitted.
        module_file_fullpath: __file__ of the package's help module.
        module_name: dotted module name of the help module.

    Returns:
        The formatted help message as a unicode string.
    """
    my_path = os.path.dirname(module_file_fullpath)
    my_fname = os.path.basename(module_file_fullpath)
    my_package = module_name.rsplit(u'.')[-2] # ex: sayhello
    my_package_path = module_name.rsplit(u'.', 1)[-2] # ex: wechatbot.sayhello
    help_msg = u'Actions in "%s":\n========\n' % (my_package)
    for action_py in os.listdir(my_path):
        action_name = u''
        action_desc = u''
        # Skip non-file
        if not os.path.isfile(os.path.join(my_path, action_py)):
            continue
        # Skip self
        if action_py == my_fname:
            continue
        # Entries starting with "__" (e.g. __init__.py)
        # (raw strings fix the invalid '\.' escape warnings of the original)
        if re.findall(r'^__.+', action_py):
            continue
        # Hidden entries starting with "."
        if re.findall(r'^\..*', action_py):
            continue
        action_name = re.sub(r'\.py$', u'', action_py)
        # Load action module
        action_module_path = u'%s.%s' % (my_package_path, action_name)
        action_from_path = my_package_path
        # Import the action module; Exception (not a bare except) so that
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            action_module = __import__(
                action_module_path, fromlist = [action_from_path])
        except Exception:
            print(u"Cannot import %s." % (action_module_path), file = sys.stderr)
            continue
        # Get Job description
        try:
            action_desc = action_module._help_desc
        except Exception:
            action_desc = u'[no description]'
            print(u"No _help_desc for %s." % (action_module_path), file = sys.stderr)
        # Arrange action_name and action_desc in help_msg
        help_msg += u'> %s\n\t%s\n' % (action_name, action_desc)
    # Tail messages
    help_msg += u'========\nTo get detailed usage for\neach action, try:\n'
    if user_display_name:
        help_msg += u'@%s\u2005%s <action> -h' % (user_display_name, my_package)
    else:
        help_msg += u'%s <action> -h' % (my_package)
    return help_msg
| 25,289
|
def Arrow_bg(self):
    """
    Build the background surface for the dropdown arrow button, including the
    up/down arrow polygon. For internal use only; this function is therefore
    also not imported by __init__.py.
    """
    # Background: default when no colour data is set, accent while expanded.
    if not self.func_data:
        surface = self.Make_background_surface(None)
    elif self.value:
        surface = self.Make_background_surface(self.func_data["__accent_bg"])
    else:
        surface = self.Make_background_surface(self.func_data["__bg"])
    # Pre-compute the scaled corner coordinates of the arrow triangle.
    left_x = self.scaled(self.width * 1/6)
    mid_x = self.scaled(self.width * 1/2)
    right_x = self.scaled(self.width * 5/6)
    upper_y = self.scaled(self.height * 1/3)
    lower_y = self.scaled(self.height * 2/3)
    # The arrow points down while collapsed and up while expanded.
    if not self.value:
        arrow_coords = (
            (left_x, upper_y),   # Top left
            (mid_x, lower_y),    # Bottom
            (right_x, upper_y),  # Top right
        )
    else:
        arrow_coords = (
            (left_x, lower_y),   # Bottom left
            (mid_x, upper_y),    # Top
            (right_x, lower_y),  # Bottom right
        )
    pygame.draw.polygon(surface, self.border[0] if self.border else (63, 63, 63), arrow_coords)
    return surface
| 25,290
|
def _is_debugging(ctx):
"""Returns `True` if the current compilation mode produces debug info.
rules_apple specific implementation of rules_swift's `is_debugging`, which
is not currently exported.
See: https://github.com/bazelbuild/rules_swift/blob/44146fccd9e56fe1dc650a4e0f21420a503d301c/swift/internal/api.bzl#L315-L326
"""
return ctx.var["COMPILATION_MODE"] in ("dbg", "fastbuild")
| 25,291
|
def plot_bars(dgm, order='birth', ax=None, bar_style=None):
    """
    Plot the barcode.
    adapted from "https://github.com/mrzv/dionysus"

    Parameters:
    ----------
    dgm: ndarray
        persistence barcode diagram
    order (str): How to sort the bars, either 'death' or 'birth'
        (Default: 'birth')
    ax (AxesSubplot): Axes that should be used for plotting (Default: None)
    bar_style: dict of arguments passed to `ax.plot` for the bar style.
        (Defaults: color='b')
    """
    style = {'color': 'b'}
    if bar_style is not None:
        style.update(bar_style)
    # Sort bars by death time (p[1]) or by birth time (p[0], the default).
    sort_key = (lambda p: p[1]) if order == 'death' else (lambda p: p[0])
    if ax is None:
        ax = plt.axes()
    # One horizontal line per bar, stacked by sorted index.
    for row, bar in enumerate(sorted(dgm, key=sort_key)):
        ax.plot([bar[0], bar[1]], [row, row], **style)
    plt.show()
| 25,292
|
def trick_them(gm, vm):
    """
    Scapy trickery #1: ARP cache poisoning.

    Sends forged ARP replies so that the victim believes the gateway's IP is
    at our MAC address and the gateway believes the victim's IP is at ours,
    routing both directions of traffic through this host.

    Args:
        gm: the gateway's real MAC address.
        vm: the victim's real MAC address.
    """
    # For each (target, spoofed source IP, target MAC) pair, send an ARP
    # reply (op=2) claiming `psrc` is at our MAC (hwsrc defaults to ours),
    # delivered to the real MAC `hwdst`.
    for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)):
        send(
            ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst)  # make pdst believe psrc lives at our MAC address
        )
| 25,293
|
def get_bounds_5km_to_1km( itk_5km, isc_5km ) :
    """
    Return the 1km pixel index limits (track min/max, scan min/max) of the
    footprint of the 5km pixel [ itk_5km, isc_5km ]
    """
    # Centre of the 5km pixel expressed in 1km grid coordinates.
    itk_1km = itk_5km_to_1km ( itk_5km )
    isc_1km = isc_5km_to_1km ( isc_5km )
    # Along-track: two 1km pixels on each side of the centre.
    itk_1km_min, itk_1km_max = itk_1km - 2, itk_1km + 2
    # Along-scan: same symmetric window in the general case
    # (2 interpolations done along scan: [isc-1, isc] then [isc, isc+1]).
    isc_1km_min, isc_1km_max = isc_1km - 2, isc_1km + 2
    # The last 5km pixel of a scan line is special: only 4 1km pixels fit in
    # the footprint in that direction, so the window extends further.
    if isc_5km == sz_sc_5km - 1 :
        isc_1km_max = isc_1km + 6
    return itk_1km_min, itk_1km_max, isc_1km_min, isc_1km_max
| 25,294
|
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
    """Build the batch-hard triplet loss over a batch of embeddings.

    For each anchor, the hardest (most distant) positive and the hardest
    (closest) negative are selected to form the triplet.

    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim)
        margin: margin for triplet loss
        squared: Boolean. If true, use pairwise squared euclidean distances;
            if false, use plain euclidean distances.

    Returns:
        triplet_loss: scalar tensor containing the triplet loss
    """
    dists = _pairwise_distances(embeddings, squared=squared)

    # Hardest positive per anchor: zero out invalid pairs (a == p or
    # label(a) != label(p)) and take the row-wise maximum distance.
    positive_mask = _get_anchor_positive_triplet_mask(labels).float()
    hardest_positive_dist = torch.max(positive_mask * dists, dim=1, keepdim=True).values

    # Hardest negative per anchor: shift invalid pairs (label(a) == label(n))
    # up by the row maximum so they can never win the minimum.
    negative_mask = _get_anchor_negative_triplet_mask(labels).float()
    row_max = torch.max(dists, dim=1, keepdim=True).values
    shifted = dists + row_max * (1.0 - negative_mask)
    hardest_negative_dist = torch.min(shifted, dim=1, keepdim=True).values

    # Hinge on the hardest pair per anchor, then average over the batch.
    per_anchor_loss = torch.relu(hardest_positive_dist - hardest_negative_dist + margin)
    return torch.mean(per_anchor_loss)
| 25,295
|
def vrt_scrambled(doc: Document = Document(),
                  out: Export = Export("vrt_scrambled/{doc}.vrt"),
                  chunk: Annotation = Annotation("[cwb.scramble_on]"),
                  chunk_order: Annotation = Annotation("[cwb.scramble_on]:misc.number_random"),
                  token: Annotation = Annotation("<token>"),
                  word: Annotation = Annotation("[export.word]"),
                  annotations: ExportAnnotations = ExportAnnotations("cwb.annotations"),
                  source_annotations: SourceAnnotations = SourceAnnotations("cwb.source_annotations"),
                  remove_namespaces: bool = Config("export.remove_module_namespaces", False),
                  sparv_namespace: str = Config("export.sparv_namespace"),
                  source_namespace: str = Config("export.source_namespace")):
    """Export annotations to vrt in scrambled order.

    The spans of the `chunk` annotation are shuffled according to
    `chunk_order` before the VRT is written to `out`; `chunk` must itself be
    part of the exported annotations, otherwise a SparvErrorMessage is raised.
    """
    # Get annotation spans, annotations list etc.
    annotation_list, token_attributes, export_names = util.get_annotation_names(annotations, source_annotations,
                                                                                doc=doc, token_name=token.name,
                                                                                remove_namespaces=remove_namespaces,
                                                                                sparv_namespace=sparv_namespace,
                                                                                source_namespace=source_namespace)
    if chunk not in annotation_list:
        raise util.SparvErrorMessage(
            "The annotation used for scrambling ({}) needs to be included in the output.".format(chunk))
    # split_overlaps keeps the spans well-nested after scrambling.
    span_positions, annotation_dict = util.gather_annotations(annotation_list, export_names, doc=doc,
                                                              split_overlaps=True)

    # Read words and document ID
    word_annotation = list(word.read())
    chunk_order_data = list(chunk_order.read())

    # Reorder chunks and open/close tags in correct order
    new_span_positions = util.scramble_spans(span_positions, chunk.name, chunk_order_data)

    # Make vrt format
    vrt_data = create_vrt(new_span_positions, token.name, word_annotation, token_attributes, annotation_dict,
                          export_names)

    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)

    # Write result to file
    with open(out, "w") as f:
        f.write(vrt_data)
    log.info("Exported: %s", out)
| 25,296
|
def categorical_sample_logits(logits):
    """
    Samples (symbolically) from categorical distribution, where logits is a NxK
    matrix specifying N categorical distributions with K categories

    specifically, exp(logits) / sum( exp(logits), axis=1 ) is the
    probabilities of the different classes

    Cleverly uses gumbell trick, based on
    https://github.com/tensorflow/tensorflow/issues/456
    """
    # Gumbel-max trick: adding -log(-log(U)) noise (U ~ Uniform(0,1)) to the
    # logits and taking the argmax yields an exact softmax-distributed sample.
    # NOTE(review): TF1-era API -- `tf.random_uniform` and the `dimension=`
    # kwarg became `tf.random.uniform` / `axis=` in TF2; confirm the project's
    # TF version before modernising.
    U = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1, name='sample_once')
| 25,297
|
def test_encode(args):
    """
    Test CLI encoding end-to-end using a subprocess.

    Runs the given command line inside a temporary directory and asserts
    that the tree output file is produced, plus the annotation file when
    the `-a` flag is present.
    """
    tree_path = Path("kissim.tree")
    annotation_path = Path("kinase_annotation.csv")
    args = args.split()

    with enter_temp_directory():
        subprocess.run(args, check=True)

        # Tree file there?
        assert tree_path.exists()

        # (Leftover debug print() calls removed.)
        if "-a" in args:
            # Annotation file there?
            assert annotation_path.exists()
| 25,298
|
def init_chm_test_session():
    """Yield the session's main test directory, removing it on teardown."""
    base_tmp = tempfile.gettempdir()
    # Setup: create (or replace) the main directory for this test session.
    parent_dir_path = create_dir(
        full_path=os.path.join(base_tmp, PARENT_DIR), on_conflict="replace"
    )
    yield parent_dir_path
    # Teardown: delete the entire main directory.
    delete_dir(parent_dir_path)
| 25,299
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.