| content (string, length 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'):
"""
Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in
the sanest and most readable way.
"""
if not isinstance(value, basestring):
value = repr(value)
if isinstance(value, str):
value = value.decode('utf-8')
style = None
multilines = '\n' in value
if multilines:
literal_style = '|'
style = literal_style
return dumper.represent_scalar(_tag, value, style=style)
| 23,400
|
def to_hours_from_seconds(value):
"""From seconds to rounded hours"""
return Decimal(math.ceil((value / Decimal(60)) / Decimal(60)))
| 23,401
|
def extract_named_geoms(sde_floodplains = None, where_clause = None,
clipping_geom_obj = None):
"""
Clips SDE flood delineations to the boundary of FEMA floodplain changes, and
then saves the geometry and DRAINAGE name to a list of dictionaries.
:param sde_floodplains: {str} The file path to the UTIL.Floodplains layer
:param where_clause: {str} The where clause used to isolate polygons of interest
:param clipping_geom_obj: {arc geom obj} The geometry object representing
the boundaries of the LOMR/FIRM update
:return: {list} [{"SHAPE@": <Poly obj>, "DRAINAGE": "drain name"},...]
"""
sde_fields = ['SHAPE@', 'DRAINAGE']
with arcpy.da.SearchCursor(sde_floodplains, sde_fields, where_clause) as sCurs:
named_geoms = []
geom = None
for row in sCurs:
# if clipper.contains(row[0].centroid) or row[0].overlaps(clipper):
geom = row[0].clip(clipping_geom_obj.extent)
named_geoms.append({'SHAPE@': geom, 'DRAINAGE': str(row[1])})
return named_geoms
| 23,402
|
def parser_train():
"""
Parse input arguments (train.py).
"""
parser = argparse.ArgumentParser(description='Standard + Adversarial Training.')
parser.add_argument('--augment', type=str2bool, default=True, help='Augment training set.')
parser.add_argument('--batch-size', type=int, default=128, help='Batch size for training.')
parser.add_argument('--batch-size-validation', type=int, default=256, help='Batch size for testing.')
parser.add_argument('--num-samples-eval', type=int, default=512, help='Number of samples to use for margin calculations.')
parser.add_argument('--data-dir', type=str, default='/cluster/scratch/rarade/data/')
parser.add_argument('--log-dir', type=str, default='/cluster/home/rarade/adversarial-hat/logs/')
parser.add_argument('--tmp-dir', type=str, default='/cluster/scratch/rarade/')
parser.add_argument('-d', '--data', type=str, default='cifar10', choices=DATASETS, help='Data to use.')
parser.add_argument('--desc', type=str, required=True,
help='Description of experiment. It will be used to name directories.')
parser.add_argument('-m', '--model', choices=MODELS, default='resnet18', help='Model architecture to be used.')
parser.add_argument('--normalize', type=str2bool, default=False, help='Normalize input.')
parser.add_argument('--pretrained-file', type=str, default=None, help='Pretrained weights file name.')
parser.add_argument('-ns', '--num-std-epochs', type=int, default=0, help='Number of standard training epochs.')
parser.add_argument('-na', '--num-adv-epochs', type=int, default=0, help='Number of adversarial training epochs.')
parser.add_argument('--adv-eval-freq', type=int, default=30, help='Adversarial evaluation frequency (in epochs).')
parser.add_argument('--h', default=2.0, type=float, help='Parameter h to compute helper examples (x + h*r) for HAT.')
parser.add_argument('--helper-model', type=str, default=None, help='Helper model weights file name for HAT.')
parser.add_argument('--beta', default=None, type=float, help='Stability regularization, i.e., 1/lambda in TRADES \
or weight of robust loss in HAT.')
parser.add_argument('--gamma', default=1.0, type=float, help='Weight of helper loss in HAT.')
parser.add_argument('--robust-loss', default='kl', choices=['ce', 'kl'], type=str, help='Type of robust loss in HAT.')
parser.add_argument('--lr', type=float, default=0.21, help='Learning rate for optimizer (SGD).')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='Optimizer (SGD) weight decay.')
parser.add_argument('--scheduler', choices=SCHEDULERS, default='cyclic', help='Type of scheduler.')
parser.add_argument('--nesterov', type=str2bool, default=True, help='Use Nesterov momentum.')
parser.add_argument('--clip-grad', type=float, default=None, help='Gradient norm clipping.')
parser.add_argument('-a', '--attack', type=str, choices=ATTACKS, default='linf-pgd', help='Type of attack.')
parser.add_argument('--attack-eps', type=str2float, default=8/255, help='Epsilon for the attack.')
parser.add_argument('--attack-step', type=str2float, default=2/255, help='Step size for PGD attack.')
parser.add_argument('--attack-iter', type=int, default=10, help='Max. number of iterations (if any) for the attack.')
parser.add_argument('--keep-clean', type=str2bool, default=False, help='Use clean samples during adversarial training.')
parser.add_argument('--debug', action='store_true', default=False,
help='Debug code. Run 1 epoch of training and evaluation.')
parser.add_argument('--exp', action='store_true', default=False,
help='Store results for performing margin and curvature experiments later.')
parser.add_argument('--mart', action='store_true', default=False, help='MART training.')
parser.add_argument('--unsup-fraction', type=float, default=0.5, help='Ratio of unlabelled data to labelled data.')
parser.add_argument('--aux-data-filename', type=str, help='Path to additional Tiny Images data.',
default='/cluster/scratch/rarade/cifar10s/ti_500K_pseudo_labeled.pickle')
parser.add_argument('--seed', type=int, default=1, help='Random seed.')
return parser
| 23,403
|
def convert_examples_to_features(examples: Sequence[InputExampleTC],
labels: List[str],
tokenizer: Any,
max_length: int = 512,
ignore_lbl_id: int = -100
) -> List[InputFeaturesTC]:
"""Converts sequence of ``InputExampleTC to list of ``InputFeaturesTC``.
Args:
examples (:obj:`list` of :obj:`InputExampleTC`): Sequence of
``InputExampleTC`` containing the examples to be converted to
features.
labels (:obj:`list` of :obj:`str`): Label names used to build the
label-to-id mapping applied to each example.
tokenizer (:obj): Instance of a transformer tokenizer that will
tokenize the example tokens and convert them to model specific ids.
max_length (int): the maximum length of the post-tokenized tokens and
the respective associated fields in an InputFeaturesTC. Sequences
longer will be truncated, sequences shorter will be padded.
This length includes any special tokens that must be added such
as [CLS] and [SEP] in BERT.
ignore_lbl_id (int, optional): a value of a label id to be ignored,
used for subword tokens. This is typically negative.
Usually, -1 or `torch.nn.CrossEntropy().ignore_index`.
Returns:
If the input is a list of ``InputExamplesTC``, will return
a list of task-specific ``InputFeaturesTC`` which can be fed to the
model.
"""
logger.info(f'Using label list {labels}')
label2id = {label: i for i, label in enumerate(labels)}
all_features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Converting example %d" % (ex_index))
feats, tks = convert_example_to_features(example=example,
label2id=label2id,
tokenizer=tokenizer,
max_length=max_length,
ignore_lbl_id=ignore_lbl_id)
if ex_index < 5:
log_example_features(example, feats, tks)
all_features.append(feats)
return all_features
| 23,404
|
def build_image(local_conda_channel, conda_env_file, container_tool, container_build_args=""):
"""
Build a container image from the Dockerfile in RUNTIME_IMAGE_PATH.
Returns a result code and the name of the new image.
"""
variant = os.path.splitext(conda_env_file)[0].replace(utils.CONDA_ENV_FILENAME_PREFIX, "", 1)
variant = variant.replace("-runtime", "")
image_name = REPO_NAME + ":" + IMAGE_NAME + "-" + variant
build_cmd = container_tool + " build "
build_cmd += "-f " + os.path.join(RUNTIME_IMAGE_PATH, "Dockerfile") + " "
build_cmd += "-t " + image_name + " "
build_cmd += "--build-arg OPENCE_USER=" + OPENCE_USER + " "
build_cmd += "--build-arg LOCAL_CONDA_CHANNEL=" + local_conda_channel + " "
build_cmd += "--build-arg CONDA_ENV_FILE=" + conda_env_file + " "
build_cmd += "--build-arg TARGET_DIR=" + TARGET_DIR + " "
build_cmd += container_build_args + " "
build_cmd += BUILD_CONTEXT
print("Container build command: ", build_cmd)
if os.system(build_cmd):
raise OpenCEError(Error.BUILD_IMAGE, image_name)
return image_name
| 23,405
|
def update_setup_cfg(setupcfg: ConfigUpdater, opts: ScaffoldOpts):
"""Update `pyscaffold` in setupcfg and ensure some values are there as expected"""
if "options" not in setupcfg:
template = templates.setup_cfg(opts)
new_section = ConfigUpdater().read_string(template)["options"]
setupcfg["metadata"].add_after.section(new_section.detach())
# Add "PyScaffold" section if missing and update saved extensions
setupcfg = templates.add_pyscaffold(setupcfg, opts)
return setupcfg, opts
| 23,406
|
def testable_renderable() -> CXRenderable:
"""
Provides a generic CXRenderable useful for testing the base class.
"""
chart: CanvasXpress = CanvasXpress(
render_to="canvasId",
data=CXDictData(
{
"y": {
"vars": ["Gene1"],
"smps": ["Smp1", "Smp2", "Smp3"],
"data": [[10, 35, 88]]
}
}
),
config=CXConfigs(
CXGraphType(CXGraphTypeOptions.Bar)
)
)
return SampleRenderable(chart)
| 23,407
|
def secondSolution( fixed, c1, c2, c3 ):
"""
Given four mutually tangent circles, calculate the other circle that is
tangent to the last three.
@param fixed: The fixed circle; it touches the other three, but not
the one to be calculated.
@param c1, c2, c3: The three circles to which the new tangent circle
is to be calculated.
"""
curf = fixed.curvature()
cur1 = c1.curvature()
cur2 = c2.curvature()
cur3 = c3.curvature()
curn = 2 * (cur1 + cur2 + cur3) - curf
mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m ) / curn
return Circle( mn.real, mn.imag, 1/curn )
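# A hedged, self-contained sketch of the Descartes circle relation the function
# encodes: k_new = 2*(k1 + k2 + k3) - k_fixed, with centers combined the same way
# via k*z for complex centers z. The Circle class below is a hypothetical stand-in
# (not the project's): .m is the complex center, .curvature() is 1/radius, and an
# enclosing circle is given a negative radius.
class Circle:
    def __init__(self, x, y, r):
        self.m = complex(x, y)   # center as a complex number
        self.r = r               # signed radius (negative = enclosing circle)
    def curvature(self):
        return 1.0 / self.r

outer = Circle(0.0, 0.0, -1.0)             # enclosing unit circle, curvature -1
left = Circle(-0.5, 0.0, 0.5)              # curvature 2
right = Circle(0.5, 0.0, 0.5)              # curvature 2
top = Circle(0.0, 2.0 / 3.0, 1.0 / 3.0)    # one of the two circles tangent to all three
bottom = secondSolution(top, outer, left, right)
print(bottom.m, bottom.r)                  # center (0, -2/3), radius 1/3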
| 23,408
|
def deploy_control_plane(operator_name, namespaces):
"""
Create OpenShift Service Mesh Control Plane using API POST calls:
"""
false = False
true = True
global REQ_HEADER
response = 0
count = 0
try:
#get_csv_name is used to update the OPERATOR_CSV_NAME global variable
get_csv_name(operator_name)
while isinstance(response, int):
response = check_service_creation(namespaces)
count += 1
uri = "https://" + operator_config.oc_host + "/apis/maistra.io/v1/namespaces/" + namespaces + \
"/servicemeshcontrolplanes"
payload = {
"apiVersion": "maistra.io/v1",
"kind": "ServiceMeshControlPlane",
"metadata": {
"name": "basic-install",
"namespace": namespaces
},
"spec": {
"istio": {
"gateways": {
"istio-egressgateway": {
"autoscaleEnabled": false
},
"istio-ingressgateway": {
"autoscaleEnabled": false
}
},
"mixer": {
"policy": {
"autoscaleEnabled": false
},
"telemetry": {
"autoscaleEnabled": false
}
},
"pilot": {
"autoscaleEnabled": false,
"traceSampling": 100
},
"kiali": {
"enabled": true
},
"grafana": {
"enabled": true
},
"tracing": {
"enabled": true,
"jaeger": {
"template": "all-in-one"
}
}
}
}
}
response_check = 0
while response_check not in (201, 409):
response = requests.post(uri, json=payload, verify=False, headers=REQ_HEADER)
response_check = response.status_code
if response.status_code == 201:
print("Created Service Mesh Control Plane")
elif response.status_code == 409:
print("Service Mesh exists: Conflict!")
except Exception as run_err:
print("deploy_control_plane: The exception '{}' has occured while deploying the control plane".format(run_err))
| 23,409
|
def collect3d(v1a,ga,v2a,use_nonan=True):
"""
set desired line properties
"""
v1a = np.real(v1a)
ga = np.real(ga)
v2a = np.real(v2a)
# remove nans for linewidth stuff later.
ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
if use_nonan:
sol = np.zeros((len(ga_nonan),3))
sol[:,0] = v1a_nonan
sol[:,1] = ga_nonan
sol[:,2] = v2a_nonan
else:
sol = np.zeros((len(ga),3))
sol[:,0] = v1a
sol[:,1] = ga
sol[:,2] = v2a
sol = np.transpose(sol)
points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3)
segs = np.concatenate([points[:-1],points[1:]],axis = 1)
line3d = Line3DCollection(segs,linewidths=(1.+(v1a_nonan)/(.001+np.amax(v1a_nonan))*6.),colors='k')
return line3d
| 23,410
|
def _DevNull():
"""On Windows, sometimes the inherited stdin handle from the parent process
fails. Work around this by passing the null device to stdin of the subprocess commands.
This function can be used to create the null file handle.
"""
return open(os.devnull, 'r')
| 23,411
|
def get_job_priorities(rest_url):
"""This retrieves priorities of all active jobs"""
url = urllib.parse.urljoin(rest_url, "/jobs/priorities")
resp = requests.get(url)
return resp.json()
| 23,412
|
def create_vertices_intrinsics(disparity, intrinsics):
"""3D mesh vertices from a given disparity and intrinsics.
Args:
disparity: [B, H, W] inverse depth
intrinsics: [B, 4] reference intrinsics
Returns:
[B, H*W, 3] vertex coordinates.
"""
# Focal lengths
fx = intrinsics[:, 0]
fy = intrinsics[:, 1]
fx = fx[Ellipsis, tf.newaxis, tf.newaxis]
fy = fy[Ellipsis, tf.newaxis, tf.newaxis]
# Centers
cx = intrinsics[:, 2]
cy = intrinsics[:, 3]
cx = cx[Ellipsis, tf.newaxis]
cy = cy[Ellipsis, tf.newaxis]
batch_size, height, width = disparity.shape.as_list()
vertex_count = height * width
i, j = tf.meshgrid(tf.range(width), tf.range(height))
i = tf.cast(i, tf.float32)
j = tf.cast(j, tf.float32)
width = tf.cast(width, tf.float32)
height = tf.cast(height, tf.float32)
# 0.5 is added to get the position of the pixel centers.
i = (i + 0.5) / width
j = (j + 0.5) / height
i = i[tf.newaxis]
j = j[tf.newaxis]
depths = 1.0 / tf.clip_by_value(disparity, 0.01, 1.0)
mx = depths / fx
my = depths / fy
px = (i-cx) * mx
py = (j-cy) * my
vertices = tf.stack([px, py, depths], axis=-1)
vertices = tf.reshape(vertices, (batch_size, vertex_count, 3))
return vertices
| 23,413
|
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Start with a cost above that of completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
_report_no_chall_path()
return best_combo
| 23,414
|
def run_generator_and_test(test_case,
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
num_initial_executions,
num_tasks_generated,
num_new_executions,
num_active_executions,
expected_exec_nodes=None,
ignore_node_ids=None):
"""Runs generator.generate() and tests the effects."""
if service_job_manager is None:
service_job_manager = service_jobs.DummyServiceJobManager()
with mlmd_connection as m:
executions = m.store.get_executions()
test_case.assertLen(
executions, num_initial_executions,
f'Expected {num_initial_executions} execution(s) in MLMD.')
tasks = run_generator(
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
ignore_node_ids=ignore_node_ids)
with mlmd_connection as m:
test_case.assertLen(
tasks, num_tasks_generated,
f'Expected {num_tasks_generated} task(s) to be generated.')
executions = m.store.get_executions()
num_total_executions = num_initial_executions + num_new_executions
test_case.assertLen(
executions, num_total_executions,
f'Expected {num_total_executions} execution(s) in MLMD.')
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
test_case.assertLen(
active_executions, num_active_executions,
f'Expected {num_active_executions} active execution(s) in MLMD.')
if expected_exec_nodes:
for i, task in enumerate(tasks):
_verify_exec_node_task(test_case, pipeline, expected_exec_nodes[i],
active_executions[i].id, task)
return tasks
| 23,415
|
def sort_func(kd1, kd2):
"""
Compares 2 key descriptions
:param kd1: First key description
:param kd2: Second key description
:return: -1, 0, or 1 depending on whether kd1 is less than, equal to, or greater than kd2
"""
_c = type_order(kd1, kd2)
if _c is not None:
return _c
return kid_order(kd1, kd2)
| 23,416
|
def _update_mf2000_files(srcdir, fc, cc, arch, double):
"""Update MODFLOW-2000 source files
Parameters
----------
srcdir : str
path to directory with source files
fc : str
fortran compiler
cc : str
c/c++ compiler
arch : str
architecture
double : bool
boolean indicating if compiler switches are used to build a
double precision target
Returns
-------
"""
# Remove six src folders
dlist = ["beale2k", "hydprgm", "mf96to2k", "mfpto2k", "resan2k", "ycint2k"]
for d in dlist:
dname = os.path.join(srcdir, d)
if os.path.isdir(dname):
print('Removing..."{}"'.format(dname))
shutil.rmtree(os.path.join(srcdir, d))
# Move src files and serial src file to src directory
tpth = os.path.join(srcdir, "mf2k")
files = [
f for f in os.listdir(tpth) if os.path.isfile(os.path.join(tpth, f))
]
for f in files:
shutil.move(os.path.join(tpth, f), os.path.join(srcdir, f))
tpth = os.path.join(srcdir, "mf2k", "serial")
files = [
f for f in os.listdir(tpth) if os.path.isfile(os.path.join(tpth, f))
]
for f in files:
shutil.move(os.path.join(tpth, f), os.path.join(srcdir, f))
# Remove mf2k directory in source directory
tpth = os.path.join(srcdir, "mf2k")
shutil.rmtree(tpth)
# modify the openspec.inc file to use binary instead of unformatted
fname = os.path.join(srcdir, "openspec.inc")
with open(fname) as f:
lines = f.readlines()
with open(fname, "w") as f:
for line in lines:
if " DATA FORM/'UNFORMATTED'/" in line:
line = "C DATA FORM/'UNFORMATTED'/\n"
if "C DATA FORM/'BINARY'/" in line:
line = " DATA FORM/'BINARY'/\n"
f.write(line)
return
| 23,417
|
def write_property(row_list, output_file, decl_id):
"""
@brief: Write property row to CSV file
@param row_list: List of rows which should be written to file
@param output_file: Output file handler
@param decl_id: Declaration id
"""
for item in row_list:
csv_writer(output_file).writerow([decl_id] + item)
| 23,418
|
def sumdigits(a: int):
"""Sum of the digits of an integer"""
return sum(map(int, str(a)))
| 23,419
|
def nth_weekday_of_month(y, m, n, w):
"""
y = 2020; m = 2
assert nth_weekday_of_month(y, m, -1, 'sat') == dt(2020, 2, 29)
assert nth_weekday_of_month(y, m, -2, 'sat') == dt(2020, 2, 22)
assert nth_weekday_of_month(y, m, 1, 'sat') == dt(2020, 2, 1)
assert nth_weekday_of_month(y, m, 1, 'sun') == dt(2020, 2, 2)
assert nth_weekday_of_month(y, m, 1, 'monday') == dt(2020, 2, 3)
assert nth_weekday_of_month(y, 'G', 3, 'sat') == dt(2020, 2, 15)
assert nth_weekday_of_month(y, 'G', 3, 'sun') == dt(2020, 2, 16)
assert nth_weekday_of_month(y, 'G', 3, 'monday') == dt(2020, 2, 17)
"""
if n < 0 :
return nth_weekday_of_month(y, m+1, 1, w) + datetime.timedelta(7 * n)
t = dt(y, m , 1)
bump = wkdays[w[:3].lower()] - t.weekday()
if bump < 0:
bump = bump + 7
bump = bump + (n-1) * 7
res = t + datetime.timedelta(bump)
return res
| 23,420
|
def validate_ogrn(ogrn: str, is_ip: Optional[bool] = None) -> None:
"""
Source:
https://ru.wikipedia.org/wiki/%D0%9E%D1%81%D0%BD%D0%BE%D0%B2%D0%BD%D0%BE%D0%B9_%D0%B3%D0%BE%D1%81%D1%83%D0%B4%D0%B0%D1%80%D1%81%D1%82%D0%B2%D0%B5%D0%BD%D0%BD%D1%8B%D0%B9_%D1%80%D0%B5%D0%B3%D0%B8%D1%81%D1%82%D1%80%D0%B0%D1%86%D0%B8%D0%BE%D0%BD%D0%BD%D1%8B%D0%B9_%D0%BD%D0%BE%D0%BC%D0%B5%D1%80
"""
if not isinstance(ogrn, str):
raise ValidationError('ogrn should be passed as string')
if len(ogrn) != 13 and len(ogrn) != 15:
raise ValidationError('wrong size of ogrn, it can be 13 or 15 chars only')
if not re.fullmatch(r'[1-9][0-9]+', ogrn):
raise ValidationError('wrong ogrn')
if len(ogrn) == 13 and is_ip is not True:
n13 = int(ogrn[:-1]) % 11 % 10
if n13 != int(ogrn[12]):
raise ValidationError(f'wrong checksum on pre-last digit: {ogrn[12]}; expected: {n13}')
return
if len(ogrn) == 15 and is_ip is not False:
n15 = int(ogrn[:-1]) % 13 % 10
if n15 != int(ogrn[14]):
raise ValidationError(f'wrong checksum on pre-last digit: {ogrn[14]}; expected: {n15}')
return
raise ValidationError('ogrn for ip can be 15 chars only')
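# Hedged usage sketch (ValidationError comes from the module defining validate_ogrn).
# The 13-digit check digit is int(first 12 digits) % 11 % 10, so a passing value
# can be built directly:
body = "102770013219"              # any 12 digits with a non-zero first digit
check = str(int(body) % 11 % 10)   # "5" for this body
validate_ogrn(body + check)        # passes silently; a wrong last digit raises ValidationError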
| 23,421
|
def getBits(val, hiIdx: int, loIdx: int) -> int:
"""Returns a bit slice of a value.
Args:
val: Original value.
hiIdx: Upper (high) index of slice.
loIdx: Lower index of slice.
Returns:
The bit slice.
"""
return (~(MASK_32<<(hiIdx-loIdx+1)) & (val>>loIdx))
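# Quick usage sketch, assuming MASK_32 is the module-level 32-bit all-ones mask
# (0xFFFFFFFF), as the name suggests.
MASK_32 = 0xFFFFFFFF
assert getBits(0xABCD1234, 15, 8) == 0x12   # bits 15..8 are the second byte
assert getBits(0b11011000, 4, 3) == 0b11    # bits 4..3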
| 23,422
|
def test_fid(get_average_fidelitiy):
"""
Check that the average fidelity of an identity is maximal.
"""
almost_equal(get_average_fidelitiy, 1)
| 23,423
|
def test_remove_word_removes_word(trie_3):
"""If word in trie remove should remove it."""
trie_3.remove('potato')
with pytest.raises(ValueError):
trie_3.remove('potato')
| 23,424
|
def check_validation_and_epoch_counts(lines: List[str], **kwargs: Dict) -> None:
"""Ensure that validation_period and epoch indices increment by 1 each time."""
counts = {"on_epoch": 0, "on_validation_period": 0}
for i, line in enumerate(lines):
cb = line.split(":")[0]
for prefix in counts:
if cb.startswith(prefix):
cb_idx = int(line.split(":")[1])
if cb.endswith("begin"):
"got on_validation_period_begin for index 0 but expected 0 on line {i}"
assert (
cb_idx == counts[prefix]
), f"got {cb} for index {cb_idx} but expected {counts[prefix]} on line {i}"
if cb.endswith("end"):
assert (
cb_idx == counts[prefix]
), f"got {cb} for index {cb_idx} but expected {counts[prefix]} on line {i}"
counts[prefix] += 1
| 23,425
|
def test_install_uninstall_local(get_integration, get_application):
"""
Check that a local integration can only be installed once and fails otherwise, but
multiple calls on the same target simply overwrite the value and activate it.
"""
local_application = get_application(integration=get_integration(is_local=True))
assert models.ApplicationInstallation.objects.count() == 0
local_application.install(target_id=1)
assert models.ApplicationInstallation.objects.count() == 1
assert models.ApplicationInstallation.objects.active().count() == 1
installation = local_application.install(target_id=1)
assert models.ApplicationInstallation.objects.count() == 1
installation.delete()
assert models.ApplicationInstallation.objects.count() == 1
assert models.ApplicationInstallation.objects.active().count() == 0
local_application.install(target_id=2)
assert models.ApplicationInstallation.objects.count() == 2
assert models.ApplicationInstallation.objects.active().count() == 1
with pytest.raises(ValidationError):
local_application.install(target_id=1)
| 23,426
|
def get(context, option):
""" Shows a default option."""
config = context.obj["config"]
update_config(config)
if option in config:
echov(f"The value of '{option}' is set to '{config[option]}'.")
else:
echow(f"The value of '{option}' is not set!")
| 23,427
|
def init(ctx):
"""
Interactively create a paradrop.yaml file.
This will ask the user some questions and then writes a paradrop.yaml file.
"""
chute = build_chute()
with open("paradrop.yaml", "w") as output:
yaml.safe_dump(chute, output, default_flow_style=False)
# If this is a node.js chute, generate a package.json file from the
# information that the user provided.
if chute.get('use', None) == 'node':
if not os.path.isfile('package.json'):
data = {
'name': chute['name'],
'version': '1.0.0',
'description': chute['description']
}
with open('package.json', 'w') as output:
json.dump(data, output, sort_keys=True, indent=2)
| 23,428
|
def get_hostname():
"""Returns the hostname, from /etc/hostname."""
hostname = ""
try:
with open('/etc/hostname') as f:
hostname = f.read().rstrip()
if len(hostname) == 0:
hostname = "Unknown"
except Exception:
hostname = "Unknown"
return hostname
| 23,429
|
def is_valid_body(val):
"""Body must be a dictionary."""
return isinstance(val, dict)
| 23,430
|
def safe_hangup(channel):
"""Safely hang up the specified channel"""
try:
channel.hangup()
print "Hung up {}".format(channel.json.get('name'))
except requests.HTTPError as e:
if e.response.status_code != requests.codes.not_found:
raise e
| 23,431
|
def _vba_to_python_op(op, is_boolean):
"""
Convert a VBA boolean operator to a Python boolean operator.
"""
op_map = {
"Not" : "not",
"And" : "and",
"AndAlso" : "and",
"Or" : "or",
"OrElse" : "or",
"Eqv" : "|eq|",
"=" : "|eq|",
">" : ">",
"<" : "<",
">=" : ">=",
"=>" : ">=",
"<=" : "<=",
"=<" : "<=",
"<>" : "|neq|",
"is" : "|eq|"
}
if (not is_boolean):
op_map["Not"] = "~"
op_map["And"] = "&"
op_map["AndAlso"] = "&"
op_map["Or"] = "|"
op_map["OrElse"] = "|"
return op_map[op]
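# Short illustration of how the is_boolean flag switches between logical and
# bitwise operators:
assert _vba_to_python_op("And", True) == "and"    # boolean context
assert _vba_to_python_op("And", False) == "&"     # bitwise context
assert _vba_to_python_op("<>", True) == "|neq|"   # custom not-equal marker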
| 23,432
|
def file_diff_format(filename1, filename2):
"""
Inputs:
filename1 - name of first file
filename2 - name of second file
Output:
Returns a four line string showing the location of the first
difference between the two files named by the inputs.
If the files are identical, the function instead returns the
string "No differences\n".
If either file does not exist or is not readable, then the
behavior of this function is undefined.
"""
# read files
lst1 = get_file_lines(filename1)
lst2 = get_file_lines(filename2)
# get tuple indicating line and index of first difference between two files
my_tup = multiline_diff(lst1, lst2)
# handle identical case
if my_tup[0] == -1:
return "No differences\n"
else:
# get 3 line formatted output of first difference between two lines
sdf_output = singleline_diff_format(lst1[my_tup[0]], lst2[my_tup[0]], my_tup[1])
# all other cases
return "Line " + str(my_tup[0]) + ":\n" + sdf_output
| 23,433
|
def _check_assembly_string(base_asm, instr_type, target, operands):
"""
:param base_asm:
:type base_asm:
:param instr_type:
:type instr_type:
:param target:
:type target:
:param operands:
:type operands:
"""
LOG.debug("Start checking assembly string: %s", base_asm)
operands = list(operands)
relocation_mode = False
for idx, operand in enumerate(operands):
if isinstance(operand, six.string_types) and "@" not in operand:
operands[idx] = Address(base_address=operand)
if isinstance(operand, six.string_types) and "@" in operand:
relocation_mode = True
instruction = target.new_instruction(instr_type.name)
try:
if not relocation_mode:
instruction.set_operands(operands)
else:
# Go one by one, and make relocation safe
for operand, value in zip(instruction.operands(), operands):
if (isinstance(operand.type, OperandImmRange) and
"@" in value):
operand.set_value(value, check=False)
else:
operand.set_value(value)
except MicroprobeValueError:
LOG.debug("End checking assembly string: Operands not valid")
return False
except MicroprobeCodeGenerationError:
LOG.debug(
"End checking assembly string: Operands not valid for "
"callback"
)
return False
nasm = _normalize_asm(instruction.assembly())
base_asm = _normalize_asm(base_asm)
base_asm = base_asm.replace(instr_type.name, instr_type.mnemonic)
LOG.debug("'%s' == '%s' ?", nasm, base_asm)
if nasm == base_asm:
LOG.debug("End checking assembly string: Valid")
return True
LOG.debug("End checking assembly string: Not valid")
return False
| 23,434
|
def get_label_number(window):
"""This method assigns to each label of a window a number."""
mode_list = ["bike", "car", "walk", "bus", "train"]
current_label_number = 0
for mode in enumerate(mode_list):
if window[1] == mode[1]:
current_label_number = mode[0]
return current_label_number
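# Usage sketch: the window's second element is the mode label, and unknown
# labels fall back to index 0.
assert get_label_number(("features", "walk")) == 2   # index of "walk" in mode_list
assert get_label_number(("features", "boat")) == 0   # unknown label defaults to 0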
| 23,435
|
def test_saw3():
"""SAW3"""
utcnow = utc(2014, 3, 10, 3, 29)
sts = utcnow.replace(hour=3, minute=35)
ets = utcnow.replace(hour=9, minute=0)
prod = sawparser(get_test_file("SAW/SAW3.txt"), utcnow=utcnow)
assert prod.saw == 3
assert abs(prod.geometry.area - 7.73) < 0.01
assert prod.ww_num == 503
assert prod.sts == sts
assert prod.ets == ets
assert prod.ww_type == prod.SEVERE_THUNDERSTORM
assert prod.action == prod.ISSUES
| 23,436
|
def midi_array_to_event(midi_as_array):
"""
Take converted MIDI array and convert to array of Event objects
"""
# Sort MIDI array
midi = sorted(midi_as_array, key=itemgetter(2))
# Init result
result = []
# Accumulators for computing start and end times
active_notes = []
curr_time = 0
# For comparing velocities
prev_vel_range = 0
# For all the entries in the midi array
for i in midi:
# Add the current note
active_notes.append(i)
# Get time shift values
shift_values, shift_sum = get_shift_value(i[2] - curr_time)
# Apply time shift to the next start note
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# Check if there are notes that are playing that need to end
notes_to_end = [x for x in active_notes if curr_time >= x[3]]
active_notes[:] = (x for x in active_notes if curr_time < x[3])
# For the finished notes
for j in notes_to_end:
# End the note
result.append(Event(EventType.NOTE_OFF, j[1]))
# If the velocity has changed by a large enough amount, add a set velocity event
temp_velocity = i[0]
bin_size = (127/20)
for vel in range(20):
if temp_velocity < (vel + 1) * bin_size:
if prev_vel_range != vel:
result.append(Event(EventType.SET_VELOCITY, int((vel + 1) * bin_size)))
prev_vel_range = vel
break
# Start the note
result.append(Event(EventType.NOTE_ON, i[1]))
# If there are still notes in midi_acc
if active_notes:
for i in active_notes:
if i[3] > curr_time:
# Apply time shift
shift_values, shift_sum = get_shift_value(i[3] - curr_time)
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# End note
result.append(Event(EventType.NOTE_OFF, i[1]))
# Return array
return result
| 23,437
|
def find_benchmarks(module) -> Dict[str, Type[Benchmark]]:
"""Enumerate benchmarks in `module`."""
found = {}
for name in module.__all__:
benchmark_type = getattr(module, name)
found[benchmark_type.name] = benchmark_type
return found
| 23,438
|
def out_folder_android_armv8_clang(ctx, section_name, option_name, value):
""" Configure output folder for Android ARMv8 Clang """
if not _is_user_input_allowed(ctx, option_name, value):
Logs.info('\nUser Input disabled.\nUsing default value "%s" for option: "%s"' % (value, option_name))
return value
# GUI
if not ctx.is_option_true('console_mode'):
return ctx.gui_get_attribute(section_name, option_name, value)
_output_folder_disclaimer(ctx)
return _get_string_value(ctx, 'Android ARMv8 Clang Output Folder', value)
| 23,439
|
def remap_classes(dataset, class_map):
""" Replaces classes of dataset based on a dictionary"""
class_new_names = list(set(class_map.values()))
class_new_names.sort() # NOTE sort() is a NoneType return method, it sorts the list without outputting new vars
class_originals = copy.deepcopy(dataset['categories'])
dataset['categories'] = [] # removing all dependencies
class_ids_map = {} # map from old id to new id
# Check whether the category has background or not, assign index 0. Useful for panoptic segmentation.
has_background = False
if 'Background' in class_new_names:
# Check whether the background category has index zero.
if class_new_names.index('Background') != 0:
class_new_names.remove('Background')
class_new_names.insert(0, 'Background')
has_background = True
# Catching duplicates - TACO had duplicates for id 4040 and 309. Re-id'd
id_ann_all = []
id_ann_repeated = []
for index_old, ann_old in enumerate(dataset['annotations']):
if ann_old['id'] in id_ann_all:
# if found a duplicate, re-id at the end
id_ann_repeated.append(ann_old['id'])
ann_old['id'] = len(dataset['annotations'])+len(id_ann_repeated)-1
else:
id_ann_all.append(ann_old['id'])
print(f'Found {len(id_ann_repeated)} annotations repeated.'
f'\nPlease double check input file, annotation id(s) {id_ann_repeated} are duplicated!\n')
# Replace categories, iterating through every class name
for id_new, class_new_name in enumerate(class_new_names):
# Make sure id:0 is reserved for background
id_rectified = id_new
if not has_background:
id_rectified += 1
# Creating new category dictionary, using new category ID and the new class name
category = {
'supercategory': '',
'id': id_rectified, # Background has id=0
'name': class_new_name,
}
dataset['categories'].append(category) # assigning new categories
# Map class names
for class_original in class_originals:
# If the new class exists in the value of the class map dict, create new class id
if class_map[class_original['name']] == class_new_name:
class_ids_map[class_original['id']] = id_rectified
# Update annotations category id tag
for ann in dataset['annotations']:
ann['category_id'] = class_ids_map[ann['category_id']]
# Saving the newly created file as a JSON file
num_classes = str(len(class_new_names))
ann_out_path = './data' + '/' + 'ann_'+ 'map_to_' + num_classes +'.json'
with open(ann_out_path, 'w+') as f:
f.write(json.dumps(dataset))
# return path to new file, for loading somewhere else.
return str(os.path.abspath(ann_out_path))
| 23,440
|
def MCTS(root, verbose = False):
"""initialization of the chemical trees and grammar trees"""
run_time=time.time()+600*2
rootnode = Node(state = root)
state = root.Clone()
maxnum=0
iteration_num=0
start_time=time.time()
"""----------------------------------------------------------------------"""
"""global variables used for save valid compounds and simulated compounds"""
valid_compound=[]
all_simulated_compound=[]
desired_compound=[]
max_score=-100.0
desired_activity=[]
time_distribution=[]
num_searched=[]
current_score=[]
depth=[]
all_score=[]
"""----------------------------------------------------------------------"""
while maxnum<10100:
print maxnum
node = rootnode
state = root.Clone()
"""selection step"""
node_pool=[]
print "current found max_score:",max_score
while node.childNodes!=[]:
node = node.Selectnode()
state.SelectPosition(node.position)
print "state position:,",state.position
depth.append(len(state.position))
if len(state.position)>=81:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
"""------------------------------------------------------------------"""
"""expansion step"""
"""calculate how many nodes will be added under current leaf"""
expanded=expanded_node(model,state.position,val)
nodeadded=node_to_add(expanded,val)
all_posible=chem_kn_simulation(model,state.position,val,nodeadded)
generate_smile=predict_smile(all_posible,val)
new_compound=make_input_smile(generate_smile)
node_index,score,valid_smile,all_smile=check_node_type(new_compound,SA_mean,SA_std,logP_mean,logP_std,cycle_mean,cycle_std)
print node_index
valid_compound.extend(valid_smile)
all_simulated_compound.extend(all_smile)
all_score.extend(score)
iteration_num=len(all_simulated_compound)
if len(node_index)==0:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
re=[]
for i in range(len(node_index)):
m=node_index[i]
maxnum=maxnum+1
node.Addnode(nodeadded[m],state)
node_pool.append(node.childNodes[i])
if score[i]>=max_score:
max_score=score[i]
current_score.append(max_score)
else:
current_score.append(max_score)
depth.append(len(state.position))
"""simulation"""
re.append((0.8*score[i])/(1.0+abs(0.8*score[i])))
if maxnum==100:
maxscore100=max_score
time100=time.time()-start_time
if maxnum==500:
maxscore500=max_score
time500=time.time()-start_time
if maxnum==1000:
maxscore1000=max_score
time1000=time.time()-start_time
if maxnum==5000:
maxscore5000=max_score
time5000=time.time()-start_time
if maxnum==10000:
time10000=time.time()-start_time
maxscore10000=max_score
#valid10000=10000*1.0/len(all_simulated_compound)
"""backpropation step"""
#print "node pool length:",len(node.childNodes)
for i in range(len(node_pool)):
node=node_pool[i]
while node != None:
node.Update(re[i])
node = node.parentNode
#finish_iteration_time=time.time()-iteration_time
#print "four step time:",finish_iteration_time
"""check if found the desired compound"""
#print "all valid compounds:",valid_compound
finished_run_time=time.time()-start_time
print "logp max found:", current_score
#print "length of score:",len(current_score)
#print "time:",time_distribution
print "valid_com=",valid_compound
print "num_valid:", len(valid_compound)
print "all compounds:",len(all_simulated_compound)
print "score=", all_score
print "depth=",depth
print len(depth)
print "runtime",finished_run_time
#print "num_searched=",num_searched
print "100 max:",maxscore100,time100
print "500 max:",maxscore500,time500
print "1000 max:",maxscore1000,time1000
print "5000 max:",maxscore5000,time5000
print "10000 max:",maxscore10000,time10000
return valid_compound
| 23,441
|
def update_internalnodes_MRTKStandard() -> bpy.types.NodeGroup:
"""Update the internal nodes of the node group being defined
Returns:
bpy.types.NodeGroup: reference to the created node group
"""
# Check whether an MRTKStandard node group is already defined in the data
# (the get function returns None if the target does not exist)
get_nodegroup = bpy.data.node_groups.get(def_nodegroup_name)
# Check whether the node group could be retrieved
if get_nodegroup == None:
# If the node group is not defined, do nothing
return None
# Update only the internal nodes and links of the node group, excluding the input/output nodes
# Iterate over all current internal nodes
for node in get_nodegroup.nodes:
# Check whether this is the input node
if node.name == def_inputnode_name:
# Skip the input node
continue
# Check whether this is the output node
if node.name == def_outputnode_name:
# Skip the output node
continue
# Delete every node other than the input/output nodes
get_nodegroup.nodes.remove(node)
# Create the version annotation frame node in the node group
group_versionnode = add_nodegroup_MRTKStandard_framenode()
# Create the BSDF node in the node group
group_bsdfnode = add_nodegroup_MRTKStandard_bsdfnode()
# Create the RGB mix node in the node group
group_rgbmix = add_nodegroup_MRTKStandard_rgbmixnode()
# Create the smoothness inversion node in the node group
group_smoothinversion = add_nodegroup_MRTKStandard_smoothinversionnode()
# Set up the links that make up the node group
link_result = link_MRTKStandardNodeGroup_default()
# Check whether the links were connected successfully
if link_result == False:
# If linking failed, do not return the node group
return None
return get_nodegroup
| 23,442
|
def _ensure_consistent_schema(
frame: SparkDF,
schemas_df: pd.DataFrame,
) -> SparkDF:
"""Ensure the dataframe is consistent with the schema.
If there are column data type mismatches, (more than one data type
for a column name in the column schemas) then will try to convert
the data type if possible:
* if they are all number data types, then picks the largest number
type present
* if one of the types is string, then ensures it casts the column to
string type
Also fills any missing columns with Null values, ensuring correct
dtype.
Parameters
----------
frame : SparkDF
schemas_df : pd.DataFrame
A dataframe of simple column schemas in the form (name, dtype) for all
dataframes set to be concatenated.
Returns
-------
SparkDF
Input dataframe with consistent schema.
"""
final_schema = _get_final_schema(schemas_df)
missing_fields = [f for f in final_schema if f not in frame.dtypes]
for column, dtype in missing_fields:
# If current frame missing the column in the schema, then
# set values to Null.
vals = (
F.lit(None) if column not in frame.columns
else F.col(column)
)
# Cast the values with the correct dtype.
frame = frame.withColumn(column, vals.cast(dtype))
return frame
| 23,443
|
def shave_bd(img, bd):
"""
Shave border area of spatial views. A common operation in SR.
:param img:
:param bd:
:return:
"""
return img[bd:-bd, bd:-bd, :]
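# Minimal check of the border shave, assuming an H x W x C NumPy array.
import numpy as np
img = np.arange(5 * 5 * 3).reshape(5, 5, 3)
assert shave_bd(img, 1).shape == (3, 3, 3)   # one-pixel border removed on each side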
| 23,444
|
def get_raw_dir(args):
"""
Archived function. Ignore this for now
"""
root = "C:\\Workspace\\FakeNews"
if os.name == "posix":
root = '..'
path = osp.join(root, "Demo", "data", f"{args.dataset}", "raw")
return path
| 23,445
|
def test_joboffer_detail_view_render_state_with_active_label(publisher_client):
"""
Test that the joboffer detail view renders the state with active label class
"""
client = publisher_client
joboffer = JobOfferFactory.create(state=OfferState.ACTIVE)
target_url = reverse(VIEW_URL, kwargs={'slug': joboffer.slug})
response = client.get(target_url)
label_class = STATE_LABEL_CLASSES[OfferState.ACTIVE]
assert response.context_data['state_label_class'] == label_class
| 23,446
|
def process_game_hook(instance, created, **_):
"""Process a game immediately after game creation."""
if created:
instance.process_game()
| 23,447
|
def plotly_single(ma, average_type, color, label, plot_type='line'):
"""A plotly version of plot_single. Returns a list of traces"""
summary = list(np.ma.__getattribute__(average_type)(ma, axis=0))
x = list(np.arange(len(summary)))
if isinstance(color, str):
color = list(matplotlib.colors.to_rgb(color))
traces = [go.Scatter(x=x, y=summary, name=label, line={'color': "rgba({},{},{},0.9)".format(color[0], color[1], color[2])}, showlegend=False)]
if plot_type == 'fill':
traces[0].update(fill='tozeroy', fillcolor=color)
if plot_type in ['se', 'std']:
if plot_type == 'se': # standard error
std = np.std(ma, axis=0) / np.sqrt(ma.shape[0])
else:
std = np.std(ma, axis=0)
x_rev = x[::-1]
lower = summary - std
trace = go.Scatter(x=x + x_rev,
y=np.concatenate([summary + std, lower[::-1]]),
fill='tozerox',
fillcolor="rgba({},{},{},0.2)".format(color[0], color[1], color[2]),
line=go.Line(color='transparent'),
showlegend=False,
name=label)
traces.append(trace)
return traces
| 23,448
|
def remove_pip(packages):
"""Remove pip modules, from anysnake.toml.
If they're installed, remove their installation
If they're editable, remove their code/folders as well"""
import shutil
import tomlkit
d, config = get_anysnake()
local_config = tomlkit.loads(Path("anysnake.toml").read_text())
write_toml = False
for p in packages:
if p in local_config.get("python"):
del local_config["python"][p]
write_toml = True
path = d.paths["code_clones"] / p
if path.exists():
if click.confirm(f"really remove {path}?)"):
shutil.rmtree(str(path))
lib_path = (
d.paths["code_venv"]
/ "lib"
/ ("python" + d.major_python_version)
/ "site-packages"
)
print(p + "*")
for f in lib_path.glob(p + "*"):
print(f)
if write_toml:
import time
backup_filename = "anysnake.toml.%s" % time.strftime("%Y-%M-%d-%H-%M")
print("writing new anysnake.toml - old one in %s" % backup_filename)
shutil.copy("anysnake.toml", backup_filename)
with open("anysnake.toml", "w") as op:
op.write(tomlkit.dumps(local_config))
| 23,449
|
def create_task(className, *args, projectDirectory='.', dryrun=None, force=None, source=False):
"""Generates task class from the parameters derived from :class:`.Task`
Fails if the target file already exists unless ``force=True`` or ``--force`` in the CLI is set.
Setting the ``--source`` will generate a different template that have stubs with the functions that need to be overwritten.
Parameters
----------
className : string (CamelCase)
Name of the class to be created
projectDirectory : string (default='.')
Location of the project directory, the code will be created in ``projectDirectory/data_models/class_name.py``.
dryrun : bool (default=None)
If set to ``True`` it returns the generated code as a string
force : bool (default=None)
If set to ``True`` it overwrites the target file
source : bool (default=False)
If set to ``True`` the class will generate stubs for functions to be overwritten
*args : List of strings (CamelCase)
Classes to be imported into the generated code from the datamodel, fails if class not found
Returns
-------
content : string
The generated code if ``dryrun`` is specified
"""
if source:
taskType = NameString('Source')
else:
taskType = NameString('Task')
project = HypergolProject(projectDirectory=projectDirectory, dryrun=dryrun, force=force)
className = NameString(className)
dependencies = [NameString(value) for value in args]
project.check_dependencies(dependencies)
content = project.render(
templateName=f'{taskType.asFileName}.j2',
templateData={'className': className, 'dependencies': dependencies},
filePath=Path(projectDirectory, 'tasks', className.asFileName)
)
return project.cli_final_message(creationType=taskType, name=className, content=(content, ))
| 23,450
|
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
| 23,451
|
def fix_multipath_1():
"""Installs multipath tooling packages"""
vprint("Fixing multipath settings")
if get_os() == UBUNTU:
exe("apt-get install multipath-tools -y")
else:
exe("yum install device-mapper-multipath -y")
| 23,452
|
def __resolve_key(key: Handle) -> PyHKEY:
"""
Returns the full path to the key
>>> # Setup
>>> fake_registry = fake_reg_tools.get_minimal_windows_testregistry()
>>> load_fake_registry(fake_registry)
>>> # Connect registry and get PyHkey Type
>>> reg_handle = ConnectRegistry(None, HKEY_CURRENT_USER)
>>> __resolve_key(key=reg_handle).handle.full_key
'HKEY_CURRENT_USER'
>>> __resolve_key(key=HKEY_CURRENT_USER).handle.full_key
'HKEY_CURRENT_USER'
>>> # Test PyHKey Type (the most common)
>>> discard = __resolve_key(reg_handle)
>>> # Test int Type
>>> discard = __resolve_key(HKEY_CURRENT_USER)
>>> # Test HKEYType
>>> hkey = HKEYType(handle=reg_handle.handle, access=reg_handle._access)
>>> discard = __resolve_key(hkey)
>>> # Test invalid handle
>>> discard = __resolve_key(42)
Traceback (most recent call last):
...
OSError: [WinError 6] The handle is invalid
>>> # Test invalid type
>>> discard = __resolve_key('spam') # noqa
Traceback (most recent call last):
...
RuntimeError: unknown Key Type
"""
if isinstance(key, PyHKEY):
key_handle = key
elif isinstance(key, int):
try:
key_handle = PyHKEY(__fake_registry.hive[key])
except KeyError:
error = OSError("[WinError 6] The handle is invalid")
setattr(error, "winerror", 6)
raise error
elif isinstance(key, HKEYType):
key_handle = PyHKEY(handle=key.handle, access=key._access)
else:
raise RuntimeError("unknown Key Type")
return key_handle
| 23,453
|
def vec3f_unitZ():
"""vec3f_unitZ() -> vec3f"""
return _libvncxx.vec3f_unitZ()
| 23,454
|
def view_recipe(tileset, token=None):
"""View a tileset's recipe JSON
tilesets view-recipe <tileset_id>
"""
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = '{0}/tilesets/v1/{1}/recipe?access_token={2}'.format(mapbox_api, tileset, mapbox_token)
r = requests.get(url)
if r.status_code == 200:
utils.print_response(r.text)
else:
click.echo(r.text)
| 23,455
|
def _BotNames(source_config, full_mode=False):
"""Returns try bot names to use for the given config file name."""
platform = os.path.basename(source_config).split('.')[0]
assert platform in PLATFORM_BOT_MAP
bot_names = PLATFORM_BOT_MAP[platform]
if full_mode:
return bot_names
return [bot_names[0]]
| 23,456
|
def display_board(board):
""" show board in terminal"""
logging.debug('display_board()')
print(board[0] +" | "+ board[1] + " | " + board[2]+ 5*" " + " 1 | 2 | 3 ")
print(board[3] +" | "+ board[4] + " | " + board[5]+ 5*" " + " 4 | 5 | 6 ")
print(board[6] +" | "+ board[7] + " | " + board[8]+ 5*" " + " 7 | 8 | 9 ")
print("")
| 23,457
|
def get_public_key(public_key_path=None, private_key_path=None):
"""get_public_key.
Loads public key. If no path is specified, loads signing_key.pem.pub from the
current directory. If a private key path is provided, the public key path is
ignored and the public key is loaded from the private key.
:param public_key_path: a string of the public key file name, with relative or full path
:param private_key_path: a string of the private key file name, with relative or full path
:return:
"""
if private_key_path is not None:
private_key = get_private_key(private_key_path)
public_key = private_key.publickey().exportKey("PEM")
return public_key
elif public_key_path is None:
public_key_path = "signing_key.pem.pub"
with open(public_key_path, "rb") as f:
public_key = RSA.importKey(f.read())
return public_key
| 23,458
|
def InitializeState(binaryString):
"""
State initializer
"""
state = np.zeros(shape=(4, 4), dtype=np.uint8)
plaintextBytes = SplitByN(binaryString, 8)
for col in range(4):
for row in range(4):
binary = plaintextBytes[col * 4 + row]
state[row, col] = int(binary, 2)
return np.matrix(state)
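# Usage sketch, assuming SplitByN(s, n) chunks the string into n-character pieces
# (as the loop implies): a 128-bit plaintext string becomes a 4x4 column-major
# byte matrix.
binary = format(0x00112233445566778899AABBCCDDEEFF, "0128b")
state = InitializeState(binary)
print(state[:, 0].T)   # first column holds 0x00, 0x11, 0x22, 0x33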
| 23,459
|
def _apply_modifier(s: str, modifier: str, d: Dict[Any, str]) -> str:
"""
This will search for the ^ signs and replace the next
digit or (digits when {} is used) with its/their uppercase representation.
:param s: Latex string code
:param modifier: Modifier command
:param d: Dict to look upon
:return: New text with replaced text.
"""
s = s.replace(modifier, "^")
newtext = ""
mode_normal, mode_modified, mode_long = range(3)
mode = mode_normal
for ch in s:
if mode == mode_normal and ch == '^':
mode = mode_modified
continue
elif mode == mode_modified and ch == '{':
mode = mode_long
continue
elif mode == mode_modified:
newtext += d.get(ch, ch)
mode = mode_normal
continue
elif mode == mode_long and ch == '}':
mode = mode_normal
continue
if mode == mode_normal:
newtext += ch
else:
newtext += d.get(ch, ch)
return newtext
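# Small usage sketch with a hypothetical lookup table (the real d is supplied by
# the caller): a single character after ^ and a braced group are both replaced.
superscript_digits = {"2": "\u00b2", "3": "\u00b3"}
assert _apply_modifier("x^2 + y^{23}", "^", superscript_digits) == "x\u00b2 + y\u00b2\u00b3"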
| 23,460
|
def threaded_polling(data, max_workers):
"""
Multithreaded polling method to get the data from cryptocompare
:param data: dictionary containing the details to be fetched
:param max_workers: maximum number of threads to spawn
:return list: containing the high low metrics for each pair
"""
hl_parsed_data = list()
exchange = data["name"]
pairs = data["pair_whitelist"]
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
# Start the load operations and mark each future with its URL
future_to_pairs = [
executor.submit(run_parser, _.split("/")[0], _.split("/")[1], exchange)
for _ in pairs
]
total = len(future_to_pairs)
count = 0
for future in concurrent.futures.as_completed(future_to_pairs):
try:
data = future.result()
hl_parsed_data.append(data)
except Exception as exc:
print(exc)
else:
count += 1
msg = f"Parsing {data['symbol']:10} | {count:2}/{total:2}"
print(msg, end="\r")
print(f"Pairs processed from {exchange} | {count:2}/{total:2}")
return hl_parsed_data
| 23,461
|
def sub(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]:
"""Return numpy-style subtraction layer registration information (shape)
NOTE: https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html"""
assert len(in_xlayers) == 2, "Subtract layer expects two input layers"
lX, rX = in_xlayers
shape = TensorShape(get_numpy_broadcasted_shape(lX.shapes[:], rX.shapes[:]))
return {'shape': shape}
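# Since the shape rule defers to NumPy broadcasting (per the docstring link), a
# plain NumPy check illustrates the expected output shape.
import numpy as np
a = np.zeros((1, 3, 1, 4))
b = np.zeros((2, 1, 4))
assert (a - b).shape == (1, 3, 2, 4)   # trailing dims must match or be 1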
| 23,462
|
def test_js102_class_def(class_def_file_path):
"""Class definition should not trigger JS102."""
style_guide = flake8.get_style_guide(
select=['JS102'],
)
p = os.path.abspath(class_def_file_path)
r = style_guide.check_files([p])
assert 0 == r.total_errors
| 23,463
|
def flatten_scan(scan, headers, output_file):
"""
Flatten a ScanCode `scan` results list into one row per path and write the
rows as CSV to `output_file`.
"""
mainlist = []
parent_dict = {}
keys=[]
fg_val=0
i = 0
for scanned_file in scan:
for k, v in scanned_file.items():
if k == "path":
keys.insert(i, "path")
newlist = dict()
newlist['path']=v
path_value = v
elif k == "type":
i+=1
keys.insert(i, "type")
newlist['type']=v
if v=="directory":
fg_val+=1
#keys.insert(2, "folder_group")
newlist['folder_group']=fg_val
parent_dict[path_value] = fg_val
'''elif k == "folder_group":
keys.insert(2, "folder_group")
newlist['folder_group']=v
parent_dict[path_value] = v'''
parentpath = Path(path_value)
key = str(parentpath.parent)
if key != '.':
key = key.replace("\\","/")
newlist['parent_group'] = parent_dict[key]
#keys.insert(3, "parent_group")
if type(v) is list:
if k == "licenses":
for val1 in v:
if isinstance(val1, dict):
for keyname,keyvalue in val1.items():
keyvalue = str(keyvalue)
keyvalue = keyvalue.replace(',','')
if keyname == "name":
i+=1
keys.insert(i, "license_name")
if 'license_name' in newlist.keys():
if keyvalue not in newlist.get('license_name'):
templist = newlist['license_name']
keyvalue = ", ".join([templist, keyvalue])
newlist['license_name']=keyvalue
else:
newlist['license_name']=keyvalue
elif keyname == "text_url" and keyvalue!="":
i+=1
keys.insert(i, "license_text_url")
if 'license_text_url' in newlist.keys():
if keyvalue not in newlist.get('license_text_url'):
templist = newlist['license_text_url']
keyvalue = ", ".join([templist, keyvalue])
newlist['license_text_url']=keyvalue
else:
newlist['license_text_url']=keyvalue
elif k == "copyrights":
for val1 in v:
if isinstance(val1, dict):
for keyname,keyvalue in val1.items():
keyvalue = str(keyvalue)
keyvalue = keyvalue.replace(',','')
if keyname == "value":
i+=1
keys.insert(i, "copyright")
if 'copyright' in newlist.keys():
if keyvalue not in newlist.get('copyright'):
templist = newlist['copyright']
keyvalue = ", ".join([templist, keyvalue])
newlist['copyright']=keyvalue
else:
newlist['copyright']=keyvalue
elif k=="packages":
for val1 in v:
if isinstance(val1, dict):
for keyname,keyvalue in val1.items():
if keyname == "name":
i+=1
keys.insert(i, "package_name")
newlist['package_name']=keyvalue
elif keyname == "version":
i+=1
keys.insert(i, "package_version")
newlist['package_version']=keyvalue
elif keyname == "homepage_url":
i+=1
keys.insert(i, "package_homepage_url")
newlist['package_homepage_url']=keyvalue
elif k=="urls":
for val1 in v:
if isinstance(val1, dict):
for keyname,keyvalue in val1.items():
if keyname == "url":
i+=1
keys.insert(i, "url")
if 'url' in newlist.keys():
if keyvalue not in newlist.get('url'):
templist = newlist['url']
keyvalue = ", ".join([templist, keyvalue])
newlist['url']=keyvalue
else:
newlist['url']=keyvalue
mainlist.append(newlist)
previouspath=''
previous_packagename=''
previous_packageversion=''
previous_packageurl=''
flag=0
"""get the previous path's package name and version"""
for templist in mainlist:
if (templist['type'] == "directory") and ('package_name' not in templist.keys()) and (previouspath in templist['path']) and not templist['path'].endswith("node_modules"):
if previous_packagename:
templist['package_name'] = previous_packagename
templist['package_version'] = previous_packageversion
templist['package_homepage_url'] = previous_packageurl
flag=1
else:
flag=0
if templist['type'] == "directory" and ('package_name' in templist.keys()) and flag==0:
previouspath = templist['path']
previous_packagename = templist['package_name']
previous_packageversion = templist['package_version']
previous_packageurl = templist['package_homepage_url']
"""to print package name matching the folder group"""
for sublist in mainlist:
strippedpath, tail = os.path.split(sublist['path'])
if (sublist['type'] == "directory") and ('package_name' not in sublist.keys()) and not sublist['path'].endswith("node_modules"):
for templist in mainlist:
if templist['path']==strippedpath and 'package_name' in templist.keys():
sublist['package_name'] = templist['package_name']
sublist['package_version'] = templist['package_version']
sublist['package_homepage_url'] = templist['package_homepage_url']
if 'package_name' in sublist.keys():
fldr_grp = sublist['folder_group']
for templist in mainlist:
if templist['folder_group'] == fldr_grp and 'package_name' not in templist.keys():
templist['package_name'] = sublist['package_name']
templist['package_version'] = sublist['package_version']
templist['package_homepage_url'] = sublist['package_homepage_url']
mainlist_modified = []
for sublist in mainlist:
sublist_modified={}
for k1, v1 in sublist.items():
if k1 not in ['folder_group','parent_group']:
sublist_modified[k1]=v1
mainlist_modified.append(sublist_modified)
"""to print in csv file"""
keys_list = []
for x in keys:
if x not in keys_list:
keys_list.append(x)
w = csv.DictWriter(output_file, keys_list)
w.writeheader()
w.writerows(mainlist_modified)
| 23,464
|
def word_column_filter_df(dataframe, column_to_filter, column_freeze, word_list):
# The .where() function returns a position that must be converted into an index.
# Provide the name of a reference column (e.g. product code) to recover the index, or build a re-indexed column.
"""Filters a dataframe column against a list of words, then returns the dataframe"""
import re
position_to_drop_lst = np.where(dataframe[column_to_filter].str.contains('|'.join(map(re.escape, word_list)),
na=np.NaN))[0]
indices_to_drop_lst = []
for position in position_to_drop_lst:
indice = (dataframe[dataframe[column_freeze] == dataframe.iloc[position].loc[column_freeze]]).index[0]
indices_to_drop_lst.append(indice)
print("Number of rows dropped:")
nbr = len(indices_to_drop_lst)
print(nbr)
print("\n")
dataframe.drop(indices_to_drop_lst, axis=0, inplace=True)
return dataframe
| 23,465
|
def unlocked():
"""
Context manager which unlocks a Document and dispatches
ModelChangedEvents triggered in the context body to all sockets
on current sessions.
"""
curdoc = state.curdoc
if curdoc is None or curdoc.session_context is None:
yield
return
connections = curdoc.session_context.session._subscribed_connections
hold = curdoc._hold
if hold:
old_events = list(curdoc._held_events)
else:
old_events = []
curdoc.hold()
try:
yield
events = []
for conn in connections:
socket = conn._socket
if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
state._locks.add(socket)
locked = socket in state._locks
for event in curdoc._held_events:
if (isinstance(event, ModelChangedEvent) and event not in old_events
and hasattr(socket, 'write_message') and not locked):
msg = conn.protocol.create('PATCH-DOC', [event])
WebSocketHandler.write_message(socket, msg.header_json)
WebSocketHandler.write_message(socket, msg.metadata_json)
WebSocketHandler.write_message(socket, msg.content_json)
for header, payload in msg._buffers:
WebSocketHandler.write_message(socket, header)
WebSocketHandler.write_message(socket, payload, binary=True)
elif event not in events:
events.append(event)
curdoc._held_events = events
finally:
if not hold:
curdoc.unhold()
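# Example — a hypothetical usage sketch inside a Panel/Bokeh server callback,
# assuming `unlocked` is wrapped with contextlib.contextmanager as above;
# `source` (a ColumnDataSource) and `status_text` (a Div) are placeholder models.
def on_update(source, status_text, new_value):
    """Push a data patch to all subscribed sockets without re-locking the document."""
    with unlocked():
        source.data = dict(x=[0, 1], y=[0, new_value])
        status_text.text = "updated to {}".format(new_value)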
| 23,466
|
def pytest_configure(config):
"""Configure and init envvars for airflow."""
config.old_env = {}
for key, value in TEST_ENV_VARS.items():
config.old_env[key] = os.getenv(key)
os.environ[key] = value
# define some models to get the tests to pass.
db.merge_conn(
models.Connection(
conn_id='ingestion_server', conn_type='ssh',
host='http://localhost')
)
| 23,467
|
def test_contactus_invalid(client, jwt, session):
"""Assert that the endpoint returns the failure status."""
req_data = {
'firstName': 'my',
'lastName': 'self',
'email': '',
'description': ''
}
rv = client.post(API_URI_PREFIX + 'contactus', data=json.dumps(req_data), content_type='application/json')
assert rv.status_code == HTTPStatus.BAD_REQUEST
| 23,468
|
def adjust_learning_rate(optimizer, iteration, epoch_size, hyp, epoch, epochs):
    """Adjust the learning rate: linear warmup followed by step decay.
    :param optimizer: optimizer whose param groups are updated in place
    :param iteration: current iteration
    :param epoch_size: number of iterations per epoch
    :param hyp: hyperparameter dict containing the base learning rate 'lr0'
    :param epoch: current epoch
    :param epochs: total number of epochs
    :return: the learning rate that was set
    """
step_index = 0
if epoch < 6:
        # Warm up during the first 6 epochs
learning_rate = 1e-6 + (hyp['lr0'] - 1e-6) * iteration / (epoch_size * 2)
else:
if epoch > epochs * 0.5:
# At 50% of the epochs, the learning rate decays in Gamma
step_index = 1
if epoch > epochs * 0.7:
# At 70% of the epochs, the learning rate decays in Gamma^2
step_index = 2
learning_rate = hyp['lr0'] * (0.1 ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
return learning_rate
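# Example — a sketch of the resulting schedule using a stand-in optimizer, so the
# function can be exercised without a deep-learning framework; `lr0`, `epoch_size`
# and `epochs` below are made-up values.
class _DummyOptimizer:
    """Minimal stand-in exposing the `param_groups` attribute the function updates."""
    def __init__(self):
        self.param_groups = [{'lr': 0.0}]

hyp = {'lr0': 0.01}
opt = _DummyOptimizer()
epoch_size, epochs = 100, 100
for epoch in (0, 3, 10, 60, 80):
    lr = adjust_learning_rate(opt, iteration=epoch * epoch_size, epoch_size=epoch_size,
                              hyp=hyp, epoch=epoch, epochs=epochs)
    print(epoch, lr)  # ramps up over the first 6 epochs, then lr0, lr0*0.1, lr0*0.01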
| 23,469
|
def initServer(port):
    """Initialize the server and load data from the database.
    Parameters
    ----------
    port : int
        The port the server will listen on.
    """
    global serversocket
    # get the local machine name
    host = socket.gethostname()
    # bind the socket to the local host and port
    serversocket.bind((host, port))
    # load data into memory
    with open('../database/gas_stations.txt', 'r') as gasStations:
        for gasStation in gasStations:
            gasStationDetails = gasStation.replace('\n', '').split(' ')
            gasStationDetails.pop(1)  # drop the id from the message
            insertIntoListOfGasStations(gasStationDetails)
    print("Server up and running!\n")
| 23,470
|
def reading_data(data_file):
    """
    Read in a data file (16 bit) and obtain the entire data set that is
    multiplexed between ECG and pulse data. The data is then extracted and
    appended to separate arrays.
    :param data_file: The binary data file to be loaded into the function
    :return fs: The sampling frequency
    :return pp: The pulse pressure data as an array
    :return ecg: The ECG data as an array
    """
try:
import numpy as np
from scipy.io import loadmat
m = loadmat(data_file)
x = dict(m)
fs = x.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = x.get('pulse_P')
pp = np.array(pp)
pp = pp.flatten()
ecg = x.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except ValueError:
try:
import numpy as np
import h5py
# for h5py
with h5py.File(data_file, 'r') as hf:
fs = hf.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = hf.get('pp')
pp = np.array(pp)
pp = pp.flatten()
ecg = hf.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except IOError:
from numpy import fromfile, empty, append
fs = fromfile(data_file, dtype='uint16', count=1, sep='')
hrData = fromfile(data_file, dtype='uint16', count=-1, sep='')
ecg = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
pp = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
for i in range(1, len(hrData), 2):
ecg = append(ecg, hrData[i])
for k in range(2, len(hrData), 2):
pp = append(pp, hrData[k])
print(ecg)
return fs, pp, ecg
| 23,471
|
def daemon_service(dstate, action, retries=10):
"""
perform systemctl command with action provided
Args:
dstate: Daemon state
action: action to be performed
retries: number of retries
"""
mark = __mark(dstate)
daemon = "{cluster}-{role}".format(cluster=dstate.cluster, role=dstate.type_)
daemon_id = "{id}".format(id=dstate.id_)
log.info("{} {} daemon".format(mark, action.upper()))
while retries:
retries -= 1
try:
getattr(dstate, action)()
__wait(60, msg="systemctl command executed")
res = dstate.remote.run(args=[run.Raw(dstate.show_cmd)], stdout=StringIO())
res = res.stdout.read().lower()
if "ActiveState=failed".lower() in res:
assert False, res
log.info("{} {} daemon - Successful ".format(mark, action))
return
except (AssertionError, CommandFailedError) as err:
log.error("{} Command execution failed - {}".format(mark, err))
log.warn("{} Trying to {}, Retries left: {}".format(mark, action,
retries))
cmd = "sudo systemctl reset-failed"
log.warn("{} Running '{}'".format(mark, cmd))
dstate.remote.run(args=[run.Raw(cmd)])
__wait(10, msg="Resetted failed daemons")
cmd = "sudo systemctl daemon-reload"
log.warn("{} Running '{}'".format(mark, cmd))
dstate.remote.run(args=[run.Raw(cmd)])
__wait(10, msg="Daemon reloaded")
log.warn("{} Restarting daemon".format(mark))
dstate.restart()
__wait(30, msg="Daemon Restarted")
else:
assert False, "{} Unable to complete {} action".format(mark, action)
| 23,472
|
def terminate_app(
app_name,
sigterm_timeout=DEFAULT_TIMEOUT_AFTER_SIGTERM,
sigkill_timeout=DEFAULT_TIMEOUT_AFTER_SIGKILL,
):
"""Terminate an application.
Kill the container with SIGTERM before deleting its resources.
If the container's process is already killed, proceed with deleting its
resouces.
If killing the container with SIGTERM fails, kill it with SIGKILL instead.
"""
log.debug("Terminate application '{}'".format(app_name))
try:
container.kill(app_name, "SIGTERM")
except container.ContainerKillError as error:
log.debug(
"Failed to terminate '{}', error: '{}".format(app_name, str(error))
)
if any(
x in str(error)
for x in [
container.ContainerState.ALREADY_STOPPED.value,
container.ContainerState.NOT_RUNNING.value,
]
):
log.debug("Application '{}' already stopped".format(app_name))
container.delete(app_name)
elif container.ContainerState.DOES_NOT_EXIST.value in str(error):
return
else:
raise error
else:
try:
_wait_for_app_stop(app_name, sigterm_timeout)
except AppStopTimeoutError:
log.debug(
"Failed to stop '{}' in '{}' seconds. Try SIGKILL.".format(
app_name, sigterm_timeout
)
)
kill_app(app_name, sigkill_timeout)
else:
container.delete(app_name)
log.info("Application '{}' terminated".format(app_name))
| 23,473
|
def two_body_mc_grad(env1: AtomicEnvironment, env2: AtomicEnvironment,
d1: int, d2: int, hyps: 'ndarray', cutoffs: 'ndarray',
cutoff_func: Callable = cf.quadratic_cutoff) \
-> (float, 'ndarray'):
"""2-body multi-element kernel between two force components and its
gradient with respect to the hyperparameters.
Args:
env1 (AtomicEnvironment): First local environment.
env2 (AtomicEnvironment): Second local environment.
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
hyps (np.ndarray): Hyperparameters of the kernel function (sig, ls).
cutoffs (np.ndarray): One-element array containing the 2-body
cutoff.
cutoff_func (Callable): Cutoff function of the kernel.
Return:
(float, np.ndarray):
Value of the 2-body kernel and its gradient with respect to the
hyperparameters.
"""
sig = hyps[0]
ls = hyps[1]
r_cut = cutoffs[0]
return two_body_mc_grad_jit(env1.bond_array_2, env1.ctype, env1.etypes,
env2.bond_array_2, env2.ctype, env2.etypes,
d1, d2, sig, ls, r_cut, cutoff_func)
| 23,474
|
def get_task_name(task):
"""Gets a tasks *string* name, whether it is a task object/function."""
task_name = ""
if isinstance(task, (types.MethodType, types.FunctionType)):
# If its a function look for the attributes that should have been
# set using the task() decorator provided in the decorators file. If
# those have not been set, then we should at least have enough basic
# information (not a version) to form a useful task name.
task_name = get_attr(task, 'name')
if not task_name:
name_pieces = [a for a in get_many_attr(task,
'__module__',
'__name__')
if a is not None]
task_name = join(name_pieces, ".")
else:
task_name = str(task)
return task_name
| 23,475
|
def EventAddPublication(builder, publication):
"""This method is deprecated. Please switch to AddPublication."""
return AddPublication(builder, publication)
| 23,476
|
def convModel(input1_shape, layers):
"""" convolutional model defined by layers. ith entry
defines ith layer. If entry is a (x,y) it defines a conv layer
with x kernels and y filters. If entry is x it defines a pool layer
with size x"""
model = Sequential()
for (i, layer) in enumerate(layers):
if isinstance(layer, int):
model.add(MaxPool1D(layer))
elif len(layer) == 2:
if i == 0:
model.add(Conv1D(layer[0], layer[1],
input_shape=input1_shape, padding='same',
activation='relu'))
else:
model.add(Conv1D(layer[0], layer[1], padding='same',
activation='relu'))
else:
print("Hodor")
model.add(GlobalMaxPool1D())
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))
model.compile(loss='binary_crossentropy',
metrics=['accuracy',precision],
optimizer=Adam(lr=3e-4))
print(model.inputs)
print(model.summary())
return model
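# Example — an illustrative call, assuming the Keras imports used by convModel
# (Sequential, Conv1D, MaxPool1D, ...) and the custom `precision` metric are in
# scope; the input shape and layer spec are arbitrary.
layers = [(32, 7),   # Conv1D with 32 filters of kernel size 7
          4,         # MaxPool1D with pool size 4
          (64, 5)]   # Conv1D with 64 filters of kernel size 5
model = convModel(input1_shape=(1000, 1), layers=layers)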
| 23,477
|
def cached_open_doc(db, doc_id, cache_expire=COUCH_CACHE_TIMEOUT, **params):
"""
Main wrapping function to open up a doc. Replace db.open_doc(doc_id)
"""
try:
cached_doc = _get_cached_doc_only(doc_id)
except ConnectionInterrupted:
cached_doc = INTERRUPTED
if cached_doc in (None, INTERRUPTED):
doc = db.open_doc(doc_id, **params)
if cached_doc is not INTERRUPTED:
do_cache_doc(doc, cache_expire=cache_expire)
return doc
else:
return cached_doc
| 23,478
|
def Voices_preload_and_split(subset='room-1', test_subset='room-2', seconds=3,
path=None, pad=False, splits=None, trim=True):
"""Index and split librispeech dataset.
Args:
subset (string): LibriSpeech subset to parse, load and split.
Currently can only handle one at a time
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
            samples with length below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
trim (bool): trims data by >.5. removes half of mics, and no noise data
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
            to each split including metadata.
Example:
Todo:
- Write Example.
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising VOiCESDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
test_df = load_or_index_subset(subset=test_subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# remove all None sound from df
if trim:
df = df[df['Noise'] != 'none']
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for VOiCES!')
dfs = default_speaker_splitter2(
dfs, df, trim=trim, test_df=test_df)
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
            # VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for VOiCES!')
sample_dfs = default_sample_splitter(sample_dfs, df, trim)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/'
'sample_splits/VOiCES_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
            raise ValueError("Data split does not add up to 1.")
        # this creates user-selected splits according to the list provided
# num speakers for train & test is the same.
# the below was solved with a system of equations
# amt data depends on train data
        n = int(len(unique_speakers) // (2 + 2 * splits[name][0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
category_id='speaker_id', splits=splits, N=0)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
category_id='speaker_id', splits=splits, N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs
| 23,479
|
def dense2bpseq(sequence: torch.Tensor, label: torch.Tensor) -> str:
"""converts sequence and label tensors to `.bpseq`-style string"""
seq_lab = dense2seqlab(sequence, label)
    return seqlab2bpseq(seq_lab)
| 23,480
|
def _base_app(config):
"""
init a barebone flask app.
if it is needed to create multiple flask apps,
use this function to create a base app which can be further modified later
"""
app = Flask(__name__)
app.config.from_object(config)
config.init_app(app)
bootstrap.init_app(app)
app.jinja_env.globals['datetime'] = datetime
app.jinja_env.globals['str_to_datetime'] = lambda x: from_string_to_datetime(x)
app.jinja_env.globals['format_float'] = lambda x: "%.2f" % x if x else None
app.jinja_env.globals['momentjs'] = momentjs
app.jinja_env.globals['get_collapsed_ids'] = get_collapsed
return app
| 23,481
|
def test_cpu_bilateral_k3_rgbd1(rgbd1, loops, benchmark):
"""
Will benchmark bilateral cpu
"""
a = Matrix3fRef(rgbd1)
b = benchmark(opf.filter.bilateral_K3, a, loops)
| 23,482
|
def applyC(input_map,nbar,MAS_mat,pk_map,Y_lms,k_grids,r_grids,v_cell,shot_fac,include_pix=True):
"""Apply the fiducial covariance to a pixel map x, i.e. C[x] = S[x]+N[x].
We decompose P(k;x) = \sum_l P_l(k) L_l(k.x) where x is the position of the second galaxy and use spherical harmonic decompositions.
P_l(k) are the even fiducial power spectrum multipoles, taken as an input (including the MAS window if relevant).
Parameters
----------
input_map : ndarray
The input map to apply the covariance to.
nbar : ndarray
Map of the background number density.
MAS_mat : ndarray
The mass assignment (i.e. compensation) matrix.
pk_map : ndarray
The fiducial power spectrum multipoles (only used with ML weights).
Y_lms : list
List of spherical harmonic functions, generated by the compute_spherical_harmonic_functions() function.
k_grids : ndarray
3D grids containing the (k_x,k_y,k_z) values.
r_grids : ndarray
3D grids containing the (r_x,r_y,r_z) values.
v_cell : float
Cell volume.
shot_fac : float
Shot noise factor.
include_pix : bool, optional
Whether to include the MAS effects in the covariance (default: True).
Returns
-------
ndarray
Covariance matrix applied to the input map.
"""
return applyS(input_map,nbar,MAS_mat,pk_map,Y_lms,k_grids,r_grids,v_cell,include_pix=include_pix)+applyN(input_map,nbar,MAS_mat,v_cell,shot_fac,include_pix=include_pix)
| 23,483
|
def rx_stop(ctx):
"""Stop LoraP2P RX."""
lora = Rak811()
lora.rx_stop()
if ctx.obj['VERBOSE']:
click.echo('LoraP2P RX stopped.')
lora.close()
| 23,484
|
def invert_injective_mapping(dictionary):
"""
Inverts a dictionary with a one-to-one mapping from key to value, into a
new dictionary with a one-to-one mapping from value to key.
"""
inverted_dict = {}
for key, value in iteritems(dictionary):
assert value not in inverted_dict, "Mapping is not 1-1"
inverted_dict[value] = key
return inverted_dict
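# Example — a small illustration (the mapping is made up); note the function relies
# on an `iteritems` helper (e.g. from `six`), which must be importable.
code_to_name = {'us': 'United States', 'fr': 'France'}
name_to_code = invert_injective_mapping(code_to_name)
assert name_to_code == {'United States': 'us', 'France': 'fr'}
# A mapping that is not one-to-one trips the assertion:
# invert_injective_mapping({'a': 1, 'b': 1})  # AssertionError: Mapping is not 1-1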
| 23,485
|
def biosql_dbseqrecord_to_seqrecord(dbseqrecord_, off=False):
"""Converts a DBSeqRecord object into a SeqRecord object.
Motivation of this function was two-fold: first, it makes type testing simpler; and second, DBSeqRecord does
not have a functional implementation of the translate method.
:param DBSeqRecord dbseqrecord_: The DBSeqRecord object to be converted.
:param bool off: Don't actually convert the DBSeqRecord. [Default: False]
:return:
"""
assert isinstance(dbseqrecord_, DBSeqRecord), ('Input must be a DBSeqRecord, '
'was of type {}!').format(type(dbseqrecord_))
if off:
return dbseqrecord_
else:
return SeqRecord(seq=Seq(data=str(dbseqrecord_.seq)), id=dbseqrecord_.id, name=dbseqrecord_.name,
description=dbseqrecord_.description, dbxrefs=dbseqrecord_.dbxrefs,
features=dbseqrecord_.features, annotations=dbseqrecord_.annotations,
letter_annotations=dbseqrecord_.letter_annotations)
| 23,486
|
def idf():
"""Commands related to idf"""
pass
| 23,487
|
def loadConfig(fileName):
""" Attempt to load the specified config file. If successful, clean the variables/data the
config file has setup """
if not os.path.isfile(fileName):
return False
if not os.access(fileName, os.R_OK):
warn('Unable to read config file: ' + fileName)
return False
try:
execfile(fileName)
# Cache this operation (whether or not we're in debug mode) for faster (hardly)
# debug spamming (from NZBLeecher)
if hasattr(Hellanzb, 'DEBUG_MODE') and Hellanzb.DEBUG_MODE is not None and \
Hellanzb.DEBUG_MODE != False:
# Set this ASAP for sane logging. FIXME: You could possibly lose some debug
# output during initialization if you're using the -d option
Hellanzb.DEBUG_MODE_ENABLED = True
# Ensure the types are lower case
for varName in ('NOT_REQUIRED_FILE_TYPES', 'KEEP_FILE_TYPES'):
types = getattr(Hellanzb, varName)
lowerTypes = [ext.lower() for ext in types]
setattr(Hellanzb, varName, lowerTypes)
if not hasattr(Hellanzb, 'MAX_RATE') or Hellanzb.MAX_RATE is None:
Hellanzb.MAX_RATE = 0
else:
Hellanzb.MAX_RATE = int(Hellanzb.MAX_RATE)
if not hasattr(Hellanzb, 'UNRAR_CMD') or Hellanzb.UNRAR_CMD is None:
Hellanzb.UNRAR_CMD = assertIsExe(['rar', 'unrar'])
else:
Hellanzb.UNRAR_CMD = assertIsExe([Hellanzb.UNRAR_CMD])
if not hasattr(Hellanzb, 'PAR2_CMD') or Hellanzb.PAR2_CMD is None:
Hellanzb.PAR2_CMD = assertIsExe(['par2'])
else:
Hellanzb.PAR2_CMD = assertIsExe([Hellanzb.PAR2_CMD])
if not hasattr(Hellanzb, 'MACBINCONV_CMD') or Hellanzb.MACBINCONV_CMD is None:
# macbinconv is optional when not explicitly specified in the conf
Hellanzb.MACBINCONV_CMD = None
try:
Hellanzb.MACBINCONV_CMD = assertIsExe(['macbinconv'])
except FatalError:
pass
else:
Hellanzb.MACBINCONV_CMD = assertIsExe([Hellanzb.MACBINCONV_CMD])
if not hasattr(Hellanzb, 'SKIP_UNRAR') or Hellanzb.SKIP_UNRAR is None:
Hellanzb.SKIP_UNRAR = False
if not hasattr(Hellanzb, 'SMART_PAR'):
Hellanzb.SMART_PAR = True
if not hasattr(Hellanzb, 'CATEGORIZE_DEST'):
Hellanzb.CATEGORIZE_DEST = True
if not hasattr(Hellanzb, 'NZB_ZIPS'):
Hellanzb.NZB_ZIPS = '.nzb.zip'
if not hasattr(Hellanzb, 'NZB_GZIPS'):
Hellanzb.NZB_GZIPS = '.nzb.gz'
if not hasattr(Hellanzb, 'DISABLE_COLORS'):
Hellanzb.DISABLE_COLORS = False
if not hasattr(Hellanzb, 'DISABLE_ANSI'):
Hellanzb.DISABLE_ANSI = False
Hellanzb.CACHE_LIMIT = unPrettyBytes(getattr(Hellanzb, 'CACHE_LIMIT', 0))
if not hasattr(Hellanzb, 'OTHER_NZB_FILE_TYPES'):
# By default, just match .nzb files in the queue dir
Hellanzb.NZB_FILE_RE = re.compile(r'(?i)\.(nzb)$')
else:
nzbTypeRe = r'(?i)\.(%s)$'
if not isinstance(Hellanzb.OTHER_NZB_FILE_TYPES, list):
Hellanzb.OTHER_NZB_FILE_TYPES = [Hellanzb.OTHER_NZB_FILE_TYPES]
if 'nzb' not in Hellanzb.OTHER_NZB_FILE_TYPES:
Hellanzb.OTHER_NZB_FILE_TYPES.append('nzb')
typesStr = '|'.join(Hellanzb.OTHER_NZB_FILE_TYPES)
Hellanzb.NZB_FILE_RE = re.compile(nzbTypeRe % typesStr)
# Make sure we expand pathnames so that ~ can be used
for expandPath in ('PREFIX_DIR', 'QUEUE_DIR', 'DEST_DIR', 'POSTPONED_DIR',
'CURRENT_DIR', 'TEMP_DIR', 'PROCESSING_DIR', 'STATE_XML_FILE',
'WORKING_DIR', 'LOG_FILE', 'DEBUG_MODE',
'UNRAR_CMD', 'PAR2_CMD', 'MACBINCONV_CMD',
'EXTERNAL_HANDLER_SCRIPT'):
if hasattr(Hellanzb, expandPath):
thisDir = getattr(Hellanzb, expandPath)
if thisDir is not None:
expandedDir = os.path.expanduser(thisDir)
setattr(Hellanzb, expandPath, expandedDir)
if not hasattr(Hellanzb, 'EXTERNAL_HANDLER_SCRIPT') or \
Hellanzb.EXTERNAL_HANDLER_SCRIPT is None or \
not os.path.isfile(Hellanzb.EXTERNAL_HANDLER_SCRIPT) or \
not os.access(Hellanzb.EXTERNAL_HANDLER_SCRIPT, os.X_OK):
Hellanzb.EXTERNAL_HANDLER_SCRIPT = None
debug('Found config file in directory: ' + os.path.dirname(fileName))
return True
except FatalError, fe:
error('A problem occurred while reading the config file', fe)
raise
except Exception, e:
msg = 'An unexpected error occurred while reading the config file'
error(msg, e)
raise
| 23,488
|
def update_collections_referencing_this_form(form):
"""Update all collections that reference the input form in their ``contents`` value.
When a form is deleted, it is necessary to update all collections whose
``contents`` value references the deleted form. The update removes the
reference, recomputes the ``contents_unpacked``, ``html`` and ``forms``
attributes of the affected collection and causes all of these changes to
percolate through the collection-collection reference chain.
:param form: a form model object
:returns: ``None``
.. note::
Getting the collections that reference this form by searching for those
whose ``forms`` attribute contain it is not quite the correct way to do
this because many of these collections will not *directly* reference this
form -- in short, this will result in redundant updates and backups.
"""
pattern = unicode(h.form_reference_pattern.pattern.replace('[0-9]+', str(form.id)))
collections_referencing_this_form = Session.query(Collection).\
filter(Collection.contents.op('regexp')(pattern)).all()
for collection in collections_referencing_this_form:
update_collection_by_deletion_of_referenced_form(collection, form)
| 23,489
|
def test_calc_ta_fwhm():
"""Test the calc_ta_fwhm function."""
tests = [(1133775752, 4.208367616629838e-08),
(1164110416, 4.098704979920858e-08),
(1194350120, 3.9949300287565104e-08)]
for obsid, expect_fwhm in tests:
ans = calc_ta_fwhm(obsid)
assert_almost_equal(ans, expect_fwhm, decimal=10)
| 23,490
|
def validate_gpy_model(models: Any):
"""Make sure that all elements of the list a GPRegression models"""
import GPy # pylint:disable=import-outside-toplevel
for model in models:
if not isinstance(model, GPy.models.GPRegression):
raise ValueError("The models must be an instance of GPy.model")
| 23,491
|
def parse_rpsbproc(handle):
"""Parse a results file generated by rpsblast->rpsbproc.
This function takes a handle corresponding to a rpsbproc output file.
local.rpsbproc returns a subprocess.CompletedProcess object, which contains the
    results as a byte string in its stdout attribute.
"""
# Sanitize input. Should work for either an open file handle (str, still contains \n
# when iterating) or byte-string stdout stored in a CompletedProcess object passed to this
# function as e.g. process.stdout.splitlines()
stdout = "\n".join(
line.decode().strip() if isinstance(line, bytes) else line.strip()
for line in handle
)
# Files produced by rpsbproc have anchors for easy parsing. Each query sequence
# is given a block starting/ending with QUERY/ENDQUERY, and domain hits for the
# query with DOMAINS/ENDDOMAINS.
query_pattern = re.compile(
r"QUERY\tQuery_\d+\tPeptide\t\d+\t([A-Za-z0-9.]+?)\n"
r"DOMAINS\n(.+?)ENDDOMAINS",
re.DOTALL,
)
domains = defaultdict(list)
for match in query_pattern.finditer(stdout):
query = match.group(1)
for row in match.group(2).split("\n"):
try:
domain = domain_from_row(row)
except ValueError:
continue
domains[query].append(domain)
return domains
| 23,492
|
def _invalidate(obj, depth=0):
"""
    Recursively validate type-annotated classes.
"""
annotations = get_type_hints(type(obj))
for k, v in annotations.items():
item = getattr(obj, k)
res = not_type_check(item, v)
if res:
return f"{k} field of {type(obj)} : {res}"
if isinstance(item, (list, tuple)):
for ii, i in enumerate(item):
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
if isinstance(item, dict):
for ii, i in item.items():
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
        else:
            sub = _invalidate(item, depth + 1)
            if sub is not None:
                return f"{k}." + sub
| 23,493
|
def to_edgelist(graph: Graph, filename: str):
"""
Save the given graph object as an edgelist file.
:param graph: the graph to be saved.
:param filename: the edgelist filename.
"""
with open(filename, 'w', encoding='utf-8') as fout:
# Iterate through every edge in the graph, and add the edge to the edgelist.
adj_mat = graph.adjacency_matrix().tocoo()
for vertex1, vertex2, weight in zip(adj_mat.row, adj_mat.col, adj_mat.data):
fout.write(f"{vertex1} {vertex2} {weight}\n")
| 23,494
|
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
| 23,495
|
def convtranspose2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0):
"""Calculates the output height and width of a feature map for a ConvTranspose2D operation."""
h_w, kernel_size, stride, pad, dilation, out_pad = num2tuple(h_w), num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation), num2tuple(out_pad)
pad = num2tuple(pad[0]), num2tuple(pad[1])
out_height = (h_w[0] - 1) * stride[0] - sum(pad[0]) + dilation[0] * (kernel_size[0] - 1) + out_pad[0] + 1
out_width = (h_w[1] - 1) * stride[1] - sum(pad[1]) + dilation[1] * (kernel_size[1] - 1) + out_pad[1] + 1
return out_height, out_width
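# Example — a quick numeric check, assuming num2tuple broadcasts scalars to pairs:
# doubling an 8x8 map with a 3x3 kernel, stride 2, padding 1 and output padding 1
# gives (8 - 1)*2 - 2*1 + 1*(3 - 1) + 1 + 1 = 16 along each dimension.
h, w = convtranspose2d_output_shape((8, 8), kernel_size=3, stride=2,
                                    pad=1, dilation=1, out_pad=1)
print(h, w)  # -> 16 16, matching torch.nn.ConvTranspose2d with the same settings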
| 23,496
|
def set_multizone_read_mode(session, read_mode, return_type=None, **kwargs):
"""
Modifies where data is read from in multizone environments.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type read_mode: str
:param read_mode: For multizone environments, if set to 'roundrobin', data
will be read from storage nodes in all protection zones. If set to
'localcopy', data from the local protection zone will be favored.
'roundrobin' is the default value. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_read_mode(read_mode)
body_values = {'readmode': read_mode}
path = '/api/settings/raid_read_mode.json'
return session.post_api(path=path, body=body_values,
return_type=return_type, **kwargs)
| 23,497
|
def main_fn():
"""Run the reinforcement learning loop
This tries to create a realistic way to run the reinforcement learning with
all default parameters.
"""
if goparams.DUMMY_MODEL:
# monkeypatch the hyperparams so that we get a quickly executing network.
dual_net.get_default_hyperparams = lambda **kwargs: {
'k': 8, 'fc_width': 16, 'num_shared_layers': 1, 'l2_strength': 1e-4, 'momentum': 0.9}
dual_net.TRAIN_BATCH_SIZE = 16
dual_net.EXAMPLES_PER_GENERATION = 64
    # monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
preprocessing.SHUFFLE_BUFFER_SIZE = 1000
print("Creating random initial weights...")
bootstrap()
| 23,498
|
def save_user(user):
"""
Function to save new user
"""
user.save_user()
| 23,499
|