content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def swipe_left(times: str = '1', start=None):
    """Swipes left on the screen.

    Examples
    --------
    .. code-block:: robotframework

        SwipeLeft                       # Swipes left once
        SwipeLeft   5                   # Swipes left five times
        SwipeLeft   1   Qentinel Touch  # Swipes left once, from the text "Qentinel Touch"
        SwipeLeft   5   Qentinel Touch  # Swipes left five times, from the text "Qentinel Touch"

    Parameters
    ----------
    times : str
        The amount of times swiped / length of the swipe
    start : str
        Optional starting point for the swipe

    Raises
    ------
    ValueError
        If the swipe amount is not an integer.
    """
    # Delegates to the shared swipe helper with a fixed 'left' direction.
    window.swipe('left', times, start)
def main():
    """Entry point: read the day's input file, solve both puzzle parts and
    report the execution time."""
    script = pathlib.Path(__file__).resolve()
    input_path = str(script.parent.parent) + "/inputs/" + str(script.stem)
    start_time = time.time()
    entries = extract(read_input(input_path))
    valid_tickets, invalid_fields_sum = part1(entries)
    print("Part 1: %d" % invalid_fields_sum)
    print("Part 2: %d" % part2(entries[0], entries[1], valid_tickets))
    end_time = time.time()
    print("Execution time: %f" % (end_time - start_time))
def sourceExtractImage(data, bkgArr=None, sortType='centre', verbose=False,
                       **kwargs):
    """Extract sources from data array and return enumerated objects sorted
    smallest to largest, and the segmentation map provided by source extractor

    Parameters (undocumented in original):
        data: 2-D image array.
        bkgArr: optional background array; defaults to an array of zeros.
        sortType: 'size' (ascending by pixel count) or 'centre'.
        **kwargs: forwarded to sep.extract; 'threshold' defaults to 0.05.
    """
    # sep requires native byte order; FITS data is commonly big-endian.
    data = np.array(data).byteswap().newbyteorder()
    if bkgArr is None:
        bkgArr = np.zeros(data.shape)
    # NOTE(review): bkgArr is constructed but never passed to sep.extract or
    # subtracted from `data` -- confirm whether the background is meant to
    # be used here.
    o = sep.extract(data, kwargs.pop('threshold', 0.05), segmentation_map=True,
                    **kwargs)
    if sortType == 'size':
        if verbose:
            print('Sorting extracted objects by radius from size')
        # Ascending by number of pixels (smallest objects first).
        sizeSortedObjects = sorted(
            enumerate(o[0]), key=lambda src: src[1]['npix']
        )
        return sizeSortedObjects, o[1]
    elif sortType == 'centre':
        if verbose:
            print('Sorting extracted objects by radius from centre')
        # NOTE(review): sep's 'x' is the column coordinate but is compared
        # against shape[0] (rows), and 'y' against shape[1]; for non-square
        # images the axes look swapped -- confirm intent.
        # The trailing [::-1] makes this *descending* distance from centre.
        centreSortedObjects = sorted(
            enumerate(o[0]),
            key=lambda src: (
                (src[1]['x'] - data.shape[0] / 2)**2
                + (src[1]['y'] - data.shape[1] / 2)**2
            )
        )[::-1]
        return centreSortedObjects, o[1]
def test_malformed_destinations():
    """WALE_SYSLOG_FACILITY contains bogus values.

    Every malformed value must be reported as invalid and fall back to
    LOG_USER.
    """
    for bogus in ('wat', 'local0,wat', ','):
        os.environ['WALE_SYSLOG_FACILITY'] = bogus
        out, valid_facility = log_help.get_syslog_facility()
        assert not valid_facility
        assert out == handlers.SysLogHandler.LOG_USER
def nodes():
    """
    Print the node table for the current lab.

    Exits with status 1 (and a red message) when no current lab is
    selected or the selected lab is not running.
    """
    server = VIRLServer()
    client = get_cml_client(server)
    current_lab = get_current_lab()
    # Guard clauses: bail out early on the two error conditions.
    if not current_lab:
        click.secho("No current lab selected", fg="red")
        exit(1)
    lab = safe_join_existing_lab(current_lab, client)
    if not lab:
        click.secho("Lab {} is not running".format(current_lab), fg="red")
        exit(1)
    node_list_table(lab.nodes())
def make_pizza(size, *toppings):
"""概述要制作的pizza"""
print("\nMaking a " + str(size) + "-inch pizza with the following toppings:")
for topping in toppings:
print("- " + topping) | 5,334,705 |
def _check_resource(resource_path: str) -> bool:
"""
Checks if the resource is file and accessible, or checks that all resources in directory are files and accessible
:param resource_path: A path to the resource
:return: True if resource is OK to upload, False otherwise
"""
if os.path.isfile(resource_path):
try:
open(resource_path, 'rb')
return True
except PermissionError or FileNotFoundError:
return False
return True | 5,334,706 |
def upload_log(blob_client, application):
    """
    Upload the Spark submit log (output.log) from the batch task working
    directory to the logs storage container.
    """
    working_dir = os.environ["AZ_BATCH_TASK_WORKING_DIR"]
    log_name = os.environ["SPARK_SUBMIT_LOGS_FILE"]
    upload_file_to_container(
        container_name=os.environ["STORAGE_LOGS_CONTAINER"],
        application_name=application.name,
        file_path=os.path.join(working_dir, log_name),
        blob_client=blob_client,
        use_full_path=False,
    )
def convert_one_fmt_off_pair(node: Node) -> bool:
    """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.

    Returns True if a pair was converted.
    """
    for leaf in node.leaves():
        previous_consumed = 0
        for comment in list_comments(leaf.prefix, is_endmarker=False):
            if comment.value not in FMT_PASS:
                # Remember how much prefix ordinary comments consumed so the
                # synthesized standalone comment keeps the text before them.
                previous_consumed = comment.consumed
                continue
            # We only want standalone comments. If there's no previous leaf or
            # the previous leaf is indentation, it's a standalone comment in
            # disguise.
            if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT:
                prev = preceding_leaf(leaf)
                if prev:
                    if comment.value in FMT_OFF and prev.type not in WHITESPACE:
                        continue
                    if comment.value in FMT_SKIP and prev.type in WHITESPACE:
                        continue
            ignored_nodes = list(generate_ignored_nodes(leaf, comment))
            if not ignored_nodes:
                continue
            first = ignored_nodes[0]  # Can be a container node with the `leaf`.
            parent = first.parent
            prefix = first.prefix
            if comment.value in FMT_OFF:
                # Drop the consumed part of the prefix (the fmt comment itself).
                first.prefix = prefix[comment.consumed :]
            if comment.value in FMT_SKIP:
                first.prefix = ""
            # Freeze the ignored region's exact source text inside one leaf.
            hidden_value = "".join(str(n) for n in ignored_nodes)
            if comment.value in FMT_OFF:
                hidden_value = comment.value + "\n" + hidden_value
            if comment.value in FMT_SKIP:
                hidden_value += " " + comment.value
            if hidden_value.endswith("\n"):
                # That happens when one of the `ignored_nodes` ended with a NEWLINE
                # leaf (possibly followed by a DEDENT).
                hidden_value = hidden_value[:-1]
            first_idx: Optional[int] = None
            for ignored in ignored_nodes:
                index = ignored.remove()
                if first_idx is None:
                    first_idx = index
            assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
            assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
            # Re-insert the frozen text as a single STANDALONE_COMMENT leaf at
            # the position of the first removed node.
            parent.insert_child(
                first_idx,
                Leaf(
                    STANDALONE_COMMENT,
                    hidden_value,
                    prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
                ),
            )
            return True
    return False
def merge_tf_records(output_path: str, src_records: List[str]) -> None:
    """Merge multiple TFRecord files into one.

    Args:
        output_path: Where to write the merged TFRecord file.
        src_records: A list of strings giving the location of the
            input TFRecord files.

    Returns:
        None
    """
    total = 0
    with tf.python_io.TFRecordWriter(output_path) as writer:
        print('Merging TFRecords', end='', flush=True)
        for src in src_records:
            for record in tf.python_io.tf_record_iterator(src):
                writer.write(record)
                total += 1
                # One dot per record as a progress indicator.
                print('.', end='', flush=True)
        print('{} records'.format(total))
        print()
def scale_down_deployments(node_name):
    """
    Scale down the deployments of a node as described in the documents
    of node replacement with LSO.

    Args:
        node_name (str): The node name

    """
    ocp = OCP(kind="node", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    pods_to_scale_down = get_node_pods_to_scale_down(node_name)
    for p in pods_to_scale_down:
        deployment_name = pod.get_deployment_name(p.name)
        # Fixed log-message typo: "deploymet" -> "deployment".
        log.info(f"Scale down deployment {deployment_name}")
        ocp.exec_oc_cmd(f"scale deployment {deployment_name} --replicas=0")
    # The crashcollector deployment is selected by label rather than name.
    log.info("Scale down rook-ceph-crashcollector")
    ocp.exec_oc_cmd(
        f"scale deployment --selector=app=rook-ceph-crashcollector,"
        f"node_name='{node_name}' --replicas=0"
    )
def validate_uuid4(uuid_string):
    """
    Source: https://gist.github.com/ShawnMilo/7777304

    Return True only when ``uuid_string`` represents a valid uuid4.
    The uuid module does the actual checking; passing ``version=4`` to
    ``UUID()`` is vital, otherwise any 32-character hex string would be
    considered valid.
    """
    try:
        parsed = UUID(uuid_string, version=4)
    except ValueError:
        # Not even a valid hex representation of a UUID.
        return False
    # UUID() silently repairs hex codes that are valid UUIDs but invalid
    # uuid4 values (it forces the version/variant bits), so compare the
    # round-tripped hex against the input with dashes stripped.
    return parsed.hex == uuid_string.replace('-', '')
def check_sp(sp):
    """Validate seasonal periodicity.

    Parameters
    ----------
    sp : int
        Seasonal periodicity

    Returns
    -------
    sp : int
        Validated seasonal periodicity

    Raises
    ------
    ValueError
        If ``sp`` is neither None nor a positive integer.
    """
    if sp is None:
        return sp
    if is_int(sp) and sp >= 1:
        return sp
    raise ValueError("`sp` must be a positive integer >= 1 or None")
def api_error_handler(func):
    """
    Handy decorator that turns any Media Cloud API exception raised by the
    wrapped view into a nicely formatted JSON error response, so client
    code can catch these at a low level and display error messages.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except MCException as exc:
            # Log the full traceback, then report the API's own message
            # and status code back to the browser.
            logger.exception(exc)
            return json_error_response(exc.message, exc.status_code)
    return wrapper
def gen_data(data_format, dtype, shape):
    """Generate input, expected result and an output buffer for testing the op."""
    data = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    head_np = data
    # Channel axes are excluded from the reduction; their position depends
    # on the layout.
    if data_format == "NC1HWC0":
        channel_dims = [1, 4]
    elif data_format == DEFAULT:
        channel_dims = [1]
    else:
        channel_dims = [len(shape) - 1]
    reduce_axis = tuple(ax for ax in range(len(shape)) if ax not in channel_dims)
    # float16 needs the pairwise (bisect) summation to match hardware accuracy.
    reducer = np_bisect_sum if dtype == "float16" else np.sum
    expect = reducer(data, axis=reduce_axis, keepdims=True)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, head_np, data, output
def clean_value_name(value: Any) -> str:
    """Returns a string representation of an object.

    Dispatches on the runtime type of ``value``: pydantic models, numpy
    scalars/arrays, functools.partial objects, toolz compositions, generic
    callables, paths and dicts are each normalised before stringification.
    """
    if isinstance(value, pydantic.BaseModel):
        value = str(value)
    elif isinstance(value, float) and int(value) == value:
        # Whole-valued floats render as ints (e.g. 2.0 -> "2").
        value = int(value)
        value = str(value)
    elif isinstance(value, (np.int64, np.int32)):
        value = int(value)
        value = str(value)
    elif isinstance(value, np.ndarray):
        value = np.round(value, 3)
        value = get_string(value)
    elif callable(value) and isinstance(value, functools.partial):
        # Represent a partial as {function, **bound-args}; positional args
        # are mapped back to their parameter names via the signature.
        sig = inspect.signature(value.func)
        args_as_kwargs = dict(zip(sig.parameters.keys(), value.args))
        args_as_kwargs.update(**value.keywords)
        clean_dict(args_as_kwargs)
        args_as_kwargs.pop("function", None)
        func = value.func
        # Unwrap nested partials down to the base callable.
        while hasattr(func, "func"):
            func = func.func
        value = dict(function=func.__name__, **args_as_kwargs)
        value = get_string(value)
    elif hasattr(value, "to_dict"):
        value = value.to_dict()
        value = get_string(value)
    elif isinstance(value, np.float64):
        value = float(value)
        value = str(value)
    elif type(value) in [int, float, str, bool]:
        # NOTE(review): plain builtins pass through *unchanged* (an int stays
        # an int) despite the declared `-> str` return type -- confirm.
        pass
    elif callable(value) and isinstance(value, toolz.functoolz.Compose):
        value = [clean_value_name(value.first)] + [
            clean_value_name(func) for func in value.funcs
        ]
        value = get_string(value)
    elif callable(value) and hasattr(value, "__name__"):
        value = value.__name__
    elif isinstance(value, PathPhidl):
        value = value.hash_geometry()
    elif isinstance(value, pathlib.Path):
        value = value.stem
    elif isinstance(value, dict):
        d = copy.deepcopy(value)
        for k, v in d.items():
            if isinstance(v, dict):
                d[k] = clean_dict(v)
            else:
                d[k] = clean_value_name(v)
        # NOTE(review): the cleaned copy ``d`` is never used -- get_string is
        # called on the original ``value``; confirm whether get_string(d)
        # was intended here.
        value = get_string(value)
    else:
        value = get_string(value)
    return value
def add_line_analyzer(func):
    """A simple decorator that adds a function to the list
    of all functions that analyze a single line of code.

    The decorated function is registered in ``LINE_ANALYZERS`` and
    returned wrapped; ``@wraps`` preserves its name and docstring
    (the original wrapper discarded this metadata).
    """
    LINE_ANALYZERS.append(func)

    @wraps(func)
    def wrapper(tokens):
        return func(tokens)

    return wrapper
def decode_account(source_a):
    """
    Take a string of the form "xrb_..." of length 64 and return
    the associated public key (as a bytes object)

    Raises ValueError for characters outside the account alphabet and
    AssertionError when length/prefix are wrong or the embedded blake2b
    checksum does not match.
    """
    assert len(source_a) == 64
    assert source_a.startswith('xrb_') or source_a.startswith('xrb-')
    number_l = 0
    # Decode the 60 base32-style characters into one big integer, 5 bits each.
    for character in source_a[4:]:
        if ord(character) < 0x30 or ord(character) >= 0x80:
            raise ValueError('Character out of range')
        # NOTE(review): `ord(byte)` below implies account_decode returns a
        # one-character string (with '~' marking an invalid character) --
        # confirm against its definition.
        byte = account_decode(character)
        if byte == '~':
            raise ValueError('Invalid character')
        number_l <<= 5
        number_l += ord(byte)
    # The top 256 bits are the public key.
    account = (number_l >> 40).to_bytes(length=32, byteorder='big')
    # The digest to check is in the lowest 40 bits of the address
    check = number_l & 0xffffffffff
    hash = hashlib.blake2b(digest_size=5)
    hash.update(account)
    validation = hash.digest()
    assert check.to_bytes(length=5, byteorder='little') == validation
    # Reference C++ implementation kept for comparison (no-op string literal):
    """
    if (!result)
    {
        *this = (number_l >> 40).convert_to <rai::uint256_t> ();
        uint64_t check (number_l.convert_to <uint64_t> ());
        check &= 0xffffffffff;
        uint64_t validation (0);
        blake2b_state hash;
        blake2b_init (&hash, 5);
        blake2b_update (&hash, bytes.data (), bytes.size ());
        blake2b_final (&hash, reinterpret_cast <uint8_t *> (&validation), 5);
        result = check != validation;
    }
    """
    return account
def update_epics_order_in_bulk(bulk_data: list, field: str, project: object):
    """
    Update the order of some epics.

    `bulk_data` should be a list of dicts with the following format:
    [{'epic_id': <value>, 'order': <value>}, ...]
    """
    current_orders = {epic.id: getattr(epic, field)
                      for epic in project.epics.all()}
    requested_orders = {entry["epic_id"]: entry["order"]
                        for entry in bulk_data}
    # Mutates current_orders in place to reflect the requested ordering.
    apply_order_updates(current_orders, requested_orders)
    events.emit_event_for_ids(ids=current_orders.keys(),
                              content_type="epics.epic",
                              projectid=project.pk)
    db.update_attr_in_bulk_for_ids(current_orders, field, models.Epic)
    return current_orders
def calc_precision(output, target):
    """Calculate per-category precision from tensors of shape (b, c, x, y).

    A prediction counts as positive only where ``output`` equals 1 exactly.
    Categories with zero true positives are assigned a precision of 1.0.
    """
    precisions = []
    for channel in range(target.size(1)):
        pred = output[:, channel]
        ref = target[:, channel]
        # Shift non-1 predictions away from any valid binary label before
        # comparing, so only exact 1-predictions can match.
        adjusted = pred - (pred != 1).int()
        tp = (adjusted == ref).int().sum().item()
        fp = (adjusted == (ref != 1).int()).int().sum().item()
        precisions.append(1.0 if tp == 0 else tp / (tp + fp))
    return precisions
def _move_files(files):
    """
    Launches a window to select the new folder destinations for the files.

    Parameters
    ----------
    files : list
        A nested list of lists of lists of strings corresponding
        to file paths.
    """
    # One "Dataset N" label plus one folder-input/browse-button row per dataset.
    text_layout = [[sg.Text(f'Dataset {i + 1}')] for i in range(len(files))]
    files_layout = [
        [sg.Input('', key=f'folder_{i}', enable_events=True,
                  disabled=True),
         sg.FolderBrowse(target=f'folder_{i}', key=f'button_{i}')]
        for i in range(len(files))
    ]
    # Interleave labels and input rows.
    tot_layout = [i for j in zip(text_layout, files_layout) for i in j]
    # Only make the column scrollable once there are enough rows to need it.
    if len(files) > 2:
        scrollable = True
        size = (600, 200)
    else:
        scrollable = False
        size = (None, None)
    layout = [
        [sg.Text('Choose the folder(s) to move files to:', size=(30, 1))],
        [sg.Frame('', [[sg.Column(tot_layout, scrollable=scrollable,
                                  vertical_scroll_only=True, size=size)]])],
        [sg.Button('Submit', bind_return_key=True,
                   button_color=utils.PROCEED_COLOR),
         sg.Check('All Same Folder', key='same_folder',
                  enable_events=True, disabled=len(files) == 1)]
    ]
    try:
        window = sg.Window('Move Files', layout, icon=utils._LOGO)
        while True:
            event, values = window.read()
            if event == sg.WIN_CLOSED:
                utils.safely_close_window(window)
            elif event.startswith('folder_') and values['same_folder']:
                # Mirror folder 0 into every other input while "same folder"
                # is checked.
                for i in range(1, len(files)):
                    window[f'folder_{i}'].update(value=values['folder_0'])
            elif event == 'same_folder':
                if values['same_folder']:
                    for i in range(1, len(files)):
                        window[f'folder_{i}'].update(value=values['folder_0'])
                        window[f'button_{i}'].update(disabled=True)
                else:
                    for i in range(1, len(files)):
                        window[f'button_{i}'].update(disabled=False)
            elif event == 'Submit':
                if any(not values[key] for key in values if key.startswith('folder_')):
                    sg.popup('Please enter folders for all datasets',
                             title='Error', icon=utils._LOGO)
                else:
                    break
        window.close()
        del window
    except (utils.WindowCloseError, KeyboardInterrupt):
        print('\nMoving files manually ended early.\nMoving on with program.')
    else:
        try:
            folders = [values[f'folder_{i}'] for i in range(len(files))]
            for i, file_list in enumerate(files):
                # Will automatically rename files if there is already a file with
                # the same name in the destination folder.
                file_mover(file_list, new_folder=folders[i], skip_same_files=False)
        except Exception:
            print('\nException occured during moving files:\n')
            print(traceback.format_exc())
            print('Moving on with program.')
def upper_credible_choice(self):
    """Pick the bandit whose 95% credible bound is largest. See chapter 5.

    NOTE(review): the original docstring said "best LOWER BOUND", but the
    formula mean + 1.65*std is an *upper* bound (matching the function
    name) -- computation kept exactly as-is.
    """
    a = self.wins + 1
    b = self.trials - self.wins + 1
    # Beta(a, b) mean plus 1.65 standard deviations.
    bound = a / (a + b) + 1.65 * np.sqrt((a * b) / ((a + b) ** 2 * (a + b + 1)))
    return np.argmax(bound)
def download(bell, evnt):
    """
    Download the given event's recording from the given doorbell.

    Returns True when the video was downloaded successfully, and False
    otherwise (including when the recording is not yet ready).
    """
    event_id = evnt.get("id")
    event_time = evnt.get("created_at")
    stamp = event_time.strftime("%Y%m%d_%H%M%S")
    filename = f"{DOWNLOADFOLDER}/{bell.name}-{stamp}-{event_id}.mp4"
    filename = filename.replace(" ", "_")
    print(filename)
    status = evnt.get("recording", {}).get("status")
    if status != "ready":
        print(f"Event: {event_id} is {status}")
        return False
    try:
        bell.recording_download(event_id, filename=filename)
        # Stamp the file's access/modification times with the event time.
        os.utime(
            filename, (event_time.timestamp(), event_time.timestamp())
        )
        return True
    except Exception as ex:  # pylint: disable=broad-except
        print(ex)
        return False
def copytask(filename, num_seqs, len_min, len_max, num_vals):
    """
    Generate sequences of random binary vectors for the copy task
    and save as a .tfrecords file.

    Args:
        filename - the name of the file to save
        num_seqs - the number of sequences to generate
        len_min/max - the minimum and maximum length of the sequences
        num_vals - the number of values per step

    Each sequence of size [length, num_vals] is flattened to shape
    [length * num_vals] when written to the file.
    """
    print("Writing to file %s" % filename)
    with tf.python_io.TFRecordWriter(filename) as writer:
        for _ in range(num_seqs):
            length = np.random.randint(len_min, len_max + 1)
            seq = np.random.randint(0, 2, size=(length, num_vals))
            # The last step is reserved for the delimiter only.
            seq[-1, :] = 0.
            seq = seq.astype(np.float32)
            # Delimiter vector: all zeros except the final value.
            delim = np.zeros(shape=(1, num_vals), dtype=np.float32)
            delim[0, -1] = 1.
            # Target input gets the delimiter prepended; target output
            # gets it appended.
            tgt_in = np.concatenate([delim, seq], 0).reshape(-1)
            tgt_out = np.concatenate([seq, delim], 0).reshape(-1)
            flat_seq = seq.reshape(-1)
            example = tf.train.Example(features=tf.train.Features(feature={
                'seq_len': _int64_feature(length),
                'seq_data': _floats_feature(flat_seq),
                'tgt_in': _floats_feature(tgt_in),
                'tgt_out': _floats_feature(tgt_out)}))
            writer.write(example.SerializeToString())
def get_unique_id():
    """
    Generate a unique random docname.

    :return: length 32 lowercase hex string
    """
    # UUID.hex is already the dash-free 32-character form; no string
    # surgery (str + replace) needed.
    return uuid.uuid4().hex
def callback():
    """Step 3: Retrieving an access token.

    The provider has redirected the user back to our registered callback
    URL with an authorization code in the query string; exchange it for
    refresh and access tokens, stash them in the session, and either shut
    down or echo the token back as JSON.
    """
    # Exchange the authorization code embedded in the redirect URL.
    token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
    session['oauth_token'] = token_dict
    if app.config['call_close']:
        return redirect(url_for('shutdown'))
    return jsonify(token_dict)
def load_azure_auth() -> AzureSSOClientConfig:
    """
    Build the Azure SSO client configuration from the LINEAGE conf section.
    """
    return AzureSSOClientConfig(
        clientId=conf.get(LINEAGE, "client_id"),
        clientSecret=conf.get(LINEAGE, "client_secret"),
        authority=conf.get(LINEAGE, "authority"),
        # scopes is optional; default to no scopes when absent.
        scopes=conf.getjson(LINEAGE, "scopes", fallback=[]),
    )
def get_map_zones(map_id):
    """Get map zones.

    .. :quickref: Zones; Get map zones.

    **Example request**:

    .. sourcecode:: http

        GET /zones/map/1 HTTP/1.1

    **Example response**:

    .. sourcecode:: json

        [
            {"id": 1, "p1": [0, 0, 0], "p2": [256, 256, 256],
             "zone_type": "start"},
            {"id": 2, "p1": [1000, 1000, 1000], "p2": [1256, 1256, 1256],
             "zone_type": "end"},
            {"id": 1, "zone_type": "cp", "map_id": 1, "cp_index": 1,
             "zone": {"id": 3, "p1": [500, 500, 500],
                      "p2": [756, 756, 756]}}
        ]

    :query map_id: map id.
    :status 200: Success.
    :status 404: Map not found.
    :returns: List of zones
    """
    map_ = Map.query.filter_by(id_=map_id).first()
    if map_ is None:
        return make_response(jsonify({"message": "Map not found."}), 404)

    zones = []

    def _append_zone(zone_id, zone_type):
        # Look up a zone row by id and append it tagged with its type.
        if zone_id is None:
            return
        zone = Zone.query.filter_by(id_=zone_id).first()
        if zone:
            tagged = zone.json
            tagged["zone_type"] = zone_type
            zones.append(tagged)

    _append_zone(map_.start_zone, "start")
    _append_zone(map_.end_zone, "end")

    for checkpoint in MapCheckpoint.query.filter_by(map_id=map_id).all():
        zones.append(checkpoint.json)

    return make_response(jsonify(zones), 200)
def rgb2he_macenko(img, D=None, alpha=1.0, beta=0.15, white=255.0,
                   return_deconvolution_matrix=False):
    """
    Performs stain separation from RGB images using the method in
    M Macenko, et al. "A method for normalizing histology slides for quantitative analysis",
    IEEE ISBI, 2009. dx.doi.org/10.1109/ISBI.2009.5193250

    Args:
        img (numpy.ndarray): RGB input image
        D (numpy.ndarray): a deconvolution matrix. If None, one will be computed from the image
        alpha (float): tolerance for pseudo-min/-max
        beta (float): OD threshold for transparent pixels
        white (float): white level (in each channel)
        return_deconvolution_matrix (bool): if True, the deconvolution matrix is also returned

    Returns:
        three 2d arrays for H-, E- and remainder channels, respectively.
        If return_deconvolution_matrix is True, the deconvolution matrix is also returned.
    """
    assert (img.ndim == 3)
    assert (img.shape[2] == 3)
    # Flatten to (n_pixels, 3) for the linear algebra below.
    I = img.reshape((img.shape[0] * img.shape[1], 3))
    OD = -np.log((I + 1.0) / white)  # optical density
    if D is None:
        # the deconvolution matrix is not provided so one has to be estimated from the
        # image
        rows = (OD >= beta).all(axis=1)
        if not any(rows):
            # no rows with all pixels above the threshold
            raise RuntimeError('optical density below threshold')
        ODhat = OD[rows, :]  # discard transparent pixels
        # NOTE(review): the 3-value unpacking implies a scipy-style eig that
        # returns more than (values, vectors) -- confirm which eig is imported.
        u, V, _ = eig(np.cov(ODhat.T))
        idx = np.argsort(u)  # get a permutation to sort eigenvalues increasingly
        V = V[:, idx]  # sort eigenvectors
        theta = np.dot(ODhat, V[:, 1:3])  # project optical density onto the eigenvectors
        # corresponding to the largest eigenvalues
        phi = np.arctan2(theta[:, 1], theta[:, 0])
        # Robust pseudo-extremes of the projection angle.
        min_phi, max_phi = np.percentile(phi, [alpha, 100.0 - alpha], axis=None)
        u1 = np.dot(V[:, 1:3], np.array([[np.cos(min_phi)], [np.sin(min_phi)]]))
        u2 = np.dot(V[:, 1:3], np.array([[np.cos(max_phi)], [np.sin(max_phi)]]))
        if u1[0] > u2[0]:
            D = np.hstack((u1, u2)).T
        else:
            D = np.hstack((u2, u1)).T
        # Third stain vector is orthogonal to the first two; rows normalized.
        D = np.vstack((D, np.cross(D[0, ], D[1, ])))
        D = D / np.reshape(np.repeat(np.linalg.norm(D, axis=1), 3), (3, 3), order=str('C'))
    # Unmix: solve D^T x = OD for each pixel, then restore image shape.
    img_res = np.linalg.solve(D.T, OD.T).T
    img_res = np.reshape(img_res, img.shape, order=str('C'))
    if not return_deconvolution_matrix:
        D = None
    return rescale_intensity(img_res[:, :, 0], out_range=(0, 1)), \
        rescale_intensity(img_res[:, :, 1], out_range=(0, 1)), \
        rescale_intensity(img_res[:, :, 2], out_range=(0, 1)), \
        D
def validated(base_model=None):
    """
    Decorates an ``__init__`` method with typed parameters with validation
    and auto-conversion logic.

    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y

    Classes with decorated initializers can be instantiated using arguments of
    another type (e.g. an ``y`` argument of type ``str`` ). The decorator
    handles the type conversion logic.

    >>> c = ComplexNumber(y='42')
    >>> (c.x, c.y)
    (0.0, 42.0)

    If the bound argument cannot be converted, the decorator throws an error.

    >>> c = ComplexNumber(y=None)
    Traceback (most recent call last):
        ...
    pydantic.error_wrappers.ValidationError: 1 validation error for ComplexNumberModel
    y
      none is not an allowed value (type=type_error.none.not_allowed)

    Internally, the decorator delegates all validation and conversion logic to
    `a Pydantic model <https://pydantic-docs.helpmanual.io/>`_, which can be
    accessed through the ``Model`` attribute of the decorated initiazlier.

    >>> ComplexNumber.__init__.Model
    <class 'ComplexNumberModel'>

    The Pydantic model is synthesized automatically from on the parameter
    names and types of the decorated initializer. In the ``ComplexNumber``
    example, the synthesized Pydantic model corresponds to the following
    definition.

    >>> class ComplexNumberModel(BaseValidatedInitializerModel):
    ...     x: float = 0.0
    ...     y: float = 0.0

    Clients can optionally customize the base class of the synthesized
    Pydantic model using the ``base_model`` decorator parameter. The default
    behavior uses :class:`BaseValidatedInitializerModel` and its
    `model config <https://pydantic-docs.helpmanual.io/#config>`_.

    See Also
    --------
    BaseValidatedInitializerModel
        Default base class for all synthesized Pydantic models.
    """
    def validator(init):
        init_qualname = dict(inspect.getmembers(init))["__qualname__"]
        init_clsnme = init_qualname.split(".")[0]
        init_params = inspect.signature(init).parameters
        # Build pydantic field specs (annotation, default) from the
        # initializer's positional-or-keyword parameters, skipping `self`.
        init_fields = {
            param.name: (
                param.annotation
                if param.annotation != inspect.Parameter.empty
                else Any,
                param.default
                if param.default != inspect.Parameter.empty
                else ...,
            )
            for param in init_params.values()
            if param.name != "self"
            and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        }
        # Synthesize the model, either with the default config or on the
        # caller-supplied base model.
        if base_model is None:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __config__=BaseValidatedInitializerModel.Config,
                **init_fields,
            )
        else:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __base__=base_model,
                **init_fields,
            )

        def validated_repr(self) -> str:
            return dump_code(self)

        def validated_getnewargs_ex(self):
            # Supports pickling: reconstruct from the saved init arguments.
            return (), self.__init_args__

        @functools.wraps(init)
        def init_wrapper(*args, **kwargs):
            self, *args = args
            # Map positional args back onto their parameter names.
            nmargs = {
                name: arg
                for (name, param), arg in zip(
                    list(init_params.items()), [self] + args
                )
                if name != "self"
            }
            model = PydanticModel(**{**nmargs, **kwargs})
            # merge nmargs, kwargs, and the model fields into a single dict
            all_args = {**nmargs, **kwargs, **model.__dict__}
            # save the merged dictionary for Representable use, but only if the
            # __init_args__ is not already set in order to avoid overriding a
            # value set by a subclass initializer in super().__init__ calls
            if not getattr(self, "__init_args__", {}):
                self.__init_args__ = OrderedDict(
                    {
                        name: arg
                        for name, arg in sorted(all_args.items())
                        if type(arg) != torch.nn.ParameterDict
                    }
                )
                self.__class__.__getnewargs_ex__ = validated_getnewargs_ex
                self.__class__.__repr__ = validated_repr
            return init(self, **all_args)

        # attach the Pydantic model as the attribute of the initializer wrapper
        setattr(init_wrapper, "Model", PydanticModel)
        return init_wrapper
    return validator
def make_header_table(fitsdir, search_string='*fl?.fits'):
    """Construct a table of key-value pairs from FITS headers of images
    used in dolphot run. Columns are the set of all keywords that appear
    in any header, and rows are per image.

    Inputs
    ------
    fitsdir : string or Path
        directory of FITS files
    search_string : string or regex pattern, optional
        string to search for FITS images with. Default is
        '*fl?.chip?.fits'

    Returns
    -------
    df : DataFrame
        A table of header key-value pairs indexed by image name.
    """
    keys = []
    headers = {}
    # force fitsdir to Path
    if type(fitsdir) == str:
        fitsdir = Path(fitsdir)
    fitslist = list(fitsdir.glob(search_string))
    if len(fitslist) == 0:  # this shouldn't happen
        print('No fits files found in {}!'.format(fitsdir))
        return pd.DataFrame()
    # get headers from each image, in parallel across all-but-one CPU
    with Pool(cpu_count()-1) as p:
        all_headers = p.map(combine_headers, fitslist)
    for name, head in all_headers:
        headers.update({name:head})
        keys += [k for k in head]
    # Union of all keywords, minus non-data entries.
    unique_keys = np.unique(keys).tolist()
    remove_keys = ['COMMENT', 'HISTORY', '']
    [unique_keys.remove(key) for key in remove_keys if key in unique_keys]
    # construct dataframe
    df = pd.DataFrame(columns=unique_keys)
    for fitsname, head in headers.items():
        row = pd.Series(dict(head.items()))
        # Row index is the image name with the '.fits' suffix stripped.
        df.loc[fitsname.split('.fits')[0]] = row.T
    # I do not know why dask is so bad at mixed types
    # but here is my hacky solution
    try:
        df = df.infer_objects()
    except Exception:
        print("Could not infer objects")
    df_obj = df.select_dtypes(['object'])
    # iterate over columns and force types
    for c in df_obj:
        dtype = pd.api.types.infer_dtype(df[c], skipna=True)
        if dtype == 'string':
            df.loc[:,c] = df.loc[:,c].astype(str)
        elif dtype in ['float', 'mixed-integer-float']:
            df.loc[:,c] = df.loc[:,c].astype(float)
        elif dtype == 'integer':
            df.loc[:,c] = df.loc[:,c].astype(int)
        elif dtype == 'boolean':
            df.loc[:,c] = df.loc[:,c].astype(bool)
        else:
            print('Unrecognized datatype "{}" for column {}; coercing to string'.format(dtype, c))
            df.loc[:,c] = df.loc[:,c].astype(str)
    # lambda function to construct detector-filter pairs
    lamfunc = lambda x: '-'.join(x[~(x.str.startswith('CLEAR') | x.str.startswith('nan'))])
    df['FILT_DET'] = df.filter(regex='(DETECTOR)|(FILTER)').astype(str).apply(lamfunc, axis=1)
    return df
def test_main_incorrect_json_structure(monkeypatch, capfd, service_account_json, caplog):
    """Tests the execution of main function when incorrectly formatted service account json is provided."""
    from RubrikPolaris import main
    # NOTE(review): monkeypatch.setattr is called with only (target, value);
    # this form requires mock_params/mock_command to be dotted-path strings
    # or relies on pytest's two-argument variant -- confirm their definitions.
    monkeypatch.setattr(mock_params, lambda: {
        "url": "rubrik-se-beta",
        "service_account_json": service_account_json})
    monkeypatch.setattr(mock_command, lambda: "some_command")
    # main() is expected to exit via SystemExit on the malformed JSON.
    with pytest.raises(SystemExit):
        capfd.close()
        caplog.set_level(50)  # capture only CRITICAL-level records
        main()
def parse_head_final_tags(ctx, lang, form):
    """Parses tags that are allowed at the end of a form head from the end
    of the form. This can also be used for parsing the final gender etc tags
    from translations and linkages.

    Returns a (form, tags) pair: the form with any recognized trailing tag
    text removed, and the list of extracted tag strings.
    """
    assert isinstance(ctx, Wtp)
    assert isinstance(lang, str)  # Should be language that "form" is for
    assert isinstance(form, str)
    # print("parse_head_final_tags: lang={} form={!r}".format(lang, form))
    # Make sure there are no double spaces in the form as this code does not
    # handle them otherwise.
    form = re.sub(r"\s+", " ", form.strip())
    if not form:
        return form, []
    origform = form
    tags = []
    # If parsing for certain Bantu languages (e.g., Swahili), handle
    # some extra head-final tags first
    if lang in head_final_bantu_langs:
        m = re.search(head_final_bantu_re, form)
        if m is not None:
            tagkeys = m.group(1)
            # Don't strip the suffix when the page title itself ends with it
            # (then it is presumably part of the word).
            if not ctx.title.endswith(tagkeys):
                form = form[:m.start()]
                v = head_final_bantu_map[tagkeys]
                # A leading "?" marks a mapping we are not sure about.
                if v.startswith("?"):
                    v = v[1:]
                    ctx.debug("suspicious suffix {!r} in language {}: {}"
                              .format(tagkeys, lang, origform))
                tags.extend(v.split())
    # If parsing for certain Semitic languages (e.g., Arabic), handle
    # some extra head-final tags first
    if lang in head_final_semitic_langs:
        m = re.search(head_final_semitic_re, form)
        if m is not None:
            tagkeys = m.group(1)
            if not ctx.title.endswith(tagkeys):
                form = form[:m.start()]
                v = head_final_semitic_map[tagkeys]
                if v.startswith("?"):
                    v = v[1:]
                    ctx.debug("suspicious suffix {!r} in language {}: {}"
                              .format(tagkeys, lang, origform))
                tags.extend(v.split())
    # If parsing for certain other languages (e.g., Lithuanian,
    # French, Finnish), handle some extra head-final tags first
    if lang in head_final_other_langs:
        m = re.search(head_final_other_re, form)
        if m is not None:
            tagkeys = m.group(1)
            if not ctx.title.endswith(tagkeys):
                form = form[:m.start()]
                tags.extend(head_final_other_map[tagkeys].split(" "))
    # Handle normal head-final tags
    m = re.search(head_final_re, form)
    if m is not None:
        tagkeys = m.group(3)
        # Only replace tags ending with numbers in languages that have
        # head-final numeric tags (e.g., Bantu classes); also, don't replace
        # tags if the main title ends with them (then presume they are part
        # of the word)
        # print("head_final_tags form={!r} tagkeys={!r} lang={}"
        #       .format(form, tagkeys, lang))
        tagkeys_contains_digit = re.search(r"\d", tagkeys)
        if ((not tagkeys_contains_digit or
             lang in head_final_numeric_langs) and
                not ctx.title.endswith(" " + tagkeys)):
            if not tagkeys_contains_digit or lang in head_final_numeric_langs:
                form = form[:m.start()]
                v = xlat_head_map[tagkeys]
                if v.startswith("?"):
                    v = v[1:]
                    ctx.debug("suspicious suffix {!r} in language {}: {}"
                              .format(tagkeys, lang, origform))
                tags.extend(v.split())
    # Generate warnings about words ending in " or" after processing
    if ((form.endswith(" or") and not origform.endswith(" or")) or
            re.search(r" (1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|"
                      r"1a|2a|9a|10a|m1|f1|f2|m2|f3|m3|f4|m4|f5|m5|or|\?)"
                      r"($|/| (f|m|sg|pl|anim|inan))", form) or
            form.endswith(" du")):
        if form not in ok_suspicious_forms:
            ctx.debug("suspicious unhandled suffix in {}: {!r}, originally {!r}"
                      .format(lang, form, origform))
    # print("parse_head_final_tags: form={!r} tags={}".format(form, tags))
    return form, tags
def cars_to_people(df, peoplePerCar=1.7, percentOfTransit=.005):
    """Convert per-terminal vehicle demand into people demand.

    Car counts are multiplied by the average occupancy and inflated by the
    transit share so that the result covers all travelers, not just those
    arriving by car.

    Args:
        df: demand DataFrame with columns Arrive_A..Arrive_E,
            Depart_A..Depart_E and pass_thru.
        peoplePerCar (float): average vehicle occupancy.
        percentOfTransit (float): fraction of travelers using transit.

    Returns:
        DataFrame with one "<col>_people" column per input column, plus
        Depart_total, Arrival_total, pass_thru and Total.
    """
    columns = ['Arrive_A', 'Arrive_B', 'Arrive_C', 'Arrive_D', 'Arrive_E',
               'Depart_A', 'Depart_B', 'Depart_C', 'Depart_D', 'Depart_E']
    # Scale factor: people per car, inflated to include the transit share.
    factor = peoplePerCar / (1 - percentOfTransit)
    tmp_df = pd.DataFrame()
    for col in columns:
        # Vectorized column arithmetic replaces the original per-row loop.
        tmp_df[col + "_people"] = df[col] * factor
    depart_columns = [c for c in tmp_df.columns if c.startswith('Depart')]
    arrive_columns = [c for c in tmp_df.columns if c.startswith('Arrive')]
    tmp_df['Depart_total'] = tmp_df[depart_columns].sum(axis=1)
    tmp_df['Arrival_total'] = tmp_df[arrive_columns].sum(axis=1)
    tmp_df['pass_thru'] = df['pass_thru']
    tmp_df['Total'] = tmp_df[['Depart_total', 'Arrival_total']].sum(axis=1)
    return tmp_df
def get_dup_key_val(errmsg):
    """Return the duplicate key referenced in an error message.

    Parameters
    ----------
    errmsg : |str|
        A pymongo `DuplicateKeyError` message.

    Returns
    -------
    |dict|
        The key(s) and value(s) of the duplicate key.

    Example
    -------
    >>> errmsg = ('insertDocument :: caused by :: 11000 E11000 duplicate '
    >>>           'key error collection: cyphon.posts index: '
    >>>           '_platform_1_doc_id_1 dup key: { : twitter", : '
    >>>           '"ObjectId(\'5543769ef861c942838c7ee9\') }')
    >>> get_dup_key_val(errmsg)
    {'_platform': 'twitter', '_doc_id': ObjectId('5543769ef861c942838c7ee9')}
    """
    parts = errmsg.split(' dup key: { ')
    index_name = extract_substring(parts[0], 'index: ', '_', 'right').strip()
    raw_values = extract_substring(parts[1], ':', '}').strip()
    # A compound index name interleaves the field names with "_<n>_" markers,
    # so splitting on that pattern recovers the individual field names.
    keys = re.split(r'_[0-9]+_', index_name)
    values = raw_values.split(', : ')
    if len(keys) != len(values):  # pragma: no cover
        raise ValueError('cannot match index keys with values')
    return {key: restore_type_from_str(value)
            for key, value in zip(keys, values)}
def mock_archive(request, tmpdir_factory):
    """Creates a very simple archive directory with a configure script and a
    makefile that installs to a prefix. Tars it up into an archive.

    Pytest fixture: ``request.param`` supplies ``(extension, tar_flag)``
    pairs (e.g. ``('.tar.gz', 'z')``) so the fixture can be parametrized
    over archive formats. Yields an ``Archive`` namedtuple with the
    ``file://`` url, expanded path, archive file path and source subdir.
    """
    tar = spack.util.executable.which('tar', required=True)
    tmpdir = tmpdir_factory.mktemp('mock-archive-dir')
    tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
    repodir = tmpdir.join(spack.stage._source_path_subdir)
    # Create the configure script: a tiny shell script that writes a
    # Makefile whose `install` target creates the prefix and a dummy file.
    configure_path = str(tmpdir.join(spack.stage._source_path_subdir,
                                     'configure'))
    with open(configure_path, 'w') as f:
        f.write(
            "#!/bin/sh\n"
            "prefix=$(echo $1 | sed 's/--prefix=//')\n"
            "cat > Makefile <<EOF\n"
            "all:\n"
            "\techo Building...\n\n"
            "install:\n"
            "\tmkdir -p $prefix\n"
            "\ttouch $prefix/dummy_file\n"
            "EOF\n"
        )
    os.chmod(configure_path, 0o755)  # make the script executable
    # Archive it (compression flag comes from the fixture parameter)
    with tmpdir.as_cwd():
        archive_name = '{0}{1}'.format(spack.stage._source_path_subdir,
                                       request.param[0])
        tar('-c{0}f'.format(request.param[1]), archive_name,
            spack.stage._source_path_subdir)
    Archive = collections.namedtuple('Archive',
                                     ['url', 'path', 'archive_file',
                                      'expanded_archive_basedir'])
    archive_file = str(tmpdir.join(archive_name))
    # Return the url
    yield Archive(
        url=('file://' + archive_file),
        archive_file=archive_file,
        path=str(repodir),
        expanded_archive_basedir=spack.stage._source_path_subdir)
def worker(vac_flag, cache_dict, mylock):  # Used in multiprocess_traditional_evaluate() #20220204
    """Thread worker: evaluate a vaccination-flag tensor with memoization.

    Looks the flag vector up in the shared ``cache_dict`` first, then in the
    module-level ``combined_dict``; only on a double miss does it run the
    (expensive) ``traditional_evaluate`` and record the result.
    NOTE(review): ``mylock`` is accepted but never used here — presumably
    synchronization happens elsewhere; confirm with the caller.
    """
    # Flag tensor -> hashable cache key.
    key = tuple(vac_flag.squeeze().cpu().numpy())
    if key in cache_dict:
        print('Found in cache_dict')
        total_cases, case_rate_std = cache_dict[key]
    elif key in combined_dict:
        print('Found in combined_dict')
        total_cases, case_rate_std = combined_dict[key]
    else:
        print('Not found in cache')
        total_cases, case_rate_std = traditional_evaluate(vac_flag)
        cache_dict[key] = [total_cases, case_rate_std]
    print(len(list(cache_dict.keys())))
    return total_cases
def use_k8s_secret(
    secret_name: str = 'k8s-secret',
    k8s_secret_key_to_env: Optional[Dict] = None,
):
    """An operator that configures the container to use k8s credentials.

    ``k8s_secret_key_to_env`` maps key names inside the k8s secret to the
    environment-variable names the values are exposed under. The secret
    itself must already exist in the cluster.

    Example:
    ::

        train = train_op(...)
        train.apply(use_k8s_secret(secret_name='s3-secret',
                    k8s_secret_key_to_env={'secret_key': 'AWS_SECRET_ACCESS_KEY'}))

    This loads the value stored under key 'secret_key' of secret 's3-secret'
    and sources it as the environment variable 'AWS_SECRET_ACCESS_KEY',
    i.e. it produces the following section on the pod:

    env:
      - name: AWS_SECRET_ACCESS_KEY
        valueFrom:
          secretKeyRef:
            name: s3-secret
            key: secret_key
    """
    mapping = k8s_secret_key_to_env or {}

    def _use_k8s_secret(task):
        # Import lazily so merely constructing the modifier does not
        # require the kubernetes client package.
        from kubernetes import client as k8s_client
        for secret_key, env_var in mapping.items():
            env = k8s_client.V1EnvVar(
                name=env_var,
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name,
                        key=secret_key,
                    )
                ),
            )
            task.container.add_env_variable(env)
        return task

    return _use_k8s_secret
def delete_access_key(UserName=None, AccessKeyId=None):
    """
    Deletes the access key pair associated with the specified IAM user.
    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.
    See also: AWS API Documentation

    Examples
    The following command deletes one access key (access key ID and secret access key) assigned to the IAM user named Bob.
    Expected Output:
    :example: response = client.delete_access_key(
        UserName='string',
        AccessKeyId='string'
    )

    :type UserName: string
    :param UserName: The name of the user whose access key pair you want to delete.
        This parameter allows (per its regex pattern ) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    :type AccessKeyId: string
    :param AccessKeyId: [REQUIRED]
        The access key ID for the access key ID and secret access key you want to delete.
        This parameter allows (per its regex pattern ) a string of characters that can consist of any upper or lowercased letter or digit.

    :return: response = client.delete_access_key(
        AccessKeyId='AKIDPMS9RO4H3FEXAMPLE',
        UserName='Bob',
    )
    print(response)
    """
    # Auto-generated documentation-only stub (botocore-style); the real
    # implementation is provided by the boto3 client at runtime.
    pass
def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):
    """Return DataFrame of recommended tracks.

    Arguments:
        artists: an optional sequence of artists to seed recommendation
        genres: an optional sequence of genres to seed recommendation
        limit: number of tracks to return
        features: whether to include track features in output
        client: Spotify API client exposing ``recommendations()``
            (presumably a spotipy client — confirm with callers)

    Returns:
        pd.DataFrame with one row per recommended track; when ``features``
        is true, audio-feature columns are merged in on the track ``id``.
    """
    recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)
    tracks = recs['tracks']
    # TODO: need a compose function...
    # Keep only these fields from each (flattened) track record.
    to_keep = (
        'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',
        'explicit', 'id'
        )
    rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))
    out = pd.DataFrame(rows)
    track_ids = [row['id'] for row in rows]
    if features:
        # Drop bookkeeping columns from the features frame so the merge
        # does not duplicate them (duration_ms already exists in `out`).
        extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']
        return out.merge(
                get_track_features(track_ids).drop(columns = extra_cols),
                on = "id"
                )
    return out
def reorder_point(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time, lead_time):
    """Compute the reorder point for a product from sales and lead times.

    The reorder point is the stock level at which a new order should be
    placed to avoid stock-outs: expected demand during the lead time plus
    a safety-stock buffer.

    Args:
        max_units_sold_daily (int): Maximum units sold per day in the
            previous period.
        avg_units_sold_daily (float): Average units sold per day in the
            previous period.
        max_lead_time (int): Maximum days required to obtain stock.
        avg_lead_time (int): Average days required to obtain stock.
        lead_time (int): Days required to obtain stock.

    Returns:
        The reorder point (lead-time demand + safety stock).
    """
    lead_time_demand = lead_time * avg_units_sold_daily
    buffer_stock = safety_stock(max_units_sold_daily, avg_units_sold_daily,
                                max_lead_time, avg_lead_time)
    return lead_time_demand + buffer_stock
def get_total_value_report(total_value):
    """Build the "Total value report" Slack message for the ETF chain.

    Args:
        total_value: aggregate value to report; returned unchanged.

    Returns:
        ``total_value``, unchanged. The Slack message string is built but
        currently not sent anywhere (notification is disabled below).
    """
    # Total value report
    currency = CURRENCY
    slack_str = "*" + "Total value report" + "*\n>>>\n"
    slack_str = slack_str + make_slack_etf_chain_total(total_value, currency)
    # Notification sending is currently disabled (was a no-op string
    # literal in the original source):
    # sendSlackNotification('etf', slack_str, "ETF Notification", ':chart_with_upwards_trend:')
    return total_value
def verify_count_responses(responses):
    """ Verifies that the responses given are well formed.

    Parameters
    ----------
    responses : int OR list-like
        If an int, the exact number of responses targeted.
        If list-like, the first two elements are the minimum and maximum
        (inclusive) range of responses targeted.
        If a third item is in the list it must be a list of values from
        which the range of target responses is being restricted.

    Returns
    -------
    responses : list / tuple
        The validated specification; an int input is wrapped in a
        single-item list.

    Raises
    ------
    IndexError
        If a list-like input does not have exactly 2 or 3 items.
    TypeError
        If any item (or any value in the values subset) is mistyped.
    """
    if isinstance(responses, int):
        responses = [responses]
    elif isinstance(responses, (list, tuple)):
        if not len(responses) in [2, 3]:
            raise IndexError (
                "The responses list given to has_count() must have "
                "either 2 or 3 items in the form: "
                "[min, max, [values subset]]. Found %s." % (responses)
            )
        valid_types = [int, int, (list, tuple)]
        for r, response in enumerate(responses):
            if not isinstance(response, valid_types[r]):
                raise TypeError (
                    "The responses list given to has_count() has "
                    "incorrectly typed items. It must be either 2 or 3 "
                    "items in the form: [int, int, list/tuple]. "
                    "Found %s." % (responses)
                )
            # BUG FIX: this was ``r==3``, which can never be true
            # (enumerate of a 2- or 3-item list yields indices 0-2), so
            # the values subset was never actually validated.
            if r==2:
                for value in response:
                    if not isinstance(value, int):
                        raise TypeError (
                            "The values subset given as the third item "
                            "in has_count(responses) is not correctly "
                            "typed. Each value must be int. "
                            "Found %s." % (response)
                        )
    return responses
def _mdk_writefile(path, contents):
"""Write a file to disk."""
with open(path, "wb") as f:
f.write(contents.encode("utf-8")) | 5,334,743 |
def _subtract(supernet, subnets, subnet_idx, ranges):
    """Calculate IPSet([supernet]) - IPSet(subnets).

    Assumptions: subnets is sorted, subnet_idx points to the first
    element in subnets that is a subnet of supernet.

    Results are appended to the ranges parameter as tuples of in format
    (version, first, last). Return value is the first subnet_idx that
    does not point to a subnet of supernet (or len(subnets) if all
    subsequents items are a subnet of supernet).
    """
    version = supernet._module.version
    subnet = subnets[subnet_idx]
    # Gap between the supernet's start and the first subnet's start.
    if subnet.first > supernet.first:
        ranges.append((version, supernet.first, subnet.first - 1))

    subnet_idx += 1
    prev_subnet = subnet
    # Walk the remaining subnets, emitting the gaps between consecutive
    # subnets that fall inside the supernet.
    while subnet_idx < len(subnets):
        cur_subnet = subnets[subnet_idx]

        if cur_subnet not in supernet:
            break

        if prev_subnet.last + 1 == cur_subnet.first:
            # two adjacent, non-mergable IPNetworks
            pass
        else:
            # Gap between the end of the previous subnet and the start of
            # the current one.
            ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1))

        subnet_idx += 1
        prev_subnet = cur_subnet

    # Tail gap: from the end of the last consumed subnet to the end of
    # the supernet (skipped when the subnet reaches the supernet's end).
    first = prev_subnet.last + 1
    last = supernet.last
    if first <= last:
        ranges.append((version, first, last))

    return subnet_idx
def commiter_factory(config: dict) -> BaseCommitizen:
    """Return the correct commitizen existing in the registry.

    Args:
        config: configuration dict; ``config["name"]`` selects the
            commitizen plugin class from the registry.

    Raises:
        SystemExit: with ``NO_COMMITIZEN_FOUND`` when the named plugin is
            not registered.
    """
    name: str = config["name"]
    # Keep the try narrow: previously the constructor call was inside the
    # try, so a KeyError raised *by the plugin's own __init__* was
    # misreported as "plugin not found".
    try:
        cz_class = registry[name]
    except KeyError:
        msg_error = (
            "The commiter has not been found in the system.\n\n"
            f"Try running 'pip install {name}'\n"
        )
        out.error(msg_error)
        raise SystemExit(NO_COMMITIZEN_FOUND)
    return cz_class(config)
def changeDocIdToMongoId(jsonDoc):
    """Replace the ``_id`` field of *jsonDoc* with a bson ``ObjectId``,
    in place.

    Will crash if *jsonDoc* is not a simple JSON object with an ``_id``
    field; a ``None`` document is silently ignored.
    """
    if jsonDoc is None:
        return
    jsonDoc['_id'] = ObjectId(jsonDoc['_id'])
def faceshq(output_folder):
    """Download the FacesHQ VQGAN config and checkpoint into *output_folder*.

    src yaml: 'https://app.koofr.net/links/a04deec9-0c59-4673-8b37-3d696fe63a5d?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fconfigs%2F2020-11-13T21-41-45-project.yaml'
    src ckpt: 'https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt'

    Returns:
        Tuple of the two ``__download`` results (yaml, ckpt).
    """
    filename = "faceshq"
    yaml_file = 'https://app.koofr.net/links/a04deec9-0c59-4673-8b37-3d696fe63a5d?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fconfigs%2F2020-11-13T21-41-45-project.yaml'
    ckpt_file = 'https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt'
    # BUG FIX: the output names contained a literal placeholder instead of
    # interpolating ``filename`` (which was defined but unused).
    output_yaml_file = Path(output_folder) / f"{filename}.yaml"
    output_ckpt_file = Path(output_folder) / f"{filename}.ckpt"
    os.makedirs(Path(output_folder), exist_ok=True)
    return (__download(yaml_file, output_yaml_file), __download(ckpt_file, output_ckpt_file))
async def apiAccountEditPhaaze(cls:"WebIndex", WebRequest:Request) -> Response:
	"""
	Default url: /api/account/phaaze/edit

	Edits the authenticated user's Phaaze account: optionally changes
	password, username and/or email after validating the current password.
	An email change resets verification and triggers a new verification
	mail. Returns a JSON response describing the applied update.
	"""
	WebUser:WebUserInfo = await cls.getWebUserInfo(WebRequest)
	if not WebUser.found:
		return await apiMissingAuthorisation(cls, WebRequest)

	Data:WebRequestContent = WebRequestContent(WebRequest)
	await Data.load()

	# get required stuff
	current_password:str = Data.getStr("password", "")
	new_username:str = Data.getStr("username", "")
	new_email:str = Data.getStr("email", "")
	new_password:str = Data.getStr("newpassword", "")
	new_password2:str = Data.getStr("newpassword2", "")

	# checks: current password must match before anything is changed
	if not current_password or WebUser.password != password_function(current_password):
		return await apiAccountPasswordsDontMatch(cls, WebRequest, msg="Current password is not correct")

	changed_email:bool = False # if True, reset validated state and send a verification mail
	update:dict = dict()

	# if new_password is set, check confirmation + length and add to update
	if new_password:
		if new_password != new_password2:
			return await apiAccountPasswordsDontMatch(cls, WebRequest)

		if len(new_password) < 8:
			return await apiAccountPasswordToShort(cls, WebRequest, min_length=8)

		update["password"] = password_function(new_password)

	if new_username:
		# want a new username
		if new_username.lower() != WebUser.username.lower():
			is_occupied:list = await getWebUsers(cls, "LOWER(`user`.`username`) = LOWER(%s)", (new_username,))
			if is_occupied:
				# already taken
				return await apiAccountTaken(cls, WebRequest)

			else:
				# username is free, add to update and add one to username_changed,
				# maybe i do something later with it
				update["username_changed"] = WebUser.username_changed + 1
				update["username"] = new_username

		# else: same name with different capitalization, only update the display form
		elif new_username != WebUser.username:
			update["username"] = new_username

	if new_email and new_email.lower() != WebUser.email:
		if re.match(IsEmail, new_email) == None:
			# does not look like an email address
			return await apiAccountEmailWrong(cls, WebRequest, email=new_email)

		is_occupied:list = await getWebUsers(cls, "user.email LIKE %s", (new_email,))
		if is_occupied:
			# already taken
			return await apiAccountTaken(cls, WebRequest)
		else:
			changed_email = True
			update["email"] = new_email

	if not update:
		return await apiWrongData(cls, WebRequest, msg=f"No changes, please add at least one")

	# verification mail
	if changed_email:
		cls.Web.BASE.Logger.warning(f"(API) New Email, send new verification mail: {new_email}", require="api:account")
		# TODO: SEND MAIL

	update["edited_at"] = str(datetime.datetime.now())

	cls.Web.BASE.PhaazeDB.updateQuery(
		table = "user",
		content = update,
		where = "`user`.`id` = %s",
		where_values = (WebUser.user_id,)
	)

	cls.Web.BASE.Logger.debug(f"(API) Account edit ({WebUser.user_id}) : {str(update)}", require="api:account")
	return cls.response(
		status=200,
		text=json.dumps( dict(error="successfull_edited", msg="Your account has been successfull edited", update=update, status=200) ),
		content_type="application/json"
	)
def put_push_messages_to_dynamo(body, shop_info, remind_date_difference):
    """
    Build push-message payloads, send the reservation-confirmation message
    and register the reminder messages in DynamoDB.

    Parameters
    ----------
    body : dict
        Parameters passed in from the front end (user, shop, reservation
        date/time, course and staff names).
    shop_info : dict
        Shop record; ``shop_info['shop']['shopAddress']`` is used.
    remind_date_difference : int
        Day offset for the "before the day" reminder. Negative values are
        allowed so messages after the reservation date can be expressed
        (e.g. 3 days before -> -3).

    Notes
    -----
    For template messages the notification_token of the follow-up message
    must be updated, so the follow-up message is inserted into the DB
    first and its id is obtained here.
    """
    # Send the reservation-confirmation notification message
    user_id = body['userId']
    shop_name = body['shopName']
    shop_address = shop_info['shop']['shopAddress']
    reservation_datetime = body['reservationDate'] + ' ' + \
        body['reservationStarttime'] + '-' + body['reservationEndtime']
    course_name = body['courseName']
    staff_name = body['staffName']
    flex = {'shop_name': shop_name,
            'shop_address': shop_address,
            'reservation_date': reservation_datetime,
            'course_name': course_name,
            'staff_name': staff_name,
            'remind_status': 'confirm'
            }
    flex_obj = hair_salon_utils.create_flex_message(**flex)
    # Fetch the short-lived channel access token
    channel_info = channel_access_token_table_controller.get_item(
        CHANNEL_ID)
    logger.debug('message_channel_info: %s', channel_info)
    channel_access_token = channel_info['channelAccessToken']
    response = line.send_push_message(channel_access_token, flex_obj, user_id)
    logger.debug(response)
    # Register the reminder messages to be sent later into the table
    # Build the same-day reminder data
    flex_on_day = {'shop_name': shop_name,
                   'shop_address': shop_address,
                   'reservation_date': reservation_datetime,
                   'course_name': course_name,
                   'staff_name': staff_name,
                   'remind_status': 'on_day'
                   }
    flex_obj = hair_salon_utils.create_flex_message(**flex_on_day)
    remind_date_on_day = body['reservationDate']
    remind_message_id_on_day = message_table_controller.put_push_message(
        user_id, CHANNEL_ID, flex_obj, remind_date_on_day)
    # Build the reminder data for days before the reservation
    date_text_before_day = str(abs(remind_date_difference))
    flex_before_day = {'shop_name': shop_name,
                       'shop_address': shop_address,
                       'reservation_date': reservation_datetime,
                       'course_name': course_name,
                       'staff_name': staff_name,
                       'remind_status': 'day_before',
                       'day_before': date_text_before_day
                       }
    flex_obj = hair_salon_utils.create_flex_message(**flex_before_day)
    remind_date_before_day = utils.calculate_date_str_difference(
        body['reservationDate'], remind_date_difference)
    remind_message_id_on_day = message_table_controller.put_push_message(
        user_id, CHANNEL_ID, flex_obj, remind_date_before_day)
def start(mainloop=False,banner=True):
    """
    Start Tk and read in an options_database file (if present), then
    open a TopoConsole.

    Does nothing if the method has previously been called (i.e. the
    module-level console variable is not None).

    mainloop: If True, then the command-line is frozen while the GUI
    is open. If False, then commands can be entered at the command-line
    even while the GUI is operational. Default is False.

    banner: If True, print a short launch message. (NOTE: this module is
    Python 2 code - the bare print statements below are intentional.)
    """
    global console

    ### Return immediately if console already set
    # (console itself might have been destroyed but we still want to
    # quit this function before starting another Tk instance, etc)
    if console is not None: return

    if banner: print 'Launching GUI'

    # tcl equivalent of 'if not hasattr(wm,forget)' would be better
    if system_platform=='mac' or Tkinter.TkVersion<8.5:
        global TK_SUPPORTS_DOCK
        TK_SUPPORTS_DOCK=False

    paramtk.initialize()

    # Attach a controllable menubar to the Tk root window.
    paramtk.root.menubar = ControllableMenu(paramtk.root)
    paramtk.root.configure(menu=paramtk.root.menubar)

    # default,clam,alt,classic
    try:
        paramtk.root.tk.call("ttk::style","theme","use","classic")
    except:
        # best-effort: theme selection is cosmetic, ignore failures
        pass

    # Try to read in options from an options_database file
    # (see http://www.itworld.com/AppDev/1243/UIR000616regex/
    # or p. 49 Grayson)
    try:
        options_database = os.path.join(sys.path[0],"topo","tkgui","options_database")
        paramtk.root.option_readfile(options_database)
        print "Read options database from",options_database
    except Tkinter.TclError:
        pass

    console = TopoConsole(paramtk.root)

    # Provide a way for other code to access the GUI when necessary
    topo.guimain=console

    # This alows context menus to work on the Mac. Widget code should bind
    # contextual menus to the virtual event <<right-click>>, not
    # <Button-3>.
    console.event_add('<<right-click>>',*right_click_events)
    console.event_add('<<right-click-release>>',*right_click_release_events)

    # GUI/threads:
    # http://thread.gmane.org/gmane.comp.python.scientific.user/4153
    # (inc. ipython info)
    # (Also http://mail.python.org/pipermail/python-list/2000-January/021250.html)
    # mainloop() freezes the commandline until the GUI window exits.
    # Without this line the command-line remains responsive.
    if mainloop: paramtk.root.mainloop()
def get_s3_items_by_type_from_queue(volume_folder):
    """
    Load redis queue named "volume:<volume_folder>", and return dict of keys
    and md5s sorted by file type.

    Each queue entry is a newline-separated list of tab-delimited file
    records. Returned value:
        {'alto': [[s3_key, md5], ...], 'jp2': ..., 'tiff': ...,
         'casemets': ..., 'volmets': ..., 'md5': ...}
    """
    # Drain the queue, splitting each entry's lines back into
    # [s3_key, md5] pairs.
    s3_items = []
    for files_str in spop_all('volume:' + volume_folder):
        for line in force_str(files_str).split("\n"):
            s3_items.append(line.split("\t"))
    return sort_s3_items_by_type(s3_items, volume_folder)
def calculate_mixture_features(workspace, speech_dir, noise_dir, data_type, snr):
    """Calculate spectrogram for mixed, speech and noise audio. Then write the
    features to disk.

    Args:
      workspace: str, path of workspace.
      speech_dir: str, path of speech data.
      noise_dir: str, path of noise data.
      data_type: str, 'train' | 'test'.
      snr: float, signal to noise ratio to be mixed.
    """
    # workspace = args.workspace
    # speech_dir = args.speech_dir
    # noise_dir = args.noise_dir
    # data_type = args.data_type
    # snr = args.snr  # signal-to-noise ratio
    fs = 8000
    # Open mixture csv.
    mixture_csv_path = os.path.join(workspace, "mixture_csvs", "%s.csv" % data_type)
    with open(mixture_csv_path, 'r', encoding="utf-8") as f:
        reader = csv.reader(f, delimiter='\t') # read the file, tab-delimited
        lis = list(reader) # materialize rows: [speech_name, noise_name, onset, offset]
    t1 = time.time()
    cnt = 0
    for i1 in range(1, len(lis)):
        [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]
        noise_onset = int(noise_onset)
        noise_offset = int(noise_offset)
        # Read speech audio.
        speech_path = os.path.join(speech_dir, speech_na) # join path components
        (speech_audio, _) = read_audio(speech_path, target_fs=fs) # read audio resampled to 8000 Hz
        # Read noise audio.
        noise_path = os.path.join(noise_dir, noise_na) # noise file path
        (noise_audio, _) = read_audio(noise_path, target_fs=fs) # noise audio samples
        # Repeat noise to the same length as speech.
        if len(noise_audio) < len(speech_audio):
            n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio)))) # float division for a fractional repeat count
            # ceil: round the repeat count up to a whole number of tiles
            noise_audio_ex = np.tile(noise_audio, n_repeat) # tile the noise so it is at least as long as the speech
            noise_audio = noise_audio_ex[0: len(speech_audio)] # then truncate to the speech length
        # Truncate noise to the same length as speech.
        else:
            noise_audio = noise_audio[noise_onset: noise_offset] # use the requested onset/offset segment
        # Scale speech to given snr.
        scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)
        speech_audio *= scaler # in-place amplitude scaling
        # Get normalized mixture, speech, noise.
        (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio) # additively mix speech and noise
        # Write out mixed audio.
        out_bare_na = os.path.join("%s.%s" %
                                   (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0])) # drop the file extensions
        out_audio_path = os.path.join(workspace, "mixed_audios", "spectrogram",
                                      data_type, "%ddb" % int(snr), "%s.wav" % out_bare_na) # include the SNR in the output name
        create_folder(os.path.dirname(out_audio_path))
        write_audio(out_audio_path, mixed_audio, fs) # write the mixed audio under mixed_audios/
        # Extract spectrograms.
        mixed_complx_x = calc_sp(mixed_audio, mode='complex') # complex spectrogram (keeps phase)
        speech_x = calc_sp(speech_audio, mode='magnitude')
        noise_x = calc_sp(noise_audio, mode='magnitude')
        # Write out features.
        out_feat_path = os.path.join(workspace, "features", "spectrogram",
                                     data_type, "%ddb" % int(snr), "%s.p" % out_bare_na)
        create_folder(os.path.dirname(out_feat_path))
        data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]
        # cPickle.dump(data, open(out_feat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(data, open(out_feat_path, 'wb')) # serialize the feature tuple to disk
        # (first argument: the object to serialize; second: a file opened
        # for binary writing, 'wb')
        # Print.
        if cnt % 100 == 0:
            print(cnt)
        cnt += 1
    print("Extracting feature time: %s" % (time.time() - t1))
def strip_spectral_type(series, return_mask=False):
    """
    Strip a leading "(X)" spectral-type tag (X in OBAFGKM) from a series
    of object-name strings.

    Args:
        series (pd.Series): series of object names (strings)
        return_mask (bool): also return a boolean mask, True where a
            spectral type was present

    Returns:
        no_type (pd.Series): series without spectral types
        type_mask (pd.Series): boolean mask where type is given
            (only when ``return_mask`` is True)
    """
    # Entries start with "(O)".."(M)"; the tag plus trailing space is 4 chars.
    has_type = series.str.match('\\([OBAFGKM]\\)')
    stripped = series.copy()
    stripped[has_type] = series[has_type].str.slice(start=4)
    if return_mask:
        return stripped, has_type
    return stripped
def already_in_bioconda(recipe, meta, df):
    """
    Does the package exist in bioconda?

    Returns a dict flagging the problem (with a suggested fix) when this
    recipe's version/build number is already published on the bioconda
    channel; returns None otherwise.
    """
    subset = _subset_df(recipe, meta, df)
    build_number = int(meta.get_value('build/number', 0))
    same_build = subset[subset.build_number == build_number]
    if 'bioconda' in set(same_build.channel):
        return {
            'already_in_bioconda': True,
            'fix': 'bump version or build number'
        }
def expand_groups(node_id, groups):
    """
    Recursively expand a (possibly grouped) node ID into its leaf IDs.

    node_id: a node ID that may be a group
    groups: dict mapping group IDs to lists of sub-IDs (which may
        themselves be groups)
    return value: a list that contains all leaf node IDs, deconvoluted

    Note: a cycle in ``groups`` would recurse forever; assumes acyclic.
    """
    if node_id not in groups:
        # Not a group: the node is itself a leaf.
        return [node_id]
    node_list = []
    for component_id in groups[node_id]:
        node_list.extend(expand_groups(component_id, groups))
    return node_list
def atol_receive_receipt_report(self, receipt_id):
    """
    Attempt to retrieve a receipt report for given receipt_id

    If received an unrecoverable error, then stop any further attempts to
    receive the report.

    Bound celery task (``self`` is the task): retries with exponential
    backoff on transient errors, re-registers the receipt when ATOL has
    not processed it yet, and marks the receipt failed when retries are
    exhausted or the error is unrecoverable.
    """
    atol = AtolAPI()
    Receipt = apps.get_model('atol', 'Receipt')
    receipt = Receipt.objects.get(id=receipt_id)
    # A receipt must have been registered with ATOL (uuid) and be in a
    # pending state before a report can be fetched.
    if not receipt.uuid:
        logger.error('receipt %s does not have a uuid', receipt.id)
        return
    if receipt.status not in [ReceiptStatus.initiated, ReceiptStatus.retried]:
        logger.error('receipt %s has invalid status: %s', receipt.uuid, receipt.status)
        return
    try:
        report = atol.report(receipt.uuid)
    except AtolUnrecoverableError as exc:
        # ATOL says this can never succeed: give up permanently.
        logger.error('unable to fetch report for receipt %s due to %s',
                     receipt.id, exc, exc_info=True)
        receipt.declare_failed()
    except AtolReceiptNotProcessed as exc:
        # ATOL has not processed the receipt yet: re-register it under a
        # fresh internal uuid and retry creation after the commit.
        logger.warning('unable to fetch report for receipt %s due to %s',
                       receipt.id, exc, exc_info=True)
        logger.info('repeat receipt registration: id %s; old internal_uuid %s',
                    receipt.id, receipt.internal_uuid)
        with transaction.atomic():
            receipt.internal_uuid = uuid4()
            receipt.status = ReceiptStatus.retried
            receipt.save(update_fields=['internal_uuid', 'status'])
            transaction.on_commit(
                lambda: atol_create_receipt.apply_async(args=(receipt.id,), countdown=60)
            )
    except Exception as exc:
        # Transient failure: retry with exponentially growing countdown.
        logger.warning('failed to fetch report for receipt %s due to %s',
                       receipt.id, exc, exc_info=True)
        try:
            countdown = 60 * int(math.exp(self.request.retries))
            logger.info('retrying to receive receipt %s with countdown %s due to %s',
                        receipt.id, countdown, exc)
            self.retry(countdown=countdown)
        except MaxRetriesExceededError:
            logger.error('run out of attempts to create receipt %s due to %s',
                         receipt.id, exc)
            receipt.declare_failed()
    else:
        # Success: persist the report payload on the receipt.
        with transaction.atomic():
            receipt.receive(content=report.data)
def _preservation_derivatives_query(storage_service_id, storage_location_id, aip_uuid):
    """Fetch information on preservation derivatives from db.

    :param storage_service_id: Storage Service ID (int)
    :param storage_location_id: Storage Location ID (int); falsy values
        disable the location filter
    :param aip_uuid: AIP UUID (str); falsy values disable the AIP filter

    :returns: SQLAlchemy query results
    """
    # Base query: preservation-type files in the given storage service,
    # ordered for stable grouping by AIP and file format.
    files = (
        File.query.join(AIP)
        .join(StorageLocation)
        .join(StorageService)
        .filter(StorageService.id == storage_service_id)
        .filter(File.file_type == FileType.preservation)
        .order_by(AIP.uuid, File.file_format)
    )
    # Optional narrowing filters.
    if storage_location_id:
        files = files.filter(StorageLocation.id == storage_location_id)
    if aip_uuid:
        files = files.filter(AIP.uuid == aip_uuid)
    return files
def euler2rot_symbolic(angle1='ϕ', angle2='θ', angle3='ψ', order='X-Y-Z', ertype='extrinsic'):
    """returns symbolic expression for the composition of elementary rotation matrices

    Parameters
    ----------
    angle1 : string or sympy.Symbol
        angle representing first rotation
    angle2 : string or sympy.Symbol
        angle representing second rotation
    angle3 : string or sympy.Symbol
        angle representing third rotation
    order : string
        valid string sequence that specifies the order of rotation. See `euler2rot()`
        for details
    ertype : string ('extrinsic' or 'intrinsic') See `euler2rot()` for details
        the type of elemental rotations.

    Example
    -------
    >>> R = euler2rot_symbolic('1', '2', '3', 'X-Y-Z' , 'intrinsic')
    >>> c, s = sy.symbols('c, s', cls=sy.Function)
    >>> R.subs({sy.cos:c, sy.sin:s})
    Matrix([
    [ c(2)*c(3), -c(2)*s(3), s(2)],
    [ c(1)*s(3) + c(3)*s(1)*s(2), c(1)*c(3) - s(1)*s(2)*s(3), -c(2)*s(1)],
    [-c(1)*c(3)*s(2) + s(1)*s(3), c(1)*s(2)*s(3) + c(3)*s(1), c(1)*c(2)]])

    Note
    ----
    The order of the input angles are specified in the order of rotations (corresponding
    to the `order`). They are not specified with respect to any particular axis.
    """
    # Dispatch table replaces the previous string-building + eval(), which
    # was fragile and an eval-on-constructed-code smell.
    axis_rotations = {'X': rotX_symbolic, 'Y': rotY_symbolic, 'Z': rotZ_symbolic}
    axes = order.split('-')
    if ertype == 'extrinsic':
        # Extrinsic rotations compose in reverse axis order, with the
        # third angle applied first: R = R_c(a3) * R_b(a2) * R_a(a1).
        axes.reverse()
        angles = (angle3, angle2, angle1)
    elif ertype == 'intrinsic':
        # Intrinsic: R = R_a(a1) * R_b(a2) * R_c(a3).
        angles = (angle1, angle2, angle3)
    else:
        raise ValueError('Incorrect elemental rotation parameter.')
    first, second, third = (axis_rotations[axis](angle)
                            for axis, angle in zip(axes, angles))
    return first * second * third
def add_plugin_translations(plugin, translation):
    """Adds a new language to the plugin translations.

    Runs ``pybabel extract`` to (re)build the plugin's ``messages.pot``
    and then ``pybabel init`` to create the catalog for the new locale.

    :param plugin: The plugins identifier.
    :param translation: The short name of the translation
                        like ``en`` or ``de_AT``.
    """
    plugin_folder = current_app.pluggy.get_plugin(plugin).__path__[0]
    translations_folder = os.path.join(plugin_folder, "translations")
    source_file = os.path.join(translations_folder, "messages.pot")
    extract_cmd = ["pybabel", "extract", "-F", "babel.cfg",
                   "-k", "lazy_gettext", "-o", source_file,
                   plugin_folder]
    init_cmd = ["pybabel", "init", "-i", source_file,
                "-d", translations_folder, "-l", translation]
    subprocess.call(extract_cmd)
    subprocess.call(init_cmd)
def k4a_playback_get_next_imu_sample(playback_handle, imu_sample):
    """ctypes wrapper for the Azure Kinect recording SDK call:

    K4ARECORD_EXPORT k4a_stream_result_t k4a_playback_get_next_imu_sample(k4a_playback_t playback_handle,
                                                                          k4a_imu_sample_t *imu_sample);

    ``imu_sample`` must be a ``ctypes.POINTER(k4a_imu_sample_t)`` that the
    native call fills in; the returned ``k4a_stream_result_t`` signals
    success/EOF/failure.
    """
    # NOTE(review): restype/argtypes are (re)assigned on every call; this
    # is redundant after the first call but harmless.
    _k4a_playback_get_next_imu_sample = record_dll.k4a_playback_get_next_imu_sample
    _k4a_playback_get_next_imu_sample.restype = k4a_stream_result_t
    _k4a_playback_get_next_imu_sample.argtypes = (k4a_playback_t, \
                                                  ctypes.POINTER(k4a_imu_sample_t),)
    return _k4a_playback_get_next_imu_sample(playback_handle, imu_sample)
def knapsack_iterative_numpy(items, maxweight):
    """
    Iterative (dynamic programming) knapsack solver.

    maximize \sum_{i \in T} v_i
    subject to \sum_{i \in T} w_i \leq W

    Args:
        items: sequence of (value, weight) pairs; weights may be
            non-integral (they are scaled to integers internally).
        maxweight: maximum total weight W.

    Returns:
        (total_value, items_subset): best achievable total value and the
        list of chosen items.

    Notes:
        dpmat is the dynamic programming memoization matrix.
        dpmat[i, w] is the total value of the items with weight at most W
        T is the set of indicies in the optimal solution
        NOTE(review): item index 0 is never considered by the DP (row 0 is
        the all-zero base case) — confirm callers pad the item list.
    """
    items = np.array(items)
    weights = items.T[1]
    # Find maximum decimal place (this problem is in NP)
    max_exp = max([number_of_decimals(w_) for w_ in weights])
    coeff = 10 ** max_exp
    # Adjust weights to be integral.
    # BUG FIX: np.int / np.bool were deprecated in NumPy 1.20 and removed
    # in 1.24; the builtin int / bool behave identically here.
    weights = (weights * coeff).astype(int)
    values = items.T[0]
    MAXWEIGHT = int(maxweight * coeff)
    W_SIZE = MAXWEIGHT + 1

    dpmat = np.full((len(items), W_SIZE), np.inf)
    kmat = np.full((len(items), W_SIZE), 0, dtype=bool)
    idx_subset = []

    # Base case: with only the 0th row available, value is 0 everywhere.
    for w in range(W_SIZE):
        dpmat[0][w] = 0
    for idx in range(1, len(items)):
        item_val = values[idx]
        item_weight = weights[idx]
        for w in range(W_SIZE):
            prev_val = dpmat[idx - 1][w]
            if item_weight <= w:
                # Taking the item: its value plus the best total using the
                # remaining capacity and previous items.
                withitem_val = item_val + dpmat[idx - 1][w - item_weight]
                more_valuable = withitem_val > prev_val
            else:
                withitem_val = prev_val
                more_valuable = False
            dpmat[idx][w] = withitem_val if more_valuable else prev_val
            kmat[idx][w] = more_valuable
    # Backtrack through kmat to recover the chosen item indices.
    K = MAXWEIGHT
    for idx in reversed(range(1, len(items))):
        if kmat[idx, K]:
            idx_subset.append(idx)
            K = K - weights[idx]
    idx_subset = sorted(idx_subset)
    items_subset = [items[i] for i in idx_subset]
    total_value = dpmat[len(items) - 1][MAXWEIGHT]
    return total_value, items_subset
def gram_matrix(image: torch.Tensor):
    """Compute the normalized Gram matrix of a feature map.

    Flattens ``image`` from (n, c, h, w) into an (n*c, h*w) matrix and
    returns its product with its own transpose, divided by the total number
    of elements. See https://pytorch.org/tutorials/
    advanced/neural_style_tutorial.html#style-loss
    """
    batch, channels, height, width = image.shape
    features = image.view(batch * channels, width * height)
    return torch.mm(features, features.t()).div(batch * channels * width * height)
def create_xls_file(files,
                    output,
                    clean=False,
                    delimiter=DEF_DELIMITER,
                    quotechar=DEF_QUOTECHAR,
                    inference=True,
                    date_format=DEF_DATE_FORMAT,
                    keep_prefix=False):
    """Build a single Excel workbook from a collection of CSV files.

    Each input file becomes one sheet (named via build_sheet_names, sheets
    sorted case-insensitively). Any pre-existing file at ``output`` is
    removed first. When ``clean`` is true, the source CSV files are deleted
    after a successful save.
    """
    if not output.endswith(".xls") and not output.endswith(".xlsx"):
        print("! Output name should end with .xls[x] extension, got:")
        print("{0:^40}".format(output))
        return
    if op.exists(output):
        print("! Output {0} already exists, removing.".format(output))
        os.unlink(output)
    workbook = xlwt.Workbook()
    sheet_specs = sorted(build_sheet_names(files, keep_prefix),
                         key=lambda t: t[1].lower())
    for csv_path, sheet_name in sheet_specs:
        print("Processing {0:>30} -> {1}/{2}".format(csv_path, output, sheet_name))
        with open(csv_path) as handle:
            # csv.reader yields rows already unquoted and split on delimiter
            reader = csv.reader(handle, delimiter=delimiter, quotechar=quotechar)
            worksheet = workbook.add_sheet(sheet_name)
            add_to_sheet(worksheet, reader, date_format, inference)
    workbook.save(output)
    # Only reached when the save above did not raise
    if clean:
        for csv_path in sorted(files):
            print("Removing {0}.".format(csv_path))
            os.unlink(csv_path)
def _block_diag_cvp(model, vec):
    """
    Compute the block-diagonal curvature-vector product for each module and
    store it on the module under the _CVP_BLOCK_DIAG attribute.

    g: n x p
    v: p
    c = sum[gg^t]: p x p
    cvp = sum[gg^t]v = sum[g(g^t)v]: p
    """
    gvp_by_module = dict(_module_batch_gvp(model, vec))
    for module, batch_grads in _module_batch_grads(model):
        batch_gvp = gvp_by_module[module]
        # cvp = sum[g(g^t)v], accumulated per-parameter within the module
        cvp = [torch.einsum('n...,n->...', grads, batch_gvp)
               for grads in batch_grads.values()]
        setattr(module, _CVP_BLOCK_DIAG, cvp)
def photos_of_user(request, user_id):
    """Display a user's photo gallery (GET) and add new photos to it (POST).

    Only the owner of the gallery (matched by email) may upload; other
    users get a 400 response on POST. After a successful upload the user
    is redirected back to the referring page.
    """
    template = 'accounts/profile/photos_gallery.html'
    user_acc = get_object_or_404(TLAccount, id=user_id)
    photos = user_acc.photos_of_user.all()  # Custom related name
    context = {
        'photos': photos,
        'user_acc': user_acc
    }
    if request.method == 'POST':
        if request.user.email != user_acc.email:
            return HttpResponseBadRequest()
        # BUG FIX: uploaded files must be bound via the form's second
        # (`files`) argument. Previously an `initial`-style dict was passed
        # there, so the uploaded photo never reached form validation.
        form = AddPhotoToUserGalleryForm(request.POST, request.FILES)
        if form.is_valid():
            photo = form.save(commit=False)
            photo.author = user_acc
            photo.save()
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
        else:
            return render(request, template, context)
    # If HTTP method is GET...
    else:
        return render(request, template, context)
def sync(lock):
    """
    A thread that will sync Leases with the local rethinkdb.

    Runs forever: each iteration syncs leases, logs any failure, then
    sleeps before the next attempt.

    lock: currently unused — presumably reserved for cross-thread
    synchronization; TODO confirm against callers.
    """
    # db connection is shared between threads
    dbconnection = connect()
    logger = logging.getLogger('myslice.leases')
    while True:
        logger.info("syncing Leases")
        try:
            syncLeases()
        except Exception as e:
            # BUG FIX: previously `continue` here skipped the sleep below,
            # causing a tight retry loop on persistent failures. Log and
            # fall through to the sleep instead.
            logger.exception(e)
        logger.info("sleeping")
        # sleep for 5 minutes
        # to be fine tuned
        time.sleep(300)
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
    """Convert sw update options db model plus subcloud name to dictionary."""
    # One output key per model attribute, plus the caller-supplied name.
    return {
        "id": sw_update_opts.id,
        "name": subcloud_name,
        "subcloud-id": sw_update_opts.subcloud_id,
        "storage-apply-type": sw_update_opts.storage_apply_type,
        "compute-apply-type": sw_update_opts.compute_apply_type,
        "max-parallel-computes": sw_update_opts.max_parallel_computes,
        "alarm-restriction-type": sw_update_opts.alarm_restriction_type,
        "default-instance-action": sw_update_opts.default_instance_action,
        "created-at": sw_update_opts.created_at,
        "updated-at": sw_update_opts.updated_at,
    }
def test_jsd5():
    """ Test that JSD fails when more weights than dists are given """
    dists = [Distribution("AB", [0.5, 0.5]), Distribution("BC", [0.5, 0.5])]
    # Three weights for only two distributions must raise.
    assert_raises(ditException, JSD, dists, [0.1, 0.6, 0.3])
def imf_binary_primary(m, imf, binary_fraction=constants.BIN_FRACTION):
    """
    Initial mass function for primary stars of binary systems.
    Integrated between m' and m'' using Newton-Cotes.
    Returns 0 unless m is in (1.5, 16)
    """
    lower = max(constants.B_MIN, m)
    upper = min(constants.B_MAX, 2 * m)
    # Outside the valid binary mass range the contribution vanishes.
    if m <= 0 or upper <= lower:
        return 0.0
    return binary_fraction * newton_cotes(lower, upper, phi_primary(m, imf))
def optimize_local_loss(layer, get_inp_out, data_tensor, optimizer, loss_fn, batch_size, iters,
                        use_cached_data=True, keep_gpu=True):
    """AdaRound optimization loop.

    Args:
        layer: the (quantized) layer being optimized.
        get_inp_out: callable mapping a data batch to (layer_input, fp_output).
        data_tensor: calibration data, indexed along dim 0.
        optimizer: optimizer over the layer's learnable parameters.
        loss_fn: loss comparing quantized output to the FP output.
        batch_size: samples per optimization step.
        iters: number of optimization steps.
        use_cached_data: precompute and reuse (input, output) pairs.
        keep_gpu: try to keep the cached data on the GPU.
    """
    if use_cached_data:
        logger.info('Caching data for local loss optimization')
        cached_batches = []
        if keep_gpu:
            torch.cuda.empty_cache()
        with torch.no_grad():
            for i in range(ceil(data_tensor.size(0) / batch_size)):
                cur_inp, cur_out = get_inp_out(data_tensor[i * batch_size:(i + 1) * batch_size])
                cached_batches.append((cur_inp.cpu(), cur_out.cpu()))
        cached_inps = torch.cat([x[0] for x in cached_batches])
        cached_outs = torch.cat([x[1] for x in cached_batches])
        device = cur_inp.device
        del cached_batches
        if keep_gpu:  # put all cached data on GPU for faster optimization
            torch.cuda.empty_cache()
            try:
                cached_inps = cached_inps.to(device)
                cached_outs = cached_outs.to(device)
            except RuntimeError as e:
                logger.warning(
                    f"WARNING: could not cache training data on GPU, keep on CPU ({e})"
                )
                cached_inps = cached_inps.cpu()
                cached_outs = cached_outs.cpu()
    # BUG FIX: previously `cached_inps.size(0)` was read unconditionally in
    # the loop below, raising NameError when use_cached_data is False.
    num_samples = cached_inps.size(0) if use_cached_data else data_tensor.size(0)
    for i in range(iters):
        idx = torch.randperm(num_samples)[:batch_size]
        if use_cached_data:
            cur_inp = cached_inps[idx].to(device)
            cur_out = cached_outs[idx].to(device)
        else:
            cur_inp, cur_out = get_inp_out(data_tensor[idx])
        optimizer.zero_grad()
        try:
            out_quant = layer(cur_inp)
            loss = loss_fn(out_quant, cur_out)
            loss.backward()
        except RuntimeError as e:
            if use_cached_data and 'cuda' in str(cached_inps.device):
                logger.warning(
                    f"WARNING: not enough CUDA memory for forward pass, "
                    f"move cached data to CPU ({e})"
                )
                cached_inps = cached_inps.cpu()
                cached_outs = cached_outs.cpu()
                # BUG FIX: backward did not complete, so skip this step
                # rather than stepping on stale/partial gradients.
                continue
            else:
                raise e
        optimizer.step()
def compute_scene_graph_similarity(ade20k_split, threshold=None,
                                   recall_funct=compute_recall_johnson_feiefei):
    """Encode scene graphs and score inferred captions against human ones.

    For every entry in ``ade20k_split`` the scene-graph encoder produces one
    vector per caption; the last caption is assumed to be the inferred one.
    Each human caption is compared against the inferred caption via
    normalized cosine similarity, from which recall and mean-rank statistics
    are derived and averaged.

    :param ade20k_split: mapping of image key -> graph dict for the encoder
    :param threshold: retrieval threshold forwarded to ``recall_funct``
    :param recall_funct: callable computing (recall dict, mean rank)
    :return: dict of averaged recall metrics plus mean_rank,
        average_similarity and threshold entries
    """
    model = get_scene_graph_encoder()
    model.eval()
    test_results = []
    with torch.no_grad():
        for k, graph_dict in ade20k_split.items():
            res = model(graph_dict)
            test_results.append(res)
    stacked_vectors = torch.stack(test_results)
    category = get_categories(ade20k_split)
    num_captions = stacked_vectors.shape[1]
    index_inferred_caption = num_captions - 1
    index_range_human_captions = index_inferred_caption
    caption_dim = 1
    recall_list = []
    mean_rank_list = []
    similarity_list = []
    for index_caption in range(index_range_human_captions):
        # Pair each human caption vector with the inferred caption vector.
        comparison = torch.cat((stacked_vectors[:, index_caption, :].unsqueeze(caption_dim),
                                stacked_vectors[:, index_inferred_caption, :].unsqueeze(caption_dim)),
                               dim=caption_dim)
        similarity_caption = calculate_normalized_cosine_similarity_on_tensor(comparison)
        recall_val, mean_rank = recall_funct(similarity_caption, threshold, category)
        similarity_list.append(similarity_caption.diag().mean().to("cpu").numpy())
        recall_list.append(recall_val)
        mean_rank_list.append(mean_rank)
    print(f"Threshold for retrieval: {threshold}")
    recall_mean = pd.DataFrame(recall_list).mean().to_dict()
    average_mean_rank = pd.DataFrame(mean_rank_list).mean()[0]
    average_similarity = pd.DataFrame(similarity_list).mean()[0]
    for k in recall_mean.keys():
        print(f"Average {k}: {recall_mean[k]}")
    recall_mean["mean_rank"] = average_mean_rank
    print(f"Average Mean Rank: {average_mean_rank}")
    # BUG FIX: message previously ran the value into the label
    # ("Average Similarity<value>"); add the missing separator.
    print(f"Average Similarity: {average_similarity}")
    recall_mean["average_similarity"] = average_similarity
    recall_mean["threshold"] = threshold
    return recall_mean
def load_cmudict():
    """Loads the CMU Pronouncing Dictionary"""
    dict_ref = importlib_resources.files("tacotron").joinpath("cmudict-0.7b.txt")
    pronunciations = {}
    with open(dict_ref, encoding="ISO-8859-1") as file:
        # Dictionary entries live on lines 127..133905 of the file; each is
        # "WORD  PRONUNCIATION" separated by two spaces.
        for line in islice(file, 126, 133905):
            word, pronunciation = line.strip().split("  ")
            pronunciations[format_alt_entry(word)] = pronunciation
    return pronunciations
def evalasm(d, text, r0 = 0, defines = defines, address = pad, thumb = False):
    """Compile and remotely execute an assembly snippet.
    32-bit ARM instruction set by default.
    Saves and restores r2-r12 and lr.
    Returns (r0, r1).

    Parameters:
        d:       remote device handle (must provide blx())
        text:    assembly source for the snippet to run
        r0:      initial value of register r0 passed to the snippet
        defines: symbol definitions forwarded to the assembler
        address: scratch address where the trampoline + snippet is assembled
        thumb:   assemble `text` as Thumb instead of ARM
    """
    if thumb:
        # In Thumb mode, we still use ARM code to save/restore registers.
        # The ARM trampoline below enters the Thumb snippet via `bx r8`
        # (text+1 sets the Thumb bit) and returns through `link:` which
        # pops straight into pc, restoring ARM state.
        assemble(d, address, '''\
            push    { r2-r12, lr }
            adr     lr, link
            adr     r8, text+1
            bx      r8
        link:
            pop     { r2-r12, pc }
            .pool
            .thumb
            .align  5
        text:
            %(text)s
            bx      lr
            ''' % locals(), defines=defines, thumb=False)
        return d.blx(address, r0)
    else:
        # ARM mode (default): just wrap the snippet in a push/pop pair.
        assemble(d, address, '''\
            push    { r2-r12, lr }
            %(text)s
            pop     { r2-r12, pc }
            ''' % locals(), defines=defines, thumb=False)
        return d.blx(address, r0)
def _get_thread_count():
"""Gets a thread_count based on the multiprocessing.cpu_count()."""
try:
thread_count = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous
# multithreading in Python, so assume that anything with 6 or more cores
# supports it.
if thread_count >= 6:
thread_count *= 2
except NotImplementedError:
# Assume a quad core if we can't get the actual core count.
thread_count = 4
return thread_count | 5,334,774 |
def _fft_inplace(points, invert=True):
"""Computes the fast fourier transform inplace
The length of points must be a power of two. Pass False to invert to
calculate the inverse.
"""
# Resources:
# https://cp-algorithms.web.app/algebra/fft.html
# https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/
# https://www.youtube.com/watch?v=r6sGWTCMz2k
n = len(points)
# Ensure the array is a power of 2 so we can achieve O(n log n) performance
if n == 0 or 2 ** (n.bit_length() - 1) != n:
raise ValueError("list length must be a power of 2")
# Faster lookup
range_ = range
rect_ = cmath.rect
# Bit reversal permutation
half_n = n // 2
j = 0
for i in range_(1, n):
mask = half_n
while j & mask:
j ^= mask
mask //= 2
j ^= mask
if i < j:
points[i], points[j] = points[j], points[i]
# Iterative fast fourier transform
direction = -cmath.pi if invert else cmath.pi
size = 1
while size < n:
wlen = rect_(1, direction / size)
for i in range_(0, n, size * 2):
w = 1 + 0j
for j in range_(i, i + size):
u = points[j]
v = points[j + size] * w
points[j] = u + v
points[j + size] = u - v
w *= wlen
size *= 2
if invert:
for i, a in enumerate(points):
points[i] = a / n | 5,334,775 |
def is_dir(dir_name):
    """Checks if a path is an actual directory"""
    if os.path.isdir(dir_name):
        return dir_name
    # argparse expects this exception type for invalid argument values
    raise argparse.ArgumentTypeError("{0} does not exist".format(dir_name))
def weighted_avg(x, weights):  # used in lego_reader.py
    """Batched weighted average along the sequence dimension.

    x       : batch * len * d
    weights : batch * len
    returns : batch * d
    """
    w = weights.unsqueeze(1)          # batch * 1 * len
    return w.bmm(x).squeeze(1)        # batch * d
def test_partial_load__short_read_of_required():
    """Crash if we get a short read on a required field."""
    # 9 bytes: enough for the leading fields but short of the required int64.
    stream = io.BytesIO(b"zyxwvut\x0b\xad")
    # int64 should be the last field included in the output.
    with pytest.raises(errors.UnexpectedEOFError):
        BasicStruct.partial_load(stream, "int64")
def CYR(df, N=5, M=5):
    """
    Market strength (CYR) indicator.

    DIVE is the N-period EMA of the average trade price (amount/volume,
    scaled by 0.01); CRY is its one-period percentage change, and MACYR is
    the M-period moving average of CRY.

    :param df: DataFrame with 'volume' and 'amount' columns
    :param N: EMA span used to smooth the average price
    :param M: window of the moving average over CRY
    :return: DataFrame with columns 'CRY' and 'MACYR'
    """
    VOL = df['volume']
    AMOUNT = df['amount']
    # Average price per unit volume, smoothed by EMA.
    DIVE = 0.01 * EMA(AMOUNT, N) / EMA(VOL, N)
    # Percentage change vs. the previous period.
    CRY = (DIVE / REF(DIVE, 1) - 1) * 100
    MACYR = MA(CRY, M)
    return pd.DataFrame({
        'CRY': CRY, 'MACYR': MACYR
    })
def configure_vrf_on_interface(device, interface, vrf):
    """ Configure interface to use VRF

        Args:
            device ('obj'): Device object
            interface ('str'): Interface
            vrf ('str'): VRF name

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    config_cmds = [
        "interface {interface}".format(interface=interface),
        "vrf forwarding {vrf}".format(vrf=vrf),
    ]
    try:
        device.configure(config_cmds)
    except SubCommandFailure as e:
        # Re-raise with the interface/vrf context attached.
        raise SubCommandFailure(
            "Could not configure VRF {vrf} on interface "
            "{interface}. Error:\n{error}".format(
                interface=interface, vrf=vrf, error=e
            )
        )
def main(number, divisors):
    """ Run the program """
    result = som(number, divisors)
    print(result)
def test_download_urls_to_file(
    credentials, mocker, project_id_download, recording, vcr
):
    """Test saving downloaded urls output to a json file."""
    runner = CliRunner()
    if not recording:
        # Mock only if using the cassettes, since we mock the return value.
        get_project_samples_response = get_vcr_response(
            "/api/v2/project-samples/", vcr, operator.contains
        )
        mocked_project_samples = mocker.patch.object(
            APIClient,
            "get_project_samples",
            return_value=ProjectSamples(**get_project_samples_response),
        )
        get_sample_details_response = get_vcr_response(
            "/api/v2/samples/", vcr, operator.contains
        )
        sample = SampleDetails(**get_sample_details_response)
        mocked_sample_details = mocker.patch.object(
            APIClient,
            "get_sample_details",
            return_value=sample,
        )
    # Patch the writer so no real file content assertions are needed; we
    # only verify that the download command routed output through it.
    mocked_output_list = mocker.patch(
        "gencove.command.download.main.Download.output_list"
    )
    # Run inside a temp dir so "output.json" never pollutes the repo.
    with runner.isolated_filesystem():
        res = runner.invoke(
            download,
            [
                "output.json",
                "--project-id",
                project_id_download,
                *credentials,
                "--download-urls",
            ],
        )
    assert res.exit_code == 0
    mocked_output_list.assert_called_once()
    if not recording:
        mocked_project_samples.assert_called_once()
        mocked_sample_details.assert_called_once()
def packCode(code):
    """Packs the given code by passing it to the compression engine"""
    # Memoize: identical inputs always compress to the same output.
    try:
        return packCache[code]
    except KeyError:
        packed = compressor.compress(parse(code))
        packCache[code] = packed
        return packed
def _get_vars_cls(_cls):
"""
Yield all attributes and it's value which isn't in object class.
class AnyClass(object):
a=1
b=2.
list(_get_vars_cls(_cls))
# return: [('a', 1), ('b', 2)]
"""
if not inspect.isclass(_cls):
raise TypeError(f"Expect class object. Got '{type(_cls)}'")
keys = set(_cls.__dict__).difference(set(object.__dict__))
for k in keys:
if k.startswith("_"):
continue
yield k, _cls.__dict__[k] | 5,334,784 |
def cus_excepthook(logger):
    """
    Build a replacement for ``sys.excepthook`` that also logs the exception.

    The returned hook first runs the saved default excepthook (printing the
    traceback to stderr), then records the exception through ``logger``.
    This doesn't work in ipython (including jupyter); there use
    `get_ipython().set_custom_execs((Exception,), your_exception_function)`
    instead.

    Parameters
    ----------
    logger: a logger object.

    Examples
    --------
    import sys
    sys.excepthook = cus_excepthook(logger)
    """
    def _hook(etype, value, tb):
        sys_excepthook(etype, value, tb)
        logger.debug("Got exception.\n", exc_info=(etype, value, tb))
    return _hook
def langpack_submission_allowed(user, parsed_addon_data):
    """Language packs can only be submitted by people with the right
    permission.

    Returns True when the upload is not a language pack at all, or when
    `user` holds the LANGPACK_SUBMIT permission.

    See https://github.com/mozilla/addons-server/issues/11788 and
    https://github.com/mozilla/addons-server/issues/11793
    """
    return (
        not parsed_addon_data.get('type') == amo.ADDON_LPAPP or
        action_allowed_user(user, amo.permissions.LANGPACK_SUBMIT))
def ht_edge_probabilities(p):
    """
    Given the probability of sampling an edge, returns the probabilities of sampling two-stars and triangles

    NOTE: this is an exercise stub — the actual probabilities are left to be
    filled in below; as written it always returns (0, 0).

    Parameters
    ---------------------
    p: float
        probability that any single edge is sampled
    """
    pi_twostars = 0
    pi_triangles = 0
    ###TIP: #TODO write the probabilities of sampling twostars and triangles under edge sampling
    # YOUR CODE HERE
    return pi_twostars, pi_triangles
def install_git_hook(c, force=False):
    """Installs pre-push git hook

    Copies .githooks/pre-push into .git/hooks. If a hook already exists it
    is only replaced when ``force`` is true; otherwise the task aborts.
    """
    path = Path('.git/hooks/pre-push')
    if path.is_file():
        if not force:
            sys.exit('Error: pre-push hook already exists. '
                     'Run: "invoke install-git-hook -f" to force overwrite.')
        path.unlink()
    shutil.copy('.githooks/pre-push', '.git/hooks')
    print(f'pre-push hook installed at {str(path)}')
def lseek(fd, pos, how):
    """Set the current position of file descriptor *fd* to position *pos*, modified
    by *how*: :const:`SEEK_SET` or ``0`` to set the position relative to the
    beginning of the file; :const:`SEEK_CUR` or ``1`` to set it relative to the
    current position; :const:`os.SEEK_END` or ``2`` to set it relative to the end of
    the file.

    NOTE(review): documentation-only stub — the body is intentionally empty
    and the function returns None; presumably this module exists for doc
    generation or API stubbing. TODO confirm.
    """
def get_tag_color_name(colorid):
    """Return name of the Finder color based on ID.

    Raises:
        ValueError: if colorid is not a known color ID.
    """
    # TODO: need to figure out how to do this in locale/language name
    try:
        return _COLORIDS[colorid]
    except KeyError:
        # BUG FIX: was a bare `except:`, which masked unrelated errors
        # (e.g. an unhashable colorid would surface as ValueError instead
        # of TypeError). Catch only the missing-key case.
        raise ValueError(f"Invalid colorid: {colorid}") from None
def com_google_fonts_check_083(family_metadata):
    """METADATA.pb: check if fonts field only has
       unique "full_name" values.
    """
    # Duplicates exist iff deduplicating the names shrinks the list.
    full_names = [font.full_name for font in family_metadata.fonts]
    if len(set(full_names)) != len(full_names):
        yield FAIL, ("Found duplicated \"full_name\" values"
                     " in METADATA.pb fonts field.")
    else:
        yield PASS, ("METADATA.pb \"fonts\" field only has"
                     " unique \"full_name\" values.")
def process_derived_core_properties(derived_core_properties):
    """Parse DerivedCoreProperties.txt content.

    Returns a tuple (version, id_start, id_continue): the Unicode version
    declared in the file header and the sets of characters carrying the
    ID_Start and ID_Continue properties.
    """
    id_start = set()
    id_continue = set()
    # BUG FIX: use a raw string — '[0-9\.]' in a normal string is an invalid
    # escape sequence (SyntaxWarning on modern Python) — and escape the dot
    # in ".txt" so it cannot match an arbitrary character.
    m = re.match(r'# DerivedCoreProperties-([0-9.]+)\.txt', derived_core_properties)
    assert m
    version = m.group(1)
    for (char, prop) in read_derived_core_properties(derived_core_properties):
        if prop == 'ID_Start':
            id_start.add(char)
        if prop == 'ID_Continue':
            id_continue.add(char)
    return (version, id_start, id_continue)
def error_500(error):
    """Route function for handling 500 error pages
    """
    body = flask.templating.render_template("errors/500.html.j2")
    return body, 500
def poormax(X : np.ndarray, feature_axis = 1) -> np.ndarray:
    """
    Min-max (range) normalization of the data: each feature is rescaled to
    [0, 1] via (x - min) / (max - min).

    :param X: input data array
    :param feature_axis: axis along which features live;
        feature_axis = 1 means each column is a separate feature
    """
    if not feature_axis:
        # Features are rows: transpose so the math below works column-wise.
        X = X.T
    lo = np.min(X, axis=0)
    span = np.max(X, axis=0) - lo
    X = (X - lo) / span
    if not feature_axis:
        X = X.T
    return X
def login_form(request):
    """
    The request must be get
    """
    # Visitors (not-logged-in users) get the visitor menu.
    context = RequestContext(request, {
        'menu': MenuService.visitor_menu(),
        'page_title': 'Login',
    })
    return render_to_response('login.html', context)
def tz_from_dd(points):
    """Get the timezone for a coordinate pair

    Args:
        points: (lat, lon) | [(lat, lon),] | pd.DataFrame w/lat and lon as columns

    Returns:
        np.array
    """
    # Normalize every accepted input shape to a list of (lat, lon) pairs.
    if isinstance(points, pd.DataFrame):
        points = points.values.tolist()
    if not isinstance(points, list):
        points = [points]
    # query returns (distances, indices); use the indices to look up the
    # timezone names in the zip-code table.
    query_result = ztree.query(points)
    return zips.iloc[query_result[1]].timezone.values
def create_volume(devstack_node, ceph_node, vol_name, size):
    """
    Create a Cinder volume and verify it landed in the Ceph 'volumes' pool.

    :param devstack_node: remote handle to the DevStack host (runs the cinder CLI)
    :param ceph_node: remote handle to a Ceph node (runs the rbd CLI)
    :param vol_name: display name for the new volume
    :param size: The size of the volume, in GB
    :returns: the Cinder volume id
    """
    size = str(size)
    log.info("Creating a {size}GB volume named {name}...".format(
        name=vol_name,
        size=size))
    # Source the OpenStack credentials, then create the volume via cinder.
    args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
            '--display-name', vol_name, size]
    cinder_create = devstack_node.sh(args, wait=True)
    vol_info = parse_os_table(cinder_create)
    log.debug("Volume info: %s", str(vol_info))
    try:
        rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True)
    except run.CommandFailedError:
        # Some clusters have no 'cinder' Ceph user; fall back to the default.
        log.debug("Original rbd call failed; retrying without '--id cinder'")
        rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True)
    # Cross-check the Cinder-reported volume against the Ceph backend.
    assert vol_info['id'] in rbd_output, \
        "Volume not found on Ceph cluster"
    assert vol_info['size'] == size, \
        "Volume size on Ceph cluster is different than specified"
    return vol_info['id']
def find_touching_pixels(label_img, distance=1, selem=None):
    """
    Returns a mask indicating touching regions. Either provide a diameter for a disk shape
    distance or a selem mask.
    :param label_img: a label image with integer labels (0 = background)
    :param distance: =1: touching pixels, >1 pixels labels distance appart
    :param selem: optional, a selection mask, e.g. skimage.morphology.disk(1) (if this is bigger than
        1 the 'distance' is not true.
    :return: a float mask with 1 where a labelled region touches (or, growing
        only through foreground, reaches within `distance` of) another region
    """
    if selem is None:
        selem = morphology.disk(1)
    touch_mask = np.zeros(label_img.shape)
    not_bg = label_img > 0
    # For each label, dilate it `distance` steps (restricted to foreground);
    # wherever the dilation reaches pixels of *other* labels, mark them.
    for lab in np.unique(label_img):
        if lab == 0:
            continue  # skip the background label
        cur_lab = label_img == lab
        grown = ndi.binary_dilation(cur_lab, structure=selem,
                                    iterations=distance, mask=not_bg)
        # IDIOM FIX: use ~cur_lab instead of `cur_lab == False`; also removed
        # the dead commented-out maximum_filter variant.
        touch_mask[grown & ~cur_lab] = 1
    return touch_mask
def install_ssh_keys(config, *hosts):
    """
    Generate and put public and private SSH keys to hosts that are listed in
    hosts to make cold migration work.

    Generator used contextmanager-style (has yield/finally): setup installs a
    freshly generated RSA keypair into the nova user's ~/.ssh on every host
    (backing up any existing .ssh directory first); teardown removes the
    installed keys and restores the backups.

    :param config: CloudFerry config
    :param hosts: list of hosts where to install keys
    """
    ssh_user = config.cloud.ssh_user
    ssh_password = config.cloud.ssh_sudo_password
    home_path = cfglib.CONF.evacuation.nova_home_path
    nova_user = cfglib.CONF.evacuation.nova_user
    # Disable host-key verification for the migration connections.
    ssh_config = '\\n'.join(['UserKnownHostsFile /dev/null',
                             'StrictHostKeyChecking no'])
    ssh_path = '/'.join([home_path, '.ssh'])
    ssh_backup_base = '/'.join([home_path, '.ssh_backup'])
    # One shared keypair so every host can SSH into every other host.
    key = RSA.generate(2048, os.urandom)
    public_key = str(key.exportKey('OpenSSH')).replace('\n', '\\n')
    private_key = str(key.exportKey('PEM')).replace('\n', '\\n')
    ssh_backups = {}
    for host in hosts:
        runner = remote_runner.RemoteRunner(host, ssh_user,
                                            password=ssh_password,
                                            sudo=True)
        # Random suffix keeps repeated runs from clobbering old backups.
        ssh_backup_path = '/'.join([ssh_backup_base,
                                    os.urandom(8).encode('hex')])
        try:
            runner.run('test -e "{path}"', path=ssh_path)
            runner.run('mkdir -p {backup_base}', backup_base=ssh_backup_base)
            runner.run('mv "{path}" "{backup_path}"', path=ssh_path,
                       backup_path=ssh_backup_path)
            ssh_backups[host] = ssh_backup_path
        except remote_runner.RemoteExecutionError:
            LOG.debug("Dot SSH directory not found, skipping backup")
        runner.run('mkdir -p "{path}"', path=ssh_path)
        runner.run('echo -e "{key}" > "{path}"', key=public_key,
                   path='/'.join([ssh_path, 'authorized_keys']))
        runner.run('echo -e "{key}" > "{path}"', key=private_key,
                   path='/'.join([ssh_path, 'id_rsa']))
        runner.run('echo -e "{config}" > "{path}"', config=ssh_config,
                   path='/'.join([ssh_path, 'config']))
        # Private key must not be group/world readable or ssh refuses it.
        runner.run('chmod 0600 "{path}"', path='/'.join([ssh_path, 'id_rsa']))
        runner.run('chown -R "{user}:{user}" "{path}"',
                   user=nova_user, path=ssh_path)
    try:
        yield
    finally:
        # Teardown: best-effort removal of installed keys and restore of
        # whatever .ssh directory existed before.
        for host in hosts:
            runner = remote_runner.RemoteRunner(host, ssh_user,
                                                password=ssh_password,
                                                sudo=True)
            runner.run('rm -rf "{path}"', path=ssh_path)
            ssh_backup_path = ssh_backups.get(host)
            if ssh_backup_path is not None:
                runner.run_ignoring_errors(
                    'mv "{backup_path}" "{path}"',
                    backup_path=ssh_backup_path, path=ssh_path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.