| content | id |
|---|---|
def token_bytes(nbytes=None):
"""Return a random byte string containing *nbytes* bytes.
If *nbytes* is ``None`` or not supplied, a reasonable
default is used.
>>> token_bytes(16) #doctest:+SKIP
b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b'
"""
    if nbytes is None:
        nbytes = DEFAULT_ENTROPY  # module-level default entropy in the secrets module
    return os.urandom(nbytes)
| 16,500
|
def test_three_related_branch_w_ac(clean_db, three_family_branch_with_trials, capsys):
"""Test three related experiments in a branch with --collapse and --all."""
orion.core.cli.main(["status", "--collapse", "--all"])
captured = capsys.readouterr().out
expected = """\
test_double_exp-v1
==================
id status
-------------------------------- -----------
c2187f4954884c801e423d851aec9a0b broken
e42cc22a15188d72df315b9eac79c9c0 completed
b849f69cc3a77f39382d7435d0d41b14 interrupted
7fbbd152f7ca2c064bf00441e311609d new
d5f1c1cae188608b581ded20cd198679 new
183148e187a1399989a06ffb02059920 new
667513aa2cb2244bee9c4f41c7ff1cea reserved
557b9fdb9f96569dff7eb2de10d3946f suspended
"""
assert captured == expected
| 16,501
|
def quat2expmap(q):
"""
Converts a quaternion to an exponential map
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args
q: 1x4 quaternion, w, x, y, z
Returns
r: 1x3 exponential map
Raises
ValueError if the l2 norm of the quaternion is not close to 1
"""
    if np.abs(np.linalg.norm(q) - 1) > 1e-3:
        raise ValueError("quat2expmap: input quaternion is not norm 1")
    sinhalftheta = np.linalg.norm(q[1:])
    coshalftheta = q[0]
    r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))
theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
theta = np.mod(theta + 2 * np.pi, 2 * np.pi)
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r
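# Illustrative usage sketch (not part of the original source; assumes numpy imported as np):
# the identity quaternion maps to the zero vector, and a 90-degree rotation about x
# maps to an exponential map of length pi/2 along x.
print(quat2expmap(np.array([1.0, 0.0, 0.0, 0.0])))                              # -> [0. 0. 0.]
print(quat2expmap(np.array([np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0])))  # -> approx [1.5708 0. 0.]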
| 16,502
|
def test_is_swagger_documentation_route_without_route_is_safe():
"""
Not sure if `None` is an option for the `route_info` dict, but make
sure nothing crashes in that possible scenario.
"""
from pyramid_swagger.tween import is_swagger_documentation_route
assert is_swagger_documentation_route(None) is False
| 16,503
|
def log_to_file(filename, level=None, formatter=None):
""" Output logs to a file
Causes logs to be additionally directed to a file, if you call this twice you will
get duplicated logging. This does not disable or invalidate other logging options , it
adds to them.
Supported Logging levels are CRITICAL, ERROR, WARNING, INFO and DEBUG
Logging formatters are documented here , they control the format of the logs.
https://docs.python.org/3/library/logging.html#formatter-objects
Example:
Selecting DEBUG will show all other levels
Selecting ERROR will show CRITICAL and ERROR only
Args:
filename (str) : Filename to log to.
level (str) : Display logs tagged below this level.
formatter (Formatter) : The python logging formatter you want to use.
Returns:
(logger) : A logging handle that you don't have to use.
"""
filehandler = logging.FileHandler(filename)
if level:
filehandler.setLevel(level)
else:
filehandler.setLevel(logging.DEBUG)
    if not formatter:
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
if len(logger.handlers) < 2:
logger.addHandler(filehandler)
return logger
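# Illustrative usage sketch (not part of the original source; assumes the module-level `logger` used above exists):
log_to_file('run.log', level='DEBUG')
logger.info('pipeline started')  # now also written to run.log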
| 16,504
|
def create_referral(sender, **kwargs):
"""
    This function acts as a middleman: it emits the signal that
    validates and creates the referral.
"""
create_flat_referral.send(sender=get_user_model(), request=kwargs['request'], user=kwargs['user'])
| 16,505
|
def candidate_results_for_race_type(result_form, race_type, num_results=None):
"""Return the candidates and results for a result form and race type.
:param result_form: The result form to return data for.
:param race_type: The race type to get results for, get component results
if this is None.
:param num_results: Enforce a particular number of results, default None.
:returns: A list of tuples containing the candidate and all results for
that candidate.
"""
return get_candidates(get_results_for_race_type(result_form, race_type),
num_results)
| 16,506
|
def sort_2metals(metals):
"""
Handles iterable or string of 2 metals and returns them
in alphabetical order
Args:
metals (str || iterable): two metal element names
Returns:
(tuple): element names in alphabetical order
"""
# return None's if metals is None
if metals is None:
return None, None
if isinstance(metals, str):
        if len(metals) != 4:
            raise ValueError('metals string must contain exactly two 2-letter element symbols.')
metal1, metal2 = sorted([metals[:2], metals[2:]])
else:
metal1, metal2 = sorted(metals)
return metal1.title(), metal2.title()
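# Illustrative usage sketch (not part of the original source):
print(sort_2metals('cuag'))        # -> ('Ag', 'Cu')
print(sort_2metals(('ni', 'fe')))  # -> ('Fe', 'Ni')
print(sort_2metals(None))          # -> (None, None)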
| 16,507
|
def parse_join(tables, operation, left, right):
"""
Parses a join from the where clause
"""
# Verify Left
table_name = left['column']['table']
column_name = left['column']['name']
    # If table and column, check that table for presence
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
present_in_tables = { key for (key, value) in tables.iteritems() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
left = (table_name, column_name, column_location)
# Verify Right
table_name = right['column']['table']
column_name = right['column']['name']
    # If table and column, check that table for presence
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
present_in_tables = { key for (key, value) in tables.iteritems() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
right = (table_name, column_name, column_location)
# Are join types compatible
if tables[left[0]].column_type(left[1]) != tables[right[0]].column_type(right[1]):
write_error("ERROR: Column join types are incompatible.")
return Join(operation, left, right)
| 16,508
|
def address_id_handler(id):
"""
GET - called as /addresses/25
PUT - called to update as /addresses/25?address='abc'&lat=25&lon=89
DELETE - called as /addresses/25
:param id:
:return:
"""
if request.method == 'GET':
return jsonify(read_address(session, address_id=id))
elif request.method == 'PUT':
address = request.form.get('address','dummy')
lat = request.form.get('lat',0.1)
lon = request.form.get('lon',0.1)
update_address(session, address_id=id, search_string=address, lat=lat, lon=lon)
return jsonify({'success': True})
    elif request.method == 'DELETE':
        delete_address(session, id)
        return jsonify({'success': True})
| 16,509
|
def get_change_token_status(ChangeToken=None):
"""
Returns the status of a ChangeToken that you got by calling GetChangeToken . ChangeTokenStatus is one of the following values:
See also: AWS API Documentation
Exceptions
Examples
The following example returns the status of a change token with the ID abcd12f2-46da-4fdb-b8d5-fbd4c466928f.
Expected Output:
:example: response = client.get_change_token_status(
ChangeToken='string'
)
:type ChangeToken: string
:param ChangeToken: [REQUIRED]\nThe change token for which you want to get the status. This change token was previously returned in the GetChangeToken response.\n
:rtype: dict
    Returns: Response Syntax
    {
        'ChangeTokenStatus': 'PROVISIONED'|'PENDING'|'INSYNC'
    }
Response Structure
(dict) --
ChangeTokenStatus (string) --The status of the change token.
Exceptions
WAFRegional.Client.exceptions.WAFNonexistentItemException
WAFRegional.Client.exceptions.WAFInternalErrorException
Examples
The following example returns the status of a change token with the ID abcd12f2-46da-4fdb-b8d5-fbd4c466928f.
response = client.get_change_token_status(
ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
)
print(response)
Expected Output:
{
'ChangeTokenStatus': 'PENDING',
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'ChangeTokenStatus': 'PROVISIONED'|'PENDING'|'INSYNC'
}
:returns:
WAFRegional.Client.exceptions.WAFNonexistentItemException
WAFRegional.Client.exceptions.WAFInternalErrorException
"""
pass
| 16,510
|
def init( cur ):
"""Create a temporaty table for generating scinet. """
global gb_
cur.execute( 'DROP TABLE IF EXISTS scinet' )
cur.execute(
"""
CREATE TABLE IF NOT EXISTS scinet (
speaker_a VARCHAR(100) NOT NULL
, speaker_b VARCHAR(100) NOT NULL
, rank DECIMAL(7,4) NOT NULL DEFAULT '0.0'
, keywords TEXT
, PRIMARY KEY (speaker_a, speaker_b )
)
"""
)
db_.commit( )
| 16,511
|
def sync(path, project):
"""Upload mlflow runs data to Neptune.
PATH is a directory where Neptune will look for `mlruns` directory with mlflow data.
Examples:
neptune mlflow .
neptune mlflow /path
neptune mlflow /path --project username/sandbox
"""
# We do not want to import anything if process was executed for autocompletion purposes.
from neptune_mlflow.sync import sync as run_sync
return run_sync(path=path, project=project)
| 16,512
|
def prefix_search_heuristic_split(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding with heuristic to speed up the algorithm.
Speed up prefix computation by splitting sequence into subsequences as described by Graves (p66).
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, _ = mat.shape
# split sequence into 3 subsequences, splitting points should be roughly placed at 1/3 and 2/3
split_targets = [int(max_T * 1 / 3), int(max_T * 2 / 3)]
best = [{'target': s, 'bestDist': max_T, 'bestIdx': s} for s in split_targets]
# find good splitting points (blanks above threshold)
thres = 0.9
for t in range(max_T):
for b in best:
if mat[t, blank_idx] > thres and abs(t - b['target']) < b['bestDist']:
b['bestDist'] = abs(t - b['target'])
b['bestIdx'] = t
break
# splitting points plus begin and end of sequence
ranges = [0] + [b['bestIdx'] for b in best] + [max_T]
# do prefix search for each subsequence and concatenate results
res = ''
for i in range(len(ranges) - 1):
beg = ranges[i]
end = ranges[i + 1]
res += prefix_search(mat[beg: end, :], chars)
return res
| 16,513
|
def _parse_proc_mounts() -> typing.Iterator[MountTpl]:
"""
    :return: An iterator of (mountpoint, info) tuples, where info is a mapping with mount details
"""
for line in pathlib.Path("/proc/mounts").read_text().splitlines():
match = _PROC_MNT_LINE_RE.match(line)
if match is None:
continue # Although it should not happen ...
minfo = match.groupdict()
opts = dict(opt.split('=') if '=' in opt else (opt, True)
for opt in minfo["options"].split(','))
yield (minfo["mountpoint"],
dict(device=minfo["device"], type=minfo["type"], options=opts))
| 16,514
|
def _make_set_permissions_url(calendar_id, userid, level):
"""
:return: the URL string for GET request call
to Trumba SetPermissions method
"""
return "{0}?CalendarID={1}&Email={2}@uw.edu&Level={3}".format(
set_permission_url_prefix, calendar_id, userid, level)
| 16,515
|
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
        A tuple of tensors. If any input is None, it is replaced with a special
        constant tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs)
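# Illustrative usage sketch (not part of the original source; assumes tensorflow imported as tf):
x = tf.constant([[1.0, 2.0]])
packed = pack_inputs([x, None, x])
# None slots become a scalar int32 constant, so the structure can be round-tripped later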
| 16,516
|
def upload_blob(bucket_name, data, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "your-bucket-name"
# data = "local/path/to/file"
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_string(data)
print(f"Data uploaded to file {destination_blob_name}.")
| 16,517
|
def datetime_without_seconds(date: datetime) -> datetime:
"""
Returns given datetime with seconds and microseconds set to 0
"""
return date.replace(second=0, microsecond=0)
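# Illustrative usage sketch (not part of the original source):
ts = datetime(2021, 5, 4, 13, 37, 42, 123456)
print(datetime_without_seconds(ts))  # -> 2021-05-04 13:37:00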
| 16,518
|
def update_br(request):
"""
    Update a meeting room (boardroom)
:param request:
:return:
"""
if request.method == 'POST':
dbs = request.dbsession
app_path = request.registry.settings['app_path']
br = dbs.query(HasBoardroom).filter(HasBoardroom.id == request.POST.get('br_id', 0)).first()
old_br = copy.deepcopy(br)
new_name = request.POST.get('br_name', '')
if old_br.name != new_name:
msg = check_brm_name(dbs, room_name=request.POST.get('br_name', ''), org_id=request.POST.get('org_id', 0))
if not msg:
br.name = new_name
else:
return {
'resultFlag': 'failed',
'error_msg': msg
}
br.org_id = request.POST.get('org_id', 0)
br.config = request.POST.get('br_config', '')
br.description = request.POST.get('br_desc', '')
room_pic = request.POST.get('room_pic', '')
if room_pic:
room_pic = request.session['#room_pic']
br.picture = IMG_RPATH + str(br.org_id) + '/' + room_pic
room_logo1 = request.POST.get('room_logo1', '')
if room_logo1:
room_logo1 = request.session['#room_logo1']
br.logo1 = IMG_RPATH + str(br.org_id) + '/' + room_logo1
room_logo2 = request.POST.get('room_logo2', '')
if room_logo2:
room_logo2 = request.session['#room_logo2']
br.logo2 = IMG_RPATH + str(br.org_id) + '/' + room_logo2
room_btn = request.POST.get('room_btn', '')
if room_btn:
room_btn = request.session['#room_btn']
br.button_img = IMG_RPATH + str(br.org_id) + '/' + room_btn
room_bgd = request.POST.get('room_bgd', '')
if room_bgd:
room_bgd = request.session['#room_bgd']
br.background = IMG_RPATH + str(br.org_id) + '/' + room_bgd
br.state = request.POST.get('state', 1)
org_id = br.org_id
if old_br.org_id != int(org_id):
update_pic(old_br, br)
new_br = copy.deepcopy(br)
msg = update(dbs, br)
if not msg:
if room_pic:
delete_pic(old_br.picture, app_path)
move_pic(room_pic, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.picture, new_br.picture, app_path)
if room_logo1:
delete_pic(old_br.logo1, app_path)
move_pic(room_logo1, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo1, new_br.logo1, app_path)
if room_logo2:
delete_pic(old_br.logo2, app_path)
move_pic(room_logo2, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo2, new_br.logo2, app_path)
if room_btn:
delete_pic(old_br.button_img, app_path)
move_pic(room_btn, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.button_img, new_br.button_img, app_path)
if room_bgd:
delete_pic(old_br.background, app_path)
move_pic(room_bgd, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.background, new_br.background, app_path)
json_str = {
'resultFlag': 'failed' if msg else 'success',
'error_msg': msg
}
HyLog.log_update(request.client_addr, request.session['userAccount'],
request.POST.get('br_id') + ' failed' if msg else 'success', 'boardroom')
return json_str
return {}
| 16,519
|
def find_north_pole(valid_rooms):
"""
Decode the room names and find the north pole.
Args:
valid_rooms (list): A list of valid rooms to decode/search.
Returns:
tuple
"""
global NORTH_POLE_NAME
for room in valid_rooms:
room_name, sector_id, checksum = room
decoded_name = decode_room_name(room_name, sector_id)
if decoded_name == NORTH_POLE_NAME:
return decoded_name, sector_id
| 16,520
|
def cli(gene1, gene2, gene3, frameness, keep_exon, fusion_fraction,
add_insertion, total_coverage, output, common_filename):
"""[Simulator] Fusion generator."""
normal_coverage = total_coverage * (1. - fusion_fraction)
fusion_coverage = total_coverage * fusion_fraction
normal_ref = generate_normal_reference([gene1, gene2, gene3], output, common_filename)
fusion_ref = generate_fusion_reference([gene1, gene2, gene3], output,
keep_exon, frameness, add_insertion, common_filename)
normal_fastq = generate_fastq(normal_ref, output, 'normal', normal_coverage)
fusion_fastq = generate_fastq(fusion_ref, output, 'fusion', fusion_coverage)
merged1, merged2 = merge_fastq(normal_fastq, fusion_fastq, output, common_filename)
# chimerascan_bedpe = run_chimerascan(merged1, merged2, output)
# print chimerascan_bedpe
# generate_manifest(merged1, merged2)
# run_detango(merged1, merged2, output)
return merged1, merged2
| 16,521
|
def build_upsample_layer(cfg, *args, **kwargs):
"""Build upsample layer.
Args:
cfg (dict): The upsample layer config, which should contain:
            - typename (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
              deconv.
            - layer args: Args needed to instantiate an upsample layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding conv layer.
Returns:
nn.Module: Created upsample layer.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'typename' not in cfg:
raise KeyError(
f'the cfg dict must contain the key "typename", but got {cfg}')
cfg_ = cfg.copy()
layer_type = cfg_.pop('typename')
upsample = registry.get(layer_type, 'upsample')
if upsample is nn.Upsample:
cfg_['mode'] = layer_type
layer = upsample(*args, **kwargs, **cfg_)
return layer
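# Illustrative usage sketch (not part of the original source; assumes the `registry` used above maps 'nearest' to nn.Upsample):
cfg = dict(typename='nearest', scale_factor=2)
upsample = build_upsample_layer(cfg)  # roughly equivalent to nn.Upsample(scale_factor=2, mode='nearest')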
| 16,522
|
def ctime_ticks(t):
"""This is for backwards compatibility and should not be used."""
return tsc_time.TSC_from_ticks(t).ctime()
| 16,523
|
def _dynamo_set_last_processed_jenkins_run_id(dynamo_db, jenkins_run):
"""
    Mark the passed Jenkins run as processed in the database. This avoids duplicate processing in the future.
    It's important that runs are processed from oldest to latest (and not in parallel) since we expect to only increase
    the 'last scanned run id'.
    :param dynamo_db: Boto DynamoDB handle
:param jenkins_run: Jenkins run
:return: Nothing
"""
table = dynamo_db.Table(DYNAMODB_TABLE)
table.update_item(
Key={
DYNAMO_KEY_FULL_JOB_NAME: jenkins_run.parent_job.full_job_name
},
UpdateExpression=f"set {DYNAMO_VALUE_LAST_SCANNED_RUN_ID} = :id",
ExpressionAttributeValues={
':id': jenkins_run.run_id
}
)
| 16,524
|
def vitruvian_loss(input, mask, dataset):
"""Vitruvian loss implementation"""
if dataset == "itop":
        # 1 - 2 and 1 - 3 -> neck-shoulder
        # 2 - 4 and 3 - 5 -> shoulder-elbow
        # 4 - 6 and 5 - 7 -> elbow-hand
        # 9 - 11 and 10 - 12 -> hip-knee
        # 11 - 13 and 12 - 14 -> knee-foot
loss = _vitruvian_calculate(input, [1, 2, 1, 3], mask)
loss += _vitruvian_calculate(input, [2, 4, 3, 5], mask)
loss += _vitruvian_calculate(input, [4, 6, 5, 7], mask)
loss += _vitruvian_calculate(input, [9, 11, 10, 12], mask)
loss += _vitruvian_calculate(input, [11, 13, 12, 14], mask)
elif dataset in ("watch_n_patch", "wnp", "watch-n-patch"):
        # 20 - 4 and 20 - 8 -> spine shoulder to shoulders
        # 4 - 5 and 8 - 9 -> shoulder-elbow
        # 5 - 6 and 9 - 10 -> elbow-wrist
        # 6 - 7 and 10 - 11 -> wrist-hand
        # 12 - 0 and 0 - 16 -> hips to spine base
        # 12 - 13 and 16 - 17 -> hip-knee
        # 13 - 14 and 17 - 18 -> knee-ankle
        # 14 - 15 and 18 - 19 -> ankle-foot
limbs = [
[20, 4, 20, 8],
[4, 5, 8, 9],
[5, 6, 9, 10],
[6, 7, 10, 11],
[0, 12, 0, 16],
[12, 13, 16, 17],
[13, 14, 17, 18],
[14, 15, 18, 19],
]
loss = 0.0
for limb in limbs:
loss += _vitruvian_calculate(input, limb, mask)
return loss
| 16,525
|
def LoadElement(href, only_etag=False):
"""
Return an instance of a element as a ElementCache dict
used as a cache.
:rtype ElementCache
"""
request = SMCRequest(href=href)
request.exception = FetchElementFailed
result = request.read()
if only_etag:
return result.etag
return ElementCache(
result.json, etag=result.etag)
| 16,526
|
def load_config(config_file: str) -> EnvironmentAwareConfigParser:
"""Load the main configuration and return a config object."""
config = EnvironmentAwareConfigParser()
if not os.path.exists(config_file):
main_logger.critical('Configuration file "%s" does not exist!', config_file)
sys.exit(1)
try:
config.read(config_file)
except Exception as e:
main_logger.critical("Unable to read configuration file")
main_logger.critical(e)
sys.exit(1)
return config
| 16,527
|
def xreplace_indices(exprs, mapper, candidates=None, only_rhs=False):
"""
Create new expressions from ``exprs``, by replacing all index variables
specified in mapper appearing as a tensor index. Only tensors whose symbolic
name appears in ``candidates`` are considered if ``candidates`` is not None.
"""
get = lambda i: i.rhs if only_rhs is True else i
handle = flatten(retrieve_indexed(get(i)) for i in as_tuple(exprs))
if candidates is not None:
handle = [i for i in handle if i.base.label in candidates]
mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle]))
replaced = [i.xreplace(mapper) for i in as_tuple(exprs)]
return replaced if isinstance(exprs, Iterable) else replaced[0]
| 16,528
|
def format_ratio(in_str, separator='/'):
""" Convert a string representing a rational value to a decimal value.
Args:
in_str (str): Input string.
        separator (str): Separator character used to extract numerator and
            denominator; if not found in ``in_str``, whitespace is used.
    Returns:
        An integer or float value with 2 digits precision, or ``in_str`` if
        formatting has failed.
>>> format_ratio('48000/1')
48000
>>> format_ratio('24000 1000')
24
>>> format_ratio('24000 1001')
23.98
>>> format_ratio('1,77')
'1,77'
>>> format_ratio(1.77)
1.77
"""
if not isinstance(in_str, six.string_types):
return in_str
try:
sep = separator if separator in in_str else ' '
ratio = in_str.split(sep)
if len(ratio) == 2:
ratio = round(float(ratio[0]) / float(ratio[1]), 2)
else:
ratio = float(ratio[0])
if ratio.is_integer():
ratio = int(ratio)
return ratio
except ValueError:
return in_str
| 16,529
|
def fill_tidal_data(da,fill_time=True):
"""
Extract tidal harmonics from an incomplete xarray DataArray, use
those to fill in the gaps and return a complete DataArray.
Uses all 37 of the standard NOAA harmonics, may not be stable
with short time series.
A 5-day lowpass is removed from the harmonic decomposition, and added
back in afterwards.
Assumes that the DataArray has a 'time' coordinate with datetime64 values.
The time dimension must be dense enough to extract an exact time step
If fill_time is True, holes in the time coordinate will be filled, too.
"""
diffs=np.diff(da.time)
dt=np.median(diffs)
if fill_time:
gaps=np.nonzero(diffs>1.5*dt)[0]
pieces=[]
last=0
for gap_i in gaps:
# gap_i=10 means that the 10th diff was too big
# that means the jump from 10 to 11 was too big
# the preceding piece should go through 9, so
# exclusive of gap_i
pieces.append(da.time.values[last:gap_i])
pieces.append(np.arange( da.time.values[gap_i],
da.time.values[gap_i+1],
dt))
last=gap_i+1
pieces.append(da.time.values[last:])
dense_times=np.concatenate(pieces)
dense_values=np.nan*np.zeros(len(dense_times),np.float64)
dense_values[ np.searchsorted(dense_times,da.time.values) ] = da.values
da=xr.DataArray(dense_values,
dims=['time'],coords=[dense_times])
else:
pass
dnums=utils.to_dnum(da.time)
data=da.values
# lowpass at about 5 days, splitting out low/high components
winsize=int( np.timedelta64(5,'D') / dt )
data_lp=filters.lowpass_fir(data,winsize)
data_hp=data - data_lp
valid=np.isfinite(data_hp)
omegas=harm_decomp.noaa_37_omegas() # as rad/sec
harmonics=harm_decomp.decompose(dnums[valid]*86400,data_hp[valid],omegas)
dense=harm_decomp.recompose(dnums*86400,harmonics,omegas)
data_recon=utils.fill_invalid(data_lp) + dense
data_filled=data.copy()
missing=np.isnan(data_filled)
data_filled[missing] = data_recon[missing]
fda=xr.DataArray(data_filled,coords=[da.time],dims=['time'])
return fda
| 16,530
|
def wrapper_unit_scaling(x, T, s_ref, n_gt, *args, **kwargs):
"""Normalize segments to unit-length and use center-duration format
"""
xc = segment_format(x, 'b2c')
init_ref = np.repeat(s_ref[:, 0], n_gt)
return segment_unit_scaling(xc, T, init_ref)
| 16,531
|
def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device):
"""beam search | greedy decoding implemented by fairseq"""
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
gen_args = copy.copy(args)
with open_dict(gen_args):
gen_args.beam = beam_size
generator = task.build_generator(models, gen_args)
data_size = len(data_lines)
all_results = []
logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}')
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size, batch_size)):
batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]]
batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
lengths = torch.LongTensor([t.numel() for t in batch_ids])
batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
batch_dataset.left_pad_source = True
batch = batch_dataset.collater(batch_dataset)
batch = utils.apply_to_sample(lambda t: t.to(device), batch)
translations = generator.generate(models, batch, prefix_tokens=None)
results = []
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos])
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta
| 16,532
|
def retrieve_features(dataframe):
"""
Retrieves features (X) from dataframe
:param dataframe:
:return:
"""
return list(dataframe["tweet"])
| 16,533
|
def atarzia_short_MD_settings():
"""My default settings for short, crude cage optimizations in stk.
Modified on 26/04/19.
"""
Settings = {
'output_dir': None,
'timeout': None,
'force_field': 16,
'temperature': 700, # K
'conformers': 50,
'time_step': 1, # fs
'eq_time': 50, # ps
'simulation_time': 1000, # ps -- 1 ns
'maximum_iterations': 2500,
'minimum_gradient': 0.05,
'use_cache': False
}
return Settings
| 16,534
|
def result_by_score_from_csv(f, score, ascending=True):
"""Return result with the best defined score"""
df = pd.read_csv(f)
    df.sort_values(score, ascending=ascending, inplace=True)
    # use positional indexing: after sorting, the row labelled 0 is not necessarily the best-scoring one
    return df.iloc[0][["pdb_code", score]].tolist()
| 16,535
|
def similarity_metric_simple_test(cur_samples, next_samples, bins, min_r, max_r):
"""
Test the output of the 6 similarity metrics for a single set of current and next samples
:param cur_samples: samples of the current distribution
:param next_samples: samples of the target distribution
:param bins: the bin sizes
:param min_r: minimum reward
:param max_r: maximum reward
:return: nothing, only prints
"""
ovl_metric = ovl(cur_samples, next_samples, bins)
emd_metric = emd(cur_samples, next_samples, bins)
ks_metric = ks(cur_samples, next_samples)
hell_metric = hellinger(cur_samples, next_samples, bins)
js_metric = jsd(cur_samples, next_samples, bins)
tdl_metric = tdl_rq1(cur_samples, next_samples, dist_type='uniform', dist_params=[min_r, max_r])
print(f'ovl: {ovl_metric}\nemd: {emd_metric}\nks: {ks_metric}\nhellinger: {hell_metric}\njs: {js_metric}'
f'\ntdl: {tdl_metric}')
| 16,536
|
def create_column_dnn(
predict_feature='close',
ticker='',
debug=False,
use_epochs=10,
use_batch_size=10,
use_test_size=0.1,
use_random_state=1,
use_seed=7,
use_shuffle=False,
model_verbose=True,
fit_verbose=True,
use_scalers=True,
df=[],
dnn_config={},
compile_config={},
s3_bucket='',
s3_key='',
send_plots_to_slack=False):
"""create_column_dnn
For scaler-normalized datasets this will
compile numeric columns and ignore string/non-numeric
columns as training and test feature columns
:param predict_feature: Column to create DNN with
:param ticker: Ticker being used
:param debug: Debug mode
:param use_epochs: Epochs times to use
:param use_batch_size: Batch size to use
:param use_test_size: Test size to use
:param use_random_state: Random state to train with
:param use_seed: Seed used to build scalar datasets
:param use_shuffle: To shuffle the regression estimator or not
:param model_verbose: To use a verbose Keras regression model or not
:param fit_verbose: To use a verbose fitting of the regression estimator
:param use_scalers: To build using scalars or not
:param df: Ticker dataset
:param dnn_config: Deep Neural Net keras model json to build the model
:param compile_config: Deep Neural Net dictionary of compile options
:param s3_bucket: S3 Bucket
:param s3_key: S3 Key
"""
df_filter = (df[f'{predict_feature}'] >= 0.1)
first_date = df[df_filter]['date'].iloc[0]
end_date = df[df_filter]['date'].iloc[-1]
if 'minute' in df:
found_valid_minute = df['minute'].iloc[0]
if found_valid_minute:
first_date = df[df_filter]['minute'].iloc[0]
end_date = df[df_filter]['minute'].iloc[-1]
num_rows = len(df.index)
log.info(
f'prepared training data from '
f'history {s3_bucket}@{s3_key} '
f'rows={num_rows} '
f'dates: {first_date} to {end_date}')
if debug:
for i, r in df.iterrows():
log.info(
f'{r["minute"]} - {r["{}".format(predict_feature)]}')
# end of for loop
log.info(
f'columns: {df.columns.values}')
log.info(
f'rows: {len(df.index)}')
# end of debug
use_all_features = use_scalers
all_features = []
train_features = []
if use_all_features:
for c in df.columns.values:
if (
pandas_types.is_numeric_dtype(df[c]) and
c not in train_features):
if c != predict_feature:
train_features.append(c)
if c not in all_features:
all_features.append(c)
dnn_config['layers'][-1]['activation'] = (
'sigmoid')
else:
temp_choices = choices[:]
temp_choices.remove(predict_feature)
train_features = ['open']
train_features.extend(temp_choices)
all_features = [
f'{predict_feature}'
] + train_features
num_features = len(train_features)
features_and_minute = [
'minute'
] + all_features
log.info(
'converting columns to floats')
timeseries_df = df[df_filter][features_and_minute].fillna(-10000.0)
converted_df = timeseries_df[all_features].astype('float32')
train_df = None
test_df = None
scaler_predictions = None
if use_all_features:
scaler_res = build_scaler_datasets.build_datasets_using_scalers(
train_features=train_features,
test_feature=predict_feature,
df=converted_df,
test_size=use_test_size,
seed=use_seed)
if scaler_res['status'] != ae_consts.SUCCESS:
log.error(
'failed to build scaler train and test datasets')
return
train_df = scaler_res['scaled_train_df']
test_df = scaler_res['scaled_test_df']
x_train = scaler_res['x_train']
x_test = scaler_res['x_test']
y_train = scaler_res['y_train']
y_test = scaler_res['y_test']
scaler_predictions = scaler_res['scaler_test']
else:
log.info(
'building train and test dfs from subset of features')
train_df = converted_df[train_features]
test_df = converted_df[[predict_feature]]
log.info(
f'splitting {num_rows} into test and training '
f'size={use_test_size}')
(x_train,
x_test,
y_train,
y_test) = tt_split.train_test_split(
train_df,
test_df,
test_size=use_test_size,
random_state=use_random_state)
log.info(
f'split breakdown - '
f'x_train={len(x_train)} '
f'x_test={len(x_test)} '
f'y_train={len(y_train)} '
f'y_test={len(y_test)}')
def set_model():
return build_dnn.build_regression_dnn(
num_features=num_features,
compile_config=compile_config,
model_config=dnn_config)
estimator = keras_scikit.KerasRegressor(
build_fn=set_model,
epochs=use_epochs,
batch_size=use_batch_size,
verbose=model_verbose)
log.info(
f'fitting estimator - '
f'predicting={predict_feature} '
f'epochs={use_epochs} '
f'batch={use_batch_size} '
f'test_size={use_test_size} '
f'seed={use_seed}')
history = estimator.fit(
x_train,
y_train,
validation_data=(
x_train,
y_train),
epochs=use_epochs,
batch_size=use_batch_size,
shuffle=use_shuffle,
verbose=fit_verbose)
created_on = (
datetime.datetime.now().strftime(
ae_consts.COMMON_TICK_DATE_FORMAT))
plot_fit_history.plot_dnn_fit_history(
df=history.history,
title=(
f'DNN Errors Over Training Epochs\n'
f'Training Data: s3://{s3_bucket}/{s3_key}\n'
f'Created: {created_on}'),
red='mean_squared_error',
blue='mean_absolute_error',
green='acc',
orange='cosine_proximity',
send_plots_to_slack=send_plots_to_slack)
# on production use newly fetched pricing data
# not the training data
predict_records = []
if use_all_features:
prediction_res = build_scaler_df.build_scaler_dataset_from_df(
df=converted_df[train_features])
if prediction_res['status'] == ae_consts.SUCCESS:
predict_records = prediction_res['df']
else:
predict_records = converted_df[train_features]
log.info(
f'making predictions: {len(predict_records)}')
predictions = estimator.model.predict(
predict_records,
verbose=True)
np.set_printoptions(threshold=np.nan)
indexes = tf.argmax(predictions, axis=1)
data = {}
data['indexes'] = indexes
price_predictions = []
if use_all_features and scaler_predictions:
price_predictions = [
ae_consts.to_f(x) for x in
scaler_predictions.inverse_transform(
predictions.reshape(-1, 1)).reshape(-1)]
else:
price_predictions = [ae_consts.to_f(x[0]) for x in predictions]
timeseries_df[f'predicted_{predict_feature}'] = price_predictions
timeseries_df['error'] = (
timeseries_df[f'{predict_feature}'] -
timeseries_df[f'predicted_{predict_feature}'])
output_features = [
'minute',
f'{predict_feature}',
f'predicted_{predict_feature}',
'error'
]
date_str = (
f'Dates: {timeseries_df["minute"].iloc[0]} '
f'to '
f'{timeseries_df["minute"].iloc[-1]}')
log.info(
f'historical {predict_feature} with predicted {predict_feature}: '
f'{timeseries_df[output_features]}')
log.info(
date_str)
log.info(
f'Columns: {output_features}')
average_error = ae_consts.to_f(
timeseries_df['error'].sum() / len(timeseries_df.index))
log.info(
f'Average historical {predict_feature} '
f'vs predicted {predict_feature} error: '
f'{average_error}')
log.info(
f'plotting historical {predict_feature} vs predicted {predict_feature}'
f' from training with columns={num_features}')
ts_filter = (timeseries_df[f'{predict_feature}'] > 0.1)
latest_feature = (
timeseries_df[ts_filter][f'{predict_feature}'].iloc[-1])
latest_predicted_feature = (
timeseries_df[ts_filter][f'predicted_{predict_feature}'].iloc[-1])
log.info(
f'{end_date} {predict_feature}={latest_feature} '
f'with '
f'predicted_{predict_feature}={latest_predicted_feature}')
plot_trading_history.plot_trading_history(
title=(
f'{ticker} - Historical {predict_feature.title()} vs '
f'Predicted {predict_feature.title()}\n'
f'Number of Training Features: {num_features}\n'
f'{date_str}'),
df=timeseries_df,
red=f'{predict_feature}',
blue=f'predicted_{predict_feature}',
green=None,
orange=None,
date_col='minute',
date_format='%d %H:%M:%S\n%b',
xlabel='minute',
ylabel=(
f'Historical {predict_feature.title()} vs '
f'Predicted {predict_feature.title()}'),
df_filter=ts_filter,
width=8.0,
height=8.0,
show_plot=True,
dropna_for_all=False,
send_plots_to_slack=send_plots_to_slack)
| 16,537
|
def _release_lock(key):
"""Release the lock by deleting the key from the cache."""
django_cache.delete(key)
| 16,538
|
def main():
"""
Generate CoreML model for benchmark by using non-trained model.
It's useful if you just want to measure the inference speed
of your model
"""
hack_coremltools()
sizes = [224, 192, 160, 128]
alphas = [1., .75, .50, .25]
name_fmt = 'mobile_unet_{0:}_{1:03.0f}_{2:03.0f}'
experiments = [
{
'name': name_fmt.format(s, a * 100, a * 100),
'model': MobileUNet(input_shape=(s, s, 3),
input_tensor=Input(shape=(s, s, 3)),
alpha=a,
alpha_up=a)
}
for s, a in product(sizes, alphas)
]
for e in experiments:
model = e['model']
name = e['name']
model.summary()
with CustomObjectScope(custom_objects()):
coreml_model = coremltools.converters.keras.convert(model, input_names='data')
coreml_model.save('artifacts/{}.mlmodel'.format(name))
| 16,539
|
def get_default_hand_connection_style(
) -> Mapping[Tuple[int, int], DrawingSpec]:
"""Returns the default hand connection drawing style.
Returns:
A mapping from each hand connection to the default drawing spec.
"""
hand_connection_style = {}
for k, v in _HAND_CONNECTION_STYLE.items():
for connection in k:
hand_connection_style[connection] = v
return hand_connection_style
| 16,540
|
def lists_to_html_table(a_list):
"""
Converts a list of lists to a HTML table. First list becomes the header of the table.
Useful while sending email from the code
:param list(list) a_list: values in the form of list of lists
:return: HTML table representation corresponding to the values in the lists
:rtype: str
"""
header = "<tr><th>%s</th></tr>" % ("</th><th>".join(a_list[0]))
body = ""
if len(a_list) > 1:
for sub_list in a_list[1:]:
body += "<tr><td>%s</td></tr>\n" % ("</td><td>".join(sub_list))
return "<table>%s\n%s</table>" % (header, body)
| 16,541
|
def from_matrix_vector(matrix, vector):
"""Combine a matrix and vector into a homogeneous transform.
Combine a rotation matrix and translation vector into a transform
in homogeneous coordinates.
Parameters
----------
matrix : ndarray
        An NxM array representing the linear part of the transform,
        i.e. a transform from an M-dimensional space to an N-dimensional space.
vector : ndarray
A 1xN array representing the translation.
Returns
-------
xform : ndarray
        An (N+1)x(M+1) transform matrix.
See Also
--------
to_matrix_vector
"""
nin, nout = matrix.shape
t = np.zeros((nin+1,nout+1), matrix.dtype)
t[0:nin, 0:nout] = matrix
t[nin, nout] = 1.
t[0:nin, nout] = vector
return t
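# Illustrative usage sketch (not part of the original source; assumes numpy imported as np):
R = np.eye(3)
t = np.array([1.0, 2.0, 3.0])
print(from_matrix_vector(R, t))
# -> 4x4 homogeneous transform with R in the upper-left block and t in the last column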
| 16,542
|
def test_escaped_brackets(capsys):
"""Don't warn if it seems fine."""
# escaped cases
command_fix('one two three \(\) four five six')
out, _err = capsys.readouterr()
assert "unescaped brackets" not in out
command_fix('one two three \'()\' four five six')
out, _err = capsys.readouterr()
assert "unescaped brackets" not in out
command_fix('one two three "()" four five six')
out, _err = capsys.readouterr()
assert "unescaped brackets" not in out
| 16,543
|
async def _time_sync_task(time_between_syncs_s=300):
"""
task for synchronising time
"""
while True:
try:
server_time = await _time_from_server()
_set_system_time(server_time)
except Exception as e:
grown_log.error("time_control: %s" % str(e))
await asyncio.sleep(time_between_syncs_s)
| 16,544
|
def take_closest(myList, myNumber):
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before
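# Illustrative usage sketch (not part of the original source; assumes bisect_left imported from bisect):
grid = [0, 10, 20, 30]
print(take_closest(grid, 12))  # -> 10
print(take_closest(grid, 15))  # -> 10 (ties go to the smaller value)
print(take_closest(grid, 16))  # -> 20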
| 16,545
|
def check_host(host):
""" Helper function to get the hostname in desired format """
if not ('http' in host and '//' in host) and host[len(host) - 1] == '/':
return ''.join(['http://', host[:len(host) - 1]])
elif not ('http' in host and '//' in host):
return ''.join(['http://', host])
elif host[len(host) - 1] == '/':
return host[:len(host) - 1]
else:
return host
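# Illustrative usage sketch (not part of the original source):
print(check_host('example.com'))           # -> 'http://example.com'
print(check_host('example.com/'))          # -> 'http://example.com'
print(check_host('https://example.com/'))  # -> 'https://example.com'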
| 16,546
|
def job_wrapper_output_files(params, user_defined_work_func, register_cleanup, touch_files_only):
"""
job wrapper for all that only deals with output files.
run func on any output file if not up to date
"""
job_wrapper_io_files(params, user_defined_work_func, register_cleanup, touch_files_only,
output_files_only=True)
| 16,547
|
def do_bay_show(cs, args):
"""Show details about the given bay.
(Deprecated in favor of cluster-show.)
"""
bay = cs.bays.get(args.bay)
if args.long:
baymodel = cs.baymodels.get(bay.baymodel_id)
del baymodel._info['links'], baymodel._info['uuid']
for key in baymodel._info:
if 'baymodel_' + key not in bay._info:
bay._info['baymodel_' + key] = baymodel._info[key]
_show_bay(bay)
| 16,548
|
def task_time_slot_add(request, task_id, response_format='html'):
"""Time slot add to preselected task"""
task = get_object_or_404(Task, pk=task_id)
if not request.user.profile.has_permission(task, mode='x'):
return user_denied(request, message="You don't have access to this Task")
if request.POST:
task_time_slot = TaskTimeSlot(
task=task, time_to=datetime.now(), user=request.user.profile)
form = TaskTimeSlotForm(
request.user.profile, task_id, request.POST, instance=task_time_slot)
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
elif form.is_valid():
task_time_slot = form.save()
task_time_slot.set_user_from_request(request)
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
else:
form = TaskTimeSlotForm(request.user.profile, task_id)
subtasks = Object.filter_by_request(
request, Task.objects.filter(parent=task))
time_slots = Object.filter_by_request(
request, TaskTimeSlot.objects.filter(task=task))
context = _get_default_context(request)
context.update({'form': form,
'task': task,
'subtasks': subtasks,
'time_slots': time_slots})
return render_to_response('projects/task_time_add', context,
context_instance=RequestContext(request), response_format=response_format)
| 16,549
|
def get_artist_names(res: dict[str, Any]) -> str:
"""
Retrieves all artist names for a given input to the "album" key of a response.
"""
artists = []
for artist in res["artists"]:
artists.append(artist["name"])
artists_str = ", ".join(artists)
return artists_str
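# Illustrative usage sketch (not part of the original source; a minimal response-shaped dict):
res = {"artists": [{"name": "Miles Davis"}, {"name": "John Coltrane"}]}
print(get_artist_names(res))  # -> 'Miles Davis, John Coltrane'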
| 16,550
|
def json_response(func):
"""
View decorator function that converts the dictionary response
returned by a view function to django JsonResponse.
"""
@wraps(func)
def func_wrapper(request, *args, **kwargs):
func_response = func(request, *args, **kwargs)
status_code = func_response.get('status_code', 200)
return JsonResponse(func_response, status=status_code)
return func_wrapper
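# Illustrative usage sketch (not part of the original source; a hypothetical Django view using the decorator above):
@json_response
def health_check(request):
    return {'status': 'ok', 'status_code': 200}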
| 16,551
|
def clean_df(df):
"""return : pandas.core.frame.DataFrame"""
df.index = pd.DatetimeIndex(df.comm_time)
df = df.sort_index()
    df = df[~(np.abs(df.com_per - df.com_per.mean()) > (3 * df.com_per.std()))]  # drop rows more than three standard deviations out; per-capita calculations use df2
    df = df.drop('_id', axis=1)
df = df.drop_duplicates()
return df
| 16,552
|
def _nonnull_powerset(iterable) -> Iterator[Tuple[Any]]:
"""Returns powerset of iterable, minus the empty set."""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(1, len(s) + 1))
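# Illustrative usage sketch (not part of the original source):
print(list(_nonnull_powerset([1, 2, 3])))
# -> [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]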
| 16,553
|
def _parse_obs_status_file(filename):
"""
Parse a yaml file and return a dictionary.
The dictionary will be of the form: {'obs': [], 'bad': [], 'mags: []}
:param filename:
:return:
"""
with open(filename) as fh:
status = yaml.load(fh, Loader=yaml.SafeLoader)
if 'obs' not in status:
status['obs'] = []
if 'bad' not in status:
status['bad'] = []
if 'mags' not in status:
status['mags'] = []
if hasattr(status['bad'], 'items'):
status['bad'] = list(status['bad'].items())
return status
| 16,554
|
def extract_optional_suffix(r):
"""
a | a b -> a b?
"""
modified = False
    def match_replace_fn(o):
        nonlocal modified
if isinstance(o, Antlr4Selection):
potential_prefix = None
potential_prefix_i = None
to_remove = []
for i, c in enumerate(o):
if potential_prefix is None:
potential_prefix = c
potential_prefix_i = i
else:
# check if the potential_prefix is really a prefix of this rule
is_prefix, suffix = is_prefix_of_elem(potential_prefix, c)
if is_prefix:
# put suffix as a optional to a prefix
if list(iter_non_visuals(suffix)):
if not isinstance(potential_prefix, Antlr4Sequence):
assert o[potential_prefix_i] is potential_prefix
potential_prefix = Antlr4Sequence([potential_prefix, ])
o[potential_prefix_i] = potential_prefix
if len(suffix) == 1:
suffix = suffix[0]
else:
suffix = Antlr4Sequence(suffix)
potential_prefix.append(Antlr4Option(suffix))
to_remove.append(c)
potential_prefix = None
potential_prefix_i = None
modified = True
else:
potential_prefix = c
potential_prefix_i = i
for c in to_remove:
o.remove(c)
if len(o) == 1:
return Antlr4Sequence([o[0], ])
replace_item_by_sequence(r, match_replace_fn)
return modified
| 16,555
|
def _heading_index(config, info, token, stack, level, blockquote_depth):
"""Get the next heading level, adjusting `stack` as a side effect."""
# Treat chapter titles specially.
if level == 1:
return tuple(str(i) for i in stack)
# Moving up
if level > len(stack):
if (level > len(stack) + 1) and (blockquote_depth == 0):
err(f"Heading {level} out of place", info, token)
while len(stack) < level:
stack.append(1)
# Same level
elif level == len(stack):
stack[-1] += 1
# Going down
else:
while len(stack) > level:
stack.pop()
stack[-1] += 1
# Report.
return tuple(str(i) for i in stack)
| 16,556
|
def sin_salida_naive(vuelos: Data) -> List[str]:
"""Retorna una lista de aeropuertos a los cuales hayan llegado
vuelos pero no hayan salido vuelos de este.
:param vuelos: Información de los vuelos.
:vuelos type: Dict[str, Dict[str, Union[str, float]]]
:return: Lista de aeropuertos
:rtype: List[str]
"""
salidas, llegadas, aeropuertos = [], [], []
for vuelo in vuelos.values():
salidas.append(vuelo['origen'])
llegadas.append(vuelo['destino'])
for aeropuerto in llegadas:
if aeropuerto not in salidas:
aeropuertos.append(aeropuerto)
return aeropuertos
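# Illustrative usage sketch (not part of the original source; a minimal `vuelos` mapping shaped like the docstring):
vuelos = {
    'AV001': {'origen': 'BOG', 'destino': 'MDE'},
    'AV002': {'origen': 'MDE', 'destino': 'CTG'},
}
print(sin_salida_naive(vuelos))  # -> ['CTG'] (flights arrive there but none depart)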
| 16,557
|
def test_persistent_group_missed_inv_resp(dev):
"""P2P persistent group re-invocation with invitation response getting lost"""
form(dev[0], dev[1])
addr = dev[1].p2p_dev_addr()
dev[1].global_request("SET persistent_reconnect 1")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr, social=True):
raise Exception("Peer " + addr + " not found")
dev[0].dump_monitor()
peer = dev[0].get_peer(addr)
# Drop the first Invitation Response frame
if "FAIL" in dev[0].request("SET ext_mgmt_frame_handling 1"):
raise Exception("Failed to enable external management frame handling")
cmd = "P2P_INVITE persistent=" + peer['persistent'] + " peer=" + addr
dev[0].global_request(cmd)
rx_msg = dev[0].mgmt_rx()
if rx_msg is None:
raise Exception("MGMT-RX timeout (no Invitation Response)")
time.sleep(2)
# Allow following Invitation Response frame to go through
if "FAIL" in dev[0].request("SET ext_mgmt_frame_handling 0"):
raise Exception("Failed to disable external management frame handling")
time.sleep(1)
# Force the P2P Client side to be on its Listen channel for retry
dev[1].p2p_listen()
ev = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=15)
if ev is None:
raise Exception("Invitation result timed out")
# Allow P2P Client side to continue connection-to-GO attempts
dev[1].p2p_stop_find()
# Verify that group re-invocation goes through
ev = dev[1].wait_global_event([ "P2P-GROUP-STARTED",
"P2P-GROUP-FORMATION-FAILURE" ],
timeout=20)
if ev is None:
raise Exception("Group start event timed out")
if "P2P-GROUP-STARTED" not in ev:
raise Exception("Group re-invocation failed")
dev[0].group_form_result(ev)
ev = dev[0].wait_global_event([ "P2P-GROUP-STARTED" ], timeout=5)
if ev is None:
raise Exception("Group start event timed out on GO")
dev[0].group_form_result(ev)
terminate_group(dev[0], dev[1])
| 16,558
|
def converter(doc):
"""
This is a function for converting various kinds of objects we see
inside a graffle document.
"""
if doc.nodeName == "#text":
return str(doc.data)
elif doc.nodeName == "string":
return str(doc.firstChild.data)
elif doc.nodeName == 'integer':
return int(doc.firstChild.data)
elif doc.nodeName == 'real':
return float(doc.firstChild.data)
elif doc.nodeName == 'dict':
return convert_dict(doc)
elif doc.nodeName == 'array':
return convert_list(doc)
elif doc.nodeName == 'plist':
return convert_list(doc)
else:
return 'unknown:' + doc.nodeName
| 16,559
|
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c))
| 16,560
|
def details_from_params(
params: QueryParams,
items_per_page: int,
items_per_page_async: int = -1,
) -> common.Details:
"""Create details from request params."""
try:
page = int(params.get('page', 1))
except (ValueError, TypeError):
page = 1
try:
anchor = int(params.get('anchor', 1))
except (ValueError, TypeError):
anchor = -1
return common.Details(
page=max(1, page),
anchor=anchor,
items_per_page=items_per_page,
items_per_page_async=items_per_page_async,
)
| 16,561
|
def write_gif_with_tempfiles(clip, filename, fps=None, program= 'ImageMagick',
opt="OptimizeTransparency", fuzz=1, verbose=True,
loop=0, dispose=True, colors=None, tempfiles=False):
""" Write the VideoClip to a GIF file.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg. Does the same as write_gif (see this one for more
docstring), but writes every frame to a file instead of passing
    them in RAM. Useful on computers with little RAM.
"""
fileName, fileExtension = os.path.splitext(filename)
tt = np.arange(0,clip.duracion, 1.0/fps)
tempfiles = []
verbose_print(verbose, "\n[MoviePy] Building file %s\n"%filename
+40*"-"+"\n")
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
total = int(clip.duracion*fps)+1
for i, t in tqdm(enumerate(tt), total=total):
name = "%s_GIFTEMP%04d.png"%(fileName, i+1)
tempfiles.append(name)
clip.save_frame(name, t, withmask=True)
delay = int(100.0/fps)
if program == "ImageMagick":
verbose_print(verbose, "[MoviePy] Optimizing GIF with ImageMagick... ")
cmd = [get_setting("IMAGEMAGICK_BINARY"),
'-delay' , '%d'%delay,
"-dispose" ,"%d"%(2 if dispose else 1),
"-loop" , "%d"%loop,
"%s_GIFTEMP*.png"%fileName,
"-coalesce",
"-layers", "%s"%opt,
"-fuzz", "%02d"%fuzz + "%",
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
elif program == "ffmpeg":
cmd = [get_setting("FFMPEG_BINARY"), '-y',
'-f', 'image2', '-r',str(fps),
'-i', fileName+'_GIFTEMP%04d.png',
'-r',str(fps),
filename]
try:
subprocess_call( cmd, verbose = verbose )
verbose_print(verbose, "[MoviePy] GIF %s is ready."%filename)
except (IOError,OSError) as err:
error = ("MoviePy Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This error can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
for f in tempfiles:
os.remove(f)
| 16,562
|
def error(msg):
"""Exit with error 'msg'
"""
sys.exit("ERROR: " + msg)
| 16,563
|
def run_nuclei_type_stat(
pred_dir, true_dir, nuclei_type_dict, type_uid_list=None, exhaustive=True, rad=12, verbose=False
):
"""
rad = 12 if x40
rad = 6 if x20
"""
def _get_type_name(uid, ntd=nuclei_type_dict):
for name,v in ntd.items():
if v == uid:
return name
def calc_type_metrics(paired_true, paired_pred, unpaired_true, unpaired_pred, type_id, w):
type_samples = (paired_true == type_id) | (paired_pred == type_id)
paired_true = paired_true[type_samples]
paired_pred = paired_pred[type_samples]
# unpaired_pred_t = unpaired_pred[unpaired_pred == type_id] # (unpaired_pred == type_id).sum()
# unpaired_true_t = unpaired_true[unpaired_true == type_id]
# Original
tp_dt = ((paired_true == type_id) & (paired_pred == type_id)).sum()
tn_dt = ((paired_true != type_id) & (paired_pred != type_id)).sum()
fp_dt = ((paired_true != type_id) & (paired_pred == type_id)).sum()
fn_dt = ((paired_true == type_id) & (paired_pred != type_id)).sum()
# Classification
# TP - detected cell with GT label t, classified as t
tp_dtc = ((paired_true == type_id) & (paired_pred == type_id)).sum()
# TN - detected cell with GT label other than t, classified as other than t
tn_dtc = ((paired_true != type_id) & (paired_pred != type_id)).sum()
# FP - detected cell with GT label other than t classified as t
fp_dtc = ((paired_true != type_id) & (paired_pred == type_id)).sum()
# FN - detected cell with GT label t classified as other than t
fn_dtc = ((paired_true == type_id) & (paired_pred != type_id)).sum()
# Integrated classification
# TP - detected cell with GT label t, classified as t
tp_dtic = ((paired_true == type_id) & (paired_pred == type_id)).sum()
# TN - detected or falsely detected cell with GT label other than t, classified as other than t
tn_dtic = np.concatenate((
((paired_true != type_id) & (paired_pred != type_id)),
(unpaired_pred != type_id)
# np.concatenate(
# ((unpaired_true != type_id), (unpaired_pred != type_id))
# )
)).sum()
# FP - detected or falsely detected cell with GT label other than t, classified as t
fp_dtic = np.concatenate((
((paired_true != type_id) & (paired_pred == type_id)),
(unpaired_pred == type_id)
# np.concatenate(
# ((unpaired_true != type_id), (unpaired_pred == type_id))
# )
)).sum()
# FN - detected cell with GT label t, classified as other than t and all cells with GT label t not detected
fn_dtic = np.concatenate((
((paired_true == type_id) & (paired_pred != type_id)),
(unpaired_true == type_id)
)).sum()
if not exhaustive:
ignore = (paired_true == -1).sum()
fp_dt -= ignore
tp_d = (paired_pred == type_id).sum()
# tn_d = (paired_true == type_id).sum()
fp_d = (unpaired_pred == type_id).sum()
fn_d = (unpaired_true == type_id).sum()
rec_dt = tp_d / (tp_d + fn_d)
def __internal_metrics(tp, tn, fp, fn):
# print (f"tp: {tp}, \ntn: {tn}, \nfp:{fp}, fn: {fn}\n")
acc = (tp + tn) / (tp + fp + fn + tn)
prec = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (prec * recall) / (prec + recall)
# print (f"Accuracy: {acc}, \nPrecision: {prec}, \nRecall:{recall}, F1: {f1}\n")
return acc, prec, recall, f1
res_class = __internal_metrics(tp_dtc, tn_dtc, fp_dtc, fn_dtc)
dtc_tptnfpfn = (tp_dtc, tn_dtc, fp_dtc, fn_dtc)
res_i_class = __internal_metrics(tp_dtic, tn_dtic, fp_dtic, fn_dtic)
dtic_tptnfpfn = (tp_dtic, tn_dtic, fp_dtic, fn_dtic)
# print (f"tp_dt: {tp_dt}") # TPc
# print (f"tn_dt: {tn_dt}") # TNc
# print (f"fp_dt: {fp_dt}") # FPc
# print (f"fn_dt: {fn_dt}") # FNc
# print (f"fp_d: {fp_d}")
# print (f"fn_d: {fn_d}")
tp_w = tp_dt + tn_dt
fp_w = 2 * fp_dt + fp_d
fn_w = 2 * fn_dt + fn_d
w_f1_type = (2 * (tp_dt + tn_dt)) / (
2 * (tp_dt + tn_dt)
+ w[0] * fp_dt
+ w[1] * fn_dt
+ w[2] * fp_d
+ w[3] * fn_d
)
w_acc_type = (tp_w) / (tp_w + fp_w + fn_w) ## check
w_precision_type = tp_w / (tp_w + fp_w)
w_recall_type = tp_w / (tp_w + fn_w)
weighted = (w_acc_type, w_precision_type, w_recall_type, w_f1_type)
cls_r = (dtc_tptnfpfn, res_class)
icls_r = (dtic_tptnfpfn, res_i_class)
#return f1_type, precision_type, recall_type
return (
rec_dt, ### Segmentation recall
cls_r, ### Classification
icls_r, ### Integrated classification
weighted ### Weighted
)
######################################################
types = sorted([f"{v}:{k}" for k, v in nuclei_type_dict.items()])
if verbose: print(types)
file_list = glob.glob(os.path.join(pred_dir, "*.mat"))
file_list.sort() # ensure same order [1]
paired_all = [] # unique matched index pair
unpaired_true_all = [] # the index must exist in `true_inst_type_all` and unique
unpaired_pred_all = [] # the index must exist in `pred_inst_type_all` and unique
true_inst_type_all = [] # each index is 1 independent data point
pred_inst_type_all = [] # each index is 1 independent data point
for file_idx, filename in enumerate(file_list[:]):
filename = os.path.basename(filename)
basename = filename.split(".")[0]
# print (basename)
# true_info = sio.loadmat(os.path.join(true_dir, '{}.mat'.format(basename)))
# # dont squeeze, may be 1 instance exist
# true_centroid = (true_info['inst_centroid']).astype('float32')
# true_inst_type = (true_info['inst_type']).astype('int32')
true_info = np.load(
os.path.join(true_dir, "{}.npy".format(basename)), allow_pickle=True
)
# dont squeeze, may be 1 instance exist
true_centroid = (true_info.item().get("inst_centroid")).astype("float32")
true_inst_type = (true_info.item().get("inst_type")).astype("int32")
if true_centroid.shape[0] != 0:
true_inst_type = true_inst_type[:, 0]
else: # no instance at all
pass
true_centroid = np.array([[0, 0]])
true_inst_type = np.array([0])
pred_info = sio.loadmat(os.path.join(pred_dir, "{}.mat".format(basename)))
# dont squeeze, may be 1 instance exist
pred_centroid = (pred_info["inst_centroid"]).astype("float32")
pred_inst_type = (pred_info["inst_type"]).astype("int32")
if pred_centroid.shape[0] != 0:
pred_inst_type = pred_inst_type[:, 0]
else: # no instance at all
pass
pred_centroid = np.array([[0, 0]])
pred_inst_type = np.array([0])
# ! if take longer than 1min for 1000 vs 1000 pairing, sthg is wrong with coord
paired, unpaired_true, unpaired_pred = pair_coordinates(
true_centroid, pred_centroid, rad
)
# * Aggreate information
# get the offset as each index represent 1 independent instance
true_idx_offset = (
true_idx_offset + true_inst_type_all[-1].shape[0] if file_idx != 0 else 0
)
pred_idx_offset = (
pred_idx_offset + pred_inst_type_all[-1].shape[0] if file_idx != 0 else 0
)
true_inst_type_all.append(true_inst_type)
pred_inst_type_all.append(pred_inst_type)
# increment the pairing index statistic
if paired.shape[0] != 0: # ! sanity
paired[:, 0] += true_idx_offset
paired[:, 1] += pred_idx_offset
paired_all.append(paired)
unpaired_true += true_idx_offset
unpaired_pred += pred_idx_offset
unpaired_true_all.append(unpaired_true)
unpaired_pred_all.append(unpaired_pred)
paired_all = np.concatenate(paired_all, axis=0) # (x, 2) # paired ids (found in GT and pred)
unpaired_true_all = np.concatenate(unpaired_true_all, axis=0) # (x,) # unpaired ids (found in GT and NOT in pred)
unpaired_pred_all = np.concatenate(unpaired_pred_all, axis=0) # (x,) # unpaired ids (NOT found in GT and found in pred)
true_inst_type_all = np.concatenate(true_inst_type_all, axis=0) # all type ids in true [3,3,3...1,1,1]
paired_true_type = true_inst_type_all[paired_all[:, 0]] # paired true type ids [3,3,3...1,1,1]
unpaired_true_type = true_inst_type_all[unpaired_true_all]
pred_inst_type_all = np.concatenate(pred_inst_type_all, axis=0) # all type ids in pred [3,3,3...1,1,1]
paired_pred_type = pred_inst_type_all[paired_all[:, 1]]
unpaired_pred_type = pred_inst_type_all[unpaired_pred_all]
# true_inst_type_all = paired_true_type + unpaired_true_type
###
# overall
# * quite meaningless for not exhaustive annotated dataset
tp_d = paired_pred_type.shape[0]
fp_d = unpaired_pred_type.shape[0]
fn_d = unpaired_true_type.shape[0]
tp_tn_dt = (paired_pred_type == paired_true_type).sum()
fp_fn_dt = (paired_pred_type != paired_true_type).sum()
if not exhaustive:
ignore = (paired_true_type == -1).sum()
fp_fn_dt -= ignore
w = [1, 1]
acc_type = tp_tn_dt / (tp_tn_dt + fp_fn_dt)
precision = tp_d / (tp_d + w[0] * fp_d)
recall = tp_d / (tp_d + w[0] * fn_d)
f1_d = 2 * tp_d / (2 * tp_d + w[0] * fp_d + w[1] * fn_d)
# results_list = [acc_type, precision, recall, f1_d]
results_all_types = [[acc_type], [precision], [recall], [f1_d]]
w = [2, 2, 1, 1]
if type_uid_list is None:
type_uid_list = np.unique(true_inst_type_all).tolist()
if 0 in type_uid_list:
type_uid_list.remove(0)
pred_type_uid_list = np.unique(pred_inst_type_all).tolist()
if 0 in pred_type_uid_list:
pred_type_uid_list.remove(0)
if verbose:
print(f"True type_uid_list: {type_uid_list}")
print(f"Pred type_uid_list: {pred_type_uid_list}")
res_all = {}
for type_uid in type_uid_list:
res = calc_type_metrics(
paired_true_type,
paired_pred_type,
unpaired_true_type,
unpaired_pred_type,
type_uid,
w,
)
result_uid_metrics = [
[res[0]], # rec_dt ### Segmentation recall
[res[1][1][0]], [res[1][1][1]], [res[1][1][2]], [res[1][1][3]], # (dtc_tptnfpfn, res_class), ### Classification
[res[2][1][0]], [res[2][1][1]], [res[2][1][2]], [res[2][1][3]], # (dtic_tptnfpfn, res_i_class), ### Integrated classification
[res[3][0]], [res[3][1]], [res[3][2]], [res[3][3]] # weighted ### Weighted
]
res_all[f"{type_uid}:{_get_type_name(type_uid)}"] = result_uid_metrics
### I - integrated, W - weighted, Type - across all types
cols_uid = ["Recall_dt", "Cls_acc", "Cls_precision", "Cls_recall", "Cls_F1", "ICls_acc", "ICls_precision", "ICls_recall", "ICls_F1", "WCls_acc", "WCls_precision", "WCls_recall", "WCls_F1"] # result_uid_metrics
cols_all_types = ["Type_acc", "Type_precision", "Type_recall", "Type_F1"] # results_all_types
df_all_types = pd.DataFrame(np.transpose(np.array(results_all_types)), columns=cols_all_types)
df_uid = pd.DataFrame(np.squeeze(np.array(list(res_all.values()))), columns=cols_uid)
df_uid["Type"] = list(res_all.keys())
df_uid = df_uid[["Type", *cols_uid]]
if verbose:
print()
print(df_all_types.to_markdown(index=False))
print()
print(df_uid.to_markdown(index=False))
return df_uid, df_all_types
| 16,564
|
def redirect():
"""
Redirects standard input into standard output
"""
for line in sys.stdin:
print(line, file=sys.stdout, end="")
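# Hedged usage sketch (not part of the original snippet): redirect() simply copies
# stdin to stdout, so it can be exercised in-process by swapping sys.stdin for an
# io.StringIO. The sample text below is made up for illustration.
import io
import sys
sys.stdin = io.StringIO("hello\nworld\n")
redirect()  # prints "hello" and "world" to stdout, preserving the newlines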
| 16,565
|
def _signature_pre_process_predict(
signature: _SignatureDef) -> Tuple[Text, Mapping[Text, Text]]:
"""Returns input tensor name and output alias tensor names from signature.
Args:
signature: SignatureDef
Returns:
A tuple of input tensor name and output alias tensor names.
"""
input_tensor_names = [value.name for value in signature.inputs.values()]
input_tensor_types = dict([
(key, value.dtype) for key, value in signature.inputs.items()
])
output_alias_tensor_names = dict([
(key, output.name) for key, output in signature.outputs.items()
])
return input_tensor_names, input_tensor_types, output_alias_tensor_names
| 16,566
|
def _generate_features(reader, paths, same_size=False):
"""Load and stack features in a memory efficient way. This function is
meant to be used inside :py:func:`vstack_features`.
Parameters
----------
reader : ``collections.Callable``
See the documentation of :py:func:`vstack_features`.
paths : ``collections.Iterable``
See the documentation of :py:func:`vstack_features`.
same_size : :obj:`bool`, optional
See the documentation of :py:func:`vstack_features`.
Yields
------
object
The first object returned is a tuple of :py:class:`numpy.dtype` of
features and the shape of the first feature. The rest of objects are
the actual values in features. The features are returned in C order.
"""
shape_determined = False
for i, path in enumerate(paths):
feature = numpy.atleast_2d(reader(path))
feature = numpy.ascontiguousarray(feature)
if not shape_determined:
shape_determined = True
dtype = feature.dtype
shape = list(feature.shape)
yield (dtype, shape)
else:
# make sure all features have the same shape and dtype
if same_size:
assert shape == list(feature.shape)
else:
assert shape[1:] == list(feature.shape[1:])
assert dtype == feature.dtype
if same_size:
yield (feature.ravel(),)
else:
for feat in feature:
yield (feat.ravel(),)
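# Hedged usage sketch (not part of the original snippet): a toy reader that ignores
# the path and returns a fixed 2x3 array, so the generator can be driven without any
# files on disk; the path strings are placeholders.
import numpy
def _toy_reader(path):
    return numpy.arange(6, dtype="float64").reshape(2, 3)
items = list(_generate_features(_toy_reader, ["a.npy", "b.npy"]))
dtype, shape = items[0]           # header tuple: (dtype('float64'), [2, 3])
rows = [t[0] for t in items[1:]]  # 4 raveled rows of length 3 (2 per "file")
assert len(rows) == 4 and rows[0].shape == (3,)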
| 16,567
|
def draw_polygon_outline(point_list: PointList,
color: Color, line_width: float=1):
"""
Draw a polygon outline. Also known as a "line loop."
Args:
:point_list: List of points making up the lines. Each point is
in a list. So it is a list of lists.
:color: color, specified in a list of 3 or 4 bytes in RGB or
RGBA format.
        :line_width: Width of the line in pixels.
Returns:
None
Raises:
None
"""
_generic_draw_line_strip(point_list, color, line_width, gl.GL_LINE_LOOP)
| 16,568
|
def edit_photo_caption(token: str, photo: Path):
"""Run a bot that tests sendPhoto and editMessageCaption"""
trio.run(run_bot, token, lambda b, u: edit_photo_caption_handler(b, u, photo))
| 16,569
|
def test_key_raises():
"""Constructing Site with an invalid key should raise an exception."""
with pytest.raises(ValueError):
Site("invalid_key")
| 16,570
|
def virtualenv(ctx: DoctorContext):
"""Check that we're in the correct virtualenv."""
try:
venv_path = pathlib.Path(os.environ['VIRTUAL_ENV']).resolve()
except KeyError:
ctx.error('VIRTUAL_ENV not set')
return
# When running in LUCI we might not have gone through the normal environment
# setup process, so we need to skip the rest of this step.
if 'LUCI_CONTEXT' in os.environ:
return
root = pathlib.Path(os.environ['PW_ROOT']).resolve()
if root not in venv_path.parents:
ctx.error('VIRTUAL_ENV (%s) not inside PW_ROOT (%s)', venv_path, root)
ctx.error('\n'.join(os.environ.keys()))
| 16,571
|
def style_95_read_mode(line, patterns):
"""Style the EAC 95 read mode line."""
# Burst mode doesn't have multiple settings in one line
if ',' not in line:
return style_setting(line, 'bad')
split_line = line.split(':', 1)
read_mode = split_line[0].rstrip()
line = line.replace(read_mode, '<span class="log5">{}</span>'.format(read_mode), 1)
parts = split_line[1].lstrip().split(' ', 1)
parts[1:] = [part.strip() for part in parts[1].split(',')]
num = 0
p = patterns['95 settings']
for setting in [
p['Read mode'],
p['C2 pointers'],
p['Accurate stream'],
p['Audio cache'],
]:
if num == len(parts):
break
class_ = 'good' if setting in line else 'bad'
line = line.replace(
parts[num], '<span class="{}">{}</span>'.format(class_, parts[num]), 1
)
num += 1
return line
| 16,572
|
def test_get_organizations(gc):
"""Check that get_organizations is able to retrieve a list of organization"""
responses.add(
responses.GET,
'https://api.github.com/user/orgs',
json=[{'login': 'power_rangers'}, {'login': 'teletubbies'}],
status=200,
)
res = gc.get_organizations()
assert len(responses.calls) == 1
assert res == ['power_rangers', 'teletubbies']
| 16,573
|
def trange(
client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close"
):
"""This will return a dataframe of true range for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
trange = t.TRANGE(df[highcol].values, df[lowcol].values, df[closecol].values)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"trange": trange,
}
)
| 16,574
|
def ConvolveUsingAlm(map_in, psf_alm):
"""Convolve a map using a set of pre-computed ALM
Parameters
----------
map_in : array_like
HEALPix map to be convolved
psf_alm : array_like
        The ALM representation of the PSF
Returns
-------
map_out : array_like
The smeared map
"""
norm = map_in.sum()
nside = hp.pixelfunc.npix2nside(map_in.size)
almmap = hp.sphtfunc.map2alm(map_in)
almmap *= psf_alm
outmap = hp.sphtfunc.alm2map(almmap, nside)
outmap *= norm / outmap.sum()
return outmap
| 16,575
|
def load_summary_data():
""" Function to load data
param DATA_URL: data_url
return: pandas dataframe
"""
DATA_URL = 'data/summary_df.csv'
data = pd.read_csv(DATA_URL)
return data
| 16,576
|
def _enumerate_trees_w_leaves(n_leaves):
"""Construct all rooted trees with n leaves."""
def enumtree(*args):
n_args = len(args)
# trivial cases:
if n_args == 0:
return []
if n_args == 1:
return args
# general case of 2 or more args:
# build index array
idxs = range(0, n_args)
trees = []
# we consider all possible subsets of size n_set to gather
for n_set in range(2, n_args+1):
idxsets = list(itertools.combinations(idxs, n_set))
for idxset in idxsets:
# recurse by joining all subtrees with
# n_set leaves and (n_args - n_set) leaves
arg_set = tuple(args[i] for i in idxs if i in idxset)
arg_coset = tuple(args[i] for i in idxs if i not in idxset)
if arg_coset:
trees.extend(tuple(itertools.product(enumtree(*arg_set),
enumtree(*arg_coset))))
else:
# trivial case where arg_set is entire set
trees.append(arg_set)
return trees
# return enumerated trees with integers as leaves
return enumtree(*range(n_leaves))
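# Hedged usage sketch (not part of the original snippet): the helper above needs
# itertools at module scope, imported here for completeness. For 3 leaves the
# enumeration yields the three "cherry" topologies plus the single 3-way join.
import itertools
trees = _enumerate_trees_w_leaves(3)
assert len(trees) == 4
# e.g. ((0, 1), 2), ((0, 2), 1), ((1, 2), 0) and (0, 1, 2)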
| 16,577
|
def list_collections(NextToken=None, MaxResults=None):
"""
Returns list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.
For an example, see Listing Collections in the Amazon Rekognition Developer Guide.
This operation requires permissions to perform the rekognition:ListCollections action.
See also: AWS API Documentation
Exceptions
Examples
This operation returns a list of Rekognition collections.
Expected Output:
:example: response = client.list_collections(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: Pagination token from the previous response.
:type MaxResults: integer
:param MaxResults: Maximum number of collection IDs to return.
:rtype: dict
ReturnsResponse Syntax
{
'CollectionIds': [
'string',
],
'NextToken': 'string',
'FaceModelVersions': [
'string',
]
}
Response Structure
(dict) --
CollectionIds (list) --
An array of collection IDs.
(string) --
NextToken (string) --
If the result is truncated, the response provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.
FaceModelVersions (list) --
Version numbers of the face detection models associated with the collections in the array CollectionIds . For example, the value of FaceModelVersions[2] is the version number for the face detection model used by the collection in CollectionId[2] .
(string) --
Exceptions
Rekognition.Client.exceptions.InvalidParameterException
Rekognition.Client.exceptions.AccessDeniedException
Rekognition.Client.exceptions.InternalServerError
Rekognition.Client.exceptions.ThrottlingException
Rekognition.Client.exceptions.ProvisionedThroughputExceededException
Rekognition.Client.exceptions.InvalidPaginationTokenException
Rekognition.Client.exceptions.ResourceNotFoundException
Examples
This operation returns a list of Rekognition collections.
response = client.list_collections(
)
print(response)
Expected Output:
{
'CollectionIds': [
'myphotos',
],
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'CollectionIds': [
'string',
],
'NextToken': 'string',
'FaceModelVersions': [
'string',
]
}
:returns:
(string) --
"""
pass
| 16,578
|
def lint(shout, requirements):
"""Check for unused dependencies."""
unused = list(_unused())
if shout:
if unused:
print("Unused deps:")
for d in unused:
print(f" - {d}")
if unused:
exit(1)
reqs = _load(requirements)
if reqs != sorted(reqs, key=sort_key):
exit(1)
| 16,579
|
def main():
"""Make a jazz noise here"""
args = get_args()
sub = args.substring
word = args.word
# Sanity check
if sub not in word:
sys.exit(f'Substring "{sub}" does not appear in word "{word}"')
# Create a pattern that replaces the length of the sub with any letters
pattern = word.replace(sub, '[a-z]{' + str(len(sub)) + '}')
regex = re.compile('^' + pattern + '$')
# Find matches but exclude the original word
def match(check):
return check != word and regex.match(check)
if words := list(filter(match, args.wordlist.read().lower().split())):
print('\n'.join(words))
else:
print('Womp womp')
| 16,580
|
def gen_tfidf(tokens, idf_dict):
"""
Given a segmented string and idf dict, return a dict of tfidf.
"""
# tokens = text.split()
total = len(tokens)
tfidf_dict = {}
for w in tokens:
tfidf_dict[w] = tfidf_dict.get(w, 0.0) + 1.0
for k in tfidf_dict:
tfidf_dict[k] *= idf_dict.get(k, 0.0) / total
return tfidf_dict
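# Hedged usage sketch (not part of the original snippet): the tokens and idf values
# are made up. Each score works out to count * idf / total_tokens.
tokens = ["cat", "cat", "dog", "fish"]
idf = {"cat": 1.0, "dog": 2.0}  # "fish" falls back to an idf of 0.0
assert gen_tfidf(tokens, idf) == {"cat": 0.5, "dog": 0.5, "fish": 0.0}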
| 16,581
|
def timestamp_to_seconds(timestamp):
"""Convert timestamp to python (POSIX) time in seconds.
:param timestamp: The timestamp.
:return: The python time in float seconds.
"""
return (timestamp / 2**30) + EPOCH
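# Hedged worked example (not part of the original snippet): timestamps are fixed-point
# with 30 fractional bits, so 2**30 ticks equal one second past EPOCH. EPOCH is a
# module-level constant not shown in the snippet; it is assumed to be 0.0 here.
EPOCH = 0.0
assert timestamp_to_seconds(3 * 2**30) == 3.0
assert timestamp_to_seconds(2**29) == 0.5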
| 16,582
|
def launch_dpf(ansys_path, ip=LOCALHOST, port=DPF_DEFAULT_PORT, timeout=10, docker_name=None):
"""Launch Ansys DPF.
Parameters
----------
ansys_path : str, optional
Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
The default is the latest Ansys installation.
ip : str, optional
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
timeout : float, optional
Maximum number of seconds for the initialization attempt.
The default is ``10``. Once the specified number of seconds
passes, the connection fails.
docker_name : str, optional
To start DPF server as a docker, specify the docker name here.
Returns
-------
process : subprocess.Popen
DPF Process.
"""
process = _run_launch_server_process(ansys_path, ip, port, docker_name)
# check to see if the service started
lines = []
docker_id = []
def read_stdout():
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
LOG.debug(line)
lines.append(line)
if docker_name:
docker_id.append(lines[0].replace("\n", ""))
docker_process = subprocess.Popen(f"docker logs {docker_id[0]}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in io.TextIOWrapper(docker_process.stdout, encoding="utf-8"):
LOG.debug(line)
lines.append(line)
    server_errors = []
    def read_stderr():
        for line in io.TextIOWrapper(process.stderr, encoding="utf-8"):
            LOG.error(line)
            server_errors.append(line)
# must be in the background since the process reader is blocking
Thread(target=read_stdout, daemon=True).start()
Thread(target=read_stderr, daemon=True).start()
t_timeout = time.time() + timeout
started = False
while not started:
started = any("server started" in line for line in lines)
if time.time() > t_timeout:
raise TimeoutError(f"Server did not start in {timeout} seconds")
# verify there were no errors
time.sleep(0.1)
    if server_errors:
try:
process.kill()
except PermissionError:
pass
errstr = "\n".join(errors)
if "Only one usage of each socket address" in errstr:
raise errors.InvalidPortError(f"Port {port} in use")
raise RuntimeError(errstr)
if len(docker_id) > 0:
return docker_id[0]
| 16,583
|
def fit_lowmass_mstar_mpeak_relation(mpeak_orig, mstar_orig,
mpeak_mstar_fit_low_mpeak=default_mpeak_mstar_fit_low_mpeak,
mpeak_mstar_fit_high_mpeak=default_mpeak_mstar_fit_high_mpeak):
"""
"""
mid = 0.5*(mpeak_mstar_fit_low_mpeak + mpeak_mstar_fit_high_mpeak)
mask = (mpeak_orig >= 10**mpeak_mstar_fit_low_mpeak)
mask &= (mpeak_orig < 10**mpeak_mstar_fit_high_mpeak)
# Add noise to mpeak to avoid particle discreteness effects in the fit
_x = np.random.normal(loc=np.log10(mpeak_orig[mask])-mid, scale=0.002)
_y = np.log10(mstar_orig[mask])
c1, c0 = np.polyfit(_x, _y, deg=1)
return c0, c1, mid
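# Hedged usage sketch (not part of the original snippet): synthetic halos obeying
# log10(mstar) = 0.5*log10(mpeak) + 2 are fitted over an explicit mpeak window, so the
# module-level default bounds (not shown in this snippet) are not needed.
import numpy as np
rng = np.random.RandomState(0)
log_mpeak = rng.uniform(10.0, 12.0, size=5000)
c0, c1, mid = fit_lowmass_mstar_mpeak_relation(
    10.0 ** log_mpeak, 10.0 ** (0.5 * log_mpeak + 2.0),
    mpeak_mstar_fit_low_mpeak=10.5, mpeak_mstar_fit_high_mpeak=11.5)
assert abs(c1 - 0.5) < 0.05 and abs(mid - 11.0) < 1e-12  # slope recovered, mid = 11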
| 16,584
|
def load_ref_system():
""" Returns benzaldehyde as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 0.3179 1.0449 -0.0067
C 1.6965 0.8596 -0.0102
C 2.2283 -0.4253 -0.0050
C 1.3808 -1.5297 0.0037
C 0.0035 -1.3492 0.0073
C -0.5347 -0.0596 0.0021
C -2.0103 0.0989 0.0061
O -2.5724 1.1709 0.0021
H 2.3631 1.7283 -0.0171
H 3.3139 -0.5693 -0.0078
H 1.8000 -2.5413 0.0078
H -0.6626 -2.2203 0.0142
H -2.6021 -0.8324 0.0131
H -0.1030 2.0579 -0.0108
""")
| 16,585
|
def get_department_level_grade_data_completed(request_ctx, account_id, **request_kwargs):
"""
Returns the distribution of grades for students in courses in the
department. Each data point is one student's current grade in one course;
if a student is in multiple courses, he contributes one value per course,
but if he's enrolled multiple times in the same course (e.g. a lecture
section and a lab section), he only constributes on value for that course.
Grades are binned to the nearest integer score; anomalous grades outside
the 0 to 100 range are ignored. The raw counts are returned, not yet
normalized by the total count.
Shares the same variations on endpoint as the participation data.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:return: Get department-level grade data
:rtype: requests.Response (with void data)
"""
path = '/v1/accounts/{account_id}/analytics/completed/grades'
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
| 16,586
|
def allocate_samples_to_bins(n_samples, ideal_bin_count=100):
"""goal is as best as possible pick a number of bins
and per bin samples to a achieve a given number
of samples.
Parameters
----------
Returns
----------
number of bins, list of samples per bin
"""
if n_samples <= ideal_bin_count:
n_bins = n_samples
samples_per_bin = [1 for _ in range(n_bins)]
else:
n_bins = ideal_bin_count
remainer = n_samples % ideal_bin_count
samples_per_bin = np.array([(n_samples - remainer) / ideal_bin_count for _ in range(n_bins)])
if remainer != 0:
additional_samples_per_bin = distribute_samples(remainer, n_bins)
samples_per_bin = samples_per_bin + additional_samples_per_bin
return n_bins, np.array(samples_per_bin).astype(int)
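# Hedged usage sketch (not part of the original snippet): inputs are chosen so that
# n_samples splits evenly, keeping the call away from the distribute_samples() helper
# that is not shown in this snippet. Requires numpy imported as np.
import numpy as np
n_bins, per_bin = allocate_samples_to_bins(50)  # fewer samples than the ideal bin count
assert n_bins == 50 and all(s == 1 for s in per_bin)
n_bins, per_bin = allocate_samples_to_bins(1000, ideal_bin_count=100)
assert n_bins == 100 and per_bin.sum() == 1000 and set(per_bin) == {10}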
| 16,587
|
def create_algo(name: str, discrete: bool, **params: Any) -> AlgoBase:
"""Returns algorithm object from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
params (any): arguments for algorithm.
Returns:
d3rlpy.algos.base.AlgoBase: algorithm.
"""
return get_algo(name, discrete)(**params)
| 16,588
|
def tf_inv(T):
""" Invert 4x4 homogeneous transform """
assert T.shape == (4, 4)
return np.linalg.inv(T)
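# Hedged usage sketch (not part of the original snippet): for a rigid transform
# [R | t; 0 0 0 1] the inverse is [R.T | -R.T @ t], which the generic matrix inverse
# should reproduce to machine precision.
import numpy as np
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
t = np.array([1.0, 2.0, 3.0])
T = np.eye(4)
T[:3, :3], T[:3, 3] = R, t
analytic = np.eye(4)
analytic[:3, :3], analytic[:3, 3] = R.T, -R.T @ t
assert np.allclose(tf_inv(T), analytic) and np.allclose(T @ tf_inv(T), np.eye(4))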
| 16,589
|
def send_reset_password_email(token, to, username):
"""
send email to user for reset password
:param token: token
:param to: email address
:param username: user.username
:return:
"""
url_to = current_app.config["WEB_BASE_URL"] + "/auth/reset-password?token=" + token
response = _send_email(
subject="请重置密码",
to=to,
html_body=render_template(
"emails/reset_password.html", username=username, url_to=url_to
),
)
return response.status_code
| 16,590
|
def cleanup_test_resources(instance=None, vm_firewall=None,
key_pair=None, network=None):
"""Clean up any combination of supplied resources."""
with cb_helpers.cleanup_action(
lambda: cleanup_network(network) if network else None):
with cb_helpers.cleanup_action(
lambda: key_pair.delete() if key_pair else None):
with cb_helpers.cleanup_action(
lambda: vm_firewall.delete() if vm_firewall else None):
delete_instance(instance)
| 16,591
|
def part1(data):
"""Solve part 1"""
countIncreased = 0
prevItem = None
for row in data:
        if prevItem is None:
            prevItem = row
            continue
        if prevItem < row:
            countIncreased += 1
prevItem = row
return countIncreased
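# Hedged usage sketch (not part of the original snippet): the sample sonar depths from
# Advent of Code 2021 day 1 contain exactly 7 readings larger than the one before them.
sample = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
assert part1(sample) == 7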
| 16,592
|
def yzrotation(theta = np.pi*3/20.0):
"""
Returns a simple planar rotation matrix that rotates
vectors around the x-axis.
args:
theta: The angle by which we will perform the rotation.
"""
r = np.eye(3)
r[1,1] = np.cos(theta)
r[1,2] = -np.sin(theta)
r[2,1] = np.sin(theta)
r[2,2] = np.cos(theta)
return r
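# Hedged usage sketch (not part of the original snippet): rotating the y unit vector by
# 90 degrees about the x-axis should land it on the z-axis, and the matrix should be
# orthonormal. Requires numpy imported as np (as the snippet itself assumes).
import numpy as np
r = yzrotation(theta=np.pi / 2)
assert np.allclose(r @ np.array([0.0, 1.0, 0.0]), [0.0, 0.0, 1.0])
assert np.allclose(r @ r.T, np.eye(3))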
| 16,593
|
def is_compiled_release(data):
"""
Returns whether the data is a compiled release (embedded or linked).
"""
return 'tag' in data and isinstance(data['tag'], list) and 'compiled' in data['tag']
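# Hedged usage sketch (not part of the original snippet): minimal OCDS-style dicts
# illustrating the tag check; the ocid values are made up.
assert is_compiled_release({'tag': ['compiled'], 'ocid': 'ocds-abc-1'})
assert not is_compiled_release({'tag': ['planning']})
assert not is_compiled_release({'tag': 'compiled'})  # tag must be a list
assert not is_compiled_release({'ocid': 'ocds-abc-1'})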
| 16,594
|
def resize_frame(
frame: numpy.ndarray, width: int, height: int, mode: str = "RGB"
) -> numpy.ndarray:
"""
Use PIL to resize an RGB frame to an specified height and width.
Args:
frame: Target numpy array representing the image that will be resized.
width: Width of the resized image.
height: Height of the resized image.
mode: Passed to Image.convert.
Returns:
The resized frame that matches the provided width and height.
"""
frame = Image.fromarray(frame)
frame = frame.convert(mode).resize(size=(width, height))
return numpy.array(frame)
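# Hedged usage sketch (not part of the original snippet): a random RGB frame is shrunk
# to 32x24; note the returned array is height-by-width-by-channels. Requires Pillow.
import numpy
from PIL import Image
frame = numpy.random.randint(0, 256, size=(48, 64, 3), dtype=numpy.uint8)
small = resize_frame(frame, width=32, height=24)
assert small.shape == (24, 32, 3)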
| 16,595
|
def get_proxies(host, user, password, database, port=3306, unix_socket=None):
""""Connect to a mysql database using pymysql and retrieve proxies for the scraping job.
Args:
host: The mysql database host
user: The mysql user
password: The database password
port: The mysql port, by default 3306
unix_socket: Sometimes you need to specify the mysql socket file when mysql doesn't reside
in a standard location.
Returns;
A list of proxies obtained from the database
Raisese:
An Exception when connecting to the database fails.
"""
try:
conn = pymysql.connect(host=host, port=port, user=user, passwd=password, unix_socket=unix_socket)
conn.select_db(database)
cur = conn.cursor(pymysql.cursors.DictCursor)
# Adapt this code for you to make it retrieving the proxies in the right format.
cur.execute('SELECT host, port, username, password, protocol FROM proxies')
proxies = [Proxy(proto=s['protocol'], host=s['host'], port=s['port'],
username=s['username'], password=s['password']) for s in cur.fetchall()]
return proxies
except Exception as e:
logger.error(e)
raise
| 16,596
|
def main():
"""Process files according to the command line arguments."""
parser = ArgParser()
handler = Handler(parser.args)
print(handler.message)
handler.log.logger.info(handler.message)
handler.process()
handler.log.logger.info(f"Process finished\n")
handler.log.close()
| 16,597
|
def APIRevision():
"""Gets the current API revision to use.
Returns:
str, The revision to use.
"""
return 'v1beta3'
| 16,598
|
def pretty_string_value_error(value, error, error_digits=2, use_unicode=True):
"""
Returns a value/error combination of numbers in a scientifically
'pretty' format.
Scientific quantities often come as a *value* (the actual
quantity) and the *error* (the uncertainty in the value).
Given two floats, value and error, return the two in a
'pretty' formatted string: where the value and error are truncated
at the correct precision.
Parameters
----------
value : float
The quantity in question
error : float
The uncertainty of the quantity
error_digits : int, default 2
How many significant figures the error has. Scientific
convention holds that errors have 1 or (at most) 2 significant
figures. The larger number of digits is chosen here by default.
    use_unicode : bool, default True
        If True, join value and error with the "±" character;
        otherwise use "+/-".
    Returns
    -------
    new_string : str
        The formatted "value ± error * 10^exponent" string
Examples
--------
>>> pretty_string_value_error(1.23456789e8, 4.5678e5,
error_digits=2)
"1.2346 +/- 0.0046 * 10^+08"
>>> pretty_string_value_error(5.6e-2, 2.0e-3, error_digits=1)
"5.6 +/- 0.2 * 10^-02"
"""
if value is None:
return "None"
if error is None or not np.isfinite(error):
if use_unicode:
new_string = "{:.6E} \u00B1 UNKNOWN ERROR MARGIN".format(value)
else:
new_string = "{:.6E} +/- UNKNOWN ERROR MARGIN".format(value)
else:
if not np.isfinite(value):
return str(value)
assert "e" in "{:e}".format(value), "Cannot convert into scientific "\
"notation: {1}".format(value)
value_mantissa_str, value_exponent_str = \
"{:e}".format(value).strip().split('e')
value_mantissa = float(value_mantissa_str)
value_exponent = int(value_exponent_str)
error_mantissa_str, error_exponent_str = \
"{:e}".format(error).strip().split('e')
error_mantissa = float(error_mantissa_str)
error_exponent = int(error_exponent_str)
padding = value_exponent - error_exponent + error_digits - 1
if padding < 1: padding = 1
exp_diff = error_exponent - value_exponent
string_for_formatting = "{:.%df}" % padding
new_value_mantissa = string_for_formatting.format(value_mantissa)
new_error_mantissa = string_for_formatting.format(
error_mantissa*10**exp_diff)
if use_unicode:
new_string = "%s \u00B1 %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
else:
new_string = "%s +/- %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
return new_string
| 16,599
|