| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def _get_vcf_breakends(hydra_file, genome_2bit, options=None):
"""Parse BEDPE input, yielding VCF ready breakends.
"""
if options is None: options = {}
for features in group_hydra_breakends(hydra_parser(hydra_file, options)):
if len(features) == 1 and is_deletion(features[0], options):
yield build_vcf_deletion(features[0], genome_2bit)
elif len(features) == 1 and is_tandem_dup(features[0], options):
yield build_tandem_deletion(features[0], genome_2bit)
elif len(features) == 2 and is_inversion(*features):
yield build_vcf_inversion(features[0], features[1], genome_2bit)
elif len(features) == 2 and is_translocation(*features):
info = get_translocation_info(features[0], features[1])
for feature in features:
for brend in build_vcf_parts(feature, genome_2bit, info):
yield brend
else:
for feature in features:
for brend in build_vcf_parts(feature, genome_2bit):
yield brend
| 5,340,200
|
def get_line_pixels(start, end):
"""Bresenham's Line Algorithm
Produces a list of tuples from start and end
    >>> points1 = get_line_pixels((0, 0), (3, 4))
    >>> points2 = get_line_pixels((3, 4), (0, 0))
    >>> assert set(points1) == set(points2)
    >>> print(points1)
    [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]
    >>> print(points2)
    [(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)]
"""
# Setup initial conditions
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return points
| 5,340,201
|
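A minimal usage sketch for the get_line_pixels snippet above, rasterising one line into a small text grid (plain Python, no extra dependencies; the grid size is arbitrary):

grid = [['.' for _ in range(8)] for _ in range(5)]
for x, y in get_line_pixels((0, 0), (7, 4)):
    grid[y][x] = '#'
for row in grid:
    print(''.join(row))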
def extrema(x):
"""
    Gets the local minima from a time series. This includes the endpoints when they qualify.
    Note that the indices start counting from 1 to match MATLAB.
    Args:
        x: time series vector
    Returns:
        imin: 1-based indices of the local minima, sorted by increasing value of x
"""
x = np.asarray(x)
imin = signal.argrelextrema(x, np.less)[0]
if(x[-1] < x[-2]): # Check last point
imin = np.append(imin, len(x)-1)
if(x[0] < x[1]): # Check first point
imin = np.insert(imin, 0, 0)
xmin = x[imin]
minorder = np.argsort(xmin)
imin = imin[minorder]
return imin+1
| 5,340,202
|
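A short usage sketch for the extrema snippet above; it assumes the numpy/scipy imports the snippet relies on (np, and scipy.signal as signal) are available in its module:

import numpy as np

x = np.array([3.0, 1.0, 2.0, 0.5, 2.5, 4.0, 3.5])
print(extrema(x))   # -> [4 2 7]: 1-based indices of the minima, sorted by the minimum's value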
def mv_single_gpu_test(model, data_loader, runstate, draw_contours=False, draw_target=True, out_dir=None):
"""Test with single GPU.
Args:
model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): PyTorch data loader.
        runstate: Shared flag; the loop exits early when runstate[0] == 0.
        draw_contours (bool): Whether to save images with predicted contours
            drawn during inference. Default: False.
        draw_target (bool): Whether to also draw ground-truth contours when
            available. Default: True.
        out_dir (str, optional): Directory into which prediction maps, the
            test log and (optionally) contour images are written.
    Returns:
        None. Results are written to ``out_dir``.
"""
if not osp.isdir(out_dir):
os.mkdir(out_dir)
log_path = osp.join(out_dir, 'test_log.csv')
if osp.isfile(log_path):
os.remove(log_path)
test_log = CSV(log_path)
log_head = ['Image_ID']
test_log.append(log_head)
out_pt = osp.join(out_dir, 'test_predict')
mmcv.mkdir_or_exist(out_pt)
if draw_contours:
out_cnt = osp.join(out_dir, 'test_drawContours')
mmcv.mkdir_or_exist(out_cnt)
model.eval()
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
draw_target_flag = False
for img_id, data in enumerate(data_loader):
if runstate[0] == 0:
sys.exit(0)
if 'gt_semantic_seg' in data and draw_target:
draw_target_flag = True
target = data.pop('gt_semantic_seg')[0]
target = target.cpu().numpy()[0] # 1*h*w ==> h*w
with torch.no_grad():
result = model(return_loss=False, return_logit=True, **data)
img_metas = data['img_metas'][0].data[0]
img_path = img_metas[0]['filename']
img_name = osp.basename(img_path)
## output pt map
base_name = img_name.split('.')[0]
for chn in range(1, result.size(1)):
probability = np.uint8(result[0, chn, :, :].cpu() * 255)
out_path = osp.join(out_pt, '{}_{}.png'.format(base_name, chn))
# imwrite(probability, out_path)
cv2.imwrite(out_path, probability)
## output image with draw_contours
if draw_contours:
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
h, w = image.shape[:2]
line = max(int(np.sqrt(h*w) // 512), 1)
predict = torch.max(result, 1)[1].cpu().numpy()
predict = np.uint8(np.squeeze(predict))
contours, _ = cv2.findContours(predict, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (0, 0, 255), line)
if draw_target_flag:
contours, _ = cv2.findContours(target, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (0, 255, 0), line)
cv2.imwrite(osp.join(out_cnt, img_name), image)
test_log.append(img_id)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
| 5,340,203
|
def d2X_dt2_Vanderpol(X, t=0):
""" Return the Jacobian matrix evaluated in X. """
    return array([[0, 1],
                  [-2 * r * X[1] * X[0] - w**2, r * (1 - X[0]**2)]])
| 5,340,204
|
def multiply_scalar(s, q, qout):
"""Multiply scalar by quaternion s*q"""
qout[0] = s * q[0]
qout[1] = s * q[1]
qout[2] = s * q[2]
qout[3] = s * q[3]
| 5,340,205
|
def list_used_variables(node, ARGS=None):
""" thanks Avi
https://software.ecmwf.int/wiki/pages/viewpage.action?pageId=53513079
"""
    if ARGS is None:
        # a bare object() does not accept attribute assignment; use a simple namespace
        from types import SimpleNamespace
        ARGS = SimpleNamespace(var_name=None, not_value=None)
# make a list of all nodes up the parent hierarchy
parent_hierarchy = []
parent_hierarchy.append(node)
parent = node.get_parent()
while parent:
parent_hierarchy.append(parent)
parent = parent.get_parent()
# now reverse the list
parent_hierarchy.reverse()
# now create a map of all used variables going down the hierarchy.
    # Allow variables lower down the hierarchy to override parent variables
variable_map = {}
for node in parent_hierarchy:
for var in node.variables:
variable_map[var.name()] = var.value()
# finally print the used variables
if ARGS.var_name:
if ARGS.not_value:
# use exact match for key
for key in variable_map:
if ARGS.var_name == key:
if ARGS.not_value != variable_map[key]:
print("edit " + key + " '" + variable_map[key] + "'")
else:
# use substring match for variable name
for key in variable_map:
if ARGS.var_name in key:
print("edit " + key + " '" + variable_map[key] + "'")
else:
for key in variable_map:
print("edit " + key + " '" + variable_map[key] + "'")
| 5,340,206
|
def main():
"""Opened ports getting and their protocols recognizing."""
host, ports = argument_parse()
print("Analysing...")
ports_analysers = []
with ThreadPoolExecutor(max_workers=WORKERS_COUNT) as thread_pool:
for port in ports:
if 0 <= port <= 65535:
port_analyser = PortAnalyser(host, port)
thread_pool.submit(port_analyser.run)
ports_analysers.append(port_analyser)
else:
warning("Incorrect port: %s", port)
while True:
if active_count() == 1:
            # Only one active thread remains; all ports have been analysed.
break
# Making a table.
print()
print("\tPorts of {}".format(host))
print("--------------------------------")
print("Port\tTCP\tUDP\tProtocol")
for analyser in ports_analysers:
print("{}\t{}\t{}\t{}".format(analyser.port, analyser.opened_tcp, \
analyser.opened_udp, analyser.protocol))
| 5,340,207
|
def domain(domain):
"""Locate the given domain in our database and
render an info page for it.
"""
current_app.logger.info('domain [%s]' % domain)
g.domain = current_app.iwn.domain(domain)
if g.domain is None:
return Response('', 404)
else:
return render_template('domain.jinja')
| 5,340,208
|
def gaussian_filter(img, kernel_size, sigma=0):
"""take value weighted by pixel distance in the neighbourhood of center pixel.
"""
return cv2.GaussianBlur(img, ksize=kernel_size, sigmaX=sigma, sigmaY=sigma)
| 5,340,209
|
def extractBillAdoptedLinks(soup):
"""Extract list of links for Adopted Bill Texts (HTML & PDF Versions)
"""
tables = soup.find_all("table")
content_table = [t for t in tables if t.text.strip().startswith("View Available Bill Summaries")][-1]
adopted_links = {}
for row in content_table.find_all('tr'):
cols = row.find_all('td')
if len(cols) > 1:
label = cols[0].text.strip('[HTML]').strip().encode('utf8').replace(b'\xc2\xa0', b' ')
if label in [b'Adopted', b'Ratified']:
links = cols[0].find_all('a')
pdf_link = links[0]['href']
html_link = links[1]['href']
adopted_links = {'label' : label, 'pdf' : pdf_link, 'html' : html_link}
return(adopted_links)
| 5,340,210
|
def do_flavor_access_list(cs, args):
"""Print access information about the given flavor."""
if args.flavor:
flavor = _find_flavor(cs, args.flavor)
if flavor.is_public:
raise exceptions.CommandError(_("Access list not available "
"for public flavors."))
kwargs = {'flavor': flavor}
else:
raise exceptions.CommandError(_("Unable to get all access lists. "
"Specify --flavor"))
try:
access_list = cs.flavor_access.list(**kwargs)
except NotImplementedError as e:
raise exceptions.CommandError("%s" % str(e))
columns = ['Flavor_ID', 'Tenant_ID']
utils.print_list(access_list, columns)
| 5,340,211
|
def copy_sheets_to_packages(sheets: Union[str,list[str]], template: str = None) -> None:
"""Copy designated sheets from template to all package Excel files.
This is intended to be used for updating the non-user-edited sheets
:param sheets: list of sheet names to copy, or a single name that will be listified
:param template: path to template file; defaults to 'package template.xlsx' in root
"""
# set template to default if unspecified
if template is None:
root = git.Repo('.', search_parent_directories=True).working_tree_dir
template = os.path.join(root, 'package template.xlsx')
if isinstance(sheets,str):
sheets = [sheets]
# open template file and check that all sheets are there
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning) # filter the "data validation not supported" warning
template_excel = openpyxl.open(template, data_only=True)
for s in sheets:
template_excel[s] # KeyError will be thrown if any sheet doesn't exist
# for each package
for package in package_dirs():
# open the package excel file
package_excel_file = package_excel(package)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning) # filter the "data validation not supported" warning
target_excel = openpyxl.open(package_excel_file, data_only=True)
# copy the sheets
print(f'Copying sheets to {package_excel_file}')
for s in sheets:
print(f' Copying sheet "{s}"')
target_index = target_excel.index(target_excel[s])
# remove old sheet, create a blank, then copy values
target_excel.remove_sheet(target_excel[s])
target_excel.create_sheet(s,target_index)
copy_sheet(template_excel[s], target_excel[s])
# save
target_excel.save(package_excel_file)
print(f' Saved after copying all sheets')
| 5,340,212
|
def uniq(string):
"""Removes duplicate words from a string (only the second duplicates).
The sequence of the words will not be changed.
"""
words = string.split()
return ' '.join(sorted(set(words), key=words.index))
| 5,340,213
|
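A one-line check of the uniq snippet above:

print(uniq("to be or not to be"))   # -> "to be or not"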
def git_repo_empty(tmpdir):
"""Create temporary empty git directory, meaning no commits/users/repository-url to extract (error)"""
cwd = str(tmpdir)
version = subprocess.check_output("git version", shell=True)
# decode "git version 2.28.0" to (2, 28, 0)
decoded_version = tuple(int(n) for n in version.decode().strip().split(" ")[-1].split(".") if n.isdigit())
if decoded_version >= (2, 28):
# versions starting from 2.28 can have a different initial branch name
# configured in ~/.gitconfig
subprocess.check_output("git init --initial-branch=master", cwd=cwd, shell=True)
else:
# versions prior to 2.28 will create a master branch by default
subprocess.check_output("git init", cwd=cwd, shell=True)
yield cwd
| 5,340,214
|
def l2_regularizer(
params: kfac_jax.utils.Params,
haiku_exclude_batch_norm: bool,
haiku_exclude_biases: bool,
) -> chex.Array:
"""Computes an L2 regularizer."""
if haiku_exclude_batch_norm:
params = hk.data_structures.filter(
lambda m, n, p: "batchnorm" not in m, params)
if haiku_exclude_biases:
params = hk.data_structures.filter(
lambda m, n, p: n != "b", params)
return 0.5 * kfac_jax.utils.inner_product(params, params)
| 5,340,215
|
def _on_monitor_deleted(ref):
"""Remove the weakreference from the set
of active MONITORS. We no longer
care about keeping track of it
"""
MONITORS.remove(ref)
| 5,340,216
|
def _normalize(log_weights):
"""Normalize log-weights into weights and return resulting weights and log-likelihood increment."""
n = log_weights.shape[0]
max_logw = jnp.max(log_weights)
w = jnp.exp(log_weights - max_logw)
w_mean = w.mean()
log_likelihood_increment = jnp.log(w_mean) + max_logw
w = w / (n * w_mean)
return w, log_likelihood_increment
| 5,340,217
|
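For reference, a plain-NumPy sketch of the log-sum-exp trick used by the _normalize snippet above (numpy stands in for jax.numpy here; the arithmetic is identical):

import numpy as np

log_weights = np.array([-1000.0, -1001.0, -1002.5])   # a naive np.exp() would underflow to 0 here
n = log_weights.shape[0]
max_logw = np.max(log_weights)
w = np.exp(log_weights - max_logw)                     # stable: the largest term becomes exp(0) = 1
w_mean = w.mean()
log_likelihood_increment = np.log(w_mean) + max_logw   # log of the mean of the raw weights
w = w / (n * w_mean)                                   # normalised weights
print(w.sum())                                         # 1.0 (up to floating point)
print(log_likelihood_increment)                        # approximately -1000.73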
def schedule_notification(*, event, affected_object, extra_data):
"""
Schedule notifying users about a given event.
    @returns right after scheduling the event. Notification delivery, retries and failures are handled separately
"""
# TODO: Currently only resolves email sending. Later on, weekly letters, notification center in the news section
# and others should be added
# All other events triggering emails should also migrate to use notification center once API is figured out
ScheduledNotification.objects.create(
event=event.value,
serialized_model=serializers.serialize("json", [affected_object]),
extra_data=json.dumps(extra_data),
)
| 5,340,218
|
def pq_compute_multi_core(matched_annotations_list,
gt_folder,
pred_folder,
categories,
file_client=None,
nproc=32):
"""Evaluate the metrics of Panoptic Segmentation with multithreading.
Same as the function with the same name in `panopticapi`.
Args:
matched_annotations_list (list): The matched annotation list. Each
element is a tuple of annotations of the same image with the
format (gt_anns, pred_anns).
gt_folder (str): The path of the ground truth images.
pred_folder (str): The path of the prediction images.
categories (str): The categories of the dataset.
file_client (object): The file client of the dataset. If None,
the backend will be set to `disk`.
nproc (int): Number of processes for panoptic quality computing.
Defaults to 32. When `nproc` exceeds the number of cpu cores,
the number of cpu cores is used.
"""
if PQStat is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
if file_client is None:
file_client_args = dict(backend='disk')
file_client = mmcv.FileClient(**file_client_args)
cpu_num = min(nproc, multiprocessing.cpu_count())
annotations_split = np.array_split(matched_annotations_list, cpu_num)
print('Number of cores: {}, images per core: {}'.format(
cpu_num, len(annotations_split[0])))
workers = multiprocessing.Pool(processes=cpu_num)
processes = []
for proc_id, annotation_set in enumerate(annotations_split):
p = workers.apply_async(pq_compute_single_core,
(proc_id, annotation_set, gt_folder,
pred_folder, categories, file_client))
processes.append(p)
# Close the process pool, otherwise it will lead to memory
# leaking problems.
workers.close()
workers.join()
pq_stat = PQStat()
for p in processes:
pq_stat += p.get()
return pq_stat
| 5,340,219
|
def logout():
"""
Function that handles logout of user
---
POST:
description: remove curent user in the session
responses:
200:
description:
Successfuly log out user from the session.
"""
logout_user() # flask logout library
return redirect("/", code=200)
| 5,340,220
|
def edit_line(file_name: str, regex: str, replace: str, mode: str = 'status',
show_ok: bool = False):
"""
Edit line in file matching a regular expression.
:param file_name: Full path and name of file to write content to.
:param regex: Regular expression for matching line to edit.
:param replace: Replace line with this. Matching groups from regex are
matched with {1}...{10}
    :param mode: Choices are: "status", "regular", "verbose" and "quiet":
"status": Print command and status.
"regular": Print command, stdout and stderr to screen
(just as usual).
"verbose": Print status, command, stdout and stderr to
screen.
"quiet": Only print errors.
:param show_ok: If ok status should be shown.
"""
error = ''
# Insert values from run_cmd_vars in "regex" and "replace"
# (if they exist)
for key, val in run_cmd_vars.items():
var = '{' + key + '}'
if var in regex:
regex = regex.replace(var, val)
if var in replace:
replace = replace.replace(var, val)
# Set OK status message
status_string = 'Replaced "{old}" with "{replace}" in file "{file_name}"'
status_string = status_string.replace('{replace}', replace)
status_string = status_string.replace('{file_name}', file_name)
# Read file
try:
file = open(file_name, 'r', encoding='utf-8')
line_list = file.readlines()
line_list = [i.strip('\n') for i in line_list]
file.close()
except BaseException as e:
status_string = 'Error editing file "{file_name}"'
status_string = status_string.format(file_name=file_name)
error = str(e)
# Edit line in file
if error == '':
for i in range(len(line_list)):
match = re.match(pattern=regex, string=line_list[i])
# Replace line in memory
if match is not None:
# Insert matching groups in replace (if any)
for n in range(1, 11):
group_string = '{' + str(n) + '}'
if group_string in replace:
replace = replace.replace(group_string, match.group(n))
# Complete status string
status_string = status_string.format(old=line_list[i])
# Replace line in memory
line_list[i] = replace
break
# Not finding a match is an error so we set error status
if match is None:
status_string = 'No match was found for "{regex}" in "{file_name}"'
status_string = status_string.format(regex=regex,
file_name=file_name)
error = None
# Write file
if error == '':
try:
tmp_file_name = file_name + '~'
file = open(tmp_file_name, 'w', encoding='utf-8')
file.writelines('\n'.join(line_list))
file.close()
os.rename(tmp_file_name, file_name)
except BaseException as e:
status_string = 'Error editing file "{file_name}"'
status_string = status_string.format(file_name=file_name)
error = str(e)
# Print quiet mode
if mode == 'quiet' and error != '':
status = '[ \033[1;91mERROR\033[0m ] '
status_string = status + status_string
print(status_string, flush=True)
if error is not None:
print(error, flush=True)
# Print regular mode
elif mode == 'regular' and (error != '' or show_ok):
print(status_string, flush=True)
# Print verbose and status mode
elif (mode == 'verbose' or mode == 'status') and (error != '' or show_ok):
status = '[ \033[1;32m OK \033[0m ] '
if error != '':
status = '[ \033[1;91mERROR\033[0m ] '
status_string = status + status_string
print(status_string, flush=True)
if error != '' and error is not None:
print(error, flush=True)
| 5,340,221
|
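A small, hypothetical usage sketch for the edit_line snippet above. It assumes the module-level run_cmd_vars mapping that edit_line consults exists (an empty dict is enough) and that re/os are imported in that module; the file name below is made up:

run_cmd_vars = {}   # {KEY} placeholder substitutions used by edit_line; empty for this sketch

with open('sshd_config.sample', 'w', encoding='utf-8') as f:
    f.write('Port 22\nPermitRootLogin no\n')

edit_line('sshd_config.sample', r'PermitRootLogin .*', 'PermitRootLogin yes',
          mode='status', show_ok=True)
# prints an OK status line reporting the old and new line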
def header(data):
"""Create class based on decode of a PCI configuration space header from raw data."""
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
field_list = header_field_list(addr)
return header_factory(field_list).from_buffer_copy(data)
| 5,340,222
|
def downscale_fit(fitter, data, seed, Pool):
"""Do a fit for a given Fitter (for multiple locations)
"""
n_sample, is_print, n_thread = get_fit_parser()
pool = None
if n_thread is None:
pool = Pool()
else:
pool = Pool(n_thread)
fitter.fit(data, seed, n_sample, pool, is_print)
pool.join()
| 5,340,223
|
def FilterByKeyUsingSideInput(pcoll, lookup_entries, filter_key):
"""Filters a single collection by a single lookup collection, using a common key.
Given:
- a `PCollection` (lookup_entries) of `(V)`, as a lookup collection
- a `PCollection` (pcoll) of `(V)`, as values to be filtered
- a common key (filter_key)
A dictionary called `filter_dict` - is created by mapping the value of `filter_key`
for each entry in `lookup_entries` to True.
    Then, for each item in pcoll, the value associated with `filter_key` is checked against
`filter_dict`, and if it is found, the entry passes through. Otherwise, the entry is
discarded.
Note: `lookup_entries` will be used as a **side input**, so care
must be taken regarding the size of the `lookup_entries`
"""
filter_dict_prepared = beam.pvalue.AsDict(
lookup_entries | beam.Map(lambda row: (row[filter_key], True))
)
def _filter_fn(row, filter_dict):
return row[filter_key] in filter_dict
return pcoll | beam.Filter(_filter_fn, filter_dict=filter_dict_prepared)
| 5,340,224
|
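A minimal Apache Beam sketch exercising the FilterByKeyUsingSideInput snippet above; it assumes apache_beam is installed and the function is importable, and the records are made up:

import apache_beam as beam

with beam.Pipeline() as p:
    lookup = p | 'lookup' >> beam.Create([{'user_id': 1}, {'user_id': 3}])
    records = p | 'records' >> beam.Create([
        {'user_id': 1, 'value': 'a'},
        {'user_id': 2, 'value': 'b'},
        {'user_id': 3, 'value': 'c'},
    ])
    kept = FilterByKeyUsingSideInput(records, lookup, 'user_id')
    kept | 'show' >> beam.Map(print)   # expected: the rows with user_id 1 and 3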
def sw_maxent_irl(x, xtr, phi, phi_bar, max_path_length, nll_only=False):
"""Maximum Entropy IRL using our exact algorithm
Returns NLL and NLL gradient of the demonstration data under the proposed reward
parameters x.
N.b. the computed NLL here doesn't include the contribution from the MDP dynamics
for each path - this term is independent of the parameter x, so doesn't affect the
optimization result.
Args:
x (numpy array): Current reward function parameter vector estimate
xtr (mdp_extras.BaseExtras): Extras object for the MDP being
optimized
phi (mdp_extras.FeatureFunction): Feature function to use with linear reward
parameters. We require len(phi) == len(x).
phi_bar (numpy array): Feature expectation. N.b. if using a weighted feature
expectation, it is very important to make sure the weights you used
sum to 1.0!
max_path_length (int): Maximum path length
nll_only (bool): If true, only return NLL
Returns:
(float): Negative Log Likelihood of a MaxEnt model with x as the reward
parameters and the given feature expectation
(numpy array): Downhill gradient of negative log likelihood at the given point
"""
# Store current argument guess
r_linear = Linear(x)
if isinstance(xtr, DiscreteExplicitExtras):
# Process tabular MDP
# Explode reward function to indicator arrays
rs, rsa, rsas = r_linear.structured(xtr, phi)
# Catch float overflow as an error - reward magnitude is too large for
# exponentiation with this max path length
with np.errstate(over="raise"):
# Compute backward message
alpha_log = nb_backward_pass_log(
xtr.p0s,
max_path_length,
xtr.t_mat,
gamma=xtr.gamma,
rs=rs,
rsa=rsa,
rsas=rsas,
)
# Compute partition value
Z_theta_log = log_partition(
max_path_length, alpha_log, padded=xtr.is_padded
)
# Compute NLL
nll = Z_theta_log - x @ phi_bar
if nll_only:
return nll
else:
# Compute gradient
with np.errstate(over="raise"):
# Compute forward message
beta_log = nb_forward_pass_log(
max_path_length,
xtr.t_mat,
gamma=xtr.gamma,
rs=rs,
rsa=rsa,
rsas=rsas,
)
# Compute transition marginals
pts_log, ptsa_log, ptsas_log = nb_marginals_log(
max_path_length,
xtr.t_mat,
alpha_log,
beta_log,
Z_theta_log,
gamma=xtr.gamma,
rsa=rsa,
rsas=rsas,
)
# Compute gradient based on feature type
if phi.type == Disjoint.Type.OBSERVATION:
s_counts = np.sum(np.exp(pts_log), axis=-1)
efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)
nll_grad = efv_s - phi_bar
elif phi.type == Disjoint.Type.OBSERVATION_ACTION:
sa_counts = np.sum(np.exp(ptsa_log), axis=-1)
efv_sa = np.sum(
[
sa_counts[s1, a] * phi(s1, a)
for s1 in xtr.states
for a in xtr.actions
],
axis=0,
)
nll_grad = efv_sa - phi_bar
elif phi.type == Disjoint.Type.OBSERVATION_ACTION_OBSERVATION:
sas_counts = np.sum(np.exp(ptsas_log), axis=-1)
efv_sas = np.sum(
[
sas_counts[s1, a, s2] * phi(s1, a, s2)
for s1 in xtr.states
for a in xtr.actions
for s2 in xtr.states
],
axis=0,
)
nll_grad = efv_sas - phi_bar
else:
raise ValueError
return nll, nll_grad
elif isinstance(xtr, DiscreteImplicitExtras):
# Handle Implicit dynamics MDP
# Only supports state features - otherwise we run out of memory
assert (
phi.type == phi.Type.OBSERVATION
), "For DiscreteImplicit MPDs only state-based rewards are supported"
# Only supports deterministic transitions
assert (
xtr.is_deterministic
), "For DiscreteImplicit MPDs only deterministic dynamics are supported"
rs = np.array([r_linear(phi(s)) for s in xtr.states])
# Catch float overflow as an error - reward magnitude is too large for
# exponentiation with this max path length
with np.errstate(over="raise"):
# Compute alpha_log
alpha_log = nb_backward_pass_log_deterministic_stateonly(
xtr.p0s,
max_path_length,
xtr.parents_fixedsize,
rs,
gamma=xtr.gamma,
padded=xtr.is_padded,
)
# Compute partition value
Z_theta_log = log_partition(
max_path_length, alpha_log, padded=xtr.is_padded
)
# Compute NLL
nll = Z_theta_log - x @ phi_bar
if nll_only:
return nll
else:
# Compute NLL gradient as well
with np.errstate(over="raise"):
# Compute beta_log
beta_log = nb_forward_pass_log_deterministic_stateonly(
max_path_length, xtr.children_fixedsize, rs, gamma=xtr.gamma
)
# Compute transition marginals pts_log (not ptsa, ptsas)
pts_log = nb_marginals_log_deterministic_stateonly(
max_path_length,
xtr.children_fixedsize,
alpha_log,
beta_log,
Z_theta_log,
)
# Compute gradient
s_counts = np.sum(np.exp(pts_log), axis=-1)
efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)
nll_grad = efv_s - phi_bar
return nll, nll_grad
else:
# Unknown MDP type
raise ValueError(f"Unknown MDP class {xtr}")
| 5,340,225
|
def __multiprocess_point_in_poly(df: pd.DataFrame,
x: str,
y: str,
poly: Polygon):
"""
    Return rows in dataframe whose values for x and y are contained in some polygon coordinate shape
Parameters
----------
df: Pandas.DataFrame
Data to query
x: str
name of x-axis plane
y: str
name of y-axis plane
poly: shapely.geometry.Polygon
Polygon object to search
Returns
--------
Pandas.DataFrame
Masked DataFrame containing only those rows that fall within the Polygon
"""
mask = df.apply(lambda r: poly.contains(Point(r[x], r[y])), axis=1)
return df.loc[mask]
| 5,340,226
|
def print_runtime(func, create_global_dict=True):
"""
    A timer decorator that prints how long the decorated function took to run.
    (The create_global_dict flag, intended for aggregating times across runs, is currently unused.)
"""
def function_timer(*args, **kwargs):
"""
A nested function for timing other functions
"""
import time
from collections import defaultdict
start = time.time()
value = func(*args, **kwargs)
end = time.time()
runtime = end - start
print(f"The runtime for {func.__name__} took {round(runtime, 2)} \
seconds to complete")
return value
return function_timer
| 5,340,227
|
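A quick usage sketch of the print_runtime decorator above; the timing shown in the comment is illustrative only:

@print_runtime
def slow_sum(n):
    return sum(range(n))

total = slow_sum(10_000_000)
# prints something like: "The runtime for slow_sum took 0.21 ... seconds to complete"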
def generate_astrometry(kop, time_list):
"""
Simulates observational data.
:param kop: Keplerian orbit parameters
:param time_list: List of observation times
:return: astrometry
"""
trajectory = generate_complete_trajectory(kop, time_list)
return {'t':time_list,
'x':trajectory['position'].T[0],
'y':trajectory['position'].T[1],
'vz':trajectory['velocity'].T[2]}
| 5,340,228
|
def _open_remote(file_ref):
"""Retrieve an open handle to a file.
"""
return io.StringIO(_run_gsutil(["cat", file_ref]).decode())
| 5,340,229
|
def fit(image, labels, featurizer="../model/saved_model/UNet_hpa_4c_mean_8.pth"):
"""Train a pixel classifier.
Parameters
----------
image: np.ndarray
Image data to be classified.
labels: np.ndarray
        Sparse classification, where 0 pixels are ignored, and other integer
        values correspond to class membership.
    featurizer: str, optional
        Either "filter" for filter-bank features or a path to a saved UNet
        featurizer checkpoint.
    Returns
    ----------
    classifier: sklearn.ensemble.RandomForestClassifier
        Object that can perform classifications, returned together with the
        computed feature array.
"""
print(featurizer)
# pad input image
w,h = image.shape[-2:]
w_padding = int((16-w%16)/2) if w%16 >0 else 0
h_padding = int((16-h%16)/2) if h%16 >0 else 0
if len(image.shape) == 3:
image = np.pad(image, ((0,0),(w_padding, w_padding),(h_padding, h_padding)), 'constant')
elif len(image.shape) == 2:
image = np.pad(image, ((w_padding, w_padding),(h_padding, h_padding)), 'constant')
    # make sure image has four dimensions (b,c,w,h)
while len(image.shape) < 4:
image = np.expand_dims(image, 0)
image = np.transpose(image, (1,0,2,3))
# choose filter or unet featurizer
if featurizer == "filter":
features = filter_featurize(image)
else:
features = unet_featurize(image, featurizer)
# crop out paddings
if w_padding > 0:
features = features[w_padding:-w_padding]
if h_padding > 0:
features = features[:,h_padding:-h_padding]
# reshape and extract data
X = features.reshape([-1, features.shape[-1]])
y = labels.reshape(-1)
X = X[y != 0]
y = y[y != 0]
# define and fit classifier
clf = RandomForestClassifier(n_estimators=10)
if len(X) > 0:
clf = clf.fit(X, y)
return clf, features
| 5,340,230
|
def test_register_algorithm_with_missing_fields(
host: str,
token: str,
project: int) -> None:
""" Unit test for the ReigsterAlgorithm endpoint focused on missing request body fields
Request bodies are created with missing required fields and a workflow tries
to be registered with these incorrect request bodies.
Args:
host: Project URL
token: User token used for connecting to the host
project: Unique identifier of test project
"""
NAME = str(uuid.uuid1())
DESCRIPTION = 'description'
CLUSTER = 1
FILES_PER_JOB = 1
# Setup the interface to tator and get the user ID
tator_api = tator.get_api(host=host, token=token)
user = tator_api.whoami()
user_id = user.id
# Upload a manifest file
response = _upload_test_algorithm_manifest(
host=host, token=token, project=project, manifest_name='test.yaml')
manifest_url = response.url
# Missing name field
caught_exception = False
try:
spec = tator.models.Algorithm(
project=project,
user=user_id,
description=DESCRIPTION,
manifest=manifest_url,
cluster=CLUSTER,
files_per_job=FILES_PER_JOB)
response = tator_api.register_algorithm(project=project, algorithm_spec=spec)
except:
caught_exception = True
assert caught_exception
# Missing user field
caught_exception = False
try:
spec = tator.models.Algorithm(
name=NAME,
project=project,
description=DESCRIPTION,
manifest=manifest_url,
cluster=CLUSTER,
files_per_job=FILES_PER_JOB)
response = tator_api.register_algorithm(project=project, algorithm_spec=spec)
except:
caught_exception = True
assert caught_exception
# Missing description field
caught_exception = False
try:
spec = tator.models.Algorithm(
name=NAME,
project=project,
user=user_id,
manifest=manifest_url,
cluster=CLUSTER,
files_per_job=FILES_PER_JOB)
response = tator_api.register_algorithm(project=project, algorithm_spec=spec)
except:
caught_exception = True
assert caught_exception
# Missing manifest
caught_exception = False
try:
spec = tator.models.Algorithm(
name=NAME,
project=project,
user=user_id,
description=DESCRIPTION,
cluster=CLUSTER,
files_per_job=FILES_PER_JOB)
response = tator_api.register_algorithm(project=project, algorithm_spec=spec)
except:
caught_exception = True
assert caught_exception
    # Missing files_per_job field
caught_exception = False
try:
spec = tator.models.Algorithm(
name=NAME,
project=project,
user=user_id,
description=DESCRIPTION,
manifest=manifest_url,
cluster=CLUSTER)
response = tator_api.register_algorithm(project=project, algorithm_spec=spec)
except:
caught_exception = True
assert caught_exception
| 5,340,231
|
def overlay_data(
graph: BELGraph,
data: Mapping[BaseEntity, Any],
label: Optional[str] = None,
overwrite: bool = False,
) -> None:
"""Overlay tabular data on the network.
:param graph: A BEL Graph
:param data: A dictionary of {tuple node: data for that node}
:param label: The annotation label to put in the node dictionary
:param overwrite: Should old annotations be overwritten?
"""
if label is None:
label = 'weight'
for node, value in data.items():
if node not in graph:
logger.debug('%s not in graph', node)
continue
if label in graph.nodes[node] and not overwrite:
logger.debug('%s already on %s', label, node)
continue
graph.nodes[node][label] = value
| 5,340,232
|
def himydata_client(args):
"""Returns an instance of Himydata Client or DryRunClient"""
if args.dry_run:
return DryRunClient()
else:
with open(args.config) as input:
config = json.load(input)
if not config.get('disable_collection', True):
logger.info('Sending version information to stitchdata.com. ' +
'To disable sending anonymous usage data, set ' +
'the config parameter "disable_collection" to true')
threading.Thread(target=collect).start()
missing_fields = []
if 'client_id' in config:
client_id = config['client_id']
else:
missing_fields.append('client_id')
if 'himydata_url' in config:
himydata_url = config['himydata_url']
else:
himydata_url = DEFAULT_HIMYDATA_URL
if 'api_key' in config:
api_key = config['api_key']
else:
missing_fields.append('api_key')
if missing_fields:
raise Exception('Configuration is missing required fields: {}'
.format(missing_fields))
return Client(client_id, api_key, himydata_url=himydata_url, callback_function=write_last_state)
| 5,340,233
|
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return ST_KEYWORDS[1]
| 5,340,234
|
def coo_index_to_data(index):
"""
    Converts a data index (row, col) into 1-based, pixel-centered (x, y) coordinates of the center of the pixel
    index: (int, int) or int
        (row, col) index of the pixel in the data table, or a single row or column index
"""
return (index[1] + 1.0, index[0] + 1.0)
| 5,340,235
|
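A tiny worked example of the convention used by coo_index_to_data above (0-based row/column in, 1-based pixel-centred x/y out):

print(coo_index_to_data((0, 0)))   # (1.0, 1.0): centre of the first pixel
print(coo_index_to_data((2, 5)))   # (6.0, 3.0): x = col + 1, y = row + 1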
def prepare_output_well(df, plates, output, rawdata, identifier_features, location_features):
""" Prepare the output file with plate, row and column information
Calculate penetrance and p-value
Args:
df: Existing combined dictionary
plates: Plates in this screen
        output: Output filenames
        rawdata: Raw data (not used directly in this function)
identifier_features: List of strain identifiers
location_features: List of Plate - Row - Column - Filename
Return:
final_df_output: Combined outlier detection results
"""
print('Preparing the output values by well...')
log_write(output['log'], 'Preparing penetrance results by well...\n')
# Create new dataframe from dict
append_list = identifier_features + location_features + ['Is_Inlier']
final_df = dataframe_from_dict(df, append_list)
if 'Row' in final_df.columns:
well_identifier = 'Row_Col'
else:
for f in location_features:
if 'well' in f.lower():
well_identifier = f
try:
final_df[well_identifier] = final_df.Row.map(int).map(str) + '_' + final_df.Column.map(int).map(str)
except AttributeError:
final_df[well_identifier] = final_df[well_identifier].map(str)
# Initialize output folder
final_df_output = pd.DataFrame(columns = identifier_features + location_features +
['Num_cells', 'Penetrance', 'P-value'])
this_row = 0
# Regroup this dataframes by plates then row column info
WT_cells, WT_cells_outliers = p_value(df)
plate_column = 'Plate'
for p in plates:
final_df_plate = final_df[final_df[plate_column] == p]
# Regroup this dataframes by Row and Column
row_col = final_df_plate[well_identifier].unique().tolist()
for rc in row_col:
df_rc = final_df_plate[final_df_plate[well_identifier] == rc]
is_inlier_rc = np.asarray(df_rc['Is_Inlier'])
num_cells = df_rc.shape[0]
num_outliers = sum(is_inlier_rc == 0)
pene = float(num_outliers) / num_cells * 100
pval = 1 - stats.hypergeom.cdf(num_outliers, WT_cells, WT_cells_outliers, num_cells)
# Append them to corresponding variables
line = []
for i in identifier_features + location_features:
if 'plate' in i.lower():
i = 'Plate'
line.append(df_rc[i].unique()[0])
line.append(num_cells)
line.append(pene)
line.append(pval)
final_df_output.loc[this_row, ] = line
this_row += 1
# Save into a dataframe
final_df_output = final_df_output.sort_values('Penetrance', ascending=False)
final_df_output = final_df_output.reset_index(drop=True)
final_df_output.to_csv(path_or_buf=output['ODresultsWell'], index=False)
return final_df_output
| 5,340,236
|
def main(input_file):
"""Solve puzzle and connect part 1 with part 2 if needed."""
inp = read_input(input_file)
p1, p2 = part_1_and_2(inp)
print(f"Solution to part 1: {p1}")
print(f"Solution to part 2: {p2}")
return p1, p2
| 5,340,237
|
def extract_fields():
"""Compiles all the fields within an object on a Tkinter Listbox"""
global object_name
object_name = select(entity, 0)
options = sf.query_all(
("SELECT ID, QualifiedAPIName from FieldDefinition "
"where EntityDefinitionId = '" + select(entity, 0)
+ "' order by QualifiedApiName")
)
optionList = []
fields.delete(0, END)
for record in options['records']:
optionList.append(record['QualifiedApiName'])
    for field_name in optionList:
        fields.insert(END, field_name)
| 5,340,238
|
def generateListPermutations(elements, level=0):
"""Generate all possible permutations of the list 'elements'."""
#print(" " * level, "gP(", elements, ")")
if len(elements) == 0:
return [[]]
permutations = []
for e in elements:
reduced = elements[:]
reduced.remove(e)
reducedPermutations = generateListPermutations(reduced, level + 1)
#print(" "*level, "reduced", reducedPermutations)
for p in reducedPermutations:
p.insert(0, e)
permutations.append(p)
return permutations
| 5,340,239
|
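A quick sanity check for the generateListPermutations snippet above; n elements should yield n! permutations:

perms = generateListPermutations(['a', 'b', 'c'])
print(len(perms))   # 6 (= 3!)
print(perms[0])     # ['a', 'b', 'c']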
def save_config(config_dict, filename=None, update=False):
"""
Writes configuration to a file
"""
filename = filename or default_config_filename
config = configparser.RawConfigParser()
if update:
if os.path.exists(filename):
config.read(filename)
else:
bootstrap_config_parser(config)
for section, section_content in config_dict.items():
if not config.has_section(section):
config.add_section(section)
for option, option_value in section_content.items():
config.set(section, option, option_value)
with open(filename, 'wt') as f:
os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
config.write(f)
| 5,340,240
|
def test_tree_with_one_node_root_exists(one_t):
"""Root of tree should exist if it has one node."""
assert one_t.root
| 5,340,241
|
def main(provider: Provider, args: List[str]) -> None: # args not in use?
"""For use as the `main` in programs that wrap a custom Provider
implementation into a Pulumi-compatible gRPC server.
:param provider: an instance of a Provider subclass
    :param args: command line arguments such as os.argv[1:]
"""
argp = argparse.ArgumentParser(description='Pulumi provider plugin (gRPC server)')
argp.add_argument('engine', help='Pulumi engine address')
engine_address: str = argp.parse_args().engine
async def serve() -> None:
server = grpc.aio.server(options=_GRPC_CHANNEL_OPTIONS)
servicer = ProviderServicer(provider, args, engine_address=engine_address)
provider_pb2_grpc.add_ResourceProviderServicer_to_server(servicer, server)
port = server.add_insecure_port(address='0.0.0.0:0')
await server.start()
sys.stdout.buffer.write(f'{port}\n'.encode())
sys.stdout.buffer.flush()
await server.wait_for_termination()
try:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(serve())
finally:
loop.close()
except KeyboardInterrupt:
pass
| 5,340,242
|
def run_tfa(tfalclist_path, trendlisttfa_paths, datestfa_path, lcdirectory,
statsdir,
nworkers=16, do_bls_ls_killharm=True, npixexclude=10,
blsqmin=0.002, blsqmax=0.1, blsminper=0.2, blsmaxper=30.0,
blsnfreq=20000, blsnbins=1000, lsminp=0.1, lsmaxp=30.0,
lssubsample=0.1, killharmnharm=10, tfafromirm=False,
outlcdir_tfa=None):
"""
Run TFA on all apertures. Optionally, if do_bls_ls_killharm, include a
sequence of BLS, then Lomb-Scargle, then harmonic killing, then BLS on the
harmonic-subtracted residual. If tfafromirm is True, TFA is called on IRM
mags, otherwise it is called on EPD mags.
If running TFA alone, of order ~10k lightcurves per minute are created in
os.path.join(lcdirectory, 'TFA_LCS'). If also doing BLS etc, it's of order
50-100 processed lightcurves per minute. Periodograms are expensive.
TFA parameters:
npixexclude: trend stars exclusion radius, in units of pixels.
BLS parameters
blsqmin: minimum transit duration in phase units
blsqmax
blsminper
blsmaxper
blsnfreq
blsnbins
GLS parameters
lsminp
lsmaxp
lssubsample
Killharm parameters
killharmnharm
"""
trendlist_tfa_ap1 = [t for t in trendlisttfa_paths if 'ap1' in t][0]
trendlist_tfa_ap2 = [t for t in trendlisttfa_paths if 'ap2' in t][0]
trendlist_tfa_ap3 = [t for t in trendlisttfa_paths if 'ap3' in t][0]
outblsmodeldir_iter1 = os.path.join(lcdirectory,'BLS_MODEL_ITER1')
outblsmodeldir_iter2 = os.path.join(lcdirectory,'BLS_MODEL_ITER2')
if not isinstance(outlcdir_tfa,str):
outlcdir_tfa = os.path.join(lcdirectory,'TFA_LCS')
outstatsfile = os.path.join(statsdir, 'vartools_tfa_stats.txt')
for d in [outblsmodeldir_iter1, outblsmodeldir_iter2, outlcdir_tfa]:
if not os.path.exists(d):
os.mkdir(d)
if do_bls_ls_killharm:
cmdtorun = tfablslskillharmcmd.format(
lc_list_tfa = tfalclist_path,
trendlist_tfa_ap1 = trendlist_tfa_ap1,
trendlist_tfa_ap2 = trendlist_tfa_ap2,
trendlist_tfa_ap3 = trendlist_tfa_ap3,
dates_tfa = datestfa_path,
npixexclude = npixexclude,
outblsmodeldir_iter1 = outblsmodeldir_iter1,
outblsmodeldir_iter2 = outblsmodeldir_iter2,
outlcdir_tfa = outlcdir_tfa,
outstatsfile = outstatsfile,
nproc = nworkers,
blsqmin = blsqmin,
blsqmax = blsqmax,
blsminper = blsminper,
blsmaxper = blsmaxper,
blsnfreq = blsnfreq,
blsnbins = blsnbins,
lsminp = lsminp,
lsmaxp = lsmaxp,
lssubsample = lssubsample,
killharmnharm = killharmnharm
)
else:
if tfafromirm:
cmdtorun = tfafromirmcmd.format(
lc_list_tfa = tfalclist_path,
trendlist_tfa_ap1 = trendlist_tfa_ap1,
trendlist_tfa_ap2 = trendlist_tfa_ap2,
trendlist_tfa_ap3 = trendlist_tfa_ap3,
dates_tfa = datestfa_path,
npixexclude = npixexclude,
outblsmodeldir_iter1 = outblsmodeldir_iter1,
outblsmodeldir_iter2 = outblsmodeldir_iter2,
outlcdir_tfa = outlcdir_tfa,
outstatsfile = outstatsfile,
nproc = nworkers
)
else:
cmdtorun = tfaonlycmd.format(
lc_list_tfa = tfalclist_path,
trendlist_tfa_ap1 = trendlist_tfa_ap1,
trendlist_tfa_ap2 = trendlist_tfa_ap2,
trendlist_tfa_ap3 = trendlist_tfa_ap3,
dates_tfa = datestfa_path,
npixexclude = npixexclude,
outblsmodeldir_iter1 = outblsmodeldir_iter1,
outblsmodeldir_iter2 = outblsmodeldir_iter2,
outlcdir_tfa = outlcdir_tfa,
outstatsfile = outstatsfile,
nproc = nworkers
)
print(cmdtorun)
returncode = os.system(cmdtorun)
if returncode == 0:
print('{}Z: TFA+BLS+LS+KILLHARM cmd ran'.format(
datetime.utcnow().isoformat()))
return 1
else:
print('ERR! {}Z: TFA+BLS+LS+KILLHARM cmd failed'.format(
datetime.utcnow().isoformat()))
raise AssertionError
| 5,340,243
|
def write_abundance(species_path, markers_path, species_abundance, markers_abundance):
""" Write species results to specified output file """
# Sort the species by median_coverage
output_order = sorted(species_abundance.keys(), key=lambda sid: species_abundance[sid]['median_coverage'], reverse=True)
with OutputStream(species_path) as outfile:
outfile.write('\t'.join(species_profile_schema.keys()) + '\n')
for species_id in output_order:
r = species_abundance[species_id]
record = [species_id, r['read_counts'], r['median_coverage'], r['coverage'], r['relative_abundance'],
r['total_covered'], r['unique_covered'], r['ambiguous_covered'],
r['total_marker_counts'], r['unique_fraction_covered'], r['total_marker_length']]
outfile.write("\t".join(map(format_data, record, repeat(DECIMALS6, len(record)))) + "\n")
with OutputStream(markers_path) as outfile:
outfile.write('\t'.join(species_marker_profile_schema.keys()) + '\n')
for species_id in output_order:
for mid, md in markers_abundance[species_id].items():
record = [species_id, mid, md['length'], md["gene_id"], md['total_reads'], md['total_alnbps'],
md['coverage'], md['uniq_reads'], md['ambi_reads'], md['uniq_alnbps'], md['ambi_alnbps']]
outfile.write("\t".join(map(format_data, record, repeat(DECIMALS6, len(record)))) + "\n")
| 5,340,244
|
def has_security_updates(update_list):
"""
    Returns the updates in update_list whose category is 'security'.
"""
return filter_updates(update_list, 'category', lambda x: x == 'security')
| 5,340,245
|
def lobby():
"""Return an unchecked place named lobby."""
return UncheckedPlace("Lobby")
| 5,340,246
|
def get_random_sample_indices(
seq_len, num_samples=100, device=torch.device("cpu")):
"""
Args:
seq_len: int, the sampled indices will be in the range [0, seq_len-1]
num_samples: sample size
device: torch.device
Returns:
1D torch.LongTensor consisting of sorted sample indices
(sort should not affect the results as we use transformers)
"""
if num_samples >= seq_len:
# return all indices
sample_indices = np.arange(seq_len)
else:
sample_indices = np.random.choice(
seq_len, size=num_samples, replace=False)
sample_indices = np.sort(sample_indices)
return torch.from_numpy(sample_indices).long().to(device)
| 5,340,247
|
def render_mesh_as_dot(mesh, template=DOT_TEMPLATE):
"""Renders the given mesh in the Graphviz dot format.
:param Mesh mesh: the mesh to be rendered
:param str template: alternative template to use
:returns: textual dot representation of the mesh
"""
custom_filters = {
'hash': lambda s: "id" + hashlib.md5(s).hexdigest()[:6],
# alternative hash for provides ports to avoid conflicts with needs ports with same name
'hash_p': lambda s: "idp" + hashlib.md5(s).hexdigest()[:6],
'escape': lambda s: re.sub(r'([{}|"<>])', r'\\\1', s),
}
return render(mesh, template, custom_filters=custom_filters)
| 5,340,248
|
def snrest(noisy: np.ndarray, noise: np.ndarray, axis=None):
"""
Computes SNR [in dB] when you have:
"noisy" signal+noise time series
"noise": noise only without signal
"""
Psig = ssq(noisy, axis)
Pnoise = ssq(noise)
return 10 * np.log10(Psig / Pnoise)
| 5,340,249
|
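A worked sketch for the snrest snippet above. The ssq helper is external to the snippet, so a hypothetical sum-of-squares stand-in is shown here purely to make the sketch self-contained (the real helper may differ):

import numpy as np

def ssq(x, axis=None):
    # hypothetical stand-in: sum of squares along the given axis
    return np.sum(np.asarray(x) ** 2, axis=axis)

rng = np.random.default_rng(0)
t = np.linspace(0, 10, 2000)
noise = 0.1 * rng.standard_normal(t.size)
noisy = np.sin(2 * np.pi * t) + noise
print(snrest(noisy, noise))   # roughly 17 dB for these amplitudes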
def test_sgp4(client):
""" test the sgp4 device interface """
tsince = np.linspace(0, 100, client.n, dtype=np.float64)
sgp4.sgp4(tsince, client.whichconst_array, client.satrec_array)
array = []
for ts in tsince:
r, v = prop.sgp4(client.Satrec(), ts, wgs72)
array.append([r[0], r[1], r[2], v[0], v[1], v[2]])
expected = np.array(array)
assert np.allclose(expected, client.satrec_array[:, 94:])
| 5,340,250
|
async def trace_bek(client: Client, message: Message):
""" Reverse Search Anime Clips/Photos """
x = await message.reply_text("Reverse searching the given media")
dls_loc = await media_to_image(client, message, x)
if dls_loc:
async with ClientSession() as session:
tracemoe = tracemoepy.AsyncTrace(session=session)
search = await tracemoe.search(dls_loc, upload_file=True)
os.remove(dls_loc)
result = search["docs"][0]
caption = (
f"**Title**: {result['title_english']} (`{result['title_native']}`)\n"
f"\n**Anilist ID:** `{result['anilist_id']}`"
f"\n**Similarity**: `{(str(result['similarity']*100))[:5]}`"
f"\n**Episode**: `{result['episode']}`"
)
preview = await tracemoe.natural_preview(search)
if await check_if_adult(int(result['anilist_id']))=="True" and await (SFW_GRPS.find_one({"id": message.chat.id})):
await message.reply_text("The results parsed seems to be 18+ and not allowed in this group")
return
with open("preview.mp4", "wb") as f:
f.write(preview)
await session.close()
await message.reply_video(
"preview.mp4",
caption=caption,
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("More Info", url=f"https://anilist.co/anime/{result['anilist_id']}")]]))
os.remove("preview.mp4")
else:
await message.reply_text("Couldn't parse results!!!")
await x.delete()
| 5,340,251
|
def load(spect_path, spect_format=None):
"""load spectrogram and related arrays from a file,
return as an object that provides Python dictionary-like
access
Parameters
----------
spect_path : str, Path
to an array file.
spect_format : str
Valid formats are defined in vak.io.spect.SPECT_FORMAT_LOAD_FUNCTION_MAP.
Default is None, in which case the extension of the file is used.
Returns
-------
spect_dict : dict-like
either a dictionary or dictionary-like object that provides access to arrays
from the file via keys, e.g. spect_dict['s'] for the spectrogram.
See docstring for vak.audio.to_spect for default keys for spectrogram
array files that function creates.
"""
spect_path = Path(spect_path)
if spect_format is None:
# "replace('.', '')", because suffix returns file extension with period included
spect_format = spect_path.suffix.replace('.', '')
spect_dict = constants.SPECT_FORMAT_LOAD_FUNCTION_MAP[spect_format](spect_path)
return spect_dict
| 5,340,252
|
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(views.blueprint, url_prefix='/api')
app.register_blueprint(pipeline.views.blueprint, url_prefix='/api')
app.register_blueprint(job.views.blueprint, url_prefix='/api')
app.register_blueprint(stage.views.blueprint, url_prefix='/api')
app.register_blueprint(result.views.blueprint)
app.register_blueprint(starter.views.blueprint)
| 5,340,253
|
def PSNR(a, b, max_val=255.0, name=None):
"""Returns the Peak Signal-to-Noise Ratio between a and b.
Arguments:
a: first set of images.
b: second set of images.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
name: namespace to embed the computation in.
Returns:
The scalar PSNR between a and b. The shape of the returned tensor is
[batch_size, 1].
"""
with tf.name_scope(name, 'PSNR', [a, b]):
psnr = tf.image.psnr(a, b, max_val=max_val, name=name)
_, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
return tf.identity(psnr)
| 5,340,254
|
def split_list(iterable: Iterable,
size: Optional[int] = 5) -> List[list]:
"""Takes an iterable and splits it into lists of its elements.
The size of each sub-list depends on the provided size argument."""
for i in range(0, len(iterable), size):
yield iterable[i:i + size]
| 5,340,255
|
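A quick usage sketch for the split_list snippet above (it is a generator, so wrap it in list() to materialise the chunks):

chunks = list(split_list(list(range(12)), size=5))
print(chunks)   # [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]]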
def command(f):
""" indicate it's a command of naviseccli
:param f: function that returns the command in list
:return: command execution result
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
if 'ip' in kwargs:
ip = kwargs['ip']
del kwargs['ip']
else:
ip = None
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute(commands, ip=ip)
return func_wrapper
| 5,340,256
|
def top():
"""
    CPU and memory monitoring
:return: None
"""
from . import dir_char
if dir_char == '\\':
from .SystemTools.Monitor import top
top()
else:
import sys
sys.argv = ['bpytop'] + sys.argv[2:]
from . import requirePackage
requirePackage('bpytop', 'main')()
| 5,340,257
|
def clean_data(file_name):
"""
file_name: file to be cleaned
    This function converts the data types in the original dataframe into more suitable types.
    The good news is that the original dataframe is already in good shape, so there is less to do.
"""
df_input = pd.read_excel(file_name,sheet_name = "IPU2016")
#Checking the basic information about the dataframe (optional)
#print(df_input.info(), df_input.describe())
#print(df_input["Lokasi"].unique())
#Making a copy of the dataframe
df_output = df_input.copy()
#Change column name for consistency
df_output = df_input.rename(columns = {"Tarikh":"Date",
"API":"API_Values",
"Masa":"Time"})
#Note that there is no dominant pollutant data for this dataset
#Converting the date into datetime
df_output["Date"] = df_output["Date"].astype(str)
df_output["Time"] = df_output["Time"].astype(str)
df_output["Datetime"] = df_output[["Date","Time"]].agg("-".join, axis = 1)
df_output["Datetime"] = pd.to_datetime(df_output["Datetime"], format = "%Y%m%d-%I:%M:%S")
#Creating new columns "Area" based on "Lokasi" for consistency
#The area and state allocated are based on the categorization of other dataframes
#the dictionary is organized in the following form: Lokasi: Area
#Note that there are subtle differences in the input Lokasi values so the directory from previous data cleaning python doc is not applicable
df_output["Lokasi"] = df_output["Lokasi"].astype(str)
df_output["Lokasi"] = df_output["Lokasi"].str.rstrip()
area_directory = {"Sek. Men. Pasir Gudang 2, Pasir Gudang": "Pasir Gudang",
"Institut Perguruan Malaysia, Temenggong Ibrahim, Larkin, Johor Bharu": "Lakrin Lama",
"Sek. Men. Teknik Muar, Muar, Johor": "Muar",
"SMA, Bandar Penawar, Kota Tinggi": "Kota Tinggi",
"Sek. Keb. Bakar Arang, Sungai Petani": "Bakar Arang, Sg. Petani",
"Komplek Sukan Langkawi, Kedah": "Langkawi",
"Sek. Men. Agama Mergong, Alor Setar": "Alor Setar",
"Sek. Men. Keb. Tanjung Chat, Kota Bahru": "SMK Tanjung Chat, Kota Bharu",
"SMK. Tanah Merah": "Tanah Merah",
"Sek. Men. Keb. Bukit Rambai": "Bukit Rambai",
"Sek. Men. Tinggi Melaka, Melaka": "Bandaraya Melaka",
"Tmn. Semarak (Phase II), Nilai": "Nilai",
"Sek. Men. Teknik Tuanku Jaafar, Ampangan, Seremban": "Seremban",
"Pusat Sumber Pendidikan N.S. Port Dickson": "Port Dickson",
"Pej. Kajicuaca Batu Embun, Jerantut": "Jerantut",
"Sek. Keb. Indera Mahkota, Kuantan": "Indera Mahkota, Kuantan",
"Sek. Keb. Balok Baru, Kuantan": "Balok Baru, Kuantan",
"Sek. Men. Jalan Tasek, Ipoh": "Jalan Tasek, Ipoh",
"Sek. Men. Keb. Air Puteh, Taiping": "Kg. Air Putih, Taiping",
"Pejabat Pentadbiran Daerah Manjung, Perak": "Seri Manjung",
"Universiti Pendidikan Sultan Idris, Tanjung Malim": "Tanjung Malim",
"Sek. Men. Pegoh, Ipoh, Perak": "S K Jalan Pegoh, Ipoh",
"Institut Latihan Perindustrian (ILP) Kangar": "Kangar",
"Sek. Keb. Cederawasih, Taman Inderawasih, Perai": "Perai",
"Sek. Keb. Seberang Jaya II, Perai": "Seberang Jaya 2, Perai",
"Universiti Sains Malaysia, Pulau Pinang": "USM",
"Sek. Men. Keb Putatan, Tg Aru, Kota Kinabalu": "Kota Kinabalu",
"Pejabat JKR Tawau, Sabah": "Tawau",
"Sek. Men. Keb Gunsanad, Keningau": "Keningau",
"Pejabat JKR Sandakan, Sandakan": "Sandakan",
"Medical Store, Kuching": "Kuching",
"Ibu Pejabat Polis Sibu, Sibu": "Sibu",
"Balai Polis Pusat Bintulu": "Bintulu",
"Sek. Men. Dato Permaisuri Miri": "Miri",
"Balai Polis Pusat Sarikei": "Sarikei",
"Dewan Suarah, Limbang": "Limbang",
"Pejabat Daerah Samarahan, Kota Samarahan": "Samarahan",
"Kompleks Sukan, Sri Aman": "Sri Aman",
"Stadium Tertutup, Kapit": "Kapit",
"ILP MIRI": "ILP Miri",
"Sek. Men. (P) Raja Zarina, Kelang": "Pelabuhan Kelang",
"Sek. Keb. Bandar Utama, Petaling Jaya": "Petaling Jaya",
"Sek. Keb. TTDI Jaya, Shah Alam": "Shah Alam",
"Sekolah Menengah Sains, Kuala Selangor": "Kuala Selangor",
"Kolej MARA, Banting": "Banting",
"Sek. Ren. Keb. Bukit Kuang, Teluk Kalung, Kemaman": "Kemaman",
"Kuarters TNB, Paka-Kertih": "Paka",
"Sek. Keb. Chabang Tiga, Kuala Terengganu": "Kuala Terengganu",
"Taman Perumahan Majlis Perbandaran Labuan": "Labuan",
"Sek. Keb. Putrajaya 8(2), Jln P8/E2, Presint 8, Putrajaya": "Putrajaya",
"Sek.Men.Keb.Seri Permaisuri, Cheras": "Cheras,Kuala Lumpur",
"Sek. Keb. Batu Muda, Batu Muda, Kuala Lumpur": "Batu Muda,Kuala Lumpur"}
#Create column "Area"
df_output["Area"] = df_output["Lokasi"].map(area_directory)
#Create column "State"
#Since there is very little tokens, mapping a dictionary will be faster
state_directory = {"JOHOR": "Johor",
"KEDAH": "Kedah",
"KELANTAN": "Kelantan",
"MELAKA": "Melaka",
"N.SEMBILAN": "Negeri Sembilan",
"PAHANG": "Pahang",
"PERAK": "Perak",
"PERLIS": "Perlis",
"PULAU PINANG": "Pulau Pinang",
"SABAH": "Sabah",
"SARAWAK": "Sarawak",
"SELANGOR": "Selangor",
"TERENGGANU": "Terengganu",
"WILAYAH PERSEKUTUAN": "Wilayah Persekutuan"}
df_output["State"] = df_output["Negeri"].map(state_directory)
df_output = df_output.drop(columns = ["Date", "Time", "Lokasi", "Negeri"])
#Checking the basic information about the final dataframe (optional)
#print(df_output.info())
#Export output to new csv file (edit path and name as needed)
    df_output.to_csv(r"file_path\file_name.csv")
return df_output
| 5,340,258
|
def get_contact_pages(buffer, domain):
"""
Returns links to all possible contact pages found on the site index page
"""
usual_contact_titles = [u'Contact', u'Contacts', u'About', u'Контакты', u'Связаться с нами']
usual_contact_urls = ['/contact', '/contacts', '/info']
result = list()
html = fromstring(buffer)
for a in html.xpath('//a'):
title = a.text_content().strip()
url = a.get('href')
if url is None:
continue
if title in usual_contact_titles or url in usual_contact_urls:
result.append(normalize_url(url, domain))
del html
return list(set(result))
| 5,340,259
|
def _hydrate_active_votes(vote_csv):
"""Convert minimal CSV representation into steemd-style object."""
if not vote_csv:
return []
votes = []
for line in vote_csv.split("\n"):
voter, rshares, percent, reputation = line.split(',')
votes.append(dict(voter=voter,
rshares=rshares,
percent=percent,
reputation=rep_to_raw(reputation)))
return votes
| 5,340,260
|
def skew(width, height, magnitude, mode='random'):
"""
Skew the ChArUco in 4 different modes.
    :param width: width of the image in pixels
    :param height: height of the image in pixels
    :param magnitude: skew magnitude in pixels
    :param mode: 0: top narrow, 1: bottom narrow, 2: left skew, 3: right skew; 'random' picks one at random
:return:
"""
# Randomize skew
if mode == 'random':
mode = random.randint(0, 3)
# Translate skew mode into transform coefficients
if mode == 0:
coeffs = find_coeffs(
[(magnitude, 0), (width - magnitude, 0), (width, height), (0, height)],
[(0, 0), (width, 0), (width, height), (0, height)])
elif mode == 1:
coeffs = find_coeffs(
[(0, 0), (width, 0), (width - magnitude, height), (magnitude, height)],
[(0, 0), (width, 0), (width, height), (0, height)])
elif mode == 2:
coeffs = find_coeffs(
[(0, 0), (width, 0), (width + magnitude, height), (magnitude, height)],
[(0, 0), (width, 0), (width, height), (0, height)])
elif mode == 3:
coeffs = find_coeffs(
[(magnitude, 0), (width + magnitude, 0), (width, height), (0, height)],
[(0, 0), (width, 0), (width, height), (0, height)])
return coeffs
| 5,340,261
|
def reset_env(exit=True):
"""Reset the environment by cleaning out all temporary outputs."""
print('NOTE: Resetting the environment...')
wd.database.init()
wd.database.delete_temp()
wd.outputtools.delete_temp()
wd.database.close()
if exit:
print('NOTE: Exiting the program... Goodbye!\n')
| 5,340,262
|
def aws_ec2_pricing():
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get EC2 pricing per gibabyte in all regions and storage types
summary: *desc
responses:
200:
description: List of instance types
schema:
properties:
instances:
type: array
items:
properties:
instanceType:
type: string
location:
type: string
prices:
type: array
items:
properties:
type:
type: string
costPerHour:
type: float
upfrontCost:
type: float
reservationYears:
type: integer
403:
description: Not logged in
"""
return jsonify(instances=ec2pricing.get_pricing_data())
| 5,340,263
|
def create_app():
"""
Create the application and return it to the user
:return: flask.Flask application
"""
app = Flask(__name__, static_folder=None)
app.url_map.strict_slashes = False
# Load config and logging
load_config(app)
logging.config.dictConfig(
app.config['SLACKBACK_LOGGING']
)
# Register extensions
api = Api(app)
# Add end points
api.add_resource(SlackFeedback, '/feedback/slack')
return app
| 5,340,264
|
def before_feature(context, feature):
"""
HOOK: To be executed before each Feature.
"""
__logger__.info("Starting execution of feature")
__logger__.info("##############################")
__logger__.info("##############################")
| 5,340,265
|
def _get_graph_cls(name):
"""Get scaffoldgraph class from name string."""
if name == 'network':
return ScaffoldNetwork
elif name == 'tree':
return ScaffoldTree
elif name == 'hiers':
return HierS
else:
msg = f'scaffold graph type: {name} not known'
raise ValueError(msg)
| 5,340,266
|
def benchmark_index(
indices_dict, gt_test, test_points, vectors_size_in_bytes, save_path=None, speed_dict=None, size_dict=None
):
"""
Compute recall curves for the indices.
"""
perfect_index_label = "perfect index"
if perfect_index_label not in indices_dict:
indices_dict[perfect_index_label] = None
if speed_dict:
speed_dict[perfect_index_label] = vectors_size_in_bytes
k_max = gt_test.shape[1]
plt.figure(figsize=(16, 8))
k_values = np.arange(0, k_max + 1)
avg_one_recall_at_r = {}
avg_r_recall_at_r = {}
    timeout_s = 5.0
comp_size = vectors_size_in_bytes
for index_key in tq(list(sorted(indices_dict.keys()))):
if index_key not in indices_dict:
continue
index = indices_dict[index_key]
if index_key == "Flat" or (index is None):
y_r_recall_at_r = np.arange(1, k_max + 1)
y_one_recall_at_r = np.ones(k_max)
tot = 1
else:
y_r_recall_at_r = np.zeros(k_max)
y_one_recall_at_r = np.zeros(k_max)
tot = 0
start_time = time.time()
for i, item in enumerate(test_points):
y_r_recall_at_r += np.array(r_recall_at_r_single(item, gt_test[i], index, k_max))
y_one_recall_at_r += np.array(one_recall_at_r_single(item, gt_test[i], index, k_max))
tot += 1
                if time.time() - start_time > timeout_s and tot > 150:
break
avg_r_recall_at_r[index_key] = y_r_recall_at_r / tot
avg_one_recall_at_r[index_key] = y_one_recall_at_r / tot
info_string = {index_key: "" for index_key in indices_dict}
initial_size_string = cast_bytes_to_memory_string(comp_size)
for index_key in indices_dict:
        if speed_dict and index_key in speed_dict:
info_string[index_key] += f"avg speed: {format_speed_ms_per_query(speed_dict[index_key])}, "
        if size_dict and index_key in size_dict:
info_string[index_key] += (
f"(Size: {cast_bytes_to_memory_string(size_dict[index_key])} "
f"({(100*size_dict[index_key]/comp_size):.1f}% of {initial_size_string})"
)
plt.subplot(121)
for index_key in sorted(indices_dict.keys()):
if index_key not in indices_dict:
continue
label = f"{index_key:<30} Index, {info_string[index_key]}"
plt.plot(k_values, np.concatenate(([0], avg_r_recall_at_r[index_key])), label=label)
plt.xlabel("k, number of nearests items")
plt.ylabel("k-recall@k")
plt.vlines(40, 0, k_max)
plt.legend()
plt.tight_layout()
plt.subplot(122)
for index_key in sorted(indices_dict.keys()):
if index_key not in indices_dict:
continue
label = f"{index_key:<30} Index, {info_string[index_key]}"
plt.plot(k_values, np.concatenate(([0], 100 * avg_one_recall_at_r[index_key])), label=label)
plt.xlabel("k, number of nearests items")
plt.ylabel("1-Recall@k")
plt.vlines(100, 0, k_max)
plt.legend()
plt.tight_layout()
if save_path:
plt.savefig(save_path)
plt.show()
| 5,340,267
|
def getgrayim(ra, dec, size=240, output_size=None, filter="g", format="jpg"):
"""Get grayscale image at a sky position
ra, dec = position in degrees
size = extracted image size in pixels (0.25 arcsec/pixel)
output_size = output (display) image size in pixels (default = size).
output_size has no effect for fits format images.
filter = string with filter to extract (one of grizy)
format = data format (options are "jpg", "png")
Returns the image
"""
if format not in ("jpg","png"):
raise ValueError("format must be jpg or png")
if filter not in list("grizy"):
raise ValueError("filter must be one of grizy")
url = geturl(ra,dec,size=size,filters=filter,output_size=output_size,format=format)
r = requests.get(url[0])
im = Image.open(BytesIO(r.content))
return im
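# Minimal usage sketch, assuming the `geturl` helper used above is in scope.
# The coordinates are arbitrary example values inside the PS1 footprint, and
# the call performs a network request to the cutout service.
im = getgrayim(83.633, 22.014, size=240, filter="r")
im.save("cutout_r.jpg")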
| 5,340,268
|
def get_logger(base_name, file_name=None):
"""
    Get a logger that writes logs to both stdout and a file. The default logging
    level is INFO, so adjust it on the returned logger if more verbosity is needed.
    :param base_name: name of the logger; also used as the log file name by default
    :param file_name: optional log file name (without extension); defaults to base_name
    :return: a configured logging.Logger instance
"""
if (file_name is None):
file_name = base_name
logger = logging.getLogger(base_name)
logger.setLevel(logging.INFO)
# create console handler & file handler
ch = logging.StreamHandler()
ch.setStream(sys.stdout)
    fi = logging.FileHandler(filename="..\\logs\\" + file_name + ".log")
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to channels
ch.setFormatter(formatter)
fi.setFormatter(formatter)
# add channels to logger
logger.addHandler(ch)
logger.addHandler(fi)
return logger
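# Minimal usage sketch, assuming `logging` and `sys` are imported as the
# function above requires and that the relative "..\logs" directory exists.
log = get_logger("pipeline")
log.info("pipeline started")
log.setLevel(logging.DEBUG)  # raise verbosity on the returned logger if needed
log.debug("now visible in both handlers")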
| 5,340,269
|
def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False):
"""Elastic transformation for image as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
Parameters
-----------
x : numpy.array
A greyscale image.
alpha : float
Alpha value for elastic transformation.
sigma : float or sequence of float
The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.
mode : str
See `scipy.ndimage.filters.gaussian_filter <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html>`__. Default is `constant`.
cval : float,
Used in conjunction with `mode` of `constant`, the value outside the image boundaries.
is_random : boolean
Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)
References
------------
- `Github <https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`__.
- `Kaggle <https://www.kaggle.com/pscion/ultrasound-nerve-segmentation/elastic-transform-for-data-augmentation-0878921a>`__
"""
if is_random is False:
random_state = np.random.RandomState(None)
else:
random_state = np.random.RandomState(int(time.time()))
#
is_3d = False
if len(x.shape) == 3 and x.shape[-1] == 1:
x = x[:, :, 0]
is_3d = True
elif len(x.shape) == 3 and x.shape[-1] != 1:
raise Exception("Only support greyscale image")
if len(x.shape) != 2:
raise AssertionError("input should be grey-scale image")
shape = x.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
if is_3d:
return map_coordinates(x, indices, order=1).reshape((shape[0], shape[1], 1))
else:
return map_coordinates(x, indices, order=1).reshape(shape)
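# Minimal usage sketch mirroring the docstring example, assuming numpy (np),
# scipy's gaussian_filter and map_coordinates are imported as the function
# above requires.
x = np.random.rand(64, 64).astype(np.float32)
warped = elastic_transform(x, alpha=x.shape[1] * 3, sigma=x.shape[1] * 0.07)
print(warped.shape)  # (64, 64)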
| 5,340,270
|
def create_markdown(
escape=True,
renderer=None,
plugins=None,
acronyms=None,
bibliography="",
chapters=False,
toc=False,
):
"""Create a Markdown instance based on the given condition.
:param escape: Boolean. If using html renderer, escape html.
:param renderer: renderer instance or string of ``html`` and ``ast``.
:param plugins: List of plugins, string or callable.
This method is used when you want to re-use a Markdown instance::
markdown = create_markdown(
escape=False,
renderer='html',
plugins=['url', 'strikethrough', 'footnotes', 'table'],
)
# re-use markdown function
markdown('.... your text ...')
"""
if renderer is None or renderer == "latex":
renderer = LaTeXRenderer(acronym_file=acronyms, chapters=chapters)
if renderer == "html":
renderer = HTMLRenderer(escape=escape)
elif renderer == "ast":
renderer = AstRenderer()
if plugins:
_plugins = []
for p in plugins:
if isinstance(p, str):
_plugins.append(PLUGINS[p])
else:
_plugins.append(p)
plugins = _plugins
return ExtendedMarkdown(
renderer,
inline=ExtendedInlineParser(renderer, chapters=chapters),
block=ExtendedBlockParser(),
plugins=plugins,
bibliography=bibliography,
chapters=chapters,
toc=toc,
)
| 5,340,271
|
def make_game_from_level(level: int, options: Optional[GameOptions] = None) -> textworld.Game:
""" Make a Cooking game of the desired difficulty level.
Arguments:
level: Difficulty level (see notes).
options:
For customizing the game generation (see
:py:class:`textworld.GameOptions <textworld.generator.game.GameOptions>`
for the list of available options).
Returns:
Generated game.
Notes:
Difficulty levels are defined as follows:
TODO
"""
if level == 1:
mode = "easy"
elif level == 2:
mode = "medium"
elif level == 3:
mode = "hard"
else:
raise ValueError("Only level 1, 2 or 3 is supported for this game.")
return make_game(mode, options)
| 5,340,272
|
def _split(num):
"""split the num to a list of every bits of it"""
# xxxx.xx => xxxxxx
num = num * 100
result = []
for i in range(16):
tmp = num // 10 ** i
if tmp == 0:
return result
result.append(tmp % 10)
return result
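# Minimal usage sketch: digits come out least-significant first, with the two
# "cents" digits included because of the scaling by 100.
print(_split(42))   # [0, 0, 2, 4]     -> reads 42.00 from the right
print(_split(3.5))  # [0.0, 5.0, 3.0]  -> reads 3.50 from the right (floats for float input)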
| 5,340,273
|
def genGameMap():
"""This is an "abstract function" to hold this docstring and information.
A GameMap function defines Places and connects all the Places it defines in
a graph, but simpler graph than CommandGraph. It simply uses Place.nextnodes.
A GameMap function returns the starting location."""
| 5,340,274
|
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
| 5,340,275
|
def get_args_kwargs_param_names(fparams) -> (str, str):
"""fparams is inspect.signature(f).parameters
for some function f.
Doctests:
>>> import inspect
>>> def f(): pass
>>> get_args_kwargs_param_names(inspect.signature(f).parameters)
(None, None)
>>> def f(*args): pass
>>> get_args_kwargs_param_names(inspect.signature(f).parameters)
('args', None)
>>> def f(a, b, *filters, **kwargs): pass
>>> get_args_kwargs_param_names(inspect.signature(f).parameters)
('filters', 'kwargs')
>>> def f(x, y, z, user='Joe', **other_users): pass
>>> get_args_kwargs_param_names(inspect.signature(f).parameters)
(None, 'other_users')
"""
args_name = None
kwargs_name = None
for name in fparams:
param = fparams[name]
if param.kind == param.VAR_KEYWORD:
kwargs_name = name
elif param.kind == param.VAR_POSITIONAL:
args_name = name
if args_name and kwargs_name:
break # found both: done
return args_name, kwargs_name
| 5,340,276
|
def clean_up_tokenization_spaces(out_string):
"""Converts an output string (de-BPE-ed) using de-tokenization algorithm from OpenAI GPT."""
out_string = out_string.replace('<unk>', '')
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return out_string
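# Minimal usage sketch on a typical de-BPE-ed string.
print(clean_up_tokenization_spaces("do n't stop , it 's fine !"))
# -> "don't stop, it's fine!"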
| 5,340,277
|
def upload_pkg(go_workspace, pkg_file, service_url, tags, service_account):
"""Uploads existing *.cipd file to the storage and tags it.
Args:
go_workspace: path to 'infra/go' or 'infra_internal/go'.
pkg_file: path to *.cipd file to upload.
service_url: URL of a package repository service.
tags: a list of tags to attach to uploaded package instance.
service_account: path to *.json file with service account to use.
Returns:
{'package': <name>, 'instance_id': <sha1>}
Raises:
UploadException on error.
"""
print_title('Uploading: %s' % os.path.basename(pkg_file))
args = ['-service-url', service_url]
for tag in sorted(tags):
args.extend(['-tag', tag])
args.extend(['-ref', 'latest'])
if service_account:
args.extend(['-service-account-json', service_account])
args.append(pkg_file)
exit_code, json_output = run_cipd(go_workspace, 'pkg-register', args)
if exit_code:
print
print >> sys.stderr, 'FAILED! ' * 10
raise UploadException('Failed to upload the CIPD package, see logs')
info = json_output['result']
print '%s %s' % (info['package'], info['instance_id'])
return info
| 5,340,278
|
def train_world_model(env, data_dir, output_dir, hparams, epoch):
"""Train the world model on problem_name."""
train_steps = hparams.model_train_steps * (
epoch + hparams.inital_epoch_train_steps_multiplier)
model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
# Hardcoded for now. TODO(koz4k): Make it a hparam.
model_hparams.video_num_input_frames = 4
model_hparams.video_num_target_frames = 1
model_hparams.learning_rate = model_hparams.learning_rate_constant
if epoch > 0:
model_hparams.learning_rate *= hparams.learning_rate_bump
train_supervised(
problem=env,
model_name=hparams.generative_model,
hparams=model_hparams,
data_dir=data_dir,
output_dir=output_dir,
train_steps=train_steps,
eval_steps=100,
local_eval_frequency=2000
)
| 5,340,279
|
def make_rgg(n: int, kbar: float) -> ig.Graph:
"""Make Random Geometric Graph with given number of nodes
and average degree.
"""
radius = np.sqrt(kbar/(np.pi*(n-1)))
return ig.Graph.GRG(n, radius=radius, torus=True)
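# Minimal usage sketch, assuming python-igraph (ig) and numpy (np) are imported
# as the function above requires.
g = make_rgg(1000, 10.0)
print(g.vcount(), 2 * g.ecount() / g.vcount())  # 1000 nodes, average degree close to 10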
| 5,340,280
|
def list_directory(bucket, prefix, s3=None, request_pays=False):
"""AWS s3 list directory."""
if not s3:
session = boto3_session(region_name=region)
s3 = session.client('s3')
pag = s3.get_paginator('list_objects_v2')
params = {
'Bucket': bucket,
'Prefix': prefix,
'Delimiter': '/'}
if request_pays:
params['RequestPayer'] = 'requester'
directories = []
for subset in pag.paginate(**params):
if 'CommonPrefixes' in subset.keys():
directories.extend(subset.get('CommonPrefixes'))
return [r['Prefix'] for r in directories]
| 5,340,281
|
def manual_overrides():
"""Read the overrides file.
Read the overrides from cache, if available. Otherwise, an attempt is made
to read the file as it currently stands on GitHub, and then only if that
fails is the included file used. The result is cached for one day.
"""
return _manual_overrides(datetime.date.today())
| 5,340,282
|
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
| 5,340,283
|
def preprocess_data_for_clustering(df):
"""Prepare data in order to apply a clustering algorithm
Parameters
----------
df : pandas.DataFrame
Input data, *i.e.* city-related timeseries, supposed to have
`station_id`, `ts` and `nb_bikes` columns
Returns
-------
pandas.DataFrame
        Simplified version of `df`, ready to be used for clustering
"""
# Filter unactive stations
max_bikes = df.groupby("station_id")["nb_bikes"].max()
unactive_stations = max_bikes[max_bikes==0].index.tolist()
active_station_mask = np.logical_not(df['station_id'].isin(unactive_stations))
df = df[active_station_mask]
# Set timestamps as the DataFrame index and resample it with 5-minute periods
df = (df.set_index("ts")
.groupby("station_id")["nb_bikes"]
.resample("5T")
.mean()
.bfill())
df = df.unstack(0)
# Drop week-end records
df = df[df.index.weekday < 5]
# Gather data regarding hour of the day
df['hour'] = df.index.hour
df = df.groupby("hour").mean()
return df / df.max()
| 5,340,284
|
def chars(line):
"""Returns the chars in a TerminalBuffer line.
"""
return "".join(c for (c, _) in notVoids(line))
| 5,340,285
|
def map_is_finite(query_points: tf.Tensor, observations: tf.Tensor) -> Dataset:
"""
:param query_points: A tensor.
:param observations: A tensor.
:return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points``,
along with the tensor result of mapping the elements of ``observations`` to: `1` if they are
a finite number, else `0`, with dtype `tf.uint8`.
:raise ValueError or InvalidArgumentError: If ``query_points`` and ``observations`` do not
satisfy the shape constraints of :class:`~trieste.data.Dataset`.
"""
return Dataset(query_points, tf.cast(_is_finite(observations), tf.uint8))
| 5,340,286
|
def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool
"""Return True if the image exists, otherwise False."""
try:
docker_command(args, ['image', 'inspect', image], capture=True)
except SubprocessError:
return False
return True
| 5,340,287
|
def remove_local(path):
"""Remove a local file or directory.
Arguments:
path (str): Absolute path to the file or directory.
Returns:
Boolean indicating result.
"""
if os.path.isfile(path):
# Regular file
remover = os.remove
elif os.path.isdir(path):
# Directory
remover = shutil.rmtree
else:
# What?
cprint(m.PATH_NOEXIST % path, 'red')
return False
try:
remover(path)
except Exception as e:
# Something failed
cprint(m.RM_ERR % (path, e), 'red')
return False
return True
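# Minimal usage sketch; the path is a hypothetical example, and on failure the
# helper prints a message via `cprint` and returns False.
if remove_local("/tmp/old_export"):
    print("removed")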
| 5,340,288
|
def get_g2_fit_general_two_steps(
g2,
taus,
function="simple_exponential",
second_fit_range=[0, 20],
sequential_fit=False,
*argv,
**kwargs,
):
"""
Fit g2 in two steps,
i) Using the "function" to fit whole g2 to get baseline and beta (contrast)
ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function
"""
g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
g2, taus, function, sequential_fit, *argv, **kwargs
)
guess_values = {}
for k in list(g2_fit_result[0].params.keys()):
guess_values[k] = np.array(
[g2_fit_result[i].params[k].value for i in range(g2.shape[1])]
)
if "guess_limits" in kwargs:
guess_limits = kwargs["guess_limits"]
else:
guess_limits = dict(
baseline=[1, 1.8],
alpha=[0, 2],
beta=[0.0, 1],
relaxation_rate=[0.001, 10000],
)
g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
g2,
taus,
function="simple_exponential",
sequential_fit=sequential_fit,
fit_range=second_fit_range,
fit_variables={
"baseline": False,
"beta": False,
"alpha": False,
"relaxation_rate": True,
},
guess_values=guess_values,
guess_limits=guess_limits,
)
return g2_fit_result, taus_fit, g2_fit
| 5,340,289
|
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % \
(module.__name__, t)
return f, t
| 5,340,290
|
def rate_matrix_arrhenius_time_segmented(energies, barriers, segment_temperatures, segment_start_times, t_range):
"""
Compute the rate matrix for each time ``t`` in ``t_range``, where the bath temperature is a piecewise constant
function of time.
The bath temperature function, by which the rate matrices are calculated, is a piecewise constant function where
each piece is a segment described by the its temperature and the time it starts.
First, the temperature for every time, denoted by ``T(t)``, is calculated as follows:
``T(t) = Ti`` where ``t = segment_start_times[i]`` and ``Ti = segment_temperatures[i]``.
Then, for every time ``t`` in ``t_range``, a rate matrix is calculated with the corresponding temperature ``T(t)``.
    From ``segment_start_times[-1]`` onward, the bath temperature is held at the last given temperature
    ``segment_temperatures[-1]`` until the last time ``t`` in ``t_range``.
Parameters
----------
energies : (N,) array or sequence of float
        Energies of the states of the Arrhenius system, in ascending order.
barriers : (N, N) array
Energy barriers between states. Must be given as matrix.
segment_temperatures : (K,) array
Temperature sequence where each temperature corresponds to each segment.
segment_start_times : (K,) array
Start time sequence where each time corresponds to each segment.
t_range : (M,) array
Time sequence.
Returns
-------
rate_matrix_time : (N, N, M)
Rate matrices stacked in the depth dimension (axis=2).
Raises
    ------
ValueError
If the first segment start time ``segment_start_times[0]`` is not equal to ``t_range[0]``.
"""
if segment_start_times[0] != t_range[0]:
raise ValueError('The first segment start time `segment_start_times[0]` must be equal to `t_range[0]`.')
temperature_array = temperature_array_from_segments(segment_temperatures, segment_start_times, t_range)
return rate_matrix_arrhenius(energies, barriers, temperature_array)
| 5,340,291
|
def div_tensor(tensor, coords=(x, y, z), h_vec=(1, 1, 1)):
"""
Divergence of a (second order) tensor
Parameters
----------
tensor : Matrix (3, 3)
        Tensor-valued function to compute the divergence of.
    coords : Tuple (3), optional
        Coordinates for the new reference system. Optional parameter;
        defaults to (x, y, z).
    h_vec : Tuple (3), optional
        Scale coefficients for the new coordinate system. Defaults to
        (1, 1, 1).
Returns
-------
divergence: Matrix
Divergence of tensor.
References
----------
.. [RICHARDS] Rowland Richards. Principles of Solids Mechanics.
CRC Press, 2011.
"""
h1, h2, h3 = h_vec
u1, u2, u3 = coords
div1 = diff(h2*h3*tensor[0, 0], u1) + diff(h1*h3*tensor[0, 1], u2) \
+ diff(h1*h2*tensor[0, 2], u3) + h3*tensor[0, 1]*diff(h1, u2) \
+ h2*tensor[0, 2]*diff(h1, u3) - h3*tensor[1, 1]*diff(h2, u1) \
- h2*tensor[2, 2]*diff(h3, u1)
div2 = diff(h2*h3*tensor[1, 0], u1) + diff(h1*h3*tensor[1, 1], u2) \
+ diff(h1*h2*tensor[1, 2], u3) + h1*tensor[1, 2]*diff(h2, u3) \
+ h3*tensor[1, 0]*diff(h2, u1) - h1*tensor[2, 2]*diff(h3, u2) \
- h3*tensor[2, 2]*diff(h1, u2)
div3 = diff(h2*h3*tensor[2, 0], u1) + diff(h1*h3*tensor[2, 1], u2) \
+ diff(h1*h2*tensor[2, 2], u3) + h2*tensor[2, 0]*diff(h1, u1) \
+ h1*tensor[2, 1]*diff(h1, u2) - h1*tensor[1, 1]*diff(h2, u3) \
+ h2*tensor[2, 2]*diff(h1, u3)
return Matrix([div1, div2, div3])/(h1*h2*h3)
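# Minimal usage sketch, assuming the module-level sympy symbols x, y, z used as
# the default coordinates above are defined. In Cartesian coordinates
# (h_vec = (1, 1, 1)) the divergence of diag(x**2, y**2, z**2) is (2x, 2y, 2z).
from sympy import Matrix

T = Matrix([[x**2, 0, 0],
            [0, y**2, 0],
            [0, 0, z**2]])
print(div_tensor(T))  # Matrix([[2*x], [2*y], [2*z]])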
| 5,340,292
|
def convert_path_to_repr_exp(path, with_end=False):
"""
Generate a representative expression for the given path
"""
exp = ""
#print("Path: {}".format(path))
for i in range(len(path)):
if with_end == False and \
((i == 0) or (i == len(path)-1)):
continue
nd_idx = path[i]
if nd_idx == start_state:
exp += "BOS"
continue
if nd_idx == end_state:
exp += "EOS"
continue
node_content = idx_to_node[nd_idx]
#print("Node content: {}".format(node_content))
node_dic = ast.literal_eval(str(node_content))
text = ""
for key, value in node_dic.items():
text = value[1]
break
exp += ' ' + text
return exp
| 5,340,293
|
def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, eth_dst, ports_info):
"""
@summary: Create packets with multicast/broadcast ethernet dst.
"""
if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower():
pytest.skip("Test case is not supported on VLAN interface")
log_pkt_params(ports_info["dut_iface"], eth_dst, ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=eth_dst, # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"],
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
| 5,340,294
|
def cluster_create(context, values):
"""Create a cluster from the values dictionary."""
return IMPL.cluster_create(context, values)
| 5,340,295
|
def compute_heading_error(est, gt):
"""
Args:
est: the estimated heading as sin, cos values
gt: the ground truth heading as sin, cos values
Returns:
MSE error and angle difference from dot product
"""
mse_error = np.mean((est-gt)**2)
dot_prod = np.sum(est * gt, axis=1)
angle = np.arccos(np.clip(dot_prod, a_min=-1, a_max=1))
return mse_error, angle
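# Minimal usage sketch, assuming numpy (np) is imported: two perpendicular unit
# headings give an MSE of 1 and an angle of pi/2.
est = np.array([[0.0, 1.0]])  # (sin, cos) pairs
gt = np.array([[1.0, 0.0]])
mse, angle = compute_heading_error(est, gt)
print(mse, angle)  # 1.0 [1.57079633]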
| 5,340,296
|
def _get_count_bid(soup: bs4.BeautifulSoup) -> int:
""" Return bidding count from `soup`.
Parameters
----------
soup : bs4.BeautifulSoup
Soup of a Yahoo Auction page.
Returns
-------
int
Count of total bidding.
"""
tags = soup.find_all('dt', text='入札件数')
if len(tags) > 0:
tag = tags[0]
if isinstance(tag, bs4.element.Tag):
tag = tag.find_next_sibling('dd', {'class': 'Count__number'})
return int(tag.text[:-4])
return 0
| 5,340,297
|
def _is_class(s):
"""Imports from a class/object like import DefaultJsonProtocol._"""
return s.startswith('import ') and len(s) > 7 and s[7].isupper()
| 5,340,298
|
def evaluate(vsm, wordsim_dataset_path):
"""Extract Correlation, P-Value for specified vector space mapper."""
return evaluation.extract_correlation_coefficient(
score_data_path=wordsim_dataset_path, vsm=vsm
)
| 5,340,299
|