| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def _uid_or_str(node_or_entity):
""" Helper function to support the transition from `Entitie`s to `Node`s.
"""
return (
node_or_entity.uid
if hasattr(node_or_entity, "uid")
else str(node_or_entity)
)
|
82f5747e8c73e1c167d351e1926239f17ea37b98
| 3,645,800
|
import numpy as np
import scipy.stats as st
def power(maf=0.5, beta=0.1, N=100, cutoff=5e-8):
"""
estimate power for a given allele frequency, effect size beta and sample size N
Assumption:
    z-score = beta_ML is distributed as p(0) = N(0, 1.0/(2*maf*(1-maf)*N)) under the null hypothesis
    the actual beta_ML is distributed as p(alt) = N(beta, 1.0/(2*maf*(1-maf)*N))
Arguments:
maf: minor allele frequency of the SNP
beta: effect size of the SNP
N: sample size (number of individuals)
Returns:
power: probability to detect a SNP in that study with the given parameters
"""
"""
std(snp)=sqrt(2.0*maf*(1-maf))
power = \int
beta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp)
E[beta_ML] = (snp^T*snp)^{-1}*snp^T*E[Y]
= (snp^T*snp)^{-1}*snp^T*snp * beta
= beta
    Var[beta_ML] = (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1}
                 = (snp^T*snp)^{-1}
                 = 1/(N * var(snp))
                 = 1/(N * 2 * maf * (1-maf))
"""
assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
if beta<0.0:
beta=-beta
std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
non_centrality = beta
beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
n_grid = 100000
beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
power = (pvals<cutoff).mean()
return power, pvals
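# Usage sketch (illustrative numbers, not taken from the original source):
# pwr, pvals = power(maf=0.3, beta=0.05, N=10000, cutoff=5e-8)
# print("estimated power: %.3f" % pwr)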
|
1806718cd0af5deb38a25a90864bb14f40e2c57a
| 3,645,801
|
import numpy as np
def get_rotation_matrix(angle: float, direction: np.ndarray, point: np.ndarray = None) -> np.ndarray:
"""Compute rotation matrix relative to point and direction
Args:
angle (float): angle of rotation in radian
direction (np.ndarray): axis of rotation
point (np.ndarray, optional): center of rotation. Defaults to None.
Returns:
np.ndarray: rotation_matrix
"""
sina = np.sin(angle)
cosa = np.cos(angle)
direction = direction[:3] / np.linalg.norm(direction[:3])
M = np.diag([cosa, cosa, cosa, 1.0])
M[:3, :3] += np.outer(direction, direction) * (1.0 - cosa)
direction = direction * sina
M[:3, :3] += np.array([[0.0, -direction[2], direction[1]], [direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
# if point is specified, rotation is not around origin
if point is not None:
point = np.array(point[:3], dtype=np.float64, copy=False)
M[:3, 3] = point - np.dot(M[:3, :3], point)
return M
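# Usage sketch (illustrative values): rotate the point (1, 0, 0) by 90 degrees
# about the z-axis through the origin, using homogeneous coordinates.
# R = get_rotation_matrix(np.pi / 2, np.array([0.0, 0.0, 1.0]))
# rotated = R @ np.array([1.0, 0.0, 0.0, 1.0])  # approximately [0, 1, 0, 1]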
|
fd7c8d22368b51310a85453f6a9732f56a443803
| 3,645,802
|
import logging
import sys
import traceback
import inspect
LOG_LEVEL = "INFO"  # default log level; assumed here because the snippet does not define it
def assert_check(args: dict = None, log_level: str = LOG_LEVEL) -> bool:
    """Assert that the caller's argument dict maps values to their expected types."""
    if args is None:
        logging.critical("Arguments dict is empty or does not exist!")
return False
else:
logging.debug("Args dictionary exists, processing assertion check...")
try:
for k, v in args.items():
assert k is not None
assert k != ""
assert k != []
assert k != {}
assert k != ()
assert type(k) == v
return True
except AssertionError:
if log_level == "DEBUG":
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
tb_info = traceback.extract_tb(tb)
_, line, _func, text = tb_info[-1]
logging.error(
f'An error occurred on line {line} in statement "{text}" in function "{inspect.stack()[1].function}".'
)
return False
else:
logging.critical(
                    f'An assertion error occurred but did not call traceback because log level is: "{log_level}".'
)
return False
except Exception as e:
logging.error(e)
raise
return False
|
a61d33f78f99cf91bcde7d5920ba20d6ac3e6816
| 3,645,803
|
from typing import Any
import os
def establish_github_connection(store: dict[str, Any]) -> ValidationStepResult:
"""
Establishes the connection to GitHub.
If the name of the environment variable storing the GitHub PAT is not given,
    then it will default to searching for one named "GH_TOKEN". Providing a
    token makes GitHub's rate limits less stringent. See https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
for more details.
Uses the repository named in the system environment variable
"GITHUB_REPOSITORY" if it exists. If not, default to the hub repository
which is named in the configurations (loaded in using the store).
Returns:
A ValidationStepResult object with
* the Github object,
* the object of the repository from which the pull
request originated
* a dictionary of label names to labels that can be applied to the
pull request.
"""
logger.info(
"Running validations version %s",
store.get(
"VALIDATIONS_VERSION",
"<missing validation version number>"
)
)
logger.info("Current working directory: %s", os.getcwd())
logger.info("GitHub Actions information:")
logger.info(
"GitHub Actions event name: %s",
os.environ.get("GITHUB_EVENT_NAME", "<missing GitHub event name>")
)
logger.info("Connecting to GitHub and retrieving repository...")
# initial GitHub connection
github_PAT: str = os.environ.get(store.get(
"GITHUB_TOKEN_ENVIRONMENT_VARIABLE_NAME",
"GH_TOKEN"
))
github: Github = Github(github_PAT) if github_PAT is not None else Github()
# Get specific repository
repository_name = os.environ.get(
"GITHUB_REPOSITORY",
store.get("HUB_REPOSITORY_NAME")
)
if repository_name is None:
raise RuntimeError("FAILURE: could not find GitHub repository")
repository: Repository = github.get_repo(repository_name)
# Get list of possible labels to apply to PR
possible_labels = {l.name: l for l in repository.get_labels()}
logger.info("Repository successfully retrieved")
logger.info("Github repository: %s", repository.full_name)
return ValidationStepResult(
success=True,
to_store={
"github": github,
"repository": repository,
"possible_labels": possible_labels
}
)
|
8aab64c1d042639096b307f11dc469525095cfcf
| 3,645,804
|
def answer(panel_array):
""" Returns the maximum product of positive and (odd) negative numbers."""
print("panel_array=", panel_array)
# Edge case I: no panels :]
if (len(panel_array) == 0):
return str(0)
# Get zero panels.
zero_panels = list(filter(lambda x: x == 0 , panel_array))
print("zero_panels=", zero_panels)
# Edge case II: no positive nor negative panels.
if (len(zero_panels) == len(panel_array)):
return str(0)
# Get positive panels
positive_panels = list(filter(lambda x: x >0 , panel_array))
print("positive_panels=", positive_panels)
positive_product = 1
for x in positive_panels:
positive_product *= x
# Get negative panels.
negative_panels = sorted(list(filter(lambda x: x <0 , panel_array)))
print("negative_panels=", negative_panels)
    # Edge case III: there is only one "negative panel".
    if (len(negative_panels) == 1):
        # If this is the only panel.
        if (len(panel_array) == 1):
            return str(negative_panels[0])
        # If there are no positive panels, but there is at least one zero panel,
        # the best achievable product is 0.
        elif (len(positive_panels) == 0) and (len(zero_panels) >= 1):
            return str(0)
    # If the number of negative panels is odd, drop the one closest to zero
    # (the last element of the ascending sort) to keep the product maximal.
    if len(negative_panels) % 2 != 0:
        negative_panels.pop()
print("final negative_panels=", negative_panels)
negative_product = 1
for x in negative_panels:
negative_product *= x
# Return product of those two.
return str(negative_product * positive_product)
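# Quick self-check with values chosen here (not part of the original snippet):
if __name__ == "__main__":
    assert answer([2, 0, 2, 2, 0]) == "8"
    assert answer([-2, -3, 4, -5]) == "60"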
|
7169fba8dcf6c0932722dcbc606d6d60fdaf3ed1
| 3,645,805
|
import h5py
def load_fromh5(filepath, dir_structure, slice_num, strt_frm=0):
    """
    load_fromh5 will extract the sinogram from the h5 file
    Output: the sinogram
    filepath: where the file is located in the system
    dir_structure: the h5 file directory structure
    slice_num: the slice where the sinogram will be extracted
    strt_frm (optional): where the sinogram should begin
    """
    with h5py.File(filepath, 'r') as f:
        # e.g. "entry/data/data"
        print(f[dir_structure].shape)
        end_frm = f[dir_structure].shape[0]
        # For APS 2BM h5 file format
        sino = f[dir_structure][int(strt_frm):int(end_frm), int(slice_num), :]
    return sino
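# Usage sketch (hypothetical file path and dataset layout):
# sino = load_fromh5('/data/sample_scan.h5', 'exchange/data', slice_num=600)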
|
90aa278a7429cc832071a374df9de2d8dd2abb88
| 3,645,806
|
def lqr_6_2(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns an LQR environment with 6 bodies of which first 2 are actuated."""
return _make_lqr(
n_bodies=6,
n_actuators=2,
control_cost_coef=_CONTROL_COST_COEF,
time_limit=time_limit,
random=random,
environment_kwargs=environment_kwargs,
)
|
b27a4fd55d67cfcbb9a651b7915fd2e3b4460af9
| 3,645,807
|
def machine_stop(request, tenant, machine):
"""
Stop (power off) the specified machine.
"""
with request.auth.scoped_session(tenant) as session:
serializer = serializers.MachineSerializer(
session.stop_machine(machine),
context = { "request": request, "tenant": tenant }
)
return response.Response(serializer.data)
|
b91a375c1aa8b62a6ed665d0045ff4b9eeae6a18
| 3,645,808
|
import subprocess
def exec_command_rc(*cmdargs, **kwargs):
"""
Return the exit code of the command specified by the passed positional arguments, optionally configured by the
passed keyword arguments.
Parameters
----------
cmdargs : list
Variadic list whose:
1. Mandatory first element is the absolute path, relative path, or basename in the current `${PATH}` of the
command to run.
2. Optional remaining elements are arguments to pass to this command.
All keyword arguments are passed as is to the `subprocess.call()` function.
Returns
----------
int
This command's exit code as an unsigned byte in the range `[0, 255]`, where 0 signifies success and all other
values signal a failure.
"""
# 'encoding' keyword is not supported for 'subprocess.call'; remove it from kwargs.
if 'encoding' in kwargs:
kwargs.pop('encoding')
return subprocess.call(cmdargs, **kwargs)
|
bfe4f5bbdcbed6cfc3c8f52abffe2be7107fd091
| 3,645,809
|
from typing import Tuple
from typing import Dict
from typing import Any
def _get_input_value(arg: Tuple[str, GraphQLArgument]) -> Dict[str, Any]:
"""Compute data for the InputValue fragment of the introspection query for a particular arg."""
return {
"name": __InputValue.fields["name"].resolve(arg, None),
"description": __InputValue.fields["description"].resolve(arg, None),
"type": _get_type_ref(__InputValue.fields["type"].resolve(arg, None)),
"defaultValue": __InputValue.fields["defaultValue"].resolve(arg, None),
}
|
7e82936b07b01531b0716c6904709c37e807d868
| 3,645,810
|
import numpy as np
from math import sqrt
def wrapper(X_mixture, X_component):
    """ Takes in 2 arrays containing the mixture and component data as
    numpy arrays, and returns the estimates of kappa-star computed with the two
    gradient thresholds detailed in the paper (returned as KM2, KM1)."""
N=X_mixture.shape[0]
M=X_component.shape[0]
best_width,kernel=compute_best_rbf_kernel_width(X_mixture,X_component)
lambda_values=np.array([1.00,1.05])
dists=get_distance_curve(kernel,lambda_values,N=N,M=M)
begin_slope=(dists[1]-dists[0])/(lambda_values[1]-lambda_values[0])
dist_diff = np.concatenate((np.ones((N, 1)) / N, -1 * np.ones((M,1)) / M))
distribution_RKHS_dist = sqrt(np.dot(dist_diff.T, np.dot(kernel, dist_diff))[0,0])
thres_par=0.2
nu1=(1-thres_par)*begin_slope + thres_par*distribution_RKHS_dist
nu1=nu1/distribution_RKHS_dist
lambda_star_est_1=mpe(kernel,N,M,nu=nu1)
kappa_star_est_1=(lambda_star_est_1-1)/lambda_star_est_1
nu2=1/sqrt(np.min([M,N]))
nu2=nu2/distribution_RKHS_dist
if nu2>0.9:
nu2=nu1
lambda_star_est_2=mpe(kernel,N,M,nu=nu2)
kappa_star_est_2=(lambda_star_est_2-1)/lambda_star_est_2
return (kappa_star_est_2,kappa_star_est_1)
|
f5e093590897c363bbab2360a14d7c3a82fd6bcd
| 3,645,811
|
import torch
def iou(
outputs: torch.Tensor,
targets: torch.Tensor,
eps: float = 1e-7,
threshold: float = 0.5,
activation: str = "sigmoid"
):
"""
Args:
outputs (torch.Tensor): A list of predicted elements
targets (torch.Tensor): A list of elements that are to be predicted
eps (float): epsilon to avoid zero division
threshold (float): threshold for outputs binarization
        activation (str): A torch.nn activation applied to the outputs.
Must be one of ['none', 'sigmoid', 'softmax2d']
Returns:
float: IoU (Jaccard) score
"""
activation_fn = get_activation_by_name(activation)
outputs = activation_fn(outputs)
if threshold is not None:
outputs = (outputs > threshold).float()
intersection = torch.sum(targets * outputs)
union = torch.sum(targets) + torch.sum(outputs) - intersection + eps
return (intersection + eps) / union
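# Usage sketch (assumes get_activation_by_name is importable from the surrounding package):
# preds = torch.rand(4, 1, 32, 32)
# masks = (torch.rand(4, 1, 32, 32) > 0.5).float()
# score = iou(preds, masks, activation="sigmoid")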
|
4c43832560126c19b8b9ebc01daf3920603b5f17
| 3,645,812
|
def readCoords(f):
"""Read XYZ file and return as MRChem JSON friendly string."""
with open(f) as file:
return '\n'.join([line.strip() for line in file.readlines()[2:]])
|
0cf1a9d07b4b3fe1836ce5c8a308ff67b5fe4c70
| 3,645,813
|
import os
import pandas as pd
def fetch_hillstrom(target_col='visit', data_home=None, dest_subdir=None, download_if_missing=True,
return_X_y_t=False, as_frame=True):
"""Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).
This dataset contains 64,000 customers who last purchased within twelve months.
The customers were involved in an e-mail test.
Major columns:
* ``Visit`` (binary): target. 1/0 indicator, 1 = Customer visited website in the following two weeks.
* ``Conversion`` (binary): target. 1/0 indicator, 1 = Customer purchased merchandise in the following two weeks.
* ``Spend`` (float): target. Actual dollars spent in the following two weeks.
* ``Segment`` (str): treatment. The e-mail campaign the customer received
Read more in the :ref:`docs <Hillstrom>`.
Args:
target_col (string, 'visit' or 'conversion' or 'spend', default='visit'): Selects which column from dataset
will be target
data_home (str): The path to the folder where datasets are stored.
dest_subdir (str): The name of the folder in which the dataset is stored.
download_if_missing (bool): Download the data if not present. Raises an IOError if False and data is missing.
return_X_y_t (bool, default=False): If True, returns (data, target, treatment) instead of a Bunch object.
as_frame (bool): If True, returns a pandas Dataframe for the data, target and treatment objects
in the Bunch returned object; Bunch return object will also have a frame member.
Returns:
Bunch or tuple: dataset.
Bunch:
By default dictionary-like object, with the following attributes:
* ``data`` (ndarray or DataFrame object): Dataset without target and treatment.
* ``target`` (Series object): Column target by values.
* ``treatment`` (Series object): Column treatment by values.
                * ``DESCR`` (str): Description of the Hillstrom dataset.
* ``feature_names`` (list): Names of the features.
* ``target_name`` (str): Name of the target.
* ``treatment_name`` (str): Name of the treatment.
Tuple:
            tuple (data, target, treatment) if `return_X_y_t` is True
References:
https://blog.minethatdata.com/2008/03/minethatdata-e-mail-analytics-and-data.html
"""
url = 'https://hillstorm1.s3.us-east-2.amazonaws.com/hillstorm_no_indices.csv.gz'
csv_path = _get_data(data_home=data_home,
url=url,
dest_subdir=dest_subdir,
dest_filename='hillstorm_no_indices.csv.gz',
download_if_missing=download_if_missing)
    if target_col not in ('visit', 'conversion', 'spend'):
raise ValueError(f"target_col value must be from {['visit', 'conversion', 'spend']}. "
f"Got value {target_col}.")
data = pd.read_csv(csv_path, usecols=[i for i in range(8)])
feature_names = list(data.columns)
treatment = pd.read_csv(csv_path, usecols=['segment'])
target = pd.read_csv(csv_path, usecols=[target_col])
if as_frame:
target = target[target_col]
treatment = treatment['segment']
else:
data = data.to_numpy()
target = target.to_numpy()
treatment = treatment.to_numpy()
module_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_path, 'descr', 'hillstrom.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y_t:
return data, target, treatment
else:
target_name = target_col
return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr,
feature_names=feature_names, target_name=target_name, treatment_name='segment')
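# Usage sketch (downloads the CSV on first call; arguments are illustrative):
# data, target, treatment = fetch_hillstrom(target_col='conversion', return_X_y_t=True)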
|
a400779d477f413e88c252d2f47b6318385f8ab1
| 3,645,814
|
def api_update_note(note_id: int):
"""Update a note"""
db = get_db()
    title = request.form.get("title")
    content = request.form.get("content")
note = db.update_note(note_id, title, content)
return jsonify(note.__dict__)
|
d6668b89854e4aa6c248041a97a55c95cd568e9e
| 3,645,815
|
def padding_oracle(decrypt, cipher, *, bs, unknown=b"\x00", iv=None):
"""Padding Oracle Attack
Given a ciphersystem such that:
- The padding follows the format of PKCS7
- The mode of the block cipher is CBC
- We can check if the padding of a given cipher is correct
- We can try to decrypt ciphertexts without limit
we can break the ciphertext with Padding Oracle Attack.
    Usage:
        plain = padding_oracle(decrypt, cipher, bs=16)
The function decrypt must receive ciphertext and return True or False:
True when the given cipher could successfully be decrypted (No padding error)
False when the given cipher cannot be decrypted (Padding error detected)
"""
if len(cipher) % bs != 0:
raise ValueError("The length of `cipher` must be a multiple of `bs`")
# Split ciphertext into blocks
cipher_blocks = []
for i in range(0, len(cipher), bs):
cipher_blocks.append(cipher[i : i + bs])
plain_blocks = [None for i in range(len(cipher_blocks))]
# Break the cipher
for k in range(len(cipher_blocks) - 1, 0, -1):
plain_blocks[k] = padding_oracle_block(
decrypt, cipher_blocks[k - 1], cipher_blocks[k], bs
)
logger.info(
"decrypted a block {}/{}: {}".format(
len(cipher_blocks) - k + 1, len(cipher_blocks), plain_blocks[k]
            )
        )
if isinstance(unknown, str):
unknown = str2bytes(unknown)
if iv:
plain_blocks[0] = padding_oracle_block(decrypt, iv, cipher_blocks[0], bs)
logger.info("decrypted an iv block: {}".format(plain_blocks[0]))
else:
plain_blocks[0] = unknown * bs
return b"".join(plain_blocks)
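# Usage sketch (hypothetical oracle; `decrypt` must return True only for valid padding):
# def oracle(ct: bytes) -> bool:
#     ...  # submit ct to the target and report whether the padding was accepted
# plaintext = padding_oracle(oracle, ciphertext, bs=16, iv=iv)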
|
077eeed2f8f0f2e91aa482c93f36825bdbcef17a
| 3,645,816
|
def pixels():
"""
Raspberry Pi pixels
"""
return render_template("pixels.html")
|
d3af0be80b09096e05a29ef3e9209cef2dba8431
| 3,645,817
|
async def get_song_info(id: str):
"""
    Fetch the details of a song.
"""
params = {'ids': id}
return get_json(base_url + '/song/detail', params=params)
|
2185c62db03bba3019d9d010fc5603c432a0048f
| 3,645,818
|
from bisect import bisect_left
def _find_odf_idx(map, position):
"""Find odf_idx in the map from the position (col or row).
"""
odf_idx = bisect_left(map, position)
if odf_idx < len(map):
return odf_idx
return None
|
642398d72abe89aa63b7537372499655af5a5ded
| 3,645,819
|
def get_or_create(session, model, **kwargs):
"""
Creates and returns an instance of the model with given kwargs,
if it does not yet exist. Otherwise, get instance and return.
Parameters:
session: Current database session
model: The Class of the database model
        **kwargs: The attributes for the desired instance
Returns:
(object): An object instance of the model with given kwargs
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance
else:
instance = model(**kwargs)
session.add(instance)
return instance
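# Usage sketch (assumes a SQLAlchemy session and a mapped User model; names are illustrative):
# user = get_or_create(session, User, email="alice@example.com")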
|
4d3e4f0da5ca61789171db5d8d16a5fa06e975cc
| 3,645,820
|
def pack_asn1(tag_class, constructed, tag_number, b_data):
"""Pack the value into an ASN.1 data structure.
The structure for an ASN.1 element is
| Identifier Octet(s) | Length Octet(s) | Data Octet(s) |
"""
b_asn1_data = bytearray()
if tag_class < 0 or tag_class > 3:
raise ValueError("tag_class must be between 0 and 3 not %s" % tag_class)
# Bit 8 and 7 denotes the class.
identifier_octets = tag_class << 6
# Bit 6 denotes whether the value is primitive or constructed.
identifier_octets |= ((1 if constructed else 0) << 5)
# Bits 5-1 contain the tag number, if it cannot be encoded in these 5 bits
# then they are set and another octet(s) is used to denote the tag number.
if tag_number < 31:
identifier_octets |= tag_number
b_asn1_data.append(identifier_octets)
else:
identifier_octets |= 31
b_asn1_data.append(identifier_octets)
b_asn1_data.extend(_pack_octet_integer(tag_number))
length = len(b_data)
# If the length can be encoded in 7 bits only 1 octet is required.
if length < 128:
b_asn1_data.append(length)
else:
# Otherwise the length must be encoded across multiple octets
length_octets = bytearray()
while length:
length_octets.append(length & 0b11111111)
length >>= 8
length_octets.reverse() # Reverse to make the higher octets first.
# The first length octet must have the MSB set alongside the number of
# octets the length was encoded in.
b_asn1_data.append(len(length_octets) | 0b10000000)
b_asn1_data.extend(length_octets)
return bytes(b_asn1_data) + b_data
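# Usage example: a context-specific (class 2), constructed element with tag number 0
# wrapping two payload bytes encodes as b"\xa0\x02\x01\xff".
if __name__ == "__main__":
    assert pack_asn1(2, True, 0, b"\x01\xff") == b"\xa0\x02\x01\xff"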
|
14aad1709b5efa46edc5d7ac8659fe1de0615a57
| 3,645,821
|
from typing import Dict
def choose(text: str, prompt: str, options: Dict[str, str], suggestion: str, none_allowed: bool):
"""
Helper function to ask user to select from a list of options (with optional description).
Suggestion can be given. 'None' can be allowed as a valid input value.
"""
p = ColorPrint()
key_list = list(options.keys())
p.print('\n'.join(wrap(text + ':', 80)))
p.print('{!y}[')
for k in range(len(key_list)):
elem = key_list[k]
description = options[elem]
if description:
p.print(' {!m}#{k}{!} {!y}{elem}{!}:', k=k, elem=elem)
for line in description.split('\n'):
p.print(' {line}', line=line)
else:
p.print(' {!m}#{k}{!} {!y}{elem}{!}', k=k, elem=elem)
p.print('{!y}]')
p.print('Selection can be made by unique prefix or index.')
while True:
val = ask(prompt, suggestion, str, none_allowed)
if val is None:
return val
try:
index = int(val)
if index in range(len(key_list)):
return key_list[index]
else:
p.error('{!r}No match for given index.')
        except ValueError:
matches = [key for key in options.keys() if key[:len(val)] == val]
if len(matches) == 0:
p.error('{!r}No match for given substring.')
elif len(matches) > 1:
p.error('{!r}Selection not unique for given substring.')
else:
return matches[0]
|
0b43452f00378ddc1345b85ca72b37ff1edfae05
| 3,645,822
|
import os
def get_environ_list(name, default=None):
"""Return the split colon-delimited list from an environment variable.
Returns an empty list if the variable didn't exist.
"""
packed = os.environ.get(name)
if packed is not None:
return packed.split(':')
elif default is not None:
return default
else:
return []
|
3e59962558b127790e456a79edf6175d1c3f7bbe
| 3,645,823
|
def util_color(
graph: list[list[int]], max_color: int, colored_vertices: list[int], index: int
) -> bool:
"""
    Flow:
    1. Check whether the coloring is complete
       1.1 If complete, return True
           (meaning we successfully colored the graph)
    Recursive step:
    2. Iterate over each color:
       Check whether the current coloring is valid:
       2.1. Color the given vertex
       2.2. Do a recursive call to check whether
            this coloring leads to a solution
       2.4. If the current coloring leads to a solution, return it
       2.5. Uncolor the given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
if index == len(graph):
return True
for i in range(max_color):
if coloring(graph[index], colored_vertices, i):
colored_vertices[index] = i
if util_color(graph, max_color, colored_vertices, index + 1):
return True
            colored_vertices[index] = -1
return False
|
081bf9e8b1e0dcc847fdd0bc78167819506e3f1c
| 3,645,824
|
def reverse_complement(sequence):
""" Return reverse complement of a sequence. """
complement_bases = {
'g':'c', 'c':'g', 'a':'t', 't':'a', 'n':'n',
'G':'C', 'C':'G', 'A':'T', 'T':'A', 'N':'N', "-":"-",
"R":"Y", "Y":"R", "S":"W", "W":"S", "K":"M", "M":"K",
"B":"V", "V":"B", "D": "H", "H": "D",
"r":"y", "y":"r", "s":"w", "w":"s", "k":"m", "m":"k",
"b":"v", "v":"b", "d": "h", "h": "d"
}
bases = list(sequence)
bases.reverse()
revcomp = []
for base in bases:
try:
revcomp.append(complement_bases[base])
except KeyError:
print("Unexpected base encountered: ", base, " returned as X!!!")
revcomp.append("X")
return "".join(revcomp)
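# Minimal usage check: the reverse complement of "ATGC" is "GCAT".
if __name__ == "__main__":
    assert reverse_complement("ATGC") == "GCAT"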
|
d28e520a9159cb4812079b4a7a5f2f6eb5723403
| 3,645,825
|
def get_variable_ddi(
name, shape, value, init, initializer=None, dtype=tf.float32,
regularizer=None, trainable=True):
"""Wrapper for data-dependent initialization."""
kwargs = {"trainable": trainable}
if initializer:
kwargs["initializer"] = initializer
if regularizer:
kwargs["regularizer"] = regularizer
w = tf.get_variable(name, shape, dtype, **kwargs)
if isinstance(init, bool):
if init:
return assign(w, value)
return w
else:
return tf.cond(init, lambda: assign(w, value), lambda: w)
|
b941f110ee8efbdfb9e4d2a6b5ba0a5b3e5881ed
| 3,645,826
|
from os import remove
import ffmpeg
def convert_to_mp3(path, start=None, end=None, cleanup_after_done=True):
"""Covert to mp3 using the python ffmpeg module."""
new_name = path + '_new.mp3'
params = {
"loglevel": "panic",
"ar": 44100,
"ac": 2,
"ab": '{}k'.format(defaults.DEFAULT.SONG_QUALITY),
"f": "mp3"
}
try:
if start is not None and end is not None:
params["ss"] = start
params["to"] = end
job = ffmpeg.input(path).output(
new_name,
**params
)
job.run()
# Delete the temp file now
if cleanup_after_done:
remove(path)
return new_name
except ffmpeg._run.Error:
        # This error is usually thrown when ffmpeg refuses to overwrite an
        # existing output file. The bug is from ffmpeg; this catch simply
        # handles that case.
return new_name
|
9d30c593e761103e6b434b530290637e8a4c345c
| 3,645,827
|
import torch.nn as nn
def conv3x3(in_planes, out_planes, Conv=nn.Conv2d, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return Conv(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
|
7741248a5af70e33abe803469c8a20eb2f4bcdb1
| 3,645,828
|
async def async_setup(hass, config):
"""Set up the AirVisual component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
|
e44beaaf7657848fa377700021671d6c27317696
| 3,645,829
|
def hasConnection(document):
"""
Check whether document has a child of :class:`Sea.adapter.connection.Connection`.
:param document: a :class:`FreeCAD.Document` instance
"""
return _hasObject(document, 'Connection')
|
d0999c488ea1af1d0117eb53a6e67d5ce876a142
| 3,645,830
|
from typing import List
import pandas as pd
def trsfrm_aggregeate_mulindex(df:pd.DataFrame,
grouped_cols:List[str],
agg_col:str,
operation:str,
k:int=5):
"""transform aggregate statistics for multiindex
Examples:
>>> df_agg = trsfrm_aggregeate_mulindex( df_train, ["store", "item"], 'sales', 'mean')
"""
cols = ["sum", "mean", "median", "std", "min", "max", "skew"]
lvl0,lvl1 = grouped_cols
df_agg = pd.DataFrame( df.groupby(grouped_cols)[agg_col].agg(cols) )[operation]
df_agg = df_agg.groupby(level=lvl0).nlargest(k).reset_index(level=1, drop=True)
df_agg = df_agg.reset_index()
    df_agg[lvl1] = df_agg[lvl1].astype('category')
return df_agg
|
f84ac88bb3f3474fe5611486031e746e4dc9954d
| 3,645,831
|
from typing import Optional
def get_hub_virtual_network_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_hub_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHubVirtualNetworkConnectionResult:
"""
HubVirtualNetworkConnection Resource.
:param str connection_name: The name of the vpn connection.
:param str resource_group_name: The resource group name of the VirtualHub.
:param str virtual_hub_name: The name of the VirtualHub.
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualHubName'] = virtual_hub_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200501:getHubVirtualNetworkConnection', __args__, opts=opts, typ=GetHubVirtualNetworkConnectionResult).value
return AwaitableGetHubVirtualNetworkConnectionResult(
allow_hub_to_remote_vnet_transit=__ret__.allow_hub_to_remote_vnet_transit,
allow_remote_vnet_to_use_hub_vnet_gateways=__ret__.allow_remote_vnet_to_use_hub_vnet_gateways,
enable_internet_security=__ret__.enable_internet_security,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
remote_virtual_network=__ret__.remote_virtual_network,
routing_configuration=__ret__.routing_configuration)
|
3275fdf70d088df2f00bfe9e0148026caca16fcc
| 3,645,832
|
import numpy as np
def newcombe_binomial_ratio_err(k1, n1, k2, n2, z=1.0):
""" Newcombe-Brice-Bonnett ratio confidence interval of two binomial proportions.
"""
RR = (k1/n1) / (k2/n2) # mean
logRR = np.log(RR)
seLogRR = np.sqrt(1/k1 + 1/k2 - 1/n1 - 1/n2)
ash = 2 * np.arcsinh(z/2 * seLogRR)
lower = np.exp(logRR - ash)
upper = np.exp(logRR + ash)
return np.array([lower, upper])
|
8ea31bcbbc1d6393e2d60d9ef6a1052b3b5347c5
| 3,645,833
|
from typing import Any
from typing import Optional
import json
from collections import defaultdict
def parse_metrics(rpcs: Any, detokenizer: Optional[detokenize.Detokenizer],
timeout_s: Optional[float]):
"""Detokenizes metric names and retrieves their values."""
# Creates a defaultdict that can infinitely have other defaultdicts
# without a specified type.
metrics: defaultdict = _tree()
if not detokenizer:
_LOG.error('No metrics token database set.')
return metrics
stream_response = rpcs.pw.metric.MetricService.Get(
pw_rpc_timeout_s=timeout_s)
if not stream_response.status.ok():
_LOG.error('Unexpected status %s', stream_response.status)
return metrics
for metric_response in stream_response.responses:
for metric in metric_response.metrics:
path_names = []
for path in metric.token_path:
path_name = str(
detokenize.DetokenizedString(path,
detokenizer.lookup(path), b'',
False)).strip('"')
path_names.append(path_name)
value = metric.as_float if metric.HasField(
'as_float') else metric.as_int
# inserting path_names into metrics.
_insert(metrics, path_names, value)
# Converts default dict objects into standard dictionaries.
return json.loads(json.dumps(metrics))
|
d169e9e247d8b969f6adf5161f0f7399a7b69da6
| 3,645,834
|
def cigarlist_to_cigarstring(cigar_list):
"""
Convert a list of tuples into a cigar string.
Example::
[ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
=> 10M 1I 75M 2D 20M
=> 10M1I75M2D20M
:param cigar_list: a list of tuples (code, length)
:type cigar_list: list
:return: the cigar string
:rtype: string
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
cigar = ''
if isinstance(cigar_list, Cigar):
try:
for i in cigar_list:
cigar += str(i.length) + i.code
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
else:
try:
for i in cigar_list:
cigar += str(i[1]) + CIGAR_N2C[i[0]]
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
return cigar
|
4d3a039f60f8976893e5ad3775f61fbfa2656acc
| 3,645,835
|
def add(x, y):
"""Add two numbers"""
return x+y
|
7f18ee62d6cd75e44a9401d000d9bcada63f2c24
| 3,645,836
|
import os
from OpenSSL import crypto
def generateCSR(host_id, key):
"""Generate a Certificate Signing Request"""
pod_name = os.environ['MY_POD_NAME']
namespace = os.environ['TEST_APP_NAMESPACE']
SANURI = f'spiffe://cluster.local/namespace/{namespace}/podname/{pod_name}'
req = crypto.X509Req()
req.get_subject().CN = host_id
req.set_pubkey(key)
formatted_SAN = f'URI:{SANURI}'
req.add_extensions([
crypto.X509Extension(
'subjectAltName'.encode('ascii'), False, formatted_SAN.encode('ascii')
)
])
req.sign(key, "sha1")
return crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
|
761cfe7b2627c38dcddce68be99f7ead4965369c
| 3,645,837
|
import logging
from functools import wraps
def sdecorator(decoratorHandleDelete: bool = False, expectedProperties: list = None, genUUID: bool = True,
enforceUseOfClass: bool = False, hideResourceDeleteFailure: bool = False,
redactConfig: RedactionConfig = None, timeoutFunction: bool = True):
"""Decorate a function to add input validation for resource handler functions, exception handling and send
CloudFormation responses.
Usage with Lambda:
import accustom
@accustom.sdecorator(expectedProperties=['key1','key2'],genUUID=False)
def resource_handler(event, context):
sum = (float(event['ResourceProperties']['key1']) +
float(event['ResourceProperties']['key2']))
return { 'sum' : sum }
Usage outside Lambda:
import accustom
@accustom.sdecorator(expectedProperties=['key1','key2'])
def resource_handler(event, context=None)
sum = (float(event['ResourceProperties']['key1']) +
float(event['ResourceProperties']['key2']))
r = accustom.ResponseObject(data={'sum':sum},physicalResourceId=event['PhysicalResourceId'])
return r
Args:
decoratorHandleDelete (boolean): When set to true, if a delete request is made in event the decorator will
return SUCCESS to CloudFormation without actually executing the decorated function
genUUID (boolean): When set to true, if the PhysicalResourceId in the event is not set, automatically generate
a UUID4 and put it in the PhysicalResourceId field.
expectedProperties (list of expected properties): Pass in a list or tuple of properties that you want to check
for before running the decorated function.
enforceUseOfClass (boolean): When true send a FAILED signal if a ResponseObject class is not utilised.
This is implicitly set to true if no Lambda Context is provided.
hideResourceDeleteFailure (boolean): When true will return SUCCESS even on getting an Exception for DELETE
requests. Note that this particular flag is made redundant if decoratorHandleDelete is set to True.
redactConfig (StandaloneRedactionConfig): Configuration of how to redact the event object.
timeoutFunction (boolean): Will automatically send a failure signal to CloudFormation 1 second before Lambda
timeout provided that this function is executed in Lambda
Returns:
The response object sent to CloudFormation
Raises:
FailedToSendResponseException
NotValidRequestObjectException
"""
if not isinstance(redactConfig, StandaloneRedactionConfig) and logger.getEffectiveLevel() <= logging.DEBUG:
logger.warning('A non valid StandaloneRedactionConfig was provided, and ignored')
redactConfig = None
def standalone_decorator_inner(func):
@wraps(func)
@decorator(enforceUseOfClass=enforceUseOfClass, hideResourceDeleteFailure=hideResourceDeleteFailure,
redactConfig=redactConfig, timeoutFunction=timeoutFunction)
@rdecorator(decoratorHandleDelete=decoratorHandleDelete, expectedProperties=expectedProperties, genUUID=genUUID)
def standalone_decorator_handler(event: dict, context: dict = None):
return func(event, context)
return standalone_decorator_handler
return standalone_decorator_inner
|
f06647b034c2c5fa10a84afed83673f3a8be15f7
| 3,645,838
|
def clean_acl(name, value):
"""
Returns a cleaned ACL header value, validating that it meets the formatting
requirements for standard Swift ACL strings.
The ACL format is::
[item[,item...]]
Each item can be a group name to give access to or a referrer designation
to grant or deny based on the HTTP Referer header.
The referrer designation format is::
.r:[-]value
The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
will be shortened to just ``.r`` for decreased character count usage.
The value can be ``*`` to specify any referrer host is allowed access, a
specific host name like ``www.example.com``, or if it has a leading period
``.`` or leading ``*.`` it is a domain name specification, like
``.example.com`` or ``*.example.com``. The leading minus sign ``-``
indicates referrer hosts that should be denied access.
Referrer access is applied in the order they are specified. For example,
.r:.example.com,.r:-thief.example.com would allow all hosts ending with
.example.com except for the specific host thief.example.com.
Example valid ACLs::
.r:*
.r:*,.r:-.thief.com
.r:*,.r:.example.com,.r:-thief.example.com
.r:*,.r:-.thief.com,bobs_account,sues_account:sue
bobs_account,sues_account:sue
Example invalid ACLs::
.r:
.r:-
By default, allowing read access via .r will not allow listing objects in
the container -- just retrieving objects from the container. To turn on
listings, use the .rlistings directive.
Also, .r designations aren't allowed in headers whose names include the
word 'write'.
ACLs that are "messy" will be cleaned up. Examples:
====================== ======================
Original Cleaned
---------------------- ----------------------
``bob, sue`` ``bob,sue``
``bob , sue`` ``bob,sue``
``bob,,,sue`` ``bob,sue``
``.referrer : *`` ``.r:*``
``.ref:*.example.com`` ``.r:.example.com``
``.r:*, .rlistings`` ``.r:*,.rlistings``
====================== ======================
:param name: The name of the header being cleaned, such as X-Container-Read
or X-Container-Write.
:param value: The value of the header being cleaned.
:returns: The value, cleaned of extraneous formatting.
:raises ValueError: If the value does not meet the ACL formatting
requirements; the error message will indicate why.
"""
name = name.lower()
values = []
for raw_value in value.split(','):
raw_value = raw_value.strip()
if not raw_value:
continue
if ':' not in raw_value:
values.append(raw_value)
continue
first, second = (v.strip() for v in raw_value.split(':', 1))
if not first or not first.startswith('.'):
values.append(raw_value)
elif first in ('.r', '.ref', '.referer', '.referrer'):
if 'write' in name:
raise ValueError('Referrers not allowed in write ACL: '
'%s' % repr(raw_value))
negate = False
if second and second.startswith('-'):
negate = True
second = second[1:].strip()
if second and second != '*' and second.startswith('*'):
second = second[1:].strip()
if not second or second == '.':
raise ValueError('No host/domain value after referrer '
'designation in ACL: %s' % repr(raw_value))
values.append('.r:%s%s' % ('-' if negate else '', second))
else:
raise ValueError('Unknown designator %s in ACL: %s' %
(repr(first), repr(raw_value)))
return ','.join(values)
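# Usage example matching the "messy ACL" table above:
if __name__ == "__main__":
    assert clean_acl('X-Container-Read', '.referrer : *, .ref:*.example.com') == '.r:*,.r:.example.com'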
|
1cceb2af22d2f5bbf223a0eb381b4c6643d76f0e
| 3,645,839
|
def test(X, Y, perms=10000, method="pearson", tail="two-tail", ignore_nans=False):
"""
Takes two distance matrices (either redundant matrices or condensed vectors)
and performs a Mantel test. The Mantel test is a significance test of the
correlation between two distance matrices.
Parameters
----------
X : array_like
First distance matrix (condensed or redundant).
Y : array_like
Second distance matrix (condensed or redundant), where the order of
elements corresponds to the order of elements in the first matrix.
perms : int, optional
The number of permutations to perform (default: 10000). A larger
number gives more reliable results but takes longer to run. If the
number of possible permutations is smaller, all permutations will
be tested. This can be forced by setting perms to 0.
method : str, optional
Type of correlation coefficient to use; either 'pearson' or 'spearman'
(default: 'pearson').
tail : str, optional
Which tail to test in the calculation of the empirical p-value; either
'upper', 'lower', or 'two-tail' (default: 'two-tail').
ignore_nans : bool, optional
Ignore NaN values in the Y matrix (default: False). This can be
useful if you have missing values in one of the matrices.
Returns
-------
r : float
Veridical correlation
p : float
Empirical p-value
z : float
Standard score (z-score)
"""
# Ensure that X and Y are represented as Numpy arrays.
X = np.asarray(X)
Y = np.asarray(Y)
# Check that X and Y are valid distance matrices.
if (
spatial.distance.is_valid_dm(np.nan_to_num(X)) == False
and spatial.distance.is_valid_y(X) == False
):
raise ValueError("X is not a valid condensed or redundant distance matrix")
if (
spatial.distance.is_valid_dm(np.nan_to_num(Y)) == False
and spatial.distance.is_valid_y(Y) == False
):
raise ValueError("Y is not a valid condensed or redundant distance matrix")
# If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.
if len(X.shape) == 2:
X = spatial.distance.squareform(X, force="tovector", checks=False)
if len(Y.shape) == 2:
Y = spatial.distance.squareform(Y, force="tovector", checks=False)
# Check for size equality.
if len(X) != len(Y):
raise ValueError("X and Y are not of equal size")
# Check for minimum size.
if len(X) < 3:
raise ValueError("X and Y should represent at least 3 objects")
# Check finiteness of X and Y
if not np.isfinite(X).all():
raise ValueError(
"X cannot contain NaNs (but Y may contain NaNs, so consider reordering X and Y)"
)
finite_Y = np.isfinite(Y)
if not ignore_nans and not finite_Y.all():
raise ValueError('Y may contain NaNs, but "ignore_nans" must be set to True')
if ignore_nans and finite_Y.all():
ignore_nans = False # ignore_nans is True but Y contains no nans
# If Spearman correlation is requested, convert X and Y to ranks.
method = method.lower()
if method == "spearman":
X, Y = stats.rankdata(X), stats.rankdata(Y)
Y[~finite_Y] = np.nan # retain any nans, so that these can be ignored later
# Check for valid method parameter.
elif method != "pearson":
raise ValueError('The method should be set to "pearson" or "spearman"')
# Check for valid tail parameter.
tail = tail.lower()
if tail not in ["upper", "lower", "two-tail"]:
raise ValueError('The tail should be set to "upper", "lower", or "two-tail"')
# Now we're ready to start the Mantel test using a number of optimizations:
#
# 1. Rather than compute correlation coefficients, we'll just compute the
# covariances. This works because the denominator in the equation for the
# correlation coefficient will yield the same result however the objects
# are permuted, making it redundant. Removing the denominator leaves us
# with the covariance.
#
# 2. Rather than permute the Y distances and derive the residuals to calculate
# the covariance with the X distances, we'll represent the Y residuals in
# the matrix and shuffle those directly.
#
# 3. If the number of possible permutations is less than the number of
# permutations that were requested, we'll run a deterministic test where
# we try all possible permutations rather than sample the permutation
# space. This gives a faster, deterministic result.
# Calculate the X and Y residuals, which will be used to compute the
# covariance under each permutation.
X_residuals = X - np.mean(X[finite_Y])
Y_residuals = Y - np.mean(Y[finite_Y])
# Expand the Y residuals to a redundant matrix.
Y_residuals_as_matrix = spatial.distance.squareform(
Y_residuals, force="tomatrix", checks=False
)
m = len(Y_residuals_as_matrix) # number of objects
n = np.math.factorial(m) # number of possible matrix permutations
# If the number of requested permutations is greater than the number of
# possible permutations (m!) or the perms parameter is set to 0, then run a
# deterministic Mantel test
if perms >= n or perms == 0:
if ignore_nans:
correlations = deterministic_test_with_nans(m, n, X, Y_residuals_as_matrix)
else:
correlations = deterministic_test(m, n, X_residuals, Y_residuals_as_matrix)
# correlations[0] is the veridical correlation
else:
if ignore_nans:
correlations = stochastic_test_with_nans(m, perms, X, Y_residuals_as_matrix)
else:
correlations = stochastic_test(m, perms, X_residuals, Y_residuals_as_matrix)
correlations[0] = sum(X_residuals[finite_Y] * Y_residuals[finite_Y]) / np.sqrt(
sum(X_residuals[finite_Y] ** 2) * sum(Y_residuals[finite_Y] ** 2)
) # compute veridical correlation and place in positon 0
r = correlations[0]
if tail == "upper":
p = sum(correlations >= r) / len(correlations)
elif tail == "lower":
p = sum(correlations <= r) / len(correlations)
elif tail == "two-tail":
p = sum(abs(correlations) >= abs(r)) / len(correlations)
z = (r - np.mean(correlations)) / np.std(correlations)
return r, p, z
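# Usage sketch: compare two random symmetric distance matrices (illustrative only).
# rng = np.random.default_rng(0)
# X = rng.random((10, 10)); X = (X + X.T) / 2; np.fill_diagonal(X, 0)
# Y = rng.random((10, 10)); Y = (Y + Y.T) / 2; np.fill_diagonal(Y, 0)
# r, p, z = test(X, Y, perms=1000)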
|
7f0d7447ed475292f221e1dc6e4944f5cb2e8bd4
| 3,645,840
|
from dateutil import parser
def get_format_datestr(date_str, to_format='%Y-%m-%d'):
"""
Args:
date_str (str): ''
to_format (str): '%Y-%m-%d'
Returns:
date string (str)
"""
date_obj = parser.parse(date_str).date()
return date_obj.strftime(to_format)
|
bf443aad3ca38eb35b647d26b38b1404cf82f387
| 3,645,841
|
def lor(*goalconsts):
""" Logical or for goal constructors
>>> from logpy.arith import lor, eq, gt
>>> gte = lor(eq, gt) # greater than or equal to is `eq or gt`
"""
def goal(*args):
return lany(*[gc(*args) for gc in goalconsts])
return goal
|
9726cc24f6d79214e652d42ff1b872f60b5a4594
| 3,645,842
|
import time
def kalman_smoother(Z, M_inv, plotting=False):
"""
X: state
U: control
Z: observation (position and forces)
F: state transition model
B: control input model
Q: process variance
R: observation variance
"""
t_steps = Z.shape[0]
x0 = np.r_[Z[0,0:6],
np.zeros(4*6)]
P0 = np.eye(5*6)
P0[0*6:1*6,0*6:1*6] *= 0.005 # small
P0[1*6:2*6,1*6:2*6] *= 0 # small
P0[2*6:3*6,2*6:3*6] *= 0 # small
P0[3*6:4*6,3*6:4*6] *= 100 # medium # TODO
P0[4*6:5*6,4*6:5*6] *= 1 # high # TODO
# transition matrix
F = np.zeros((t_steps-1, 5*6, 5*6))
# observation matrix
H = np.r_[np.c_[ np.eye(6), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6)]]
for t in range(t_steps-1):
F[t] = np.r_[np.c_[ np.eye(6), DT*np.eye(6), (DT**2)*np.eye(6), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.eye(6), DT*np.eye(6), np.zeros((6,6)), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), M_inv[t], M_inv[t]],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6), np.zeros((6,6))],
np.c_[np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.zeros((6,6)), np.eye(6)]]
# transition covariance
Q = np.eye(5*6)
Q[0*6:1*6,0*6:1*6] *= 0.005 # small
Q[1*6:2*6,1*6:2*6] *= (0.005/DT) # small
Q[2*6:3*6,2*6:3*6] *= (1/(DT*DT)) # small
Q[3*6:4*6,3*6:4*6] *= 100 # medium # TODO
Q[4*6:5*6,4*6:5*6] *= 1 # high # TODO
# observation covariance
R = np.eye(2*6)
R[0*6:1*6,0*6:1*6] *= 0.005
R[1*6:2*6,1*6:2*6] *= 5 # TODO
def em_transition_matrix(transition_offsets, smoothed_state_means,
smoothed_state_covariances, pairwise_covariances):
res = F.copy()
n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
        print("em_transition_matrix")
time_start = time.time()
for tt in range(1, n_timesteps):
if tt % 100 == 0:
                print(tt)
t_start = np.clip(tt-500, 1, n_timesteps)
t_end = np.clip(tt+500+1, 1, n_timesteps)
res1 = np.zeros((n_dim_state, n_dim_state))
res2 = np.zeros((n_dim_state, n_dim_state))
ws = np.exp(-((np.arange(t_start, t_end)-tt)**2)/(200.0**2))
ws /= ws.sum()
for t, w in zip(range(t_start, t_end), ws):
transition_offset = _last_dims(transition_offsets, t - 1, ndims=1)
res1 += w * (
pairwise_covariances[t]
+ np.outer(smoothed_state_means[t],
smoothed_state_means[t - 1])
- np.outer(transition_offset, smoothed_state_means[t - 1])
)
res2 += w * (
smoothed_state_covariances[t - 1]
+ np.outer(smoothed_state_means[t - 1],
smoothed_state_means[t - 1])
)
# res[tt-1] = np.linalg.solve(res2.T, res1.T).T
# M_inv = np.linalg.solve((res2[0*6:1*6,:] + res2[1*6:2*6,:]).T, res1[2*6:3*6,:].T)
F_tmp = np.dot(res1, np.linalg.pinv(res2))
m_inv0 = F_tmp[2*6:3*6,3*6:4*6]
m_inv1 = F_tmp[2*6:3*6,4*6:5*6]
m_inv = (m_inv0 + m_inv1) / 2.
res[tt-1,2*6:3*6,3*6:4*6] = m_inv
res[tt-1,2*6:3*6,4*6:5*6] = m_inv
        print("time", time.time() - time_start)
return res
kf = KalmanFilter(transition_matrices=F, observation_matrices=H, transition_covariance=Q, observation_covariance=R,
initial_state_mean=x0, initial_state_covariance=P0)
kf = kf.em(Z, n_iter=5, em_vars=['transition_covariance', 'observation_covariance'])
# kf = kf.em(Z, n_iter=5, em_vars=['transition_matrices'], em_transition_matrix=em_transition_matrix)
(X_smoothed, P_smoothed) = kf.smooth(Z)
if plotting:
plt.ion()
fig = plt.figure()
for i in range(5):
plt.subplot(5,2,2*i+1)
plt.plot(X_smoothed[:,6*i], 'r')
plt.plot(X_smoothed[:,6*i+1], 'g')
plt.plot(X_smoothed[:,6*i+2], 'b')
plt.subplot(5,2,2*i+1+1)
plt.plot(X_smoothed[:,6*i+3], 'r')
plt.plot(X_smoothed[:,6*i+4], 'g')
plt.plot(X_smoothed[:,6*i+5], 'b')
plt.draw()
fig = plt.figure()
for i in range(2):
plt.subplot(2,2,2*i+1)
plt.plot(Z[:,6*i], 'r')
plt.plot(Z[:,6*i+1], 'g')
plt.plot(Z[:,6*i+2], 'b')
plt.subplot(2,2,2*i+1+1)
plt.plot(Z[:,6*i+3], 'r')
plt.plot(Z[:,6*i+4], 'g')
plt.plot(Z[:,6*i+5], 'b')
plt.draw()
return X_smoothed
|
927062585686897462ca866819ecdff22aed245c
| 3,645,843
|
import numpy as np
from scipy.spatial import ConvexHull
def get_convex_hull(coords, dim=2, needs_at_least_n_points=6):  # FIXME restrict only for 2D?
"""
For fitting an ellipse, at least 6 points are needed
Parameters
----------
coords : 2D np.array of points
dim : dimensions to keep when calculating convex hull
Returns
---------
coords_hull : 2D np.array of points
keeps original number of dimension as input coords
"""
assert len(coords[0]) >= dim
hull = ConvexHull([i[:dim] for i in coords])
coords_hull = [coords[i] for i in range(len(coords)) if i in hull.vertices]
for i in range(needs_at_least_n_points - len(hull.vertices)):
coords_hull.append(0.9999 * coords_hull[i]) #making the point slightly different
coords_hull = np.array(coords_hull)
return coords_hull
|
6939db4475b9e11c8d0e53a5d820773bb899f15a
| 3,645,844
|
import logging
import gem.gemtools as gt  # assumed: `gt` is the gemtools bindings module; the original 'from operator import gt' does not provide gt.InputFile
def score(input,
index,
output=None,
scoring="+U,+u,-s,-t,+1,-i,-a",
filter=None, # "1,2,25"
quality=None,
compress=False,
threads=1,
raw=False,
remove_existing=False):
"""Score the input. In addition, you can specify a tuple with (<score_strata_to_keep>,<max_strata_distance>,<max_alignments>) to
filter the result further.
"""
if compress and output is None:
logging.warning("Disabeling stream compression")
compress = False
if compress and not output.endswith(".gz"):
output += ".gz"
quality = _prepare_quality_parameter(quality)
if quality in ['none', 'ignore']:
quality = 'offset-33'
index = _prepare_index_parameter(index, gem_suffix=True)
score_p = [executables['gem-2-gem'],
'-I', index,
'-q', quality,
'-s', scoring,
'-T', str(threads)
]
if filter is not None:
score_p.append("-f")
ff = filter
if not isinstance(filter, basestring):
ff = ",".join([str(f) for f in filter])
score_p.append(ff)
if raw or isinstance(input, gt.InputFile):
raw = True
if isinstance(input, gt.InputFile) and remove_existing:
input.remove_scores = True
raw = False
#input = input.raw_stream()
tools = [score_p]
if compress:
gzip = _compressor(threads=threads)
tools.append(gzip)
process = utils.run_tools(tools, input=input, output=output, name="GEM-Score", write_map=True, raw=raw)
return _prepare_output(process, output=output)
|
c4b0fee2df964e65ee0aec12c84b0b1d7985a254
| 3,645,845
|
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import F, QuerySet
def keyword_search(queryset: QuerySet, keywords: str) -> QuerySet:
"""
Performs a keyword search over a QuerySet
Uses PostgreSQL's full text search features
Args:
queryset (QuerySet): A QuerySet to be searched
keywords (str): A string of keywords to search the QuerySet
Returns:
QuerySet: A QuerySet filtered by keywords
"""
query = SearchQuery(keywords)
rank_annotation = SearchRank(F("search_vector"), query)
filtered_queryset = (
queryset.annotate(rank=rank_annotation)
.filter(search_vector=query)
.order_by("-rank")
)
return filtered_queryset
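# Usage sketch (assumes an Article model with an indexed `search_vector` field; names are illustrative):
# results = keyword_search(Article.objects.all(), "postgres full text")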
|
1fb38af2c3aa3bdf092196e8e12539e0a2cf9e58
| 3,645,846
|
import paddle.fluid as fluid
def classification_loss(hidden, labels, n_class, initializer, name, reuse=None,
return_logits=False):
"""
Different classification tasks should use different scope names to ensure
different dense layers (parameters) are used to produce the logits.
An exception will be in transfer learning, where one hopes to transfer
the classification weights.
"""
logits = fluid.layers.fc(
input=hidden,
size=n_class,
param_attr=fluid.ParamAttr(name=name+'_logits', initializer=initializer))
one_hot_target = fluid.layers.one_hot(labels, depth=n_class, dtype=hidden.dtype)
    loss = -fluid.layers.reduce_sum(fluid.layers.log_softmax(logits) * one_hot_target, -1)
if return_logits:
return loss, logits
return loss
|
c89fd14fae7099b43f639bf0825600e26b60e417
| 3,645,847
|
def do_pdfimages(pdf_file, state, page_number=None, use_tmp_identifier=True):
"""Convert a PDF file to images in the TIFF format.
:param pdf_file: The input file.
:type pdf_file: jfscripts._utils.FilePath
:param state: The state object.
:type state: jfscripts.pdf_compress.State
:param int page_number: Extract only the page with a specific page number.
:return: The return value of `subprocess.run`.
:rtype: subprocess.CompletedProcess
"""
if use_tmp_identifier:
image_root = '{}_{}'.format(pdf_file.basename, tmp_identifier)
else:
image_root = pdf_file.basename
command = ['pdfimages', '-tiff', str(pdf_file), image_root]
if page_number:
page_number = str(page_number)
page_segments = ['-f', page_number, '-l', page_number]
command = command[:2] + page_segments + command[2:]
return run.run(command, cwd=state.common_path)
|
e5a48cdf2c93b037c4f983a56467e839920fa06c
| 3,645,848
|
from os import chdir
from pathlib import Path
import subprocess
def git_patch_tracked(path: Path) -> str:
""" Generate a patchfile of the diff for all tracked files in the repo
This function catches all exceptions to make it safe to call at the end of
dataset creation or model training
Args:
path (Path): path to a directory inside a git repo. Unless you have a reason
not to, this should be the root of the repo for maximum coverage
Returns:
        str: patchfile for tracked files, or error message if unable to execute cmd
"""
# store cwd and change to path
cwd = Path.cwd()
chdir(path)
# execute command
tracked_patch = "git --no-pager diff -u ."
try:
out = subprocess.check_output(tracked_patch.split()).decode('utf-8')
except Exception as e:
out = str(e)
# restore cwd
chdir(cwd)
return out
|
374d80f8d1f74de76ab1dc1305f0772fde85d4f5
| 3,645,849
|
def connect(transport=None, host='localhost', username='admin',
password='', port=None, key_file=None, cert_file=None,
ca_file=None, timeout=60, return_node=False, **kwargs):
""" Creates a connection using the supplied settings
This function will create a connection to an Arista EOS node using
the arguments. All arguments are optional with default values.
Args:
transport (str): Specifies the type of connection transport to use.
Valid values for the connection are socket, http_local, http, and
https. The default value is specified in DEFAULT_TRANSPORT
        host (str): The IP address or DNS host name of the connection device.
The default value is 'localhost'
username (str): The username to pass to the device to authenticate
the eAPI connection. The default value is 'admin'
password (str): The password to pass to the device to authenticate
the eAPI connection. The default value is ''
port (int): The TCP port of the endpoint for the eAPI connection. If
this keyword is not specified, the default value is automatically
determined by the transport type. (http=80, https=443)
key_file (str): Path to private key file for ssl validation
cert_file (str): Path to PEM formatted cert file for ssl validation
ca_file (str): Path to CA PEM formatted cert file for ssl validation
timeout (int): timeout
return_node (bool): Returns a Node object if True, otherwise
returns an EapiConnection object.
Returns:
An instance of an EapiConnection object for the specified transport.
"""
transport = transport or DEFAULT_TRANSPORT
connection = make_connection(transport, host=host, username=username,
password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file,
port=port, timeout=timeout)
if return_node:
return Node(connection, transport=transport, host=host,
username=username, password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file, port=port, **kwargs)
return connection
|
09407d39e624f9a863a7633627d042b17b7a6158
| 3,645,850
|
import numpy as np
def cov_hc2(results):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(np.dot(results.model.exog,
np.dot(results.normalized_cov_params,
results.model.exog.T)))
het_scale = results.resid**2/(1-h)
cov_hc2_ = _HCCM(results, het_scale)
return cov_hc2_
|
328eeb88e37a2d78a6c0f0f9b3b81459230d87d5
| 3,645,851
|
def add_people():
"""
Show add form
"""
if request.method == 'POST':
#save data to database
db_conn = get_connection()
cur = db_conn.cursor()
print ('>'*10, request.form)
firstname = request.form['first-name']
lastname = request.form['last-name']
address = request.form['address']
country = request.form['country']
# if firstname is not empty, insert into table:
if firstname.strip():
_add_sql = '''
INSERT INTO peoples(firstname, lastname, address, country)
VALUES(?,?,?,?)
'''
cur.execute(_add_sql, (firstname.strip(),
lastname.strip(), address.strip(), country.strip()
))
db_conn.commit()
#redirect to list page
return redirect(url_for('list_people'))
else:
#redirect to add page with error
return redirect(url_for('add_people'))
return render_template('add.jinja2')
|
db2fcd7a2d9ed0073741d02a0bcafef37f714299
| 3,645,852
|
import inspect
def api_to_schema(api: "lightbus.Api") -> dict:
"""Produce a lightbus schema for the given API"""
schema = {"rpcs": {}, "events": {}}
if isinstance(api, type):
raise InvalidApiForSchemaCreation(
"An attempt was made to derive an API schema from a type/class, rather than "
"from an instance of an API. This is probably because you are passing an API "
"class to api_to_schema(), rather than an instance of the API class."
)
for member_name, member in inspect.getmembers(api):
if member_name.startswith("_"):
# Don't create schema from private methods
continue
if hasattr(Api, member_name):
# Don't create schema for methods defined on Api class
continue
if inspect.ismethod(member):
schema["rpcs"][member_name] = {
"parameters": make_rpc_parameter_schema(api.meta.name, member_name, method=member),
"response": make_response_schema(api.meta.name, member_name, method=member),
}
elif isinstance(member, Event):
schema["events"][member_name] = {
"parameters": make_event_parameter_schema(api.meta.name, member_name, event=member)
}
return schema
|
d07f6c6915967a1e61bc8f9bd1b72adb24207684
| 3,645,853
|
def sum2(u : SignalUserTemplate, initial_state=0):
"""Accumulative sum
Parameters
----------
u : SignalUserTemplate
the input signal
initial_state : float, SignalUserTemplate
the initial state
Returns
-------
SignalUserTemplate
the output signal of the filter
Details:
--------
The difference equation
y[k+1] = y[k] + u[k]
is evaluated. The return values are
y[k], y[k+1]
"""
y_k = dy.signal()
y_kp1 = y_k + u
y_k << dy.delay(y_kp1, initial_state=initial_state)
return y_k, y_kp1
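# A minimal NumPy sketch of the same accumulator y[k+1] = y[k] + u[k], shown
# here only as a reference for checking the dy-based implementation above;
# it is not part of the dy signal framework.
import numpy as np

def sum2_reference(u, initial_state=0.0):
    y = np.empty(len(u) + 1)
    y[0] = initial_state
    for k in range(len(u)):
        y[k + 1] = y[k] + u[k]
    return y[:-1], y[1:]  # y[k], y[k+1]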
|
3649942de13f698a92703747d8ea73be7ece4ddb
| 3,645,854
|
def approve_report(id):
"""
Function to approve a report
"""
# Approve the vulnerability_document record
resource = s3db.resource("vulnerability_document", id=id, unapproved=True)
resource.approve()
# Read the record details
vdoc_table = db.vulnerability_document
record = db(vdoc_table.id == id).select(vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
# Approve the linked records
document_type = record.document_type
if document_type == "indicator":
tablename = "vulnerability_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "vulnerability_update_aggregates"
elif document_type == "demographic":
tablename = "stats_demographic_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "stats_demographic_update_aggregates"
elif document_type in ("map", "image"):
tablename = "doc_image"
query = (s3db[tablename].doc_id == record.doc_id)
elif document_type in ("vca", "other"):
tablename = "doc_document"
query = (s3db[tablename].doc_id == record.doc_id)
else:
current.log.error("Report not Approved as unknown type", document_type)
return False
resource = s3db.resource(tablename, filter=query, unapproved=True)
resource.approve()
if document_type in ("indicator", "demographic"):
# Rebuild the relevant aggregates
rows = resource.select(fields=["data_id",
"parameter_id",
"date",
"location_id",
"value"],
as_rows=True)
s3task.run_async(agg_function, vars = {"records": rows.json()})
return True
|
ce1bdb00a5fb6958c51422543e62f289de5e96cb
| 3,645,855
|
def sparse_column_multiply(E, a):
"""
    Multiply each column of the sparse matrix E by the corresponding entry of a
    Parameters
    ----------
    E: `np.array` or `sp.spmatrix`
    a: `np.array`
        A vector of per-column scale factors.
Returns
-------
Rescaled sparse matrix
"""
ncol = E.shape[1]
if ncol != a.shape[0]:
logg.error("Dimension mismatch, multiplication failed")
return E
else:
w = ssp.lil_matrix((ncol, ncol))
w.setdiag(a)
return ssp.csr_matrix(E) * w
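# Hedged usage sketch for sparse_column_multiply above; the matrix and scale
# vector are made-up test data, and E.multiply(...) is used only as an
# independent cross-check of the per-column scaling.
import numpy as np
import scipy.sparse as ssp

E = ssp.random(4, 3, density=0.5, format='csr')
a = np.array([1.0, 2.0, 0.5])
expected = E.multiply(a[np.newaxis, :]).toarray()  # scale column j by a[j]
assert np.allclose(sparse_column_multiply(E, a).toarray(), expected)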
|
a215440e630aeb79758e8b0d324ae52ea87eba52
| 3,645,856
|
def soup_extract_enzymelinks(tabletag):
"""Extract all URLs for enzyme families from first table."""
return {link.string: link['href']
for link in tabletag.find_all("a", href=True)}
|
7baabd98042ab59feb5d8527c18fe9fa4b6a50af
| 3,645,857
|
def choose(db_issue: Issue, db_user: User, pgroup_ids: [int], history: str, path: str) -> dict:
"""
Initialize the choose step for more than one premise in a discussion. Creates helper and returns a dictionary
containing several feedback options regarding this argument.
    :param db_issue: issue of the current discussion
    :param db_user: current user
    :param pgroup_ids: ids of the premise groups the user can choose from
    :param history: history string of the discussion
    :param path: current request path
    :return: dict with the prepared issue, discussion, item and title information
"""
LOG.debug("Entering choose function")
issue_dict = issue_helper.prepare_json_of_issue(db_issue, db_user)
disc_ui_locales = issue_dict['lang']
created_argument: Argument = DBDiscussionSession.query(Argument).filter(
Argument.premisegroup_uid == pgroup_ids[0]).one()
is_supportive = created_argument.is_supportive
conclusion_is_argument = created_argument.attacks is not None
if conclusion_is_argument:
conclusion = created_argument.attacks
else:
conclusion = created_argument.conclusion
_ddh = DiscussionDictHelper(disc_ui_locales, db_user.nickname, history, slug=db_issue.slug)
_idh = ItemDictHelper(disc_ui_locales, db_issue, path=path, history=history)
discussion_dict = _ddh.get_dict_for_choosing(conclusion.uid, conclusion_is_argument, is_supportive)
item_dict = _idh.get_array_for_choosing(conclusion.uid, pgroup_ids, conclusion_is_argument, is_supportive,
db_user.nickname)
return {
'issues': issue_dict,
'discussion': discussion_dict,
'items': item_dict,
'title': issue_dict['title']
}
|
0404e955a7872086d45ccf4018bc8a9977c2df21
| 3,645,858
|
def loops_NumbaJit_parallelFast(csm, r0, rm, kj):
""" This method implements the prange over the Gridpoints, which is a direct
implementation of the currently used c++ methods created with scipy.wave.
Very strange: Just like with Cython, this implementation (prange over Gridpoints)
produces wrong results. If one doesn't parallelize -> everything is good
(just like with Cython). Maybe Cython and Numba.jit use the same interpreter
to generate OpenMP-parallelizable code.
BUT: If one uncomments the 'steerVec' declaration in the prange-loop over the
gridpoints an error occurs. After commenting the line again and executing
the script once more, THE BEAMFORMER-RESULTS ARE CORRECT (for repeated tries).
Funny enough the method is now twice as slow in comparison to the
'wrong version' (before invoking the error).
"""
# init
nFreqs = csm.shape[0]
nGridPoints = len(r0)
nMics = csm.shape[1]
beamformOutput = np.zeros((nFreqs, nGridPoints), np.float64)
steerVec = np.zeros((nMics), np.complex128)
for cntFreqs in xrange(nFreqs):
kjj = kj[cntFreqs].imag
for cntGrid in prange(nGridPoints):
# steerVec = np.zeros((nMics), np.complex128) # This is the line that has to be uncommented (see this methods documentation comment)
rs = 0
r01 = r0[cntGrid]
for cntMics in xrange(nMics):
rm1 = rm[cntGrid, cntMics]
rs += 1.0 / (rm1**2)
temp3 = np.float32(kjj * (rm1 - r01))
steerVec[cntMics] = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
rs = r01 ** 2
temp1 = 0.0
for cntMics in xrange(nMics):
temp2 = 0.0
for cntMics2 in xrange(cntMics):
temp2 = temp2 + csm[cntFreqs, cntMics2, cntMics] * steerVec[cntMics2]
temp1 = temp1 + 2 * (temp2 * steerVec[cntMics].conjugate()).real
temp1 = temp1 + (csm[cntFreqs, cntMics, cntMics] * np.conjugate(steerVec[cntMics]) * steerVec[cntMics]).real
beamformOutput[cntFreqs, cntGrid] = (temp1 / rs).real
return beamformOutput
|
82201310483b72c525d1488b5229e628d44a65ca
| 3,645,859
|
import numpy
import scipy
def sobel_vertical_gradient(image: numpy.ndarray) -> numpy.ndarray:
"""
Computes the Sobel gradient in the vertical direction.
Args:
image: A two dimensional array, representing the image from which the vertical gradient will be calculated.
Returns:
A two dimensional array, representing the vertical gradient of the image.
"""
ky = numpy.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
return scipy.ndimage.convolve(image, ky)
|
f8f9bf6fadbae962206255ab3de57edbab9d935e
| 3,645,860
|
def custom_field_sum(issues, custom_field):
"""Sums custom field values together.
Args:
issues: List The issue list from the JQL query
custom_field: String The custom field to sum.
Returns:
Integer of the sum of all the found values of the custom_field.
"""
custom_field_running_total = 0
for issue in issues:
        if getattr(issue.fields, custom_field) is None:
            # Issues without a value for the custom field contribute a flat 2 to the total.
            custom_field_running_total = custom_field_running_total + 2
else:
custom_field_running_total = custom_field_running_total + \
getattr(issue.fields, custom_field)
return custom_field_running_total
|
32c1cce310c06f81036ee79d70a8d4bbe28c8417
| 3,645,861
|
def routingAreaUpdateReject():
"""ROUTING AREA UPDATE REJECT Section 9.4.17"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0xb) # 00001011
c = GmmCause()
d = ForceToStandbyAndSpareHalfOctets()
packet = a / b / c / d
return packet
|
b9bb0e498a768eb6b7875018c78ca23e54353620
| 3,645,862
|
def doRipsFiltration(X, maxHomDim, thresh = -1, coeff = 2, getCocycles = False):
"""
Run ripser assuming Euclidean distance of a point cloud X
:param X: An N x d dimensional point cloud
:param maxHomDim: The dimension up to which to compute persistent homology
:param thresh: Threshold up to which to add edges. If not specified, add all
edges up to the full clique
:param coeff: A prime to use as the field coefficients for the PH computation
:param getCocycles: True if cocycles should be computed and returned
:return: PDs (array of all persistence diagrams from 0D up to maxHomDim).
Each persistence diagram is a numpy array
OR
tuple (PDs, Cocycles) if returning cocycles
"""
D = getSSM(X)
return doRipsFiltrationDM(D, maxHomDim, thresh, coeff, getCocycles)
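# Hedged usage sketch for doRipsFiltration above; X is a made-up point cloud
# and getSSM/doRipsFiltrationDM are assumed to be available in this module:
# X = np.random.randn(100, 2)
# PDs = doRipsFiltration(X, maxHomDim=1)
# PDs[1] then holds the birth/death pairs of the 1-dimensional classes.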
|
a0e4cabb613ac77659fda2d31867a7e9df32f288
| 3,645,863
|
def build_target_areas(entry):
"""Cleanup the raw target areas description string"""
target_areas = []
areas = str(entry['cap:areaDesc']).split(';')
for area in areas:
target_areas.append(area.strip())
return target_areas
|
48e76a5c1ed42aed696d441c71799b47f9193b29
| 3,645,864
|
def convert_to_celcius(scale, temp):
"""Convert the specified temperature to Celcius scale.
:param int scale: The scale to convert to Celcius.
:param float temp: The temperature value to convert.
:returns: The temperature in degrees Celcius.
:rtype: float
"""
if scale == temp_scale.FARENHEIT:
return convert_farenheit_to_celcius(temp)
elif scale == temp_scale.CELCIUS:
return temp
elif scale == temp_scale.KELVIN:
return convert_kelvin_to_celcius(temp)
elif scale == temp_scale.RANKINE:
return convert_rankine_to_celcius(temp)
else:
return 0.0
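# Hedged usage sketch for convert_to_celcius above; temp_scale and the
# convert_*_to_celcius helpers are assumed to be defined elsewhere in this
# module, so the calls are left as comments:
# convert_to_celcius(temp_scale.FARENHEIT, 212.0)  # -> 100.0
# convert_to_celcius(temp_scale.KELVIN, 273.15)    # -> 0.0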
|
a7c2f0f7405eea96c1ebb4a7e103cf28a95b6f5a
| 3,645,865
|
def config_file_settings(request):
"""
Update file metadata settings
"""
if request.user.username != 'admin':
return redirect('project-admin:home')
if request.method == 'POST':
update_file_metadata(request.POST)
return redirect('project-admin:home')
files = FileMetaData.objects.all()
for file in files:
file.tags = file.get_tags()
return render(request, 'project_admin/config-file-settings.html',
context={"files": files})
|
c8e91ac49305e3aa7aa33961939c3add23fc5327
| 3,645,866
|
def roundtrip(sender, receiver):
"""
Send datagrams from `sender` to `receiver` and back.
"""
return transfer(sender, receiver), transfer(receiver, sender)
|
939d9fd861b89037322fcc7c851d291ab073b520
| 3,645,867
|
import json
def loadHashDictionaries():
"""
Load dictionaries containing id -> hash and hash -> id mappings
These dictionaries are essential due to some restrictive properties
of the anserini repository
Return both dictionaries
"""
with open(PATH + PATH_ID_TO_HASH, "r") as f:
id_to_hash_dict = json.load(f)
with open(PATH + PATH_HASH_TO_ID, "r") as f:
hash_to_id_dict = json.load(f)
return id_to_hash_dict, hash_to_id_dict
|
de8af6d5e5562869c992e08343aadb77c48933b0
| 3,645,868
|
def preprocess(tensor_dict, preprocess_options, func_arg_map=None):
"""Preprocess images and bounding boxes.
Various types of preprocessing (to be implemented) based on the
preprocess_options dictionary e.g. "crop image" (affects image and possibly
boxes), "white balance image" (affects only image), etc. If self._options
is None, no preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor contains
1 image -> [1, height, width, 3].
with pixel values varying between [0, 1]
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
# if len(images.get_shape()) != 4:
# raise ValueError('images in tensor_dict should be rank 4')
# image = tf.squeeze(images, squeeze_dims=[0])
if len(image.get_shape()) != 3:
raise ValueError('images in tensor_dict should be rank 3')
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
# # changes the image to images (rank 3 to rank 4) to be compatible to what
# # we received in the first place
# if fields.InputDataFields.image in tensor_dict:
# image = tensor_dict[fields.InputDataFields.image]
# images = tf.expand_dims(image, 0)
# tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
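# Hedged illustration of the preprocess_options format expected above: a list
# of (function, kwargs) tuples. The function names used here are placeholders,
# not names guaranteed to exist in this module.
# preprocess_options = [
#     (random_horizontal_flip, {}),
#     (random_crop_image, {'min_object_covered': 0.75}),
# ]
# tensor_dict = preprocess(tensor_dict, preprocess_options)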
|
141b170e0d4c6447750e2ece967afec7a92a37ea
| 3,645,869
|
def update_comment(id):
"""修改单条评论"""
comment = Comment.query.get_or_404(id)
if g.current_user != comment.author and not g.current_user.can(Permission.COMMENT):
return error_response(403)
data = request.get_json()
if not data:
return bad_request('You must put JSON data.')
comment.from_dict(data)
db.session.commit()
return jsonify(comment.to_dict())
|
59db7122f9139f7fda744284e83045533d6361fb
| 3,645,870
|
def resnet18(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
    Args:
        num_classes (int): number of target classes passed to the RetinaNet head
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
encoder = ResNetEncoder(BasicBlock, [2, 2, 2, 2])
if pretrained:
encoder.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='models'), strict=False)
model = RetinaNet(encoder=encoder, num_classes=num_classes)
return model
|
b342fa322cb26571b5df7e5e8f117ce016a7febf
| 3,645,871
|
import html
def display_page(pathname):
"""displays dash page"""
if pathname == '/':
return main.layout
elif pathname == '/explore':
return explore.layout
elif pathname == '/eval':
return eval.layout
elif pathname == '/train':
return train.layout
else:
return html.Div(dbc.Col(dbc.Jumbotron(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognized..."),
]
), width = 9), style = CONTENT_STYLE
)
|
3aeb44ca1974b63f63b9ef97526aef20d2d92ddb
| 3,645,872
|
def getFilterDict(args):
"""
    Function: Build a dictionary of human-readable strings summarizing the analysis type and any filters/exclusions the user passed to the script.
Called from: main
"""
## Set variables for organization; this can be probably be removed later
outText = {}
outAction = ""
userString = ""
ipString = ""
ctryString = ""
domainString = ""
evntString = ""
## Split the modifierData variable if user passed multiple values in a comma-separated list
if args.modifierData:
modData = args.modifierData.split(",")
## Set analysis type as one of the three main functions available
if args.lIPs:
outAction = "Analysis Type: IP dump"
elif args.topNum:
outAction = "Analysis Type: Log summary"
else:
outAction = "Analysis Type: Detailed Analysis"
## Determine if results will be filtered or excluded by user & create output string. Note
## that usernames passed in DOMAIN\USERNAME format will need to be converted back to a
## single backslash (\) where the user escaped command input with a double backslash (\\)
try:
if args.filterType.lower() == "user" or args.excludeType.lower() == "user":
for i in range(0,(len(modData))):
if userString == "":
userString = modData[i].replace("\\\\","\\")
else:
userString = userString + ", " + modData[i].replace("\\\\","\\")
if args.filterType:
userString = " Users - Only " + userString
else:
userString = " Users - All except " + userString
except:
pass
## Determine if results will be filtered or excluded by IP address & create output string
try:
if args.filterType.lower() == "ip" or args.excludeType.lower() == "ip":
for i in range(0,(len(modData))):
if ipString == "":
ipString = modData[i]
else:
ipString = ipString + ", " + modData[i]
if args.filterType:
ipString = " IPs - Only " + ipString
else:
ipString = " IPs - All except " + ipString
except:
pass
## If the user passed the -P argument to omit private IP addresses, add it to IP line
if args.privIP:
if ipString == "":
ipString = " IPs - All except internal addresses"
else:
ipString += ", and internal addresses"
## Determine if results will be filtered or excluded by country & create output string
try:
if args.filterType.lower() == "country" or args.excludeType.lower() == "country":
for i in range(0,(len(modData))):
if ctryString == "":
ctryString = modData[i]
else:
ctryString = ctryString + ", " + modData[i]
if args.filterType:
ctryString = " Countries - Only " + ctryString
else:
ctryString = " Countries - All except " + ctryString
except:
pass
## Determine if results will be filtered or excluded by domain & create output string
try:
if args.filterType.lower() == "domain" or args.excludeType.lower() == "domain":
for i in range(0,(len(modData))):
if domainString == "":
domainString = modData[i]
else:
domainString = domainString + ", " + modData[i]
if args.filterType:
domainString = " Domains - Only " + domainString
else:
domainString = " Domains - All except " + domainString
except:
pass
## Determine if benign 'garbage' events will be filtered out and update misc event filter string
if args.logGarbage:
evntString = "No garbage events"
## Determine if only known cities will be presented in the results and update misc event filter string
if args.kCity:
if evntString == "":
evntString = "No unknown cities"
else:
evntString = evntString + ", no unknown cities"
## Determine if events will only be filtered to IPs with foreign geolocation and update filter string
if args.warnIP:
if ipString == "":
ipString = " IPs - Only IPs foreign to current location"
else:
ipString = ipString + ", only IPs foreign to current location"
## If any filter strings are empty, replace them with notice that all events of the given type will be included in output
if userString == "":
userString = " Users - ALL"
if ipString == "":
ipString = " IPs - ALL"
if ctryString == "":
ctryString = " Countries - ALL"
if domainString == "":
domainString = " Domains - ALL"
if evntString == "":
evntString = " Events - ALL"
else:
evntString = " Events - " + evntString
## Arrange the outText dictionary to be passed back to main and ship it
outText["outAction"] = outAction
outText["userString"] = userString
outText["ipString"] = ipString
outText["ctryString"] = ctryString
outText["domainString"] = domainString
outText["evntString"] = evntString
return outText
|
ea175812465fa30866fe90c6461f416c4af1d6b2
| 3,645,873
|
def pointwise_multiply(A, B):
"""Pointwise multiply
Args:
-----------------------------
A: tvm.te.tensor.Tensor
shape [...]
B: tvm.te.tensor.Tensor
shape same as A
-----------------------------
Returns:
-----------------------------
tvm.te.tensor.Tensor
shape same as A
-----------------------------
"""
assert_print(len(A.shape) == len(B.shape))
for i in range(len(A.shape)):
assert_print(A.shape[i].value == B.shape[i].value)
def _mul(*args):
return A[args] * B[args]
return tvm.te.compute(A.shape, _mul)
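# Hedged usage sketch for pointwise_multiply above (requires a working TVM
# install); the placeholder shapes are illustrative only:
# A = tvm.te.placeholder((4, 4), name="A")
# B = tvm.te.placeholder((4, 4), name="B")
# C = pointwise_multiply(A, B)  # C[i, j] == A[i, j] * B[i, j]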
|
37c27cced9cc77f3a3aefef32d56f92f0ceb292f
| 3,645,874
|
import traceback
def create_website(self):
"""
:param self:
:return:
"""
try:
query = {}
show = {"_id": 0}
website_list = yield self.mongodb.website.find(query, show)
return website_list
except:
logger.error(traceback.format_exc())
return ""
|
b7b8faf55095288e5c2d693aeed85f6412449c08
| 3,645,875
|
def _get_sparsity(A, tolerance=0.01):
"""Returns ~% of zeros."""
positives = np.abs(A) > tolerance
non_zeros = np.count_nonzero(positives)
return (A.size - non_zeros) / float(A.size)
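# Quick numeric illustration of _get_sparsity above: two of the four entries
# fall below the 0.01 tolerance, so the reported sparsity is 0.5.
# _get_sparsity(np.array([0.0, 0.005, 1.0, -2.0]))  # -> 0.5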
|
44b7fb501a10551167ad37ffdafaef42c6c849b9
| 3,645,876
|
def findPeaks(hist):
"""
Take in histogram
Go through each bin in the histogram and:
Find local maximum and:
Fit a parabola around the two neighbor bins and local max bin
Calculate the critical point that produces the max of the parabola
(critical point represents orientation, max is the peak)
Add both to list of peaks
Return sorted list of peaks
"""
peaks = []
offsets = []
binRanges = np.arange(-175, 185, 10)
    hist_max = np.max(hist)
    for i in range(len(hist)):
        if i == 0:
            left, right = -1, 1
        elif i == len(hist) - 1:
            left, right = -2, 0
        else:
            left, right = i-1, i+1
        if (hist[i] - hist[left]) >= (0.01*hist_max) \
            and (hist[i] - hist[right]) >= (0.01*hist_max):
            a = (hist[right] - 2*hist[i] + hist[left]) / 2
            b = (hist[right] - hist[left]) / 2
            c = hist[i]
            aDx = a*2
            bDx = -1*b
            # critical point (vertex of the fitted parabola)
            x = bDx/aDx
            # peak value of the parabola at the critical point
            peak = a*(x**2) + b*x + c
            offset = (x*10) + binRanges[i]
            peaks.append((peak, offset))
return sorted(peaks, reverse=True)
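# Worked check of the parabola fit used above: for neighbouring bin heights
# (left, centre, right) = (2, 6, 4) the fit gives a = -3, b = 1, c = 6, so the
# critical point sits at x = -b / (2a) = 1/6 of a bin to the right of the
# centre and the interpolated peak height is c - b**2 / (4a) = 6 + 1/12.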
|
99b89d4fd9f35deab141e178aaa107dabf35ccfe
| 3,645,877
|
def tph_chart_view(request, template_name="monitor/chart.html", **kwargs):
"""Create example view.
that inserts content into the dash context passed to the dash application.
"""
logger.debug('start')
context = {
'site_title': 'TPH monitor',
'title': 'TPH chart via Plotly Dash for Django.',
'year': ts.COPYRIGHT_YEAR,
'owner': ts.OWNER,
}
# create some context to send over to Dash:
dash_context = request.session.get("django_plotly_dash", dict())
dash_context['django_to_dash_context'] = "I am Dash receiving context from Django"
request.session['django_plotly_dash'] = dash_context
logger.debug('end')
return render(request, template_name=template_name, context=context)
|
135adb437fb3c27327ea8c6a83e33dfadec1f3ce
| 3,645,878
|
def gradient_descent(x_0, a, eta, alpha, beta, it_max, *args, **kwargs):
"""Perform simple gradient descent with back-tracking line search.
"""
# Get a copy of x_0 so we don't modify it for other project parts.
x = x_0.copy()
# Get an initial gradient.
g = gradient(x, a)
# Compute the norm.
norm = np.linalg.norm(g)
# Initialize lists to track our objective values and step sizes.
obj_list = []
t_list = []
# Loop while the norm is less than eta.
i = 0
while (eta <= norm) and (i < it_max):
# Perform back-tracking line search to get our step size.
t = backtrack_line_search(x=x, a=a, g=g, dx=-g, alpha=alpha, beta=beta)
t_list.append(t)
# Perform the x update.
x = x - t * g
# Compute new gradient and norm.
g = gradient(x, a)
norm = np.linalg.norm(g)
# Compute new value of objective function, append to list.
obj_list.append(objective(x, a))
if np.isnan(obj_list[-1]):
raise ValueError(
'NaN objective value encountered in gradient_descent')
# Update iteration counter.
i += 1
if i >= it_max:
raise ValueError(f'Hit {i} iterations in gradient_descent.')
return x, np.array(obj_list), t_list
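# Hedged call sketch for gradient_descent above; gradient(), objective() and
# backtrack_line_search() are assumed to be defined elsewhere in this module
# for the objective parameterised by `a`, so the call is left as a comment:
# x_star, obj_history, step_sizes = gradient_descent(
#     x_0=np.zeros(3), a=A, eta=1e-6, alpha=0.3, beta=0.8, it_max=1000)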
|
701097aaebbe15306818593daf501b0f7d622f49
| 3,645,879
|
import re
def ExtractCalledByNatives(contents):
"""Parses all methods annotated with @CalledByNative.
Args:
contents: the contents of the java file.
Returns:
A list of dict with information about the annotated methods.
TODO(bulach): return a CalledByNative object.
Raises:
ParseError: if unable to parse.
"""
called_by_natives = []
for match in re.finditer(RE_CALLED_BY_NATIVE, contents):
called_by_natives += [CalledByNative(
system_class=False,
unchecked='Unchecked' in match.group('Unchecked'),
static='static' in match.group('prefix'),
java_class_name=match.group('annotation') or '',
return_type=match.group('return_type'),
name=match.group('name'),
params=JniParams.Parse(match.group('params')))]
# Check for any @CalledByNative occurrences that weren't matched.
unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n')
for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]):
if '@CalledByNative' in line1:
raise ParseError('could not parse @CalledByNative method signature',
line1, line2)
return MangleCalledByNatives(called_by_natives)
|
bbf8c80cc7ac323469de7bf8a2fdf0da84b834e1
| 3,645,880
|
from collections import Counter
import numpy as np
def knn_python(input_x, dataset, labels, k):
    """
    :param input_x: the input vector to classify
    :param dataset: the training samples used as reference points for the distance computation
    :param labels: the class labels of the training samples
    :param k: the number of nearest neighbours to consider
    """
    # 1. Compute the Euclidean distance between the input sample and every reference sample
    dist = np.sum((input_x - dataset) ** 2, axis=1) ** 0.5
    # 2. Collect the labels of the k nearest samples
    k_labels = [labels[index] for index in dist.argsort()[0: k]]
    # 3. The most frequent label among them is the final classification
    label = Counter(k_labels).most_common(1)[0][0]
return label
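# Hedged usage sketch for knn_python above with a made-up 2-D toy dataset; the
# query point sits next to the two 'B' samples, so a 3-nearest-neighbour
# majority vote returns 'B'.
toy_dataset = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
toy_labels = ['A', 'A', 'B', 'B']
print(knn_python(np.array([0.1, 0.1]), toy_dataset, toy_labels, k=3))  # 'B'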
|
8deaec88369d2d0cb42ebdd3961caf891357335b
| 3,645,881
|
from datetime import datetime
import re
def charReplace(contentData, modificationFlag):
"""
Attempts to convert PowerShell char data types using Hex and Int values into ASCII.
Args:
contentData: [char]101
modificationFlag: Boolean
Returns:
contentData: "e"
modificationFlag: Boolean
"""
startTime = datetime.now()
# Hex needs to go first otherwise the 0x gets gobbled by second Int loop/PCRE (0x41 -> 65 -> "A")
for value in re.findall("\[char\]0x[0-9a-z]{1,2}", contentData):
charConvert = int(value.split("]")[1], 0)
if 10 <= charConvert <= 127:
contentData = contentData.replace(value, '"%s"' % chr(charConvert))
modificationFlag = True
# Int values
for value in re.findall("\[char\][0-9]{1,3}", contentData, re.IGNORECASE):
charConvert = int(value.split("]")[1])
if 10 <= charConvert <= 127:
contentData = contentData.replace(value, '"%s"' % chr(charConvert))
modificationFlag = True
if debugFlag:
print("\t[!] Char Replace - %s: %s" % (modificationFlag, datetime.now() - startTime))
return contentData, modificationFlag
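# Small worked illustration of charReplace above: a hex char and an int char
# both resolve to printable ASCII, so both are replaced and the flag flips.
# charReplace('[char]0x41 + [char]101', False)  ->  ('"A" + "e"', True)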
|
ca321869608e7524c260a8feeea6f2cf8bd6fd49
| 3,645,882
|
import six
import os
def _prepare_config(separate, resources, flavor_ref,
git_command, zip_patch,
directory, image_ref, architecture, use_arestor):
"""Prepare the Argus config file."""
conf = six.moves.configparser.SafeConfigParser()
conf.add_section("argus")
conf.add_section("openstack")
conf.set("argus", "output_directory", os.path.join(directory, "output"))
conf.set("argus", "argus_log_file", os.path.join(directory, "argus.log"))
conf.set("argus", "git_command", str(git_command))
conf.set("argus", "patch_install", str(zip_patch))
conf.set("argus", "log_each_scenario", str(separate))
conf.set("argus", "arch", str(architecture))
conf.set("argus", "use_arestor", str(use_arestor))
conf.set("openstack", "image_ref", str(image_ref))
if resources:
conf.set("argus", "resources", str(resources))
if flavor_ref:
conf.set("openstack", "flavor_ref", str(flavor_ref))
config_path = os.path.join(directory, "argus.conf")
with open(config_path, 'w') as file_handle:
conf.write(file_handle)
return config_path
|
b2f1528ea0d8426316b7b5a1f6b40f5cc723f5d5
| 3,645,883
|
def all_logit_coverage_function(coverage_batches):
"""Computes coverage based on the sum of the absolute values of the logits.
Args:
coverage_batches: Numpy arrays containing coverage information pulled from
a call to sess.run. In this case, we assume that these correspond to a
batch of logits.
  Returns:
    A list with one single-element numpy array per example in the batch, each
    holding the sum of the absolute values of that example's logits.
"""
coverage_batch = coverage_batches[0]
coverage_list = []
for idx in range(coverage_batch.shape[0]):
elt = coverage_batch[idx]
elt = np.expand_dims(np.sum(np.abs(elt)), 0)
coverage_list.append(elt)
return coverage_list
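# Small numeric illustration, assuming a single coverage batch of logits:
# batch = np.array([[1.0, -2.0], [0.5, 0.5]])
# all_logit_coverage_function([batch])  ->  [array([3.]), array([1.])]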
|
32674a4528b69b756b3fc5f161dcbfd3ceaba01f
| 3,645,884
|
import asyncio
async def create_audio(request):
"""Process the request from the 'asterisk_ws_monitor' and creates the audio file"""
try:
message = request.rel_url.query["message"]
except KeyError:
message = None
LOGGER.error(f"No 'message' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
try:
msg_chk_sum = request.rel_url.query["msg_chk_sum"]
except KeyError:
msg_chk_sum = None
LOGGER.error(f"No 'msg_chk_sum' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
inner_loop = asyncio.get_running_loop()
executor = ThreadPoolExecutor(max_workers=NUM_OF_CPUS)
futures = inner_loop.run_in_executor(
executor, create_audio_file, message, msg_chk_sum
)
try:
await asyncio.ensure_future(futures)
status_code = 200
except Exception as e:
status_code = 500
LOGGER.error(f"Unable to generate the audio file: '{e}'")
return web.json_response({"status": status_code})
|
6aa90764c167be9a1d980dea0e54243a9467c276
| 3,645,885
|
def reinterpret_axis(block, axis, label, scale=None, units=None):
""" Manually reinterpret the scale and/or units on an axis """
def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
tensor = hdr['_tensor']
if isinstance(axis, basestring):
axis = tensor['labels'].index(axis)
if label is not None:
tensor['labels'][axis] = label
if scale is not None:
tensor['scales'][axis] = scale
if units is not None:
tensor['units'][axis] = units
return hdr
return block_view(block, header_transform)
|
e19d1a5cf567f72ae261cc0fef69b03e2d8a9696
| 3,645,886
|
def duel(board_size, player_map):
"""
:param board_size: the board size (i.e. a 2-tuple)
:param player_map: a dict, where the key is an int, 0 or 1, representing the player, and the value is the policy
:return: the resulting game outcomes
"""
board_state = init_board_state(board_size)
results = {p: {"won": 0, "lost": 0, "tied": 0} for p in player_map}
for player in player_map:
for edge_index in range(len(board_state)):
players = [player, (1 - player)]
if edge_index % 2 == 0:
players = [x for x in reversed(players)]
game = Game(board_size, players)
current_player = game.get_current_player()
# select the first edge for the first player
current_player, _ = game.select_edge(edge_index, current_player)
while not game.is_finished():
state = game.get_board_state()
edge = player_map[current_player].select_edge(
state, game.get_score(current_player), game.get_score(1 - current_player))
current_player, _ = game.select_edge(edge, current_player)
p0_score = game.get_score(0)
p1_score = game.get_score(1)
if p0_score > p1_score:
results[0]["won"] += 1
results[1]["lost"] += 1
if p1_score > p0_score:
results[1]["won"] += 1
results[0]["lost"] += 1
if p0_score == p1_score:
results[0]["tied"] += 1
results[1]["tied"] += 1
return results
|
3fbcd1477fc90553cdc5371440083c9737b4bf5b
| 3,645,887
|
def set_processor_type(*args):
"""
set_processor_type(procname, level) -> bool
Set target processor type. Once a processor module is loaded, it
cannot be replaced until we close the idb.
@param procname: name of processor type (one of names present in
\ph{psnames}) (C++: const char *)
@param level: SETPROC_ (C++: setproc_level_t)
@return: success
"""
return _ida_idp.set_processor_type(*args)
|
32d827fe0c0d152af98e6bed5baa7a24d372c4f8
| 3,645,888
|
from onnx.helper import make_node
from onnx import TensorProto
def convert_repeat(node, **kwargs):
"""Map MXNet's repeat operator attributes to onnx's Tile operator.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
opset_version = kwargs['opset_version']
if opset_version < 11:
raise AttributeError('ONNX opset 11 or greater is required to export this operator')
repeats = int(attrs.get('repeats', 1))
axis = attrs.get('axis', 'None')
if repeats <= 0:
raise NotImplementedError('repeat operator does not support parameter repeats==0')
nodes = []
if axis == 'None':
create_tensor([-1], name+'_-1', kwargs['initializer'])
create_tensor([repeats], name+'_rep', kwargs['initializer'])
create_tensor([1, repeats], name+'_repeats', kwargs['initializer'])
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('ReduceProd', [name+'_shape'], [name+'_size']),
make_node('Reshape', [input_nodes[0], name+'_size'], [name+'_flat']),
make_node('Unsqueeze', [name+'_flat', name+'_-1'], [name+'_unsqueeze']),
make_node('Tile', [name+'_unsqueeze', name+'_repeats'], [name+'_tile']),
make_node('Mul', [name+'_size', name+'_rep'], [name+'_new_size']),
make_node('Reshape', [name+'_tile', name+'_new_size'], [name], name=name)
]
else:
axis = int(axis)
repeats -= 1
create_tensor([repeats], name+'_repeats', kwargs['initializer'])
create_tensor([1], name+'_1', kwargs['initializer'])
create_tensor([0], name+'_0', kwargs['initializer'])
create_tensor([axis], name+'_axis', kwargs['initializer'])
create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('Shape', [name+'_shape'], [name+'_dim']),
make_node('Squeeze', [name+'_dim', name+'_0'], [name+'_dim_s']),
make_node('Range', [name+'_0_s', name+'_dim_s', name+'_1_s'], [name+'_range'])
]
if axis < 0:
nodes += [
make_node('Add', [name+'_axis', name+'_dim'], [name+'_true_axis']),
make_node('Equal', [name+'_range', name+'_true_axis'], [name+'_one_hot'])
]
else:
nodes += [
make_node('Equal', [name+'_range', name+'_axis'], [name+'_one_hot'])
]
nodes += [
make_node('Cast', [name+'_one_hot'], [name+'_one_hot_int'], to=int(TensorProto.INT64)),
make_node('Mul', [name+'_repeats', name+'_one_hot_int'], [name+'_mul']),
make_node('Add', [name+'_mul', name+'_1'], [name+'_add']),
make_node('Concat', [name+'_1', name+'_add'], [name+'_repeats_tensor'], axis=0)
]
if axis == -1:
nodes += [
make_node('Concat', [name+'_shape', name+'_1'], [name+'_unsqueeze_shape'], axis=0),
make_node('Reshape', [input_nodes[0], name+'_unsqueeze_shape'],
[name+'_unsqueeze'])
]
else:
create_tensor([axis+1], name+'_axis+1', kwargs['initializer'])
nodes += [
make_node('Unsqueeze', [input_nodes[0], name+'_axis+1'], [name+'_unsqueeze'])
]
nodes += [
make_node('Tile', [name+'_unsqueeze', name+'_repeats_tensor'], [name+'_tile']),
make_node('Mul', [name+'_shape', name+'_add'], [name+'_new_shape']),
make_node('Reshape', [name+'_tile', name+'_new_shape'], [name], name=name)
]
return nodes
|
120e1ee364bf64b00b504fcdc8d0769a6d02db7b
| 3,645,889
|
def my_quote(s, safe = '/'):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
"""
cachekey = (safe, always_safe)
try:
safe_map = _safemaps[cachekey]
except KeyError:
safe += always_safe
safe_map = {}
for i in range(256):
c = chr(i)
safe_map[c] = (c in safe) and c or ('%%%02x' % i)
_safemaps[cachekey] = safe_map
res = map(safe_map.__getitem__, s)
return ''.join(res)
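# Illustration of the behaviour of my_quote above (note that this
# implementation emits lowercase hex escapes):
# my_quote('abc def')          ->  'abc%20def'
# my_quote('/a b/', safe='')   ->  '%2fa%20b%2f'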
|
c5c28b7779e9cab2488696435832f9f7cbd03e57
| 3,645,890
|
def GetExperimentStatus(experiment, knobs, exp_data, track='stable'):
"""Determine the status and source of a given experiment.
Take into account all ways that a given experiment may be enabled and allow
the client to determine why a given experiment has a particular status.
Experiments at 100% are always on.
If the machine is set to ignore experiments, it will ignore any experiments
not at 100%.
If the machine is set to always apply experiments, the experiment will be on.
If the machine is in an explicitly enabled track, the experiment will be on.
If the machine is manually opted in or out, that option is applied.
Otherwise the bucket algorithm is applied.
Args:
experiment: a string identifier for a given experiment.
knobs: knobs for a host (in dict form)
exp_data: a dict containing experiment data (yaml.load(...))
track: a string of the machine's release track
Returns:
an object with three attributes, status, source, and rollout_percent
"""
ReturnEarly = lambda ret: ret.source is not None # pylint: disable=g-bad-name
ret = type('obj', (object,), {})
ret.status = DISABLED
ret.source = None
ret.rollout_percent = float(exp_data.get(experiment, {}).get(PERCENT_KEY, -1))
if ret.rollout_percent == 100:
logging.debug('Experiment %s is at 100%%, enabling', experiment)
ret.status = ENABLED
ret.source = ALWAYS
return ret
auto_knob = knobs.get(EXPERIMENTS_KNOB, 'recommended')
if auto_knob == ALWAYS:
ret.status = ENABLED
ret.source = ALWAYS
elif auto_knob == NEVER:
ret.status = DISABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
manual_on_knob = knobs.get(MANUAL_ON_KNOB, [])
manual_off_knob = knobs.get(MANUAL_OFF_KNOB, [])
if experiment in manual_on_knob:
ret.status = ENABLED
ret.source = MANUAL
elif experiment in manual_off_knob:
ret.status = DISABLED
ret.source = MANUAL
if ReturnEarly(ret): return ret
enable_unstable = exp_data.get(experiment, {}).get(ENABLE_UNSTABLE, False)
enable_testing = exp_data.get(experiment, {}).get(ENABLE_TESTING, False)
if ((track == 'testing' and enable_testing) or
(track == 'unstable' and (enable_unstable or enable_testing))):
ret.status = ENABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
try:
mach_uuid = FetchUUID()
except ExperimentsError, e:
raise MissingUUID(e)
logging.debug('Found uuid %s', mach_uuid)
return ExperimentIsBucket(experiment, exp_data, mach_uuid)
|
9deef37c2e517987bd49cf91892149f202b43993
| 3,645,891
|
from masci_tools.tools.cf_calculation import CFCalculation, plot_crystal_field_calculation
def test_plot_crystal_field_calculation():
"""
Test of the plot illustrating the potential and charge density going into the calculation
"""
cf = CFCalculation()
cf.readPot('files/cf_calculation/CFdata.hdf')
cf.readCDN('files/cf_calculation/CFdata.hdf')
plt.gcf().clear()
plot_crystal_field_calculation(cf, show=False)
return plt.gcf()
|
90488103d929929615dc1e5de1531102a9f7b96a
| 3,645,892
|
import re
import tempfile
from pathlib import Path
async def submit_changesheet(
uploaded_file: UploadFile = File(...),
mdb: MongoDatabase = Depends(get_mongo_db),
user: User = Depends(get_current_active_user),
):
"""
Example changesheet [here](https://github.com/microbiomedata/nmdc-runtime/blob/main/metadata-translation/notebooks/data/changesheet-without-separator3.tsv).
"""
allowed_to_submit = ("dehays", "dwinston")
if user.username not in allowed_to_submit:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=(
f"Only users {allowed_to_submit} "
"are allowed to apply changesheets at this time."
),
)
sheet_in = await raw_changesheet_from_uploaded_file(uploaded_file)
df_change = df_from_sheet_in(sheet_in, mdb)
_ = _validate_changesheet(df_change, mdb)
# create object (backed by gridfs). use "gfs0" id shoulder for drs_object access_id.
sheet_id = generate_one_id(mdb, ns="changesheets", shoulder="gfs0")
mdb_fs = GridFS(mdb)
filename = re.sub(r"[^A-Za-z0-9\.\_\-]", "_", sheet_in.name)
PortableFilename(filename) # validates
sheet_text = sheet_in.text
drs_id = local_part(sheet_id)
DrsId(drs_id) # validates
mdb_fs.put(
sheet_text,
_id=drs_id,
filename=filename,
content_type=sheet_in.content_type,
encoding="utf-8",
)
with tempfile.TemporaryDirectory() as save_dir:
filepath = str(Path(save_dir).joinpath(filename))
with open(filepath, "w") as f:
f.write(sheet_text)
object_in = DrsObjectIn(
**drs_metadata_for(
filepath,
base={
"description": f"changesheet submitted by {user.username}",
"access_methods": [{"access_id": drs_id}],
},
)
)
self_uri = f"drs://{HOSTNAME_EXTERNAL}/{drs_id}"
drs_obj_doc = _create_object(
mdb, object_in, mgr_site="nmdc-runtime", drs_id=drs_id, self_uri=self_uri
)
doc_after = mdb.objects.find_one_and_update(
{"id": drs_obj_doc["id"]},
{"$set": {"types": ["metadata-changesheet"]}},
return_document=ReturnDocument.AFTER,
)
return doc_after
|
ebe9306aba0fef88c906c3c584f87e7c783fe9d8
| 3,645,893
|
import json
def get_notes(request, course, page=DEFAULT_PAGE, page_size=DEFAULT_PAGE_SIZE, text=None):
"""
Returns paginated list of notes for the user.
Arguments:
request: HTTP request object
course: Course descriptor
page: requested or default page number
page_size: requested or default page size
text: text to search. If None then return all results for the current logged in user.
Returns:
        Paginated dictionary with these keys:
start: start of the current page
current_page: current page number
next: url for next page
previous: url for previous page
count: total number of notes available for the sent query
num_pages: number of pages available
            results: list of note info dictionaries; each item in this list is a dict
"""
path = 'search' if text else 'annotations'
response = send_request(request.user, course.id, page, page_size, path, text)
try:
collection = json.loads(response.content.decode('utf-8'))
except ValueError:
log.error("Invalid JSON response received from notes api: response_content=%s", response.content)
raise EdxNotesParseError(_("Invalid JSON response received from notes api.")) # lint-amnesty, pylint: disable=raise-missing-from
# Verify response dict structure
expected_keys = ['total', 'rows', 'num_pages', 'start', 'next', 'previous', 'current_page']
keys = list(collection.keys())
if not keys or not all(key in expected_keys for key in keys):
log.error("Incorrect data received from notes api: collection_data=%s", str(collection))
raise EdxNotesParseError(_("Incorrect data received from notes api."))
filtered_results = preprocess_collection(request.user, course, collection['rows'])
# Notes API is called from:
# 1. The annotatorjs in courseware. It expects these attributes to be named "total" and "rows".
# 2. The Notes tab Javascript proxied through LMS. It expects these attributes to be called "count" and "results".
collection['count'] = collection['total']
del collection['total']
collection['results'] = filtered_results
del collection['rows']
collection['next'], collection['previous'] = construct_pagination_urls(
request,
course.id,
collection['next'],
collection['previous']
)
return collection
|
3256cacd845cf2fd07027cf6b3f2547a59cefd0f
| 3,645,894
|
import numpy
def convert_hdf_to_gaintable(f):
""" Convert HDF root to a GainTable
    :param f: HDF root group holding the GainTable attributes and 'data' dataset
    :return: the reconstructed GainTable
"""
assert f.attrs['ARL_data_model'] == "GainTable", "Not a GainTable"
receptor_frame = ReceptorFrame(f.attrs['receptor_frame'])
frequency = numpy.array(f.attrs['frequency'])
data = numpy.array(f['data'])
gt = GainTable(data=data, receptor_frame=receptor_frame, frequency=frequency)
return gt
|
dd816eb0730b0f9993efe07c2b28db692ac6a06e
| 3,645,895
|
import pathlib
def list_files(directory):
"""Returns all files in a given directory
"""
return [f for f in pathlib.Path(directory).iterdir() if f.is_file() and not f.name.startswith('.')]
|
a8c5fea794198c17c2aff41a1a07009984a8e61f
| 3,645,896
|
def condition_conjunction(conditions):
"""Do conjuction of conditions if there are more than one, otherwise just
return the single condition."""
if not conditions:
return None
elif len(conditions) == 1:
return conditions[0]
else:
return sql.expression.and_(*conditions)
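# Hedged usage sketch for condition_conjunction above, assuming SQLAlchemy
# column expressions such as cond_a = table.c.age > 18:
# condition_conjunction([])                        ->  None
# condition_conjunction([cond_a])                  ->  cond_a
# condition_conjunction([cond_a, cond_b, cond_c])  ->  sql.expression.and_(cond_a, cond_b, cond_c)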
|
acf26bd9b8e47d27ad83815be70216db0e4ad091
| 3,645,897
|
def get_claimed_referrals(char):
""" Return how many claimed referrals this character has. """
return db((db.referral.referrer==char) & (db.referral.claimed==True)).count()
|
5820cdd21cbb77f6a43537ae18dc227ad4fec1b8
| 3,645,898
|
def groupsplit(X, y, valsplit):
"""
Used to split the dataset by datapoint_id into train and test sets.
    The data is split so that all rows for a given datapoint_id occur entirely within the same split.
    Note that when a separate validation set exists (valsplit="yes"), the data is split with 70% for training and 30% for the test set.
    Otherwise, 40% of the data is held out, and that portion is later split further, with 60% of it as the test set and 40% as the validation set.
Args:
X: data excluding the target_variable
y: target variable with datapoint_id
        valsplit: flag to indicate if there is a dataframe for the validation set. Accepted values are "yes" or "no"
Returns:
X_train: X trainset
y_train: y trainset
X_test: X testset
y_test_complete: Dataframe containing the target variable with corresponding datapointid
"""
logger.info("groupsplit with valsplit: %s", valsplit)
if valsplit == 'yes':
gs = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
else:
gs = GroupShuffleSplit(n_splits=2, test_size=.4, random_state=42)
train_ix, test_ix = next(gs.split(X, y, groups=X.datapoint_id))
X_train = X.loc[train_ix]
y_train = y.loc[train_ix]
X_test = X.loc[test_ix]
y_test_complete = y.loc[test_ix]
return X_train, y_train, X_test, y_test_complete
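# Hedged usage sketch for groupsplit above: X is assumed to carry a
# 'datapoint_id' column so that all rows of one datapoint stay on the same
# side of the split.
# X_train, y_train, X_test, y_test_complete = groupsplit(X, y, valsplit='yes')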
|
e8ba393270a32e2464c30409a13b2c5e9528afdd
| 3,645,899
|