| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def verifica_cc(numero):
"""verifica_cc(numero): int -> tuple
Funcao que verifica o numero do cartao, indicando a categoria e a rede emissora"""
numero_final = str(numero)
if luhn_verifica(numero_final) == True:
categor = categoria(numero_final)
rede_cartao = valida_iin(numero_final)
if rede_cartao == "":
return "cartao invalido"
else:
return (categor, rede_cartao)
else:
return "cartao invalido"
|
f6d3501b8154c05058006575f8aa33c228b9ade6
| 3,642,200
|
def create_security_group(stack, name, rules=()):
"""Add EC2 Security Group Resource."""
ingress_rules = []
for rule in rules:
ingress_rules.append(
SecurityGroupRule(
"{0}".format(rule['name']),
CidrIp=rule['cidr'],
FromPort=rule['from_port'],
ToPort=rule['to_port'],
IpProtocol=rule['protocol'],
)
)
return stack.stack.add_resource(
SecurityGroup(
'{0}SecurityGroup'.format(name),
GroupDescription="{0} Security Group".format(name),
SecurityGroupIngress=ingress_rules,
VpcId=Ref(stack.vpc),
))
|
e4d2b81fc1c3b0b3231725aa8757ea644d2efdf6
| 3,642,201
|
from datetime import timedelta
from typing import List

import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from tqdm import tqdm
def features_targets_and_externals(
df: pd.DataFrame,
region_ordering: List[str],
id_col: str,
time_col: str,
time_encoder: OneHotEncoder,
weather: Weather_container,
time_interval: str,
latitude: str,
longitude: str,
):
"""
Function that computes the node features (outflows), target values (next step prediction)
and external data such as time_encoding and weather information
Args:
df (pd.DataFrame): [description]
region_ordering (List[str]): [description]
id_col (str): [description]
time_col (str): [description]
time_encoder (OneHotEncoder): [description]
weather (Weather_container): [description]
Returns:
[type]: [description]
"""
id_grouped_df = df.groupby(id_col)
lat_dict = dict()
lng_dict = dict()
for node in region_ordering:
grid_group_df = id_grouped_df.get_group(node)
lat_dict[node] = grid_group_df[latitude].mean()
lng_dict[node] = grid_group_df[longitude].mean()
grouped_df = df.groupby([time_col, id_col])
dt_range = pd.date_range(df[time_col].min(), df[time_col].max(), freq=time_interval)
node_inflows = np.zeros((len(dt_range), len(region_ordering), 1))
lat_vals = np.zeros((len(dt_range), len(region_ordering)))
lng_vals = np.zeros((len(dt_range), len(region_ordering)))
targets = np.zeros((len(dt_range) - 1, len(region_ordering)))
# arrays for external data
weather_external = np.zeros((len(dt_range), 4))
num_cats = 0
for cats in time_encoder.categories_:
num_cats += len(cats)
time_external = np.zeros((len(dt_range), num_cats))
# Loop through every (timestep, node) pair in dataset. For each find number of outflows and set as feature
# also set the next timestep for the same node as the target.
for t, starttime in tqdm(enumerate(dt_range), total=len(dt_range)):
for i, node in enumerate(region_ordering):
query = (starttime, node)
try:
group = grouped_df.get_group(query)
node_inflows[t, i] = len(group)
except KeyError:
node_inflows[t, i] = 0
lat_vals[t, i] = lat_dict[node]
lng_vals[t, i] = lng_dict[node]
# current solution:
# The target to predict, is the number of inflows at next timestep.
if t > 0:
targets[t - 1, i] = node_inflows[t, i]
            # Derive the time encoding from the bin start itself; referencing `group`
            # here could fail when the (starttime, node) pair was missing above.
            time_obj = pd.Timestamp(starttime)
time_external[t, :] = time_encoder.transform(
np.array([[time_obj.hour, time_obj.weekday(), time_obj.month]])
).toarray()
start_time_dt = pd.Timestamp(starttime).to_pydatetime()
weather_dat = weather.get_weather_df(start=start_time_dt, end=start_time_dt + timedelta(hours=1))
weather_dat = np.nan_to_num(weather_dat, copy=False, nan=0.0)
weather_external[t, :] = weather_dat
time_external = time_external[:-1, :]
# normalize weather features
weather_external = (weather_external - weather_external.mean(axis=0)) / (weather_external.std(axis=0) + 1e-6)
weather_external = weather_external[:-1, :]
X = node_inflows[:-1, :, :]
lng_vals = lng_vals[:-1, :]
lat_vals = lat_vals[:-1, :]
feature_scaler = StandardScaler()
feature_scaler.fit(X[:, :, 0])
target_scaler = StandardScaler()
target_scaler.fit(targets)
return X, lat_vals, lng_vals, targets, time_external, weather_external, feature_scaler, target_scaler
|
c9fc9fc210407ec596facda1bc43952ce9c6b98a
| 3,642,202
|
def main(argv=None):
""" Execute the application CLI.
Arguments are taken from sys.argv by default.
"""
args = _cmdline(argv)
config.load(args.config)
results = get_package_list(args.search_term)
results = sorted(results, key=lambda a: sort_function(a[1]), reverse=True)
results_normalized = list()
last_result = None
for result in results:
if result[0] == last_result:
continue
results_normalized.append(result)
last_result = result[0]
print('\n'.join(["%s - %s" % (_[0], _[1]) for _ in results_normalized]))
return 0
|
1bfa7c6bc8181b32859089b0ac2be8b231882ec5
| 3,642,203
|
def transform_child_joint_frame_to_parent_inertial_frame(child_body):
"""Return the homogeneous transform from the child joint frame to the parent inertial frame."""
parent_joint = child_body.parent_joint
parent = child_body.parent_body
if parent_joint is not None and parent.inertial is not None:
h_p_c = parent_joint.homogeneous # from parent to child link/joint frame
h_c_p = get_inverse_homogeneous(h_p_c) # from child to parent link/joint frame
h_p_pi = parent.inertial.homogeneous # from parent link/joint frame to inertial frame
h_c_pi = h_c_p.dot(h_p_pi) # from child link/joint frame to parent inertial frame
return h_c_pi
|
0ab8761ef40101368fb3f2b657c329cd8cf5cf2b
| 3,642,204
|
def team_to_repos(api, no_repos, organization):
"""Create a team_to_repos mapping for use in _add_repos_to_teams, anc create
each team and repo. Return the team_to_repos mapping.
"""
num_teams = 10
# arrange
team_names = ["team-{}".format(i) for i in range(num_teams)]
repo_names = ["some-repo-{}".format(i) for i in range(num_teams)]
for name in team_names:
organization.create_team(name, permission="pull")
for name in repo_names:
organization.create_repo(name)
team_to_repos = {
team_name: [repo_name]
for team_name, repo_name in zip(team_names, repo_names)
}
return team_to_repos
|
390da146c3f96c554f9194f8551a066eec535533
| 3,642,205
|
def box_minus(plus_transform: pin.SE3, minus_transform: pin.SE3) -> np.ndarray:
"""
Compute the box minus between two transforms:
.. math::
T_1 \\boxminus T_2 = \\log(T_1 \\cdot T_2^{-1})
This operator allows us to think about orientation "differences" as
similarly as possible to position differences, but mind the frames! Its
formula has two use cases, depending on whether the common frame :math:`C`
between the two transforms is their source or their target.
When the common frame is the target, denoting by :math:`T_{CP}` the
transform from frame :math:`P` (source) to frame :math:`C` (target), the
resulting twist is expressed in the target frame:
.. math::
{}_C \\xi_{CM} = T_{CP} \\boxminus T_{CM}
When the common frame is the source frame, denoting by :math:`T_{MC}` the
transform from frame :math:`C` (source) to frame :math:`M` (target), the
resulting twist is expressed in the target frame of the transform on the
right-hand side of the operator:
.. math::
-{}_M \\xi_{M} = T_{PC} \\boxminus T_{MC}
Args:
plus_transform: Transform :math:`T_1` on the left-hand side of the box
minus operator.
minus_transform: Transform :math:`T_2` on the right-hand side of the
box minus operator.
Returns:
In the first case :math:`T_{CP} \\boxminus T_{CM}`, the outcome is a
spatial twist :math:`{}_C \\xi_{CM}` expressed in the common frame
:math:`C`.
In the second case :math:`T_{PC} \\boxminus T_{MC}`, the outcome is a
body twist :math:`-{}_M \\xi_{CM}` (mind the unitary minus).
Note:
Prefer using :func:`pink.tasks.utils.body_box_minus` to calling this
function in the second use case :math:`T_{PC} \\boxminus T_{MC}`.
"""
diff_array = plus_transform.act(minus_transform.inverse())
twist: np.ndarray = pin.log(diff_array).vector
return twist
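
# Hedged sanity check, assuming pinocchio (`pin`) and numpy (`np`) are imported
# as in the surrounding module: the box minus of a transform with itself should
# be the zero twist.
T = pin.SE3.Random()
assert np.allclose(box_minus(T, T), np.zeros(6))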
|
838f5e8b4f91450c311c72d4526e4c8fd3c9d6f7
| 3,642,206
|
import struct
def padandsplit(message):
"""
returns a two-dimensional array X[i][j] of 32-bit integers, where j ranges
from 0 to 16.
First pads the message to length in bytes is congruent to 56 (mod 64),
by first adding a byte 0x80, and then padding with 0x00 bytes until the
message length is congruent to 56 (mod 64). Then adds the little-endian
64-bit representation of the original length. Finally, splits the result
up into 64-byte blocks, which are further parsed as 32-bit integers.
"""
origlen = len(message)
padlength = 64 - ((origlen - 56) % 64) # minimum padding is 1!
message += b"\x80"
message += b"\x00" * (padlength - 1)
message += struct.pack("<Q", origlen * 8)
assert (len(message) % 64 == 0)
return [
[
struct.unpack("<L", message[i + j:i + j + 4])[0]
for j in range(0, 64, 4)
]
for i in range(0, len(message), 64)
]
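
# Quick illustration (hedged): a 3-byte message pads to a single 64-byte block,
# i.e. one row of sixteen 32-bit little-endian words.
blocks = padandsplit(b"abc")
assert len(blocks) == 1 and len(blocks[0]) == 16
print(hex(blocks[0][0]))  # 0x80636261 -> 'abc' followed by the 0x80 pad byte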
|
ea06a3fc91e19ed0dbea6ddcc2ee6d554fb5a40f
| 3,642,207
|
import requests
def base_put(url_path, content):
"""
Do a PUT to the REST API
"""
response = requests.put(url=settings.URL_API + url_path, json=content)
return response
|
dde94c1dba0d8a931a0eae0e8f5ce63d1f5a62a1
| 3,642,208
|
import numpy as np
def inverse_rotation(theta: float) -> np.ndarray:
"""
Compute inverse of the 2d rotation matrix that rotates a
given vector by theta without use of numpy.linalg.inv and numpy.linalg.solve.
Arguments:
theta: rotation angle
Return:
Inverse of the rotation matrix
"""
    # The inverse of a 2D rotation matrix is its transpose, i.e. a rotation by -theta,
    # so neither numpy.linalg.inv nor numpy.linalg.solve is needed.
    m = np.zeros((2, 2))
    m[0, 0] = np.cos(theta)
    m[0, 1] = np.sin(theta)
    m[1, 0] = -np.sin(theta)
    m[1, 1] = np.cos(theta)
    return m
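
# Hedged sanity check: composing with the forward rotation (written inline here,
# since the module's rotation_matrix() helper is not shown) should give the identity.
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
assert np.allclose(R @ inverse_rotation(theta), np.eye(2))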
|
732183f7577969a1ecbbd0ee5ed86342c65991fc
| 3,642,209
|
import functools
def _config_validation_decorator(func):
"""A decorator used to easily run validations on configs loaded into dicts.
Add this decorator to any method that returns the config as a dict.
Raises:
ValueError: If the configuration fails validation
"""
@functools.wraps(func)
def validation_wrapper(*args, **kwargs):
config_dict = func(*args, **kwargs)
validate_dict(config_dict)
return config_dict
return validation_wrapper
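
# Hedged usage sketch; validate_dict comes from the original module and is assumed
# to raise ValueError when the config is invalid.
@_config_validation_decorator
def load_config_sketch():
    """Hypothetical loader returning a config as a dict."""
    return {"name": "demo", "retries": 3}
# load_config_sketch() would run validate_dict() on the returned dict before handing it back.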
|
1a63254e43c2920d6952105d9860138c395cbf2b
| 3,642,210
|
import functools
from PIL import Image
def image_transpose_exif(im):
"""
https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
Parameters
----------
im: PIL.Image
The image to be rotated.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [ # Val 0th row 0th col
[], # 0 (reserved)
[], # 1 top left
[Image.FLIP_LEFT_RIGHT], # 2 top right
[Image.ROTATE_180], # 3 bottom right
[Image.FLIP_TOP_BOTTOM], # 4 bottom left
[Image.FLIP_LEFT_RIGHT, Image.ROTATE_90], # 5 left top
[Image.ROTATE_270], # 6 right top
[Image.FLIP_TOP_BOTTOM, Image.ROTATE_90], # 7 right bottom
[Image.ROTATE_90], # 8 left bottom
]
try:
seq = exif_transpose_sequences[im._getexif()[exif_orientation_tag]]
except Exception:
return im
else:
return functools.reduce(type(im).transpose, seq, im)
|
4f166ea59c097e4306bd43db7165e56e8d289b6a
| 3,642,211
|
import os
def GetFileList(folder, surfixs=".xls,.xlsx"):
""" 遍历文件夹查找所有满足后缀的文件 """
surfix = surfixs.split(",")
if type(folder) == str:
folder = folder.decode('utf-8')
p = os.path.abspath(folder)
flist = []
if os.path.isdir(p):
FindFileBySurfix(flist, p, surfix)
else:
raise "folder param(%s) is not a real folder" % str(folder)
utf8list=[]
for it in flist:
utf8list.append(it.encode('utf-8'))
return utf8list
|
4b4def62c47335474fe2a4b3e4b6bce341e05bf9
| 3,642,212
|
def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):
"""Return optimised pore diameter and it's COM."""
args = elements, coordinates
    if com is None:
        com = center_of_mass(elements, coordinates)
if bounds is None:
pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2
bounds = (
(com[0]-pore_r, com[0]+pore_r),
(com[1]-pore_r, com[1]+pore_r),
(com[2]-pore_r, com[2]+pore_r)
)
minimisation = minimize(
correct_pore_diameter, x0=com, args=args, bounds=bounds)
pored = pore_diameter(elements, coordinates, com=minimisation.x)
return (pored[0], pored[1], minimisation.x)
|
f75a7c4246bc2ad096de309795d61afea78f7c3e
| 3,642,213
|
def animate_operators(operators, date):
"""Main."""
results = []
failures = []
length = len(operators)
count = 1
for i in operators:
try:
i = i.encode('utf-8')
except:
i = unicode(i, 'utf-8')
i = i.encode('utf-8')
print(i, count, "/", length)
try:
output = animate_one_day(i, date)
results.append(output)
print("success!")
output.to_csv("sketches/{}/{}/data/indiv_operators/{}.csv".format(OUTPUT_NAME, DATE, i))
except Exception:
failures.append(i)
print("failed:")
count += 1
return results, failures
|
d8dd6afdd4a13ab62a4c821bb43050af07fdc455
| 3,642,214
|
import warnings
import os
import json
import time
def random_sampler(vocs, evaluate_f,
executor=None,
output_path=None,
chunk_size=10,
max_samples=100,
verbose=None):
"""
Makes random samples based on vocs
"""
if verbose is not None:
warnings.warn('xopt.cnsga verbose option has been deprecated')
toolbox = Toolbox()
toolbox.register('evaluate', sampler_evaluate, evaluate_f=evaluate_f)
# Logo
logger.info(sampler_logo)
if not executor:
executor = DummyExecutor()
logger.info('No executor given. Running in serial mode.')
# Setup saving to file
if output_path:
path = full_path(output_path)
assert os.path.exists(path), f'output_path does not exist {path}'
def save(data):
file = new_date_filename(prefix='sampler-', path=path)
with open(file, 'w') as f:
json.dump(data, f, ensure_ascii=True, cls=NpEncoder) # , indent=4)
logger.info(f'Samples written to: {file}')
else:
# Dummy save
def save(data):
pass
# Initial batch
futures = [executor.submit(toolbox.evaluate, random_settings(vocs)) for _ in range(chunk_size)]
# Continuous loop
ii = 0
t0 = time.time()
done = False
results = []
all_results = []
while not done:
if ii > max_samples:
done = True
# Check the status of all futures
for ix in range(len(futures)):
# Examine a future
fut = futures[ix]
if not fut.done():
continue
# Future is done.
results.append(fut.result())
all_results.append(fut.result())
ii += 1
# Submit new job, keep in futures list
future = executor.submit(toolbox.evaluate, random_settings(vocs))
futures[ix] = future
# output
if ii % chunk_size == 0:
t1 = time.time()
dt = t1 - t0
t0 = t1
logger.info(f'{chunk_size} samples completed in {dt / 60:0.5f} minutes')
data = {'vocs': vocs}
# Reshape data
for k in ['inputs', 'outputs', 'error']:
data[k] = [r[k] for r in results]
save(data)
results = []
# Slow down polling. Needed for MPI to work well.
time.sleep(0.001)
# Cancel remaining jobs
for future in futures:
future.cancel()
data = {'vocs': vocs}
# Reshape data
for k in ['inputs', 'outputs', 'error']:
data[k] = [r[k] for r in all_results]
return data
|
ff93cd2fabe3b769975645a7f2335727702a079f
| 3,642,215
|
import torch
def get_span_encoding(key, zero_span_rep=None):
"""
Input: document key
Output: all possible span tuples and their encodings
"""
instance = reader.text_to_instance(combined_json[key]["sentences"])
instance.index_fields(model.vocab)
generator = iterator(instances=[instance])
batch = next(generator)
if type(get_map_loc().index) == int:
batch = move_to_device(batch, get_map_loc().index)
if zero_span_rep is not None: # for debugging
assert (
zero_span_rep % 2 == 0
), "zero_span_rep must be even as it corresponds to concat(endpoint, attended)"
shape = list(batch["spans"].shape)
shape[-1] = int(zero_span_rep / 2)
zeros = torch.zeros(shape)
return {
"original_text": batch["metadata"][0]["original_text"],
"all_spans": batch["spans"],
"endpoint_span_embeddings": zeros,
"attended_span_embeddings": zeros,
"roberta_embeddings": zeros,
}
output = model.forward(tensor_batch=batch, task_name="coref", for_training=False)
reps = {
"original_text": batch["metadata"][0]["original_text"],
"all_spans": output["all_spans"],
"endpoint_span_embeddings": output["endpoint_span_embeddings"],
"attended_span_embeddings": output["attended_span_embeddings"],
}
if include_bert:
reps["roberta_embeddings"] = get_bert_reps(
combined_json[key]["sentences"], output["all_spans"][0]
)
return reps
|
feff6b3a31471fe31cd2481d3ffcad5a87107371
| 3,642,216
|
import pandas as pd
import requests as req
from bs4 import BeautifulSoup as bs
def add_stocks(letter, page, get_last_page=False):
"""
goes through each row in table and adds to df if it is a stock
returns the appended df
"""
df = pd.DataFrame()
res = req.get(BASE_LINK.format(letter, page))
soup = bs(res.content, 'lxml')
table = soup.find('table', {'id': 'CompanylistResults'})
stks = table.findAll('tr')
stocks_on_page = (len(stks) - 1) / 2
for stk in stks[1:]:
deets = stk.findAll('td')
if len(deets) != 7:
continue
company_name = deets[0].text.strip()
ticker = deets[1].text.strip()
market_cap = deets[2].text.strip()
# 4th entry is blank
country = deets[4].text.strip()
ipo_year = deets[5].text.strip()
subsector = deets[6].text.strip()
df = df.append(pd.Series({'company_name': company_name,
'market_cap': market_cap,
'country': country,
'ipo_year': ipo_year,
'subsector': subsector},
name=ticker))
if get_last_page:
# get number of pages
lastpage_link = soup.find('a', {'id': 'two_column_main_content_lb_LastPage'})
last_page_num = int(lastpage_link['href'].split('=')[-1])
        return df, stocks_on_page, last_page_num
return df, stocks_on_page
|
ce86ef68a107fbae8d0028486bef8567dc24c43e
| 3,642,217
|
def available_parent_amount_rule(model, pr):
"""
Each parent has a limited resource budget; it cannot allocate more than that.
:param ConcreteModel model:
:param int pr: parent resource
:return: boolean indicating whether pr is staying within budget
"""
if model.parent_possible_allocations[pr]:
return sum(model.PARENT_AMT[pr, i] for i in model.parent_possible_allocations[pr]) <= model.avail_parent_amt[pr]
else:
return Constraint.Skip
|
e1ccc7e9ad4941bfffebefd34217cd58c5bc18e5
| 3,642,218
|
def extract_coords(filename):
"""Extract J2000 coordinates from filename or filepath
Parameters
----------
filename : str
name or path of file
Returns
-------
str
J2000 coordinates
"""
# in case path is entered as argument
filename = filename.split("/")[-1] if "/" in filename else filename
# to check whether declination is positive or negative
plus_minus = "+" if "+" in filename else "-"
    # extracting right ascension (ra) and declination (dec) from filename
filename = filename.split("_")[0].strip("J").split(plus_minus)
ra_extracted = [
"".join(filename[0][0:2]),
"".join(filename[0][2:4]),
"".join(filename[0][4:]),
]
dec_extracted = [
"".join(filename[1][0:2]),
"".join(filename[1][2:4]),
"".join(filename[1][4:]),
]
coordinates = " ".join(ra_extracted) + " " + plus_minus + " ".join(dec_extracted)
# return coordinates as a string in HH MM SS.SSS format
return coordinates
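
# Hedged example with a hypothetical filename following the J2000 convention the
# parser expects (JHHMMSS.ss+DDMMSS.s_...):
print(extract_coords("J123456.78+123456.7_spec.fits"))
# -> "12 34 56.78 +12 34 56.7"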
|
57f0ca79223116caa770a1dbea2eda84df146855
| 3,642,219
|
import numpy as np
def exponential(mantissa, base, power, left, right):
"""Return the exponential signal.
The signal's value will be `mantissa * base ^ (power * time)`.
Parameters:
mantissa: The mantissa, i.e. the scale of the signal
base: The exponential base
power: The exponential power
left: Left bound of the signal
        right: Right bound of the signal
Returns:
ndarray[float]: The values of the signal
ndarray[int]: The interval of the signal from left bound
to right bound
"""
n = np.arange(left, right+1, 1)
x = mantissa * (base ** (power * n))
return x, n
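
# Hedged example: mantissa=1, base=2, power=1 over n in [0, 4] gives 2**n element-wise.
x, n = exponential(1, 2, 1, 0, 4)
print(n)  # [0 1 2 3 4]
print(x)  # [1, 2, 4, 8, 16]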
|
a2fbd76b6426f600d19eb9caeb4edac88dea9a9c
| 3,642,220
|
def get_features(features, featurestore=None, featuregroups_version_dict={}, join_key=None, online=False):
"""
Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
metastore to find where the features are stored. It will try to construct the query first from the cached metadata,
if that fails it will re-try after reloading the cache
Example usage:
>>> # The API will default to version 1 for feature groups and the project's feature store
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore())
>>> #You can also explicitly define feature group, version, feature store, and join-key:
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"],
>>> featurestore=featurestore.project_featurestore(),
>>> featuregroups_version_dict={"trx_graph_summary_features": 1,
>>> "trx_summary_features": 1}, join_key="cust_id")
Args:
:features: a list of features to get from the featurestore
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
        :featuregroups_version_dict: (Optional) a dict with (featuregroup --> version) for all the featuregroups
            where the features reside; each featuregroup version defaults to 1
:join_key: (Optional) column name to join on
:online: a boolean flag whether to fetch the online feature group or the offline one (assuming that the
feature group has online serving enabled)
Returns:
A dataframe with all the features
"""
# try with cached metadata
try:
return core._do_get_features(features,
core._get_featurestore_metadata(featurestore,
update_cache=update_cache_default),
featurestore=featurestore,
featuregroups_version_dict=featuregroups_version_dict,
join_key=join_key,
online=online)
# Try again after updating cache
except:
return core._do_get_features(features, core._get_featurestore_metadata(featurestore, update_cache=True),
featurestore=featurestore,
featuregroups_version_dict=featuregroups_version_dict,
join_key=join_key,
online=online)
|
03cfc250bd921b291ac38fce5beddac3144e65ba
| 3,642,221
|
def get_flex_bounds(x, samples, nsig=1):
"""
Here, we wish to report the distribution of the subchunks 'sample'
along with the value of the full sample 'x'
So this function will return x, x_lower_bound, x_upper_bound,
where the range of the lower and upper bound expresses
the standard deviation of the sample distribution, the mean
of which is often not aligned with x.
"""
    mean = np.mean(samples)
    sig = np.std(samples)
return [x, nsig*sig+x-mean, nsig*sig+mean-x]
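
# Hedged usage sketch: the second and third entries are the distances from x down
# to mean - nsig*sig and up to mean + nsig*sig, i.e. asymmetric error-bar offsets.
import numpy as np
samples = np.array([0.9, 1.0, 1.1, 1.2])
value, low_err, high_err = get_flex_bounds(1.05, samples)
print(value, low_err, high_err)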
|
0fb4120307f61aafce902e92a32c66fd9aad91bf
| 3,642,222
|
def _parse_multi_header(headers):
"""
Parse out and return the data necessary for generating ZipkinAttrs.
Returns a dict with the following keys:
'trace_id': str or None
'span_id': str or None
'parent_span_id': str or None
'sampled_str': '0', '1', 'd', or None (defer)
"""
parsed = {
"trace_id": headers.get("X-B3-TraceId", None),
"span_id": headers.get("X-B3-SpanId", None),
"parent_span_id": headers.get("X-B3-ParentSpanId", None),
"sampled_str": headers.get("X-B3-Sampled", None),
}
# Normalize X-B3-Flags and X-B3-Sampled to None, '0', '1', or 'd'
if headers.get("X-B3-Flags") == "1":
parsed["sampled_str"] = "d"
if parsed["sampled_str"] == "true":
parsed["sampled_str"] = "1"
elif parsed["sampled_str"] == "false":
parsed["sampled_str"] = "0"
if parsed["sampled_str"] not in (None, "1", "0", "d"):
raise ValueError("Got invalid X-B3-Sampled: %s" % parsed["sampled_str"])
for k in ("trace_id", "span_id", "parent_span_id"):
if parsed[k] == "":
raise ValueError("Got empty-string %r" % k)
if parsed["trace_id"] and not parsed["span_id"]:
raise ValueError("Got X-B3-TraceId but not X-B3-SpanId")
elif parsed["span_id"] and not parsed["trace_id"]:
raise ValueError("Got X-B3-SpanId but not X-B3-TraceId")
# Handle the common case of no headers at all
if not parsed["trace_id"] and not parsed["sampled_str"]:
raise ValueError() # won't trigger a log message
return parsed
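
# Hedged example: typical B3 headers for a sampled span.
headers = {
    "X-B3-TraceId": "463ac35c9f6413ad48485a3953bb6124",
    "X-B3-SpanId": "a2fb4a1d1a96d312",
    "X-B3-Sampled": "1",
}
print(_parse_multi_header(headers))
# {'trace_id': '463ac35c9f6413ad48485a3953bb6124', 'span_id': 'a2fb4a1d1a96d312',
#  'parent_span_id': None, 'sampled_str': '1'}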
|
2ac3d0cbee196385e970bcc85827c1a467b5bb3b
| 3,642,223
|
import cv2
import numpy
def get_tgimg(img):
"""
处理提示图片,提取提示字符
:param img: 提示图片
:type img:
:return: 返回原图描边,提示图片按顺序用不同颜色框,字符特征图片列表
:rtype: img 原图, out 特征图片列表(每个字), templets 角度变换后的图
"""
imgBW = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = imgBW.shape
_, imgBW = cv2.threshold(imgBW, 0, 255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU)
img2 = cv2.erode(imgBW, None, iterations=3)
img2 = cv2.dilate(img2, None, iterations=3)
out = numpy.full((20 + h, 20 + w), 255, numpy.uint8)
copy_image(out, 10, 10, img2)
out, cnts, hierarchy = cv2.findContours(out, cv2.RETR_LIST,
cv2.CHAIN_APPROX_NONE)
rects = []
# cnts[-1] 边框
for cnt in cnts[:-1]:
cnt -= 10
x1 = cnt[:, :, 0].min()
y1 = cnt[:, :, 1].min()
x2 = cnt[:, :, 0].max()
y2 = cnt[:, :, 1].max()
x1 = 0 if x1 < 0 else x1
y1 = 0 if y1 < 0 else y1
x2 = w - 1 if x2 > w - 1 else x2
y2 = h - 1 if y2 > h - 1 else y2
rects.append((x1, y1, x2, y2))
cv2.drawContours(img, cnt, -1, [0, 0, 255])
# cv2.rectangle(img, (x1, y1), (x2, y2), [0, 0, 255])
rects.sort()
out = numpy.full(imgBW.shape, 255, numpy.uint8)
x0 = spacing = 3
templets = []
for x1, y1, x2, y2 in rects:
imgchar = numpy.full((30, 30), 255, numpy.uint8)
tmpl = imgBW[y1:y2 + 1, x1:x2 + 1]
if value2 != (max_value2 // 2):
tmpl = rotate_image(tmpl, (max_value2 // 2 - value2) * 10)
templets.append(tmpl)
copy_image(imgchar, 0, (30 - y2 + y1 - 1) // 2, tmpl)
copy_image(out, x0, 0, imgchar)
x0 += x2 - x1 + 1 + spacing
out = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
i = 0
x0 = spacing
for x1, y1, x2, y2 in rects:
cv2.rectangle(out, (x0, 0), (x0 + x2 - x1 + 1, 29), COLORS[i])
x0 += x2 - x1 + 1 + spacing
i += 1
return img, out, templets
|
5f48e2b639dd3027e6463b1a99a8b7c13c043f88
| 3,642,224
|
def brand_profitsharing_order_query(self, transaction_id, out_order_no, sub_mchid):
"""查询连锁品牌分账结果
:param transaction_id: 微信支付订单号,示例值:'4208450740201411110007820472'
:param out_order_no: 商户分账单号,只能是数字、大小写字母_-|*@,示例值:'P20150806125346'
:param sub_mchid: 子商户的商户号,由微信支付生成并下发。示例值:'1900000109'
"""
if sub_mchid:
path = '/v3/brand/profitsharing/orders?sub_mchid=%s' % sub_mchid
else:
raise Exception('sub_mchid is not assigned.')
if transaction_id and out_order_no:
path = '%s&transaction_id=%s&out_order_no=%s' % (transaction_id, out_order_no)
else:
raise Exception('transaction_id or out_order_no is not assigned.')
return self._core.request(path)
|
cb1af072f2b4f94f632817baff6cdfea66110873
| 3,642,225
|
def get_controller_from_module(module, cname):
"""
Extract classes that inherit from BaseController
"""
if hasattr(module, '__controller__'):
controller_classname = module.__controller__
else:
controller_classname = cname[0].upper() + cname[1:].lower() + 'Controller'
controller_class = module.__dict__.get(controller_classname, None)
return controller_class
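
# Hedged illustration with a throwaway module object: without __controller__ the
# class name is derived from cname as "<Cname>Controller".
import types

class PagesController:
    pass

demo = types.ModuleType("demo")
demo.PagesController = PagesController
print(get_controller_from_module(demo, "PAGES"))  # -> <class 'PagesController'>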
|
b450105f6ec38a03fe461c5d9c07c4652da0efd3
| 3,642,226
|
def exp(d: D) -> NumDict:
"""Compute the base-e exponential of d."""
return d.exp()
|
a4d5baf6bdfadb48add80096bb4d167f01572b69
| 3,642,227
|
def Main(operation, args):
"""Supports 2 operations
1. Consulting the existing data (get)
> get ["{address}"]
2. Inserting data about someone else (certify)
> certify ["{address}","{hash}"]
"""
if len(args) == 0:
Log('You need to provide at least 1 parameter - [address]')
return 'Error: You need to provide at least 1 parameter - [address]'
address = args[0]
if len(address) != 20:
Log('Wrong address size')
return 'Error: Wrong address size'
if operation == 'get':
return get_certs(address)
elif operation == 'certify':
# Caller cannot add certifications to his address
if CheckWitness(address):
Log('You cannot add certifications for yourself')
return 'Error: You cannot add certifications for yourself'
if 3 != len(args):
Log('Certify requires 3 parameters - [address] [caller_address] [hash]')
return 'Error: Certify requires 3 parameters - [address] [caller_address] [hash]'
caller_address = args[1]
# To make sure the address is from the caller
if not CheckWitness(caller_address):
Log('You need to provide your own address')
return 'Error: You need to provide your own address'
content = args[2]
return add_certification(address, caller_address, content)
else:
Log('Invalid Operation')
return 'Error": "Invalid Operation'
|
0dac2ddb4dc3d259e30f5a3c100a39ff8d7b940d
| 3,642,228
|
def get_latest_file_list_orig1(input_list, start_time, num_files):
"""
Return a list of file names, trying to get one from each index file in input_list.
The starting time is start_time and the number of days to investigate is num_days.
"""
out = []
for rind in input_list:
        # Build the list of times (named tlist so the time_list() helper is not shadowed)
        tlist = time_list(start_time, rind.get_hours() * 3600, num_files)
# print "rind: dir", rind.get_base_dir(), rind.get_index_date()
        line_list, index_date_list = rind.readlines_list_rev(tlist, 1)
flist = get_files(line_list)
if flist != []:
out.append("%s/%s/%s" % (rind.get_base_dir(), index_date_list[0], flist[0]))
else:
out.append("None")
    print(out)
return out
|
744b5392d136129a1135cea3ad577817798ef582
| 3,642,229
|
def get_ogheader(blob, url=None):
"""extract Open Graph markup into a dict
The OG header section is delimited by a line of only `---`.
Note that the page title is not provided as Open Graph metadata if
the image metadata is not specified.
"""
found = False
ogheader = dict()
for line in blob.split('\n'):
if line == '---':
found = True
break
if line.startswith('image: '):
toks = line.split()
assert len(toks) == 2
ogheader['image'] = toks[1]
if not found:
ogheader = dict() # Ignore any matches as false positives
return ogheader
if url is not None:
assert 'url' not in ogheader
ogheader['url'] = url
for line in blob.split('\n'):
if line.startswith('# '):
ogheader['title'] = line[2:]
return ogheader
|
4edd7c5545ddef241ee2bfd5e316e47a336aaa3f
| 3,642,230
|
def list_ingredient():
"""List all ingredients currently in the database"""
ingredients = IngredientCollection()
ingredients.load_all()
return jsonify(ingredients=[x.to_dict() for x in ingredients.models])
|
d3275dba18922b9f4558f23eedda3ae25d8a25d9
| 3,642,231
|
import re
def ParseSavedQueries(cnxn, post_data, project_service, prefix=''):
"""Parse form data for the Saved Queries part of an admin form."""
saved_queries = []
for i in xrange(1, MAX_QUERIES + 1):
if ('%ssavedquery_name_%s' % (prefix, i)) not in post_data:
continue # skip any entries that are blank or have no predicate.
name = post_data['%ssavedquery_name_%s' % (prefix, i)].strip()
if not name:
continue # skip any blank entries
if '%ssavedquery_id_%s' % (prefix, i) in post_data:
query_id = int(post_data['%ssavedquery_id_%s' % (prefix, i)])
else:
query_id = None # a new query_id will be generated by the DB.
project_names_str = post_data.get(
'%ssavedquery_projects_%s' % (prefix, i), '')
project_names = [pn.strip().lower()
for pn in re.split('[],;\s]+', project_names_str)
if pn.strip()]
project_ids = project_service.LookupProjectIDs(
cnxn, project_names).values()
base_id = int(post_data['%ssavedquery_base_%s' % (prefix, i)])
query = post_data['%ssavedquery_query_%s' % (prefix, i)].strip()
subscription_mode_field = '%ssavedquery_sub_mode_%s' % (prefix, i)
if subscription_mode_field in post_data:
subscription_mode = post_data[subscription_mode_field].strip()
else:
subscription_mode = None
saved_queries.append(tracker_bizobj.MakeSavedQuery(
query_id, name, base_id, query, subscription_mode=subscription_mode,
executes_in_project_ids=project_ids))
return saved_queries
|
5db4ecdf22eb61c1c43914f00042862142664590
| 3,642,232
|
def label_anchors(anchors, anchor_is_untruncated, gt_classes, gt_bboxes, background_id, iou_low_threshold=0.41, iou_high_threshold=0.61):
""" Get the labels of the anchors. Each anchor can be labeled as positive (1), negative (0) or ambiguous (-1). Truncated anchors are always labeled as ambiguous. """
n = anchors.shape[0]
k = gt_bboxes.shape[0]
# Compute the IoUs of the anchors and ground truth boxes
tiled_anchors = np.tile(np.expand_dims(anchors, 1), (1, k, 1))
tiled_gt_bboxes = np.tile(np.expand_dims(gt_bboxes, 0), (n, 1, 1))
tiled_anchors = tiled_anchors.reshape((-1, 4))
tiled_gt_bboxes = tiled_gt_bboxes.reshape((-1, 4))
ious, ioas, iogs = iou_bbox(tiled_anchors, tiled_gt_bboxes)
ious = ious.reshape(n, k)
ioas = ioas.reshape(n, k)
iogs = iogs.reshape(n, k)
# Label each anchor based on its max IoU
max_ious = np.max(ious, axis=1)
max_ioas = np.max(ioas, axis=1)
max_iogs = np.max(iogs, axis=1)
best_gt_bbox_ids = np.argmax(ious, axis=1)
labels = -np.ones((n), np.int32)
positive_idx = np.where(max_ious >= iou_high_threshold)[0]
negative_idx = np.where(max_ious < iou_low_threshold)[0]
labels[positive_idx] = 1
labels[negative_idx] = 0
# Truncated anchors are always ambiguous
ignore_idx = np.where(anchor_is_untruncated==0)[0]
labels[ignore_idx] = -1
bboxes = gt_bboxes[best_gt_bbox_ids]
classes = gt_classes[best_gt_bbox_ids]
classes[np.where(labels<1)[0]] = background_id
max_ious[np.where(anchor_is_untruncated==0)[0]] = -1
max_ioas[np.where(anchor_is_untruncated==0)[0]] = -1
max_iogs[np.where(anchor_is_untruncated==0)[0]] = -1
return labels, bboxes, classes, max_ious, max_ioas, max_iogs
|
39dc4d29f5a2491c2f818e7af2c01e1824afff56
| 3,642,233
|
import hashlib
def make_hash_md5(obj):
"""make_hash_md5
Args:
obj (any): anything that can be hashed.
Returns:
hash (str): hash from object.
"""
hasher = hashlib.md5()
hasher.update(repr(make_hashable(obj)).encode())
return hasher.hexdigest()
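
# make_hashable() is defined elsewhere in the original module. As a hedged sketch of
# what such a helper typically does (hypothetical, not the author's implementation):
def _make_hashable_sketch(obj):
    """Recursively convert nested containers into hashable tuples."""
    if isinstance(obj, dict):
        return tuple(sorted((k, _make_hashable_sketch(v)) for k, v in obj.items()))
    if isinstance(obj, (list, tuple, set)):
        return tuple(_make_hashable_sketch(o) for o in obj)
    return obj
# With such a helper, make_hash_md5({"b": [1, 2], "a": 3}) yields the same digest
# regardless of dict key order.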
|
c8c0f0202f171e2557eba6a3824ac2f9a07dada9
| 3,642,234
|
def fbx_data_bindpose_element(root, me_obj, me, scene_data, arm_obj=None, mat_world_arm=None, bones=[]):
"""
Helper, since bindpose are used by both meshes shape keys and armature bones...
"""
if arm_obj is None:
arm_obj = me_obj
# We assume bind pose for our bones are their "Editmode" pose...
# All matrices are expected in global (world) space.
bindpose_key = get_blender_bindpose_key(arm_obj.bdata, me)
fbx_pose = elem_data_single_int64(root, b"Pose", get_fbx_uuid_from_key(bindpose_key))
fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
fbx_pose.add_string(b"BindPose")
elem_data_single_string(fbx_pose, b"Type", b"BindPose")
elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + (1 if (arm_obj != me_obj) else 0) + len(bones))
# First node is mesh/object.
mat_world_obj = me_obj.fbx_object_matrix(scene_data, global_space=True)
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", me_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_obj))
# Second node is armature object itself.
if arm_obj != me_obj:
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", arm_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_arm))
# And all bones of armature!
mat_world_bones = {}
for bo_obj in bones:
bomat = bo_obj.fbx_object_matrix(scene_data, rest=True, global_space=True)
mat_world_bones[bo_obj] = bomat
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", bo_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(bomat))
return mat_world_obj, mat_world_bones
|
9d205cd3c7a0242dbfaad42d1e7f0b9b3b81eb75
| 3,642,235
|
def partitioned_rml_estimator(y, sigma2i, iterations=50):
"""
Implementation of the robust maximum likelihood estimator.
Parameters
----------
y : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
The variant scores matrix
sigma2i : :py:class:`~numpy.ndarray`, (n_replicates, n_variants)
The score variance matrix
iterations : `int`
Number of iterations to perform.
Returns
-------
`tuple`
Tuple of :py:class:`~numpy.ndarray` objects, corresponding to
``betaML``, ``var_betaML``, ``eps``.
Notes
-----
@book{demidenko2013mixed,
title={Mixed models: theory and applications with R},
author={Demidenko, Eugene},
year={2013},
publisher={John Wiley \& Sons}
}
"""
# Initialize each array to be have len number of variants
max_replicates = y.shape[0]
betaML = np.zeros(shape=(y.shape[1],)) * np.nan
var_betaML = np.zeros(shape=(y.shape[1],)) * np.nan
eps = np.zeros(shape=(y.shape[1],)) * np.nan
nreps = np.zeros(shape=(y.shape[1],)) * np.nan
y_num_nans = np.sum(np.isnan(y), axis=0)
for k in range(0, max_replicates - 1, 1):
# Partition y based on the number of NaNs a column has,
# corresponding to the number of replicates a variant has
# across selections.
selector = y_num_nans == k
if np.sum(selector) == 0:
continue
y_k = np.apply_along_axis(lambda col: col[~np.isnan(col)], 0, y[:, selector])
sigma2i_k = np.apply_along_axis(
lambda col: col[~np.isnan(col)], 0, sigma2i[:, selector]
)
betaML_k, var_betaML_k, eps_k = rml_estimator(y_k, sigma2i_k, iterations)
# Handles the case when SE is 0 resulting in NaN values.
betaML_k[np.isnan(betaML_k)] = 0.0
var_betaML_k[np.isnan(var_betaML_k)] = 0.0
eps_k[np.isnan(eps_k)] = 0.0
betaML[selector] = betaML_k
var_betaML[selector] = var_betaML_k
eps[selector] = eps_k
nreps[selector] = max_replicates - k
return betaML, var_betaML, eps, nreps
|
b4ec6ad8af85cdf29470fa132d7f6008617b3a66
| 3,642,236
|
import math
import numpy as np
def inv_kinema_cal_3(JOINT_ANGLE_OFFSET, L, H, position_to_move):
"""逆運動学を解析的に解く関数.
指先のなす角がηになるようなジョイント角度拘束条件を追加して逆運動学問題を解析的に解く
引数1:リンク長さの配列.nd.array(6).単位は[m]
引数2:リンク高さの配列.nd.array(1).単位は[m]
引数3:目標位置(直交座標系)行列.nd.array((3, 1)).単位は[m]
戻り値(成功したとき):ジョイント角度配列.nd.array((6)).単位は[°]
戻り値(失敗したとき):引数に関係なくジョイント角度配列(90, 90, 90, 90, 90, 0).nd.array((6)).単位は[°]を返す
※戻り値のq_3,q_4はサーボの定義と異なる
"""
final_offset = 0.012
#final_offset = 0
    # Cylindrical-coordinate representation of position_to_move (the target position)
r_before = math.sqrt(position_to_move[0, 0] ** 2 + position_to_move[1, 0] ** 2) + 0.03
r_to_move = math.sqrt(r_before ** 2 + final_offset ** 2) # [m]
#r_to_move = math.sqrt(r_before ** 2) # [m]
#theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) # [rad]
theta_to_move = np.arctan2(position_to_move[1, 0], position_to_move[0, 0]) - np.arcsin(final_offset / r_before) # [rad]
#theta_to_move = np.arccos(position_to_move[0, 0] / r_to_move) - np.arcsin(final_offset / r_before) # [rad]
z_to_move = position_to_move[2, 0] # [m]
print('移動先の円筒座標系表現は\n', r_to_move, '[m]\n', int(theta_to_move * 180 / np.pi), '[°]\n', z_to_move, '[m]')
    # Constants defined for the computation
A = L[2]
B = L[3]
    # Closed-form inverse-kinematics computation
#old1 = time.time()
    deta = np.pi / 180 # step size of eta; iterated in 1-degree increments
    eta = np.arange(0, np.pi + deta, deta, dtype = 'float64') # array of all candidate eta values
print('etaの形は', eta.shape)
    # Pattern a
q_2_a = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_a_1 = np.concatenate([[eta], [q_2_a]], 0) # 縦に連結
qlist_a_2 = np.delete(qlist_a_1, np.where((np.isnan(qlist_a_1)) | (qlist_a_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180)) < qlist_a_1))[1], 1) # q_2_aがNAN,またはジョイント制限外の列を削除
q_3_a = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_a_2[0, :]) - H[0] * np.sin(qlist_a_2[0, :])- A * np.cos(qlist_a_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_a_2[0, :]) - H[0] * np.cos(qlist_a_2[0, :]) - A * np.sin(qlist_a_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_a_2[1, :] + np.pi / 4 # [rad]
qlist_a_3 = np.concatenate([qlist_a_2, [q_3_a]], 0) # 縦に連結
qlist_a_4 = np.delete(qlist_a_3, np.where((np.isnan(qlist_a_3)) | (qlist_a_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_a_3))[1], 1) # q_3_aがNAN,またはジョイント制限外の列を削除
q_4_a = -qlist_a_4[0, :] + np.pi - qlist_a_4[1, :] - qlist_a_4[2, :]
qlist_a_5 = np.concatenate([qlist_a_4, [q_4_a]], 0) # 縦に連結
qlist_a_6 = np.delete(qlist_a_5, np.where((qlist_a_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_a_5))[1], 1) # q_4_aがジョイント制限外の列を削除
#print('qlist_a_6の形は', qlist_a_6.shape)
#print('qlist_a_6 = ', qlist_a_6)
    # Pattern b
q_2_b = np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_b_1 = np.concatenate([[eta], [q_2_b]], 0) # 縦に連結
qlist_b_2 = np.delete(qlist_b_1, np.where((np.isnan(qlist_b_1)) | (qlist_b_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # q_2_bがNAN,またはジョイント制限外の列を削除
q_3_b = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_b_2[0, :]) - H[0] * np.sin(qlist_b_2[0, :])- A * np.cos(qlist_b_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_b_2[0, :]) - H[0] * np.cos(qlist_b_2[0, :]) - A * np.sin(qlist_b_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_b_2[1, :] + np.pi / 4 # [rad]
qlist_b_3 = np.concatenate([qlist_b_2, [q_3_b]], 0) # 縦に連結
qlist_b_4 = np.delete(qlist_b_3, np.where((np.isnan(qlist_b_3)) | (qlist_b_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_b_3))[1], 1) # q_3_bがNAN,またはジョイント制限外の列を削除
q_4_b = -qlist_b_4[0, :] + np.pi - qlist_b_4[1, :] - qlist_b_4[2, :]
qlist_b_5 = np.concatenate([qlist_b_4, [q_4_b]], 0) # 縦に連結
qlist_b_6 = np.delete(qlist_b_5, np.where((qlist_b_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_b_5))[1], 1) # q_3_bがジョイント制限外の列を削除
#print('qlist_b_6の形は', qlist_b_6.shape)
#print('qlist_b_6 = ', qlist_b_6)
    # Pattern c
q_2_c = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_c_1 = np.concatenate([[eta], [q_2_c]], 0) # 縦に連結
qlist_c_2 = np.delete(qlist_c_1, np.where((np.isnan(qlist_c_1)) | (qlist_c_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # q_2_cがNAN,またはジョイント制限外の列を削除
q_3_c = np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_c_2[0, :]) - H[0] * np.sin(qlist_c_2[0, :])- A * np.cos(qlist_c_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_c_2[0, :]) - H[0] * np.cos(qlist_c_2[0, :]) - A * np.sin(qlist_c_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_c_2[1, :] + np.pi / 4 # [rad]
qlist_c_3 = np.concatenate([qlist_c_2, [q_3_c]], 0) # 縦に連結
qlist_c_4 = np.delete(qlist_c_3, np.where((np.isnan(qlist_c_3)) | (qlist_c_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_c_3))[1], 1) # q_3_cがNAN,またはジョイント制限外の列を削除
q_4_c = -qlist_c_4[0, :] + np.pi - qlist_c_4[1, :] - qlist_c_4[2, :]
qlist_c_5 = np.concatenate([qlist_c_4, [q_4_c]], 0) # 縦に連結
qlist_c_6 = np.delete(qlist_c_5, np.where((qlist_c_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_c_5))[1], 1) # q_3_cがジョイント制限外の列を削除
#print('qlist_c_6の形は', qlist_c_6.shape)
#print('qlist_c_6 = ', (qlist_c_6 * 180 / np.pi).astype('int64'))
    # Pattern d
q_2_d = np.pi - np.arcsin((A ** 2 - B ** 2 + (r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2) \
/ (2 * A * np.sqrt((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) ** 2 + (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta)) ** 2))) \
- np.arctan((r_to_move - (L[4] + L[5] + L[6]) * np.cos(eta) - H[0] * np.sin(eta)) / (z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(eta) - H[0] * np.cos(eta))) # [rad]
qlist_d_1 = np.concatenate([[eta], [q_2_d]], 0) # 縦に連結
qlist_d_2 = np.delete(qlist_d_1, np.where((np.isnan(qlist_d_1)) | (qlist_d_1 < 0) | ((np.pi * (1 - JOINT_ANGLE_OFFSET[1] / 180))< qlist_a_1))[1], 1) # q_2_dがNAN,またはジョイント制限外の列を削除
q_3_d = np.pi - np.arcsin((r_to_move - (L[4] + L[5] + L[6]) * np.cos(qlist_d_2[0, :]) - H[0] * np.sin(qlist_d_2[0, :])- A * np.cos(qlist_d_2[1, :]) + z_to_move - L[0] - L[1] + (L[4] + L[5] + L[6]) * np.sin(qlist_d_2[0, :]) - H[0] * np.cos(qlist_d_2[0, :]) - A * np.sin(qlist_d_2[1, :])) \
/ (np.sqrt(2) * B)) - qlist_d_2[1, :] + np.pi / 4 # [rad]
qlist_d_3 = np.concatenate([qlist_d_2, [q_3_d]], 0) # 縦に連結
qlist_d_4 = np.delete(qlist_d_3, np.where((np.isnan(qlist_d_3)) | (qlist_d_3 < (np.pi * (JOINT_ANGLE_OFFSET[2] / 180))) | (np.pi < qlist_d_3))[1], 1) # q_3_dがNAN,またはジョイント制限外の列を削除
q_4_d = -qlist_d_4[0, :] + np.pi - qlist_d_4[1, :] - qlist_d_4[2, :]
qlist_d_5 = np.concatenate([qlist_d_4, [q_4_d]], 0) # 縦に連結
qlist_d_6 = np.delete(qlist_d_5, np.where((qlist_d_5 < (np.pi * (JOINT_ANGLE_OFFSET[3] / 180))) | (np.pi < qlist_d_5))[1], 1) # q_3_dがジョイント制限外の列を削除
#print('qlist_d_6の形は', qlist_d_6.shape)
#print('qlist_d_6 = ', qlist_d_6)
#print('ベクトル化で計算', time.time() - old1,'[s]')
    qlist_abcd_6 = np.concatenate([qlist_a_6, qlist_b_6, qlist_c_6, qlist_d_6], 1) # concatenate the results of patterns a, b, c, d horizontally
print(qlist_abcd_6)
    qlist_q2norm = np.abs(np.pi / 2 - qlist_abcd_6[1, :]) # absolute value of pi/2 - q_2
print(qlist_q2norm)
    qlist_abcd_62 = np.concatenate([qlist_abcd_6, [qlist_q2norm]], 0) # stack vertically
print(qlist_abcd_62)
    k = np.where(qlist_abcd_62[4, :] == np.min(qlist_abcd_62[4, :])) # get the index tuple of the column whose q_2 is closest to pi/2
print(k)
print(qlist_abcd_62[:, k])
    # Convert to servo command angles and cast to int (pyFirmata PWM only accepts integer commands)
q_1_command = int(np.round(theta_to_move * 180 / np.pi)) # [°]
q_2_command = int(np.round(qlist_abcd_62[1, k] * 180 / np.pi)) # [°]
q_3_command = int(np.round(qlist_abcd_62[2, k] * 180 / np.pi)) # [°]
q_4_command = int(np.round(qlist_abcd_62[3, k] * 180 / np.pi)) # [°]
q_5_command = int(np.round(np.pi / 2 * 180 / np.pi)) # [°]
q_6_command = int(np.round(0 * 180 / np.pi)) # [°]
z = np.array([q_1_command, q_2_command, q_3_command, q_4_command, q_5_command, q_6_command])
print(z)
return z
|
4368c847b9918f3682e2ca0336008af49f0823cf
| 3,642,237
|
import time
import os
def get_log_filename(log_directory, device_name, name_prefix=""):
"""Returns the full path of log filename using the information provided.
Args:
log_directory (path): to where the log file should be created.
device_name (str): to use in the log filename
name_prefix (str): string to prepend to the start of the log file.
Returns:
str: Path to log filename using the information provided.
"""
log_timestamp = time.strftime("%Y%m%d-%H%M%S")
if name_prefix:
log_file_name = "{}-{}-{}.txt".format(name_prefix, device_name,
log_timestamp)
else:
log_file_name = "{}-{}.txt".format(device_name, log_timestamp)
return os.path.join(log_directory, log_file_name)
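
# Hedged usage example; the timestamp suffix varies with the current time.
path = get_log_filename("/tmp/logs", "device-1", name_prefix="setup")
print(path)  # e.g. /tmp/logs/setup-device-1-20240101-120000.txt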
|
48c0540c9717e54ab4389a2c9f6a5e31696c4595
| 3,642,238
|
def delete_voting(request, slug):
"""Delete voting view."""
if request.method == 'POST':
poll = get_object_or_404(Poll, slug=slug)
if poll.automated_poll and Bug.objects.filter(id=poll.bug.id):
# This will trigger a cascade delete, removing also the poll.
Bug.objects.filter(id=poll.bug.id).delete()
else:
poll.delete()
messages.success(request, 'Voting successfully deleted.')
statsd.incr('voting.delete_voting')
return redirect('voting_list_votings')
|
bd604104fe17321b7a66a6b742134734d196d8c8
| 3,642,239
|
import time
import requests
def http_delete_request(
portia_config: dict,
endpoint: str,
payload: dict=None,
params: dict=None,
optional_headers: dict=None
) -> object:
"""Makes an HTTP DELETE request.
Arguments:
portia_config {dict} -- Portia's configuration arguments
endpoint {str} -- endpoint to make the request to
Keyword Arguments:
payload {dict} -- payload to send to the service (default: {None})
params {dict} -- params to send to the service (default: {None})
optional_headers {dict} -- dictionary with other headers
(default: {None})
Returns:
object -- response object
"""
headers = {
'Authorization': 'Bearer {0}' \
.format(portia_config.get('authorization'))
}
if optional_headers is not None:
headers = {**headers, **optional_headers}
start = time.time()
response = requests.delete(
'{0}{1}'.format(portia_config.get('baseurl'), endpoint),
headers=headers,
params=params,
json=payload
)
end = time.time()
if portia_config.get('debug') == True:
print(
'[portia-debug]: status: {0} | {1:.4f} sec. | {2}' \
.format(response.status_code, end - start, response.url.encode('utf8'))
)
return response
|
86096d2fbfd950cafdbb9689f48d7078ed1c545c
| 3,642,240
|
from django.contrib.auth import authenticate, login
def http_basic_auth(func):
"""
Attempts to login user with u/p provided in HTTP_AUTHORIZATION header.
If successful, returns the view, otherwise returns a 401.
If PING_BASIC_AUTH is False, then just return the view function
Modified code by:
http://djangosnippets.org/users/bthomas/
from
http://djangosnippets.org/snippets/1304/
"""
@wraps(func)
def _decorator(request, *args, **kwargs):
if getattr(settings, 'PING_BASIC_AUTH', PING_BASIC_AUTH):
if request.META.has_key('HTTP_AUTHORIZATION'):
authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if authmeth.lower() == 'basic':
auth = auth.strip().decode('base64')
username, password = auth.split(':', 1)
user = authenticate(username=username, password=password)
if user:
login(request, user)
return func(request, *args, **kwargs)
else:
return HttpResponse("Invalid Credentials", status=401)
else:
return HttpResponse("No Credentials Provided", status=401)
else:
return func(request, *args, **kwargs)
return _decorator
|
fd99ce1464acb88bd9f68b6b85233dd44cb81bfd
| 3,642,241
|
def stats_to_df(stats_data):
""" Transform Statistical API response into a pandas.DataFrame
"""
df_data = []
for single_data in stats_data['data']:
df_entry = {}
is_valid_entry = True
df_entry['interval_from'] = parse_time(
single_data['interval']['from']).date()
df_entry['interval_to'] = parse_time(
single_data['interval']['to']).date()
for output_name, output_data in single_data['outputs'].items():
for band_name, band_values in output_data['bands'].items():
band_stats = band_values['stats']
if band_stats['sampleCount'] == band_stats['noDataCount']:
is_valid_entry = False
break
for stat_name, value in band_stats.items():
col_name = f'{output_name}_{band_name}_{stat_name}'
if stat_name == 'percentiles':
for perc, perc_val in value.items():
perc_col_name = f'{col_name}_{perc}'
df_entry[perc_col_name] = perc_val
else:
df_entry[col_name] = value
if is_valid_entry:
df_data.append(df_entry)
return pd.DataFrame(df_data)
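
# Hedged sketch of the payload shape this helper expects (field names follow the
# Statistical API response structure accessed above; the values are made up):
sample_stats = {
    "data": [
        {
            "interval": {"from": "2023-01-01T00:00:00Z", "to": "2023-01-08T00:00:00Z"},
            "outputs": {
                "ndvi": {
                    "bands": {
                        "B0": {
                            "stats": {
                                "sampleCount": 100,
                                "noDataCount": 4,
                                "mean": 0.42,
                                "percentiles": {"50.0": 0.41},
                            }
                        }
                    }
                }
            },
        }
    ]
}
# stats_to_df(sample_stats) would then yield one row with columns such as
# interval_from, interval_to, ndvi_B0_mean and ndvi_B0_percentiles_50.0.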
|
d77d3ee46c68c737ce8274458d8564256f8121a7
| 3,642,242
|
def make_risk_metrics(
stocks,
weights,
start_date,
end_date
):
"""
Parameters:
        stocks: List of tickers compatible with the yfinance module
weights: List of weights, probably going to be evenly distributed
"""
if mlfinlabExists:
Var, VaR, CVaR, CDaR = generate_risk_stats(
stocks,
weights,
start_date=start_date,
end_date=end_date
)
else:
Var, VaR, CVaR, CDaR = 0,0,0,0
return [
{
"value": Var,
"name": "Variance",
"description": "This measure can be used to compare portfolios" \
" based on estimations of the volatility of returns."
},
{
"value": VaR,
"name": "Value at Risk",
"description": "This measure can be used to compare portfolios" \
" based on the amount of investments that can be lost in the next observation, assuming the returns for assets follow a multivariate normal distribution."
},
{
"value": CVaR,
"name": "Expected Shortfall",
"description": "This measure can be used to compare portfolios" \
" based on the average amount of investments that can be lost in a worst-case scenario, assuming the returns for assets follow a multivariate normal distribution."
},
{
"value": CDaR,
"name": "Conditional Drawdown at Risk",
"description": "This measure can be used to compare portfolios"
" based on the average amount of a portfolio drawdown in a worst-case scenario, assuming the drawdowns follow a normal distribution."
}
]
|
8a24d542a8b7475a66c0c914866ee4225564b8ed
| 3,642,243
|
def decrypt(bin_k, bin_cipher):
"""decrypt w/ DES"""
return Crypto.Cipher.DES.new(bin_k).decrypt(bin_cipher)
|
fa8331b792ae4003c2fc14fd84b2ac82306bc7b2
| 3,642,244
|
import subprocess
def remove_app(app_name, app_path):
"""Remove an application."""
# usage: mbl-app-manager remove [-h] app_name app_path
print("Remove {} from {}".format(app_name, app_path))
command = [MBL_APP_MANAGER, "-v", "remove", app_name, app_path]
print("Executing command: {}".format(command))
return subprocess.run(command, check=False).returncode
|
2d939608db40731ce4c20d0c587a8b99c04d864b
| 3,642,245
|
from ucscsdk.mometa.vnic.VnicIScsiLCP import VnicIScsiLCP
from ucscsdk.mometa.vnic.VnicVlan import VnicVlan
def lcp_iscsi_vnic_add(handle, name, parent_dn, addr="derived",
admin_host_port="ANY",
admin_vcon="any", stats_policy_name="global-default",
admin_cdn_name=None, cdn_source="vnic-name",
switch_id="A", pin_to_group_name=None, vnic_name=None,
qos_policy_name=None,
adaptor_profile_name="global-default",
ident_pool_name=None, order="unspecified",
nw_templ_name=None, vlan_name="default",
**kwargs):
"""
Adds iSCSI vNIC to LAN Connectivity Policy
Args:
handle (UcscHandle)
parent_dn (string) : Dn of LAN connectivity policy name
name (string) : Name of iscsi vnic
admin_host_port (string) : Admin host port placement for vnic
admin_vcon (string) : Admin vcon for vnic
stats_policy_name (string) : Stats policy name
cdn_source (string) : CDN source ['vnic-name', 'user-defined']
admin_cdn_name (string) : CDN name
switch_id (string): Switch id
pin_to_group_name (string) : Pinning group name
vnic_name (string): Overlay vnic name
qos_policy_name (string): Qos policy name
adaptor_profile_name (string): Adaptor profile name
ident_pool_name (string) : Identity pool name
order (string) : Order of the vnic
nw_templ_name (string) : Network template name
addr (string) : Address of the vnic
vlan_name (string): Name of the vlan
**kwargs: Any additional key-value pair of managed object(MO)'s
property and value, which are not part of regular args.
This should be used for future version compatibility.
Returns:
VnicIScsiLCP : Managed Object
Example:
lcp_iscsi_vnic_add(handle, "test_iscsi",
"org-root/lan-conn-pol-samppol2",
nw_ctrl_policy_name="test_nwpol", switch_id= "A",
vnic_name="vnic1",
adaptor_profile_name="global-SRIOV")
"""
mo = handle.query_dn(parent_dn)
if not mo:
raise UcscOperationError("lcp_iscsi_vnic_add",
"LAN connectivity policy '%s' does not exist"
% parent_dn)
if cdn_source not in ['vnic-name', 'user-defined']:
raise UcscOperationError("lcp_iscsi_vnic_add",
"Invalid CDN source name")
admin_cdn_name = "" if cdn_source == "vnic-name" else admin_cdn_name
mo_1 = VnicIScsiLCP(parent_mo_or_dn=mo,
addr=addr,
admin_host_port=admin_host_port,
admin_vcon=admin_vcon,
stats_policy_name=stats_policy_name,
cdn_source=cdn_source,
admin_cdn_name=admin_cdn_name,
switch_id=switch_id,
pin_to_group_name=pin_to_group_name,
vnic_name=vnic_name,
qos_policy_name=qos_policy_name,
adaptor_profile_name=adaptor_profile_name,
ident_pool_name=ident_pool_name,
order=order,
nw_templ_name=nw_templ_name,
name=name)
mo_1.set_prop_multiple(**kwargs)
VnicVlan(parent_mo_or_dn=mo_1, name="", vlan_name=vlan_name)
handle.add_mo(mo_1)
handle.commit()
return mo_1
|
6d87f8f3adebaa56850dfb14137fe049ff6e01ee
| 3,642,246
|
def fixture_ecomax_with_data(ecomax: EcoMAX) -> EcoMAX:
"""Return ecoMAX instance with test data."""
ecomax.product = ProductInfo(model="test_model")
ecomax.set_data(_test_data)
ecomax.set_parameters(_test_parameters)
return ecomax
|
4f496342d461eb39e4689ed266471b11bdf3f1f5
| 3,642,247
|
import os
import io
def _get_cached_setup(setup_id):
"""Load a run from the cache."""
cache_dir = config.get_cache_directory()
setup_cache_dir = os.path.join(cache_dir, "setups", str(setup_id))
try:
setup_file = os.path.join(setup_cache_dir, "description.xml")
with io.open(setup_file, encoding='utf8') as fh:
setup_xml = xmltodict.parse(fh.read())
setup = _create_setup_from_xml(setup_xml)
return setup
except (OSError, IOError):
raise openml.exceptions.OpenMLCacheException("Setup file for setup id %d not cached" % setup_id)
|
4cb89eab27e0c9a0c5da050a8e2f8b88494ca243
| 3,642,248
|
async def request_get_stub(url: str, stub_for: str, status_code: int = 200):
"""Returns an object with stub response.
Args:
url (str): A request URL.
        stub_for (str): Type of stub required.
        status_code (int): HTTP status code for the stub response. Defaults to 200.
Returns:
StubResponse: A StubResponse object.
"""
return StubResponse(stub_for=stub_for, status_code=status_code)
|
f4c4f9a0610e8d95f920ddee76c4264e23c08283
| 3,642,249
|
import torch
def single_gpu_test(model, data_loader, rescale=True, show=False, out_dir=None):
"""Test with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
        show (bool): Whether to show results during inference. Default: False.
out_dir (str, optional): If specified, the results will be dumped
into the directory to save output results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
seg_targets = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
if 'gt_semantic_seg' in data:
target = data.pop('gt_semantic_seg')
for gt in target:
gt = gt.cpu().numpy()[0] # 1*h*w ==> h*w
seg_targets.append(gt)
with torch.no_grad():
result = model(return_loss=False, rescale=rescale, **data)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result,
palette=dataset.PALETTE,
show=show,
out_file=out_file)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
if seg_targets:
return [results, seg_targets]
return results
|
0e548186e5909b1a7b72d6fd6ed16c80e233e0b6
| 3,642,250
|
def readAllCarts():
"""
This function responds to a request for /api/people
with the complete lists of people
:return: json string of list of people
"""
# Create the list of people from our data
return[CART[key] for key in sorted(CART.keys())]
|
7ec9b25b36c238a6bfae3963482d610ed09d1d75
| 3,642,251
|
import random
import logging
from ptf import testutils
from scapy.all import IP
# is_ipv4_address is provided by the surrounding test framework's helpers
def build_encapsulated_packet(select_test_interface, ptfadapter, tor, tunnel_traffic_monitor):
"""Build the encapsulated packet sent from T1 to ToR."""
_, server_ipv4 = select_test_interface
config_facts = tor.get_running_config_facts()
try:
peer_ipv4_address = [_["address_ipv4"] for _ in config_facts["PEER_SWITCH"].values()][0]
except IndexError:
raise ValueError("Failed to get peer ToR address from CONFIG_DB")
tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]
if is_ipv4_address(_.split("/")[0])][0]
tor_ipv4_address = tor_ipv4_address.split("/")[0]
inner_dscp = random.choice(range(0, 33))
inner_ttl = random.choice(range(3, 65))
inner_packet = testutils.simple_ip_packet(
ip_src="1.1.1.1",
ip_dst=server_ipv4,
ip_dscp=inner_dscp,
ip_ttl=inner_ttl
)[IP]
packet = testutils.simple_ipv4ip_packet(
eth_dst=tor.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_src=peer_ipv4_address,
ip_dst=tor_ipv4_address,
ip_dscp=inner_dscp,
ip_ttl=255,
inner_frame=inner_packet
)
logging.info("the encapsulated packet to send:\n%s", tunnel_traffic_monitor._dump_show_str(packet))
return packet
|
e7776a602eeb0dbe9bcd8707b71dacfe4ac36338
| 3,642,252
|
def index():
"""
Renders the index page.
"""
return render_template("index.html")
|
cc7630c3bbaf32c3be705a7205df715f959a5683
| 3,642,253
|
import re
def condense_colors(svg):
"""Condense colors by using hexadecimal abbreviations where possible.
Consider using an abstract, general approach instead of hard-coding.
"""
svg = re.sub('#000000', '#000', svg)
svg = re.sub('#ff0000', '#f00', svg)
svg = re.sub('#00ff00', '#0f0', svg)
svg = re.sub('#0000ff', '#00f', svg)
svg = re.sub('#00ffff', '#0ff', svg)
svg = re.sub('#ff00ff', '#f0f', svg)
svg = re.sub('#ffff00', '#ff0', svg)
svg = re.sub('#ffffff', '#fff', svg)
svg = re.sub('#cc0000', '#c00', svg)
svg = re.sub('#00cc00', '#0c0', svg)
svg = re.sub('#0000cc', '#00c', svg)
svg = re.sub('#00cccc', '#0cc', svg)
svg = re.sub('#cc00cc', '#c0c', svg)
svg = re.sub('#cccc00', '#cc0', svg)
svg = re.sub('#cccccc', '#ccc', svg)
svg = re.sub('#999999', '#999', svg)
svg = re.sub('#808080', 'grey', svg)
return svg
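
# Usage sketch (illustrative, not from the original source): condensing a small
# SVG fragment that uses full six-digit hex colors.
svg_fragment = '<rect fill="#ff0000" stroke="#000000"/>'
print(condense_colors(svg_fragment))  # <rect fill="#f00" stroke="#000"/>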
|
413f1d7c69a52384fc21ee6f8eda6f2a63833e66
| 3,642,254
|
from colored import fg, attr
def install_pytest_confirmation():
"""Ask if pytest should be installed"""
return f'{fg(2)} Do you want to install pytest? {attr(0)}'
|
b81da35d4eb7e755f7780cf0b6da056096613549
| 3,642,255
|
def dense_nopack(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense without packing"""
debug = True
if debug:
print("bias", bias)
print("data_dtype", data.dtype)
print("weight_dtype", weight.dtype)
print("out_dtype", out_dtype)
if out_dtype is None:
out_dtype = data.dtype
M, K = get_const_tuple(data.shape)
N, _ = get_const_tuple(weight.shape)
if debug:
print("data", M, K)
print("weight", N, _)
print("bias", bias)
# create tuning space
# cfg.define_split("tile_y", 32 if isinstance(M, tvm.tir.Var) else M, num_outputs=2)
# cfg.define_split("tile_x", 32 if isinstance(N, tvm.tir.Var) else N, num_outputs=2)
# cfg.define_split("tile_k", 32 if isinstance(K, tvm.tir.Var) else K, num_outputs=2)
# if cfg.is_fallback:
# _default_dense_nopack_config(cfg, M, N, K)
#
# vec = cfg["tile_k"].size[-1]
# k = te.reduce_axis((0, K // vec), "k")
# CC = te.compute(
# (M, N, vec),
# lambda z, y, x: te.sum(
# data[z, k * vec + x].astype(out_dtype) * weight[y, k * vec + x].astype(out_dtype),
# axis=k,
# ),
# )
#
# kk = te.reduce_axis((0, vec), "kk")
# C = te.compute((M, N), lambda y, x: te.sum(CC[y, x, kk], axis=kk), tag="dense_nopack")
# if bias is not None:
# C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
out = te.placeholder((M,N,), name="out", dtype=out_dtype)
CC = te.extern(
(M, N),
[data, weight],
lambda ins, outs: tvm.tir.call_packed("tvm.contrib.xilinx_matmul_pynq", ins[0], ins[1], outs[0]),
dtype=out_dtype,
name="matmul_pynq",
)
# kk = te.reduce_axis((0, vec), "kk")
# C = te.compute((M, N), lambda y, x: te.sum(CC[y, x, kk], axis=kk), tag="dense_nopack")
if bias is not None:
C = te.compute((M, N), lambda i, j: CC[i, j] + bias[j].astype(out_dtype))
return C
return CC
|
8b7c9101afce8bc6f89f9eca8a103082e2448c9c
| 3,642,256
|
def rgb(r=0, g=0, b=0, mode='RGB'):
"""
Convert **r**, **g**, **b** values to a `string`.
:param r: red part
:param g: green part
:param b: blue part
:param string mode: ``'RGB | %'``
:rtype: string
========= =============================================================
mode Description
========= =============================================================
``'RGB'`` returns a rgb-string format: ``'rgb(r, g, b)'``
``'%'`` returns percent-values as rgb-string format: ``'rgb(r%, g%, b%)'``
========= =============================================================
"""
def percent(value):
value = int(value)
if value < 0:
value = 0
if value > 100:
value = 100
return value
if mode.upper() == 'RGB':
return "rgb(%d,%d,%d)" % (int(r) & 255, int(g) & 255, int(b) & 255)
elif mode == "%":
# see http://www.w3.org/TR/SVG11/types.html#DataTypeColor
# percentage is an 'integer' value
return "rgb(%d%%,%d%%,%d%%)" % (percent(r), percent(g), percent(b))
else:
raise ValueError("Invalid mode '%s'" % mode)
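
# Usage sketch (illustrative, not from the original source): both output modes.
print(rgb(255, 0, 0))          # rgb(255,0,0)
print(rgb(50, 150, 200, '%'))  # rgb(50%,100%,100%) -- percent values are clamped to 0..100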
|
563b8fe8273ce4534567687df01cebe79b9f58dc
| 3,642,257
|
def load_csv_translations(fname, pfx=''):
"""
Load translations from a tab-delimited file. Add prefix
to the keys. Return a dictionary.
"""
translations = {}
with open(fname, 'r', encoding='utf-8-sig') as fIn:
for line in fIn:
line = line.strip('\r\n ')
if len(line) <= 2 or line.count('\t') != 1:
continue
key, value = line.split('\t')
key = pfx + key
translations[key] = value
return translations
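
# Usage sketch (illustrative, not from the original source): write a tiny
# tab-delimited file and load it with a key prefix. The file contents and the
# 'fr_' prefix are made up for the example.
import os
import tempfile

tmp = tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False, encoding='utf-8')
tmp.write('hello\tBonjour\n')
tmp.write('bye\tAu revoir\n')
tmp.close()
print(load_csv_translations(tmp.name, pfx='fr_'))
# {'fr_hello': 'Bonjour', 'fr_bye': 'Au revoir'}
os.remove(tmp.name)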
|
e8b4707fe5eeb0f0f4f4859bd9a5f2272387a022
| 3,642,258
|
def compute_bleu_rouge(pred_dict, ref_dict, bleu_order=4):
"""
Compute bleu and rouge scores.
"""
assert set(pred_dict.keys()) == set(ref_dict.keys()), \
"missing keys: {}".format(set(ref_dict.keys()) - set(pred_dict.keys()))
scores = {}
bleu_scores, _ = Bleu(bleu_order).compute_score(ref_dict, pred_dict)
for i, bleu_score in enumerate(bleu_scores):
scores['Bleu-%d' % (i + 1)] = bleu_score
rouge_score, _ = Rouge().compute_score(ref_dict, pred_dict)
scores['Rouge-L'] = rouge_score
f1_exact = f1_exact_eval()
pred_list, ref_list = [], []
for k in pred_dict.keys():
pred_list.append(pred_dict[k][0])
ref_list.append(ref_dict[k][0])
f1_score, exact_score = f1_exact.compute_scores(pred_list, ref_list)
meter_score = compute_meter_score(pred_list, ref_list)
scores['f1'] = f1_score
scores['exact'] = exact_score
scores['meter'] = meter_score
return scores
|
b000f97208fc8254e28ebc85501912e568c7b2d7
| 3,642,259
|
from datetime import datetime, timedelta
def check_upload_details(study_id=None, patient_id=None):
""" Get patient data upload details """
participant_set = Participant.objects.filter(patient_id=patient_id)
if not participant_set.exists() or str(participant_set.values_list('study', flat=True).get()) != study_id:
        return Response('Error: failed to get upload details for Patient %s' % patient_id, mimetype='text/plain')
user = participant_set.get()
upinfo = user.get_upload_info()
sorted_dates = sorted(upinfo.keys())
dates = [str(datetime.now())[:10]]
if sorted_dates:
first_date = datetime.strptime(sorted_dates[0], '%Y-%m-%d')
today_date = datetime.strptime(dates[0], '%Y-%m-%d')
day = first_date
dates = []
while day <= today_date:
dates += [str(day)[:10]]
day += timedelta(days=1)
dev_settings = user.study.device_settings.as_dict()
checkable_states = [[f, ('black' if f in ALLOW_EMPTY_FILES else 'red') if dst else 'lightgray']
for f in CHECKABLE_FILES for dst in [dev_settings.get(UPLOAD_FILE_TYPE_MAPPING[f], False)]]
return render_template(
'upload_details.html',
dates=dates,
upinfo=upinfo,
checkables=checkable_states,
patient=user
)
|
6611e9235a6085635e19a9cad8e1920adb757a87
| 3,642,260
|
def crc32c_rev(name):
"""Compute the reversed CRC32C of the given function name"""
value = 0
for char in name:
value ^= ord(char)
for _ in range(8):
carry = value & 1
value = value >> 1
if carry:
value ^= CRC32_REV_POLYNOM
return value
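
# Usage sketch (illustrative). CRC32_REV_POLYNOM is a module-level constant that
# is not shown in this snippet; the reflected CRC-32C (Castagnoli) polynomial
# 0x82F63B78 is assumed here.
CRC32_REV_POLYNOM = 0x82F63B78
print(hex(crc32c_rev("LoadLibraryA")))  # prints the 32-bit hash of the function name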
|
0aea3d45c0efc136be56bac2ee44ba8e08945de3
| 3,642,261
|
def sils_cut(T,f,c,d,h):
"""solve_sils -- solve the lot sizing problem with cutting planes
- start with a relaxed model
- add cuts until there are no fractional setup variables
Parameters:
- T: number of periods
- P: set of products
- f[t]: set-up costs (on period t)
- c[t]: variable costs
- d[t]: demand values
- h[t]: holding costs
Returns the final model solved, with all necessary cuts added.
"""
Ts = range(1,T+1)
model = sils(T,f,c,d,h)
y,x,I = model.data
# relax integer variables
for t in Ts:
y[t].vtype = "C"
# compute D[i,j] = sum_{t=i}^j d[t]
D = {}
for t in Ts:
s = 0
for j in range(t,T+1):
s += d[j]
D[t,j] = s
EPS = 1.e-6
cuts = True
while cuts:
model.optimize()
cuts = False
for ell in Ts:
lhs = 0
S,L = [],[]
for t in range(1,ell+1):
yt = model.getVal(y[t])
xt = model.getVal(x[t])
if D[t,ell]*yt < xt:
S.append(t)
lhs += D[t,ell]*yt
else:
L.append(t)
lhs += xt
if lhs < D[1,ell]:
# add cutting plane constraint
model.addCons(quicksum([x[t] for t in L]) +\
quicksum(D[t,ell] * y[t] for t in S)
>= D[1,ell])
cuts = True
model.data = y,x,I
return model
|
ca689370fe928b38cdd96cdd7b227699f0979a1c
| 3,642,262
|
def progressive_fixed_point(func, start, init_disc, final_disc, ratio=2):
"""Progressive fixed point calculation"""
while init_disc <= final_disc * ratio:
start = fixedpoint.fixed_point(func, start, disc=init_disc)
init_disc *= ratio
return start
|
d46d325bdc3ddb1c5627e231f9659e992a9a0748
| 3,642,263
|
import json
async def create_new_game(redis: Redis = Depends(redis.wrapper.get)):
"""Create a new game with an unique ID."""
game = get_new_game()
handle_score(game)
game_dict = game_to_dict(game)
game_id = token_urlsafe(32)
await redis.set(game_id, json.dumps(game_dict))
return GameState(gameId=game_id, **handle_hidden_cards(game_dict))
|
45de279daba553d4e722f64d0ccb921f90332112
| 3,642,264
|
def _write_reaction_lines(reactions, species_delimiter, reaction_delimiter,
include_TS, stoich_format, act_method_name,
ads_act_method, act_unit, float_format,
column_delimiter, sden_operation,
**kwargs):
"""Write the reaction lines in the Chemkin format
Parameters
----------
reactions : list of :class:`~pmutt.reaction.ChemkinReaction` objects
Chemkin reactions to write in surf.inp file
species_delimiter : str
Delimiter to separate species when writing reactions
reaction_delimiter : str
Delimiter to separate reaction sides
act_method_name : str
        Name of method to use to calculate the activation energy
    ads_act_method : str
        Name of method to use to calculate the activation energy for
        adsorption reactions
    include_TS : bool
        Whether transition states are included when writing the reactions
    sden_operation : str
        Operation used to combine site densities when calculating the
        preexponential factor
    act_unit : str
        Units to calculate activation energy
float_format : str
String format to print floating numbers
stoich_format : str
String format to print stoichiometric coefficients
column_delimiter : str
Delimiter to separate columns
kwargs : keyword arguments
Parameters needed to calculate activation energy and preexponential
factor
Returns
-------
reaction_lines : str
Reactions represented in Chemkin format
"""
max_reaction_len = _get_max_reaction_len(
reactions=reactions,
species_delimiter=species_delimiter,
reaction_delimiter=reaction_delimiter,
stoich_format=stoich_format,
include_TS=include_TS)
float_field = '{:%s}' % float_format
reaction_lines = []
for reaction in reactions:
# Get reaction string
reaction_str = reaction.to_string(
species_delimiter=species_delimiter,
reaction_delimiter=reaction_delimiter,
stoich_format=stoich_format,
include_TS=include_TS).ljust(max_reaction_len)
# Calculate preexponential factor and determine activation energy method
if reaction.is_adsorption:
A = reaction.sticking_coeff
Ea_method = getattr(reaction, ads_act_method)
else:
# If using delta_G, take out entropic contribution in A
if act_method_name in ('get_GoRT_act', 'get_G_act',
'get_delta_GoRT', 'get_delta_G'):
include_entropy = False
else:
include_entropy = True
A = reaction.get_A(include_entropy=include_entropy,
sden_operation=sden_operation,
**kwargs)
Ea_method = getattr(reaction, act_method_name)
if act_method_name != 'get_EoRT_act' and \
act_method_name != 'get_E_act':
kwargs['activation'] = True
A_str = float_field.format(A)
# Format beta value
beta_str = float_field.format(reaction.beta)
# Calculate activation energy
kwargs['units'] = act_unit
try:
Ea = _force_pass_arguments(Ea_method, **kwargs)
except AttributeError:
Ea = 0.
Ea_str = float_field.format(Ea)
reaction_line = '{0}{4}{1}{4}{2}{4}{3}'.format(reaction_str, A_str,
beta_str, Ea_str,
column_delimiter)
if reaction.is_adsorption:
reaction_line = '{}\nSTICK'.format(reaction_line)
reaction_lines.append(reaction_line)
return reaction_lines
|
9787279dee81cd97922657739975c93fcbed8249
| 3,642,265
|
def refines_constraints(storage, constraints):
"""
Determines whether with the storage as basis for the substitution map there is a substitution that can be performed
on the constraints, therefore refining them.
:param storage: The storage basis for the substitution map
:param constraints: The constraint list containing the expressions to be substituted.
:return: True if the substitution would change the constraint list.
"""
storage_names = ["storage[" + str(key) + "]" for key, _ in storage.items()]
for name in storage_names:
for constraint in constraints:
if name in constraint.slot_names:
return True
return False
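
# Usage sketch (illustrative): stand-in constraint objects are used here; only a
# `slot_names` attribute is required by this check. The real constraint class
# comes from the surrounding analysis framework.
from types import SimpleNamespace

storage = {0: "storage_value_0"}
constraints = [SimpleNamespace(slot_names=["storage[0]", "calldata[4]"]),
               SimpleNamespace(slot_names=["caller"])]
print(refines_constraints(storage, constraints))  # True: storage[0] appears in a constraint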
|
de82087c41d95240ee9d15bd51810b7c5594ef0f
| 3,642,266
|
def dot_fp(x, y):
"""Dot products for consistent scalars, vectors, and matrices.
Possible combinations for x, y:
scal, scal
scal, vec
scal, mat
vec, scal
mat, scal
vec, vec (same length)
mat, vec (n_column of mat == length of vec)
Warning: No broadcasting! There are special functions for that!
Args:
x: scalar, vector, or matrix (fixed point or float)
y: scalar, vector or matrix (fixed point or float)
"""
# If both inputs are np.ndarray we can use np.dot
if _np_instance(x, y):
return np.dot(x, y)
optype = _operation_type(x, y)
if optype == 'scal_scal':
return x * y
elif optype == 'vec_vec':
return _vec_vec_dot_fp(x, y)
elif optype in ['mat_mat_dot', 'mat_mat_all']:
return _mat_mat_dot_fp(x, y)
elif optype in ['mat_vec_dot', 'mat_vec_all']:
return _mat_vec_dot_fp(x, y)
elif optype in ['vec_scal', 'mat_scal']:
return _scal_dot_fp(x, y)
elif optype in ['scal_vec', 'scal_mat']:
return _scal_dot_fp(y, x)
else:
raise ValueError("Dot not possible for {}".format(optype))
|
d1bf8bd32727973ebb65ead39921202bfd842973
| 3,642,267
|
def add_one(num: int) -> int:
"""Increment arg by one."""
return num + 1
|
72d8ff69fa5766e813f637f9796753ae51e493b9
| 3,642,268
|
def normalize(data, train_split):
""" Get the standard score of the data.
:param data: data set
:param train_split: number of training samples
:return: normalized data, mean, std
"""
mean = data[:train_split].mean(axis=0)
std = data[:train_split].std(axis=0)
return (data - mean) / std, mean, std
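
# Usage sketch (illustrative, not from the original source): the mean/std are
# computed from the first `train_split` rows only, so later rows are scaled
# with training-set statistics.
import numpy as np

data = np.arange(10, dtype=float).reshape(5, 2)
normalized, mean, std = normalize(data, train_split=3)
print(mean, std)        # per-column mean and std of the first three rows
print(normalized[-1])   # last (held-out) row expressed in training-set units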
|
cfc45ac5bd6ae7a30169253a1ae3ed64c1bd1118
| 3,642,269
|
def lemma(name_synsets):
"""
This function return lemma object given the name.
.. note::
Support only English language (*eng*).
:param str name_synsets: name of the synset
:return: lemma object with the given name
:rtype: :class:`Lemma`
:Example:
>>> from pythainlp.corpus.wordnet import lemma
>>>
>>> lemma('practice.v.01.exercise')
Lemma('practice.v.01.exercise')
>>>
>>> lemma('drill.v.03.exercise')
Lemma('drill.v.03.exercise')
>>>
>>> lemma('exercise.n.01.exercise')
Lemma('exercise.n.01.exercise')
"""
return wordnet.lemma(name_synsets)
|
84a9ae7dbb1679477257ff03557afa75f950a542
| 3,642,270
|
def export_cookies(domain, cookies, savelist=None, sp_domain=None):
"""
Export cookies used for remembered device/other non-session use
as list of Cookie objects. Only looks in jar matching host name.
Args:
domain (str) - Domain to select cookies from
cookies (requests.cookies.RequestsCookieJar) - Cookie jar object
        savelist (list(str)) - (Optional) List of cookies to export
        sp_domain (str) - (Optional) SP domain whose cookie jar is merged in as well
Returns:
list(Cookie) - restorable using set_device_cookies() function
"""
if savelist is None:
savelist = DEFAULT_COOKIE_SAVELIST
# Pulling directly from internal data structure as there is
# no get_cookies method.
cookies_dict = cookies._cookies.get(domain, {}).get('/', None)
    # if they exist, add sp cookies to idp cookies
    if sp_domain is not None and cookies_dict is not None:
        cookies_dict.update(cookies._cookies.get(sp_domain, {}).get('/', {}))
if cookies_dict is None:
return []
return [c for c in [cookies_dict.get(si) for si in savelist] if c is not None]
|
7609ac6452ed49dae5cd66f280bbeb4b27b17034
| 3,642,271
|
from typing import Tuple
from typing import OrderedDict
def adaptive_crossover(parents: Tuple[AbstractSolution, AbstractSolution],
variables_number: int,
crossover_pattern: int) -> ChildrenValuesTyping:
"""
Adaptive crossover function.
Crossover is performed according to a pattern that determines which gene (decision variable) value
should be picked from which parent.
:param parents: Pair of parent solution that provides genes for a new pair of children.
:param variables_number: Number of decision variables (genes).
:param crossover_pattern: Pattern of crossover to be used.
:return: Pair of children data sets.
"""
parents_values = list(parents[0].decision_variables_values.items()), \
list(parents[1].decision_variables_values.items())
child_1_values: OrderedDict = OrderedDict()
child_2_values: OrderedDict = OrderedDict()
for i in range(variables_number):
pattern_value = (crossover_pattern >> i) & 1
child_1_values.update([parents_values[pattern_value][i]])
child_2_values.update([parents_values[pattern_value ^ 1][i]])
return child_1_values, child_2_values
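
# Usage sketch (illustrative): the real AbstractSolution/ChildrenValuesTyping
# types come from the surrounding project, so simple stand-ins with a
# `decision_variables_values` attribute are used here.
from collections import OrderedDict as ODict
from types import SimpleNamespace

parent_a = SimpleNamespace(decision_variables_values=ODict(x=1, y=2, z=3))
parent_b = SimpleNamespace(decision_variables_values=ODict(x=7, y=8, z=9))
# Pattern 0b101: bits 0 and 2 take genes x and z from parent_b for child 1.
child_1, child_2 = adaptive_crossover((parent_a, parent_b), variables_number=3,
                                      crossover_pattern=0b101)
print(dict(child_1))  # {'x': 7, 'y': 2, 'z': 9}
print(dict(child_2))  # {'x': 1, 'y': 8, 'z': 3}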
|
76446c9c35739dddd9bfb797379b44616c552bbd
| 3,642,272
|
def get_type_dict(kb_path, dstc2=False):
"""
Specifically, we augment the vocabulary with some special words, one for each of the KB entity types
For each type, the corresponding type word is added to the candidate representation if a word is found that appears
1) as a KB entity of that type,
"""
type_dict = {'R_restaurant':[]}
kb_path_temp = kb_path
fd = open(kb_path_temp,'r')
for line in fd:
if dstc2:
x = line.replace('\n','').split(' ')
rest_name = x[1]
entity = x[2]
entity_value = x[3]
else:
x = line.split('\t')[0].split(' ')
rest_name = x[1]
entity = x[2]
entity_value = line.split('\t')[1].replace('\n','')
if rest_name not in type_dict['R_restaurant']:
type_dict['R_restaurant'].append(rest_name)
if entity not in type_dict.keys():
type_dict[entity] = []
if entity_value not in type_dict[entity]:
type_dict[entity].append(entity_value)
return type_dict
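
# Usage sketch (illustrative): a fabricated two-line knowledge-base file in the
# non-dstc2 format this parser expects ("<idx> <restaurant> <entity>\t<value>").
import os
import tempfile

kb = tempfile.NamedTemporaryFile('w', delete=False)
kb.write('1 resto_rome_cheap R_cuisine\titalian\n')
kb.write('2 resto_rome_cheap R_price\tcheap\n')
kb.close()
print(get_type_dict(kb.name))
# {'R_restaurant': ['resto_rome_cheap'], 'R_cuisine': ['italian'], 'R_price': ['cheap']}
os.remove(kb.name)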
|
cd35054505c429cc1ad17eabe1cafb1aa6b38a1f
| 3,642,273
|
def parse_time(duration: str, minimum: int = None, maximum: int = None, error_on_exceeded: bool = True) -> int:
"""Function that parses time in a NhNmNs format. Supports weeks, days, hours, minutes and seconds, positive and
negative amounts and max values. Minimum and maximum values can be set (in seconds), and whether a error should
occur or the max / min value should be used when these are exceeded."""
last, t_total = 0, 0
t_frames = {"w": 604800, "d": 86400, "h": 3600, "m": 60, "s": 1}
for index, char in enumerate(duration): # For every character in time string.
if char.lower() in t_frames.keys():
if duration[last:index] != "":
t_total += int(duration[last:index]) * t_frames[char.lower()]
last = index + 1
elif char not in ["+", "-", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]: # Valid characters.
raise ValueError("Invalid character encountered during time parsing.")
if minimum and t_total < minimum: # If total time is less than minimum.
if error_on_exceeded:
raise ValueError("Time too short.")
t_total = minimum
if maximum and t_total > maximum: # If total time is more than maximum.
if error_on_exceeded:
raise ValueError("Time too long.")
t_total = maximum
return t_total
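
# Usage sketch (illustrative): "1h30m" parses to 5400 seconds; a maximum can
# either clamp the result or raise, depending on error_on_exceeded.
print(parse_time("1h30m"))                                       # 5400
print(parse_time("2d", maximum=86400, error_on_exceeded=False))  # 86400 (clamped)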
|
9878d744cd9cc60696d414a31ff121e384e261a9
| 3,642,274
|
import logging
import array
def correl_align(s_orig, align_phases=False,tol=1e-4,indirect_dim='indirect',
fig_title='correlation alignment',signal_pathway = {'ph1':0,'ph2':1},
shift_bounds=False, avg_dim = None, max_shift = 100., sigma=20.,direct='t2',fl=None):
"""
Align transients collected with chunked phase cycling dimensions along an indirect
dimension based on maximizing the correlation across all the transients and repeat
alignment until the calculated signal energy remains constant to within a given
tolerance level.
Parameters
==========
s_orig: nddata
A nddata object which contains phase cycle dimensions and an
indirect dimension.
    align_phases: boolean
        Whether to remove the zeroth-order phase of each transient
        before computing the correlation function.
tol: float
Sets the tolerance limit for the alignment procedure.
indirect_dim: str
Name of the indirect dimension along which you seek to align
the transients.
fig_title: str
Title for the figures generated.
signal_pathway: dict
Dictionary containing the signal pathway.
shift_bounds: boolean
Keeps f_shift to be within a specified
limit (upper and lower bounds given by max_shift)
which should be around the location of the expected
signal.
avg_dim: str
Dimension along which the data is being averaged.
max_shift: float
Specifies the upper and lower bounds to the range over
which f_shift will be taken from the correlation function.
Shift_bounds must be True.
sigma: int
Sigma value for the Gaussian mask. Related to the line width
of the given data.
fl: boolean
fl=fl to show the plots and figures produced by this function
otherwise, fl=None.
Returns
=======
f_shift: array
The optimized frequency shifts for each transient which will
maximize their correlation amongst each other, thereby aligning
them.
sigma: float
The width of the Gaussian function used to frequency filter
        the data in the calculation of the correlation function.
    this_mask: nddata
        The Gaussian mask (centered on the signal frequency) that was
        applied to the data before computing the correlation function.
"""
logging.debug(strm("Applying the correlation routine"))
if avg_dim:
phcycdims = [j for j in s_orig.dimlabels if j.startswith('ph')]
indirect = set(s_orig.dimlabels)-set(phcycdims)-set([direct])
indirect = [j for j in s_orig.dimlabels if j in indirect]
avg_dim_len = len(s_orig.getaxis(avg_dim))
s_orig.smoosh(indirect)
for j in signal_pathway.keys():
assert not s_orig.get_ft_prop(j), str(j)+" must not be in the coherence domain"
signal_keys = list(signal_pathway)
signal_values = list(signal_pathway.values())
ph_len = {j:ndshape(s_orig)[j] for j in signal_pathway.keys()}
N = ndshape(s_orig)[indirect_dim]
sig_energy = (abs(s_orig)**2).data.sum().item() / N
if fl:
fl.push_marker()
fig_forlist, ax_list = plt.subplots(1, 5, figsize=(7,7))
fl.next("Correlation Diagnostics")
fig_forlist.suptitle(" ".join(["Correlation Diagnostic"] + [j for j in [fl.basename] if j is not None]))
fl.image(s_orig.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),ax=ax_list[0],human_units=False)
ax_list[0].set_title('before correlation\nsig. energy=%g'%sig_energy)
energy_diff = 1.
i = 0
energy_vals = []
this_E = (abs(s_orig.C.sum(indirect_dim))**2).data.sum().item() / N**2
energy_vals.append(this_E / sig_energy)
last_E = None
for_nu_center =s_orig.C
for_nu_center.ft(list(signal_pathway))
for x in range(len(signal_keys)):
for_nu_center = for_nu_center[signal_keys[x],signal_values[x]]
nu_center = for_nu_center.mean(indirect_dim).C.argmax(direct)
logging.debug(strm("Center frequency", nu_center))
for my_iter in range(100):
i += 1
logging.debug(strm("*** *** ***"))
logging.debug(strm("CORRELATION ALIGNMENT ITERATION NO. ",i))
logging.debug(strm("*** *** ***"))
s_orig.ift(direct)
s_copy = s_orig.C
if align_phases:
ph0 = s_orig.C.sum(direct)
ph0 /= abs(ph0)
s_copy /= ph0
s_copy.ft(direct)
this_mask = exp(-(s_copy.fromaxis(direct)-nu_center)**2/(2*sigma**2))
s_copy *= this_mask
s_copy.ift(direct)
s_copy2 = s_orig.C
for k,v in ph_len.items():
ph = ones(v)
s_copy *= nddata(ph,'Delta'+k.capitalize())
s_copy.setaxis('Delta'+k.capitalize(),'#')
correl = s_copy * 0
for k,v in ph_len.items():
for ph_index in range(v):
s_copy['Delta%s'%k.capitalize(),ph_index] = s_copy['Delta%s'%k.capitalize(),
ph_index].run(lambda x, axis=None: roll(x, ph_index, axis=axis), k)
for j in range(1,N):
correl += s_copy2 * s_copy.C.run(lambda x, axis=None: roll(x,j,axis=axis),
indirect_dim).run(conj)
correl.reorder([indirect_dim,direct],first=False)
if my_iter ==0:
logging.debug(strm("holder"))
if fl:
fl.image(correl.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
ax=ax_list[1])
ax_list[1].set_title('correlation function (t), \nafter apod')
correl.ft_clear_startpoints(direct)
correl.ft(direct, shift=True, pad=2**14)
for k,v in signal_pathway.items():
correl.ft(['Delta%s'%k.capitalize()])
correl = correl['Delta'+k.capitalize(),v]+correl['Delta'+k.capitalize(),0]
if my_iter ==0:
logging.debug(strm("holder"))
if fl:
fl.image(correl.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
ax=ax_list[2],human_units=False)
ax_list[2].set_title('correlation function (v), \nafter apod')
if shift_bounds:
f_shift = correl[direct:(-max_shift,max_shift)].run(real).argmax(direct)
else:
f_shift = correl.run(real).argmax(direct)
s_copy = s_orig.C
s_copy *= exp(-1j*2*pi*f_shift*s_copy.fromaxis(direct))
s_orig.ft(direct)
s_copy.ft(direct)
if my_iter == 0:
logging.debug(strm("holder"))
if fl:
fl.image(s_copy.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
ax=ax_list[3],human_units=False)
ax_list[3].set_title('after correlation\nbefore ph0 restore')
logging.debug(strm('signal energy per transient (recalc to check that it stays the same):',(abs(s_copy**2).data.sum().item() / N)))
this_E = (abs(s_copy.C.sum(indirect_dim))**2).data.sum().item() / N**2
energy_vals.append(this_E / sig_energy)
logging.debug(strm('averaged signal energy (per transient):', this_E))
if last_E is not None:
energy_diff = (this_E - last_E)/sig_energy
logging.debug(strm(energy_diff))
if abs(energy_diff) < tol and my_iter > 4:
break
last_E = this_E
if fl is not None:
fl.next('correlation convergence')
fl.plot(array(energy_vals),'x')
gca().yaxis.set_major_formatter(to_percent)
if fl is not None:
fl.image(s_copy.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),ax=ax_list[4])
ax_list[4].set_title('after correlation\nph0 restored \nsig. energy=%g'%sig_energy)
fl.pop_marker()
if avg_dim:
s_orig.chunk(avg_dim,[avg_dim,'power'],[avg_dim_len,-1])
s_orig.reorder(['ph1',avg_dim,'power',direct])
return f_shift, sigma, this_mask
|
73235551904db27b639d04a66140d3f07c5dfa3e
| 3,642,275
|
def get_fragment_mz_dict(pep, fragments, mod=None):
"""
    :param pep: Peptide sequence.
    :param fragments: Iterable of fragment names to compute m/z values for.
    :param mod: Optional modification information forwarded to calc_fragment_mz.
    :return: Dict mapping each fragment name to its calculated m/z value.
"""
mz_dict = dict()
for each_fragment in fragments:
frag_type, frag_num, frag_charge = rapid_kit.split_fragment_name(each_fragment)
mz_dict[each_fragment] = calc_fragment_mz(
pep, frag_type, frag_num, frag_charge, mod)
return mz_dict
|
ffc79c35111548471a3a98f5999dee013975752d
| 3,642,276
|
def merge_dicts(iphonecontrollers, ipadcontrollers):
"""Add ipad controllers to the iphone controllers dict, but never overwrite a custom controller with None!"""
all_controllers = iphonecontrollers.copy()
for identifier, customclass in ipadcontrollers.items():
if all_controllers.get(identifier) is None:
all_controllers[identifier] = customclass
return all_controllers
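
# Usage sketch (illustrative): iPad-only controllers are added and a None entry
# is filled in, but an existing custom controller is never replaced.
iphone = {"Home": "HomeController", "Detail": None}
ipad = {"Detail": "DetailPadController", "Split": "SplitController", "Home": None}
print(merge_dicts(iphone, ipad))
# {'Home': 'HomeController', 'Detail': 'DetailPadController', 'Split': 'SplitController'}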
|
10638e775d6578e2553ff5b2b47aff8a17051c7e
| 3,642,277
|
def perimRect(length,width):
"""
    Compute perimeter of rectangle
>>> perimRect(2,3)
10
>>> perimRect(4, 2.5)
13.0
>>> perimRect(3, 3)
12
>>>
"""
return 2*(length+width)
|
50fdd92430352f443d313d0931bab50ad5617622
| 3,642,278
|
def h_search(endpoint, query, sort, order, per_page, page):
"""
- Executes search.search and returns a dictionary
of results
`return {headers,status_code,items}`
"""
current_search_params = {
"query": query,
"sort": sort,
"order": order,
"per_page": per_page,
"page": page,
}
logger.debug(f"current_search_params = {current_search_params}")
status_code, items, headers = search.search(
endpoint,
query,
sort=sort,
order=order,
page=page,
per_page=per_page,
strict=True,
)
results = {"headers": headers, "status_code": status_code, "items": items}
logger.debug(f"status_code = {status_code} num_items = {len(items)}")
return results
|
6b0ee993b2baf440f1e6c835a003bebe06dc448a
| 3,642,279
|
import asyncio
import traceback
import functools
import inspect
def async_wrapper(fn):
"""
Wraps an async function or generator with a function which runs that generator on the thread's
event loop. The wrapped function requires an 'xloil_thread_context' argument which provides a
callback object to return a result. xlOil will pass this object automatically to functions
declared async.
This function is used by the `func` decorator and generally should not be invoked
directly.
"""
@functools.wraps(fn)
def synchronised(xloil_thread_context, *args, **kwargs):
ctx = xloil_thread_context
async def run_async():
_async_caller.set(ctx.caller)
try:
# TODO: is inspect.isasyncgenfunction expensive?
if inspect.isasyncgenfunction(fn):
async for result in fn(*args, **kwargs):
ctx.set_result(result)
else:
result = await fn(*args, **kwargs)
ctx.set_result(result)
except (asyncio.CancelledError, StopAsyncIteration):
ctx.set_done()
raise
except Exception as e:
ctx.set_result(str(e) + ": " + traceback.format_exc())
ctx.set_done()
ctx.set_task(asyncio.run_coroutine_threadsafe(run_async(), ctx.loop))
return synchronised
|
a2929f74d12b153d0cd0720effb618b376bed56f
| 3,642,280
|
def add_deprecated_species_alias(registry, ftype, alias_species, species, suffix):
"""
Add a deprecated species alias field.
"""
unit_system = registry.ds.unit_system
if suffix == "fraction":
my_units = ""
else:
my_units = unit_system[suffix]
def _dep_field(field, data):
if not isinstance(data, FieldDetector):
issue_deprecation_warning(
('The "%s_%s" field is deprecated. ' + 'Please use "%s_%s" instead.')
% (alias_species, suffix, species, suffix)
)
return data[ftype, "%s_%s" % (species, suffix)]
registry.add_field(
(ftype, "%s_%s" % (alias_species, suffix)),
sampling_type="local",
function=_dep_field,
units=my_units,
)
|
2782079c908227d11780f66fb2d809b69680a31a
| 3,642,281
|
def docker_client():
"""
Return the current docker client in a manner that works with both the
docker-py and docker modules.
"""
try:
client = docker.from_env(version='auto', timeout=3600)
except TypeError:
# On older versions of docker-py (such as 1.9), version isn't a
# parameter, so try without it
client = docker.from_env()
client = client if not hasattr(client, 'api') else client.api
return client
|
7761299ea845577d67815df9fdd98dd74e828454
| 3,642,282
|
import typing
import copy
def bulk_generate_metadata(html_page: str,
description: dict=None,
enable_two_ravens_profiler=False
) -> typing.List[typing.List[dict]]:
"""
    :param html_page: HTML page to scan for links to candidate datasets.
    :param description: Base description dict reused for each discovered link.
    :param enable_two_ravens_profiler: Whether to run the Two Ravens profiler
        when generating metadata.
    :return: List of metadata generated for each successfully processed link.
"""
successed = []
hp = HTMLProcesser(html_page)
html_meta = hp.extract_description_from_meta()
for text, href in hp.generate_a_tags_from_html():
try:
cur_description = copy.deepcopy(description) or {}
if not Utils.validate_url(href):
continue
if not cur_description.get('title'):
black_list = set(text.lower().split()).intersection(TITLE_BLACK_LIST)
if not black_list:
cur_description['title'] = text.strip()
if not cur_description.get('description'):
cur_description['description'] = html_meta
cur_description['materialization_arguments'] = {'url': href}
# Not to extract html tables, otherwise there will be too many FPs:
cur_metadata = generate_metadata(cur_description, ignore_html=True,
enable_two_ravens_profiler=enable_two_ravens_profiler)
if cur_metadata:
successed.append(cur_metadata)
except Exception as e:
print(' - FAILED GENERATE METADATA ON \n\ttext = %s, \n\thref = %s \n%s' % (text, href, str(e)))
return successed
|
333d4ce53eac5b7214d516a09c5070b718b7a165
| 3,642,283
|
def add_cals():
"""
Add nutrients from products.
"""
if 'username' in session:
user_obj = users_db.get(escape(session['username']))
calc = Calculator(user_obj.weight, user_obj.height,
user_obj.age, user_obj.gender, user_obj.activity)
food = request.form.get("menu")
weight = request.form.get('keyword')
pr = Product(food)
try:
weig = float(weight)
nutr = pr.choose_product(food, weig)
a.append(nutr)
return render_template('home.html', username=escape(session['username']), normas=[calc.calories_need(), calc.proteins_need(), calc.fats_need(), calc.carbohydrates_need()], vars=nutr)
        except (TypeError, ValueError):
return "Wrong weight"
else:
return "You are not logged in"
|
f052b1d314fa3005f7bcd74b5890dba049bb3827
| 3,642,284
|
def parse(q):
"""http://en.wikipedia.org/wiki/Shunting-yard_algorithm"""
def _merge(output, scache, pos):
if scache:
s = " ".join(scache)
output.append((s, TOKEN_VALUE, pos - len(s)))
del scache[:]
try:
tokens = lex(q)
except Exception as e:
        raise ParseError(str(e))
tokens.reverse()
scache, stack, output = list(), list(), list()
while tokens:
tup = tokens.pop()
token, token_type, pos = tup[0], tup[1], tup[2]
utoken = token.upper()
if token_type in (TOKEN_OPER, TOKEN_LOGIC):
_merge(output, scache, pos)
if stack and not (stack[-1][1] == TOKEN_PARENTHESIS
and stack[-1][0] == "("):
# compare with old token on the top of stack
top = stack[-1]
if utoken not in OPERS:
raise ParseError(
"invalid operator `%s' at position %s" % (token, pos))
p = (OPERS[utoken], OPERS[top[0]])
if ((p[0].assoc == "L" and p[0].p <= p[1].p) or
(p[0].assoc == "R" and p[0].p < p[1].p)):
output.append(stack.pop())
# push new token onto stack
if token_type == TOKEN_LOGIC:
stack.append((utoken, TOKEN_LOGIC, pos))
else:
stack.append((utoken, TOKEN_OPER, pos))
elif token_type == TOKEN_PARENTHESIS and token == "(":
_merge(output, scache, pos)
stack.append((token, TOKEN_PARENTHESIS, pos))
elif token_type == TOKEN_PARENTHESIS and token == ")":
_merge(output, scache, pos)
del scache[:]
try:
while not (stack[-1][1] == TOKEN_PARENTHESIS
and stack[-1][0] == "("):
output.append(stack.pop())
except IndexError:
raise ParseError(
"parenthesis mismatch at position %s" % (pos))
stack.pop()
else:
scache.append(token)
_merge(output, scache, pos)
if stack and stack[-1][0] == "(":
raise ParseError(
"parenthesis mismatch at position %s" % output[2])
while stack:
output.append(stack.pop())
return output
|
484b240d1ec2cea3cf553bcbac5bef33efd1f74a
| 3,642,285
|
def CheckChangeOnUpload(input_api, output_api):
"""Presubmit checks for the change on upload.
The following are the presubmit checks:
* Check change has one and only one EOL.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
# Run on upload, not commit, since the presubmit bot apparently doesn't have
# coverage or Go installed.
results.extend(_InfraTests(input_api, output_api))
results.extend(_CheckGNFormatted(input_api, output_api))
return results
|
4791e348cfa7e4e1d25341bf44f8ddb8c8a84d4e
| 3,642,286
|
def compute_compression_rate(file: str, zip_archive) -> float:
"""Compute the compression rate of two files.
More info: https://en.m.wikipedia.org/wiki/Data_compression_ratio
:param file: the uncompressed file.
:param zip_archive the same file but compressed.
:returns the compression rate.
"""
uncompressed = Helper.Helper.get_file_size(file)
compressed = Helper.Helper.get_file_size(archive(file, zip_archive))
return uncompressed / compressed
|
f92f81282b51da253b1f06ad63d72555917b8256
| 3,642,287
|
def set_idc_func_ex(name, fp=None, args=(), flags=0):
"""
Extends the IDC language by exposing a new IDC function that is backed up by a Python function
This function also unregisters the IDC function if 'fp' was passed as None
@param name: IDC function name to expose
@param fp: Python callable that will receive the arguments and return a tuple.
If this argument is None then the IDC function is unregistered
@param args: Arguments. A tuple of idaapi.VT_XXX constants
@param flags: IDC function flags. A combination of EXTFUN_XXX constants
@return: Boolean.
"""
global __IDC_FUNC_CTXS
# Get the context
f = __IDC_FUNC_CTXS.get(name, None)
# Unregistering?
if fp is None:
# Not registered?
if f is None:
return False
# Break circular reference
del f.cb
# Delete the name from the dictionary
del __IDC_FUNC_CTXS[name]
# Delete the context and unregister the function
return _idaapi.pyw_unregister_idc_func(f.ctxptr)
# Registering a function that is already registered?
if f is not None:
# Unregister it first
set_idc_func_ex(name, None)
    # Convert the tuple argument info to a string
args = "".join([chr(x) for x in args])
# Create a context
ctxptr = _idaapi.pyw_register_idc_func(name, args, fp)
if ctxptr == 0:
return False
# Bind the context with the IdcFunc object
f = _IdcFunction(ctxptr)
# Remember the Python context
__IDC_FUNC_CTXS[name] = f
# Register IDC function with a callback
return _idaapi.py_set_idc_func_ex(
name,
f.fp_ptr,
args,
flags)
|
fe3656d7e33285eecc79b497866529e81ff15e64
| 3,642,288
|
from typing import Dict
def get_hidden_plugins() -> Dict[str, str]:
"""
Get the dictionary of hidden plugins and versions.
:return: dict of hidden plugins and their versions
"""
hidden_plugins = get_cache('cache/hidden-plugins.json')
if hidden_plugins:
return hidden_plugins
else:
return {}
|
bf4db552411576a4840b72e2e10aab25030d6191
| 3,642,289
|
import time
def wait_for_re_doc(coll, key, timeout=180):
"""Fetch a doc with the RE API, waiting for it to become available with a 30s timeout."""
start_time = time.time()
while True:
print(f'Waiting for doc {coll}/{key}')
results = re_client.get_doc(coll, key)
if results['count'] > 0:
break
else:
if int(time.time() - start_time) > timeout:
raise RuntimeError('Timed out trying to fetch', key)
time.sleep(1)
return results['results'][0]
|
0aa70ddd010f2d60ef2340d17ddf94b8660d0f1c
| 3,642,290
|
import pandas as pd
def split_year_from_week(data: pd.DataFrame) -> pd.DataFrame:
"""
Because we have used the partition key as the NFL year, the year/week need to be put into the appropriate columns
"""
data[[Stats.YEAR, Stats.NFL_WEEK]] = data[Stats.YEAR].str.split("/", expand=True)
data[Stats.NFL_WEEK] = data[Stats.NFL_WEEK].apply(lambda x: int(x.lstrip("week")))
return data
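
# Usage sketch (illustrative): Stats.YEAR and Stats.NFL_WEEK are column-name
# constants from the surrounding project; plain strings are assumed here.
class Stats:
    YEAR = "year"
    NFL_WEEK = "nfl_week"

df = pd.DataFrame({Stats.YEAR: ["2021/week3", "2021/week10"]})
print(split_year_from_week(df))
# the 'year' column becomes '2021' and 'nfl_week' becomes the integers 3 and 10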
|
52e65d28b3169759e949725b17d159d319514a61
| 3,642,291
|
from .core import Array, from_array
def normalize_index(idx, shape):
"""Normalize slicing indexes
1. Replaces ellipses with many full slices
2. Adds full slices to end of index
3. Checks bounding conditions
4. Replace multidimensional numpy arrays with dask arrays
5. Replaces numpy arrays with lists
6. Posify's integers and lists
7. Normalizes slices to canonical form
Examples
--------
>>> normalize_index(1, (10,))
(1,)
>>> normalize_index(-1, (10,))
(9,)
>>> normalize_index([-1], (10,))
(array([9]),)
>>> normalize_index(slice(-3, 10, 1), (10,))
(slice(7, None, None),)
>>> normalize_index((Ellipsis, None), (10,))
(slice(None, None, None), None)
>>> normalize_index(np.array([[True, False], [False, True], [True, True]]), (3, 2))
(dask.array<array, shape=(3, 2), dtype=bool, chunksize=(3, 2), chunktype=numpy.ndarray>,)
"""
if not isinstance(idx, tuple):
idx = (idx,)
# if a > 1D numpy.array is provided, cast it to a dask array
if len(idx) > 0 and len(shape) > 1:
i = idx[0]
if is_arraylike(i) and not isinstance(i, Array) and i.shape == shape:
idx = (from_array(i), *idx[1:])
idx = replace_ellipsis(len(shape), idx)
n_sliced_dims = 0
for i in idx:
if hasattr(i, "ndim") and i.ndim >= 1:
n_sliced_dims += i.ndim
elif i is None:
continue
else:
n_sliced_dims += 1
idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)
if len([i for i in idx if i is not None]) > len(shape):
raise IndexError("Too many indices for array")
none_shape = []
i = 0
for ind in idx:
if ind is not None:
none_shape.append(shape[i])
i += 1
else:
none_shape.append(None)
for axis, (i, d) in enumerate(zip(idx, none_shape)):
if d is not None:
check_index(axis, i, d)
idx = tuple(map(sanitize_index, idx))
idx = tuple(map(normalize_slice, idx, none_shape))
idx = posify_index(none_shape, idx)
return idx
|
63b98679ca435a238682d86124d6a60e81dd1f89
| 3,642,292
|
def relu(data):
"""Rectified linear unit.
.. math::
out = max(x, 0)
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.relu(data)
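
# Caller-side sketch (illustrative): this snippet is the op wrapper inside TVM
# itself; user code typically reaches the same op through relay.nn.relu.
from tvm import relay

x = relay.var("x", shape=(4, 8), dtype="float32")
y = relay.nn.relu(x)
print(y)  # prints the Relay expression applying nn.relu to x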
|
3c4602d68ca18a851ed6c24a35c348b1a97a2a4d
| 3,642,293
|
def levsim(args):
"""Returns the Levenshtein similarity between two terms."""
term_i, term_j, j = args
return (MLEV_ALPHA * (1 - Levenshtein.distance(term_i, term_j) \
/ max(len(term_i), len(term_j)))**MLEV_BETA, term_j, j)
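
# Usage sketch (illustrative): requires the `Levenshtein` package. MLEV_ALPHA
# and MLEV_BETA are module-level weighting constants not shown in this snippet;
# 1.0 is assumed for both, which keeps the raw similarity ratio.
import Levenshtein

MLEV_ALPHA = 1.0
MLEV_BETA = 1.0
score, term, idx = levsim(("colour", "color", 0))
print(round(score, 3), term, idx)  # 0.833 color 0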
|
5c4c70cbf0c172ac42ad26d15753a18e79fdd1f4
| 3,642,294
|
def retrieve_job_logs(job_id):
"""Retrieve job's logs.
:param job_id: UUID which identifies the job.
:returns: Job's logs.
"""
return JOB_DB[job_id].get('log')
|
b2759f3af8316272c4f4ef7f63939634a114c0c9
| 3,642,295
|
import sys
def parse_argv():
"""Parses arguments for use with the test launcher.
Arguments are:
1. Working directory.
2. Test runner, `pytest` or `nose`
3. debugSecret
4. debugPort
5. Debugger search path
6. Mixed-mode debugging (non-empty string to enable, empty string to disable)
7. Enable code coverage and specify filename
8. TestFile, with a list of testIds to run
9. Rest of the arguments are passed into the test runner.
"""
return (sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9:])
|
08ddb56f0fa0d0ef58d5cc3a437037ee2356d1a6
| 3,642,296
|
def _normalize_format(fmt):
"""Return normalized format string, is_compound format."""
if _is_string_or_bytes(fmt):
return _compound_format(sorted(
_factor_format(fmt.lower()))), ',' in fmt
else:
return _compound_format(sorted([_normalize_format(f)[0] for f in
fmt])), True
|
30094ec1b00205ae321a53f1bcba3ae250cd740d
| 3,642,297
|
def get_cevioai_version() -> str:
"""
    Get the version of CeVIO AI.
    Returns
    -------
    str
        The CeVIO AI version.
"""
_check_cevioai_status()
return _service_control.host_version
|
1186878664776d445402d04251bec140cda3390a
| 3,642,298
|
import subprocess
from bs4 import BeautifulSoup
import operator
def get_all_revisions_available(link):
""" List all the revisions availabel for a particular link
"""
svn_username = current_user.svn_username
svn_password = current_user.svn_password
link = link.replace("dllohsr222","10.133.0.222")
args = ["svn", "log",
link,"--xml",
"--username", svn_username,
"--password", svn_password,
"--non-interactive", "--no-auth-cache"]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
#logger.info(out)
soup = BeautifulSoup(out, "xml")
revision_data = soup.find_all('logentry')
#logger.info(revision_data)
rev_list = []
for rev in revision_data:
rev_list.append((int(rev["revision"]),rev.msg.getText()))
#revision_list = []
##for rev in revision_data:
rev_list.sort(key=operator.itemgetter(0), reverse = True)
updated_rev_list = []
first_flag = False
for rev_data in rev_list:
if not first_flag:
updated_rev_list.append("{}->{}(Latest)".format(rev_data[0], rev_data[1]))
first_flag = True
else:
updated_rev_list.append("{}->{}".format(rev_data[0], rev_data[1]))
return updated_rev_list
|
a531e6bd9e2fb1e7f35af2a4b51e167634f8f4f6
| 3,642,299
|