content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def default(typ, default=None, frm=None):
    """Build a checker that substitutes *default* when a type check fails.

    Wraps ``typ`` so that any value for which ``typ(val)`` reports
    ``Consts.Fail`` is replaced by ``default``; all other values pass
    through unchanged.  The wrapped check is registered through
    ``u_condition_checker`` with origin ``frm``.
    """
    def _fallback(val):
        # Substitute the default only when the underlying check failed.
        return default if typ(val) is Consts.Fail else val
    return u_condition_checker(frm, _fallback)
def fltflag(*args):
    """fltflag() -> flags_t

    Thin pass-through to the underlying IDA API binding.
    """
    flags = _idaapi.fltflag(*args)
    return flags
from matplotlib import pyplot
import numpy as np
import nibabel as nb
import os
def make_histogram(measure_file, measure):
    """
    Generates Histogram Image of intensities for a given input
    nifti file.

    Parameters
    ----------
    measure_file : string
        path to input nifti file
    measure : string
        Name of the measure label in the plot

    Returns
    -------
    hist_path : string
        Path to the generated histogram png
    """
    # get_fdata() replaces the long-deprecated get_data() accessor.
    data = nb.load(measure_file).get_fdata()
    data_flat = data.flatten(order='F')
    # Histogram over non-zero voxels only (zero is treated as background).
    y, bin_edges = np.histogram(data_flat[data_flat != 0], bins=100)
    bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    fig = pyplot.figure()
    fig.suptitle('%s intensity plot' % measure)
    pyplot.plot(bin_centers, y, '-')
    pyplot.xlabel('intensity')
    pyplot.ylabel('# of voxels')
    # Build the output path once; the original joined os.getcwd() onto an
    # already-absolute path twice.
    hist_path = os.path.join(os.getcwd(), '%s_hist_plot.png' % measure)
    fig.savefig(hist_path)
    pyplot.close(fig)
    return hist_path
import io
def get_mopac_deltaH0(lines):
    """
    Return delta H in kcal/mol from mopac output.
    #>>> s = io.read_file('test/input.out')
    #>>> print(get_mopac_deltaH(s))
    -13.02534
    """
    if isinstance(lines, str):
        lines = lines.splitlines()
    line_no = io.get_line_number('FINAL HEAT OF FORMATION', lines=lines)
    # The heat of formation is the sixth whitespace-separated token on
    # the matched line.
    return float(lines[line_no].split()[5])
def abstract(f):
    """Decorator marking a method abstract; any call raises NotImplementedError.

    Usage::

        class Foo(object):
            @abstract
            def bar(self):
                pass

        foo = Foo()
        foo.bar()  # NotImplementedError: can't invoke abstract method 'bar'
    """
    @wraps(f)
    def _abstract_stub(*args, **kwargs):
        message = "can't invoke abstract method '%s'" % f.__name__
        # Log before raising so the failure is visible even if the
        # exception is swallowed upstream.
        logger.error(message)
        raise NotImplementedError(message)
    return _abstract_stub
def capitalize_1(string):
    """
    Capitalizes a string using a combination of the upper and lower methods.

    :author: jrg94
    :param string: any string
    :return: a string with the first character capitalized and the rest
        lowercased; an empty string is returned unchanged
    """
    # Guard: the original raised IndexError on the empty string.
    if not string:
        return string
    return string[0].upper() + string[1:].lower()
def edit_current_stock(current_stock_id, current_stock_data):
    """
    Edit an existing STCurrentStock record.

    :param current_stock_id: primary key of the record to update
    :param current_stock_data: mapping of field names to new values
    :return: Number of affected rows (Example: 0/1)
    :except:
    """
    # Delegates the actual UPDATE to the shared db_instance helper.
    return db_instance.edit(STCurrentStock, current_stock_id, current_stock_data)
def fit_via_yule_walker(x, order, acf_method="mle", demean=True):
    """
    Estimate AR(p) parameters of a sequence x using the Yule-Walker equation.

    Parameters
    ----------
    x : 1d numpy array
    order : integer
        The order of the autoregressive process.
    acf_method : {'unbiased', 'mle'}, optional
        Determines the denominator used when estimating the
        autocorrelation function (ACF) at lag k: 'mle' divides by
        ``n = x.shape[0]``, 'unbiased' divides by ``n - k``.
    demean : bool
        True, the mean is subtracted from `x` before estimation.
    """
    if demean:
        x = x.copy()
        x -= x.mean()
    if x.ndim > 1 and x.shape[1] != 1:
        raise ValueError("expecting a vector to estimate AR parameters")
    n_obs = len(x)
    if acf_method == "unbiased":
        def denom(lag):
            return n_obs - lag
    else:
        def denom(lag):
            return n_obs
    # Sample autocovariances at lags 0..order.
    auto_cov = np.zeros(order + 1, np.float64)
    auto_cov[0] = np.sum(x ** 2) / denom(0)
    for lag in range(1, order + 1):
        auto_cov[lag] = np.sum(x[:-lag] * x[lag:]) / denom(lag)
    if order == 0:
        ar_coef = None
        innovation_var = auto_cov[0]
    else:
        ar_coef = _solve_yule_walker(auto_cov)
        innovation_var = auto_cov[0] - np.sum(auto_cov[1:] * ar_coef)
    aic = compute_aic(innovation_var, order, n_obs)
    return ar_coef, aic
import logging
def parse_challenge(verify_mfa, challenge_result):
    """Parse the challenge response.

    :param verify_mfa: response dict from the MFA challenge status request
    :param challenge_result: current challenge result; returned unchanged
        when the response carries no "result" field
    :return challenge_result: status of MFA challenge (lower-cased)
    :return challenge_reason: additional info about challenge status, or
        None when the response has no "reason" field
    """
    challenge_reason = None
    if "status" in verify_mfa:
        # Route through the logging framework instead of a bare print(),
        # and use lazy %-style args rather than eager formatting.
        logging.info("Status received: %s", verify_mfa["status"])
    if "reason" in verify_mfa:
        challenge_reason = verify_mfa["reason"]
    if "result" in verify_mfa:
        logging.info("Result received: %s", verify_mfa["result"])
        challenge_result = verify_mfa["result"].lower()
    logging.debug("Challenge result is %s", challenge_result)
    return challenge_result, challenge_reason
def beta_create_Bookstore_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """The Beta API is deprecated for 0.15.0 and later.

    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    # NOTE: generated gRPC beta-stub boilerplate for the Bookstore service.
    # Do not edit by hand; regenerate from the .proto definition instead.
    # Maps (service name, method name) -> request deserializer.
    request_deserializers = {
        ('endpoints.examples.bookstore.Bookstore', 'CreateBook'): CreateBookRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): CreateShelfRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): DeleteBookRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): DeleteShelfRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'GetBook'): GetBookRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'GetShelf'): GetShelfRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'ListBooks'): ListBooksRequest.FromString,
        ('endpoints.examples.bookstore.Bookstore', 'ListShelves'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    }
    # Maps (service name, method name) -> response serializer.
    response_serializers = {
        ('endpoints.examples.bookstore.Bookstore', 'CreateBook'): Book.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): Shelf.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'GetBook'): Book.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'GetShelf'): Shelf.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'ListBooks'): ListBooksResponse.SerializeToString,
        ('endpoints.examples.bookstore.Bookstore', 'ListShelves'): ListShelvesResponse.SerializeToString,
    }
    # Maps (service name, method name) -> unary-unary handler on the servicer.
    method_implementations = {
        ('endpoints.examples.bookstore.Bookstore', 'CreateBook'): face_utilities.unary_unary_inline(servicer.CreateBook),
        ('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): face_utilities.unary_unary_inline(servicer.CreateShelf),
        ('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): face_utilities.unary_unary_inline(servicer.DeleteBook),
        ('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): face_utilities.unary_unary_inline(servicer.DeleteShelf),
        ('endpoints.examples.bookstore.Bookstore', 'GetBook'): face_utilities.unary_unary_inline(servicer.GetBook),
        ('endpoints.examples.bookstore.Bookstore', 'GetShelf'): face_utilities.unary_unary_inline(servicer.GetShelf),
        ('endpoints.examples.bookstore.Bookstore', 'ListBooks'): face_utilities.unary_unary_inline(servicer.ListBooks),
        ('endpoints.examples.bookstore.Bookstore', 'ListShelves'): face_utilities.unary_unary_inline(servicer.ListShelves),
    }
    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)
import yaml
import json
def domain_parser_yml_to_json(path_to_domain_yml, path_to_domain_json):
    """
    Chatbot Domain Parser, will parse 'domain.yml' file to json object and write to file

    :param path_to_domain_yml: Complete path to YML domain file
    :param path_to_domain_json: Complete path to file where Json Object will be written
    :return: Json string of parsed object
    """
    with open(path_to_domain_yml, "r") as file:
        # safe_load avoids arbitrary Python object construction from the
        # YAML file and the "yaml.load() without Loader" deprecation.
        json_domain = yaml.safe_load(file)
    json_domain["entities"] = parse_entities_to_json(path_to_domain_yml)
    with open(path_to_domain_json, "w") as file:
        json.dump(json_domain, file)
    return json.dumps(json_domain)
def get_url_and_token(string):
    """Extract the RPC endpoint URL and auth token from an API string.

    Expected layout: ``<token>:/<ns>/<addr>/<ns>/<port>/<proto>``;
    raises ValueError on any malformed input.
    """
    try:
        token, api = string.split(":", 1)
        parts = api.split("/", 5)
        addr, port, proto = parts[2], parts[4], parts[5]
        url = f"{proto}://{addr}:{port}/rpc/v0"
    except Exception:
        raise ValueError(f"malformed API string : {string}")
    return (url, token)
def checkZone(false, usrdata):
    """ Check the cloudflare zone record for entries to remove """
    # Load every record in the zone via the API.
    json_data = callAPI([('a', 'rec_load_all')], usrdata)
    # Collected state: matching records, per-name counts, and the
    # records/data selected for removal.
    falserecs = {}
    names = {}
    falsedata = {}
    recs = []
    recdata = {}
    # Only proceed when the API call succeeded.
    if json_data["result"] == "success":
        for rec_obj in json_data["response"]["recs"]["objs"]:
            # Key on type+name so duplicates across record types
            # (e.g. MX vs A for the same name) don't collide.
            key = rec_obj["type"] + "-" + rec_obj["name"]
            names[key] = names.get(key, 0) + 1
            if rec_obj["content"] == false:
                rec_id = rec_obj["rec_id"]
                falserecs[rec_id] = key
                falsedata[rec_id] = {'type': rec_obj["type"],
                                     'name': rec_obj["name"],
                                     'content': rec_obj["content"],
                                     'service_mode': rec_obj["service_mode"],
                                     'ttl': rec_obj["ttl"],
                                     'prio': rec_obj["prio"]}
    # Select matching records for removal, but never the last entry
    # for a given name.
    for rec_id, key in falserecs.items():
        if names[key] > 1:
            recs.append(rec_id)
            recdata[rec_id] = falsedata[rec_id]
    return recs, recdata
import io
def read_temp(buffer: io.BytesIO, offset: int = None) -> float:
    """Retrieve temperature [C] value (2 bytes) from buffer.

    :param buffer: byte stream positioned at (or seekable to) the value
    :param offset: absolute offset to seek to before reading; ``None``
        reads from the current position
    :return: temperature in degrees Celsius (raw big-endian signed value
        is in tenths of a degree)
    """
    # Compare against None explicitly: the original ``if offset:``
    # silently skipped the seek for the legitimate offset 0.
    if offset is not None:
        buffer.seek(offset)
    value = int.from_bytes(buffer.read(2), byteorder="big", signed=True)
    return float(value) / 10
def convert_netaddr(netaddr: str) -> str:
    """
    Converts network address from hex_ip:hex_port format to ip:port
    e,g: 573B1FAC:BE46 to 172.31.59.87:48710

    Returns None (implicitly) when the pattern check rejects the input;
    raises InvalidNetAddrFormat when conversion itself fails.
    """
    try:
        if check_netaddr_pattern(netaddr):
            addr, port = netaddr.split(':')
            addr = convert_addr(addr)
            port = convert_port(port)
            return '{}:{}'.format(addr, port)
    except Exception as exc:
        # Narrowed from BaseException so KeyboardInterrupt/SystemExit
        # still propagate; chain the cause for easier debugging.
        raise InvalidNetAddrFormat from exc
import copy
import logging
import traceback
def derive_prehashes_from_events(events, join_by=DEFAULT_JOIN_BY):
    """
    Compute a normalized form (pre-hash string) for each event.
    This is the main functionality of the hash generator.

    Parameters
    ----------
    events :
        Event container; ``events[2]`` is iterated as the list of events,
        and each event is indexed as ``event[0]`` (event type) and
        ``event[2]`` (children).  # assumes this tuple layout -- TODO confirm
    join_by : str
        Separator between normalized fields; literal ``\\n``/``\\t`` escape
        sequences are expanded to real newline/tab characters.

    Returns
    -------
    list of str
        One pre-hash string per event; events that fail to parse are
        skipped and logged as errors.
    """
    events = copy.deepcopy(events)  # do not change parameter!
    # NOTE(review): sets the module-level JOIN_BY, which the helper
    # functions below appear to read implicitly -- not thread-safe.
    global JOIN_BY
    join_by = join_by.replace(r"\n", "\n").replace(r"\t", "\t")
    logging.debug("Setting JOIN_BY='%s'", join_by)
    JOIN_BY = join_by
    logging.info("#events = %s", len(events[2]))
    for i in range(len(events[2])):
        logging.info("%s: %s\n", i, events[2][i])
    prehash_string_list = []
    for event in events[2]:
        logging.debug("prehashing event:\n%s", event)
        try:
            # Event type plus the ordered children, then any children
            # not covered by PROP_ORDER.
            prehash_string_list.append("eventType=" + event[0] + JOIN_BY
                    + _recurse_through_children_in_order(event[2], PROP_ORDER) + JOIN_BY
                    + _gather_elements_not_in_order(event[2], PROP_ORDER)
                    )
        except Exception as ex:
            # Best-effort: log and skip unparseable events.
            logging.error("could not parse event:\n%s\n\nerror: %s", event, ex)
            logging.debug("".join(traceback.format_tb(ex.__traceback__)))
            pass
    # To see/check concatenated value string before hash algorithm is performed:
    logging.debug("prehash_string_list = {}".format(prehash_string_list))
    return prehash_string_list
def make_bsa_2d(betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0,
                method='simple', verbose=0):
    """
    Function for performing bayesian structural analysis
    on a set of images.

    Parameters
    ----------
    betas, array of shape (nsubj, dimx, dimy) the data used
           Note that it is assumed to be a t- or z-variate
    theta=3., float,
              first level threshold of betas
    dmax=5., float, expected between subject variability
    ths=0, float,
           null hypothesis for the prevalence statistic
    thq=0.5, float,
             p-value of the null rejection
    smin=0, int,
            threshold on the number of contiguous voxels
            to make regions meaningful structures
    method= 'simple', string,
            estimation method used ; to be chosen among
            'simple', 'dev', 'loo', 'ipmi', 'simple_quick', 'sbf'
    verbose=0, verbosity mode

    Returns
    -------
    AF the landmark_regions instance describing the result
    BF: list of hroi instances describing the individual data
    """
    # Validate the method up front.  The original checked only after the
    # dispatch, with Python 2 raise syntax (a SyntaxError under Python 3).
    if method not in ['loo', 'dev', 'simple', 'ipmi', 'simple_quick', 'sbf']:
        raise ValueError('method is not correctly defined')
    ref_dim = np.shape(betas[0])
    nsubj = betas.shape[0]
    # np.int / np.float aliases are removed in modern numpy; use builtins.
    xyz = np.array(np.where(betas[:1])).T.astype(int)
    nvox = np.size(xyz, 0)
    # create the field structure that encodes image topology
    Fbeta = ff.Field(nvox)
    Fbeta.from_3d_grid(xyz, 18)
    # Get coordinates in mm
    coord = xyz.astype(float)
    # get the functional information
    lbeta = np.array([np.ravel(betas[k]) for k in range(nsubj)]).T
    # the voxel volume is 1.0
    g0 = 1.0 / (1.0 * nvox) * 1. / np.sqrt(2 * np.pi * dmax ** 2)
    affine = np.eye(4)
    shape = (1, ref_dim[0], ref_dim[1])
    lmax = 0
    bdensity = 1
    if method == 'ipmi':
        group_map, AF, BF, likelihood = \
            bsa.compute_BSA_ipmi(Fbeta, lbeta, coord, dmax, xyz,
                                 affine, shape, thq,
                                 smin, ths, theta, g0, bdensity)
    if method == 'simple':
        group_map, AF, BF, likelihood = \
            bsa.compute_BSA_simple(Fbeta, lbeta, coord, dmax, xyz,
                                   affine, shape, thq, smin, ths,
                                   theta, g0)
    if method == 'loo':
        # Leave-one-out returns log-likelihoods directly, no region objects.
        mll, ll0 = bsa.compute_BSA_loo(Fbeta, lbeta, coord, dmax, xyz,
                                       affine, shape, thq, smin, ths,
                                       theta, g0)
        return mll, ll0
    if method == 'dev':
        group_map, AF, BF, likelihood = \
            bsa.compute_BSA_dev(Fbeta, lbeta, coord, dmax, xyz,
                                affine, shape, thq,
                                smin, ths, theta, g0, bdensity)
    if method == 'simple_quick':
        likelihood = np.zeros(ref_dim)
        group_map, AF, BF, coclustering = \
            bsa.compute_BSA_simple_quick(Fbeta, lbeta, coord, dmax, xyz,
                                         affine, shape, thq, smin, ths,
                                         theta, g0)
    if method == 'sbf':
        likelihood = np.zeros(ref_dim)
        group_map, AF, BF = sbf.Compute_Amers(Fbeta, lbeta, xyz, affine, shape,
                                              coord, dmax=dmax, thr=theta,
                                              ths=ths, pval=thq)
    if verbose == 0:
        return AF, BF
    # --- everything below is diagnostic plotting (verbose mode only) ---
    if AF is not None:
        lmax = AF.k + 2
        AF.show()
    group_map.shape = ref_dim
    mp.figure()
    mp.subplot(1, 3, 1)
    mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
    mp.title('Blob separation map')
    mp.colorbar()
    if AF is not None:
        group_map = AF.map_label(coord, 0.95, dmax)
        group_map.shape = ref_dim
    mp.subplot(1, 3, 2)
    mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
    mp.title('group-level position 95% \n confidence regions')
    mp.colorbar()
    mp.subplot(1, 3, 3)
    likelihood.shape = ref_dim
    mp.imshow(likelihood, interpolation='nearest')
    mp.title('Spatial density under h1')
    mp.colorbar()
    mp.figure()
    if nsubj == 10:
        for s in range(nsubj):
            mp.subplot(2, 5, s + 1)
            lw = -np.ones(ref_dim)
            if BF[s] is not None:
                nls = BF[s].get_roi_feature('label')
                nls[nls == -1] = np.size(AF) + 2
                for k in range(BF[s].k):
                    xyzk = BF[s].xyz[k].T
                    lw[xyzk[1], xyzk[2]] = nls[k]
            mp.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax)
            mp.axis('off')
    mp.figure()
    if nsubj == 10:
        for s in range(nsubj):
            mp.subplot(2, 5, s + 1)
            mp.imshow(betas[s], interpolation='nearest', vmin=betas.min(),
                      vmax=betas.max())
            mp.axis('off')
    return AF, BF
import typing
import math
import logging
def train_surrogate_on_task(task_id: typing.Union[int, str],
                            hyperparameter_names: typing.List[str],
                            setup_data: pd.DataFrame,
                            evaluation_measure: str,
                            normalize: bool,
                            n_estimators,
                            random_seed) \
        -> typing.Tuple[sklearn.pipeline.Pipeline, typing.List]:
    """
    Trains a surrogate on the meta-data from a task.

    Parameters
    ----------
    task_id : int or str
        Task identifier (used only for logging).
    hyperparameter_names : list of str
        Names of the feature columns the surrogate is trained on.
    setup_data : pd.DataFrame
        Meta-data; after pruning it must contain exactly the hyperparameter
        columns plus the evaluation-measure column.  NOTE: pruned and
        sorted in place (the caller's frame is mutated).
    evaluation_measure : str
        Name of the target column.
    normalize : bool
        If True, min-max scale the target into [0, 1].
    n_estimators :
        Number of trees for the random-forest surrogate.
    random_seed :
        Seed passed to the random forest.

    Returns
    -------
    tuple
        The fitted sklearn Pipeline and the column order it was trained on.
    """
    # delete unnecessary columns (in place!)
    legal_columns = set(hyperparameter_names + [evaluation_measure])
    for column in setup_data.columns.values:
        if column not in legal_columns:
            del setup_data[column]
    if set(setup_data.columns.values) != legal_columns:
        missing = legal_columns - set(setup_data.columns.values)
        over = set(setup_data.columns.values) - legal_columns
        raise ValueError('Columns for surrogate do not align with expectations. Missing: %s, over: %s' % (missing, over))
    # Minimum occurrences required per categorical value (see check below).
    nominal_values_min = 10
    # obtain the data
    scaler = sklearn.preprocessing.MinMaxScaler()
    # sort columns! (fixes the feature order the surrogate is trained on)
    setup_data.sort_index(axis=1, inplace=True)
    # reshape because of sklearn api (does not work on vectors)
    if normalize:
        y_reshaped = setup_data[evaluation_measure].values.reshape(-1, 1)
        setup_data[evaluation_measure] = scaler.fit_transform(y_reshaped)[:, 0]
        min_val = min(setup_data[evaluation_measure])
        assert math.isclose(min_val, 0.0), 'Not close to 0.0: %f' % min_val
        max_val = max(setup_data[evaluation_measure])
        assert math.isclose(max_val, 1.0), 'Not close to 1.0: %f' % max_val
    # assert that we have ample values for all categorical options
    # NOTE(review): hyperparameter_names is annotated List[str], so the
    # isinstance check against ConfigSpace.CategoricalHyperparameter can
    # never be true and this validation looks dead -- confirm whether the
    # intended element type is a ConfigSpace hyperparameter object.
    for hyperparameter in hyperparameter_names:
        if isinstance(hyperparameter, ConfigSpace.CategoricalHyperparameter):
            for value in hyperparameter.choices:
                num_occurances = len(setup_data.loc[setup_data[hyperparameter.name] == value])
                if num_occurances < nominal_values_min:
                    raise ValueError('Nominal hyperparameter %s value %s does not have enough values. Required '
                                     '%d, got: %d' % (hyperparameter.name, value, nominal_values_min, num_occurances))
    y = setup_data[evaluation_measure].values
    del setup_data[evaluation_measure]
    logging.info('Dimensions of meta-data task %s: %s. Target %s [%f-%f]' % (task_id,
                                                                             str(setup_data.shape),
                                                                             evaluation_measure,
                                                                             min(y), max(y)))
    # TODO: HPO
    # Categorical features: impute a sentinel then one-hot encode.
    nominal_pipe = sklearn.pipeline.Pipeline(steps=[
        ('imputer', sklearn.impute.SimpleImputer(strategy='constant', fill_value='-1')),
        ('encoder', sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore'))
    ])
    # Boolean mask marking which columns are object-dtype (categorical).
    nominal_indicators = []
    for idx, (name, col) in enumerate(setup_data.dtypes.iteritems()):
        nominal_indicators.append(col == object)
    nominal_indicators = np.array(nominal_indicators, dtype=bool)
    col_trans = sklearn.compose.ColumnTransformer(transformers=[
        ('numeric', sklearn.impute.SimpleImputer(strategy='constant', fill_value=-1), ~nominal_indicators),
        ('nominal', nominal_pipe, nominal_indicators)
    ])
    surrogate = sklearn.pipeline.Pipeline(steps=[
        ('transformer', col_trans),
        ('classifier', sklearn.ensemble.RandomForestRegressor(n_estimators=n_estimators,
                                                              random_state=random_seed))
    ])
    surrogate.fit(setup_data.values, y)
    # the column vector is good to return, as the get_dummies function might behave in-stable
    return surrogate, setup_data.columns.values
import sys
def _decode_first_label(acts, point):
    """Return the first label of *point* as a str.

    Handles both label storage styles found in activation tables: the old
    style where ``labels`` is a single numpy byte string, and the new
    style where ``labels`` is a list of byte strings.
    """
    labels = acts.get_activation(point).labels
    if isinstance(labels, (bytes, bytearray, str)):
        # Old style: labels is one byte string.
        return labels.decode('UTF-8')
    # New style: labels is a list; take the first entry.
    return labels[0].decode('UTF-8')


def build_label_dict(acts, use_loaded_files=True, verbose=True, doMean=False):
    """
    Builds a dictionary of labels to points in local format.
    Gives out a dictionary and a list of found labels.

    acts: activation table object
    use_loaded_files: whether to assume one file per label (current default)

    This version can deal with the filenames and labels being different.

    Returns
    -------
    label_dict : dict mapping label -> list of point indices
    found_labels : list of labels discovered
    no_files_in_label : dict mapping label -> number of points for it
    """
    sys.stdout.write('About to build the label dict (slow)')
    sys.stdout.flush()
    label_dict = {}
    no_files_in_label = {}
    if use_loaded_files:
        # We use the filenames as the labels (one file per label).
        files = acts.get_loaded_files()
        no_of_files = len(files)
        found_labels = [filename_to_label(file_name.split('_')[0])
                        for file_name in files]
        points_per_label = [[] for _ in range(no_of_files)]
        if verbose:
            print('Found {} files in activation table object'.format(no_of_files))
        for current_point in acts.get_all_point_indices():
            # TODO:: Make this work with multiple labels
            assigned_label = filename_to_label(_decode_first_label(acts, current_point))
            for f_no, label in enumerate(found_labels):
                if assigned_label == label:
                    points_per_label[f_no].append(current_point)
                    break
        if len(found_labels) != len(files):
            print('The number of found labels does not match the number of files in activation table')
        if verbose:
            print('Found label: \t No. of points')
            for label, points in zip(found_labels, points_per_label):
                print('{}: \t {}'.format(label, len(points)))
        for label, points in zip(found_labels, points_per_label):
            label_dict[label] = points
            no_files_in_label[label] = len(points)
    else:
        # We assume acts already carries the labels on each point.
        files = acts.get_loaded_files()
        if verbose:
            print('Found {} files in activation table object'.format(len(files)))
        point_to_label = {}
        for current_point in acts.get_all_point_indices():
            # TODO:: Make this work with multiple labels
            point_to_label[current_point] = _decode_first_label(acts, current_point)
        if verbose:
            print('Found label: \t No. of points')
        # The original built this set with a nested comprehension that
        # re-scanned .values() once per key (accidentally quadratic).
        found_labels = list(set(point_to_label.values()))
        for f_label in found_labels:
            points = [p for p, lab in point_to_label.items() if lab == f_label]
            label_dict[f_label] = points
            # Also populate the count dict on this branch (the original
            # returned it empty here).
            no_files_in_label[f_label] = len(points)
            if verbose:
                print('{}: \t {}'.format(f_label, len(points)))
    sys.stdout.write('Built the label dict')
    sys.stdout.flush()
    return label_dict, found_labels, no_files_in_label
def test_hhs_hospital_dataset_non_default_start_date():
    """Tests HHSHopsitalStateDataset imports adult_icu_beds_capacity
    correctly for a state with a non-default start date (Alaska) verifying
    that data prior to its start date (2020-10-06) is dropped."""
    # The scraper variable under test: ICU bed capacity from the HHS provider.
    variable = ccd_helpers.ScraperVariable(
        variable_name="adult_icu_beds_capacity",
        measurement="current",
        unit="beds",
        provider="hhs",
        common_field=CommonFields.ICU_BEDS,
    )
    source_url = UrlStr("http://foo.com")
    # Do location Alaska and start at 2020-10-05 so the first date gets dropped.
    input_data = build_can_scraper_dataframe(
        {variable: [10, 20, 30]},
        source_url=source_url,
        start_date="2020-10-05",
        location=2,
        location_id="iso1:us#iso2:us-ak",
    )

    # Test double: feed the fixture dataframe in place of the real
    # CAN scraper download.
    class CANScraperForTest(hhs_hospital_dataset.HHSHospitalStateDataset):
        @staticmethod
        def _get_covid_county_dataset() -> ccd_helpers.CanScraperLoader:
            return ccd_helpers.CanScraperLoader(input_data)

    ds = CANScraperForTest.make_dataset()
    # Data before 2020-10-05 should have been dropped, so we are left with [20, 30]
    icu_beds = test_helpers.TimeseriesLiteral(
        [20, 30], source=taglib.Source(type="HHSHospitalState", url=source_url)
    )
    # "02" is Alaska's FIPS code; static ICU_BEDS carries the latest value.
    expected_ds = test_helpers.build_default_region_dataset(
        {CommonFields.ICU_BEDS: icu_beds},
        region=pipeline.Region.from_fips("02"),
        start_date="2020-10-06",
        static={CommonFields.ICU_BEDS: 30},
    )
    test_helpers.assert_dataset_like(ds, expected_ds)
def fibonacci(n: int) -> int:
    """
    Iteratively compute fibonacci of n.

    :param n: non-negative index into the sequence (F(0)=0, F(1)=1)
    :return: the n-th Fibonacci number
    :raises ValueError: if n is negative (the original body left its
        accumulator unbound and raised NameError instead)
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
def do_make_label_group(
    text: List[str], **kwargs: Dict[str, Any]
) -> Tuple[int, List[RAMSTKLabel]]:
    """Make and place a group of labels.

    Each label requests its natural width so no letters are cut off, and
    every label is given the same character width (that of the longest
    text) so the widgets they describe line up in a column.  The widest
    pixel width observed is returned so callers can position widgets to
    the right of the label column.

    :param text: a list containing the text for each label.
    :return: (_max_x, _lst_labels)
        the width of the label with the longest text and a list of the
        RAMSTKLabel() instances.
    :rtype: tuple of (integer, list of RAMSTKLabel())
    """
    bold = kwargs.get("bold", True)
    justify = kwargs.get("justify", Gtk.Justification.RIGHT)
    wrap = kwargs.get("wrap", True)
    # Character width of the longest caption; shared by all labels.
    widest_chars = max(len(caption) for caption in text)
    labels = []
    max_x = 0
    for caption in text:
        label = RAMSTKLabel(caption)
        label.do_set_properties(
            bold=bold, height=-1, justify=justify, width=-1, wrap=wrap
        )
        label.set_width_chars(widest_chars)
        max_x = max(max_x, label.get_attribute("width"))
        labels.append(label)
    return max_x, labels
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it.

    Returns a dict of blobs ('data', 'rois', 'labels', and optionally
    'bbox_targets'/'bbox_loss_weights') formatted for caffe.
    """
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                                    size=num_images)
    assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(num_images, cfg.TRAIN.BATCH_SIZE)
    # Integer division: divisibility is asserted above, and Python 3's
    # true division would propagate floats into the sampling code.
    rois_per_image = cfg.TRAIN.BATCH_SIZE // num_images
    fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
    # Get the input image blob, formatted for caffe
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
    # Now, build the region of interest and label blobs
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0), dtype=np.float32)
    bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
    bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
    # all_overlaps = []
    # range() replaces the Python 2-only xrange().
    for im_i in range(num_images):
        labels, overlaps, im_rois, bbox_targets, bbox_loss \
            = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
                           num_classes)
        # Add to RoIs blob
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))
        # Add to labels, bbox targets, and bbox loss blobs
        labels_blob = np.hstack((labels_blob, labels))
        bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
        bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
        # all_overlaps = np.hstack((all_overlaps, overlaps))
    # For debug visualizations
    # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
    blobs = {'data': im_blob,
             'rois': rois_blob,
             'labels': labels_blob}
    if cfg.TRAIN.BBOX_REG:
        blobs['bbox_targets'] = bbox_targets_blob
        blobs['bbox_loss_weights'] = bbox_loss_blob
    return blobs
def _variable_with_weight_decay(name, shape, stddev, wd, index):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    NOTE(review): the weight-decay branch below is commented out, so `wd`
    and `index` are currently unused -- confirm whether decay should be
    re-enabled or the parameters dropped.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
      index: suffix for the 'losses<index>' collection (only relevant to
          the disabled decay code).

    Returns:
      Variable Tensor
    """
    dtype = tf.float64
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    #i=var.op.name.find('/')-2
    '''
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses'+index, weight_decay)
    '''
    return var
from typing import Optional
from typing import Tuple
def plot_line_line_y1_y2(
    y1: pd.Series,
    y2: pd.Series,
    *,
    figsize: Optional[Tuple[float, float]] = None,
    smoothing: Optional[str] = None,
    number_knots: Optional[int] = None,
    marker1: Optional[str] = '.',
    marker2: Optional[str] = '.',
    markersize1: Optional[int] = 8,
    markersize2: Optional[int] = 8,
    linestyle1: Optional[str] = '-',
    linestyle2: Optional[str] = '-',
    linewidth1: Optional[float] = 1,
    linewidth2: Optional[float] = 1,
    colour1: Optional[str] = '#0077bb',
    colour2: Optional[str] = '#33bbee',
    labellegendy1: Optional[str] = None,
    labellegendy2: Optional[str] = None
) -> Tuple[plt.Figure, axes.Axes]:
    """
    Line plot of y1 and y2.
    Optional smoothing applied to y1 and y2.
    y1 and y2 are of the same length.
    y1 and y2 have the same units.

    If smoothing is applied, the series must not contain NaN, inf, or -inf.
    Fit a piecewise cubic function the the constraint that the fitted curve is
    linear outside the range of the knots. The fitter curve is continuously
    differentiable to the second order at all of the knots.

    Parameters
    ----------
    y1 : pd.Series
        The data to plot on the ordinate.
    y2 : pd.Series
        The data to plot on the ordinate.
    figsize : Optional[Tuple[float, float]] = None
        The (width, height) of the figure (in, in).
    smoothing : Optional[str] = None
        The type of smoothing to apply.
    number_knots : Optional[int] = None
        The number of knots for natural cubic spline smoothing.
    marker1 : Optional[str] = '.'
        The type of plot point for y1.
    marker2 : Optional[str] = '.'
        The type of plot point for y2.
    markersize1 : Optional[int] = 8
        The size of the plot point for y1.
    markersize2 : Optional[int] = 8
        The size of the plot point for y2.
    linestyle1 : Optional[str] = '-'
        The style of the line for y1.
    linestyle2 : Optional[str] = '-'
        The style of the line for y2.
    linewidth1 : Optional[float] = 1
        The width of the line for y1.
    linewidth2 : Optional[float] = 1
        The width of the line for y2.
    colour1 : Optional[str] = '#0077bb'
        The colour of the line for y1.
    colour2 : Optional[str] = '#33bbee'
        The colour of the line for y2.
    labellegendy1 : Optional[str] = None
        The legend label of the line y1.
    labellegendy2 : Optional[str] = None
        The legend label of the line y2.

    Returns
    -------
    Tuple[plt.Figure, axes.Axes]
        A matplotlib figure and Axes tuple.

    Example
    -------
    >>> import matplotlib.pyplot as plt
    >>> import datasense as ds
    >>>
    >>> series_y1 = ds.random_data()
    >>> series_y2 = ds.random_data()
    >>> fig, ax = ds.plot_line_line_y1_y2(
    >>>     y1=series_y1,
    >>>     y2=series_y2
    >>> )
    >>> plt.show()
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # Abscissa is simply the 1-based sample index.
    X = pd.Series(range(1, y1.size + 1, 1))
    if smoothing is None:
        # Plot the raw series directly.
        ax.plot(
            X,
            y1,
            marker=marker1,
            markersize=markersize1,
            linestyle=linestyle1,
            linewidth=linewidth1,
            color=colour1,
            label=labellegendy1
        )
        ax.plot(
            X,
            y2,
            marker=marker2,
            markersize=markersize2,
            linestyle=linestyle2,
            linewidth=linewidth2,
            color=colour2,
            label=labellegendy2
        )
    elif smoothing == 'natural_cubic_spline':
        # Fit one spline per series and plot the fitted curves instead.
        model1 = natural_cubic_spline(
            X=X,
            y=y1,
            number_knots=number_knots
        )
        model2 = natural_cubic_spline(
            X=X,
            y=y2,
            number_knots=number_knots
        )
        ax.plot(
            X,
            model1.predict(X),
            marker=None,
            linestyle='-',
            color=colour1
        )
        ax.plot(
            X,
            model2.predict(X),
            marker=None,
            linestyle='-',
            color=colour2
        )
    return (fig, ax)
def make_admin_versionable(cls):
    """Return a subclass of ``cls`` augmented with admin versioning support.

    ``AdminVersionableMixIn`` is placed first in the bases so its
    versioning behaviour takes precedence over ``cls`` in the MRO.
    ``cls`` itself is left unmodified.
    """
    class AdminVersionable(AdminVersionableMixIn, cls):
        pass
    return AdminVersionable
import os
def _get_temp_folder_linux():
    """Return the preferred temp folder on Linux (and unknown OSes).

    Preference order:
      1. ``$XDG_RUNTIME_DIR`` — the per-user runtime directory dedicated
         to exactly this kind of transient data.
      2. ``$HOME/tmp`` — a fallback inside the user's home directory
         (Porthole creates it if needed).

    Raises:
        IOError: if neither environment variable is set.
    """
    env = os.environ
    if "XDG_RUNTIME_DIR" in env:
        return env["XDG_RUNTIME_DIR"]
    if "HOME" in env:
        return os.path.join(env["HOME"], "tmp")
    raise IOError(
        "Neither $XDG_RUNTIME_DIR or $HOME could be read. Cannot "
        "automatically query server information on this system."
    )
def generate_model(d):
    """Return a (d+1, 1) column vector of random linear-model coefficients.

    Coefficients are drawn uniformly from [0, 1); the extra entry
    accommodates the intercept term.
    """
    coefficients = np.random.rand(d + 1, 1)
    return coefficients
import json
def handle_error(ex, hed_info=None, title=None, return_as_str=True):
    """Format an exception into a message dictionary or its JSON string.

    Parameters
    ----------
    ex: Exception
        The exception raised.  If it carries ``error_type`` or ``message``
        attributes those are used; otherwise the class name and ``str(ex)``
        are used instead.
    hed_info: dict
        A dictionary of information; the formatted message is stored under
        its 'message' key and, when a dict is given, that same object is
        returned.
    title: str
        A title to be included with the message.
    return_as_str: bool
        If true return as string otherwise as dictionary

    Returns
    -------
    str or dict
    """
    hed_info = hed_info or {}
    # Prefer the exception's own metadata when it provides any.
    error_code = getattr(ex, 'error_type', type(ex).__name__)
    message = ex.message if hasattr(ex, 'message') else str(ex)
    title = title or ''
    hed_info['message'] = f"{title}[{error_code}: {message}]"
    return json.dumps(hed_info) if return_as_str else hed_info
import re
def read(filename, ffmpeg_bin="ffmpeg", debug=False):
    """
    Read an audio file into python using FFMPEG. The syntax
    is similar to `scipy.io.wavfile.read`.
    Note: supports signed 16/32 bit integer and 32 bit float audio
    Parameters
    ----------
    filename: str
        The audio filename
    ffmpeg_bin: str, optional
        The name of the ffmpeg executable
    debug: bool, optional
        Print some debug information
    Returns
    -------
    samplerate: The samplerate of the audio signal.
    audio: An ndarray containing the audio samples. For multichannel audio it returns
        a 2D array with every column corresponding to a channel.
    """
    # First pass: run ffmpeg with no output file so it dumps the stream
    # metadata on stderr, then parse format/rate/channels out of it.
    command = [ffmpeg_bin, "-i", filename]
    with sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=100000) as pipe:
        _, stderr = pipe.communicate()
    for l in stderr.decode("utf-8").split("\n"):
        if debug:
            print(l)
        words = l.split()
        # Only "Stream ..." description lines are of interest.
        if len(words) >= 1 and words[0] != "Stream":
            continue
        if re.search("Audio:", l) is None:
            continue
        fmt = re.search("Audio: (.*?) ", l).group(1)
        samplerate = re.search(", (\d+) Hz,", l).group(1)
        n_channels, sampleformat, n_bits = re.search(
            ", (\d+ channels|mono|stereo), ([a-z]+)([0-9]*)", l
        ).group(1, 2, 3)
        break
    # NOTE(review): if no audio stream line was found above, the names used
    # below are unbound and a NameError is raised — confirm callers always
    # pass files containing an audio stream.
    if n_channels == "mono":
        n_channels = 1
    elif n_channels == "stereo":
        n_channels = 2
    else:
        n_channels = int(re.search("(\d+) channels", n_channels).group(1))
    samplerate = int(samplerate)
    # Map the ffmpeg sample format to a numpy dtype and an ffmpeg raw
    # output format for the second pass.
    if sampleformat == "flt" or sampleformat == "fltp":
        dtype = np.float32
        out_format = "f32le"
        n_bits = 32
    elif sampleformat == "s":
        n_bits = int(n_bits)
        if n_bits == 16:
            dtype = np.int16
            out_format = "s16le"
        elif n_bits == 32:
            dtype = np.int32
            out_format = "s32le"
        else:
            raise ValueError("For now only signed 16/32 bit audio is supported. Sorry")
    else:
        raise ValueError(
            "For now only signed 16/32 or float 32 bit audio is supported. Sorry"
        )
    n_bytes = n_bits // 8
    # chunks of a second of audio
    n_chunk = samplerate
    # now read the samples
    command = [ffmpeg_bin, "-i", filename, "-f", out_format, "-"]
    with sp.Popen(
        command, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=n_channels * n_bytes * n_chunk
    ) as pipe:
        raw_audio, _ = pipe.communicate()
    # One column per channel; mono is squeezed to a 1D array.
    audio = np.frombuffer(raw_audio, dtype=dtype).reshape((-1, n_channels))
    if audio.shape[1] == 1:
        audio = audio[:, 0]
    return samplerate, audio
def get_parent_epic(issue):
    """Get the parent epic of `issue`.

    Different boards have different meta data formats: the DEV board
    stores the epic key in the custom epic-link field, while the BS board
    exposes it as the key of the issue's ``parent``.

    Returns the epic key, or None when neither field is present.
    """
    # BUG FIX: the body previously referenced an undefined name `ticket`
    # instead of the `issue` parameter, raising NameError at call time.
    return (
        # DEV board
        getattr(issue.fields, JIRA_FIELD_EPIC_LINK, None)
        # BS board
        or getattr(getattr(issue.fields, 'parent', None), 'key', None)
    )
import os
def shortcuts_backup_path(directory, user, timestamp_format="%Y%m%d%H%M%S"):
    """
    Build the destination path for a shortcuts.vdf backup file.

    The file lives under ``directory/<user_id>/`` and its name (produced
    by ``backup_filename``) embeds a timestamp before the extension so
    that many backups can exist at once.
    """
    assert directory is not None
    filename = backup_filename(user, timestamp_format)
    return os.path.join(directory, str(user.user_id), filename)
def get_elements(driver,
                 selector,
                 text='',
                 selector_type=By.CSS_SELECTOR,
                 timeout=DEFAULT_TIMEOUT,
                 must_be_visible=True):
    """
    Block until at least one element matching the selector is available.

    :param driver: webdriver
    :param selector: str, CSS selector
    :param text: text that the element should contain
    :param selector_type: selector format. Default is By.CSS_SELECTOR
    :param timeout: int, time to wait before raising exception
    :param must_be_visible: bool, true if the returned components must be visible
    :return: the matched element(s)
    :raises WebException: when no matching element appears within ``timeout``
    """
    condition = ElementCriteriaCondition(
        (selector_type, selector),
        text,
        must_be_visible=must_be_visible,
        return_all_matching=True)
    # Build the failure message up front so the timeout error is descriptive.
    message = "Expected at least one element matching {} `{}` to become " \
              "visible".format(selector_type, selector)
    if text:
        message += ' containing text `{}`'.format(text)
    try:
        return wait_until(driver, condition, message, timeout)
    except TimeoutException as exc:
        raise WebException(exc.msg) from exc
import re
def get_date_from_folder(file_str):
    """Parse the timestamp encoded in an .et3/.erd data-folder name.

    Accepts names such as 'DATA2015-01-29-16-57-30/' or
    '2020-10-11-03-48-52/' and returns the corresponding pandas Timestamp.
    """
    stem = file_str.strip()[:-1]      # drop the trailing '/'
    stem = stem.replace("DATA", "")   # optional 'DATA' prefix
    pieces = stem.split("-")
    # The first three pieces form the date; the remainder is the time of
    # day, so e.g. '2015-01-29-16-57-30' becomes '2015-01-29 16:57:30'.
    stamp = "-".join(pieces[:3]) + " " + ":".join(pieces[3:])
    return pd.to_datetime(stamp)
def translate_api2db(namespace, alias):
    """Translate an API (namespace, alias) pair to database pairs.

    >>> translate_api2db("ga4gh", "SQ.1234")
    [('VMC', 'GS_1234')]
    """
    # refseq is matched case-insensitively; the rest are exact matches.
    if namespace.lower() == "refseq":
        return [("NCBI", alias)]
    direct = {"ensembl": "Ensembl", "lrg": "LRG"}
    if namespace in direct:
        return [(direct[namespace], alias)]
    if namespace == "sha512t24u":
        return [("VMC", "GS_" + alias if alias else None)]
    if namespace == "ga4gh":
        # strip the "ga4gh:" style "SQ." prefix and add the VMC "GS_" one
        return [("VMC", "GS_" + alias[3:])]
    return []
import requests
def register_view(request):
    """Renders the register page.

    GET renders the signup form; POST validates the submitted fields
    (password, username, email, prohibited lists, reCAPTCHA), creates an
    inactive user, emails an activation link, and returns a JSON status.
    """
    if request.method == 'GET':
        # Get signup form to display
        form = SignUpForm()
        return render(request, 'myroot/registration/register.html',
                      {'form': form,
                       'title': "Register | " + settings.SITE_SHORT_NAME,
                       'meta_desc': """A step-by-step guide on how to create a user registration form using Django 2.1+ with Python 3.7+""",
                       })
    data = dict()
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        username = request.POST.get('username')
        email = request.POST.get('email')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        # Field-level validation helpers return (is_valid, message, title).
        is_pass_valid, msg, title = is_password_valid(password1, password2)
        is_user_name_valid, msg1, title1 = is_username_valid(username)
        if not is_user_name_valid:
            # Return some json response back to user
            data = dict_alert_msg('False', title1, msg1, 'error')
        elif not is_pass_valid:
            # Return some json response back to user
            data = dict_alert_msg('False', title, msg, 'error')
        # Check if email exist in our users list
        elif User.objects.filter(email=email):
            # Return some json response back to user
            msg = """A user with that email address already exist."""
            data = dict_alert_msg('False', 'Invalid Email!', msg, 'error')
        elif User.objects.filter(username=username):
            # Return some json response back to user
            msg = """Username already taken, please try another one."""
            data = dict_alert_msg('False', 'Invalid Username!',
                                  msg, 'error')
        # To check prohibited username match with our list
        elif SiteConfig.objects.filter(property_name=username):
            # Return some json response back to user
            msg = """A username you have entered is not allowed."""
            data = dict_alert_msg('False', 'Prohibited Username!',
                                  msg, 'error')
        # To check if Prohibited email match with our list
        elif SiteConfig.objects.filter(property_name=email):
            # Return some json response back to user
            msg = """The email you have entered is not allowed."""
            data = dict_alert_msg('False', 'Prohibited Email!',
                                  msg, 'error')
        else:
            ''' Begin reCAPTCHA validation '''
            recaptcha_response = request.POST.get('g-recaptcha-response')
            data = {
                'secret': settings.GRECAP_SECRET_KEY,
                'response': recaptcha_response
            }
            r = requests.post(settings.GRECAP_VERIFY_URL, data=data)
            result = r.json()
            ''' End reCAPTCHA validation '''
            if result['success']:
                # Validate email address if exist from an email server.
                is_email_real = is_email_valid(email)
                if is_email_real:
                    # Proceed with the rest of registering new user:
                    # the account stays inactive until the emailed
                    # activation link is followed.
                    user = form.save(commit=False)
                    user.is_active = False
                    user.save()  # Finally save the form data
                    user.pk  # Get the latest id
                    current_site = get_current_site(request)
                    subject = 'Activate Your ' + \
                        str(settings.SITE_SHORT_NAME) + ' Account'
                    message = render_to_string(
                        'myroot/account/account_activation_email.html',
                        {
                            'user': user,
                            'domain': current_site.domain,
                            'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
                            'token': account_activation_token.make_token(user),
                        })
                    user.email_user(subject, message, settings.APP_EMAIL_FROM)
                    # Return some json response back to user
                    msg = """New user has been created successfully!"""
                    data = dict_alert_msg('True', 'Awesome', msg, 'success')
                else:
                    # Return some json response back to user
                    msg = """Invalid or non-existed email address."""
                    data = dict_alert_msg('False', 'Oops, Invalid Email Address', msg, 'error')
            else:
                # Return some json response back to user
                msg = """Invalid reCAPTCHA, please try again."""
                data = dict_alert_msg('False', 'Oops, Error', msg, 'error')
    return JsonResponse(data)
def make_environment():
    """Build the fake continuous-control environment used by the agent.

    The environment has a 1-dimensional action space, a 2-dimensional
    observation space, and episodes of length 10.
    """
    return fakes.ContinuousEnvironment(
        action_dim=1, observation_dim=2, episode_length=10
    )
def device_get(context, device_id):
    """get a device according to specify device_id

    Thin dispatch wrapper that delegates to the configured backend
    implementation module (IMPL).

    :param context: request context forwarded to the backend
    :param device_id: identifier of the device to look up
    :return: whatever the backend's ``device_get`` returns
    """
    return IMPL.device_get(context, device_id)
import scipy
def convolve_gaussian_2d(image, gaussian_kernel_1d):
    """Smooth ``image`` with a separable 2-D Gaussian.

    Exploits separability: correlating the 1-D kernel along axis 0 and
    then axis 1 is equivalent to (and much cheaper than) a full 2-D
    correlation with the outer product of the kernel with itself.  For a
    symmetric Gaussian kernel, correlation equals convolution.

    Parameters
    ----------
    image : ndarray
        2-D input image.
    gaussian_kernel_1d : ndarray
        1-D Gaussian kernel.

    Returns
    -------
    ndarray
        The smoothed image, same shape as ``image``.
    """
    # scipy.ndimage.filters is a deprecated alias namespace (removed in
    # modern SciPy); call the functions on scipy.ndimage directly.
    result = scipy.ndimage.correlate1d(image, gaussian_kernel_1d, axis=0)
    result = scipy.ndimage.correlate1d(result, gaussian_kernel_1d, axis=1)
    return result
from unittest.mock import call
def refresh_viewer(viewer, pdf_path,
                   tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
    """Ask a PDF viewer to reload the output file at ``pdf_path``.

    Only viewers with a scriptable refresh (Skim, TeXShop) are supported;
    they are driven via an AppleScript shipped with the LaTeX bundle, so
    the viewer must already be running.  For any other viewer this is a
    no-op that returns a non-zero status.

    Arguments:

        viewer

            The viewer application that should refresh ``pdf_path``.

        pdf_path

            The path to the PDF file to refresh.

        tm_bundle_support

            The location of the "LaTeX Bundle" support folder.

    Returns: ``int`` — 0 on success, non-zero when the viewer is not
    supported or the refresh script failed.
    """
    print('<p class="info">Tell {} to refresh \'{}\'</p>'.format(viewer,
                                                                 pdf_path))
    if viewer not in ('Skim', 'TeXShop'):
        return 1
    script = "osascript '{}/bin/refresh_viewer.scpt' {} {} ".format(
        tm_bundle_support, viewer, shellquote(pdf_path))
    return call(script, shell=True)
def getLineagesFromChangeo(changeodb, print_summary):
    """Subset the change-o output (from bracer) to cells in lineages.

    A lineage is a clone with more than one member (i.e. non-singleton).
    Cells without a reconstructed BCR carry the string "None" in the
    ``CLONE`` column and are excluded from the result.

    Parameters
    ----------
    changeodb : pandas.DataFrame
        Change-o table with a ``CLONE`` column.
    print_summary : bool
        If True, print summary statistics about the selection.

    Returns
    -------
    pandas.DataFrame
        Rows whose clone has more than one member, sorted by CLONE,
        with "None" clones removed.
    """
    df = changeodb
    # BUG FIX: a filtered copy (df[df.CLONE != "None"]) was previously
    # computed and immediately overwritten — the dead store is removed.
    multi_member = (df.CLONE.value_counts() > 1)  # clones with more than 1 member
    if print_summary:
        print("There are", len(multi_member[multi_member == 1]), "lineages with more than one member")
    CHANGEO_confidentlineages = df[df.CLONE.isin(multi_member[multi_member == 1].index)].sort_values('CLONE')
    CHANGEO_confidentlineages = CHANGEO_confidentlineages[CHANGEO_confidentlineages.CLONE != 'None']
    if print_summary:
        print("number of cells in original dataframe", df.shape[0])
        print("number of distinct Clones in original dataframe", df.drop_duplicates('CLONE').shape[0] - 1)  # subtract 1 for the 'None' entry
        print(CHANGEO_confidentlineages.shape[0]/df.shape[0], 'percent of cells in a lineage')
    return CHANGEO_confidentlineages
def pop():
    """ Clear the current execution environment for whatever parallel mechanism is used. """
    # Environments are tracked per worker identifier as a stack in the
    # module-level _current_envs mapping; all mutation happens under _lock.
    with _lock:
        ident = identifier()
        envs = _current_envs.get(ident)
        if envs:
            # Deactivate and drop the top-most environment ...
            env = envs.pop()
            env.deactivate()
            # ... and, if one remains underneath, make it active again.
            if _current_envs[ident]:
                current = _current_envs[ident][-1]
                current.activate()
            return env
        raise ValueError('No environment to clear.')
def _run_cnvkit_cancer(items, background, access_file):
    """Run CNVkit on a tumor/normal pair.

    Pairs the tumor/normal BAMs from the sample batch, runs the shared
    CNVkit pipeline with the normal as the reference, then estimates
    tumor heterogeneity with THetA before associating the outputs back
    onto the sample items.

    NOTE(review): the ``background`` parameter is unused here — confirm
    whether it is required by the caller's dispatch signature only.
    """
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    work_dir = _sv_workdir(items[0])
    ckout = _run_cnvkit_shared(items[0], [paired.tumor_bam], [paired.normal_bam],
                               access_file, work_dir, background_name=paired.normal_name)
    ckout = theta.run(ckout, paired)
    return _associate_cnvkit_out(ckout, items)
def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):
    """
    PWL piece-wise-linear interpolation fit of phase residuals
    cdata -> compressed data

    Least-squares fit of one bias per zenith bin to the residuals
    (column 3 of the data), after rejecting gross outliers.

    :param site_residuals: raw residual records; column 2 is assumed to be
        zenith distance [deg] and column 3 the residual — TODO confirm.
    :param azSpacing: azimuth bin size [deg] (unused in this routine)
    :param zenSpacing: zenith bin size [deg]
    :return: (pwl, pwlsig, stats) — per-bin solution, its sigma, and a
        dict with prefit/postfit chi statistics plus AIC/BIC.
    """
    # Outlier rejection: absolute cutoff at 100, then elevation-dependent
    # rejection; intermediates are deleted to free memory.
    tdata = res.reject_absVal(site_residuals,100.)
    del site_residuals
    data = res.reject_outliers_elevation(tdata,5,0.5)
    del tdata
    numd = np.shape(data)[0]
    numZD = int(90.0/zenSpacing) + 1
    # Small diagonal regularization keeps the normal matrix invertible
    # for empty bins.
    Neq = np.eye(numZD,dtype=float) * 0.01
    Apart = np.zeros((numd,numZD))
    sd = np.zeros(numd)
    for i in range(0,numd):
        # NOTE(review): np.floor returns a float; using it as an index
        # relies on implicit conversion and fails on modern numpy —
        # confirm the numpy version this targets.
        iz = np.floor(data[i,2]/zenSpacing)
        sd[i] = np.sin(data[i,2]/180.*np.pi)
        Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)
    # Prefit chi-square, then accumulate and solve the normal equations.
    prechi = np.dot(data[:,3].T,data[:,3])
    Neq = np.add(Neq, np.dot(Apart.T,Apart) )
    Bvec = np.dot(Apart.T,data[:,3])
    Cov = np.linalg.pinv(Neq)
    Sol = np.dot(Cov,Bvec)
    postchi = prechi - np.dot(Bvec.T,Sol)
    pwl = Sol
    pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
    # Information criteria from the model log-likelihood.
    model = np.dot(Apart,Sol)
    f = loglikelihood(data[:,3],model)
    dof = numd - np.shape(Sol)[0]
    aic = calcAIC(f,dof)
    bic = calcBIC(f,dof,numd)
    #print("My loglikelihood:",f,aic,bic,dof,numd)
    #print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
    stats = {}
    stats['prechi'] = np.sqrt(prechi/numd)
    stats['postchi'] = np.sqrt(postchi/numd)
    stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
    stats['aic'] = aic
    stats['bic'] = bic
    return pwl,pwlsig,stats
import math
def rotateImage(input_img, angle):
    """Rotate a single-channel image counterclockwise about its centre.

    The canvas is first padded so the rotated content is not clipped,
    then an affine rotation (nearest-neighbour interpolation) is applied.

    :param input_img: np.array, the image to be rotated
    :param angle: float, the counterclockwise rotate angle in degrees
    :return: np.array, the rotated (padded) image
    """
    radian = angle * math.pi / 180.0
    sin_a = math.sin(radian)
    cos_a = math.cos(radian)
    height, width = input_img.shape
    # Bounding box of the rotated image.
    rot_w = int(math.ceil(height * math.fabs(sin_a) + width * math.fabs(cos_a)))
    rot_h = int(math.ceil(width * math.fabs(sin_a) + height * math.fabs(cos_a)))
    pad_x = max(0, (rot_w - width) / 2)
    pad_y = max(0, (rot_h - height) / 2)
    padded = cv2.copyMakeBorder(input_img, pad_y, pad_y, pad_x, pad_x,
                                cv2.BORDER_CONSTANT, value=(0, 0, 0))
    center = (padded.shape[1] / 2.0, padded.shape[0] / 2.0)
    affine_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(padded, affine_matrix,
                          (padded.shape[1], padded.shape[0]),
                          flags=cv2.INTER_NEAREST)
def get_content_model_prefetch(content_model, content_attr='content'):
    """ Return the relations to prefetch for a relation rooted at
    '<content_attr>__'.  If the model declares MTURK_PREFETCH, those
    entries are used verbatim; otherwise common foreign keys (photo,
    shape) are probed and included when present. """
    prefix = '%s__' % content_attr
    if hasattr(content_model, 'MTURK_PREFETCH'):
        return [prefix + key for key in content_model.MTURK_PREFETCH]
    # no explicit default: guess from the model's foreign keys
    guessed = []
    if has_foreign_key(content_model, 'photo'):
        guessed.append(prefix + 'photo')
    if has_foreign_key(content_model, 'shape'):
        guessed.append(prefix + 'shape')
        guessed.append(prefix + 'shape__photo')
    return guessed
def dispatch(obj, replacements):
    """Recursively apply string replacements throughout a nested structure.

    :param obj: the object to rewrite; dicts and lists are walked
        recursively, strings get every (old, new) pair applied in order,
        anything else is returned unchanged
    :type obj: any
    :param replacements: the things to replace
    :type replacements: tuple of tuples
    """
    if isinstance(obj, str):
        for pair in replacements:
            obj = obj.replace(pair[0], pair[1])
        return obj
    if isinstance(obj, dict):
        return {key: dispatch(value, replacements) for key, value in obj.items()}
    if isinstance(obj, list):
        return [dispatch(item, replacements) for item in obj]
    return obj
def process(
        mode,
        infile,
        outdir,
        genes,
        genes_drop,
        genes_bed,
        igh,
        mmrf,
        bolli,
        lohr,
        normals,
        mytype):
    """Main function to process myTYPE SNV and indel output

    Pipeline: import the variant table, run the annotation passes
    (COSMIC, gene frequency, MAF, normals, MMRF, Bolli, Lohr, myTYPE),
    then apply the filter passes, and finally export the result.

    :param mode: processing mode, forwarded to the MAF/COSMIC, non-PASS
        filters and the export step
    :param infile: path to the input variant file
    :param outdir: output directory for the filtered export
    :param genes: optional path enabling gene-frequency annotation
    :param genes_drop: optional gene list to drop from the panel
    :param genes_bed: BED file restricting variants to the panel
    :param igh: IGH region data used by the IGH filter
    :param mmrf, bolli, lohr: external datasets used for annotation
    :param normals: normal-sample data for annotation and filtering
    :param mytype: optional path enabling myTYPE-specific annotation
    :return: the fully annotated and filtered variants table
    """
    ## IMPORTING DATA
    variants = import_variants(infile)
    ## ANNOTATIONS
    variants = annotate_cosmic(variants)
    if genes:
        # Only runs if a path was passed to optional argument "gene"
        variants = annotate_genefreq(variants, genes)
        # Replace this with mutation frequency from MMRF? (and other raw data?)
    variants = annotate_maf(variants)
    variants = annotate_normals(variants, normals)
    variants = annotate_mmrf(variants, mmrf)
    variants = annotate_bolli(variants, bolli)
    variants = annotate_lohr(variants, lohr)
    if mytype:
        # Only runs if a path was passed to optional argument "mytype"
        variants = annotate_mytype(variants, mytype)
        variants = annotate_known(variants, mytype)
    ## FILTERS
    variants = filter_panel(variants, genes_bed)
    if genes_drop:
        variants = filter_drop(variants, genes_drop)
    variants = filter_igh(variants, igh)
    variants = filter_maf(variants)
    variants = filter_maf_cosmic(variants, mode)
    variants = filter_nonpass(variants, mode)
    variants = filter_normals(variants)
    variants = filter_vaf(variants)
    variants = filter_bidir(variants)
    ## OUTPUT
    name = namecore(infile)
    filter_export(variants, outdir, name, mode)
    print('Variant processing complete')
    return(variants)
def set_axis_formatter(axis, params=None, **kwargs):
    """
    Set axis formatter.
    :param axis: Matplotlib axis instance.
    :type axis: :class:`matplotlib.axes.Axes`
    :param params: Axis formatter parameters.
    :type params: dict, optional
    :return: dict of the created formatter instances, keyed first by axis
        ('x'/'y') and then by tick kind ('major'/'minor').
    Example config:
    .. code-block:: python
        'formatter': {
            'x': {
                'major': {
                    'format': '%.3f'
                },
                'minor': {
                    'name': 'PercentFormatter',
                    'params': [],
                    'keyword_params': {
                    }
                }
            }
        }
    Default formatter is FormatStrFormatter and it reads 'format' value. Other
    formatter is specified with params and keyword_params to pass these values
    into formatter class.
    """
    config = params or {}
    # Indirection tables: config keys -> matplotlib accessor/mutator names.
    axis_methods = {
        'x': 'get_xaxis',
        'y': 'get_yaxis',
    }
    formatter_methods = {
        'major': 'set_major_formatter',
        'minor': 'set_minor_formatter'
    }
    instances = {}
    # Date formatters that must be constructed with a locator instance.
    need_locator_instances = [
        matplotlib.dates.AutoDateFormatter,
    ]
    try:
        # ConciseDateFormatter is not supported on Python 3.5 and Matplotlib
        # below version 3.1.0. So import it only when it is available.
        need_locator_instances.append(matplotlib.dates.ConciseDateFormatter)
    except ImportError:
        pass
    # Locator instances previously created for this plot, passed in by the
    # caller; looked up when a formatter requires one.
    locators = kwargs.get('locators', {})
    def get_locator_instance(locators, on, which):
        # Fetch the locator created for the same axis/tick combination.
        instance = locators.get(on, {}).get(which)
        if instance is None:
            raise ChartError("Could'nt find locator instance "
                             "required for formatter class")
        return instance
    for key, value in config.items():
        if key not in axis_methods:
            continue
        gca = getattr(axis, axis_methods[key])()
        for which, data in value.items():
            if which in formatter_methods:
                tick_format = data.get('format')
                if tick_format:
                    # Simple path: a format string with the (default)
                    # FormatStrFormatter or another ticker class by name.
                    name = data.get('name', 'FormatStrFormatter')
                    formatter = getattr(matplotlib.ticker, name)
                    getattr(gca, formatter_methods[which])(
                        formatter(data.get('format')))
                else:
                    name = data.get('name')
                    if name is None:
                        continue
                    # Resolve the class from ticker first, then dates.
                    for attr in [matplotlib.ticker, matplotlib.dates]:
                        formatter = getattr(attr, name, None)
                        if formatter:
                            break
                    if formatter is None:
                        raise ChartError(
                            'Unsupported formatter class {}'.format(name))
                    if formatter in need_locator_instances:
                        locator = get_locator_instance(locators, key, which)
                        instance = formatter(
                            locator,
                            *data.get('params', []),
                            **data.get('keyword_params', {})
                        )
                    else:
                        instance = formatter(*data.get('params', []),
                                             **data.get('keyword_params', {}))
                    try:
                        instances[key].update({which: instance})
                    except KeyError:
                        instances.update({key: {which: instance}})
                    getattr(gca, formatter_methods[which])(instance)
    return instances
import time
from datetime import datetime
def convert_tzaware_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
    """Convert a timezone-aware ``time`` to ``tz_out``; result is non-naive.

    ``t`` must carry ``tzinfo``.  Today's date is used as the anchor for
    the timezone conversion.
    """
    anchored = datetime.combine(datetime.today(), t)
    return anchored.astimezone(tz_out).timetz()
def total_posts():
    """
    A simple template tag that shows the number
    of posts that have been uploaded so far
    """
    # Counts only posts exposed by the custom `published` manager.
    return Post.published.count()
from typing import Tuple
def make_policy_prior_network(
    spec: specs.EnvironmentSpec, hidden_layer_sizes: Tuple[int, ...] = (64, 64)
) -> networks.FeedForwardNetwork:
    """Creates a policy prior network used by the agent.

    The prior is an MLP that maps the concatenation of the current
    observation and the previous action to a new action whose size is the
    flattened action dimension of ``spec``.
    """
    action_size = np.prod(spec.actions.shape, dtype=int)
    def _policy_prior_fn(observation_t, action_tm1, is_training=False, key=None):
        # is_training and key allows to defined train/test dependant modules
        # like dropout.
        del is_training
        del key
        network = hk.nets.MLP(hidden_layer_sizes + (action_size,))
        # Policy prior returns an action.
        return network(jnp.concatenate([observation_t, action_tm1], axis=-1))
    # Strip the unused rng from apply() and wrap into the project's
    # FeedForwardNetwork container.
    policy_prior = hk.without_apply_rng(hk.transform(_policy_prior_fn))
    return make_network_from_module(policy_prior, spec)
def simulate_moment_contributions(params, x, y):
    """Moment contributions for the Honore/DePaula/Jorgensen example.

    For every regressor pair (i, j) with j >= i, computes
    ``residual * x_i * x_j`` where ``residual = y - Phi(x @ params)`` and
    Phi is the standard normal CDF.

    :param params: DataFrame with a "value" column of coefficients
    :param x: DataFrame of regressors (observations in rows)
    :param y: DataFrame of outcomes
    :return: DataFrame with one row per (i, j) regressor pair
    """
    index_vals = x.to_numpy() @ params["value"].to_numpy()
    regressors = x.T.to_numpy()
    residual = y.T.to_numpy() - stats.norm.cdf(index_vals)
    n_reg = len(regressors)
    contributions = [
        residual * regressors[i] * regressors[j]
        for i in range(n_reg)
        for j in range(i, n_reg)
    ]
    stacked = np.stack(contributions, axis=1)[0]
    return pd.DataFrame(data=stacked)
def get_app_host_list(app_id):
    """
    Fetch all host information for the specified business.

    :param app_id: business (CC application) identifier
    :return dict: {"InnerIP": host_info}
    """
    # Delegate to the BlueKing CC (configuration center) API client.
    cc_result = bk.cc.get_app_host_list(app_id=app_id)
    return cc_result
def staking_product_quota(self, product: str, productId: str, **kwargs):
    """Get Personal Left Quota of Staking Product (USER_DATA)

    GET /sapi/v1/staking/personalLeftQuota

    Weight(IP): 1

    https://binance-docs.github.io/apidocs/spot/en/#get-personal-left-quota-of-staking-product-user_data

    Args:
        product (str)
        productId (str)
    Keyword Args:
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    check_required_parameters([[product, "product"], [productId, "productId"]])
    payload = {"product": product, "productId": productId, **kwargs}
    return self.sign_request("GET", "/sapi/v1/staking/personalLeftQuota", payload)
import sys
def run(nbtxt, output=None, view=False):
    """ Run a notebook app 100% from JSON (text stream), return the JSON (text stream)
    :param nbtxt: JSON representation of notebook app, ready to run
    :param output: NOTE(review): currently unused — intended to select
        which attributes are returned (see TODO below)
    :param view: don't invoke notebook, just view in current form
    NOTE: `view` probably isn't useful, since the input will just be output again

    On failure, a small markdown notebook describing the error is
    returned instead of the executed notebook.
    """
    # TODO: support output parameter to specify only returning certain attributes from notebook
    # create a notebook object from the JSON
    nb_obj = nb_read_json(nbtxt)
    nb_runner = NotebookRunner(nb_obj)
    try:  # get the app name from metadata
        name = nb_obj['metadata']['conda.app']['name']
    except KeyError as ex:
        name = "nbapp"
    try:
        if view:
            pass  # then don't run it
        else:
            nb_runner.run_notebook(skip_exceptions=False)
        return nb_runner.nb
    except Empty as ex:
        # The kernel stopped responding within the runner's timeout.
        sys.stderr.write("IPython Kernel timeout")
        err = mini_markdown_nb("""
Notebook Error
==============
ERROR: IPython Kernel timeout
```
{error}
```
        """.format(error=str(ex).split(':')[-1]))
        return err
    except (NotImplementedError, NotebookError, ValueError) as ex:
        # Execution failed inside the notebook; report only the last line
        # of the error message.
        msg = str(ex).splitlines()[-1]
        sys.stderr.write(msg)
        err = mini_markdown_nb("""
Notebook Error
==============
Notebook contains unsupported feature or bad argument:
```
{error}
```
        """.format(error=msg))
        return err
    except ImportError:
        # Rendering dependencies are missing on this host.
        msg = "nodejs or pandoc must be installed"
        sys.stderr.write(msg)
        err = mini_markdown_nb(msg)
        return err
def softmax(x):
    """Numerically stable softmax.

    Shifts the inputs by their maximum before exponentiating so that
    ``exp`` cannot overflow, then normalises to a probability vector.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def lambda_handler(event, context):
    """
    Build the requested Python requirements in a clean environment and
    upload the artifact and a job manifest to S3.

    Event shape:

    ..code:: json

        {
            "Name": "layer name",
            "Requirements": [
                {
                    "Name": "Requirement Name",
                    "Details": "Requirement version or other identifying details"
                }
            ]
        }

    Return shape:

    ..code:: json

        {
            "Installed": ["Actual versions of all installed requirements"],
            "Runtimes": ["Lambda runtime name"],
            "ArtifactKey": "S3 key containing built zip",
            "ManifestKey": "S3 key containing job manifest"
        }

    Required permissions:

    * s3:PutObject for S3_BUCKET/accretion/artifacts/*
    * s3:PutObject for S3_BUCKET/accretion/manifests/*

    :param event:
    :param context:
    :return:
    """
    try:
        # Lazy one-time initialization, then reset the build/venv dirs so
        # each invocation starts from a clean slate.
        if not _is_setup:
            _setup()
        _clean_env()
        requirements = [PackageDetails(**reqs) for reqs in event["Requirements"]]
        installed = build_requirements(build_dir=BUILD_DIR, venv_dir=VENV_DIR, requirements=requirements)
        artifact_key, manifest_key = _upload_artifacts(event["Name"], event["Requirements"], installed)
        return {
            "Installed": installed,
            "Runtimes": [_runtime_name()],
            "ArtifactKey": artifact_key,
            "ManifestKey": manifest_key,
        }
    except Exception:
        # TODO: Turn these into known-cause state machine failures.
        raise
def _FloatTraitsBaseClass_write_values_attribute(a, v):
    """_FloatTraitsBaseClass_write_values_attribute(hid_t a, Floats v)"""
    # SWIG-generated wrapper: forwards to the native RMF HDF5 binding to
    # write a float-array attribute `v` onto HDF5 handle `a`.
    return _RMF_HDF5._FloatTraitsBaseClass_write_values_attribute(a, v)
def get_cifar(root, dataset, transform, mode='train'):
    """Get cifar data set

    Args :
        --root: data root
        --dataset: dataset name ('cifar10' or 'cifar100')
        --transform: transformation
        --mode: 'train'/'test'
    """
    assert dataset.count('cifar')
    if dataset == 'cifar10':
        factory = tv.datasets.CIFAR10
    elif dataset == 'cifar100':
        factory = tv.datasets.CIFAR100
    else:
        raise Exception('No other data sets!')
    return factory(root=root,
                   download=True,
                   train=True if mode == 'train' else False,
                   transform=transform)
from pathlib import Path
import textwrap
def config(base_config):
    """:py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests.

    Appends the LiveOcean temperature/salinity download section to the
    base config file on disk, then loads and returns the combined config.
    """
    config_file = Path(base_config.file)
    # Append (not overwrite) so base_config's content is preserved.
    with config_file.open("at") as f:
        f.write(
            textwrap.dedent(
                """\
                file group: allen

                temperature salinity:
                  download:
                    status file url template: 'https://liveocean.apl.uw.edu/output/f{yyyymmdd}/ubc_done.txt'
                    bc file url template: 'https://liveocean.apl.uw.edu/output/f{yyyymmdd}/ubc.nc'
                    file name: low_passed_UBC.nc
                    dest dir: forcing/LiveOcean/downloaded
                """
            )
        )
    config_ = nemo_nowcast.Config()
    config_.load(config_file)
    return config_
import logging
async def test_client_app_delete_error_handling(
    aiohttp_server, config, db, loop, caplog
):
    """For the delete() method of the KubernetesClient, test the handling of response
    with a 404 error and with an error with another status.

    There are three states:
        0. initialisation
        1. when deleting a resource from the cluster, a 403 is sent. The Kubernetes
            client does not know how to handle it, and raises an exception.
        2. when deleting a resource from the cluster, a 404 is sent. The Kubernetes
            client must handle it as if the resource was already deleted.
    """
    caplog.set_level(logging.DEBUG)
    current_state = 0
    copy_deployment_manifest = deepcopy(nginx_manifest[:1])
    # Fake Kubernetes API: the delete endpoint's response depends on the
    # current test state set below.
    routes = web.RouteTableDef()
    @routes.delete("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
    async def _(request):
        nonlocal current_state
        if current_state == 1:
            return web.Response(status=403)
        elif current_state == 2:
            return web.Response(status=404)
    kubernetes_app = web.Application()
    kubernetes_app.add_routes(routes)
    kubernetes_server = await aiohttp_server(kubernetes_app)
    cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
    async with KubernetesClient(cluster.spec.kubeconfig, []) as kube:
        # State (1): Return a 403
        current_state = 1
        with pytest.raises(ApiException):
            await kube.delete(copy_deployment_manifest[0])
        # State (2): Return a 404
        current_state = 2
        resp = await kube.delete(copy_deployment_manifest[0])
        assert resp is None
    already_deleted = 0
    for record in caplog.records:
        if (
            record.levelname == "DEBUG"
            and record.message == "Deployment already deleted"
        ):
            already_deleted += 1
    # The logs for the resource already deleted must only appear once, in the state (2)
    assert already_deleted == 1
def mark_cell_function(fun, mesh, foc, regions):
    """
    Iterates over the mesh and stores the
    region number in a meshfunction

    :param fun: mesh (cell) function that receives one region number per cell
    :param mesh: dolfin mesh to iterate over
    :param foc: focal point for the prolate ellipsoidal coordinates;
        estimated from the mesh when None
    :param regions: region boundaries passed to
        ``calculus.strain_region_number``
    :return: ``fun``, with every cell classified
    """
    if foc is None:
        foc = calculus.estimate_focal_point(mesh)
    for cell in dolfin.cells(mesh):
        # Get coordinates to cell midpoint
        x = cell.midpoint().x()
        y = cell.midpoint().y()
        z = cell.midpoint().z()
        # Classify the midpoint in prolate ellipsoidal coordinates.
        T = calculus.cartesian_to_prolate_ellipsoidal(x, y, z, foc)
        fun[cell] = calculus.strain_region_number(T, regions)
    return fun
import json
def incorrect_format():
    """for handling incorrect json format

    Returns a 422 (Unprocessable Entity) JSON error response.
    """
    js = json.dumps({'error': 'Incorrect format.'})
    return Response(js, status=422, mimetype='application/json')
import multiprocessing
def start_multiprocessing(function_list, data_list):
    """
    Run each function over the shared data list with a multiprocessing pool.

    A single pool (initialised with ``set_global_session`` in every worker)
    is reused for (1..n) functions, all of which receive the same data_list
    (e.g. YouTube video links).  Returns a dictionary of results indexed
    by function.
    """
    results = {}
    with multiprocessing.Pool(initializer=set_global_session) as pool:
        for fn in function_list:
            print(f"\n* Setting up multiprocessing for: {fn.__name__} ({len(data_list)} items).\n")
            results[fn] = pool.map(fn, data_list)
            print(f"\n* {fn.__name__}: multiprocessing complete.\n")
    return results
def GetNodeProperty(line):
    """
    Get node property from a string (one graphviz-style node line).
    :param line: a string
    :return: (name, size, position) of the node
    """
    # Split the raw line into the node name and its attribute string.
    name, attr = NameAndAttribute(line)
    name = ProcessName(name, False)
    # "pos" appears to end in a sentinel char that is dropped; commas are
    # replaced with '-' so the position is safe where ',' is a delimiter
    # — TODO confirm the attribute format against the input generator.
    position = GetAttributeValue("pos", attr)[:-1].replace(",", "-")
    attr = CleanAttribute(attr)
    width = GetAttributeValue("width", attr)
    #group = GetAttributeValue("color", attr)
    # Bucket the node width into a plot-friendly size via SizeScale.
    size = SizeScale(GetSize(width))
    return name, size, position | 2f63af91864236033783d773439f829b9d7f405a | 27,465
def login(request):
    """
    Logs in a user.

    GET-only endpoint: validates (no) options, authenticates the request
    via ``userauth`` and, on success, stores a session cookie.
    """
    if request.method != "GET":
        return _methodNotAllowed()
    # _validateOptions returns an error string when validation fails.
    options = _validateOptions(request, {})
    if type(options) is str:
        return _response(options)
    user = userauth.authenticateRequest(request, storeSessionCookie=True)
    # authenticateRequest returns an error string, None (bad credentials),
    # or the authenticated user object.
    if type(user) is str:
        return _response(user)
    elif user == None:
        return _unauthorized()
    else:
        return _response("success: session cookie returned") | abde5c258899cc8fcfdef0e0286e5e846d075a27 | 27,466
def wavenumber(f, h, g=9.81):
    """ solves the dispersion relation, returns the wave number k
    INPUTS:
        f: wave frequency [Hz], scalar or array-like
        h : water depth [m]
        g: gravity [m/s^2]
    OUTPUTS:
        k: wavenumber (array when f is array-like)
    """
    omega = 2*np.pi*f

    def residual(k, om):
        # linear dispersion relation: om**2 = g * k * tanh(k*h)
        return om**2/g - k*np.tanh(k*h)

    if hasattr(omega, '__len__'):
        # solve element-wise; deep-water k = om**2/g is the initial guess
        return np.array([fsolve(residual, om**2/g, args=(om,))[0] for om in omega])
    return fsolve(residual, omega**2/g, args=(omega,))[0]
def command(cmd, label, env=None):
    """Create a Benchpress command, which define a single benchmark execution

    This is a help function to create a Benchpress command, which is a Python `dict` of the parameters given.

    Parameters
    ----------
    cmd : str
        The bash string that makes up the command
    label : str
        The human readable label of the command
    env : dict, optional
        The Python dictionary of environment variables to define before
        execution (defaults to an empty dict)

    Returns
    -------
    command : dict
        The created Benchpress command
    """
    # A fresh dict per call fixes the shared-mutable-default pitfall of
    # the old ``env={}`` signature (all results aliased one dict).
    return {'cmd': cmd,
            'label': label,
            'env': {} if env is None else env}
def adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
    """
    Description:
        Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
        from beam coordinate transformed velocity profiles as defined in the
        Data Product Specification for Velocity Profile and Echo Intensity -
        DCN 1341-00750.
    Implemented by:
        2013-04-10: Christopher Wingard. Initial code.
        2014-02-03: Christopher Wingard. Formatting and adjusting to use
                    magnetic declination values calculated use the WMM 2010.
        2014-03-28: Russell Desiderio. Corrected documentation only.
        2014-04-04: Russell Desiderio. Optimized code performance by replacing
                    the for loops previously used to calculate 2D and 3D
                    vectorized coordinate transformations with calls to
                    np.einsum (numpy Einstein summation function).
        2014-06-25: Christopher Wingard. Edited to account for units of
                    heading, pitch, roll and depth
        2015-06-10: Russell Desiderio.
                    (a) moved the conditioning of input beam velocities to adcp_beam2inst.
                    (b) moved the conditioning of compass readings to adcp_inst2earth.
                    (c) removed the depth dependence from the magnetic declination.
    Usage:
        vv_cor = adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
            where
        vv_corr = north velocity profiles in Earth coordinates corrected for the
                  magnetic declination (VELPROF-VLN_L1) [m s-1]
        b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
        b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
        b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
        b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
        h = instrument's uncorrected magnetic heading [cdegrees]
        p = instrument pitch [cdegrees]
        r = instrument roll [cdegrees]
        vf = instrument's vertical orientation (0 = downward looking and
            1 = upward looking)
        lat = instrument's deployment latitude [decimal degrees]
        lon = instrument's deployment longitude [decimal degrees]
        z = instrument's pressure sensor reading (depth) [daPa]
        dt = sample date and time value [seconds since 1900-01-01]
    """
    # force shapes of inputs to arrays of the correct dimensions
    # NOTE: depth scaling is disabled because the magnetic declination no
    # longer depends on depth (see 2015-06-10 history entry above); ``z``
    # is retained only for interface compatibility.
    #z = np.atleast_1d(z) / 1000.  # scale daPa depth input to dbar
    #z = z * 1.019716  # use a simple approximation to calculate depth in m
    lat = np.atleast_1d(lat)
    lon = np.atleast_1d(lon)
    dt = np.atleast_1d(dt)
    # compute the beam to instrument transform
    u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
    # compute the instrument to earth beam transform
    uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
    # compute the magnetic variation, and ...
    theta = magnetic_declination(lat, lon, dt)
    # ... correct for it (only the northward component is kept here)
    _, vv_cor = magnetic_correction(theta, uu, vv)
    # scale velocity to m/s
    vv_cor = vv_cor / 1000.  # mm/s -> m/s
    # return the Northward Velocity Profile
    return vv_cor | 9f7c427a7f3ddbbfbdfe1f56269ddf04f99ea2bb | 27,469
import re
def get_text(string):
    """
    Strip HTML markup, then normalize white space.

    Tags are removed *before* collapsing whitespace so the spaces they
    leave behind are normalized too (the previous order left double
    spaces around removed tags).
    """
    text = re.sub(r'<.*?>', ' ', string)
    text = re.sub(r'\s+', ' ', text)
    return text
import struct
def decode_ia(ia: int) -> str:
    """ Decode an individual address into human readable string representation

    >>> decode_ia(4606)
    '1.1.254'

    Non-int input is treated as a 2-byte big-endian buffer.
    See also: http://www.openremote.org/display/knowledge/KNX+Individual+Address
    """
    if not isinstance(ia, int):
        (ia,) = struct.unpack('>H', ia)
    area = (ia >> 12) & 0x1f
    line = (ia >> 8) & 0x07
    device = ia & 0xff
    return '{}.{}.{}'.format(area, line, device)
import array
def dd_to_dms(deg):
    """Return degrees, minutes, seconds from degrees decimal.

    The result is an int32 array ``[degrees, minutes, seconds]``.
    """
    # The stdlib ``array`` module imported by the original is not callable
    # with a ``dtype`` keyword; this signature is numpy's ``array``.
    import numpy as np
    mins, secs = divmod(deg * 3600, 60)
    degs, mins = divmod(mins, 60)
    return np.array((degs, mins, secs), dtype='i4')
def _compare_labels(primary, label1, label2):
    """
    Compare two labels disposition
    :param primary: the primary label
    :param label1: the first label
    :param label2: the second label
    :return A tuple containing True if there is no difference False otherwise
            and the output text
    """
    output = ""
    try:
        l1_disp = label1['disp'][primary]
        l2_disp = label2['disp'][primary]
    except KeyError:
        # A label missing disposition data for the primary label is a
        # structural problem, surfaced as InvalidSymmetry.
        raise InvalidSymmetry()
    if l1_disp != l2_disp:
        output += "Change in disposition for {cat} {label} [{cp}] (2->1):" \
                  " {l2disp} => {l1disp}\n".format(cat=label2['cat'],
                                                   label=label2['bidi'],
                                                   cp=label2['cp_out'],
                                                   l2disp=l2_disp,
                                                   l1disp=l1_disp)
        # Primary-category labels get no rule dump — report the change only.
        if label2['cat'] == 'Primary':
            return False, output
        try:
            output += "\nRules for LGR2:\n{}\n".format(label2['rules'][primary])
            output += "\nRules for LGR1:\n{}".format(label1['rules'][primary])
        except KeyError:
            raise InvalidSymmetry()
        return False, output
    return True, output | 9b1802d4b9bab0a71e1509bb5e49388267a96a29 | 27,473
def set_pause(df, index, ts_name):
    """
    Propagate pause time/distance across merged segments of GPS fixes.

    :param df:
        Spark DataFrame object with timestamp data
    :param index:
        fix index type: 'i1', 'i2', 'i3', 'j1', 'j2', 'j3'
    :param ts_name:
        column with timestamp data
    :return:
        Spark DataFrame object with timestamp data

    NOTE(review): relies on columns produced upstream ('segment', 'state',
    'tripType', 'pause', 'pause_dist', 'duration', 'cum_pause', 'lat',
    'lon') — confirm against proc_segment's output schema.
    """
    """
    run after proc_segment
    start from set: 3 3 duration 0.0
    """
    ## calculate distance between two fixes
    app_fun = F.udf(lambda a, b, c, d: calc_distance(a, b, c, d))
    w2 = Window.partitionBy('segment').orderBy(ts_name)
    # 'pause2' is a scratch column marking pause-start candidates; it is
    # dropped before returning.
    pcol = 'pause2'
    df2 = df.withColumn(pcol, F.when(F.col(index).isNotNull() &
                                     (F.lag('state', 1).over(w2) == 3) &
                                     (F.lag('tripType', 1).over(w2) == 2) &
                                     (F.col('state') == 3) &
                                     (F.col('tripType') == 3) &
                                     (F.lead('tripType', 1).over(w2) != 3) &  ####### CHECK
                                     (F.col('pause') == F.col('duration')),
                                     F.col('duration')
                                     )
                        ).orderBy(ts_name)
    ## avoid to pick up multiple lines with pcol non-null
    df2 = df2.withColumn('check', F.when(F.col(index).isNotNull(),
                                         F.last(pcol, ignorenulls=True)
                                         .over(w2.rowsBetween(Window.unboundedPreceding, 0))
                                         ).otherwise(F.col(pcol))
                         ).orderBy(ts_name)
    df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull() &
                                      F.col(pcol).isNotNull() &
                                      F.lag('check', 1).over(w2).isNull(),
                                      F.col(pcol)
                                      )
                         ).drop('check').orderBy(ts_name)
    # Keep copies of the original pause columns so they can be restored
    # for rows that are not part of a merged pause.
    df2 = df2.withColumn('pause_cp', F.when(F.col(index).isNotNull(), F.col('pause')))
    df2 = df2.withColumn('pause_dist_cp', F.when(F.col(index).isNotNull(), F.col('pause_dist')))
    df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull(),
                                      F.last(pcol, ignorenulls=True)
                                      .over(w2.rowsBetween(0, Window.unboundedFollowing))
                                      ).otherwise(F.col(pcol))
                         ).orderBy(ts_name)
    ## calculate pause-time for merged segments
    df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
                                         F.col(pcol).isNotNull() &
                                         F.lead(pcol, 1).over(w2).isNull() &
                                         (F.col('pause') == F.col('duration')),
                                         F.col('cum_pause') - F.col('duration')
                                         ).otherwise(F.col('pause'))
                         ).orderBy(ts_name)
    df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
                                         F.col(pcol).isNull() &
                                         (F.col('pause') != F.col('cum_pause') - F.col('duration')),
                                         None
                                         ).otherwise(F.col('pause'))
                         )
    df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
                                         F.col(pcol).isNull(),
                                         F.last('pause', ignorenulls=True).over(w2)
                                         ).otherwise(F.col('pause'))
                         ).orderBy(ts_name)
    df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull() &
                                      (F.col('pause') == F.col('cum_pause') - F.col('duration')),
                                      None
                                      ).otherwise(F.col(pcol))
                         )
    df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
                                         F.col(pcol).isNull(),
                                         F.col('cum_pause') - F.col('pause')
                                         ).otherwise(F.col('pause'))
                         )
    ## compute the distance traveled from the beginning of a pause
    df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
                                              F.col(pcol).isNull() &
                                              (F.col('pause') != F.col('duration')),
                                              None
                                              ).otherwise(F.col('pause_dist'))
                         )
    # lat2/lon2 hold the coordinates of the pause start, forward-filled so
    # every row of the pause can measure its distance from that point.
    df2 = df2.withColumn('lat2', F.when(F.col('pause_dist').isNotNull(), F.col('lat')))
    df2 = df2.withColumn('lat2', F.when(F.col('pause_dist').isNull(),
                                        F.last('lat2', ignorenulls=True).over(w2)
                                        )
                         .otherwise(F.col('lat'))
                         ).orderBy(ts_name)
    df2 = df2.withColumn('lat2', F.when(F.col('lat2').isNull(), F.col('lat')).otherwise(F.col('lat2')))
    df2 = df2.withColumn('lon2', F.when(F.col('pause_dist').isNotNull(), F.col('lon')))
    df2 = df2.withColumn('lon2', F.when(F.col('pause_dist').isNull(),
                                        F.last('lon2', ignorenulls=True).over(w2)
                                        ).otherwise(F.col('lon'))
                         ).orderBy(ts_name)
    df2 = df2.withColumn('lon2', F.when(F.col('lon2').isNull(), F.col('lon')).otherwise(F.col('lon2')))
    df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
                                              F.col(pcol).isNull(),
                                              app_fun(F.col('lat'), F.col('lon'), F.col('lat2'), F.col('lon2'))
                                              ).otherwise(F.col('pause_dist'))
                         )
    # Restore the saved pause columns on rows untouched by the merge.
    df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
                                              F.col('pause').isNull(),
                                              F.col('pause_dist_cp')).otherwise(F.col('pause_dist'))
                         ).orderBy(ts_name)
    df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
                                         F.col('pause').isNull(),
                                         F.col('pause_cp')).otherwise(F.col('pause'))
                         ).orderBy(ts_name)
    df2 = df2.drop(*['lat2', 'lon2', 'pause_cp', 'pause_dist_cp', pcol])
    return df2 | cd7356ca134b5b0d3c91351478a9bd202cf2fcad | 27,474
from pathlib import Path
def _validate_magics_flake8_warnings(actual: str, test_nb_path: Path) -> bool:
"""Validate the results of notebooks with warnings."""
expected = (
f"{str(test_nb_path)}:cell_1:1:1: F401 'random.randint' imported but unused\n"
f"{str(test_nb_path)}:cell_1:2:1: F401 'IPython.get_ipython' imported but unused\n"
f"{str(test_nb_path)}:cell_3:6:21: E231 missing whitespace after ','\n"
f"{str(test_nb_path)}:cell_3:11:10: E231 missing whitespace after ','\n"
)
return actual == expected | 4baa419ad4e95bf8cc794298e70211c0fa148e5b | 27,475 |
def _drop_index(index, autogen_context):
    """
    Generate Alembic operations for the DROP INDEX of an
    :class:`~sqlalchemy.schema.Index` instance.

    Returns the rendered ``drop_index(...)`` source line as a string;
    the ``schema=`` argument is emitted only when the table has one.
    """
    text = "%(prefix)sdrop_index(%(name)r, "\
        "table_name='%(table_name)s'%(schema)s)" % {
            'prefix': _alembic_autogenerate_prefix(autogen_context),
            'name': _render_gen_name(autogen_context, index.name),
            'table_name': index.table.name,
            'schema': ((", schema='%s'" % index.table.schema)
                       if index.table.schema else '')
        }
    return text | 43b6b5391b69896b1e7409108d79661a13697391 | 27,476
def add_metabolite_drain_reactions(mod_, metab_list, prefix_id="MDS"):
    """Take in a model and add metabolite drain reactions for a list of metabolites
    in the model. These metabolite drain reactions will have the identification
    of (MDS)__(metabolite id). (i.e., MDS__atp_c for the atp_c metabolite)

    :param mod_: COBRA-style model; a copy is modified, the input is untouched
    :param metab_list: iterable of metabolite ids present in the model
    :param prefix_id: prefix used when naming the new drain reactions
    :return: (copied model with drains added, list of new reaction ids)
    """
    mod = mod_.copy()
    metab_obj_names=[]
    for metab in metab_list:
        obj_name = prefix_id+"__"+metab
        metab_drain = Reaction(obj_name)
        # Irreversible drain: 0 <= flux <= 1000, consuming one unit of the
        # metabolite per unit flux.
        metab_drain.lower_bound = 0
        metab_drain.upper_bound = 1000.
        metab_drain.add_metabolites({mod.metabolites.get_by_id(metab): -1.0})
        mod.add_reaction(metab_drain)
        metab_obj_names.append(obj_name)
    return mod, metab_obj_names | ea8cdf76896d4ab70dbb5f7fd4c15f4e83be7ba5 | 27,477
def tax_total(par):
    """
    Finds total tax burden in a log normal distributed population

    Args:
        par: simplenamespace containing relevant parameters
            phi (float): C-D weights
            epsilon (float): public housing assement factor
            r (float): mortgage interest
            tau_g (float): base housing tax
            tau_p (float): progressive housing tax
            p_bar (float): cutoff price
            m (float): cash-on-hand (overwritten per draw below)
            seed (int): seed number for random draws
            mu (float): mean value for the distribution
            sigma (float): standard deviation for the distribution
            pop (int): number of citizens simulated
    Local variables:
        h_cit (float): housing quality choice of one citizen in the population
        c_cit (float): other consumption choice of one citizen in the population
        u_cit (float): utility for one citizen in the population given chice of h and c
    Returns:
        T (float): total tax burden

    NOTE(review): depends on a module-level ``u_optimize(par)`` returning
    (h, c, u) — confirm its contract.
    """
    # Set seed and tax = 0
    np.random.seed(par.seed)
    T = 0
    # Loop through every citizen in the population and calculate optimal choices
    # and tax given those choices; par.m is mutated with each income draw.
    for i in range(par.pop):
        par.m = np.random.lognormal(par.mu, par.sigma)
        h_cit, c_cit, u_cit = u_optimize(par)
        # base tax on assessed value plus progressive tax above the cutoff
        T += par.tau_g*(h_cit*par.epsilon) + par.tau_p*max((par.epsilon*h_cit)-par.p_bar, 0)
    return T | f3a2ace349bcf25bffda855d3bad11cbeef9b6c0 | 27,478
def _split_uri(uri):
"""
Get slash-delimited parts of a ConceptNet URI.
Args:
uri (str)
Returns:
List[str]
"""
uri = uri.lstrip("/")
if not uri:
return []
return uri.split("/") | 91b48fff83041fe225a851a9e3016e3722bd9771 | 27,479 |
def file_len(fname):
    """ Calculate the length of a file

    Arguments:
        fname: Name of the file wanting to count the rows of
    Returns:
        Number of lines in the file; 0 for an empty file (the previous
        enumerate-based version raised UnboundLocalError on empty input)
    """
    with open(fname) as f:
        return sum(1 for _ in f)
def create_initializer(initializer_range=0.001):
    """Creates a `truncated_normal_initializer` with the given range.

    Args:
        initializer_range: stddev of the truncated normal distribution
            (the name says "range" but the value is passed as ``stddev``).
    Returns:
        A TF1-compat truncated-normal initializer.
    """
    return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range) | 70eb43744202c5abb34fca4bb0bce6b2b2d31d72 | 27,481
from typing import List
def collapse_columns(
    df: ContactsTable, names: List[str], new_name: str
) -> ContactsTable:
    """
    This function assumes that df has both columns and indexes identified by the same `names`. They will all be added
    together to create a new column and row named `new_name`. Eg.:
    >>> df = ContactsTable(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=list("abc")))
    >>> collapse_columns(df, ["a", "b"], "a'")
        a'   c
    a'  12  15
    c    9   9
    :param df: a contacts table type table. That means it's a upper triangle matrix
    :param names: name of the columns and indexes to aggregate
    :param new_name: name of the new column and row that will be created
    :return: A dataframe with collapsed columns and indexes
    """
    if not names:
        raise ValueError("Names must be a non-empty list")
    missing_columns = set(names) - set(df.columns)
    if missing_columns:
        raise ValueError(f"Names mismatch: {missing_columns}")
    if not all(df.columns == df.index):
        raise ValueError("Indexes and columns must match")
    agg = df.copy()
    # Column pass: fold all `names` columns into the first one, rename it,
    # then drop the rest.
    agg[names[0]] = df[names].sum(axis=1)
    agg = agg.rename({names[0]: new_name}, axis=1)
    agg = agg.drop(columns=names[1:])
    # Row pass: same fold on the index (note: sums the already column-
    # collapsed rows).
    agg.loc[names[0]] = agg.loc[names].sum()
    agg = agg.rename({names[0]: new_name}, axis=0)
    agg = agg.drop(index=names[1:])
    return ContactsTable(agg) | 7dd38ec18ca26b4757f7b743813401e6d7b2a7d4 | 27,482
import time
def current_date(pattern="%Y-%m-%d %H:%M:%S"):
    """Return the current local date/time formatted with ``pattern``.

    :param pattern: strftime format string
    :return: formatted timestamp, e.g. "2020-06-15 14:57:23"
    """
    # strftime defaults to the current local time, so building
    # localtime(time.time()) by hand was redundant.
    return time.strftime(pattern)
def ubi(funding_billions=0, percent=0):
    """ Calculate the poverty rate among the total US population by:
    -passing a total level of funding for a UBI proposal (billions USD),
    -passing a percent of the benefit recieved by a child and the benefit
    recieved by an adult
    AND
    taking into account that funding will be raise by a flat tax leveled on each households
    taxable income

    Returns a Series: [poverty_rate, gini, poverty_gap,
    percent_better_off, adult_ubi, child_ubi].

    NOTE(review): depends on module-level globals (`person`, `adult_pop`,
    `child_pop`, `total_taxable_income`, `pop`, `mdf`, `pd`) — confirm
    they are initialised before calling.
    """
    percent = percent / 100
    funding = funding_billions * 1e9
    target_persons = person.copy(deep=True)
    # i think this is % funding, not % benefit
    adult_ubi = ((1 - percent) * funding) / adult_pop
    child_ubi = (percent * funding) / child_pop
    # flat tax rate required to fully fund the transfer
    tax_rate = funding / total_taxable_income
    target_persons['hh_new_tax'] = target_persons.hh_tax_income * tax_rate
    target_persons['hh_ubi'] = (target_persons.hh_adults * adult_ubi +
                                target_persons.hh_children * child_ubi)
    target_persons['new_spm_resources'] = (target_persons.spm_resources +
                                           target_persons.hh_ubi -
                                           target_persons.hh_new_tax)
    target_persons['new_spm_resources_pp'] = (target_persons.new_spm_resources /
                                              (target_persons.hh_total_people))
    # Calculate poverty rate
    target_persons['poor'] = (target_persons.new_spm_resources <
                              target_persons.spm_povthreshold)
    total_poor = (target_persons.poor * target_persons.weight).sum()
    poverty_rate = (total_poor / pop * 100)
    # Calculate poverty gap
    target_persons['poverty_gap'] = target_persons.spm_povthreshold - target_persons.new_spm_resources
    poverty_gap = (((target_persons.poor * target_persons.poverty_gap
                     * target_persons.weight).sum()))
    # Calculate Gini
    gini = mdf.gini(target_persons, 'new_spm_resources_pp', w='weight')
    # Percent winners
    target_persons['better_off'] = (target_persons.new_spm_resources > target_persons.spm_resources)
    total_better_off = (target_persons.better_off * target_persons.weight).sum()
    percent_better_off = total_better_off / pop
    return pd.Series([poverty_rate, gini, poverty_gap, percent_better_off, adult_ubi, child_ubi]) | 3cdb9ea40085dce37f5746ff74b37f18c87e6c48 | 27,484
import logging
import textwrap
def validate_record(valrecord):
""" primary procedure which calls the coder with the parse in valrecord and compares the coded results with the expected
as well as writing assorted intermediate data structures to fout per test_script_ud.py """
def process_event_output(str):
""" from test_script_ud.py """
logger.debug("pso(): " + str)
str = str.replace("{","")
str = str.replace("}","")
res = ""
events = str[str.find(":"):].split("':")
for event in events:
event = event[0:event.rfind("]")]
event = event.replace(" ","")
event = event.replace(":","")
event = event.replace("u","")
event = event.replace("\'","")
event = event[1:]
event = event.replace("~","")
res = res+"\n("+event+")"
return res[1:]
def parse_parser(parse):
""" from test_script_ud.py """
phrase_dict = {}
for line in parse.splitlines():
lines = line.split('\t')
num = lines[0].strip()
str = lines[1].strip()
phrase_dict[num]=str
#print(num)
#print(str)
phrase_dict['-'] = " "
return phrase_dict
def parse_verb(phrase_dict,sentence):
""" from test_script_ud.py with minor modifications """
if '0' not in return_dict[idstrg]['sents']:
return
if 'verbs' not in return_dict[idstrg]['sents']['0']:
return
str_arr = str(return_dict[idstrg]['sents']['0']['verbs']).strip("{").split(",")
fout.write("Verbs found:\n")
for verbID in return_dict[idstrg]['sents']['0']['verbs']:
verb = return_dict[idstrg]['sents']['0']['verbs'][verbID]
str_add = str(verbID) + " : text = " + str(verb.text) +", head="+ str(verb.head) +", meaning="+ str(verb.meaning)+", code="+ str(verb.code)+" ,passive="+str(verb.passive) + "\n"
fout.write(" " + str_add)
'''
for x in str_arr[:1]:
str_num = x.find(":")
try:
np = sentence.get_verbPhrase(int(x[:str_num].strip()))
str_add = x[:str_num].strip() + " : text = " + str(np.text) +", head="+ str(np.head) +", meaning="+ str(np.meaning)+", code="+ str(np.code)+" ,passive="+str(np.passive) + "\n"
fout.write(" " + str_add)
except Exception as e:
print(e)
fout.write(" --> Exception generating sentence.get_verbPhrase(): " + str(e) + '\n')
'''
return
def check_none_verb(expected):
""" from test_script_ud.py with minor modifications """
if '0' not in return_dict[idstrg]['sents']:
return
if 'verbs' not in return_dict[idstrg]['sents']['0']:
return
found = False
for verbID in return_dict[idstrg]['sents']['0']['verbs']:
verb = return_dict[idstrg]['sents']['0']['verbs'][verbID]
for i in range(0,len(expected['events'])):
goldevent = expected['events'][i]
goldeventtext = expected['eventtexts'][i]
if verb.rawtext in goldeventtext['eventtext'] and verb.code == None:
code = verb.code if verb.code != None else "None"
#noneverb = idstrg+"\t"+"verb_raw:"+verb.rawtext+"\tverb_lemma:"+verb.text+"\tgold:"+edict['eventtext']+"\tcode:"+code
#noneverb = idstrg+"\t"+expected['text']+"\t"+verb.text+"\t["+code+"]\t"+ goldeventtext['eventtext']+"\t["+goldevent['eventcode']+"]"+"\t"+ goldevent['plover']+"\t"+goldeventtext['sourcetext']+"\t"+goldeventtext['targettext']
noneverb = [idstrg,expected['text'],verb.text,"["+code+"]", goldeventtext['eventtext'],"["+goldevent['eventcode']+"]"]
none_verb.append(noneverb)
none_verb_filenames.append(idstrg)
found = True
'''
for x in str_arr[:1]:
str_num = x.find(":")
try:
np = sentence.get_verbPhrase(int(x[:str_num].strip()))
str_add = x[:str_num].strip() + " : text = " + str(np.text) +", head="+ str(np.head) +", meaning="+ str(np.meaning)+", code="+ str(np.code)+" ,passive="+str(np.passive) + "\n"
fout.write(" " + str_add)
except Exception as e:
print(e)
fout.write(" --> Exception generating sentence.get_verbPhrase(): " + str(e) + '\n')
'''
return found
def parse_triplets(phrase_dict):
""" from test_script_ud.py with minor modifications """
if '0' not in return_dict[idstrg]['sents']:
return
if 'triplets' not in return_dict[idstrg]['sents']['0']:
return
triplets=return_dict[idstrg]['sents']['0']['triplets']
fout.write("Triplets found:\n")
'''
for triple in triplets:
strs = triplets[triple]
meaning = strs['meaning']
verbcode = strs['verbcode']
matched_text = strs['matched_txt']
codes = str(triple).split("#")
event = "(" + phrase_dict[codes[0]] + "," + phrase_dict[codes[1]] + "," + phrase_dict[codes[2]] + ")"
str_add = str(triple) + event +": Meaning = " + str(meaning) + ", VerbCode = " + str(verbcode) + ", Matched Text = " + str(matched_text) + "\n"
fout.write(" " + str_add)
return
'''
for triple in triplets:
strs = triplets[triple]
source_text = strs['source_text']
target_text = strs['target_text']
verb_text = strs['verb_text']
matched_text = strs['matched_txt']
codes = str(triple).split("#")
#event = "(" + phrase_dict[codes[0]] + "," + phrase_dict[codes[1]] + "," + phrase_dict[codes[2]] + ")"
event = "(" + source_text + "," + target_text + "," + verb_text +")"
str_add = str(triple) + event + ": Matched Text = " + str(matched_text) + "\n"
fout.write(" " + str_add)
return
def check_unmatched_triplets(valrecord):
""" from test_script_ud.py with minor modifications """
if '0' not in return_dict[idstrg]['sents']:
return
if 'triplets' not in return_dict[idstrg]['sents']['0']:
return
triplets=return_dict[idstrg]['sents']['0']['triplets']
found = False
for triple in triplets:
strs = triplets[triple]
source_text = strs['source_text']
target_text = strs['target_text']
verb_text = strs['verb_text']
matched_text = strs['matched_txt']
code = strs['verbcode']
codes = str(triple).split("#")
#event = "(" + phrase_dict[codes[0]] + "," + phrase_dict[codes[1]] + "," + phrase_dict[codes[2]] + ")"
#event = "(" + source_text + "," + target_text + "," + verb_text +")"
event = verb_text
for i in range(0,len(valrecord['events'])):
goldevent = valrecord['events'][i]
goldeventtext = valrecord['eventtexts'][i]
if verb_text != None and verb_text.lower() in goldeventtext['eventtext'] and goldevent['eventcode'] not in code:
#raw_input(event+"\tsyscode:"+code+"\tmatched:"+matched_text.strip()+"\tgold:"+goldeventtext['eventtext']+","+goldevent['eventcode'])
#unmatched_patterns.append(idstrg+"\t"+valrecord['text']+"\t"+event+"\t["+code+"]\t"+matched_text.strip().replace("\t"," ")+"\t"+
# goldeventtext['eventtext']+"\t["+goldevent['eventcode']+"]"+"\t"+ goldevent['plover']) #+"\t"+goldeventtext['sourcetext']+"\t"+goldeventtext['targettext'])
unmatched_patterns.append([idstrg,valrecord['text'],event,"["+code+"]",matched_text.strip().replace("\t"," "),goldeventtext['eventtext'],"["+goldevent['eventcode']+"]"])
unmatched_filenames.append(idstrg)
found = True
return found
def check_missing_pattern(expected):
""" from test_script_ud.py with minor modifications """
if '0' not in return_dict[idstrg]['sents']:
return
if 'triplets' not in return_dict[idstrg]['sents']['0']:
return
found = False
for verbID in return_dict[idstrg]['sents']['0']['verbs']:
verb = return_dict[idstrg]['sents']['0']['verbs'][verbID]
for i in range(0,len(expected['events'])):
goldevent = expected['events'][i]
goldeventtext = expected['eventtexts'][i]
if verb.rawtext in goldeventtext['eventtext'] and verb.code == '---':
code = verb.code if verb.code != None else "None"
#noneverb = idstrg+"\t"+"verb_raw:"+verb.rawtext+"\tverb_lemma:"+verb.text+"\tgold:"+edict['eventtext']+"\tcode:"+code
#noneverb = idstrg+"\t"+expected['text']+"\t"+verb.text+"\t["+code+"]\t"+ goldeventtext['eventtext']+"\t["+goldevent['eventcode']+"]"+"\t"+ goldevent['plover']+"\t"+goldeventtext['sourcetext']+"\t"+goldeventtext['targettext']
noneverb = [idstrg,expected['text'],verb.text,"["+code+"]", goldeventtext['eventtext'],"["+goldevent['eventcode']+"]", goldevent['plover']]
missing_patterns.append(noneverb)
found = True
return found
'''
def check_if_matched(return_dict, idstrg):
if '0' in return_dict[idstrg]['sents'] and 'events' in return_dict[idstrg]['sents']['0'] and len(return_dict[idstrg]['sents']['0']['events']) > 0:
print(return_dict[idstrg]['sents']['0']['events'])
event_out = process_event_output(str(return_dict[idstrg]['sents']['0']['events']))
nfound, ncoded, nnull = 0, 0, 0
for key, evt in return_dict[idstrg]['sents']['0']['events'].items():
try:
#if evt[0][0].startswith("---") or evt[1][0].startswith("---") or evt[2].startswith("---") :
if evt[2].startswith("---") :
nnull += 1
continue
except: # handles [] cases
nnull += 1
continue
try:
ncoded += 1
for edict in valrecord['events']:
if "noevents" in edict:
break
else:
sourcematch = True
targetmatch = True
#if (edict['eventcode'] == evt[2] and
# edict['sourcecode'] == evt[0][0] and
# edict['targetcode'] == evt[1][0]) :
if edict['eventcode'][:2] not in ["18","19"]:
if edict['eventcode'][:2] == evt[2][:2] and (sourcematch and targetmatch): # for spanish now only match event code
fout.write(" CORRECT\n")
nfound += 1
edict['found'] = True
matched = True
break
else:
# if 2-digit system event code is 18 and 2-digit gold code is 19, it is counted as correct
# and vice-versa
if evt[2][:2] in ["18","19"] and (sourcematch and targetmatch):
fout.write(" CORRECT\n")
nfound += 1
edict['found'] = True
matched = True
break
except:
pass
else:
if "noevents" in valrecord['events'][0]:
nfound, ncoded, nnull = 1, 1, 0 # count this as a match
else:
nfound, ncoded, nnull = 0, 0, 0
return nfound,ncoded,nnull
'''
logger = logging.getLogger('petr_log.validate')
# logger.addFilter(NoLoggingFilter()) # uncomment to decactivate logging for this function
parse = valrecord['parse']
idstrg = valrecord['id']
print("evaluating", idstrg)
phrase_dict = parse_parser(parse)
parsed = utilities._format_ud_parsed_str(parse)
dict = {idstrg: {u'sents': {u'0': {u'content': valrecord['text'], u'parsed': parsed}},
u'meta': {u'date': valrecord['date']}}}
return_dict = ""
return_dict = petrarch_ud.do_coding(dict)
#numcorrect, numcoded, numnull = check_if_matched(return_dict,idstrg)
#if not (numnull < 1 and numcorrect > 0):
# return
'''
if numcorrect == 0:
check_none_verb(valrecord)
check_unmatched_triplets(valrecord)
if idstrg in none_verb_filenames or idstrg in unmatched_filenames:
#raw_input(idstrg)
return
'''
logger.debug("\nevaluating: "+ idstrg)
fout.write("Record ID: " + idstrg + '\n')
if not doing_compare:
fout.write("Text:\n")
for li in textwrap.wrap(valrecord['text'], width = 100):
fout.write(" " + li + '\n')
fout.write("Parse:\n")
for strg in parse[:-1].split("\n"):
fout.write(" " + strg + '\n')
fout.write("Expected events:\n")
for i in range(len(valrecord['events'])):
edict = valrecord['events'][i]
if "noevents" in edict:
fout.write(" noevents\n")
else:
fout.write(" " + edict['eventcode'] + ' ' + edict['sourcecode'] + ' ' + edict['targetcode'] + '\n')
#fout.write(" " + edict['eventcode'] + ' ' + valrecord['eventtexts'][i]['eventtext'] + '\n')
fout.write("Coded events:\n")
if '0' not in return_dict[idstrg]['sents']:
print(return_dict[idstrg])
#raw_input()
if '0' in return_dict[idstrg]['sents'] and 'events' in return_dict[idstrg]['sents']['0'] and len(return_dict[idstrg]['sents']['0']['events']) > 0:
print(return_dict[idstrg]['sents']['0']['events'])
event_out = process_event_output(str(return_dict[idstrg]['sents']['0']['events']))
nfound, ncoded, nnull = 0, 0, 0
for key, evt in return_dict[idstrg]['sents']['0']['events'].items():
try:
#if evt[0][0].startswith("---") or evt[1][0].startswith("---") or evt[2].startswith("---") :
if evt[2].startswith("---") :
nnull += 1
continue
except: # handles [] cases
nnull += 1
continue
try:
fout.write(" " + evt[2] + ' ' + evt[0][0] + ' ' + evt[1][0] + " (" + key + ")")
ncoded += 1
for edict in valrecord['events']:
if "noevents" in edict:
fout.write(" ERROR: NO EVENTS\n")
break
else:
sourcematch = False
targetmatch = False
if edict['sourcecode'].replace("---","") == evt[0][0].replace("---",""):
sourcematch = True
#elif edict['sourcecode'] == "---" and evt[0][0] != "---":
# sourcematch = True
else:
#allow partial actor code match in some cases
goldsrcset = []
goldtemp = edict['sourcecode'].replace("---","").replace("COL","")
goldn = len(goldtemp)//3
for i in range(goldn):
goldsrcset.append(goldtemp[i*3:(i+1)*3])
print("g:",goldsrcset)
#input(" ")
syssrcset = []
systemp = evt[0][0].replace("---","").replace("COL","")
sysn = len(systemp)//3
for i in range(sysn):
syssrcset.append(systemp[i*3:(i+1)*3])
print("s:",syssrcset)
#input(" ")
if len(syssrcset) >0 and len(goldsrcset)>0:
if len(syssrcset)<= len(goldsrcset):
fc = 0
for src in syssrcset:
if src in goldsrcset:
fc += 1
if fc/len(syssrcset) >= 0.5:
sourcematch = True
#input(" ")
else:
fc = 0
for src in goldsrcset:
if src in syssrcset:
fc += 1
if fc/len(goldsrcset) >= 0.5:
sourcematch = True
#input(" ")
if edict['targetcode'].replace("---","") == evt[1][0].replace("---",""):
targetmatch = True
#elif edict['targetcode'] == "---" and evt[1][0] != "---":
# targetmatch = True
else:
goldsrcset = []
goldtemp = edict['targetcode'].replace("---","").replace("COL","")
goldn = len(goldtemp)//3
for i in range(goldn):
goldsrcset.append(goldtemp[i*3:(i+1)*3])
print("g:",goldsrcset)
#input(" ")
syssrcset = []
systemp = evt[1][0].replace("---","").replace("COL","")
sysn = len(systemp)//3
for i in range(sysn):
syssrcset.append(systemp[i*3:(i+1)*3])
print("s:",syssrcset)
#input(" ")
if len(syssrcset) >0 and len(goldsrcset)>0:
if len(syssrcset)<= len(goldsrcset):
fc = 0
for src in syssrcset:
if src in goldsrcset:
fc += 1
if fc/len(syssrcset) >= 0.5:
targetmatch = True
#input(" ")
else:
fc = 0
for src in goldsrcset:
if src in syssrcset:
fc += 1
if fc/len(goldsrcset) >= 0.5:
targetmatch = True
#input(" ")
#if (edict['eventcode'] == evt[2] and
# edict['sourcecode'] == evt[0][0] and
# edict['targetcode'] == evt[1][0]) :
if edict['eventcode'][:2] not in ["18","19"]:
if edict['eventcode'][:2] == evt[2][:2] and (sourcematch and targetmatch): # for spanish now only match event code
fout.write(" CORRECT\n")
nfound += 1
edict['found'] = True
matched = True
break
else:
# if 2-digit system event code is 18 and 2-digit gold code is 19, it is counted as correct
# and vice-versa
if evt[2][:2] in ["18","19"] and (sourcematch and targetmatch):
fout.write(" CORRECT\n")
nfound += 1
edict['found'] = True
matched = True
break
else:
fout.write(" ERROR\n") # do we ever hit this now?
if doing_compare and len(valrecord['events']) == 1 and len(return_dict[idstrg]['sents']['0']['events']) == 1:
type_counts[0] += 1
if valrecord['events'][0]['eventcode'] == evt[2]:
type_counts[1] += 1
if valrecord['events'][0]['eventcode'][:2] == evt[2][:2]: # cue category
type_counts[2] += 1
if valrecord['events'][0]['sourcecode'] == evt[0][0]:
type_counts[3] += 1
if valrecord['events'][0]['sourcecode'][:3] == evt[0][0][:3]: # country code
type_counts[4] += 1
if valrecord['events'][0]['targetcode'] == evt[1][0]:
type_counts[5] += 1
if valrecord['events'][0]['targetcode'][:3] == evt[1][0][:3]:
type_counts[6] += 1
except:
pass
if ncoded == 0:
fout.write(" No events returned ")
if "noevents" in valrecord['events'][0]:
fout.write("CORRECT\n")
else:
fout.write("ERROR\n")
if nnull > 0:
fout.write("Null events: " + str(nnull) + '\n')
if not doing_compare:
fout.write("Event source:\n")
for key, val in return_dict[idstrg]['sents']['0']['events'].items():
fout.write(" " + key + ': ' + str(val) + '\n')
else:
fout.write(" No events returned")
if "noevents" in valrecord['events'][0]:
nfound, ncoded, nnull = 1, 1, 0 # count this as a match
fout.write(" CORRECT\n")
else:
nfound, ncoded, nnull = 0, 0, 0
fout.write(" ERROR\n")
if nfound > 0:
correct_files.append(idstrg)
num_found = 0
num_notcoded = 0
numcoded = 0
for edict in valrecord['events']:
if "noevents" in edict:
num_found = 1
num_coded = 1
else:
if 'found' in edict and edict['found'] == True:
num_found +=1
else:
num_notcoded +=1
#fout.write("Stats:\n Correct: " + str(nfound) + " Not coded: " + str(len(valrecord['events']) - nfound)
# + " Extra events: " + str(ncoded - nfound) + " Null events: " + str(nnull) + '\n')
fout.write("Stats:\n Correct: " + str(num_found) + " Not coded: " + str(num_notcoded)
+ " Extra events: " + str(ncoded - nfound) + " Null events: " + str(nnull) + '\n')
if valrecord['category'] in valid_counts:
valid_counts[valrecord['category']][0] += 1 # records
valid_counts[valrecord['category']][1] += num_found #nfound # correct
valid_counts[valrecord['category']][2] += num_notcoded #len(valrecord['events']) - nfound # uncoded
valid_counts[valrecord['category']][3] += ncoded - nfound # extra
valid_counts[valrecord['category']][4] += nnull # null
else:
#valid_counts[valrecord['category']] = [1, nfound, len(valrecord['events']) - nfound, ncoded - nfound, nnull]
valid_counts[valrecord['category']] = [1, num_found, num_notcoded, ncoded - nfound, nnull]
valid_counts['catlist'].append(valrecord['category']) # keep track of the order of the categories found
try:
sentence = PETRgraph.Sentence(parsed, valrecord['text'] , 0000) # 18.05.22: what the heck is this doing???
except Exception as e:
print(" --> Exception generating PETRgraph.Sentence(): ",e)
fout.write(" --> Exception generating PETRgraph.Sentence(): " + str(e) + '\n')
if not doing_compare:
parse_verb(phrase_dict, sentence)
if idstrg not in correct_files:
noneverbflag = check_none_verb(valrecord)
unmatchflag = check_unmatched_triplets(valrecord)
missingflag = check_missing_pattern(valrecord)
if valrecord['id'] not in stats_dict:
stats_dict[valrecord['id']] = {}
stats_dict[valrecord['id']]['noneverb'] = noneverbflag
stats_dict[valrecord['id']]['unmatch'] = unmatchflag
stats_dict[valrecord['id']]['missing'] = missingflag
stats_dict[valrecord['id']]['correct'] = False
#stats.append(idstrg+"False\t"+str(noneverbflag)+"\t"+str(unmatchflag)+"\t"+str(missingflag)+"\t"+str(nfound) + "\t" + str(len(valrecord['events']) - nfound)
# + "\t" + str(ncoded - nfound) + "\t" + str(nnull))
else:
if valrecord['id'] not in stats_dict:
stats_dict[valrecord['id']] = {}
stats_dict[valrecord['id']]['noneverb'] = False
stats_dict[valrecord['id']]['unmatch'] = False
stats_dict[valrecord['id']]['missing'] = False
stats_dict[valrecord['id']]['correct'] = True
#stats.append(idstrg+"True\t"+"False\t"+"False\t"+"False\t"+str(nfound) + "\t" + str(len(valrecord['events']) - nfound)
# + "\t" + str(ncoded - nfound) + "\t" + str(nnull))
stats_dict[valrecord['id']]['numbers'] = [nfound,(len(valrecord['events']) - nfound),(ncoded-nfound),nnull]
parse_triplets(phrase_dict)
fout.write('\n') | 571d03c9d00e427e31852296f6ee46fd811b8b20 | 27,485 |
def local_disp_centr(x, y, image, disp_n, size_k, mode):
    """
    Compute local dispersion and centrality statistics around pixel (x, y).

    :param x: row index of the window centre
    :param y: column index of the window centre
    :param image: 2-D array of pixel intensities
    :param disp_n: fallback dispersion used when the local one is zero
    :param size_k: side length of the square neighbourhood window (assumed odd)
    :param mode: 'robust' (IQR / median) or 'average' (std / mean)
    :raises ValueError: for any other mode
    :return: tuple (disp_l, centr_l)
    """
    half = size_k // 2
    window = image[x - half : x + half + 1, y - half : y + half + 1]
    if mode == 'robust':
        q75, q50, q25 = np.percentile(window, [75, 50, 25])
        # Bug fix: the interquartile range is the 75th minus the 25th
        # percentile; the original computed 25th - 75th, which yields a
        # non-positive dispersion.
        disp_l = q75 - q25
        centr_l = q50  # the 50th percentile is the median
    elif mode == 'average':
        disp_l = np.std(window)
        centr_l = np.mean(window)
    else:
        raise ValueError('Invalid denoising mode')
    if disp_l == 0:
        # A flat window carries no usable local dispersion; fall back.
        disp_l = disp_n
    return disp_l, centr_l
import aiohttp
async def get_music_list() -> MusicList:
    """
    Fetch the full maimaiDX song list and per-chart statistics from diving-fish.

    :raises aiohttp.ClientResponseError: when either endpoint does not answer 200
    :return: a MusicList whose entries are wrapped in Music/Chart/Stats objects
    """
    async with aiohttp.request("GET", 'https://www.diving-fish.com/api/maimaidxprober/music_data') as obj_data:
        if obj_data.status != 200:
            # Bug fix: ClientResponseError requires request_info and history as
            # positional arguments; the original single-string call raised
            # TypeError instead of the intended error.
            raise aiohttp.ClientResponseError(
                obj_data.request_info, obj_data.history,
                status=obj_data.status,
                message='maimaiDX曲目数据获取失败,请检查网络环境')
        data = await obj_data.json()
    async with aiohttp.request("GET", 'https://www.diving-fish.com/api/maimaidxprober/chart_stats') as obj_stats:
        if obj_stats.status != 200:
            raise aiohttp.ClientResponseError(
                obj_stats.request_info, obj_stats.history,
                status=obj_stats.status,
                message='maimaiDX数据获取错误,请检查网络环境')
        stats = await obj_stats.json()
    total_list: MusicList = MusicList(data)
    for i in range(len(total_list)):
        total_list[i] = Music(total_list[i])
        # attach the chart statistics keyed by the song id
        total_list[i]['stats'] = stats[total_list[i].id]
        for j in range(len(total_list[i].charts)):
            total_list[i].charts[j] = Chart(total_list[i].charts[j])
            total_list[i].stats[j] = Stats(total_list[i].stats[j])
    return total_list
import csv
import ast
def split_data(data_index, data_dir, amount):
    """
    Split data into 'train' and 'test' sets.

    Data index = csv filename (rows: labels-literal, relative path, names-literal)
    Data dir = directory in which data is stored (prefixed onto each path)
    Amount = percentage of data to be imported ( to reduce memory usage )

    :return: [ds_train, ds_test], each a dict with 'files', 'images'
        (empty placeholder), 'labels' and 'label_names'; the kept rows are
        split in half between the two sets.
    """
    # Bug fix: csv.reader requires a text-mode file object in Python 3
    # ('rb' raised "iterator should return strings"); newline='' is the
    # csv-module recommended way to open the file.
    with open(data_index, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        lfiles = list(reader)
    dfiles = []
    dlabels = []
    dlabel_names = []
    # Keep only the first `amount` percent of the indexed rows.
    keep = int(len(lfiles) / 100 * int(amount))
    for idx, val in enumerate(lfiles):
        if idx < keep:
            dlabels.append(ast.literal_eval(val[0]))       # list of label ids
            dfiles.append(data_dir + val[1])               # full path to the file
            dlabel_names.append(ast.literal_eval(val[2]))  # list of label names
    half = len(dfiles) // 2
    ds_train = {
        'files': dfiles[:half],
        'images': [],
        'labels': dlabels[:half],
        'label_names': dlabel_names[:half]
    }
    ds_test = {
        'files': dfiles[half:],
        'images': [],
        'labels': dlabels[half:],
        'label_names': dlabel_names[half:]
    }
    return [ds_train, ds_test]
def isValidPasswordPartTwo(firstIndex: int, secondIndex: int, targetLetter: str, password: str) -> int:
    """
    Validate a password under the second puzzle rule: exactly one of the
    two 1-based positions must hold the target letter.

    Returns 1 if the password is valid, 0 otherwise.
    """
    at_first = password[firstIndex - 1] == targetLetter
    at_second = password[secondIndex - 1] == targetLetter
    # "exactly one" is boolean inequality (XOR)
    return int(at_first != at_second)
import requests
import os
import json
def get_fasta(proteinID, disorder, outdir):
    """
    Fetch the UniProt sequence for ``proteinID`` from MobiDB, save it as a
    FASTA file plus a JSON disorder annotation under ``outdir/<proteinID>/``,
    and return the sequence length.

    Returns 0 when the HTTP request fails (non-200 answer or network error).
    If the per-protein directory already exists, the files are assumed to be
    cached: nothing is (re)written and only the sequence length is returned.

    :param proteinID: UniProt identifier
    :param disorder: disorder annotation (JSON-serialisable) to store alongside
    :param outdir: base output directory — NOTE(review): appears to require a
        trailing path separator; confirm against callers
    :return: length of the fetched sequence, or 0 on failure
    """
    try:
        url = "http://mobidb.bio.unipd.it/ws/entries/" + proteinID + "/uniprot"
        response = requests.get(url)
        print(url)
        if response.status_code == requests.codes.ok:
            data = response.json()
        else:
            # Non-200 answer: report the status code and signal failure with 0.
            print(response.status_code)
            return 0
    except requests.exceptions.RequestException as e:
        # Network-level failure (timeout, DNS, ...): also signalled with 0.
        print(e)
        return 0
    # Wrap the raw sequence in a Biopython record so SeqIO can emit FASTA.
    protein = SeqRecord(Seq(data["sequence"]), id=proteinID, description="")
    proteindir = outdir + proteinID+"/"
    if not os.path.exists(proteindir):
        os.makedirs(proteindir)
    else:
        # Directory already present: treat as cached and skip writing files.
        return len(data["sequence"])
    f_result = open(proteindir+proteinID+".fa", "w+")
    SeqIO.write(protein, f_result, "fasta")
    f_result.close()
    with open(proteindir+proteinID+"_disorder.json", "w+") as f_dis:
        json.dump(disorder, f_dis)
    return len(data["sequence"])
from typing import Any
def debug_validator(validator: ValidatorType) -> ValidatorType:
    """
    Wrap *validator* so every invocation is traced to the debug log.

    Example::

        self.validator = debug_validator(OneOf(["some", "values"]))

    The log records the value being validated and whether the wrapped
    validator accepted or rejected it; rejections re-raise ``Invalid``.
    """
    def _traced(node: SchemaNode, value: Any) -> None:
        log.debug("Validating: {!r}", value)
        try:
            validator(node, value)
        except Invalid:
            log.debug("... rejected")
            raise
        else:
            log.debug("... accepted")
    return _traced
import json
from typing import OrderedDict
def read_yaml_or_json(url: str) -> dict:
    """Read a YAML or JSON document.

    :param url: URL or path; a '.yaml' suffix selects the YAML parser,
        anything else is parsed as JSON (order-preserving).
    :return: a dictionary with the document contents.
    """
    wants_yaml = url.endswith(".yaml")
    with open_url(url, decode=True) as stream:
        if wants_yaml:
            return yaml_ordered_load(stream)
        return json.load(stream, object_pairs_hook=OrderedDict)
def diagEst(matFun, n, k=None, approach='Probing'):
    """
    Estimate the diagonal of a matrix, A. Note that the matrix may be a
    function which returns A times a vector.

    Three different approaches have been implemented:

    1. Probing: cyclic permutations of vectors with 1's and 0's (default)
    2. Ones: random +/- 1 entries
    3. Random: random vectors

    :param callable matFun: takes a (numpy.array) and multiplies it by a matrix to estimate the diagonal
    :param int n: size of the vector that should be used to compute matFun(v)
    :param int k: number of vectors to be used to estimate the diagonal
    :param str approach: approach to be used for getting vectors
    :rtype: numpy.array
    :return: est_diag(A)

    Based on Saad http://www-users.cs.umn.edu/~saad/PDF/umsi-2005-082.pdf,
    and http://www.cita.utoronto.ca/~niels/diagonal.pdf
    """
    if isinstance(matFun, np.ndarray):
        # The matrix itself was passed in; wrap it as a mat-vec product.
        A = matFun

        def matFun(v):
            return A.dot(v)

    if k is None:
        # Bug fix: np.floor returns a float, which broke range(k) and the
        # probing slice step v[i:n:k] on Python 3; also clamp to >= 1 so
        # n < 10 does not lead to zero probes (and division by zero below).
        k = max(n // 10, 1)
    k = int(k)

    if approach.upper() == 'ONES':
        def getv(n, i=None):
            v = np.random.randn(n)
            v[v < 0] = -1.
            v[v >= 0] = 1.
            return v
    elif approach.upper() == 'RANDOM':
        def getv(n, i=None):
            return np.random.randn(n)
    else:  # 'Probing' (default): 1's at positions i, i+k, i+2k, ...
        def getv(n, i):
            v = np.zeros(n)
            v[i:n:k] = 1.
            return v

    Mv = np.zeros(n)
    vv = np.zeros(n)
    for i in range(k):
        vk = getv(n, i)
        Mv += matFun(vk) * vk
        vv += vk * vk
    return Mv / vv
def delete_a_recipe(category_id, recipe_id):
    """Handle permanent deletion of a single recipe for an authenticated user."""
    user_id = decode_auth_token(request.headers.get("Authorization"))
    # decode_auth_token yields an int user id on success, anything else on failure
    if not isinstance(user_id, int):
        return invalid_token()
    return Recipe().delete_recipe(category_id, recipe_id)
def hydrate_datatrust_state(data=None):
    """
    Given a dictionary, allow the viewmodel to hydrate the data needed by this view.

    :param data: optional dict of raw state; defaults to an empty dict
    :return: the hydrated state produced by State.hydrate
    """
    # Bug fix: the original used a mutable default argument ({}) which is
    # shared across calls and could be mutated downstream; use the standard
    # None-sentinel idiom instead (backward compatible for all callers).
    if data is None:
        data = {}
    vm = State()
    return vm.hydrate(data)
def preprocess_data(X, Y):
    """Prepare a batch for model training: X is passed through unchanged
    and Y is converted to a one-hot representation over 9 classes."""
    encoded_labels = K.utils.to_categorical(Y, 9)
    return (X, encoded_labels)
import json
def readLog(logPath):
    """Load a log file containing an array of json objects. \
    Used with the 'gcloudformatter' function.
    Args:
        logPath: file system path to map file
    Returns:
        An array of json objects
    """
    logger.info("Reading log {}".format(logPath))
    with open(logPath, 'r') as log_file:
        return json.load(log_file)
from typing import List
import csv
from datetime import datetime
def save_traces_file(traces_file_name, animals_list: List[Animal]):
    """
    Saves TRACES compatible import file, based on animals in animals_list.

    :param traces_file_name: path of the CSV file to write
    :param animals_list: animals to export; all are assumed to share one species
    :return: True if a file was written, False if animals_list is empty
    :raises NotImplementedError: for species other than cattle or sheep
    """
    def save_traces_file_cattles():
        # One row per animal: official id + temporary passport number.
        # The with-statement replaces the original manual open/try/finally.
        with open(traces_file_name, 'w') as fd:
            writer = csv.writer(fd, dialect='excel', delimiter=';', quoting=csv.QUOTE_NONE)
            writer.writerow(['[COLUMNS]'])
            writer.writerow(['official_ident', 'numpassportemp'])
            writer.writerow([])
            writer.writerow(['[DATA]'])
            for animal in animals_list:
                writer.writerow([animal.animal_id, animal.passport_id])
            writer.writerow(['[DATA]'])

    def save_traces_file_sheeps():
        # One row per animal: individual id, age in months, TRACES sex code.
        with open(traces_file_name, 'w') as fd:
            writer = csv.writer(fd, dialect='excel', delimiter=';', quoting=csv.QUOTE_NONE)
            writer.writerow(['[COLUMNS]'])
            writer.writerow(['official_indiv', 'age_month', 'sexinfo'])
            writer.writerow([''])
            writer.writerow(['[DATA]'])
            now = datetime.now()
            for animal in animals_list:
                writer.writerow([animal.animal_id, animal.age_in_months(now), animal.get_traces_sex()])
            writer.writerow(['[DATA]'])

    if not animals_list:
        return False
    if animals_list[0].species == ANIMAL_SPECIES_CATTLE:
        save_traces_file_cattles()
    elif animals_list[0].species == ANIMAL_SPECIES_SHEEP:
        save_traces_file_sheeps()
    else:
        raise NotImplementedError
    return True
def get_KNN(X, k):
    """Identify nearest neighbours
    Parameters
    ----------
    X : array, [n_samples, n_features]
        input data
    k : int
        number of nearest neighbours
    Returns
    -------
    W : array, [n_samples, n_samples]
        Symmetric binary connectivity matrix with edges connecting
        nearest neighbours (cosine metric)
    """
    knn = NearestNeighbors(n_neighbors=k, metric='cosine')
    # sparse neighbourhood graph
    W = knn.fit(X).kneighbors_graph(mode='connectivity')
    # into square matrix
    W = W.toarray()
    # enforce symmetry (not true kNN): keep an edge if either point lists the other
    W = np.fmax(W,W.T)
    #knn_graph = W*(1-D) # similarity not distance
    return W
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.