content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def productos_pos():
    """
    Show the configuration records for POS products.

    :return: dict with all rows of the ``maestro_pos`` table, for the view.
    """
    rows = db(db.maestro_pos).select()
    return dict(productos=rows)
def _build_measurement_vectors(ppci):
    """
    Build the measurement vector z, the pandapower-to-ppci measurement index
    mapping, and the covariance (standard deviation) vector R for state
    estimation.

    Measurement columns are appended to the standard ppci "bus" and "branch"
    matrices.  ``bus_cols``/``branch_cols`` give the number of standard
    columns, and P, P_FROM, ..., IM_TO_STD are the measurement column
    offsets -- NOTE(review): these are module-level constants defined
    elsewhere in this module; confirm their layout there.

    :param ppci: generated ppci which contains the measurement columns
    :return: (z, pp_meas_indices, r_cov) -- measurement values, original
        pandapower measurement indices, and standard deviations, each
        concatenated in the same fixed order:
        P, P_from, P_to, Q, Q_from, Q_to, V, I_from, I_to.
    """
    # Boolean masks selecting entries where a measurement exists
    # (missing measurements are stored as NaN in the extra columns).
    p_bus_not_nan = ~np.isnan(ppci["bus"][:, bus_cols + P])
    p_line_f_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + P_FROM])
    p_line_t_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + P_TO])
    q_bus_not_nan = ~np.isnan(ppci["bus"][:, bus_cols + Q])
    q_line_f_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + Q_FROM])
    q_line_t_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + Q_TO])
    v_bus_not_nan = ~np.isnan(ppci["bus"][:, bus_cols + VM])
    i_line_f_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + IM_FROM])
    i_line_t_not_nan = ~np.isnan(ppci["branch"][:, branch_cols + IM_TO])
    # piece together our measurement vector z
    z = np.concatenate((ppci["bus"][p_bus_not_nan, bus_cols + P],
                        ppci["branch"][p_line_f_not_nan, branch_cols + P_FROM],
                        ppci["branch"][p_line_t_not_nan, branch_cols + P_TO],
                        ppci["bus"][q_bus_not_nan, bus_cols + Q],
                        ppci["branch"][q_line_f_not_nan, branch_cols + Q_FROM],
                        ppci["branch"][q_line_t_not_nan, branch_cols + Q_TO],
                        ppci["bus"][v_bus_not_nan, bus_cols + VM],
                        ppci["branch"][i_line_f_not_nan, branch_cols + IM_FROM],
                        ppci["branch"][i_line_t_not_nan, branch_cols + IM_TO]
                        )).real.astype(np.float64)
    # conserve the pandapower indices of measurements in the ppci order
    pp_meas_indices = np.concatenate((ppci["bus"][p_bus_not_nan, bus_cols + P_IDX],
                                      ppci["branch"][p_line_f_not_nan, branch_cols + P_FROM_IDX],
                                      ppci["branch"][p_line_t_not_nan, branch_cols + P_TO_IDX],
                                      ppci["bus"][q_bus_not_nan, bus_cols + Q_IDX],
                                      ppci["branch"][q_line_f_not_nan, branch_cols + Q_FROM_IDX],
                                      ppci["branch"][q_line_t_not_nan, branch_cols + Q_TO_IDX],
                                      ppci["bus"][v_bus_not_nan, bus_cols + VM_IDX],
                                      ppci["branch"][i_line_f_not_nan, branch_cols + IM_FROM_IDX],
                                      ppci["branch"][i_line_t_not_nan, branch_cols + IM_TO_IDX]
                                      )).real.astype(int)
    # Covariance matrix R (stored as the vector of standard deviations,
    # same ordering as z)
    r_cov = np.concatenate((ppci["bus"][p_bus_not_nan, bus_cols + P_STD],
                            ppci["branch"][p_line_f_not_nan, branch_cols + P_FROM_STD],
                            ppci["branch"][p_line_t_not_nan, branch_cols + P_TO_STD],
                            ppci["bus"][q_bus_not_nan, bus_cols + Q_STD],
                            ppci["branch"][q_line_f_not_nan, branch_cols + Q_FROM_STD],
                            ppci["branch"][q_line_t_not_nan, branch_cols + Q_TO_STD],
                            ppci["bus"][v_bus_not_nan, bus_cols + VM_STD],
                            ppci["branch"][i_line_f_not_nan, branch_cols + IM_FROM_STD],
                            ppci["branch"][i_line_t_not_nan, branch_cols + IM_TO_STD]
                            )).real.astype(np.float64)
    return z, pp_meas_indices, r_cov | b51be7da841ce5c54942834133d64263fcf29ff1 | 3,632,001 |
import json
import pickle
import zipfile
from io import BytesIO, StringIO

import numpy
def load(path):
    """Load data and reconstruct a model from a zip archive.

    Reads three archive members:
      - ``weights.npy``: numpy weight array
      - ``config.json``: JSON model configuration
      - ``data.pkl``: pickled data object

    :param path: path to the zip archive.
    :return: a ``GenericBundle`` built from the archived pieces.

    .. warning:: uses ``pickle.loads`` on archive contents -- only load
       archives from trusted sources.
    """
    with zipfile.ZipFile(path, 'r') as zf:
        # zf.read() returns bytes, so the numpy buffer must be BytesIO.
        # The original ``StringIO.StringIO(...)`` was both a Python-2
        # leftover and an AttributeError under ``from io import StringIO``.
        buf = BytesIO(zf.read('weights.npy'))
        weights = numpy.load(buf)
        config = json.loads(zf.read('config.json'))
        data = pickle.loads(zf.read('data.pkl'))
        task = pickle.loads(config['task'].encode('utf-8'))
    return GenericBundle(data, config, task, weights=weights)
def env(
    a,
    import_models=False,
    c=None,
    f=None,
    dir='',
):
    """
    Return web2py execution environment for application (a), controller (c),
    function (f).

    If import_models is True then exec all application models into the
    environment.

    :param a: application name
    :param import_models: when True, run the app's model files inside the
        returned environment; exits the process on a RestrictedError
    :param c: controller name (defaults to 'default')
    :param f: function name (defaults to 'index')
    :param dir: explicit application folder; defaults to applications/<a>
    :return: the built execution environment
    """
    request = Request()
    response = Response()
    session = Session()
    request.application = a
    # Populate the dummy environment with sensible defaults.
    if not dir:
        request.folder = os.path.join('applications', a)
    else:
        request.folder = dir
    request.controller = c or 'default'
    request.function = f or 'index'
    response.view = '%s/%s.html' % (request.controller,
                                    request.function)
    request.env.path_info = '/%s/%s/%s' % (a, c, f)
    request.env.http_host = '127.0.0.1:8000'
    request.env.remote_addr = '127.0.0.1'

    # Monkey patch so credentials checks pass.
    def check_credentials(request, other_application='admin'):
        return True

    fileutils.check_credentials = check_credentials
    environment = build_environment(request, response, session)
    if import_models:
        try:
            run_models_in(environment)
        # 'except X as e' replaces the Python-2-only 'except X, e' syntax
        # (a SyntaxError on Python 3; 'as' works on 2.6+ too).
        except RestrictedError as e:
            sys.stderr.write(e.traceback + '\n')
            sys.exit(1)
    return environment
import copy
import hashlib
import json
def get_payload_hash(payload):
    """Return a unique, key-order-independent MD5 hash of a HySDS job JSON payload.

    Transient bookkeeping keys (_disk_usage, _sciflo_job_num, _sciflo_wuid)
    are stripped from a deep copy first, so they never affect the hash and
    the caller's payload is left untouched.

    :param payload: JSON-serializable dict describing the job.
    :return: 32-character hex MD5 digest string.
    """
    clean_payload = copy.deepcopy(payload)
    for k in ('_disk_usage', '_sciflo_job_num', '_sciflo_wuid'):
        if k in clean_payload:
            del clean_payload[k]
    # sort_keys must be a bool (the original passed 2, which json merely
    # treated as truthy); sorting makes the digest independent of key order.
    return hashlib.md5(json.dumps(clean_payload, sort_keys=True,
                                  ensure_ascii=True).encode()).hexdigest()
def binStack(array, bins, id0=1):
    """
    Bin a hyperstack along its first axis using a known frame-to-bin map.

    @param array: input array to be binned (binning occurs along axis 0)
    @type array: numpy.ndarray
    @param bins: list of sublists; ``bins[i]`` holds all frame numbers that
        are averaged together to produce output frame i.  Frame numbers use
        the numbering scheme selected by ``id0``.
    @type bins: list
    @param id0: index of the first frame in ``bins`` (0 for 0-indexed,
        1 for 1-indexed); frame numbers are clamped to
        ``array.shape[0] - id0``.
    @type id0: int
    @return: binned image; same shape as the input except along axis 0
    @rtype: numpy.ndarray
    """
    last_valid = array.shape[0] - id0
    # Clamp out-of-range frame numbers and drop duplicates within each bin.
    clamped_bins = [{min(frame, last_valid) for frame in b} for b in bins]
    averaged = []
    for frame_ids in clamped_bins:
        stack = np.array([array[t] for t in frame_ids])
        averaged.append(stack.mean(axis=0))
    return np.array(averaged, dtype=array.dtype)
import torch
def knn(A, B, k, distFcn):
    """
    Return the indices of the k-nearest neighbors of A in B.

    Parameters
    ----------
    A : Tensor
        a (N,F,) tensor
    B : Tensor
        a (M,F,) tensor
    k : int
        the number of neighbors to find
    distFcn : callable
        the distance function to use

    Returns
    -------
    (LongTensor, Tensor)
        the indices of the k neighbors and their distances

    Raises
    ------
    AssertionError
        if k is lower than 1
    """
    assert k > 0, 'k value must be greater than 0'
    a, b = prepare_broadcast(A, B)
    if k == 1:
        dist, idx = torch.min(distFcn(a, b, dim=-1), 1, keepdim=True)
        return idx, dist
    # topk over negated distances picks the k smallest; flip the sign back.
    neg_dist, idx = torch.topk(-distFcn(a, b, dim=-1), k, dim=1)
    return idx, -neg_dist
def set_purpose(slack_client, channel, purpose):
    """
    Set the purpose of a given channel via the Slack Web API.

    :param slack_client: Slack client used to issue the API call.
    :param channel: identifier of the channel to update.
    :param purpose: new purpose text.
    :return: raw response of the ``channels.setPurpose`` call.
    """
    return slack_client.api_call(
        "channels.setPurpose", purpose=purpose, channel=channel)
import os
def generate_aes_key(size=256):
    """
    Generate a random AES key of the requested bit size.

    :param size: key length in bits, default 256
    :return: ``size // 8`` cryptographically secure random bytes
    """
    n_bytes = int(size / 8)  # bits -> bytes
    return os.urandom(n_bytes)
def masked_minimum(data, mask, dim=1):
    """Compute the axis-wise minimum over the masked-in elements.

    Args:
      data: 2-D float `Tensor` of size [n, m].
      mask: 2-D boolean `Tensor` of size [n, m].
      dim: The dimension over which to compute the minimum.

    Returns:
      masked_minimum: N-D `Tensor`.
        The minimized dimension is of size 1 after the operation.
    """
    # Shift everything down by the axis max so masked-out entries (which
    # become 0 after multiplying by the mask) can never win the minimum,
    # then shift the result back up.
    axis_max = tf.reduce_max(data, dim, keepdims=True)
    shifted = tf.multiply(data - axis_max, mask)
    return tf.reduce_min(shifted, dim, keepdims=True) + axis_max
from typing import Tuple
import subprocess
import os
def spawn_node_process(output_dir: os.PathLike) -> Tuple[subprocess.Popen, str]:
    """
    Spawn an rfbrowser node process, that can be shared between library instances.

    Usage example:
        rc = 1
        background_process, port = spawn_node_process(ATEST_OUTPUT / "playwright-log.txt")
        try:
            os.environ["ROBOT_FRAMEWORK_BROWSER_NODE_PORT"] = port
            rc = _run_pabot(args)
        finally:
            background_process.kill()

    :param output_dir: path of the log file the node process writes to.
    :return: (process handle, port string the node process listens on).
    """
    # The annotation is os.PathLike: the original bare `PathLike` was an
    # unresolved name (only `Tuple` is imported), raising NameError at
    # definition time.
    # NOTE: the log file handle is intentionally not closed here -- it must
    # stay open for the lifetime of the spawned process, which inherits it.
    logfile = open(output_dir, "w")
    os.environ["DEBUG"] = "pw:api"
    os.environ["PLAYWRIGHT_BROWSERS_PATH"] = "0"
    port = str(find_free_port())
    process = subprocess.Popen(
        [
            "node",
            "Browser/wrapper/index.js",
            port,
        ],
        stdout=logfile,
        stderr=subprocess.STDOUT,
    )
    return process, port
import json
def decode_stderr_json(stderr):
    """Return a list of JSON messages decoded from a stderr string.

    Each non-blank line is parsed as one JSON document.  Lines that are not
    valid JSON are kept as ERROR-level message dicts with the raw line
    prefixed by '!', so no output is silently dropped.

    :param stderr: captured stderr text (newline separated); may be empty
        or None.
    :return: list of decoded objects / fallback error dicts.
    """
    # - check for blank input
    if not stderr:
        # - nothing to do
        return list()
    # - split the input (based on newlines) into a list of json strings
    output = list()
    for line in stderr.split('\n'):
        if not line:
            # - skip blank lines: no valid json or message to decode
            continue
        # The original pre-initialized json_message to a throwaway list()
        # twice; both assignments were dead code and misleadingly typed.
        try:
            json_message = json.loads(line)
        except ValueError:
            # - if json cannot be decoded, just log as ERROR prefixed by '!'
            json_message = {'level': 'ERROR', 'message': '!' + line}
        output.append(json_message)
    return output
import collections
def hash_params(params):
    """
    Construct a data structure of parameters that is hashable.

    This requires changing any mutable data structures into immutable ones.
    We chose a frozenset because role parameters have to be unique.

    .. warning:: this does not handle unhashable scalars. Two things
        mitigate that limitation:
        1) There shouldn't be any unhashable scalars specified in the yaml
        2) Our only choice would be to return an error anyway.

    NOTE(review): collections.Container/Mapping/Set/Sequence moved to
    collections.abc; the bare aliases used below are removed in Python
    3.10 -- confirm the target interpreter.  ``text_type``/``binary_type``
    are presumably the six aliases, imported elsewhere in this module.
    """
    # Any container is unhashable if it contains unhashable items (for
    # instance, tuple() is a Hashable subclass but if it contains a dict, it
    # cannot be hashed)
    if isinstance(params, collections.Container) and not isinstance(params, (text_type, binary_type)):
        if isinstance(params, collections.Mapping):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params.items())
            except TypeError:
                new_params = set()
                for k, v in params.items():
                    # Hash each entry individually
                    # NOTE(review): set.update() iterates the (k, hash)
                    # tuple, adding the key and the hashed value as two
                    # SEPARATE elements rather than as a pair --
                    # .add((k, hash_params(v))) may be the intent; confirm.
                    new_params.update((k, hash_params(v)))
                new_params = frozenset(new_params)
        elif isinstance(params, (collections.Set, collections.Sequence)):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params)
            except TypeError:
                new_params = set()
                for v in params:
                    # Hash each entry individually
                    new_params.update(hash_params(v))
                new_params = frozenset(new_params)
        else:
            # This is just a guess.
            new_params = frozenset(params)
        return new_params
    # Note: We do not handle unhashable scalars but our only choice would be
    # to raise an error there anyway.
    return frozenset((params,)) | eb83122e2b1f7097917f1029e84f2053352127f4 | 3,632,012 |
def get_validate_result_form(tel_num, validate_code):
    """
    Assemble the POST form data for get_validate_result.

    :param tel_num: Tel number
    :param validate_code: Validate code read from the captcha image
    :return: Params in a dict
    """
    return {
        'source': 'wsyyt',
        'telno': tel_num,
        'validcode': validate_code,
    }
def create_user():
    """Create a new user record.

    Reads the JSON request body (NOTE: the local name ``request`` shadows
    the conventional Flask request object; ``flask.request`` is still used
    explicitly below), validates required keys, stores the record in the
    module-level USERS mapping, and returns it with HTTP 201.

    Aborts 409 when the userid already exists and 422 when a required key
    is missing.  NOTE(review): when the body is not JSON, get_json() may
    return None and the subscriptions below raise TypeError, which the
    KeyError handler does not catch -- confirm intended behaviour.
    """
    request = flask.request.get_json()
    try:
        # NOTE(jk0): We expect all of these keys to exist to be considered a
        # valid user record. Ignore all others.
        user = _lookup_user(request["userid"])
        if user:
            flask.abort(409)
        USERS[request["userid"]] = {
            "first_name": request["first_name"],
            "last_name": request["last_name"],
            "userid": request["userid"],
            "groups": request["groups"]
        }
    except KeyError:
        flask.abort(422)
    # NOTE(review): re-reads groups via flask.request.json instead of the
    # already-parsed dict above -- same data, but inconsistent style.
    _set_groups(request, flask.request.json["groups"])
    return flask.jsonify(USERS[request["userid"]]), 201 | 8d98667aba51172535768d6c57d9fc8f2e2e7821 | 3,632,014 |
def softmax_cross_entropy(logits, labels):
    """
    Cross-entropy loss of a softmax over `logits` against integer `labels`.
    """
    log_probs = jax.nn.log_softmax(logits)
    targets = hk.one_hot(labels, logits.shape[-1])
    return -jnp.sum(log_probs * targets, axis=-1)
def get_category_detail(comp_id, cat_id):
    """Retrieve information about a category and the submissions reviewed in it.

    :param comp_id: competition id the category belongs to.
    :param cat_id: category id to look up (404s when absent).
    :return: flask JSON response: [category dict, [submission dicts, ...]].
    """
    category = Category.query.filter_by(id=cat_id).filter_by(
        comp_id=comp_id).first_or_404()
    json = [category.to_json()]
    submissions = Submission.query.filter_by(comp_id=comp_id).all()
    cat_submissions = []
    # The original iterated with a discarded enumerate() index and an
    # index-based inner loop; iterate the objects directly instead.
    # A submission is still appended once per matching category entry,
    # mirroring the original behaviour exactly.
    for submission in submissions:
        for submission_category in submission.categories:
            if submission_category.id == cat_id:
                cat_submissions.append(submission.to_json(detail=False))
    json.append(cat_submissions)
    return jsonify(json)
def build_input_from_segments(persona, history, reply, vocab,
                              labels=False, with_eos=True):
    """
    Build a sequence of input from 3 segments:
    persona, history and last reply.

    :param persona: iterable of token-id lists describing the persona.
    :param history: list of token-id lists, one per dialogue turn.
    :param vocab: mapping indexed with SPECIAL_TOKENS[:-1], yielding
        (bos, eos, speaker1, speaker2) ids -- SPECIAL_TOKENS is a
        module-level constant defined elsewhere.
    :param reply: token-id list of the candidate reply.
    :param labels: when True, expose the reply tokens as LM labels
        (everything else stays masked with -100).
    :param with_eos: append the eos token to the reply segment.
    :return: dict with "input_ids", "token_type_ids" and "labels".
    """
    bos, eos, speaker1, speaker2 = vocab[SPECIAL_TOKENS[:-1]]
    # Segments: [bos + flattened persona] + history turns + [reply (+ eos)].
    sequence = [[bos] + list(chain(*persona))] + history + [reply + ([eos] if with_eos else [])]
    # Prefix every segment after the persona with a speaker token,
    # alternating backwards so the final (reply) segment gets speaker2.
    sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
    instance = {}
    instance["input_ids"] = list(chain(*sequence))
    # token_type_ids mark which speaker each position belongs to.
    instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s]
    # -100 is the ignore-index that masks positions from the LM loss.
    instance["labels"] = [-100] * len(instance["input_ids"])
    if labels:
        # Unmask only the reply tokens; its leading speaker token stays masked.
        instance["labels"] = ([-100] * sum(len(s) for s in sequence[:-1])) + [-100] + sequence[-1][1:]
    return instance | 3b47bbc0fb7c666188f36a1e1df520d84327d336 | 3,632,017 |
from scipy.stats import norm
def dual_gaussian(x, amp1=1.0, mean1=0.0, std1=1.0, amp2=1.0, mean2=0.0, std2=1.0):
    """Evaluate the sum of two scaled Gaussian PDFs at x.

    Parameters
    ----------
    x : array
        Function argument
    amp1, mean1, std1 : float
        Amplitude, mean and standard deviation of the first Gaussian
    amp2, mean2, std2 : float
        Amplitude, mean and standard deviation of the second Gaussian

    Returns
    -------
    float or array
        ``amp1 * N(x; mean1, std1) + amp2 * N(x; mean2, std2)``, or
        ``np.nan`` when either standard deviation is non-positive.
    """
    if min(std1, std2) <= 0:
        return np.nan
    first = amp1 * norm.pdf(x, mean1, std1)
    second = amp2 * norm.pdf(x, mean2, std2)
    return first + second
import os
def getFileInfoFromXML(thisfile):
    """Extract (lfn, pfn, guid) from a file element of the catalog XML.

    The LFN is no longer present in the XML, so it is derived from the
    basename of the PFN (possible problem with LFN file name extensions).
    """
    pfn_node = thisfile.getElementsByTagName("pfn")[0]
    pfn = pfn_node.getAttribute("name")
    guid = thisfile.getAttribute("ID")
    lfn = os.path.basename(pfn)
    return lfn, pfn, guid
def get_district_info(request, code):
    """
    Get district info (including its immediate children) by 'code'.

    :param request: HTTP request (unused beyond the view signature).
    :param code: district code to look up.
    :return: MyJsonResponse with status 0 and the district data (plus a
        'children' list) on success, status 1 with a message otherwise.
    """
    try:
        district = District.objects.get(code=code)
        child_districts = District.objects.filter(parent=district.code)
        data = district_model2dict(district)
        children = [district_model2dict(c) for c in child_districts]
        # setdefault only fills 'children' when absent -- presumably
        # district_model2dict never sets it, making this a plain
        # assignment in practice; verify.
        data.setdefault('children', children)
        rsp = dict(status=0, data=data)
    except Exception:
        # NOTE(review): broad except reports *any* failure as
        # 'No such district', masking unrelated errors.
        rsp = dict(status=1, message='No such district')
    return MyJsonResponse(rsp) | 57136fe7d77375b6d9c258b42bccf24e4c7a3750 | 3,632,020 |
def calculate_price(prices, concentrations):
    """
    Compute the price of a formulation in $USD / kg.

    :param prices: ingredient prices in $USD / kg.
    :param concentrations: ingredient concentrations in mass %, normalised
        internally so they sum to 1.
    :return: concentration-weighted sum of the ingredient prices.
    """
    weights = np.asarray(concentrations) / np.sum(concentrations)
    return sum(prices * weights)
def cos_convolve(evidence):
    """Take as input the classifier evidence for single trials in dictionary format
    and return aligned evidence and evidence convolved with a cosine.
    Input:
        dictionary:
            accuracy : ndarray
                dimensions: time
                matrix containing class predictions for each time point.
            single_trial_evidence: ndarray
                dimensions: trials, classes, time
                evidence for each class, for each timepoint, for each trial
            y: ndarray
                dimensions: ndarray
                vector of integers indicating the class on each trial.
    returns:
        dictionary, same as input with added:
            centered_prediction: ndarray
                dimensions: classes, time
                matrix containing evidence for each class for each time point.
            cos_convolved: ndarray
                dimensions: time
                cosine convolved evidence for each timepoint.
            single_trial_cosine_fit: ndarray
                dimensions: trial by time
                cosine convolved evidence for each timepoint and each trial.
            single_trial_ev_centered: ndarray
                dimensions: trials, classes, time
                evidence for each class, for each timepoint, for each trial
                centered around the class of interest.

    NOTE(review): matrix_vector_shift, convolve_matrix_with_cosine and
    least_squares_fit_cos are helpers defined elsewhere in this module.
    """
    n_trials, n_bins, n_time = evidence["single_trial_evidence"].shape
    # Pre-allocate outputs.
    evidence_shifted = np.zeros((n_trials, n_bins, n_time))
    cos_ev_single_trial = np.zeros((n_trials, n_time))
    centered_prediction = np.zeros((n_bins, n_time))
    for tp in range(n_time):
        # we want the predicted class to always be in the centre.
        evidence_shifted[:,:,tp] = matrix_vector_shift(evidence["single_trial_evidence"][:, :, tp], evidence["y"], n_bins)
        centered_prediction[:, tp] = evidence_shifted[:, :, tp].mean(0)  # avg across trials
    # Per-trial cosine convolution of the centered tuning curves.
    for trl in range(n_trials):
        cos_ev_single_trial[trl, :] = convolve_matrix_with_cosine(evidence_shifted[trl, :, :])
    # convolve trial average tuning curves with cosine
    cos_convolved = convolve_matrix_with_cosine(centered_prediction)
    # fit tuning curve
    out_tc = least_squares_fit_cos(centered_prediction)
    evidence["centered_prediction"] = centered_prediction
    evidence["cos_convolved"] = cos_convolved
    evidence["tuning_curve_conv"] = out_tc['amplitude']
    evidence["single_trial_ev_centered"] = evidence_shifted
    evidence["single_trial_cosine_fit"] = cos_ev_single_trial
    return evidence | cf292eb0af8d4f44a0d64c02b9a09e6b7d149eb9 | 3,632,022 |
from io import StringIO
def get_image_info(real_image_type, body):
    """Return (md5, width, height, pixel_count) for an image payload.

    Only webp, gif, png, jpeg, bmp are supported.  webp is decoded with the
    external ``decode`` helper; every other type goes through PIL's
    ``Image.open`` (``decode``, ``Image`` and ``md5`` are imported
    elsewhere in this module).

    NOTE(review): ``body`` is raw image data but is wrapped in StringIO --
    this only works on Python 2's bytes-based StringIO; under
    ``from io import StringIO`` on Python 3 this raises TypeError and
    BytesIO would be required.  Confirm the target interpreter.
    """
    image_fp = StringIO(body)
    if real_image_type == 'webp':
        data = image_fp.read()
        width, height = decode.GetInfo(data)
        image_pix_count = int(width) * int(height)
    else:
        image = Image.open(image_fp)
        width, height = image.size
        image_pix_count = width * height
    # rewind so the md5 digest covers the whole payload
    image_fp.seek(0)
    md5_code = md5(image_fp.read()).hexdigest()
    return md5_code, width, height, image_pix_count | 9570de917deafdcaa2d2a2d208e40710bcb12d04 | 3,632,023 |
def estimate_ranks(layer):
    """Estimate decomposition ranks for a layer's weight tensor.

    Unfolds the weight tensor along modes 0 and 1 and estimates the rank of
    each unfolding with EVBMF; the sizes of the returned diagonals are the
    rank estimates.
    source: https://github.com/jacobgil/pytorch-tensor-decompositions/blob/master/decompositions.py
    """
    weights = layer.weight.data
    mode0_unfolding = tl.base.unfold(weights, 0)
    mode1_unfolding = tl.base.unfold(weights, 1)
    _, diag0, _, _ = VBMF.EVBMF(mode0_unfolding)
    _, diag1, _, _ = VBMF.EVBMF(mode1_unfolding)
    return [diag0.shape[0], diag1.shape[1]]
def _add_batch_dim(img):
    """Prepend a batch dimension of size 1 to an HWC image tensor.

    Many TF functions require a batch dimension; get_hwc is called purely
    for its dimension validation side effect.
    """
    get_hwc(img)  # validate dimensions
    batched = tf.expand_dims(img, 0)
    return batched
def read_inventory_file(inventory):
    """
    Read an inventory file and return the list of host definition dicts.

    :param str inventory: The inventory file
    :return list[dict, ..]: List of hostname and IP definitions
    :raises ValueError: when an item lacks a 'dst', when a 'via' target is
        not itself listed as a 'dst', or when an item routes via itself
    """
    log.info("Reading and validating inventory file")
    inventory_hosts = load_json(inventory)
    if any('dst' not in item for item in inventory_hosts):
        raise ValueError(
            "Not all inventory items specified a "
            "destination like {'dst': '1.2.3.4'}"
        )
    destinations = [item['dst'] for item in inventory_hosts]
    vias = {item.get('via') for item in inventory_hosts if item.get('via')}
    if not vias.issubset(destinations):
        raise ValueError(
            "Specified a 'via' item that "
            "is not defined as a 'dst' item."
        )
    # Note that there is no cycle detection here
    if any(item['dst'] == item.get('via') for item in inventory_hosts):
        raise ValueError(
            "You can not specify the "
            "same 'via' as 'dst' for one item"
        )
    return inventory_hosts
def ground_truth_to_word(ground_truth):
    """
    Return the word string decoded from the input ground_truth matrix.

    Each row is argmax-ed into a character index of ``config.CHAR_VECTOR``;
    indices past the end of the alphabet are skipped (``config`` and ``np``
    are imported elsewhere in this module).
    """
    try:
        return ''.join([config.CHAR_VECTOR[np.argmax(arr)] for arr in ground_truth if np.argmax(arr) < len(config.CHAR_VECTOR)])
    except Exception as ex:
        # NOTE(review): debugging leftover -- catches everything, prints,
        # then BLOCKS on input(), and implicitly returns None.  Probably
        # should log and re-raise instead.
        print(ground_truth)
        print(ex)
        input() | ae07266f34f01d605705d60e7c331cefb1fb845a | 3,632,027 |
import itertools
def build_dataframe(dimension_names, dimension_members, data_values,
                    null_values, sd_values):
    """Build a dataframe from dimensions and data.

    The rows are the cartesian product of the dimension members; the data
    series is attached as a 'DATA' column.

    Args:
        dimension_names (list of string)
        dimension_members (list of string)
        data_values(Series): pandas series with the data values column.
        null_values(str): regex pattern for null values in the px file.
        sd_values(str): regex pattern for statistically-disclosured values
            in the px file.

    Returns:
        df (pandas dataframe)
    """
    # one row per combination of dimension members
    rows = list(itertools.product(*dimension_members))
    df = DataFrame(data=rows, columns=dimension_names)
    df['DATA'] = data_values
    # null values become empty strings, disclosure markers become NaN
    df = df.replace({'DATA': {null_values: ''}}, regex=True)
    return df.replace({'DATA': {sd_values: nan}}, regex=True)
def fs_url_exists(fs_url):
    """
    Check whether a filesystem URL can be opened.

    :param fs_url: fs_url string
    :return: True when fs.open_fs succeeds, False on CreateFailed
    """
    try:
        fs.open_fs(fs_url)
    except fs.errors.CreateFailed:
        return False
    else:
        return True
def _serialize_noise_model(config):
"""Traverse the dictionary looking for noise_model keys and apply
a transformation so it can be serialized.
Args:
config (dict): The dictionary to traverse
Returns:
dict: The transformed dictionary
"""
for k, v in config.items():
if isinstance(config[k], dict):
_serialize_noise_model(config[k])
else:
if k == 'noise_model':
try:
config[k] = v.to_dict(serializable=True)
except AttributeError:
# if .to_dict() fails is probably because the noise_model
# has been already transformed elsewhere
pass
return config | f3453e174d5ba858b9eec678e7bc1574f74d50eb | 3,632,030 |
def create_app() -> falcon.API:
    """
    Typical application factory style setup.

    Returns:
        falcon.API: The falcon API object.
    """
    db_engine = create_engine("sqlite:///")
    api = falcon.API(middleware=[DbSessionMiddleware(db_engine)])
    api.add_route("/", ExampleResource())
    return api
def show_tracker(secure=False):
    """
    Output the analytics tracker code.

    Returns a context dict containing 'analytics_code' when
    settings.ANALYTICS defines a truthy ANALYTICS_CODE, otherwise an empty
    dict.  The ``secure`` argument is accepted for interface compatibility
    but never referenced.
    """
    analytics_settings = getattr(settings, 'ANALYTICS', {})
    if not analytics_settings:
        return {}
    code = analytics_settings.get('ANALYTICS_CODE')
    if not code:
        return {}
    return {"analytics_code": code}
from typing import Awaitable
import re
async def formatCommand(cls:"PhaazebotDiscord", Command:DiscordCommand, CommandContext:DiscordCommandContext, direct_call:bool=False) -> dict:
    """
    This function is supposed to do everything.
    It takes the placeholders in Command.content and replaces them with the wanted data.
    That also applies to module/function calls in Command.content.
    There are 2 main stages a command can have:
    a 'simple' command that has one clear return from a function,
    and 'complex' commands that may have multiple fields in which single return values from a function are inserted.
    """
    # it's a 'simple' function
    # get the associated function and execute it
    # and return content
    if not Command.complex:
        function_str:str = Command.function
        # get function from functions index
        func:Awaitable = getDiscordCommandFunction(function_str)
        # this happens if a user enters @phaazebot and then some garbage
        if direct_call and func.__name__ == "textOnly":
            cls.BASE.Logger.debug(f"(Discord) direct call failed, user entered: '{function_str}'", require="discord:commands")
            return {}
        cls.BASE.Logger.debug(f"(Discord) execute command '{func.__name__}'", require="discord:commands")
        return await func(cls, Command, CommandContext)
    else:
        # TODO: complex functions are not implemented yet.
        # NOTE(review): the print() calls below are debugging leftovers.
        FunctionHits = re.search(ReDiscord.CommandFunctionString, Command.content)
        print(FunctionHits)
        VarHits = re.search(ReDiscord.CommandVariableString, Command.content)
        print(VarHits)
        return { "content": "Complex functions are under construction", "embed": None } | 306e44d6b493c08140f24834d715669afb00477b | 3,632,033 |
def k8s_net_client(k8s_conf):
    """
    Retrieve the Kubernetes networking API client.

    :param k8s_conf: the k8s configuration used to deploy the cluster
    :return: a kubernetes.client.NetworkingV1Api instance
    """
    logger.debug('Retrieving K8s networking API client')
    conn = get_client_conn(k8s_conf)
    return client.NetworkingV1Api(conn)
import os
def sequential_name(folder, basename):
    """
    Produce a save path for 'basename' inside 'folder' that avoids
    overwriting files previously saved with names from this function.

    If the folder does not exist, or no existing file name contains
    'basename' as a substring, the plain path is returned; otherwise a
    " (N)" suffix is appended, where N is the number of matching names.
    """
    plain_path = '{:s}/{:s}'.format(folder, basename)
    if not os.access(folder, os.F_OK):
        return plain_path
    matches = sum(basename in name for name in os.listdir(folder))
    if matches:
        return '{:s}/{:s} ({:d})'.format(folder, basename, matches)
    return plain_path
def metric_max_over_ground_truths(metric_fn, predictions, ground_truths):
    """Average, over predictions, of the best score against any ground truth.

    This is a bit different than SQuAD in that there are multiple answers
    **and** predictions that we average over. For some situations (e.g.,
    *top k* beams or multiple human references) we might want the average
    performance. In most cases, however, predictions has length 1.

    Args:
        metric_fn: Callable on (prediction, ground_truth).
        predictions: List of whitespace separated prediction tokens.
        ground_truths: List of whitespace separated answer tokens.

    Returns:
        max_score: Mean over predictions of the max output of metric_fn.
    """
    best_scores = [
        max(metric_fn(prediction, truth) for truth in ground_truths)
        for prediction in predictions
    ]
    return sum(best_scores) / len(best_scores)
def parse_gsod_data(filename):
    """Parse Global Summary of the Day (GSOD) data from a comma separated
    .txt file.

    Source: https://www7.ncdc.noaa.gov/CDO/cdoselect.cmd?datasetabbv=GSOD&countryabbv=&georegionabbv=
    Format Specification: https://www7.ncdc.noaa.gov/CDO/GSOD_DESC.txt

    Parameters:
        filename(Path): the .txt/.csv file to parse
    Return:
        pandas DataFrame containing GSOD meteorological data converted into
        SI units (Fahrenheit -> Celsius, miles -> m, knots -> m/s)

    NOTE(review): ``read_csv`` is imported from pandas elsewhere in this
    module.
    """
    # parse .csv
    data = read_csv(
        filename,
        sep=",",
        parse_dates=["YEARMODA"],
        index_col="YEARMODA",
        infer_datetime_format=True,
        skipinitialspace=True,
    )
    # remove space from end of column names (reserved for flag indent)
    data.columns = [x.strip() for x in data.columns]
    # remove '*' flag from MAX and MIN
    for key in ["MAX", "MIN"]:
        data[key] = data[key].map(lambda x: float(x.strip("*")))
    # Replace per-column missing-data sentinels.
    # NOTE(review): replacing with None can leave object-dtype columns;
    # the unit conversions below assume pandas still handles the
    # arithmetic -- confirm with real data (NaN may be preferable).
    for key in ["WBAN"]:
        data[key].replace({99999: None}, inplace=True)
    for key in ["TEMP", "DEWP", "SLP", "STP", "MAX", "MIN"]:
        data[key].replace({9999.9: None}, inplace=True)
    for key in ["VISIB", "WDSP", "MXSPD", "GUST", "SNDP"]:
        data[key].replace({999.9: None}, inplace=True)
    for key in ["PRCP"]:
        data[key].replace({99.99: None}, inplace=True)
    # convert Fahrenheit to Celsius
    for key in ["TEMP", "DEWP", "MAX", "MIN"]:
        data[key] = (data[key] - 32) / 1.8
    # convert miles to meters
    for key in ["VISIB"]:
        data[key] = data[key] * 1609.344
    # convert knots to m/s
    for key in ["WDSP", "MXSPD", "GUST"]:
        data[key] = data[key] * 0.51444444444444
    return data | 963444222e7627c25354ec6d0d891df4fee4fe8c | 3,632,037 |
def deserialize(xml):
    """Deserialize a Pubmed XML response into an article dict.

    :param xml: XML string containing at most one PubmedArticle element.
    :return: dict with keys pmid (optional), date, journal (optional),
        authors, title (optional), abstract -- or None when the XML
        contains no PubmedArticle.
    """
    article = {}
    root = ET.fromstring(xml)
    article_el = root.find('.//PubmedArticle')
    if article_el is None:
        print('INFO: XML did not contain a Pubmed Article.')
        return None
    pmid_el = article_el.find('.//MedlineCitation/PMID')
    if pmid_el is not None:
        # (the original also rebound pmid_el to its text in a chained
        # assignment; only the dict entry was ever used)
        article['pmid'] = pmid_el.text
    # Work on parsing the publication date, preferring the journal issue.
    date_published = None
    journal_el = article_el.find('.//MedlineCitation/Article/Journal')
    if journal_el is not None:
        # First try and get the published date from the journal element.
        journal_date_el = journal_el.find('.//JournalIssue/PubDate')
        date_published = _deserialize_date_published(journal_date_el)
    if date_published is None or len(date_published) < 5:
        # If the journal date doesn't exist or is too short, try and get
        # the date published from the article date element.
        article_el_date = article_el.find('.//MedlineCitation/Article/ArticleDate')
        article_date_published = _deserialize_date_published(article_el_date)
        # If we have an article date published and it's length is longer
        # than the journal date, use it.
        if article_date_published is not None:
            if date_published is None or len(article_date_published) > len(date_published):
                date_published = article_date_published
    # If we still don't have a date published fall back to the MedlineDate element.
    if date_published is None:
        medline_date_el = article_el.find('.//MedlineCitation/Article/Journal/JournalIssue/PubDate/MedlineDate')
        if medline_date_el is not None:
            date_published = medline_date_el.text
    article['date'] = date_published if date_published is not None else ''
    if journal_el is not None:
        # Journal Title (trailing period stripped).
        journal_title_el = journal_el.find('.//Title')
        if journal_title_el is not None:
            journal_title = journal_title_el.text.rstrip('.')
        else:
            journal_title = ''
        article['journal'] = journal_title
        # Issue, Volume, Pagination -> "Volume(Issue):Pages".
        volume_el = journal_el.find('.//Volume')
        publication_data = volume_el.text if volume_el is not None else ''
        issue_el = journal_el.find('.//Issue')
        if issue_el is not None:
            publication_data = publication_data + '(' + issue_el.text + ')'
        pagination_el = article_el.find('.//MedlineCitation/Article/Pagination/MedlinePgn')
        # BUG FIX: the original's conditional expression referenced the
        # undefined name 'publication_date' when pagination was absent,
        # raising NameError for any article without a MedlinePgn element.
        if pagination_el is not None:
            publication_data = publication_data + ':' + pagination_el.text
        if len(publication_data) > 0:
            article['date'] = article['date'] + ';' + publication_data + '.'
        else:
            article['date'] = article['date'] + '.'
    # Try and parse journal authors.
    article['authors'] = _deserialize_authors(article_el.find('.//MedlineCitation/Article/AuthorList'))
    # Parse title.
    article_title_el = article_el.find('.//MedlineCitation/Article/ArticleTitle')
    if article_title_el is not None:
        article['title'] = article_title_el.text
    # Parse abstract.
    abstract_text_el = article_el.find('.//MedlineCitation/Article/Abstract/AbstractText')
    if abstract_text_el is not None:
        article['abstract'] = abstract_text_el.text
    else:
        article['abstract'] = ''
    return article
def attSummaryDict(request, reqs, flist):
    """ Summarize the value counts of the chosen most interesting fields.

    Args:
        request: HTTP request; if ``request.GET['sortby'] == 'count'`` each
            value list is sorted by count (descending) instead of by name.
        reqs: list of dict-like records to summarize.
        flist: field names to summarize across ``reqs``.

    Returns:
        A list (not a dict) of ``{'field': f, 'list': [{'kname': value,
        'kvalue': count}, ...]}`` entries, sorted by field name. Only truthy
        field values are counted.
    """
    sumd = {}
    for req in reqs:
        for f in flist:
            if f in req and req[f]:
                if f not in sumd:
                    sumd[f] = {}
                if req[f] not in sumd[f]:
                    sumd[f][req[f]] = 0
                sumd[f][req[f]] += 1
    ## convert to ordered lists
    suml = []
    for f in sumd:
        itemd = {}
        itemd['field'] = f
        # BUGFIX: in Python 3 dict.keys() returns a view with no .sort();
        # use sorted() to get a sortable list of the observed values.
        kys = sorted(sumd[f].keys())
        iteml = [{'kname': ky, 'kvalue': sumd[f][ky]} for ky in kys]
        if 'sortby' in request.GET and request.GET['sortby'] == 'count':
            iteml = sorted(iteml, key=lambda x: x['kvalue'], reverse=True)
        else:
            iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['field'])
    return suml
def immutable_kwargs(
    kwargs: tp.Dict[str, str]
) -> tp.Tuple[tp.Tuple[str, str], ...]:
    """
    Freeze str-typed kwargs into a hashable tuple of (key, value) pairs,
    preserving the dict's iteration order.
    """
    return tuple(kwargs.items())
import os
def get_markings(catalogue_dir, cid, result):
    """
    Take a directory of catalogue entries and extract the specific metadata
    related to CID, adding it to ``result`` under the fields defined in
    serene_metadata (via CATALOGUE_MAPPINGS).

    Args:
        catalogue_dir: directory of catalogue entries (path normalized here).
        cid: catalogue entry identifier to look up.
        result: dict to be populated; also returned.

    Returns:
        ``result`` with one entry per mapped catalogue field that produced at
        least one non-None mapped value.

    Raises:
        AssertionError: if a REQUIRED_INDEX_FIELDS entry (not already covered
        at the LOAD stage) is missing from ``result``.
    """
    catalogue = load_markings(os.path.normpath(catalogue_dir))
    catalogue_markings = catalogue[cid]
    # BUGFIX: .iteritems() is Python 2 only; use .items().
    for catalogue_entry, serene_field in CATALOGUE_MAPPINGS.items():
        if catalogue_entry in catalogue_markings:
            raw_value = catalogue_markings[catalogue_entry]
            if isinstance(raw_value, list):
                values = [serene_field.catalogue_map(v) for v in raw_value]
            else:
                values = [serene_field.catalogue_map(raw_value)]
            # Remove any None values. BUGFIX: a filter object is always truthy
            # in Python 3, so materialize a real list before the emptiness test.
            values = [v for v in values if v is not None]
            if values:
                result[serene_field.name()] = serene_field.validate_final(values)
    for field in REQUIRED_INDEX_FIELDS:
        if field in REQUIRED_LOAD_FIELDS:
            #if it was required at the LOAD stage we don't test for it here - we test for it in the record_builder
            continue
        assert field in result, '"{}" is required at the index step but was not present in this catalogue entry {}'.format(field, cid)
    return result
import numpy
def revise_max_intake(
        max_intake, total_digestibility, energy_intake, energy_maintenance,
        degr_protein_intake, protein_req, animal_type, CRD1, CRD2):
    """Calculate revised maximum intake from protein content of the diet.
    When animals are unable to obtain enough protein from the diet to maintain
    microbial activity in the rumen, the passage rate of feed slows and the
    animal is able to eat less forage overall. Here that dynamic is reflected
    by reducing maximum potential intake if protein content of the initially
    selected diet is low.
    Parameters:
        max_intake (numpy.ndarray): derived, initial maximum potential daily
            intake of forage
        total_digestibility (numpy.ndarray): derived, average dry matter
            digestibility of forage in the diet
        energy_intake (numpy.ndarray): derived, total metabolizable energy
            intake from the diet
        energy_maintenance (numpy.ndarray): derived, energy requirements of
            maintenance
        degr_protein_intake (numpy.ndarray): derived, total rumen degradable
            protein intake from the diet
        protein_req (numpy.ndarray): derived, rumen degradable protein required
            to maintain microbial activity
        animal_type (numpy.ndarray): parameter, integer indication of animal
            type or breed:
            1: Bos indicus, default
            2: Bos taurus
            3: Bos indicus * taurus cross
            4: sheep or goat
            5: camelid
            6: hindgut fermenter
        CRD1 (numpy.ndarray): parameter, intercept of regression predicting
            degradability of protein from digestibility of the diet
        CRD2 (numpy.ndarray): parameter, slope of relationship predicting
            degradability of protein from digestibility of the diet
    Returns:
        max_intake_revised, revised maximum potential daily intake of forage
    """
    # Pixels are processed only where every input carries valid (non-nodata)
    # values; all other pixels keep their incoming max_intake unchanged.
    valid_mask = (
        (max_intake != _TARGET_NODATA) &
        (total_digestibility != _TARGET_NODATA) &
        (energy_intake != _TARGET_NODATA) &
        (energy_maintenance != _TARGET_NODATA) &
        (degr_protein_intake != _TARGET_NODATA) &
        (protein_req != _TARGET_NODATA) &
        (animal_type != _IC_NODATA) &
        (CRD1 != _IC_NODATA) &
        (CRD2 != _IC_NODATA))
    corrected_protein_intake = degr_protein_intake.copy()
    # Feeding level: energy intake as a multiple of maintenance, minus 1
    # (0 == exactly at maintenance; positive == above maintenance).
    feeding_level = numpy.zeros(max_intake.shape, dtype=numpy.float32)
    feeding_level[valid_mask] = (
        energy_intake[valid_mask] / energy_maintenance[valid_mask]) - 1
    high_intake_mask = (
        (feeding_level > 0) &
        valid_mask)
    # Above maintenance, protein intake is adjusted downward via the
    # CRD1/CRD2 degradability regression applied to the feeding level.
    corrected_protein_intake[high_intake_mask] = (
        degr_protein_intake[high_intake_mask] * (
            1. - (CRD1[high_intake_mask] - CRD2[high_intake_mask] *
                  total_digestibility[high_intake_mask]) *
            feeding_level[high_intake_mask]))
    reduction_factor = numpy.empty(max_intake.shape, dtype=numpy.float32)
    reduction_factor[valid_mask] = 1.
    # maximum intake is reduced by the ratio of protein intake to requirement
    insuff_mask = ((protein_req > corrected_protein_intake) & valid_mask)
    reduction_factor[insuff_mask] = (
        corrected_protein_intake[insuff_mask] / protein_req[insuff_mask])
    # for Bos indicus cattle, intake is reduced by only half of this factor
    insuff_indicus_mask = ((animal_type == 1) & insuff_mask)
    reduction_factor[insuff_indicus_mask] = (
        1. - ((1. - (
            corrected_protein_intake[insuff_indicus_mask] /
            protein_req[insuff_indicus_mask])) * 0.5))
    # for cattle of crossed indicus * taurus breed, intake is reduced by 75%
    # of this factor
    insuff_cross_mask = ((animal_type == 3) & insuff_mask)
    reduction_factor[insuff_cross_mask] = (
        1. - ((1. - (
            corrected_protein_intake[insuff_cross_mask] /
            protein_req[insuff_cross_mask])) * 0.75))
    # apply the reduction factor
    max_intake_revised = max_intake.copy()
    max_intake_revised[valid_mask] = (
        max_intake[valid_mask] * reduction_factor[valid_mask])
    return max_intake_revised
def load_fcs_file(input_fcs: str, genome: str = None) -> "MultimodalData":
    """Load Cyto data from a FCS file, support v2.0, v3.0 and v3.1.

    Parameters
    ----------
    input_fcs : `str`
        The FCS file.
    genome : `str`, optional (default None)
        The genome reference. If None, use "unknown" instead.

    Returns
    -------
    A MultimodalData object containing a (genome, CytoData) pair.

    Raises
    ------
    FileNotFoundError
        If `input_fcs` does not exist.

    Examples
    --------
    >>> io.load_fcs_file('example.fcs', genome = 'GRCh38')
    """
    # BUGFIX: the import must live inside the try block — the original had an
    # empty ``try:`` body (a SyntaxError) with the import hoisted above it.
    try:
        from pegasusio.cylib.io import read_fcs
    except ModuleNotFoundError:
        print("No module named 'pegasusio.cylib.io'")
        raise
    if not os.path.isfile(input_fcs):
        raise FileNotFoundError(f"File {input_fcs} does not exist!")
    feature_metadata, matrix, metadata = read_fcs(input_fcs)
    # One synthetic barcode per FCS event, 1-based: event1, event2, ...
    barcode_metadata = {"barcodekey": [f"event{i}" for i in range(1, matrix.shape[0] + 1)]}
    genome = "unknown" if genome is None else genome
    metadata["genome"] = genome
    metadata["modality"] = "cyto"
    cytodata = CytoData(barcode_metadata, feature_metadata, {"raw.data": matrix}, metadata)
    data = MultimodalData(cytodata)
    return data
def ext_bottom_up_cut_rod(price, length):
    """Bottom-up (memoized) rod-cutting.

    Returns a pair of lists indexed by rod length 0..length:
    the best achievable income for each length, and the size of the first
    cut that achieves it (0 for length 0).
    """
    best_income = [0] + [float("-Inf")] * length
    first_cut = [0] * (length + 1)
    for size in range(1, length + 1):
        # Try every size of the first piece; keep the strictly best one,
        # which preserves the smallest piece on ties.
        for piece in range(1, size + 1):
            candidate = price[piece - 1] + best_income[size - piece]
            if candidate > best_income[size]:
                best_income[size] = candidate
                first_cut[size] = piece
    return best_income, first_cut
def _build_config_dict(cfg_node):
"""
Updates the config dict provided from the given etcd node, which
should point at a config directory.
"""
config_dict = {}
for child in cfg_node.children:
key = child.key.rsplit("/").pop()
value = str(child.value)
config_dict[key] = value
return config_dict | 567fca19a6e1890c881170200ba44fc262148948 | 3,632,045 |
import time
def stamp_to_ymd(timestamp):
    """
    Convert a seconds-since-epoch timestamp to a local-time 'YYYYMMDD'
    string, as used by url requests, e.g.
    http://<fitsstore_server>/qaforgui/20130616

    parameters: <float>, seconds of epoch.
    return: <string>, YYYYMMDD of passed time.
    """
    local_time = time.localtime(timestamp)
    return time.strftime("%Y%m%d", local_time)
def ESMP_GridCreateCubedSphere(tilesize, regDecompPTile=None,
                               staggerLocList=None, name=None):
    """
    Preconditions: ESMP has been initialized.\n
    Postconditions: An ESMP_Grid has been created.\n
    Arguments:\n
        :RETURN: ESMP_Grid :: grid\n
        Integer :: tilesize\n
        Numpy.array(dtype=int32) (optional) :: regDecompPTile\n
        Numpy.array(dtype=int32) (optional) :: staggerLocList\n
        String (optional) :: name\n
    """
    # NOTE: the decompFlagPTile and deLabelList arguments are disabled
    # upstream and intentionally not exposed here.
    def _to_interface_int(array, label):
        # ESMP_InterfaceInt requires int32-typed numpy arrays; None is
        # passed straight through to the C layer.
        if array is None:
            return None
        if array.dtype != np.int32:
            raise TypeError(label + ' must have dtype==int32')
        return ESMP_InterfaceInt(array)

    local_rc = ct.c_int(0)
    local_tilesize = ct.c_int(tilesize)
    regDecomp_arg = _to_interface_int(regDecompPTile, 'regDecompPTile')
    staggerLoc_arg = _to_interface_int(staggerLocList, 'staggerLocList')

    # Call into the ESMF C layer; the return code is written through local_rc.
    gridstruct = _ESMF.ESMC_GridCreateCubedSphere(local_tilesize, regDecomp_arg,
                                                  staggerLoc_arg,
                                                  name,
                                                  ct.byref(local_rc))
    # check the return code from ESMF
    rc = local_rc.value
    if rc != constants._ESMP_SUCCESS:
        raise ValueError('ESMC_GridCreateCubedSphere() failed with rc = '+str(rc)+
                         '. '+constants._errmsg)
    # ctypes pointer wrapping the newly created ESMF Grid
    return gridstruct
def extract_asymbox2(image, left_in, right_in, ycen=None, weight_image=None):
    """ Extract the total flux within a variable window at many positions.

    This routine will accept an asymmetric/variable window specified by the
    left_in and right_in traces. The ycen position is optional. If it is not
    provided, it is assumed to be integers in the spectral direction (as is
    typical for traces). Traces are expected to run vertically to be
    consistent with other extract_ routines.
    Based on idlspec2d/spec2d/extract_asymbox2.pro

    Args:
        image : float ndarray
            Image to extract from. It is a 2-d array with shape (nspec, nspat)
        left_in : float ndarray
            Left boundary of region to be extracted (given as floating pt
            pixels). Either a 2-d array with shape (nspec, nTrace), or a 1-d
            array with shape (nspec) for the case of a single trace.
        right_in : float ndarray
            Right boundary of region to be extracted (given as floating pt
            pixels). Same shape rules as left_in.
        ycen : float ndarray, optional
            Y positions corresponding to left_in/right_in (expected as
            integers; floats are rounded). Must have the same shape as the
            left and right boundaries. Defaults to integer pixel positions.
        weight_image : float ndarray, optional
            Weight map applied to image before the boxcar; shape
            (nspec, nspat). When given, the result is weight-normalized.

    Returns:
        fextract: ndarray
            Extracted flux at positions specified by (left<-->right, ycen),
            with shape (nspec, nTrace) for multiple traces or (nspec) for a
            single trace.

    Raises:
        ValueError: if the traces are not 1- or 2-dimensional, or if ycen's
            shape does not match the traces.

    Revision History
    ----------------
    24-Mar-1999  Written by David Schlegel, Princeton.
    17-Feb-2003  Written with slow IDL routine, S. Burles, MIT
    22-Apr-2018  Ported to python by Joe Hennawi
    """
    # ToDO it would be nice to avoid this transposing, but I got confused during the IDL port
    left = left_in.T
    right = right_in.T
    dim = left.shape
    ndim = left.ndim
    if ndim == 1:
        nTrace = 1
        npix = dim[0]
    else:
        nTrace = dim[0]
        npix = dim[1]
    if ycen is None:
        if ndim == 1:
            ycen_out = np.arange(npix, dtype=int)
        elif ndim == 2:
            ycen_out = np.outer(np.ones(nTrace, dtype=int), np.arange(npix, dtype=int))
        else:
            raise ValueError('trace is not 1 or 2 dimensional')
    else:
        ycen_out = ycen.T
        ycen_out = np.rint(ycen_out).astype(int)
    if ((np.size(left) != np.size(ycen_out)) | (np.shape(left) != np.shape(ycen_out))):
        raise ValueError('Number of elements and left of trace and ycen must be equal')
    idims = image.shape
    nspat = idims[1]
    nspec = idims[0]
    maxwindow = np.max(right - left)
    # BUGFIX: np.int was a deprecated alias of the builtin int and has been
    # removed in numpy >= 1.24; use the builtin directly.
    tempx = int(maxwindow + 3.0)
    bigleft = np.outer(left[:], np.ones(tempx))
    bigright = np.outer(right[:], np.ones(tempx))
    spot = np.outer(np.ones(npix * nTrace), np.arange(tempx)) + bigleft - 1
    bigy = np.outer(ycen_out[:], np.ones(tempx, dtype='int'))
    fullspot = np.array(np.fmin(np.fmax(np.round(spot + 1) - 1, 0), nspat - 1), int)
    # Fractional pixel coverage at the left/right window edges, clipped to
    # [-0.5, 0.5]; interior pixels get weight 1 below.
    fracleft = np.fmax(np.fmin(fullspot - bigleft, 0.5), -0.5)
    fracright = np.fmax(np.fmin(bigright - fullspot, 0.5), -0.5)
    del bigleft
    del bigright
    bool_mask1 = (spot >= -0.5) & (spot < (nspat - 0.5))
    bool_mask2 = (bigy >= 0) & (bigy <= (nspec - 1))
    weight = (np.fmin(np.fmax(fracleft + fracright, 0), 1)) * bool_mask1 * bool_mask2
    del spot
    del fracleft
    del fracright
    bigy = np.fmin(np.fmax(bigy, 0), nspec - 1)
    if weight_image is not None:
        temp = np.array([weight_image[x1, y1] * image[x1, y1] for (x1, y1) in zip(bigy.flatten(), fullspot.flatten())])
        temp2 = np.reshape(weight.flatten() * temp, (nTrace, npix, tempx))
        fextract = np.sum(temp2, axis=2)
        temp_wi = np.array([weight_image[x1, y1] for (x1, y1) in zip(bigy.flatten(), fullspot.flatten())])
        temp2_wi = np.reshape(weight.flatten() * temp_wi, (nTrace, npix, tempx))
        f_ivar = np.sum(temp2_wi, axis=2)
        # Normalize by the summed weights, guarding against division by zero.
        fextract = fextract / (f_ivar + (f_ivar == 0)) * (f_ivar > 0)
    else:
        # Might be a more pythonic way to code this. I needed to switch the
        # flattening order in order to get this to work
        temp = np.array([image[x1, y1] for (x1, y1) in zip(bigy.flatten(), fullspot.flatten())])
        temp2 = np.reshape(weight.flatten() * temp, (nTrace, npix, tempx))
        fextract = np.sum(temp2, axis=2)
    # IDL version model functionality not implemented yet
    # At the moment the f_ivar for the weight_image mode is not returned.
    if nTrace == 1:
        fextract = fextract.reshape(npix)
    return fextract.T
import os
def exists(b, d, n):
    """Check if the folder specified by the given parameters exists"""
    folder = f"../Output/B{b} D{d} N{n}"
    return os.path.isdir(folder)
def version() -> str:
    """Return the version banner string (original docstring: "版本号",
    i.e. "version number")."""
    banner = f'Version: {VERSION}'
    return banner
import ctypes
def UnpackMessage(swig_obj_pointer, msg_name):
  """Unpack a SWIG-wrapped memory object into an AIO message.

  Args:
    swig_obj_pointer: A SWIG-wrapped memory object pointing to the raw AIO
        message payload.
    msg_name: Name or short name of the message type.

  Returns:
    An AIO message struct.
  """
  address = int(swig_obj_pointer)
  payload_len = aio.GetPackMessageSize(msg_name)
  # View the raw payload bytes in place, then copy them out for unpacking.
  raw_buffer = (ctypes.c_char * payload_len).from_address(address)
  msg_type = MESSAGE_TYPE_HELPER.Value(msg_name)
  return c_helpers.Unpack(raw_buffer[:], MESSAGE_STRUCTS[msg_type])
def a2b_hashed_base58(s):
    """
    Decode a hashed_base58 string and return its binary payload.
    Raises an EncodingError when the trailing 4-byte double-SHA256
    checksum does not match.
    """
    decoded = a2b_base58(s)
    payload, checksum = decoded[:-4], decoded[-4:]
    if double_sha256(payload)[:4] != checksum:
        raise EncodingError("hashed base58 has bad checksum %s" % s)
    return payload
def predict(theta, X):
    """ computes the predictions for X using a threshold at 0.5
    (i.e., if sigmoid(theta'*x) >= 0.5, predict 1)

    Args:
        theta: 1-d parameter vector.
        X: 2-d array of samples, one row per sample.

    Returns:
        Integer array of 0/1 predictions, one per row of X.
    """
    # BUGFIX: sigmoid(z) >= 0.5 is equivalent to z >= 0, so the raw linear
    # score must be thresholded at 0 — the original compared it to 0.5.
    scores = np.asarray(X).dot(theta)
    return np.where(scores >= 0, 1, 0)
def not_contains(a, b):
    """Return True when ``b`` is not contained in ``a``, else False."""
    # Idiomatic membership test instead of the roundabout conditional.
    return b not in a
def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding.

    Instantiates a positional-encoding object from ``cfg`` via the
    POSITIONAL_ENCODING registry.

    Args:
        cfg: config describing the positional encoding to build.
        default_args (optional): defaults forwarded to ``build_from_cfg``
            alongside ``cfg``.

    Returns:
        The object produced by ``build_from_cfg`` for the
        POSITIONAL_ENCODING registry.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
def create_func_result_identifier(func, params_str, key=None, key_separator="__"):
    """
    Build a string identifier for a function result.

    If ``key`` is None the identifier is ``<FUNC_NAME><PARAMS_STR>``;
    otherwise it is ``<FUNC_NAME><PARAMS_STR><SEPARATOR><KEY>``.
    ``<FUNC_NAME>`` is taken from ``func.__name__`` and ``<PARAMS_STR>`` is a
    string such as produced by ``create_params_str``. ``key`` identifies one
    value of a multi-value result; the separator defaults to a double
    underscore and can be changed via ``key_separator``.

    **IMPORTANT NOTE:**
    This function is rather specific to time series characteristic features
    (such as tsfresh), so it should be used only internally.
    """
    base = f"{func.__name__}{params_str}"
    if key is None:
        return base
    return f"{base}{key_separator}{key}"
def quat_to_euler(q):
    """
    Convert a unit quaternion q = (w, x, y, z) = w + (x i, y j, z k) into the
    aircraft Euler angles (roll, pitch, yaw) = (phi, th, psi).
    """
    w, x, y, z = q[0], q[1], q[2], q[3]
    r00 = 1 - 2 * y**2 - 2 * z**2
    r10 = 2 * x * y + 2 * z * w
    # Pitch uses the same expression on both branches, so compute it once.
    pitch = np.arcsin(-2 * x * z + 2 * y * w)
    if np.sqrt(r00**2 + r10**2) >= .000001:
        roll = np.arctan2(2 * y * z + 2 * x * w, 1 - 2 * x**2 - 2 * y**2)
        yaw = np.arctan2(r10, r00)
    else:
        # Near gimbal lock: fix yaw to 0 and fold it into roll.
        roll = np.arctan2(-2 * y * z - 2 * x * w, 1 - 2 * x**2 - 2 * z**2)
        yaw = 0.0
    return roll, pitch, yaw
def idc_get_local_type_name(*args):
    """
    idc_get_local_type_name(ordinal) -> char

    Thin SWIG wrapper that forwards its arguments unchanged to
    ``_ida_typeinf.idc_get_local_type_name``.
    """
    return _ida_typeinf.idc_get_local_type_name(*args)
import csv
def import_town(data_file):
    """
    Reads town raster data from a CSV file.

    Parameters
    ----------
    data_file : str
        Name of CSV raster data file to use for the town.

    Returns
    -------
    town : list
        List (cols) of lists (rows) representing raster data of the town.
    """
    # QUOTE_NONNUMERIC makes the reader convert every unquoted field to float.
    with open(data_file, newline="") as source:
        reader = csv.reader(source, quoting=csv.QUOTE_NONNUMERIC)
        return [list(row) for row in reader]
def init_network():
    """Create the weight and bias matrices used by the neural network
    (textbook p.88; original docstring in Korean).

    Input layer: (x1, x2) -> 1x2 matrix
    Hidden layers:
        - 1st hidden layer: 3 neurons (x @ W1 + b1)
        - 2nd hidden layer: 2 neurons
    Output layer: (y1, y2) -> 1x2 matrix
    W1..W3 and b1..b3 are filled with random numbers under a fixed seed.
    """
    np.random.seed(1224)
    # Draw order matters for reproducibility with the fixed seed:
    # W1 (2x3), b1 (3,), W2 (3x2), b2 (2,), W3 (2x2), b3 (2,).
    layer_specs = (((2, 3), 3), ((3, 2), 2), ((2, 2), 2))
    network = {}
    for layer, (w_shape, b_size) in enumerate(layer_specs, start=1):
        network['W%d' % layer] = np.random.random(size=w_shape).round(2)
        network['b%d' % layer] = np.random.random(b_size).round(2)
    return network
def reduce_detections(detections, detection_indices, img_w, img_h):
    """
    Removes overlapping detections, returning the indices of the boxes kept.
    Important note: Tensorflow detections are already sorted by detection score!
    (optional)
    TODO: Could tweak this paramter based on num of faces (2 ears, 1 of each class, per person max)
    TODO: Improvement - merge with face detector?
    TODO: improve selection accuracy by questioning first selection:
        example case: High confidence for left ear, but all further detections pick right ear in same spot
        (with similar score)
    """
    # Cleanup vs. original: dropped the dead ``unique_classes`` list, reuse
    # the computed bbox instead of recomputing it, and use a lazy any().
    unique_indices, unique_boxes = [], []
    for i in detection_indices:
        new_box = get_polygon_bbox(get_pixel_bbox(detections["detection_boxes"][i], img_w, img_h))
        # Keep the box only if it does not intersect any already-kept box;
        # since indices arrive score-sorted, the higher-scoring box wins.
        if not any(intersect_boxes(new_box, box_x) for box_x in unique_boxes):
            unique_indices.append(i)
            unique_boxes.append(new_box)
    return unique_indices
import os
def load_fiveplates_summary(platerun):
    """Read the five-plates summary table for ``platerun``.

    Args:
        platerun: identifier passed to ``paths.fiveplates_summary`` to locate
            the summary file.

    Returns:
        ``astropy.table.Table`` parsed from the summary file (ascii format).

    Raises:
        FileNotFoundError: if the summary file does not exist.
    """
    summary_file = paths.fiveplates_summary(platerun)
    # Guard clause instead of the original `if exists: pass / else: raise`.
    if not summary_file.exists():
        raise FileNotFoundError(os.fspath(summary_file))
    return Table.read(os.fspath(summary_file), format='ascii')
def make_connection():
    """Establish and return a PostgreSQL database connection.

    During development the connection information is hard coded; in actual
    AWS deployment it would be retrieved from other services (e.g. AWS
    Secrets Manager). The returned connection uses UTF8 client encoding and
    autocommit mode.
    """
    dsn = f"host={endpoint} dbname={database} user={db_user} password={password} port={port}"
    connection = psycopg2.connect(dsn)
    connection.set_client_encoding('UTF8')
    connection.autocommit = True
    return connection
import random
def a_noun(random=random, *args, **kwargs):
    """
    Return a noun, but with an 'a' in front of it. Or an 'an', depending!
    >>> mock_random.seed(0)
    >>> a_noun(random=mock_random)
    'an onion'
    >>> a_noun(random=mock_random, capitalize=True)
    'A Chimp'
    >>> a_noun(random=mock_random, slugify=True)
    'a-blister'
    """
    word = noun(random=random)
    return inflectify.a(word)
def _tree_flatten_with_names(tree):
    """Populates tree_flatten with leaf names.

    This function populates output of tree_flatten with leaf names, using a
    custom traversal that produces names. The custom traversal does NOT have
    to visit the tree in the same order as jax; jax's and the custom
    traversal are aligned automatically.

    Args:
        tree: python tree.

    Returns:
        A list of values with names: [(name, value), ...]
    """
    leaves, tree_def = jax.tree_flatten(tree)
    # "Fake" token tree used to track jax's internal traversal order so the
    # custom traversal can be aligned with it.
    token_tree = tree_def.unflatten(range(len(leaves)))
    leaf_names, perm = zip(*_traverse_with_names(token_tree))
    inv_perm = np.argsort(perm)
    # Custom traversal should visit the same number of leaves.
    assert len(leaf_names) == len(leaves)
    return [(leaf_names[i], leaf) for i, leaf in zip(inv_perm, leaves)], tree_def
def inhomogeneous_poisson_process(rate, as_array=False,
                                  refractory_period=None):
    """
    Returns a spike train whose spikes are a realization of an inhomogeneous
    Poisson process with the given rate profile.
    Parameters
    ----------
    rate : neo.AnalogSignal
        A `neo.AnalogSignal` representing the rate profile evolving over time.
        Its values have all to be `>=0`. The output spiketrain will have
        `t_start = rate.t_start` and `t_stop = rate.t_stop`
    as_array : bool, optional
        If True, a NumPy array of sorted spikes is returned,
        rather than a SpikeTrain object.
        Default: False.
    refractory_period : pq.Quantity or None, optional
        `pq.Quantity` scalar with dimension time. The time period after one
        spike no other spike is emitted.
        Default: None.
    Returns
    -------
    spiketrain : neo.SpikeTrain or np.ndarray
        Inhomogeneous Poisson process realization, of type `neo.SpikeTrain`
        if `as_array` is False (default) and `np.ndarray` otherwise.
    Raises
    ------
    ValueError
        If `rate` contains a negative value.
        If `refractory_period` is not None or not of type `pq.Quantity`.
        If `refractory_period` is not None and the period between two
        successive spikes (`1 / rate`) is smaller than the `refractory_period`.
    """
    # Check rate contains only positive values
    if np.any(rate < 0) or rate.size == 0:
        raise ValueError(
            'rate must be a positive non empty signal, representing the'
            'rate at time t')
    if not isinstance(refractory_period, pq.Quantity) and \
            refractory_period is not None:
        raise ValueError("refr_period must be of type pq.Quantity or None")
    rate_max = np.max(rate)
    if refractory_period is not None:
        # Sanity check: even at peak rate, the mean inter-spike interval must
        # exceed the refractory period for the model to make sense.
        if (rate_max * refractory_period).simplified >= 1.:
            raise ValueError(
                "Period between two successive spikes must be larger "
                "than the refractory period. Decrease either the "
                "firing rate or the refractory period.")
        # effective rate parameter for the refractory period case
        rate = rate / (1. - (rate * refractory_period).simplified)
        rate_max = np.max(rate)
    # Generate n hidden Poisson SpikeTrains with rate equal
    # to the peak rate
    homogeneous_poiss = homogeneous_poisson_process(
        rate=rate_max, t_stop=rate.t_stop, t_start=rate.t_start)
    # Compute the rate profile at each spike time by interpolation
    rate_interpolated = _analog_signal_linear_interp(
        signal=rate, times=homogeneous_poiss.times)
    # Accept each spike at time t with probability rate(t)/max_rate
    # (thinning / rejection sampling of the homogeneous process)
    random_uniforms = np.random.uniform(size=len(homogeneous_poiss)) * rate_max
    spikes = homogeneous_poiss[random_uniforms < rate_interpolated.flatten()]
    if refractory_period is not None:
        refractory_period = refractory_period.rescale(
            rate.t_stop.units).magnitude
        # thinning in average cancels the effect of the effective firing rate
        spikes = _thinning_for_refractory_period(spikes.magnitude,
                                                 refractory_period)
        if not as_array:
            spikes = neo.SpikeTrain(spikes * rate.t_stop.units,
                                    t_start=rate.t_start,
                                    t_stop=rate.t_stop)
    else:
        # Without a refractory period the accepted spikes are already a
        # SpikeTrain slice; only unwrap to a plain array when requested.
        if as_array:
            spikes = spikes.magnitude
    return spikes
import re
def enumerate_quotes(filename, encoding="utf-8", empty_name="Inconnu"):
    """
    Enumerates quote from a filename or a stream
    @param filename filename or stream
    @param encoding applicable only if filename
    @param empty_name replces an empty author name
    @return enumerate on quote
    A quote is defined a dictionary.
    """
    if isinstance(filename, str):
        # Given a path: open it and recurse with the stream.
        with open(filename, "r", encoding=encoding) as f:
            for q in enumerate_quotes(f):
                yield q
    else:
        # re1 extracts the year from \chapter{YYYY} lines; re2..re6 match the
        # different LaTeX quote environments (xcit/xcitt, xcita, xcitenfant,
        # xcitw, xcita3) and are tried in that order by process_content.
        re1 = re.compile("chapter[{]([0-9]+)[}]")
        re2 = re.compile(
            "[\\]begin[{]xcitt?[}][{](.*?)[}][{](.*?)[}][{](.*?)[}][{](.+?)[}]")
        re3 = re.compile(
            "[\\]begin[{]xcita[}][{](.*?)[}][{](.*?)[}][{](.+?)[}][{](.*?)[}][{](.*?)[}][{](.+?)[}]")
        re4 = re.compile(
            "[\\]begin[{]xcitenfant[}][{](.*?)[}][{](.*?)[}][{](.*?)[}][{](.+?)[}]")
        re5 = re.compile(
            "[\\]begin[{]xcitw[}][{](.*?)[}][{](.*?)[}][{](.*?)[}][{](.+?)[}][{](.+?)[}]")
        re6 = re.compile(
            "[\\]begin[{]xcita3[}][{](.*?)[}][{](.*?)[}][{](.+?)[}][{](.*?)[}][{](.+?)[}][{](.*?)[}][{](.*?)[}][{](.+?)[}]")
        def process_content(il, content):
            # Convert the accumulated environment lines into one quote dict;
            # the first regex that matches the \begin line wins.
            find = re2.search(content[0])
            if find:
                author, name, book, index = find.groups()
                obs = dict(author="{0} {1}".format(name, author),
                           book=book, index=index, year=year)
            else:
                find = re3.search(content[0])
                if find:
                    author1, name1, author2, name2, book, index = find.groups()
                    obs = dict(author="{0} {1}, {2} {3}".format(name1, author1, name2, author2),
                               book=book, index=index, year=year)
                else:
                    find = re4.search(content[0])
                    if find:
                        author, name, book, index = find.groups()
                        obs = dict(author="{0} {1}".format(name, author),
                                   book=book, index=index, year=year,
                                   tag="enfant")
                    else:
                        find = re5.search(content[0])
                        if find:
                            author, name, book, index, date = find.groups()
                            obs = dict(author="{0} {1}".format(name, author),
                                       book=book, index=index, year=year,
                                       date=date)
                        else:
                            find = re6.search(content[0])
                            if find:
                                author, name, a2, n2, a3, n3, book, index = find.groups()
                                obs = dict(author="{} {}, {} {}, {} {}".format(name, author, n2, a2, n3, a3),
                                           book=book, index=index, year=year)
                            else:
                                raise FormatException(  # pragma: no cover
                                    "Unable to interpret line {0}: '{1}'".format(il, content[0]))
            # Body text is everything between \begin and \end.
            content = "\n".join(content[1:-1])
            content = content.replace("~", " ")
            content = content.replace("\\quad", "...")
            obs["content"] = content
            if not obs["author"]:
                obs["author"] = empty_name
            return obs
        year = None
        content = []
        for il, line in enumerate(filename):
            sline = line.strip()
            # A \chapter{YYYY} line sets the year for all following quotes.
            if sline.startswith("\\chapter{"):
                chap = re1.search(sline)
                if chap:
                    year = chap.groups()[0]
                else:
                    raise FormatException(  # pragma: no cover
                        "Unable to process line {0}: '{1}'".format(il, sline))
            else:
                if sline.startswith("\\begin{xcit"):
                    content.append(sline)
                elif sline.startswith("\\end{xcit"):
                    content.append(sline)
                    yield process_content(il, content)
                    content.clear()
                else:
                    if content:
                        content.append(sline)
                    else:
                        # between quotes
                        pass
def _params_to_df(params: Parameters) -> DataFrame:
    """Convert lmfit.Parameters to pandas.DataFrame, one row per parameter
    with its name, vary flag, value, stderr, bounds, brute step and expr."""
    columns = (
        "name",
        "vary",
        "value",
        "stderr",
        "min",
        "max",
        "brute step",
        "expr",
    )
    rows = [
        (p.name, p.vary, p.value, p.stderr, p.min, p.max, p.brute_step, p.expr)
        for p in params.values()
    ]
    return DataFrame(rows, columns=columns)
def w(P, T, region = 0):
    """ Speed of sound [m / s].

    P and T are forwarded to the per-region implementations (presumably
    pressure and temperature — confirm against region1/region2).  When
    ``region`` is 0 it is resolved via ``idRegion(P, T)``; regions 1 and 2
    dispatch to their modules, anything else yields 0.0.
    """
    # BUGFIX: the original used `region is 0` / `is 1` / `is 2`; `is` tests
    # object identity, which is unreliable for int literals (SyntaxWarning
    # since Python 3.8). Use equality instead.
    if region == 0:
        region = idRegion(P, T)
    if region == 1:
        return region1.w(P, T)
    elif region == 2:
        return region2.w(P, T)
    else:
        return 0.000
def WaitForOperation(api_version, response, asynchronous):
  """Handles waiting for the operation and printing information about it.

  Args:
    api_version: Cloud Domains API version to call.
    response: Response from the API call
    asynchronous: If true, do not wait for the operation

  Returns:
    The last information about the operation.
  """
  operation_ref = ParseOperation(api_version, response.name)
  # Asynchronous mode: announce the operation and hand back the original
  # response without waiting.
  if asynchronous:
    log.status.Print('Started \'{}\''.format(operation_ref.Name()))
    return response
  operations_client = operations.Client.FromApiVersion(api_version)
  waiting_message = 'Waiting for \'{}\' to complete'.format(operation_ref.Name())
  return operations_client.WaitForOperation(operation_ref, waiting_message)
from datetime import datetime
def generateVtBar(symbol, d):
    """Build a VtBarData bar from one raw K-line record
    (original docstring: "生成K线" — generate a K-line)."""
    bar = VtBarData()
    bar.symbol = symbol
    bar.vtSymbol = symbol
    # OHLCV fields share the same name in the record and on the bar.
    for field in ('open', 'high', 'low', 'close', 'volume'):
        setattr(bar, field, d[field])
    bar.openInterest = d['open_oi']
    # Source timestamp is in nanoseconds since the epoch.
    stamp = datetime.fromtimestamp(d['datetime'] / 1000000000)
    bar.datetime = stamp
    bar.date = stamp.strftime("%Y%m%d")
    bar.time = stamp.strftime("%H:%M:%S")
    return bar
def produce_validation_report(stages, jobs, validation_json, **kwargs):
    """Produce validation report inside CI pipeline.
    @param stages: the GitLab CI stages to consider
    @param jobs: the job names to consider
    @param validation_json: local job path to validation JSON output
    @return summary of validation findings with pointers to details

    Note: **kwargs is accepted but not used in this function body —
    presumably for interface compatibility with callers; confirm upstream.
    """
    download_json = dict(validation_json=validation_json)
    jobs = gitlab.get_jobs_for_stages(stages, download_json=download_json, job_filter=jobs)
    data = {}
    for name, job in jobs.items():
        outputs = download_validation_outputs(job)
        # write out .md with full paths, HTML with local paths and PDF with local paths
        data[name] = job['validation_json'][name]
        data[name]['job_name'] = name
        data[name]['images'] = []
        # First pass: collect local image paths for the PDF report.
        for d, info in data[name]['distributions'].items():
            if 'image' in info:
                image = outputs[d]['image']
                logger.debug('Loading image {0} for PDF validation report'.format(image))
                data[name]['images'].append(image)
        validation_output_file = 'validation_report_{0}'.format(name)
        details = create_detailed_report(
            data[name], output_dir='.',
            output_file=validation_output_file,
            formats=['pdf']
        )
        # Second pass: rewrite image URLs via update_image_urls and rebuild
        # the image list for the HTML/Markdown reports.
        outputs = update_image_urls(outputs)
        data[name]['images'] = []
        for d, info in data[name]['distributions'].items():
            if 'image' in info:
                image = outputs[d]['image']
                info['image'] = image
                logger.debug('Loading image {0} for HTML/MD validation report'.format(image))
                data[name]['images'].append(image)
        details.update(create_detailed_report(
            data[name], output_dir='.',
            output_file=validation_output_file,
            formats=['md', 'html']
        ))
        # Summary links point to the PDF rendition of the detailed report.
        data[name]['web_url_to_details'] = details['pdf']
    summary = create_summary(data)
    return summary
def search_file(drive_service, num_of_responses, query):
    """
    Query the Drive API for files matching *query* and return one page of
    results (up to *num_of_responses*) as a pd.DataFrame.

    Prints a notice and returns None when nothing matches.
    """
    request = drive_service.files().list(
        pageSize=num_of_responses,
        q=query,
        fields="nextPageToken, files(id, name, kind, exportLinks, mimeType, parents, size, createdTime, modifiedTime, trashed, ownedByMe, capabilities/canCopy, exportLinks/application)",
    )
    response = request.execute()
    items = response.get("files", [])
    if items:
        return pd.DataFrame(items)
    print("No files found.")
from datetime import datetime
from datetime import timedelta
import numpy
def L6_summary_daily(ds,series_dict):
    """
    Purpose:
     Calculate the daily averages or sums of various quantities and write
     them to a worksheet in an Excel workbook.
    Usage:
     L6_summary_daily(ds,series_dict)
     where ds is an OzFluxQC data structure
           series_dict is a dictionary of various variable lists
    Returns:
     daily_dict mapping series name -> {"data", "units", "format", "flag"}
     where "flag" is the fraction of the day with QC flag == 0.
    Author: PRI
    Date: June 2015
    """
    log.info(" Doing the daily summary (data) at L6")
    dt = ds.series["DateTime"]["Data"]
    ts = int(ds.globalattributes["time_step"])
    # Trim to whole days only.
    si = qcutils.GetDateIndex(dt,str(dt[0]),ts=ts,default=0,match="startnextday")
    ei = qcutils.GetDateIndex(dt,str(dt[-1]),ts=ts,default=len(dt)-1,match="endpreviousday")
    ldt = dt[si:ei+1]
    ntsInDay = int(24.0*60.0/float(ts))
    # BUG FIX: use floor division; on Python 3 "/" returns a float which
    # breaks range() and ndarray.reshape() below.
    nDays = len(ldt)//ntsInDay
    # BUG FIX: this module does "from datetime import datetime", so
    # datetime.timedelta raised AttributeError; use timedelta directly.
    ldt_daily = [ldt[0]+timedelta(days=i) for i in range(0,nDays)]
    daily_dict = {}
    daily_dict["DateTime"] = {"data":ldt_daily,"units":"Days","format":"dd/mm/yyyy"}
    # BUG FIX: dict.keys() has no .sort() on Python 3; use sorted().
    series_list = sorted(series_dict["daily"].keys())
    for item in series_list:
        if item not in ds.series.keys(): continue
        daily_dict[item] = {}
        data_1d,flag_1d,attr = qcutils.GetSeriesasMA(ds,item,si=si,ei=ei)
        # CO2 fluxes are converted to daily carbon totals.
        if item in series_dict["lists"]["co2"]:
            data_1d = qcutils.convert_units_func(ds,data_1d,attr["units"],"gC/m2",ts)
            daily_dict[item]["units"] = "gC/m2"
        else:
            daily_dict[item]["units"] = attr["units"]
        data_2d = data_1d.reshape(nDays,ntsInDay)
        if series_dict["daily"][item]["operator"].lower()=="average":
            daily_dict[item]["data"] = numpy.ma.average(data_2d,axis=1)
        elif series_dict["daily"][item]["operator"].lower()=="sum":
            daily_dict[item]["data"] = numpy.ma.sum(data_2d,axis=1)
            daily_dict[item]["units"] = daily_dict[item]["units"]+"/day"
        else:
            msg = "Unrecognised operator ("+series_dict["daily"][item]["operator"]
            msg = msg+") for series "+item
            log.error(msg)
            continue
        # add the format to be used
        daily_dict[item]["format"] = series_dict["daily"][item]["format"]
        # now do the flag, this is the fraction of data with QC flag = 0 in the day
        daily_dict[item]["flag"] = numpy.zeros(nDays,dtype=numpy.float64)
        flag_2d = flag_1d.reshape(nDays,ntsInDay)
        for i in range(nDays):
            daily_dict[item]["flag"][i] = 1-float(numpy.count_nonzero(flag_2d[i,:]))/float(ntsInDay)
    return daily_dict
def check_eol(file, eol):
    """Check that *file* uses the expected end-of-line convention.

    :param file: Path to file to check
    :param eol: Expected End of Line ('\\n' or '\\r\\n')
    :return: Empty string when the file conforms, otherwise an error message
    :rtype: str
    """
    with open(file, 'rb') as handle:
        content = handle.read()
    if eol == '\n':
        # Any CRLF at all violates a pure-LF requirement.
        if b'\r\n' in content:
            return "Incorrect EOL in file, LF (\\n) expected."
        return ''
    if eol == '\r\n':
        # Every LF must be part of a CRLF pair.
        if content.count(b'\r\n') != content.count(b'\n'):
            return "Incorrect EOL in file, CRLF (\\r\\n) expected."
        return ''
    # Unknown expectation: report a configuration problem, no file error.
    error_message(f"Incorrect EOL in configuration: {eol}")
    return ''
def pe44(limit=1500):
    """Project Euler 44: find pentagonal numbers whose sum and difference
    are both pentagonal, returning (difference, Pj, Pk, j, k).

    >>> pe44()
    (5482660, 7042750, 1560090, 2166, 1019)
    """
    # Pentagonal numbers P(n) = n(3n - 1)/2, plus a set for O(1) membership.
    pentagonals = [n * (3 * n - 1) // 2 for n in range(1, limit << 1)]
    lookup = set(pentagonals)
    for lo in range(limit):
        small = pentagonals[lo]
        for hi in range(lo + 1, (limit << 1) - 1):
            big = pentagonals[hi]
            difference = big - small
            if big + small in lookup and difference in lookup:
                return (difference, big, small, hi, lo)
    return None
import re
def text_cut(questions):
    """
    Segment each text into Chinese words with jieba.

    Keeps only Chinese characters, strips common Chinese punctuation, then
    runs jieba segmentation on every element.

    NOTE(review): the original docstring claimed stop words are removed and
    that a string is returned when the stop list is missing, but no stop
    list is consulted anywhere below -- confirm intent.

    :param questions: pandas Series of raw text strings.
    :return: pandas Series where each element is the list of cut words.
    """
    # Keep runs of CJK unified ideographs only (U+4E00..U+9FA5).
    pattern = re.compile(u'[\u4e00-\u9fa5]+')
    questions_chinese = questions.apply(lambda x: ' '.join(re.findall(pattern, x)))
    # del the punctuation (full-width/Chinese punctuation marks)
    pattern_cut_punctuation = re.compile((u'[\,\。\?\!\~\(\)\〈\《\》\——\“\”\<\>\[\]\、\:\「\」\【\】]'))
    questions_chinese = questions_chinese.apply(lambda x: pattern_cut_punctuation.sub('', x))
    cut = lambda s: jieba.lcut(s)  # use it to cut question
    question_cut = questions_chinese.apply(cut)  # use broadcast to improve the speed of cut words
    return question_cut
import logging
import sys
def mask_3D(hPa, sect, MBL=True, res='4x5', extra_mask=None,
            M_all=False, use_multiply_method=True, trop_limit=False,
            verbose=True, debug=False):
    """
    Creates Maskes by pressure array (required shape: 72,46,47),
    with conditions (lower and upper bounds) set by given cases for
    MBL, UT, FT
    Parameters
    -------
    sect (Str): section of the atmosphere of interest (e.g. MBL, UT...)
    hPa (array): array for pressures ( in hPa)
    MBL (boolean): apply a mask for the marine boundary layer
    res (str): the resolution of required output/input arrays (e.g. '4x5' )
    use_multiply_method (boolean): Create arrays of ones and zeros
    trop_limit (boolean): limit 3D arrays to troposphere
    debug (boolean): legacy debug option, replaced by python logging
    verbose (boolean): legacy debug option, replaced by python logging
    extra_mask (str): name of additional region (e.g. ocean) to mask
    M_all (boolean): apply oceanic masking to all regions
    Returns
    -------
    (np.ma.mask)
    NOTES:
    - originally written to generate masks for mulitplication
    (i.e. use_multiply_method = True ), but can also be use to make
    more pythonic masks ( if use_multiply_method=False )
    """
    if verbose:
        # BUG FIX: the last format call passed (sect, debug, verbose) to two
        # placeholders, so "debug" printed the section name; pass the right args.
        print(('mask_3D called for sect={}, use_multiply_method={}'.format(
            sect, use_multiply_method) + ', M_all={}, and '.format(M_all) +
            'with debug: {}, verbose:{}'.format(debug, verbose)))
    # Get atmospheric region as case defining lower and upper bounds
    cases = {
        'BL': [1200., 900.], 'MBL': [1200., 900.], 'FT': [900., 350.],
        'UT': [350., 75.], 'All': [1200., 75.]
    }
    l, h = cases[sect]
    # --- Mask between upper and lower values
    m = np.ones(get_dims4res(res))
    m[(hPa >= l)] = 0
    m[(hPa < h)] = 0
    logging.debug('Sect={}, l={}, h={}'.format(sect, l, h))
    logging.debug('{}'.format(
        *[[i.min(), i.max(), i.mean(), i.sum(), i.shape] for i in [m]]))
    # Mask off the 'sect' area that still equals 1
    m = np.ma.masked_equal(m, 1)
    # --- Remove above the "chemical tropopause" from GEOS-Chem (v9-2)
    if trop_limit:
        m = m[..., :38]
    if not isinstance(extra_mask, type(None)):
        # only consider MBL
        if (MBL and sect == 'BL') or (sect == 'MBL') or M_all:
            if use_multiply_method:
                return m.mask * extra_mask * land_unmasked(res)
            else:
                print("WARNING: needs 3D arrays, use 'mask_all_but' instead")
                sys.exit()
        else:
            if use_multiply_method:
                return m.mask * extra_mask
            else:
                print("WARNING: needs 3D arrays, use 'mask_all_but' instead")
                sys.exit()
    # --- Only consider MBL (or MFT/MFT for Saiz-Lopez 2014 comparison)
    if (MBL and sect == 'BL') or (sect == 'MBL') or M_all:
        if use_multiply_method:
            return m.mask * land_unmasked(res)
        else:
            land_unmasked_ = mask_all_but('Land', mask3D=True, res=res,
                                          use_multiply_method=False, trop_limit=trop_limit)
            # MBL unmasked
            m = np.logical_or(np.logical_not(m.mask),
                              np.logical_not(land_unmasked_))
            # Invert as function expected to return oposite
            m = np.logical_not(m)
            return m  # m is a mask here
    return m.mask
def export_visible_cells(
    self,
    export_keyword="FLUXNUM",
    visible_active_cells_value=1,
    hidden_active_cells_value=0,
    inactive_cells_value=0,
):
    """Export special properties for all visible cells.

    Issues an ExportVisibleCells command against this view's case.

    Arguments:
        export_keyword (string): The keyword to export.
            Choices: 'FLUXNUM' or 'MULTNUM'. Default: 'FLUXNUM'
        visible_active_cells_value (int): Value to export for visible active cells. Default: 1
        hidden_active_cells_value (int): Value to export for hidden active cells. Default: 0
        inactive_cells_value (int): Value to export for inactive cells. Default: 0
    """
    case_id = self.case().id
    return self._execute_command(
        exportVisibleCells=Cmd.ExportVisibleCellsRequest(
            caseId=case_id,
            viewId=self.id,
            exportKeyword=export_keyword,
            visibleActiveCellsValue=visible_active_cells_value,
            hiddenActiveCellsValue=hidden_active_cells_value,
            inactiveCellsValue=inactive_cells_value,
        )
    )
def sanitize_markdown(markdown_body):
    """
    Escape angle brackets in a markdown body so that Markdown -> HTML
    conversion does not interpret literal <...> text (e.g. <s>, <e>) as
    HTML tags.

    Backslash-escaped ``\\<s>`` / ``\\<e>`` sequences are treated as
    deliberate escapes and restored as real ``<s>`` / ``<e>`` tags.
    """
    # BUG FIX: the previous chain looked for '\<s>'/'\<e>' AFTER '<' had
    # already been replaced, so those branches could never match. Protect
    # the escaped sequences with sentinels first, then escape, then restore.
    protected = (markdown_body
                 .replace('\\<s>', '\x00S\x00')
                 .replace('\\<e>', '\x00E\x00'))
    escaped = protected.replace('<', '&lt;').replace('>', '&gt;')
    return escaped.replace('\x00S\x00', '<s>').replace('\x00E\x00', '<e>')
def ProjectsInsightTypeInsightsService(api_version):
  """Returns the service class for the Project insights."""
  return RecommenderClient(api_version).projects_locations_insightTypes_insights
def get_course_dict(only_active=True):
    """ Return a dictionary of courses keyed by course ID.

    By default only courses with ``active == 1`` are included; pass
    ``only_active=False`` to include every course.
    Each value looks like {id:, name:, title:}.
    """
    reload_if_needed()
    if only_active:
        return {cid: info for cid, info in COURSES.items()
                if info['active'] == 1}
    return dict(COURSES)
def _build_jinja2_expr_tmp(jinja2_exprs):
"""Build a template to evaluate jinja2 expressions."""
exprs = []
tmpls = []
for var, expr in jinja2_exprs.items():
tmpl = f"{var}: >-\n {{{{ {var} }}}}"
if tmpl not in tmpls:
tmpls.append(tmpl)
if expr.strip() not in exprs:
exprs.append(expr.strip())
return "\n".join(exprs + tmpls) | 3e5d944345316a40b7b8052f9b13801228607099 | 3,632,083 |
def create_blueprint(app):
    """Register blueprint routes on app."""
    # NOTE(review): `routes` is read but never used below -- presumably the
    # scaffold expects record views to be registered from it (see TODO);
    # confirm intent before removing.
    routes = app.config.get("APP_ROUTES")
    blueprint = Blueprint(
        "{{cookiecutter.package_name}}_records",
        __name__,
        template_folder="../templates",
    )
    # TODO: register your record views here.
    # Register template filters
    blueprint.add_app_template_filter(can_list_files)
    blueprint.add_app_template_filter(make_files_preview_compatible)
    blueprint.add_app_template_filter(pid_url)
    blueprint.add_app_template_filter(select_preview_file)
    blueprint.add_app_template_filter(to_previewer_files)
    blueprint.add_app_template_filter(has_previewable_files)
    blueprint.add_app_template_filter(order_entries)
    blueprint.add_app_template_filter(get_scheme_label)
    # Register context processor
    blueprint.app_context_processor(search_app_context)
    return blueprint
def normal(x, mu=0, sig=1):
    r""" Normal distribution log-likelihood.

    :param x: *int, float, np.array.*
    :param mu: (optional) *int, float, np.array.*
        Location parameter of the normal distribution. Defaults to 0.
    :param sig: (optional) *int, float.*
        Standard deviation of the normal distribution, :math:`\sigma > 0`.
        Defaults to 1.

    .. math::
        \log{P(x; \mu, \sigma)} \propto -\log{\sigma}
        - \frac{(x - \mu)^2}{2 \sigma^2}

    Returns -inf when the scale constraint is violated.
    """
    if np.size(mu) != 1 and len(x) != len(mu):
        raise ValueError('If mu is a vector, x must be the same size as mu.'
                         ' We got x={}, mu={}'.format(x, mu))
    # Invalid scale (sig < 0) yields an impossible model: -inf log-likelihood.
    if fails_constraints(sig >= 0):
        return -np.inf
    return np.sum(-np.log(sig) - (x - mu)**2/(2*sig**2))
import optparse
import platform
def parse_args(args):
    """
    Parse command-line arguments for the build script.

    :param args: argument tokens (typically ``sys.argv[1:]``)
    :return: ``(options, leftover_args)`` tuple as returned by optparse
    """
    parser = optparse.OptionParser()
    parser.add_option('--os', default=platform.system().lower(),
                      help='Set the target os (default %default)')
    parser.add_option('--cc', default='gcc',
                      help='Set the target compiler type (default %default)')
    parser.add_option('--cc-bin', default=None,
                      help='Set path to compiler')
    parser.add_option('--root-dir', metavar='D', default='.',
                      help='Set directory to execute from (default %default)')
    parser.add_option('--make-tool', metavar='TOOL', default='make',
                      help='Specify tool to run to build source (default %default)')
    parser.add_option('--cpu', default=None,
                      help='Specify a target CPU platform')
    parser.add_option('--with-debug', action='store_true', default=False,
                      help='Include debug information')
    parser.add_option('--amalgamation', action='store_true', default=False,
                      help='Build via amalgamation')
    parser.add_option('--disable-shared', action='store_true', default=False,
                      help='Disable building shared libraries')
    parser.add_option('--branch', metavar='B', default=None,
                      help='Specify branch being built')
    parser.add_option('--add-travis-folds', action='store_true', default=False,
                      help='Add fold markers for Travis UI')
    parser.add_option('--dry-run', action='store_true', default=False,
                      help='Just show commands to be executed')
    # Default parallelism is detected from the host at parse time.
    parser.add_option('--build-jobs', metavar='J', default=get_concurrency(),
                      help='Set number of jobs to run in parallel (default %default)')
    parser.add_option('--compiler-cache', default=None,
                      help='Set a compiler cache to use (ccache, clcache)')
    parser.add_option('--pkcs11-lib', default=None,
                      help='Set PKCS11 lib to use for testing')
    parser.add_option('--with-python3', dest='use_python3', action='store_true', default=None,
                      help='Enable using python3')
    parser.add_option('--without-python3', dest='use_python3', action='store_false',
                      help='Disable using python3')
    return parser.parse_args(args)
def BitmapFromImage(image):
    """
    Compatibility shim mirroring the classic ``wx.BitmapFromImage`` factory;
    simply forwards to the ``wx.Bitmap(wx.Image)`` constructor.
    """
    bitmap = Bitmap(image)
    return bitmap
def load_coco_name(path):
    """Load class labels from a coco.names file.

    :param path: path to the label file, one label per line.
    :return: dict mapping 0-based line index -> stripped label string.
    """
    with open(path, 'rt') as handle:
        return {index: label.strip() for index, label in enumerate(handle)}
def qsat(T, p) :
    """
    Saturation specific humidity over water.
    Derived variable name: qsat
    Parameters
    ----------
    T : numpy array or xarray DataArray
        Temperature. (K)
    p : numpy array or xarray DataArray
        Pressure (Pa).
    Returns
    -------
    qs : numpy array or xarray DataArray
        Saturation specific humidity (kg/kg) over water.
    """
    es = esat(T)
    # Enhancement factor for moist air applied to the pure-water saturation
    # vapour pressure -- presumably an empirical fit; TODO confirm the
    # source of these coefficients.
    fsubw = 1.0 + 1.0E-8 * p * (4.5 + 6.0E-4 * (T - tc.freeze_pt) * (T - tc.freeze_pt) )
    es = es * fsubw
    qs = e_p_to_q(es, p)
    # Name the result when operating on xarray objects.
    if type(qs) is xr.core.dataarray.DataArray:
        qs.name = 'qsat'
    return qs
def extract_features(df, action):
    """
    Extract features for training and prediction.

    :param df: DataFrame of samples (train and test combined)
    :param action: str name of the label column
    :return:
        train_x: DataFrame of training features
        train_y: Series of training labels
        test_x: DataFrame of test features
        test_y: Series of test labels
        (split on ``date_``: the last day is the test set)
    """
    # Dense feature list must be re-initialised for every training task.
    DENSE_FEATURE_COLUMNS = ['videoplayseconds']
    # 1. Feature processing
    # dense
    df.fillna(value={f: 0.0 for f in DENSE_FEATURE_COLUMNS}, inplace=True)
    df[DENSE_FEATURE_COLUMNS] = np.log(1.0 + df[DENSE_FEATURE_COLUMNS])  # log-smoothing
    mms = MinMaxScaler(feature_range=(0, 1))
    df[DENSE_FEATURE_COLUMNS] = mms.fit_transform(df[DENSE_FEATURE_COLUMNS])  # min-max normalisation
    # One-hot columns are a kind of dense feature too; they just skip the
    # smoothing/normalisation steps above.
    for col in ONE_HOT_COLUMNS:
        df[col] += 1
        df.fillna(value={col: 0}, inplace=True)
        encoder = OneHotEncoder(sparse=False)
        tmp = encoder.fit_transform(df[[col]])
        for idx in range(tmp.shape[1]):
            DENSE_FEATURE_COLUMNS.append(str(col) + '_' + str(idx))
            df[str(col) + '_' + str(idx)] = tmp[:, idx]
    # Cast dense features to float32.
    df[DENSE_FEATURE_COLUMNS] = df[DENSE_FEATURE_COLUMNS].astype('float32')
    # varlen sparse
    df = df.merge(FEED_TAG, on=['feedid'], how='left')
    df = df.merge(FEED_KEYWORD, on=['feedid'], how='left')
    # sparse
    for col in SPARSE_FEATURE_COLUMNS:
        if col == 'userid':
            pass
        elif col == 'feedid':
            df[col] = df[col].apply(lambda x: FEEDID_MAP.get(x, 0))
        elif col == 'feed':
            df[col] = df[col].apply(lambda x: FEED_MAP.get(x, 0))
        elif col == 'authorid':
            pass
        else:
            df[col] += 1  # reserve 0 for unknown values
            df.fillna(value={col: 0}, inplace=True)
            le = LabelEncoder()
            df[col] = le.fit_transform(df[col])
    # 2. Assemble formatted output
    day = STAGE_END_DAY['test']
    train_df, test_df = df.loc[df.date_ != day, :], df.loc[df.date_ == day, :]
    feature_columns = DENSE_FEATURE_COLUMNS + SPARSE_FEATURE_COLUMNS + \
                      VARLEN_SPARSE_FEATURE_COLUMNS + list(WEIGHT_NAME.values())
    train_x, train_y = train_df[feature_columns], train_df[action]
    test_x, test_y = test_df[feature_columns], test_df[action]
    return train_x, train_y, test_x, test_y
def quick_sort(input_list):
    """Quick sort.

    Validates that the input is a list/tuple of numbers, then sorts a copy
    via the module-level ``_quicksort`` helper (the input is not mutated).

    :raises ValueError: if the input is not a list/tuple, or contains
        non-numeric items.
    """
    if not isinstance(input_list, (list, tuple)):
        raise ValueError('input takes list/tuple only')
    items = list(input_list)
    if not all(isinstance(item, (int, float)) for item in items):
        raise ValueError('all items in list must be a number')
    return _quicksort(items)
from typing import List
def validate_execution_order(relations: List[RelationDescription], keep_going=False):
    """
    Make sure we can build an execution order.
    We'll catch an exception and set a flag if the keep_going option is true.

    Returns the relations ordered by dependency, or the input unchanged when
    ordering failed and keep_going is set.
    """
    try:
        ordered_relations = etl.relation.order_by_dependencies(relations)
    except ETLConfigError:
        if keep_going:
            # Record the failure globally so the run can report an error at
            # the end, but keep processing with the unordered input.
            _error_occurred.set()
            logger.exception("Failed to determine evaluation order, proceeding as requested:")
            return relations
        else:
            raise
    return ordered_relations
def now():
    """Returns a java.util.Date object that represents the current time
    according to the local system clock.

    NOTE: ``Date`` (java.util.Date) is provided by the enclosing
    Jython/scripting environment rather than imported in this module.

    Returns:
        Date: A new date, set to the current date and time.
    """
    return Date()
def create_session_factory(session_id_store, backend_store, *, loop=None):
    """Create a new session factory backed by two stores.

    ``session_id_store`` holds session identifiers and ``backend_store``
    holds the session payloads; ``loop`` is an optional event loop.
    """
    return _SessionFactory(
        session_id_store=session_id_store,
        backend_store=backend_store,
        loop=loop,
    )
import os
async def get_mnemonic(strict=False):
    """Attempt to gather a mnemonic from one of the available sources.

    Order of precedence: the CAPSULE_MNEMONIC environment variable, then the
    ``deploy_info.mnemonic`` entry of the config file.

    Args:
        strict (bool, optional): When set to true, if no mnemonic is found an
            exception is raised. Defaults to False.

    Returns:
        str: The mnemonic found either in the env or in the config file,
        or None when nothing is found and ``strict`` is False.
    """
    if os.getenv("CAPSULE_MNEMONIC", False):
        return os.environ["CAPSULE_MNEMONIC"]
    config = await get_config()
    file_mnemonic = config.get("deploy_info", {}).get("mnemonic", False)
    if file_mnemonic:
        return file_mnemonic
    if strict:
        raise Exception("No Mnemonic was found either in the specified config file or in the environment. Strict mode is set to true")
    return None
def process(callable_, *args, **kwargs):
    """
    Submit a callable to a background process.
    Return a proxy object for the future return value.
    NOTE: Use only, if you really need control over the type of background execution.
    """
    # 'cpu' routes the task to the process-backed (CPU-bound) executor.
    return _submit(callable_, 'cpu', *args, **kwargs)
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;"
        (otherwise "hello" in italic is displayed).
    """
    # BUG FIX: the replacement strings had been entity-decoded at some point
    # (e.g. .replace("<", "<")), turning every substitution into a no-op.
    # Restore the intended HTML entity targets.
    if isinstance(string, basestring):  # Python 2 str/unicode check
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
import os
def load_node_handle(fname, mode="r"):
    """
    Open a conduit IO handle on ``fname`` without reading it into memory.

    :param fname: path to the conduit file.
    :param mode: open mode passed to the handle (default "r").
    :raises IOError: if ``fname`` does not exist.
    """
    if not os.path.exists(fname):
        raise IOError("No such file: " + fname)
    options = conduit.Node()
    options["mode"] = mode
    handle = conduit.relay.io.IOHandle()
    handle.open(fname, options=options)
    return handle
async def upload_data_generation_file(
        background_tasks: BackgroundTasks,
        doc: UploadFile = File(...),
        current_user: User = Depends(auth.get_current_user_and_bot)
):
    """
    Uploads document for training data generation and triggers event for intent creation
    """
    # Reject the upload if another generation run is in progress or the bot
    # has exhausted its data-generation quota (these raise on failure).
    TrainingDataGenerationProcessor.is_in_progress(current_user.get_bot())
    TrainingDataGenerationProcessor.check_data_generation_limit(current_user.get_bot())
    file_path = await Utility.upload_document(doc)
    # Persist an INITIATED status record before kicking off the async event.
    TrainingDataGenerationProcessor.set_status(bot=current_user.get_bot(),
                                               user=current_user.get_user(), status=EVENT_STATUS.INITIATED.value,
                                               document_path=file_path)
    # NOTE(review): create_access_token appears to return bytes here (hence
    # decode('utf8')); confirm against the auth implementation.
    token = auth.create_access_token(data={"sub": current_user.email})
    background_tasks.add_task(
        Utility.trigger_data_generation_event, current_user.get_bot(), current_user.get_user(), token.decode('utf8')
    )
    return {"message": "File uploaded successfully and training data generation has begun"}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.