| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
import math
def fnCalculate_Bistatic_Coordinates(a,B):
"""
Calculate the coordinates of the target in the bistatic plane
A,B,C = angles in the triangle
a,b,c = length of the side opposite the angle
Created: 22 April 2017
"""
u = a*math.cos(B)
v = a*math.sin(B)
return u,v
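# Usage sketch (not part of the original snippet): with side length a = 5.0 and
# angle B = 60 degrees, the target sits at roughly (u, v) = (2.5, 4.33).
u, v = fnCalculate_Bistatic_Coordinates(5.0, math.radians(60))
print(round(u, 2), round(v, 2))  # 2.5 4.33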
|
cc1dce6ef0506b987e42e3967cf36ea7b46a30d7
| 3,641,400
|
def _fn_lgamma_ ( self , b = 1 ) :
""" Gamma function: f = log(Gamma(ab))
>>> f =
>>> a = f.lgamma ( )
>>> a = f.lgamma ( b )
>>> a = lgamma ( f )
"""
return _fn_make_fun_ ( self ,
b ,
Ostap.MoreRooFit.LGamma ,
'lgamma_%s_%s' )
|
62183327967840e26dfc009c2357de2c31171082
| 3,641,401
|
import numpy as np
def convolve_smooth(x, win=10, mode="same"):
"""Smooth data using a given window size, in units of array elements, using
the numpy.convolve function."""
return np.convolve(x, np.ones((win,)), mode=mode) / win
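# Usage sketch (relies on the NumPy import added above): smooth a noisy signal with a
# 5-sample moving average. With mode="same" the edges are biased low because fewer than
# `win` samples contribute there.
t = np.linspace(0, 2 * np.pi, 200)
noisy = np.sin(t) + 0.1 * np.random.randn(200)
smoothed = convolve_smooth(noisy, win=5)
print(smoothed.shape)  # (200,)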
|
b41edf8c0d58355e28b507a96b129c4720412a81
| 3,641,402
|
def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False, Lm=None):
"""
Multioutput conditional for an independent kernel and shared inducing features.
Same behaviour as conditional with non-multioutput kernels.
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: M x M
- Kuf: M x N
- Kff: N or N x N
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
- See the multioutput notebook for more information about the multioutput framework.
Parameters
----------
:param Xnew: data matrix, size N x D.
:param f: data matrix, P x M or P x M x N
:param full_cov: return the covariance between the datapoints
:param full_output_cov: return the covariance between the outputs.
Note: as we are using an independent kernel these covariances will be zero.
:param q_sqrt: matrix of standard-deviations or Cholesky matrices,
size P x M or P x M x M.
:param white: boolean of whether to use the whitened representation
:return:
- mean: N x P
- variance: N x P, P x N x N, N x P x P or N x P x N x P
Please see `gpflow.conditional._expand_independent_outputs` for more information
about the shape of the variance, depending on `full_cov` and `full_output_cov`.
"""
logger.debug("Conditional: SharedIndependentMof - SharedIndependentMok")
f_ndims = f.shape.ndims
assert f_ndims is not None
Kmn = Kuf(feat, kern, Xnew) # M x N
Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) if Lm is None else None # M x M
if full_cov:
Knn = kern.K(Xnew, full_output_cov=False)[0, ...] # N x N
else:
Knn = kern.Kdiag(Xnew, full_output_cov=False)[..., 0] # N
_f = tf.transpose(f) if f_ndims == 2 else f
fmean, fvar = base_conditional(Kmn, Kmm, Knn, _f, full_cov=full_cov, q_sqrt=q_sqrt, white=white, Lm=Lm)
if f_ndims == 3: fmean = tf.transpose(fmean)
if q_sqrt is None:
if full_cov:
fvar = tf.tile(fvar[None, :, :], [tf.shape(f)[0], 1, 1])
else:
fvar = tf.tile(fvar[:, None], [1, tf.shape(f)[0]])
return fmean, _expand_independent_outputs(fvar, full_cov, full_output_cov)
|
5158eb93f30fdac14bafb959e63cd8ad26fc45a7
| 3,641,403
|
from numpy import abs, array, eye, maximum, mean, newaxis, ones, reshape, sum, zeros
def descent(x0, fn, iterations=1000, gtol=10**(-6), bounds=None, limit=0, args=()):
"""A gradient descent optimisation solver.
Parameters
----------
x0 : array-like
n x 1 starting guess of x.
fn : obj
The objective function to minimise.
iterations : int
Maximum number of iterations.
gtol : float
Mean residual of the gradient for convergence.
bounds : list
List of lower and upper bound pairs [lb, ub], None=unconstrained.
limit : float
Value of the objective function for which to terminate optimisation.
args : tuple
Additional parameters needed for fn.
Returns
-------
float
Final value of the objective function.
array
Values of x at the found local minimum.
"""
r = 0.5
c = 0.0001
e = 10**(-6)  # finite-difference step; `e` is not defined in the original snippet, assumed small here
n = len(x0)
x0 = reshape(array(x0), (n, 1))
if bounds:
bounds = array(bounds)
lb = bounds[:, 0][:, newaxis]
ub = bounds[:, 1][:, newaxis]
else:
lb = ones((n, 1)) * -10**20
ub = ones((n, 1)) * +10**20
zn = zeros((n, 1))
g = zeros((n, 1))
v = eye(n) * e
def phi(x, mu, *args):
p = mu * (sum(maximum(lb - x, zn)) + sum(maximum(x - ub, zn)))**2
return fn(x, *args) + p
i = 0
mu = 1
while i < iterations:
p0 = phi(x0, mu, *args)
for j in range(n):
vj = v[:, j][:, newaxis]
g[j, 0] = (phi(x0 + vj, mu, *args) - p0) / e
D = sum(-g * g)
a = 1
x1 = x0 - a * g
while phi(x1, mu, *args) > p0 + c * a * D:
a *= r
x1 = x0 - a * g
x0 -= a * g
mu *= 10
res = mean(abs(g))
i += 1
f1 = phi(x0, mu, *args)
if f1 < limit:
break
if res < gtol:
break
print('Iteration: {0} fopt: {1:.3g} gres: {2:.3g} step: {3}'.format(i, f1, res, a))
return f1, x0
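# Usage sketch (illustrative only): minimise f(x) = sum((x - 3)^2) from the origin.
# The objective receives x as an (n, 1) NumPy array and must return a scalar; the
# solver prints its progress on each iteration.
fopt, xopt = descent([0.0, 0.0], lambda x: float(((x - 3.0) ** 2).sum()), iterations=30)
print(fopt, xopt.ravel())  # fopt close to 0, xopt close to [3, 3]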
|
ec132e7857cf4a941c54fc5db9085bdf013fb7a2
| 3,641,404
|
def count_teams_for_party(party_id: PartyID) -> int:
"""Return the number of orga teams for that party."""
return db.session \
.query(DbOrgaTeam) \
.filter_by(party_id=party_id) \
.count()
|
07373325dd7d7ab21ef0cb1145d37b2d85292358
| 3,641,405
|
def num_series(datetime_series) -> pd.Series:
"""Return a datetime series with numeric values."""
return datetime_series(LENGTH)
|
4d208bfbae5f3e7263663d06102aa0b290f4fd4e
| 3,641,406
|
import re
import numpy as np
def obtain_ranks(outputs, targets, mode=0):
"""
outputs : tensor of size (batch_size, 1), required_grad = False, model predictions
targets : tensor of size (batch_size, ), required_grad = False, labels
Assume to be of format [1, 0, ..., 0, 1, 0, ..., 0, ..., 0]
mode == 0: rank from distance (smaller is preferred)
mode == 1: rank from similarity (larger is preferred)
"""
if mode == 0:
calculate_ranks = calculate_ranks_from_distance
else:
calculate_ranks = calculate_ranks_from_similarities
all_ranks = []
prediction = outputs.cpu().numpy().squeeze()
label = targets.cpu().numpy()
sep = np.array([0, 1], dtype=label.dtype)
# fast way to find subarray indices in a large array, c.f. https://stackoverflow.com/questions/14890216/return-the-indexes-of-a-sub-array-in-an-array
end_indices = [(m.start() // label.itemsize)+1 for m in re.finditer(sep.tostring(), label.tostring())]
end_indices.append(len(label)+1)
start_indices = [0] + end_indices[:-1]
for start_idx, end_idx in zip(start_indices, end_indices):
distances = prediction[start_idx: end_idx]
labels = label[start_idx:end_idx]
positive_relations = list(np.where(labels == 1)[0])
ranks = calculate_ranks(distances, positive_relations)
all_ranks.append(ranks)
return all_ranks
|
72fc737d72fe0d6d3ff4e08a5a16acf05e0e88cb
| 3,641,407
|
from typing import Dict
from typing import Any
import optuna
from torch import nn
def sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:
"""
Sampler for A2C hyperparams.
"""
lr_schedule = trial.suggest_categorical("lr_schedule", ["linear", "constant"])
learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
n_steps = trial.suggest_categorical("n_steps", [4, 8, 16, 32, 64, 128])
gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
ent_coef = trial.suggest_loguniform("ent_coef", 0.0000001, 0.1)
vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
normalize_advantage = trial.suggest_categorical("normalize_advantage", [False, True])
max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
# Toggle PyTorch RMS Prop (different from TF one, cf doc)
use_rms_prop = trial.suggest_categorical("use_rms_prop", [False, True])
# Uncomment for gSDE (continuous actions)
#log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
#ortho_init = trial.suggest_categorical("ortho_init", [False, True])
# Uncomment for network architecture setting
#net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
# sde_net_arch = trial.suggest_categorical("sde_net_arch", [None, "tiny", "small"])
# full_std = trial.suggest_categorical("full_std", [False, True])
# activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])
if lr_schedule == "linear":
learning_rate = linear_schedule(learning_rate)
# net_arch = {
# "small": [dict(pi=[64, 64], vf=[64, 64])],
# "medium": [dict(pi=[256, 256], vf=[256, 256])],
# }[net_arch]
activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]
return {
"learning_rate": learning_rate,
"n_steps": n_steps,
"gae_lambda": gae_lambda,
"ent_coef": ent_coef,
"vf_coef": vf_coef,
"max_grad_norm": max_grad_norm,
"use_rms_prop": use_rms_prop,
"normalize_advantage": normalize_advantage,
"policy_kwargs": dict(
#log_std_init=log_std_init,
#net_arch=net_arch,
activation_fn=activation_fn
#ortho_init=ortho_init,
),
}
|
f9f966f3c41a32a15253ba612d94e1254a586e86
| 3,641,408
|
def location_parser(selected_variables, column):
"""
Parse the location variable by creating a list of start/stop position pairs.
Split each entry on the hyphen between the start/stop positions and convert
all elements to integers.
Parameters:
selected_variables (DataFrame): The dataframe containing the location of
the variables contained in the cps_selected_variables file
column (str): The name of the column containing the start/stop positions
Returns:
fields: A list of [start, stop] integer pairs
"""
fields = []
for field in selected_variables[column]:
field = field.split('-')
field = [int(i) for i in field]
fields.append(field)
return fields
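# Usage sketch with a hypothetical DataFrame; each "start-stop" string becomes a pair
# of integers (note the pairs come back as lists, not tuples).
import pandas as pd
demo = pd.DataFrame({'positions': ['1-15', '16-17', '856-859']})
print(location_parser(demo, 'positions'))  # [[1, 15], [16, 17], [856, 859]]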
|
106f669269276c37652e92e62eb8c2c52dfe7637
| 3,641,409
|
import torch
import math
def get_qmf_bank(h, n_band):
"""
Modulates an input prototype filter into a bank of
cosine-modulated filters.
Parameters
----------
h: torch.Tensor
prototype filter
n_band: int
number of sub-bands
"""
k = torch.arange(n_band).reshape(-1, 1)
N = h.shape[-1]
t = torch.arange(-(N // 2), N // 2 + 1)
p = (-1)**k * math.pi / 4
mod = torch.cos((2 * k + 1) * math.pi / (2 * n_band) * t + p)
hk = 2 * h * mod
return hk
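# Usage sketch: modulate a (hypothetical) odd-length prototype into 16 sub-band filters.
# The prototype length must match len(t) above, so an odd N is assumed here.
h = torch.hann_window(255, periodic=False)
filter_bank = get_qmf_bank(h, 16)
print(filter_bank.shape)  # torch.Size([16, 255])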
|
87e8cf3b0d85a6717cce9dc09f7a0a3e3581e498
| 3,641,410
|
import math
def compare_one(col, cons_aa, aln_size, weights, aa_freqs, pseudo_size):
"""Compare column amino acid frequencies to overall via G-test."""
observed = count_col(col, weights, aa_freqs, pseudo_size)
G = 2 * sum(obsv * math.log(obsv / aa_freqs.get(aa, 0.0))
for aa, obsv in observed.items())
pvalue = chisqprob(G, 19)
return pvalue
|
910431062ac9ddef467d4818d3960385a2d4392b
| 3,641,411
|
def open(uri, mode='a', eclass=_eclass.manifest):
"""Open a Blaze object via an `uri` (Uniform Resource Identifier).
Parameters
----------
uri : str
Specifies the URI for the Blaze object. It can be a regular file too.
The URL scheme indicates the storage type:
* carray: Chunked array
* ctable: Chunked table
* sqlite: SQLite table (the URI 'sqlite://' creates in-memory table)
If no URI scheme is given, carray is assumed.
mode : the open mode (string)
Specifies the mode in which the object is opened. The supported
values are:
* 'r' for read-only
* 'w' for emptying the previous underlying data
* 'a' for allowing read/write on top of existing data
Returns
-------
out : an Array or Table object.
"""
ARRAY = 1
TABLE = 2
uri = urlparse(uri)
path = uri.netloc + uri.path
parms = params(storage=path)
if uri.scheme == 'carray':
source = CArraySource(params=parms)
structure = ARRAY
elif uri.scheme == 'ctable':
source = CTableSource(params=parms)
structure = TABLE
elif uri.scheme == 'sqlite':
# Empty path means memory storage
parms = params(storage=path or None)
source = SqliteSource(params=parms)
structure = TABLE
else:
# Default is to treat the URI as a regular path
parms = params(storage=path)
source = CArraySource(params=parms)
structure = ARRAY
# Don't want a deferred array (yet)
# return NDArray(source)
if structure == ARRAY:
if eclass is _eclass.manifest:
return Array(source)
elif eclass is _eclass.delayed:
return NDArray(source)
elif structure == TABLE:
if eclass is _eclass.manifest:
return Table(source)
elif eclass is _eclass.delayed:
return NDTable(source)
|
c0a5069f5d7f39c87aae5af361df86b6f4fc4189
| 3,641,412
|
import pandas as pd
def create_df(dic_in, cols, input_type):
"""
Convert JSON output from OpenSea API to pandas DataFrame
:param dic_in: JSON output from OpenSea API
:param cols: Keys in JSON output from OpenSea API
:param input_type: <TBD> save the columns with dictionaries as entries separately
:return: Cleaned DataFrame
"""
# First pass create dataframe where some of the values are a dictionary with multiple values
df = pd.DataFrame(columns=cols)
for col in cols:
data = []
for row in dic_in:
data.append(row.get(col))
df[col] = data
# Second Pass get rid of columns with dictionaries, for now just forgetting about dictionary
df_2 = df.copy()
for col in df_2.columns:
if col in map_dic:
for df_index, df_row in df.iterrows():
embed_dic_type = map_dic[col]
df_2.at[df_index, col] = map_replace_dic[embed_dic_type]
return df_2
|
7b6a9445c956cc5d2850516d4c7dc2208b7391f7
| 3,641,413
|
def file_updated_at(file_id, db_cursor):
"""
Update the last time the file was checked
"""
db_cursor.execute(queries.file_updated_at, {'file_id': file_id})
db_cursor.execute(queries.insert_log, {'project_id': settings.project_id, 'file_id': file_id,
'log_area': 'file_updated_at',
'log_text': db_cursor.query.decode("utf-8")})
return True
|
bb0ec859c249b96e3ed066c3664e792100f5f23c
| 3,641,414
|
def action_to_upper(action):
"""
action to upper receives an action in pddl_action_representation, and returns it in upper case.
:param action: A action in PddlActionRepresentation
:return: PddlActionRepresentation: The action in upper case
"""
if action:
action.name = action.name.upper()
action.types = [type.upper() for type in action.types]
action.predicates = [pred.upper() for pred in action.predicates]
action.requirements = [req.upper() for req in action.requirements]
action.action = action.action.upper()
return action
|
e9266ad79d60a58bf61d6ce81284fa2accbb0b8d
| 3,641,415
|
from lvmspec.qa.qa_frame import QA_Frame
import os
def load_qa_frame(filename, frame=None, flavor=None):
""" Load an existing QA_Frame or generate one, as needed
Args:
filename: str
frame: Frame object, optional
flavor: str, optional
Type of QA_Frame
Returns:
qa_frame: QA_Frame object
"""
log=get_logger()
if os.path.isfile(filename): # Read from file, if it exists
qaframe = read_qa_frame(filename)
log.info("Loaded QA file {:s}".format(filename))
# Check against frame, if provided
if frame is not None:
for key in ['camera','expid','night','flavor']:
assert getattr(qaframe, key) == frame.meta[key.upper()]
else: # Init
if frame is None:
log.error("QA file {:s} does not exist. Expecting frame input".format(filename))
qaframe = QA_Frame(frame)
# Set flavor?
if flavor is not None:
qaframe.flavor = flavor
# Return
return qaframe
|
5c8b7d279d98ddb76c6ca54a53779d88d27dab3b
| 3,641,416
|
from typing import Type
from typing import Dict
from typing import Any
def generate_model_example(model: Type["Model"], relation_map: Dict = None) -> Dict:
"""
Generates example to be included in schema in fastapi.
:param model: ormar.Model
:type model: Type["Model"]
:param relation_map: dict with relations to follow
:type relation_map: Optional[Dict]
:return: dict with example values
:rtype: Dict[str, int]
"""
example: Dict[str, Any] = dict()
relation_map = (
relation_map
if relation_map is not None
else translate_list_to_dict(model._iterate_related_models())
)
for name, field in model.Meta.model_fields.items():
populates_sample_fields_values(
example=example, name=name, field=field, relation_map=relation_map
)
to_exclude = {name for name in model.Meta.model_fields}
pydantic_repr = generate_pydantic_example(pydantic_model=model, exclude=to_exclude)
example.update(pydantic_repr)
return example
|
1aafb069ff129453f9012de79d09c326224ceb5b
| 3,641,417
|
def compare_folder(request):
""" Creates the compare folder path `dione-sr/tests/data/test_name/compare`.
"""
return get_test_path('compare', request)
|
b78bc261373d47bd3444c24c54c57a600a3855ad
| 3,641,418
|
def _get_param_combinations(lists):
"""Recursive function which generates a list of all possible parameter values"""
if len(lists) == 1:
list_p_1 = [[e] for e in lists[0]]
return list_p_1
list_p_n_minus_1 = _get_param_combinations(lists[1:])
list_p_1 = [[e] for e in lists[0]]
list_p_n = [p_1 + p_n_minus_1 for p_1 in list_p_1 for p_n_minus_1 in list_p_n_minus_1]
return list_p_n
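# Quick sanity check (illustrative): the result is the Cartesian product of the input lists.
combos = _get_param_combinations([[1, 2], ['a', 'b'], [True]])
print(combos)  # [[1, 'a', True], [1, 'b', True], [2, 'a', True], [2, 'b', True]]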
|
b4903bea79aebeabf3123f03de986058a06a21f4
| 3,641,419
|
import numpy as np
def system_mass_spring_dumper():
"""Design example for a mass-spring-damper system."""
# define the system
m = 1.0
k = 1.0
c = 1.0
A = np.array([
[0.0, 1.0],
[-k/m, -c/m]
])
B = np.array([
[0],
[1/m]
])
C = np.eye(2)
D = np.zeros((2,1),dtype=float)
W = np.diag([1.0, 1.0])
S1, S2, A_, B_, T = optimal_hyperplane_vector(A, B, W)
S = np.hstack((S1, S2))
x, u = initialize_system(A, B)
x[0] = 0.0
x[1] = 10.0
# define the controller gain (note: this reuses the name k, overwriting the spring constant)
k = 10
return C, D, S, k, x, u, A_, B_, T
|
8a054753d7bbaa06b7217ce98d38074122d41f32
| 3,641,420
|
def explore_voxel(
start_pos: tuple,
masked_atlas: ma.MaskedArray,
*,
count: int = -1,
) -> int:
"""Explore a given voxel.
Ask Dimitri for more details.
Seems like this is a BFS until a voxel with a different value is
found or the maximal number of new voxels were seen.
Parameters
----------
start_pos
A triplet with the (x, y, z) coordinates of the starting voxel.
masked_atlas
A masked 3D array with the volume data.
count
Maximal number of iterations. A negative value means no limit on
the number of iterations.
Returns
-------
int
The value of some voxel in the data volume.
"""
logger.debug("exploring voxel %s", start_pos)
if not isinstance(start_pos, tuple):
raise ValueError(
f"The starting position must be a tuple (got {type(start_pos)})"
)
def in_bounds(pos_):
"""Check that the position is within the atlas bounds."""
return all(0 <= x < x_max for x, x_max in zip(pos_, masked_atlas.shape))
# The order in which the neighbours are explored probably matters
deltas = [(-1, 0, 0), (0, -1, 0), (1, 0, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]
start_value = masked_atlas[start_pos]
seen = {start_pos}
queue = deque([start_pos])
while len(queue) > 0 and count != 0:
pos = queue.popleft()
value = masked_atlas[pos]
# Found a different value?
if value != start_value and value is not ma.masked:
return value
# BFS step
for dx, dy, dz in deltas:
new_pos = (pos[0] + dx, pos[1] + dy, pos[2] + dz)
if in_bounds(new_pos) and new_pos not in seen:
seen.add(new_pos)
queue.append(new_pos)
count -= 1
return start_value
|
d2f73562497a325a42a0322f3aba9be995809a24
| 3,641,421
|
import requests
from datetime import date
def get_green_button_xml(
session: requests.Session, start_date: date, end_date: date
) -> str:
"""Download Green Button XML."""
response = session.get(
f'https://myusage.torontohydro.com/cassandra/getfile/period/custom/start_date/{start_date:%m-%d-%Y}/to_date/{end_date:%m-%d-%Y}/format/xml'
)
response.raise_for_status()
return response.text
|
2ed71202a40214b75007db7b16d5c1806ae35406
| 3,641,422
|
from calendar import timegm
from time import strptime
def calculateSecFromEpoch(date,hour):
"""
Calculates seconds from EPOCH
"""
months={
'01':'Jan',
'02':'Feb',
'03':'Mar',
'04':'Apr',
'05':'May',
'06':'Jun',
'07':'Jul',
'08':'Aug',
'09':'Sep',
'10':'Oct',
'11':'Nov',
'12':'Dec'
}
year=YEAR_PREFIX+date[0:2]
month=months[date[2:4]]
day=date[4:6]
hourF=hour[0:2]+':'+hour[2:4]+':'+hour[4:6]
dateFormatted=month+' '+day+','+' '+year+' @ '+hourF+' '+TIME_ZONE
secs=timegm(strptime(dateFormatted, '%b %d, %Y @ %H:%M:%S '+TIME_ZONE))
return secs
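# Usage sketch; YEAR_PREFIX and TIME_ZONE are module-level globals in the original code,
# assumed here to be '20' and 'UTC'. Date '200101' at hour '000000' is then
# 2020-01-01 00:00:00 UTC, i.e. 1577836800 seconds since the epoch.
YEAR_PREFIX = '20'
TIME_ZONE = 'UTC'
print(calculateSecFromEpoch('200101', '000000'))  # 1577836800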
|
29adf78dbe795c70cb84f66b1dc249674869c417
| 3,641,423
|
import numpy as np
from scipy import stats
def star_noise_simulation(Variance, Pk, nongaussian = False):
"""simulates star + noise signal, Pk is hyperprior on star variability and flat at high frequencies which is stationary noise"""
Pk_double = np.concatenate((Pk, Pk))
phases = np.random.uniform(0, 2 * np.pi, len(Pk))
nodes0 = np.sqrt(Pk_double) * np.concatenate((np.cos(phases), np.sin(phases)))
if nongaussian:
flux= flux_nodes(nodes0, len(Variance))
#average, sigma = prepare_data.normalization(flux)
#flux /= sigma
mask = np.random.random(len(flux)) < distribution_parameters[0]
outliers = stats.nct.rvs(*distribution_parameters[1:], size=np.sum(mask))
flux[mask] = outliers
return flux / Variance
else:
return (flux_nodes(nodes0, len(Variance))) / Variance
|
5ccc89f455b7347c11cac36abead172b352f7b9c
| 3,641,424
|
from datetime import datetime
import time
def get_seq_num():
"""
Simple helper for creating sequence numbers.
Truncate epoch time to 7 digits which is about one month
"""
t = datetime.now()
mt = time.mktime(t.timetuple())
nextnum = int(mt)
retval = nextnum % 10000000
return retval
|
34a2b3a7082d061987c7a0b67c91df040b86938c
| 3,641,425
|
import logging
from os.path import abspath, isdir, isfile
def get_packages_for_file_or_folder(source_file, source_folder):
"""
Collects all the files based on given parameters. Exactly one of the parameters has to be specified.
If source_file is given, it will return with a list containing source_file.
If source_folder is given, it will search recursively all files in the directory and return the list of found files.
"""
if not bool(source_folder) ^ bool(source_file):
log('Source_folder XOR source_file has to be specified, exactly one of them.', logging.ERROR,
source_file=source_file, source_folder=source_folder)
return ()
# validate path parameters, collect packages
entries = ()
if source_file:
source = abspath(source_file)
if isfile(source):
entries = [source]
else:
log('Source file does not exist', logging.ERROR)
else:
source = abspath(source_folder)
if isdir(source):
entries = get_files(source)
else:
log('Source folder does not exist', logging.ERROR)
return entries
|
fc047dd10dfd18fc8efecb240d06aeb91686c0cb
| 3,641,426
|
def sanitize_tag(tag: str) -> str:
"""Clean tag by replacing empty spaces with underscore.
Parameters
----------
tag: str
Returns
-------
str
Cleaned tag
Examples
--------
>>> sanitize_tag(" Machine Learning ")
"Machine_Learning"
"""
return tag.strip().replace(" ", "_")
|
40ac78846f03e8b57b5660dd246c8a15fed8e008
| 3,641,427
|
import numpy as np
from scipy.special import i0, iv
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2.0 - 1.0)
if dim / 2.0 - 1.0 < 1e-15:
denom = np.power(2.0 * np.pi, dim / 2.0) * i0(kappa)
else:
denom = np.power(2.0 * np.pi, dim / 2.0) * iv(dim / 2.0 - 1.0, kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
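# Sanity check (illustrative): for dim = 3 the normalisation constant has the closed form
# C_3(kappa) = kappa / (4 * pi * sinh(kappa)), since I_{1/2}(k) = sqrt(2/(pi*k)) * sinh(k).
kappa = 2.0
print(np.isclose(_vmf_normalize(kappa, 3), kappa / (4 * np.pi * np.sinh(kappa))))  # True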
|
24d22469a572e7ff4b7e1c918fce7001731cec2a
| 3,641,428
|
import urllib.error
from flask import render_template, request  # assuming Flask, given the request.args and render_template usage
def twitter_map():
"""
Gets all the required information and returns the start page or map with
people locations depending on input
"""
# get arguments from url
account = request.args.get('q')
count = request.args.get('count')
if account and count:
# create map and add custom styles to html or display error
try:
new_map = create_map(account, count)
new_map += render_template('styles.html')
return new_map
except urllib.error.HTTPError:
return render_template('error.html', error='User was not found.')
else:
# render start page
return render_template('index.html')
|
54a37f91141e52d24f88214ea476a2f199c78674
| 3,641,429
|
def path_states(node):
"""The sequence of states to get to this node."""
if node in (cutoff, failure, None):
return []
return path_states(node.parent) + [node.state]
|
21ed5eb98eca0113dd5f446066cd10df73665f10
| 3,641,430
|
def find_named_variables(mapping):
"""Find correspondance between variable and relation and its attribute."""
var_dictionary = dict()
for relation_instance in mapping.lhs:
for i, variable in enumerate(relation_instance.variables):
name = relation_instance.relation.name
field = relation_instance.relation.fields[i]
if variable not in var_dictionary.keys():
var_dictionary.update({variable: []})
var_dictionary[variable].append((name, field))
else:
if (name, field) not in var_dictionary[variable]:
var_dictionary[variable].append((name, field))
return var_dictionary
|
0b9a78ca94b25e7a91fe88f0f15f8a8d408cb2fd
| 3,641,431
|
import urllib
def attribute_formatter(attribute):
""" translate non-alphabetic chars and 'spaces' to a URL applicable format
:param attribute: text string that may contain characters that are not URL-compatible (e.g. ' 무작위의')
:return: text string with riot API compatible url encoding (e.g. %20%EB%AC%B4%EC%9E%91%EC%9C%84%EC%9D%98)
"""
tempdict = {'': attribute}
formatted = urllib.parse.urlencode(tempdict)[1:].replace('+', '%20')
return formatted
|
6c6745a5cea9a3f6bcee8cbcedb7a1493372dc96
| 3,641,432
|
from typing import NoReturn
import os
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
if not os.path.exists("graphs"):
os.mkdir("graphs")
figs = []
for i, feature in enumerate(X.columns):
correlation = X[feature].cov(y) / (np.std(X[feature]) * np.std(y))
table = pd.DataFrame(dict(
feature=X[feature],
price=y
))
fig = px.scatter(table, x='feature', y='price',
title=f"Pearson Correlation "
f"between {feature} and price is {correlation}",
trendline="ols", trendline_color_override="red")
pio.write_image(fig, f"graphs/Pearson correlation between {feature} "
f"and price.png")
figs.append(fig)
return "./graphs/"
|
ec2b5116a6cf23b41e0009e5a1ad810daf23bc5f
| 3,641,433
|
import json
def maestro_splits():
"""
Get list of indices for each split. Stolen from my work on Perceptual
Evaluation of AMT Resynthesized.
Left here for reference.
"""
d = asmd.Dataset().filter(datasets=['Maestro'])
maestro = json.load(open(MAESTRO_JSON))
train, validation, test = [], [], []
for i in range(len(d)):
filename = d.paths[i][0][0][23:]
split = search_audio_filename_in_original_maestro(filename, maestro)
if split == "train":
train.append(i)
elif split == "validation":
validation.append(i)
elif split == "test":
test.append(i)
else:
raise RuntimeError(filename +
" not found in maestro original json")
return train, validation, test
|
119b033d3fd507b77bbb3d16d993237f8658b5f5
| 3,641,434
|
import numpy as np
def get_choice_selectivity(trials, perf, r):
"""
Compute d' for choice.
"""
N = r.shape[-1]
L = np.zeros(N)
L2 = np.zeros(N)
R = np.zeros(N)
R2 = np.zeros(N)
nL = 0
nR = 0
for n, trial in enumerate(trials):
if not perf.decisions[n]:
continue
stimulus = trial['epochs']['stimulus']
r_n = r[stimulus,n]
left_right = trial['left_right']
if left_right < 0:
L += np.sum(r_n, axis=0)
L2 += np.sum(r_n**2, axis=0)
nL += len(stimulus)
else:
R += np.sum(r_n, axis=0)
R2 += np.sum(r_n**2, axis=0)
nR += len(stimulus)
mean_L = L/nL
var_L = L2/nL - mean_L**2
mean_R = R/nR
var_R = R2/nR - mean_R**2
return -utils.div(mean_L - mean_R, np.sqrt((var_L + var_R)/2))
|
f33593ad06bf3c54c950eda562a93e348320a5e1
| 3,641,435
|
def author_productivity(pub2author_df, colgroupby = 'AuthorId', colcountby = 'PublicationId', show_progress=False):
"""
Calculate the total number of publications for each author.
Parameters
----------
pub2author_df : DataFrame, default None, Optional
A DataFrame with the author2publication information.
colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
colcountby : str, default 'PublicationId', Optional
The DataFrame column with Publication Ids. If None then the database 'PublicationId' is used.
Returns
-------
DataFrame
Productivity DataFrame with 2 columns: 'AuthorId', 'Productivity'
"""
# we can use show_progress to pass a label for the progress bar
if show_progress:
show_progress='Author Productivity'
newname_dict = zip2dict([str(colcountby)+'Count', '0'], ['Productivity']*2)
return groupby_count(pub2author_df, colgroupby, colcountby, count_unique=True, show_progress=show_progress).rename(columns=newname_dict)
|
15c56b22cc9d5014fe4dcfab8be37a9e4b0ef329
| 3,641,436
|
def smoothed_epmi(matrix, alpha=0.75):
"""
Performs smoothed epmi.
See smoothed_ppmi for more info.
Derived from this:
#(w,c) / #(TOT)
--------------
(#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)
==>
#(w,c) / #(TOT)
--------------
(#(w) * #(c)^a) / #(TOT)^(a+1))
==>
#(w,c)
----------
(#(w) * #(c)^a) / #(TOT)^a
==>
#(w,c) * #(TOT)^a
----------
#(w) * #(c)^a
"""
row_sum = matrix.sum(axis=1)
col_sum = matrix.sum(axis=0).power(alpha)
total = row_sum.sum(axis=0).power(alpha)[0, 0]
inv_col_sum = 1 / col_sum # shape (1,n)
inv_row_sum = 1 / row_sum # shape (n,1)
inv_col_sum = inv_col_sum * total
mat = matrix * inv_row_sum
mat = mat * inv_col_sum
return mat
|
e2f72c4169aee2f394445f42e4835f1b55f347c9
| 3,641,437
|
import six
def encode(input, errors='strict'):
""" convert from unicode text (with possible UTF-16 surrogates) to wtf-8
encoded bytes. If this is a python narrow build this will actually
produce UTF-16 encoded unicode text (e.g. with surrogates).
"""
# method to convert surrogate pairs to unicode code points permitting
# lone surrogate pairs (aka potentially ill-formed UTF-16)
def to_code_point(it):
hi = None
try:
while True:
c = ord(next(it))
if c >= 0xD800 and c <= 0xDBFF: # high surrogate
hi = c
c = ord(next(it))
if c >= 0xDC00 and c <= 0xDFFF: # paired
c = 0x10000 + ((hi - 0xD800) << 10) + (c - 0xDC00)
else:
yield hi
hi = None
yield c
except StopIteration:
if hi is not None:
yield hi
buf = six.binary_type()
for code in to_code_point(iter(input)):
if (0 == (code & 0xFFFFFF80)):
buf += six.int2byte(code)
continue
elif (0 == (code & 0xFFFFF800)):
buf += six.int2byte(((code >> 6) & 0x1F) | 0xC0)
elif (0 == (code & 0xFFFF0000)):
buf += six.int2byte(((code >> 12) & 0x0F) | 0xE0)
buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
elif (0 == (code & 0xFF300000)):
buf += six.int2byte(((code >> 18) & 0x07) | 0xF0)
buf += six.int2byte(((code >> 12) & 0x3F) | 0x80)
buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
buf += six.int2byte((code & 0x3F) | 0x80)
return buf, len(buf)
|
525199690f384304a72176bd1eaeeb1b9cb30880
| 3,641,438
|
def forgot_password(request, mobile=False):
"""Password reset form. This view sends an email with a reset link.
"""
if request.method == "POST":
form = PasswordResetForm(request.POST)
valid = form.is_valid()
if valid:
form.save(use_https=request.is_secure(),
token_generator=default_token_generator,
email_template_name='users/email/pw_reset.ltxt')
if mobile:
if valid:
return HttpResponseRedirect(reverse('users.mobile_pw_reset_sent'))
else:
if not valid:
return {'status': 'error',
'errors': dict(form.errors.iteritems())}
else:
return {'status': 'success'}
else:
form = PasswordResetForm()
if mobile:
return jingo.render(request, 'users/mobile/pw_reset_form.html', {'form': form})
|
ea27378253a7ed1b98cb91fd52fe724e79f35e26
| 3,641,439
|
import tensorflow as tf
def rotation_components(x, y, eps=1e-12, costh=None):
"""Components for the operator Rotation(x,y)
Together with `rotation_operator` achieves best memory complexity: O(N_batch * N_hidden)
Args:
x: a tensor from where we want to start
y: a tensor at which we want to finish
eps: the cutoff for the normalizations (avoiding division by zero)
Returns:
Five components: u, v, [u,v] and `2x2 rotation by theta`, cos(theta)
"""
size_batch = tf.shape(x)[0]
hidden_size = tf.shape(x)[1]
# construct the 2x2 rotation
u = tf.nn.l2_normalize(x, 1, epsilon=eps)
if costh is None:
costh = tf.reduce_sum(u * tf.nn.l2_normalize(y, 1, epsilon=eps), 1)
sinth = tf.sqrt(1 - costh ** 2)
step1 = tf.reshape(costh, [size_batch, 1])
step2 = tf.reshape(sinth, [size_batch, 1])
Rth = tf.reshape(
tf.concat([step1, -step2, step2, step1], axis=1), [size_batch, 2, 2])
# get v and concatenate u and v
v = tf.nn.l2_normalize(
y - tf.reshape(tf.reduce_sum(u * y, 1), [size_batch, 1]) * u, 1, epsilon=eps)
step3 = tf.concat([tf.reshape(u, [size_batch, 1, hidden_size]),
tf.reshape(v, [size_batch, 1, hidden_size])],
axis=1)
# do the batch matmul
step4 = tf.reshape(u, [size_batch, hidden_size, 1])
step5 = tf.reshape(v, [size_batch, hidden_size, 1])
return step4, step5, step3, Rth, costh
|
79cec86425bce65ac92ce8cf9c720f98857d7e1a
| 3,641,440
|
import numpy as np
def erode(np_image_bin, struct_elem='rect', size=3):
"""Execute the erode morphological operation on a binarized image
Keyword arguments:
np_image_bin -- binarized image
struct_elem:
cross - cross structuring element
rect - rectangle structuring element
circ - circle structuring element (maybe implemented later)
size: size of the structuring element, should be 2N+1
Return:
Binarized image after the erode operation
"""
np_image_bin = np_image_bin.astype(np.uint8)
np_image_er = np.zeros(np_image_bin.shape, dtype=np.uint8)
#np_image_bin = np.arange(625).reshape((25,25))
#rectangle
dir_size = int((size-1)/2)
#print(x_max, y_max)
for index, x in np.ndenumerate(np_image_bin):
np_window = bs.getWindow(np_image_bin, index, dir_size, struct_elem)
if np_window.max() == 255:
np_image_er[index[0], index[1]] = 255
return np_image_er
|
4692b40555a8047d70ad8c4b33de636a0c6c87b0
| 3,641,441
|
from typing import Type
from typing import Union
from typing import Any
import inspect
from argparse import ArgumentParser, Namespace
def from_argparse_args(
cls: Type[ParseArgparserDataType], args: Union[Namespace, ArgumentParser], **kwargs: Any
) -> ParseArgparserDataType:
"""Create an instance from CLI arguments.
Eventually use variables from the OS environment which are defined as "PL_<CLASS-NAME>_<CLASS_ARGUMENT_NAME>"
Args:
cls: Lightning class
args: The parser or namespace to take arguments from. Only known arguments will be
parsed and passed to the :class:`Trainer`.
**kwargs: Additional keyword arguments that may override ones in the parser or namespace.
These must be valid Trainer arguments.
Example:
>>> from pytorch_lightning import Trainer
>>> parser = ArgumentParser(add_help=False)
>>> parser = Trainer.add_argparse_args(parser)
>>> parser.add_argument('--my_custom_arg', default='something') # doctest: +SKIP
>>> args = Trainer.parse_argparser(parser.parse_args(""))
>>> trainer = Trainer.from_argparse_args(args, logger=False)
"""
if isinstance(args, ArgumentParser):
args = cls.parse_argparser(args)
params = vars(args)
# we only want to pass in valid Trainer args, the rest may be user specific
valid_kwargs = inspect.signature(cls.__init__).parameters
trainer_kwargs = {name: params[name] for name in valid_kwargs if name in params}
trainer_kwargs.update(**kwargs)
return cls(**trainer_kwargs)
|
8d418557437dfc8f2522f4681cf31b9e81065e54
| 3,641,442
|
def setup_counter_and_timer(nodemap):
"""
This function configures the camera to setup a Pulse Width Modulation signal using
Counter and Timer functionality. By default, the PWM signal will be set to run at
50hz, with a duty cycle of 70%.
:param nodemap: Device nodemap.
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('Configuring Pulse Width Modulation signal')
try:
result = True
# Set Counter Selector to Counter 0
node_counter_selector = PySpin.CEnumerationPtr(nodemap.GetNode('CounterSelector'))
# Check to see if camera supports Counter and Timer functionality
if not PySpin.IsAvailable(node_counter_selector):
print('\nCamera does not support Counter and Timer Functionality. Aborting...\n')
return False
if not PySpin.IsWritable(node_counter_selector):
print('\nUnable to set Counter Selector (enumeration retrieval). Aborting...\n')
return False
entry_counter_0 = node_counter_selector.GetEntryByName('Counter0')
if not PySpin.IsAvailable(entry_counter_0) or not PySpin.IsReadable(entry_counter_0):
print('\nUnable to set Counter Selector (entry retrieval). Aborting...\n')
return False
counter_0 = entry_counter_0.GetValue()
node_counter_selector.SetIntValue(counter_0)
# Set Counter Event Source to MHzTick
node_counter_event_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterEventSource'))
if not PySpin.IsAvailable(node_counter_event_source) or not PySpin.IsWritable(node_counter_event_source):
print('\nUnable to set Counter Event Source (enumeration retrieval). Aborting...\n')
return False
entry_counter_event_source_mhz_tick = node_counter_event_source.GetEntryByName('MHzTick')
if not PySpin.IsAvailable(entry_counter_event_source_mhz_tick) \
or not PySpin.IsReadable(entry_counter_event_source_mhz_tick):
print('\nUnable to set Counter Event Source (entry retrieval). Aborting...\n')
return False
counter_event_source_mhz_tick = entry_counter_event_source_mhz_tick.GetValue()
node_counter_event_source.SetIntValue(counter_event_source_mhz_tick)
# Set Counter Duration to 14000
node_counter_duration = PySpin.CIntegerPtr(nodemap.GetNode('CounterDuration'))
if not PySpin.IsAvailable(node_counter_duration) or not PySpin.IsWritable(node_counter_duration):
print('\nUnable to set Counter Duration (integer retrieval). Aborting...\n')
return False
node_counter_duration.SetValue(14000)
# Set Counter Delay to 6000
node_counter_delay = PySpin.CIntegerPtr(nodemap.GetNode('CounterDelay'))
if not PySpin.IsAvailable(node_counter_delay) or not PySpin.IsWritable(node_counter_delay):
print('\nUnable to set Counter Delay (integer retrieval). Aborting...\n')
return False
node_counter_delay.SetValue(6000)
# Determine Duty Cycle of PWM signal
duty_cycle = float(node_counter_duration.GetValue()) / (float(node_counter_duration.GetValue() +
node_counter_delay.GetValue())) * 100
print('\nThe duty cycle has been set to {}%'.format(duty_cycle))
# Determine pulse rate of PWM signal
pulse_rate = 1000000 / float(node_counter_duration.GetValue() + node_counter_delay.GetValue())
print('\nThe pulse rate has been set to {} Hz'.format(pulse_rate))
# Set Counter Trigger Source to Frame Trigger Wait
node_counter_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerSource'))
if not PySpin.IsAvailable(node_counter_trigger_source) or not PySpin.IsWritable(node_counter_trigger_source):
print('\nUnable to set Counter Trigger Source (enumeration retrieval). Aborting...\n')
return False
entry_counter_trigger_source_ftw = node_counter_trigger_source.GetEntryByName('FrameTriggerWait')
if not PySpin.IsAvailable(entry_counter_trigger_source_ftw)\
or not PySpin.IsReadable(entry_counter_trigger_source_ftw):
print('\nUnable to set Counter Trigger Source (entry retrieval). Aborting...\n')
return False
counter_trigger_source_ftw = entry_counter_trigger_source_ftw.GetValue()
node_counter_trigger_source.SetIntValue(counter_trigger_source_ftw)
# Set Counter Trigger Activation to Level High
node_counter_trigger_activation = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerActivation'))
if not PySpin.IsAvailable(node_counter_trigger_activation) or \
not PySpin.IsWritable(node_counter_trigger_activation):
print('\nUnable to set Counter Trigger Activation (enumeration retrieval). Aborting...\n')
return False
entry_counter_trigger_source_lh = node_counter_trigger_activation.GetEntryByName('LevelHigh')
if not PySpin.IsAvailable(entry_counter_trigger_source_lh) \
or not PySpin.IsReadable(entry_counter_trigger_source_lh):
print('\nUnable to set Counter Trigger Activation (entry retrieval). Aborting...\n')
return False
counter_trigger_level_high = entry_counter_trigger_source_lh.GetValue()
node_counter_trigger_activation.SetIntValue(counter_trigger_level_high)
except PySpin.SpinnakerException as ex:
print('Error: {}'.format(ex))
return False
return result
|
9874b17ce49aca766504891bd9828aad1e075e21
| 3,641,443
|
def concat(l1, l2):
""" Join two possibly None lists """
if l1 is None:
return l2
if l2 is None:
return l1
return l1 + l2
|
9e87bead7eedc4c47f665808b9e0222437bc01b5
| 3,641,444
|
import pandas as pd
def get_model_data(n_samples=None, ratio=None):
"""
Provides train and validation data to train the model. If n_samples and
ratio are not None, it returns data according to the ratio between v1 and v2.
V1 is data comming from the original distribution of SIRD parameters, and
V2 is data comming from distributions based on errors of trained ML models.
Parameters
----------
n_samples : int, optional
Subset of samples from the original set. The default is None.
ratio : float, optional
Ratio of the data from the distribution based on errors. The default is None.
Returns
-------
df_train_val : pandas.DataFrame
"""
df_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set.pickle")
df_train_val_rev = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_rev.pickle")
df_v1_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_v1.pickle")
df_v2_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_v2.pickle")
if n_samples is not None and ratio is not None:
df_train_val = take_samples(df_v1_train_val,
df_v2_train_val,
n_samples,
ratio)
return df_train_val
elif n_samples is not None:
df_train_val = df_train_val.sample(n_samples, random_state=42)
return df_train_val
else:
return df_train_val_rev
|
540706ca37decbf718fcedeef30beb235e98ded8
| 3,641,445
|
def skip():
""" Decorator for marking test function that should not be executed."""
def wrapper(fn):
fn.__status__ = "skip"
return fn
return wrapper
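# Usage sketch: the decorator only tags the function; a test runner is expected to read
# the __status__ attribute and skip the test accordingly.
@skip()
def test_not_ready(): pass
print(test_not_ready.__status__)  # "skip"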
|
0b966c306515073bfb52427b78c65822ee09a060
| 3,641,446
|
import os
def upload_original_to(instance, filename):
""" Return the path this file should be stored at. """
filename_base, filename_ext = os.path.splitext(filename)
filename_ext = filename_ext.lower()
origin_path = instance.release_agency_slug
if '--' in instance.release_agency_slug:
agency_slug, office_slug = instance.release_agency_slug.split('--')
origin_path = '%s/%s' % (agency_slug, office_slug)
right_now = now().strftime("%Y%m%d%H%M")
upload_path = '%s/%s/%s%s' % (
origin_path, right_now, filename_base, filename_ext)
return upload_path
|
5dede23726ecb1ef06b4de1fb29d96b1f909f980
| 3,641,447
|
def _GetThumbnailType(destination_id):
"""Returns the thumbnail type for the destination with the id."""
destination_type = _GetDestinationType(destination_id)
if destination_type == _DestinationType.HOTSPOT:
return _ThumbnailType.PRETTY_EARTH
else:
return _ThumbnailType.GEOMETRY_OUTLINE
|
3452044aae2f9660084be46d840747089f271b1b
| 3,641,448
|
async def postAsync(text: str, *, url: str = "auto", config: ConfigOptions = ConfigOptions(), timeout: float = 30.0,
retries: int = 3):
"""Alias function for AsyncHaste().post(...)"""
return await AsyncHaste().post(text, url=url, config=config, timeout=timeout, retries=retries)
|
bfa5460ac6f469c123eb1bef6e2430f2251809c9
| 3,641,449
|
from collections import OrderedDict
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
GPU DataFrame
"""
chronometer = Chronometer.makeStarted()
cols = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator'
]
dtypes = OrderedDict([
("loan_id", "int64"),
("orig_channel", "category"),
("seller_name", "category"),
("orig_interest_rate", "float64"),
("orig_upb", "int64"),
("orig_loan_term", "int64"),
("orig_date", "date"),
("first_pay_date", "date"),
("orig_ltv", "float64"),
("orig_cltv", "float64"),
("num_borrowers", "float64"),
("dti", "float64"),
("borrower_credit_score", "float64"),
("first_home_buyer", "category"),
("loan_purpose", "category"),
("property_type", "category"),
("num_units", "int64"),
("occupancy_status", "category"),
("property_state", "category"),
("zip", "int64"),
("mortgage_insurance_percent", "float64"),
("product_type", "category"),
("coborrow_credit_score", "float64"),
("mortgage_insurance_type", "float64"),
("relocation_mortgage_indicator", "category")
])
print(acquisition_path)
acquisition_table = pyblazing.create_table(table_name='acq', type=get_type_schema(acquisition_path), path=acquisition_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
Chronometer.show(chronometer, 'Read Acquisition CSV')
return acquisition_table
|
fdc2281a6bc31547f60c9c8d8585cdc1d101d88f
| 3,641,450
|
def get_flows_src_dst_address_pairs(device, flow_monitor):
""" Gets flows under flow_monitor and returns source and destination address pairs
Args:
device ('obj'): Device to use
flow_monitor ('str'): Flow monitor name
Raises:
N/A
Returns:
[('source_address', 'destination_address'), ...]
"""
log.info('Getting all source and destination address pairs under flow monitor {name}'
.format(name=flow_monitor))
try:
output = device.parse('show flow monitor {name} cache format table'
.format(name=flow_monitor))
except SchemaEmptyParserError:
return []
pairs = []
# All hardcoded keys are mandatory in the parser
for src in output.get('ipv4_src_addr', {}):
for dst in output['ipv4_src_addr'][src]['ipv4_dst_addr']:
pairs.append((src, dst))
return pairs
|
61ffbe3e0e81acf8c408df7b5ca0f8ff9519f87b
| 3,641,451
|
def imthresh(im, thresh):
"""
Sets pixels in image below threshold value to 0
Args:
im (ndarray): image
thresh (float): threshold
Returns:
ndarray: thresholded image
"""
thresh_im = im.copy()
thresh_im[thresh_im < thresh] = 0
return thresh_im
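# Usage sketch (assumes a NumPy array as input):
import numpy as np
im = np.array([[0.2, 0.8], [0.5, 0.1]])
print(imthresh(im, 0.5))  # values below 0.5 are zeroed: [[0., 0.8], [0.5, 0.]]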
|
180dc1eba6320c21273e50e4cf7b3f28c786b839
| 3,641,452
|
import _winreg
def set_serv_parms(service, args):
""" Set the service command line parameters in Registry """
uargs = []
for arg in args:
uargs.append(unicoder(arg))
try:
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
_winreg.SetValueEx(key, _SERVICE_PARM, None, _winreg.REG_MULTI_SZ, uargs)
_winreg.CloseKey(key)
except WindowsError:
return False
return True
|
90eb2ac8ea6e9a11b7ff8c266017fe027503f159
| 3,641,453
|
def isRenderNode():
# type: () -> bool
"""
Returns
-------
bool
"""
return flavor() == 'Render'
|
b0d5799f755c9c6a72f851ad325b4d8ddf3dec70
| 3,641,454
|
def test_clean_data_contains_instance_value():
"""
Test values from instances remain when not in data.
"""
data = {'first_name': 'John'}
fields = ['job_title', 'first_name']
class Job(object):
job_title = 'swamper'
first_name = ''
class Swamper(BaseSwamper):
def build_instances(self):
obj = Job()
self.instances = {}
self.instances[Job] = obj
def clean_instances(self):
"""
`clean` depends on having both job_title and first_name, so provide
the data which isn't available from self.data by copying it from
an instance to self.cleaned_data.
"""
if self.instances:
for model, instance in self.instances.items():
# Update cleaned_data with fields from instance that aren't
# part of the new data.
initial_fields = set(fields) - set(self.data.keys())
obj_data = {field: getattr(instance, field) for field in initial_fields}
self.cleaned_data.update(obj_data)
def clean(self):
if self.cleaned_data['job_title'] == 'swamper':
if not self.cleaned_data['first_name'].startswith('J'):
raise ValueError('Only people with a first name that begin '
'with the letter J can become swampers.')
return self.cleaned_data
swamper = Swamper(fields, data)
assert swamper.errors == {}
assert swamper.cleaned_data['job_title'] == 'swamper'
assert swamper.cleaned_data['first_name'] == 'John'
obj = Job()
obj = swamper.build_or_update(obj, fields)
assert obj.job_title == 'swamper'
assert obj.first_name == 'John'
|
d2c310cfd760ddea7241ccb469ae7cffdaf373ea
| 3,641,455
|
def wrap_application(app: App, wsgi: WSGICallable) -> WSGICallable:
"""Wrap a given WSGI callable in all active middleware."""
for middleware_instance in reversed(ACTIVE_MIDDLEWARES):
wsgi = middleware_instance(app, wsgi)
return wsgi
|
09574d87e241c19cae30c2db29ee1ed4744a0c68
| 3,641,456
|
def cal_rpn(imgsize, featuresize, scale, gtboxes):
"""
Args:
imgsize: [h, w]
featuresize: the size of each output feature map, e.g. [19, 19]
scale: the scale factor of the base anchor to the feature map, e.g. [32, 32]
gtboxes: ground truth boxes in the image, shape of [N, 4].
stride: the stride of the output feature map.
Returns:
labels: label for each anchor, shape of [N, ], -1 for ignore, 0 for background, 1 for object
bbox_targets: bbox regression target for each anchor, shape of [N, 4]
"""
imgh, imgw = imgsize
# gen base anchor
base_anchor = gen_anchor(featuresize, scale)
# calculate iou
overlaps = cal_overlaps(base_anchor, gtboxes)
# init labels -1 don't care 0 is negative 1 is positive
labels = np.empty(base_anchor.shape[0])
labels.fill(-1)
# for each GT box corresponds to an anchor which has highest IOU
gt_argmax_overlaps = overlaps.argmax(axis=0)
# the anchor with the highest IOU overlap with a GT box
anchor_argmax_overlaps = overlaps.argmax(axis=1)
anchor_max_overlaps = overlaps[range(overlaps.shape[0]), anchor_argmax_overlaps]
# IOU > IOU_POSITIVE
labels[anchor_max_overlaps > config.IOU_POSITIVE] = 1
# IOU <IOU_NEGATIVE
labels[anchor_max_overlaps < config.IOU_NEGATIVE] = 0
# ensure that every GT box has at least one positive RPN region
labels[gt_argmax_overlaps] = 1
# only keep anchors inside the image
outside_anchor = np.where(
(base_anchor[:, 0] < 0)
| (base_anchor[:, 1] < 0)
| (base_anchor[:, 2] >= imgw)
| (base_anchor[:, 3] >= imgh)
)[0]
labels[outside_anchor] = -1
# subsample positive labels if there are more than RPN_POSITIVE_NUM (default 128)
fg_index = np.where(labels == 1)[0]
# print(len(fg_index))
if len(fg_index) > config.RPN_POSITIVE_NUM:
labels[
np.random.choice(
fg_index, len(fg_index) - config.RPN_POSITIVE_NUM, replace=False
)
] = -1
# subsample negative labels
if not config.OHEM:
bg_index = np.where(labels == 0)[0]
num_bg = config.RPN_TOTAL_NUM - np.sum(labels == 1)
if len(bg_index) > num_bg:
# print('bgindex:',len(bg_index),'num_bg',num_bg)
labels[
np.random.choice(bg_index, len(bg_index) - num_bg, replace=False)
] = -1
bbox_targets = bbox_transfrom(base_anchor, gtboxes[anchor_argmax_overlaps, :])
return [labels, bbox_targets], base_anchor
|
19178b125024d213808a497a005336678589588f
| 3,641,457
|
def run(args):
"""This function is called by a user to recover or reset their primary
one-time-password secret. This is used, e.g. if a user has changed
their phone, or if they think the secret has been compromised, or
if they have lost the secret completely (in which case they will
need to log in using a backup method and then call this function
from that login)
The user will need to pass in a validated Authorisation, meaning
they must have a login by at least one method (e.g. a pre-approved
device or a one-time-login requested via backup codes or via
an admin-authorised login)
"""
auth = Authorisation.from_data(args["authorisation"])
try:
reset_otp = bool(args["reset_otp"])
except:
reset_otp = False
auth.verify(resource="reset_otp")
identity_uid = auth.identity_uid()
service = get_this_service(need_private_access=True)
if service.uid() != identity_uid:
raise PermissionError(
"You can only reset the OTP on the identity service on "
"which the user is registered! %s != %s" %
(service.uid(), identity_uid))
user_uid = auth.user_uid()
return (user_uid, reset_otp)
|
c81e533c7dead3fcda028ef85e566d290a85ec74
| 3,641,458
|
import time
def adjust_price(iteration, current_price, global_start, last_tx_time):
""" Function that decides to lower or increase the price, according to the
time of previous transaction and the progress in reaching TARGET in
TARGET_TIME.
Args:
iteration (int) - Number of previous successful transactions. Iterator
which changes with the changing of nonce;
current_price (int) - Current gas price in Wei;
global_start (float/Unix format) - The start of the whole process;
last_tx_time (float/Unix format) - Time spent in previous iteration.
Return:
current_price (int) - New gas price after adjustments.
"""
if iteration > 0:
target_ratio = TARGET_TIME / TARGET
actual_ratio = (time.time() - global_start) / iteration
# If we check only the duration of the latest tx, it will increase
# the price very rapidly, ignoring the global progress.
# So it is necessary to control the price according to plan.
if actual_ratio < target_ratio:
current_price -= int(current_price / 10)
elif last_tx_time >= target_ratio:
current_price += int(current_price / 10)
return current_price
|
f27f13e7b4a753d6b912ed1d795383f0d206b2ef
| 3,641,459
|
from copy import copy
import pandas as pd
def read_csv_batch(file: str, offset, cnt, **read_csv_params):
"""
Args:
file: path to the CSV file.
offset: byte offset at which to start reading.
cnt: number of rows to read starting from the offset.
read_csv_params: extra keyword arguments passed through to pandas.read_csv.
Returns:
DataFrame with the requested batch of rows.
"""
read_csv_params = copy(read_csv_params)
if read_csv_params is None:
read_csv_params = {}
try:
usecols = read_csv_params.pop('usecols')
except KeyError:
usecols = None
header = pd.read_csv(file, nrows=0, **read_csv_params).columns
with open(file, 'rb') as f:
f.seek(offset)
data = pd.read_csv(f, header=None, names=header, chunksize=None, nrows=cnt, usecols=usecols, **read_csv_params)
return data
|
cc6699db5b9ecae9706d52768c8a1dcd084062ea
| 3,641,460
|
def fault_ack_faults_by_dn(cookie, in_dns):
""" Auto-generated UCSC XML API Method. """
method = ExternalMethod("FaultAckFaultsByDn")
method.cookie = cookie
method.in_dns = in_dns
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request
|
532e925b560d02a0ed47d61f9aa721f55fc6b650
| 3,641,461
|
from typing import List
def provides(name=None, needs: List[str] = None):
"""A shortcut for defining a factory function that also needs dependencies
itself."""
if not needs:
needs = []
def decorator(f):
decorated = _needs(*needs)(f)
set(name or f.__name__, decorated)
return f
return decorator
|
e28e8d5690b7fa53907864c6d17e199a491ccada
| 3,641,462
|
import numpy as np
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
"""Clip coordinates to an image with the given height and width."""
x1 = np.minimum(width - 1.0, np.maximum(0.0, x1))
y1 = np.minimum(height - 1.0, np.maximum(0.0, y1))
x2 = np.minimum(width - 1.0, np.maximum(0.0, x2))
y2 = np.minimum(height - 1.0, np.maximum(0.0, y2))
return x1, y1, x2, y2
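# Usage sketch: a box hanging off a 320x240 image is clamped to valid pixel coordinates.
print(clip_xyxy_to_image(-5.0, 10.0, 350.0, 400.0, height=240, width=320))
# (0.0, 10.0, 319.0, 239.0)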
|
cf0fe5269afe2a5cbe94efb3221184f706fcb59d
| 3,641,463
|
import re
from urllib.parse import urlunparse
def build_url(urlo, base, end, url_whitespace, url_case):
""" Build and return a valid url.
Parameters
----------
urlo A ParseResult object returned by urlparse
base base_url from config
end end_url from config
url_whitespace url_whitespace from config
url_case url_case from config
Returns
-------
URL string
"""
if not urlo.netloc:
if not end:
clean_target = re.sub(r'\s+', url_whitespace, urlo.path)
else:
clean_target = re.sub(r'\s+', url_whitespace, urlo.path.rstrip('/'))
if clean_target.endswith(end):
end = ''
if base.endswith('/'):
path = "%s%s%s" % (base, clean_target.lstrip('/'), end)
elif base and not clean_target.startswith('/'):
path = "%s/%s%s" % (base, clean_target, end)
else:
path = "%s%s%s" % (base, clean_target, end)
if url_case == 'lowercase':
urlo = urlo._replace(path=path.lower() )
elif url_case == 'uppercase':
urlo = urlo._replace(path=path.upper() )
else:
urlo = urlo._replace(path=path)
return urlunparse(urlo)
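# Usage sketch (mirroring a wiki-link style config): spaces become underscores, the base
# and end URLs are applied, and the path is lower-cased.
from urllib.parse import urlparse
print(build_url(urlparse('My Page'), '/wiki/', '/', '_', 'lowercase'))  # /wiki/my_page/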
|
b6fa39062502a7d862b17cd079de3c4cfa3720c4
| 3,641,464
|
def AnomalyDicts(anomalies, v2=False):
"""Makes a list of dicts with properties of Anomaly entities."""
bisect_statuses = _GetBisectStatusDict(anomalies)
return [GetAnomalyDict(a, bisect_statuses.get(a.bug_id), v2)
for a in anomalies]
|
ffa8e0f93245f49e0857a19d4e154d47f3dd7f16
| 3,641,465
|
import re
def remove_links(txt: str):
"""
Remove weblinks from the text
"""
pattern = r'[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
txt = re.sub(pattern, " ", txt)
txt = re.sub('https?', " ", txt)
return txt
|
4ccaae84d12ab47e70482d15100ba2e60ef476e8
| 3,641,466
|
from functools import wraps
from timeit import default_timer as timer  # assuming the common default_timer alias for `timer`
def timefn(fn):
"""Times a function and stores the result in LOG variables"""
@wraps(fn)
def inside(*args, **kwargs):
start = timer()
result = fn(*args, **kwargs)
end = timer()
gv.TIME_LOG += f'Fn : {fn.__name__} - {end - start}\n'
return result
return inside
|
8dddcd54489d2fb754d9c4de7bc1b084a10840e2
| 3,641,467
|
def get_tweet_stream(output_file, twitter_credentials):
"""
This function is given and returns a "stream" to listen to tweets and store them in output_file
To understand how this function works, check it against the code of twitter_streaming in part00_preclass
:param output_file: the file where the returned stream will store tweets
:param twitter_credentials: a dictionary containing the credentials to access twitter (you should have created your own!)
:return: a "stream" variable to track live tweets
"""
access_token = twitter_credentials['access_token']
access_token_secret = twitter_credentials['access_token_secret']
consumer_key = twitter_credentials['consumer_key']
consumer_secret = twitter_credentials['consumer_secret']
l = TweetToFileListener()
l.set_output_file(output_file)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
return stream
|
71a336b71760ef74e14b6472cb8f2a8510d9acb3
| 3,641,468
|
def conv3d_3x3(filters,
stride=1,
padding=1,
kernel_initializer=None,
bias_initializer=None,
name=None):
"""3D convolution with padding."""
return keras.Sequential([
layers.ZeroPadding3D(padding),
layers.Conv3D(filters,
kernel_size=3,
strides=stride,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
padding='valid')
], name=name)
|
f8035b8be1bf82385c31aa0810e7addd8027b5cd
| 3,641,469
|
def get_available_quests(user, num_quests):
"""Get the quests the user could participate in."""
quests = []
for quest in Quest.objects.exclude(questmember__user=user).order_by('priority'):
if quest.can_add_quest(user) and not quest.completed_quest(user):
quests.append(quest)
if len(quests) == num_quests:
return quests
return quests
|
d1bdbe96dbd0b7fd5295ec43153249e7b93c7339
| 3,641,470
|
def height():
""" Default window height """
return get_default_height()
|
fab02ec1881d1c2ccda9f59e6a72d2990d815973
| 3,641,471
|
def no_adjust_tp_func_nb(c: AdjustTPContext, *args) -> float:
"""Placeholder function that returns the initial take-profit value."""
return c.curr_stop
|
6896b72c20c79156c97ba09ba87a1947c7df04d6
| 3,641,472
|
def inverse_theoretical_laser_position(y, a, b, c):
"""
theoretical angular position of the wire in respect to the laser position
"""
return np.pi - a - np.arccos((b - y) / c)
|
0ba87442954fd3bc832edec9adc082e1d2448347
| 3,641,473
|
import sklearn.metrics
def ad_roc(y_true, y_score):
""" Compute ROC-curve.
"""
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score, pos_label=1, drop_intermediate=False)
return fpr, tpr, thresholds
|
db161f7099aab4e1d266ab2b0c8aac0b96076a49
| 3,641,474
|
import time
import platform
import subprocess
def check_reachability(gateway):
# from https://stackoverflow.com/questions/2953462/pinging-servers-in-python
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
time.sleep(15)
# Option for the number of packets as a function of
param = '-n' if platform.system().lower()=='windows' else '-c'
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param, '1', '-w', '1', gateway]
return subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
|
76580279cfc65f5a74015d63fb0eaccae9b8e141
| 3,641,475
|
import torch
def greedy_decoding(baseline_transformer, src_representations_batch, src_mask, trg_field_processor, max_target_tokens=100):
"""
Supports batch (decode multiple source sentences) greedy decoding.
Decoding could be further optimized to cache old token activations because they can't look ahead and so
adding a newly predicted token won't change old token's activations.
Example: we input <s> and do a forward pass. We get intermediate activations for <s> and at the output at position
    0, after applying the linear layer we get e.g. token <I>. Now we input <s>,<I> but <s>'s activations will remain
the same. Similarly say we now got <am> at output position 1, in the next step we input <s>,<I>,<am> and so <I>'s
activations will remain the same as it only looks at/attends to itself and to <s> and so forth.
"""
device = next(baseline_transformer.parameters()).device
pad_token_id = trg_field_processor.vocab.stoi[PAD_TOKEN]
# Initial prompt is the beginning/start of the sentence token. Make it compatible shape with source batch => (B,1)
target_sentences_tokens = [[BOS_TOKEN] for _ in range(src_representations_batch.shape[0])]
trg_token_ids_batch = torch.tensor([[trg_field_processor.vocab.stoi[tokens[0]]] for tokens in target_sentences_tokens], device=device)
# Set to true for a particular target sentence once it reaches the EOS (end-of-sentence) token
is_decoded = [False] * src_representations_batch.shape[0]
while True:
trg_mask, _ = get_masks_and_count_tokens_trg(trg_token_ids_batch, pad_token_id)
# Shape = (B*T, V) where T is the current token-sequence length and V target vocab size
predicted_log_distributions = baseline_transformer.decode(trg_token_ids_batch, src_representations_batch, trg_mask, src_mask)
# Extract only the indices of last token for every target sentence (we take every T-th token)
num_of_trg_tokens = len(target_sentences_tokens[0])
predicted_log_distributions = predicted_log_distributions[num_of_trg_tokens-1::num_of_trg_tokens]
# This is the "greedy" part of the greedy decoding:
# We find indices of the highest probability target tokens and discard every other possibility
most_probable_last_token_indices = torch.argmax(predicted_log_distributions, dim=-1).cpu().numpy()
# Find target tokens associated with these indices
predicted_words = [trg_field_processor.vocab.itos[index] for index in most_probable_last_token_indices]
for idx, predicted_word in enumerate(predicted_words):
target_sentences_tokens[idx].append(predicted_word)
if predicted_word == EOS_TOKEN: # once we find EOS token for a particular sentence we flag it
is_decoded[idx] = True
if all(is_decoded) or num_of_trg_tokens == max_target_tokens:
break
# Prepare the input for the next iteration (merge old token ids with the new column of most probable token ids)
trg_token_ids_batch = torch.cat((trg_token_ids_batch, torch.unsqueeze(torch.tensor(most_probable_last_token_indices, device=device), 1)), 1)
# Post process the sentences - remove everything after the EOS token
target_sentences_tokens_post = []
for target_sentence_tokens in target_sentences_tokens:
try:
target_index = target_sentence_tokens.index(EOS_TOKEN) + 1
        except ValueError:  # no EOS token found - keep the whole sequence
target_index = None
target_sentence_tokens = target_sentence_tokens[:target_index]
target_sentences_tokens_post.append(target_sentence_tokens)
return target_sentences_tokens_post
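A self-contained sketch of just the greedy selection step described above, on dummy log-probabilities (no transformer involved):
import torch

batch_size, vocab_size = 2, 5
log_probs = torch.log_softmax(torch.randn(batch_size, vocab_size), dim=-1)
next_token_ids = torch.argmax(log_probs, dim=-1)                 # shape (B,)
prev_ids = torch.zeros(batch_size, 1, dtype=torch.long)          # pretend these are BOS ids
new_ids = torch.cat((prev_ids, next_token_ids.unsqueeze(1)), 1)  # shape (B, 2), mirrors the cat above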
|
dbdf636979f28ea09b261fd3947068d6e4e359ad
| 3,641,476
|
def _parse_cell_type(cell_type_arg):
""" Convert the cell type representation to the expected JVM CellType object."""
def to_jvm(ct):
return _context_call('_parse_cell_type', ct)
if isinstance(cell_type_arg, str):
return to_jvm(cell_type_arg)
elif isinstance(cell_type_arg, CellType):
return to_jvm(cell_type_arg.cell_type_name)
|
1afa4b2ed28d08ebc3526b8462673e5aa7f8a47f
| 3,641,477
|
def Ht(mu, q=None, t=None, pi=None):
"""
Returns the symmetric Macdonald polynomial using the Haiman,
Haglund, and Loehr formula.
Note that if both `q` and `t` are specified, then they must have the
same parent.
REFERENCE:
- J. Haglund, M. Haiman, N. Loehr.
*A combinatorial formula for non-symmetric Macdonald polynomials*.
:arXiv:`math/0601693v3`.
EXAMPLES::
sage: from sage.combinat.sf.ns_macdonald import Ht
sage: HHt = SymmetricFunctions(QQ['q','t'].fraction_field()).macdonald().Ht()
sage: Ht([0,0,1])
x0 + x1 + x2
sage: HHt([1]).expand(3)
x0 + x1 + x2
sage: Ht([0,0,2])
x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
sage: HHt([2]).expand(3)
x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
"""
P, q, t, n, R, x = _check_muqt(mu, q, t, pi)
res = 0
for a in n:
weight = a.weight()
res += q**a.maj()*t**a.inv()*prod( x[i]**weight[i] for i in range(len(weight)) )
return res
|
d3a46458215417db0789d3601163e105c9712c75
| 3,641,478
|
def is_generic_alias_of(to_check, type_def):
"""
:param to_check: the type that is supposed to be a generic alias of ``type_def`` if this function returns ``True``.
:param type_def: the type that is supposed to be a generic version of ``to_check`` if this function returns \
``True``.
:return: ``True`` if ``to_check`` is a generic alias of ``type_def``, ``False`` otherwise.
"""
if isinstance(to_check, type) and issubclass(to_check, type_def):
return True
origin = getattr(to_check, "__origin__", None)
if origin is not None:
return issubclass(origin, type_def)
return False
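A few illustrative checks with standard typing generics:
from typing import Dict, List

assert is_generic_alias_of(List[int], list)           # __origin__ of List[int] is list
assert is_generic_alias_of(list, list)                # a plain subclass also qualifies
assert not is_generic_alias_of(Dict[str, int], list)  # origin dict is not a list subclass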
|
d09b255e9ff44a65565196dd6643564aea181433
| 3,641,479
|
def train_PCA(X,n_dims,model='pca'):
"""
name: train_PCA
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
returns: the transformer model
"""
estimator=transformer[model].set_params(pca__n_components=n_dims)
estimator.fit(X)
return estimator
|
1909d154d778864c2eba0819e43a2bbcb260edbf
| 3,641,480
|
import os
def find_song(hash_dictionary, sample_dictionary, id_to_song):
"""
Run our song matching algorithm to find the song
:param hash_dictionary:
:param sample_dictionary:
:param id_to_song:
:return max_frequencies, max_frequencies_keys:
"""
offset_dictionary = dict()
for song_id in id_to_song.keys():
offset_dictionary[song_id] = {}
song_size = {}
for song_id in id_to_song.keys():
rate, data = wavfile.read(os.path.join(os.path.join(
AppDirs('Presto-Chango').user_data_dir, 'Songs'), id_to_song[song_id]))
song_size[song_id] = len(data) / rate
for sample_hash_value, sample_offsets in sample_dictionary.items():
for sample_offset in sample_offsets:
try:
for song_id, offset in hash_dictionary[sample_hash_value]:
try:
offset_dictionary[song_id][(
offset - sample_offset) // 1] += 1
except KeyError:
offset_dictionary[song_id][(
offset - sample_offset) // 1] = 1
except KeyError:
pass
max_frequencies = {}
for song_id, offset_dict in offset_dictionary.items():
for relative_set, frequency in offset_dict.items():
try:
max_frequencies[song_id] = max(
max_frequencies[song_id], frequency)
except KeyError:
max_frequencies[song_id] = frequency
max_frequencies_keys = sorted(
max_frequencies, key=max_frequencies.get, reverse=True)
return max_frequencies, max_frequencies_keys
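The core of the matching above is a histogram over (song_offset - sample_offset); a toy illustration of that idea with made-up hashes:
from collections import Counter

song_hashes = {"h1": [10, 50], "h2": [12]}    # hash -> offsets within one song
sample_hashes = {"h1": [0], "h2": [2]}        # hash -> offsets within the recorded sample

deltas = Counter()
for h, sample_offsets in sample_hashes.items():
    for s_off in sample_offsets:
        for song_off in song_hashes.get(h, []):
            deltas[song_off - s_off] += 1

best_delta, votes = deltas.most_common(1)[0]
# best_delta == 10 with 2 votes: both hashes agree the sample starts 10 units into the song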
|
73ec2b2783dddd00c7a744af732737f6e6bce8b2
| 3,641,481
|
def phedex_url(api=''):
"""Return Phedex URL for given API name"""
return 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/%s' % api
|
a642cd138d9be4945dcbd924c7b5c9892de36baa
| 3,641,482
|
import csv
def extract_emails(fname, email='Email Address',
outfile="emails_from_mailchimp.txt",
nofile=False, nolog=False, sort=True):
"""
Extract e-mail addresses from a CSV-exported MailChimp list.
:param fname: the input .csv file
:param email: the header of the column containing all e-mail addresses
:param outfile: the .txt file the addresses will get written to
:param nofile: suppresses the creation of a text file if set to True
:param nolog: suppresses logging the addresses to stdout if set to True
:param sort: sorts e-mail addresses alphabetically if set to True
:return a list containing all e-mail addresses
"""
addresses = []
try:
with open(fname, newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
            # Note: csv.DictReader already consumes the header row, so no extra row is skipped here.
for item in reader:
try:
addresses.append(item[email])
except KeyError:
log.error("The provided CSV file does not contain "
"the header \"{}\".\n"
"Please provide the correct header name "
"for the column containing e-mail "
"addresses.".format(email))
return
except FileNotFoundError:
log.error("The input file is not available. "
"Please provide a valid path.")
except IsADirectoryError:
log.error("The input file is not a CSV file but a directory.")
except StopIteration:
log.error("The input file cannot be read. "
"Please provide a valid CSV file.")
if sort:
addresses.sort()
if not nolog:
for address in addresses:
log.info(address)
if not nofile:
try:
with open(outfile, 'w') as txtfile:
for address in addresses:
txtfile.write(address + '\n')
except FileNotFoundError:
log.error("The file you are trying to write to "
"does not exist.")
except PermissionError:
log.error("You do not have permission to write to the file "
"whose path you provided.")
return addresses
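Hypothetical usage; the file name is made up and `log` is assumed to be a module-level logger:
addresses = extract_emails("mailchimp_export.csv", nofile=True, nolog=True)
# -> alphabetically sorted list of addresses; nothing written to disk or logged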
|
1b4e5f60eacd4843e1c9ba6a72c866c52b5bd8d9
| 3,641,483
|
def continuations(tree, *, syntax, expander, **kw):
"""[syntax, block] call/cc for Python.
This allows saving the control state and then jumping back later
(in principle, any time later). Some possible use cases:
- Tree traversal (possibly a cartesian product of multiple trees, with the
current position in each tracked automatically).
- McCarthy's amb operator.
- Generators. (Python already has those, so only for teaching.)
This is a very loose pythonification of Paul Graham's continuation-passing
macros, which implement continuations by chaining closures and passing the
continuation semi-implicitly. For details, see chapter 20 in On Lisp:
http://paulgraham.com/onlisp.html
Continuations are most readily implemented when the program is written in
continuation-passing style (CPS), but that is unreadable for humans.
The purpose of this macro is to partly automate the CPS transformation, so
that at the use site, we can write CPS code in a much more readable fashion.
A ``with continuations`` block implies TCO; the same rules apply as in a
``with tco`` block. Furthermore, ``with continuations`` introduces the
following additional rules:
- Functions which make use of continuations, or call other functions that do,
must be defined within a ``with continuations`` block, using the usual
``def`` or ``lambda`` forms.
- All function definitions in a ``with continuations`` block, including
any nested definitions, have an implicit formal parameter ``cc``,
**even if not explicitly declared** in the formal parameter list.
If declared explicitly, ``cc`` must be in a position that can accept a
default value.
This means ``cc`` must be declared either as by-name-only::
with continuations:
def myfunc(a, b, *, cc):
...
f = lambda *, cc: ...
or as the last parameter that has no default::
with continuations:
def myfunc(a, b, cc):
...
f = lambda cc: ...
Then the continuation machinery will automatically set the default value
of ``cc`` to the default continuation (``identity``), which just returns
its arguments.
The most common use case for explicitly declaring ``cc`` is that the
function is the target of a ``call_cc[]``; then it helps readability
to make the ``cc`` parameter explicit.
- A ``with continuations`` block will automatically transform all
function definitions and ``return`` statements lexically contained
within the block to use the continuation machinery.
- ``return somevalue`` actually means a tail-call to ``cc`` with the
given ``somevalue``.
Multiple values can be returned as a ``Values``. Multiple-valueness
is tested at run time.
Any ``Values`` return value is automatically unpacked to the args
and kwargs of ``cc``.
- An explicit ``return somefunc(arg0, ..., k0=v0, ...)`` actually means
a tail-call to ``somefunc``, with its ``cc`` automatically set to our
``cc``. Hence this inserts a call to ``somefunc`` before proceeding
with our current continuation. (This is most often what we want when
making a tail-call from a continuation-enabled function.)
Here ``somefunc`` **must** be a continuation-enabled function;
otherwise the TCO chain will break and the result is immediately
returned to the top-level caller.
(If the call succeeds at all; the ``cc`` argument is implicitly
filled in and passed by name. Regular functions usually do not
accept a named parameter ``cc``, let alone know what to do with it.)
- Just like in ``with tco``, a lambda body is analyzed as one big
return-value expression. This uses the exact same analyzer; for example,
``do[]`` (including any implicit ``do[]``) and the ``let[]`` expression
family are supported.
- Calls from functions defined in one ``with continuations`` block to those
defined in another are ok; there is no state or context associated with
the block.
- Much of the language works as usual.
Any non-tail calls can be made normally. Regular functions can be called
normally in any non-tail position.
Continuation-enabled functions behave as regular functions when
called normally; only tail calls implicitly set ``cc``. A normal call
uses ``identity`` as the default ``cc``.
- For technical reasons, the ``return`` statement is not allowed at the
top level of the ``with continuations:`` block. (Because a continuation
is essentially a function, ``return`` would behave differently based on
whether it is placed lexically before or after a ``call_cc[]``.)
If you absolutely need to terminate the function surrounding the
``with continuations:`` block from inside the block, use an exception
to escape; see ``call_ec``, ``catch``, ``throw``.
**Capturing the continuation**:
Inside a ``with continuations:`` block, the ``call_cc[]`` statement
captures a continuation. (It is actually a macro, for technical reasons.)
For various possible program topologies that continuations may introduce, see
the clarifying pictures under ``doc/`` in the source distribution.
Syntax::
x = call_cc[func(...)]
*xs = call_cc[func(...)]
x0, ... = call_cc[func(...)]
x0, ..., *xs = call_cc[func(...)]
call_cc[func(...)]
Conditional variant::
x = call_cc[f(...) if p else g(...)]
*xs = call_cc[f(...) if p else g(...)]
x0, ... = call_cc[f(...) if p else g(...)]
x0, ..., *xs = call_cc[f(...) if p else g(...)]
call_cc[f(...) if p else g(...)]
Assignment targets:
- To destructure positional multiple-values (from a `Values` return value),
use a tuple assignment target (comma-separated names, as usual).
Destructuring *named* return values from a `call_cc` is currently not supported.
- The last assignment target may be starred. It is transformed into
the vararg (a.k.a. ``*args``) of the continuation function.
(It will capture a whole tuple, or any excess items, as usual.)
- To ignore the return value (useful if ``func`` was called only to
perform its side-effects), just omit the assignment part.
Conditional variant:
- ``p`` is any expression. If truthy, ``f(...)`` is called, and if falsey,
``g(...)`` is called.
- Each of ``f(...)``, ``g(...)`` may be ``None``. A ``None`` skips the
function call, proceeding directly to the continuation. Upon skipping,
all assignment targets (if any are present) are set to ``None``.
The starred assignment target (if present) gets the empty tuple.
- The main use case of the conditional variant is for things like::
with continuations:
k = None
def setk(cc):
global k
k = cc
def dostuff(x):
call_cc[setk() if x > 10 else None] # capture only if x > 10
...
To keep things relatively straightforward, a ``call_cc[]`` is only
allowed to appear **at the top level** of:
- the ``with continuations:`` block itself
- a ``def`` or ``async def``
Nested defs are ok; here *top level* only means the top level of the
*currently innermost* ``def``.
If you need to place ``call_cc[]`` inside a loop, use ``@looped`` et al.
from ``unpythonic.fploop``; this has the loop body represented as the
top level of a ``def``.
Multiple ``call_cc[]`` statements in the same function body are allowed.
These essentially create nested closures.
**Main differences to Scheme and Racket**:
Compared to Scheme/Racket, where ``call/cc`` will capture also expressions
    occurring further up in the call stack, our ``call_cc`` may need to be
placed differently (further out, depending on what needs to be captured)
due to the delimited nature of the continuations implemented here.
Scheme and Racket implicitly capture the continuation at every position,
whereas we do it explicitly, only at the use sites of the ``call_cc`` macro.
Also, since there are limitations to where a ``call_cc[]`` may appear, some
code may need to be structured differently to do some particular thing, if
porting code examples originally written in Scheme or Racket.
Unlike ``call/cc`` in Scheme/Racket, ``call_cc`` takes **a function call**
as its argument, not just a function reference. Also, there's no need for
it to be a one-argument function; any other args can be passed in the call.
The ``cc`` argument is filled implicitly and passed by name; any others are
passed exactly as written in the client code.
**Technical notes**:
The ``call_cc[]`` statement essentially splits its use site into *before*
and *after* parts, where the *after* part (the continuation) can be run
a second and further times, by later calling the callable that represents
the continuation. This makes a computation resumable from a desired point.
The return value of the continuation is whatever the original function
returns, for any ``return`` statement that appears lexically after the
``call_cc[]``.
The effect of ``call_cc[]`` is that the function call ``func(...)`` in
the brackets is performed, with its ``cc`` argument set to the lexically
remaining statements of the current ``def`` (at the top level, the rest
of the ``with continuations`` block), represented as a callable.
The continuation itself ends there (it is *delimited* in this particular
sense), but it will chain to the ``cc`` of the function it appears in.
This is termed the *parent continuation* (**pcc**), stored in the internal
variable ``_pcc`` (which defaults to ``None``).
Via the use of the pcc, here ``f`` will maintain the illusion of being
just one function, even though a ``call_cc`` appears there::
def f(*, cc):
...
call_cc[g(1, 2, 3)]
...
The continuation is a closure. For its pcc, it will use the value the
original function's ``cc`` had when the definition of the continuation
was executed (for that particular instance of the closure). Hence, calling
the original function again with its ``cc`` set to something else will
produce a new continuation instance that chains into that new ``cc``.
The continuation's own ``cc`` will be ``identity``, to allow its use just
like any other function (also as argument of a ``call_cc`` or target of a
tail call).
When the pcc is set (not ``None``), the effect is to run the pcc first,
and ``cc`` only after that. This preserves the whole captured tail of a
computation also in the presence of nested ``call_cc`` invocations (in the
above example, this would occur if also ``g`` used ``call_cc``).
Continuations are not accessible by name (their definitions are named by
gensym). To get a reference to a continuation instance, stash the value
of the ``cc`` argument somewhere while inside the ``call_cc``.
The function ``func`` called by a ``call_cc[func(...)]`` is (almost) the
only place where the ``cc`` argument is actually set. There it is the
captured continuation. Roughly everywhere else, ``cc`` is just ``identity``.
Tail calls are an exception to this rule; a tail call passes along the current
value of ``cc``, unless overridden manually (by setting the ``cc=...`` kwarg
in the tail call).
When the pcc is set (not ``None``) at the site of the tail call, the
machinery will create a composed continuation that runs the pcc first,
and ``cc`` (whether current or manually overridden) after that. This
composed continuation is then passed to the tail call as its ``cc``.
**Tips**:
- Once you have a captured continuation, one way to use it is to set
``cc=...`` manually in a tail call, as was mentioned. Example::
def main():
call_cc[myfunc()] # call myfunc, capturing the current cont...
... # ...which is the rest of "main"
def myfunc(cc):
ourcc = cc # save the captured continuation (sent by call_cc[])
def somefunc():
return dostuff(..., cc=ourcc) # and use it here
somestack.append(somefunc)
In this example, when ``somefunc`` is eventually called, it will tail-call
``dostuff`` and then proceed with the continuation ``myfunc`` had
at the time when that instance of the ``somefunc`` closure was created.
(This pattern is essentially how to build the ``amb`` operator.)
- Instead of setting ``cc``, you can also overwrite ``cc`` with a captured
continuation inside a function body. That overrides the continuation
for the rest of the dynamic extent of the function, not only for a
particular tail call::
def myfunc(cc):
ourcc = cc
def somefunc():
cc = ourcc
return dostuff(...)
somestack.append(somefunc)
- A captured continuation can also be called manually; it's just a callable.
The assignment targets, at the ``call_cc[]`` use site that spawned this
particular continuation, specify its call signature. All args are
positional, except the implicit ``cc``, which is by-name-only.
- Just like in Scheme/Racket's ``call/cc``, the values that get bound
to the ``call_cc[]`` assignment targets on second and further calls
(when the continuation runs) are the arguments given to the continuation
when it is called (whether implicitly or manually).
- Setting ``cc`` to ``unpythonic.fun.identity``, while inside a ``call_cc``,
will short-circuit the rest of the computation. In such a case, the
continuation will not be invoked automatically. A useful pattern for
suspend/resume.
- However, it is currently not possible to prevent the rest of the tail
of a captured continuation (the pcc) from running, apart from manually
setting ``_pcc`` to ``None`` before executing a ``return``. Note that
doing that is not strictly speaking supported (and may be subject to
change in a future version).
- When ``call_cc[]`` appears inside a function definition:
- It tail-calls ``func``, with its ``cc`` set to the captured
continuation.
- The return value of the function containing one or more ``call_cc[]``
statements is the return value of the continuation.
- When ``call_cc[]`` appears at the top level of ``with continuations``:
- A normal call to ``func`` is made, with its ``cc`` set to the captured
continuation.
- In this case, if the continuation is called later, it always
returns ``None``, because the use site of ``call_cc[]`` is not
inside a function definition.
- If you need to insert just a tail call (no further statements) before
proceeding with the current continuation, no need for ``call_cc[]``;
use ``return func(...)`` instead.
The purpose of ``call_cc[func(...)]`` is to capture the current
continuation (the remaining statements), and hand it to ``func``
as a first-class value.
- To combo with ``multilambda``, use this ordering::
with multilambda, continuations:
...
- Some very limited comboability with ``call_ec``. May be better to plan
ahead, using ``call_cc[]`` at the appropriate outer level, and then
short-circuit (when needed) by setting ``cc`` to ``identity``.
This avoids the need to have both ``call_cc`` and ``call_ec`` at the
same time.
- ``unpythonic.ec.call_ec`` can be used normally **lexically before any**
``call_cc[]``, but (in a given function) after at least one ``call_cc[]``
has run, the ``ec`` ceases to be valid. This is because our ``call_cc[]``
actually splits the function into *before* and *after* parts, and
**tail-calls** the *after* part.
(Wrapping the ``def`` in another ``def``, and placing the ``call_ec``
on the outer ``def``, does not help either, because even the outer
function has exited by the time *the continuation* is later called
the second and further times.)
Usage of ``call_ec`` while inside a ``with continuations`` block is::
with continuations:
@call_ec
def result(ec):
print("hi")
ec(42)
print("not reached")
assert result == 42
result = call_ec(lambda ec: do[print("hi"),
ec(42),
print("not reached")])
Note the signature of ``result``. Essentially, ``ec`` is a function
that raises an exception (to escape to a dynamically outer context),
whereas the implicit ``cc`` is the closure-based continuation handled
by the continuation machinery.
See the ``tco`` macro for details on the ``call_ec`` combo.
"""
if syntax != "block":
raise SyntaxError("continuations is a block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("continuations does not take an as-part") # pragma: no cover
# Two-pass macro.
with dyn.let(_macro_expander=expander):
return _continuations(block_body=tree)
|
333494e07462ee554701616c5069fa61c5f46841
| 3,641,484
|
import os
def get_autotune_level() -> int:
"""Get the autotune level.
Returns:
The autotune level.
"""
return int(os.environ.get("BAGUA_AUTOTUNE", 0))
|
661fc4a7580fffdc7eef18ff7eb22e56ece2b468
| 3,641,485
|
def DNA_dynamic_pressure(y, r, h, yunits='kT', dunits='m', opunits='kg/cm^2'):
"""Estimate peak pynamic overpressure at range r from a burst of yield y using the
the Defense Nuclear Agency 1kT standard free airburst overpressure, assuming an ideal
surface. Many real-world surfaces are not ideal (most, in the opinion of Soviet
analysts), meaning that this function has only limited predictove capability."""
yld = convert_units(y, yunits, 'kT')
gr = convert_units(r, dunits, 'm')
height = convert_units(h, dunits, 'm')
dyn = _DNAairburstpeakdyn(gr, yld, height)
return convert_units(dyn, 'Pa', opunits)
|
ac56c9d72c516658384ac313c64ba7ed1235e0ea
| 3,641,486
|
def revcumsum(U):
"""
Reverse cumulative sum for faster performance.
"""
return U.flip(dims=[0]).cumsum(dim=0).flip(dims=[0])
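Small numeric check of the flip/cumsum/flip trick:
import torch

u = torch.tensor([1., 2., 3., 4.])
revcumsum(u)
# -> tensor([10., 9., 7., 4.]): element i holds u[i] + u[i+1] + ... + u[-1]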
|
da147820073f5be9d00b137e48a28d726516dcd0
| 3,641,487
|
def http_trace_parser_hook(request):
"""
Retrieves the propagation context out of the request. Uses the honeycomb header, with W3C header as fallback.
"""
honeycomb_header_value = honeycomb.http_trace_parser_hook(request)
w3c_header_value = w3c.http_trace_parser_hook(request)
if honeycomb_header_value:
return honeycomb_header_value
else:
return w3c_header_value
|
7c97ed82f22357d3867e8a504a30f3a857837bcf
| 3,641,488
|
import torch
def format_attn(attention_tuples: tuple):
"""
Input: N tuples (N = layer num)
Each tuple item is Tensor of shape
Batch x num heads x from x to
Output: Tensor of shape layer x from x to
(averaged over heads)
"""
# Combine tuples into large Tensor, then avg
return torch.cat([l for l in attention_tuples], dim=0).mean(dim=1)
|
8d25d081992099835a21cdbefb406f378350f983
| 3,641,489
|
def fit_gaussian2d(img, coords, boxsize, plot=False,
fwhm_min=1.7, fwhm_max=30, pos_delta_max=1.7):
"""
Calculate the FWHM of an objected located at the pixel
coordinates in the image. The FWHM will be estimated
from a cutout with the specified boxsize.
Parameters
----------
img : ndarray, 2D
The image where a star is located for calculating a FWHM.
coords : len=2 ndarray
The [x, y] pixel position of the star in the image.
boxsize : int
The size of the box (on the side), in pixels.
fwhm_min : float, optional
The minimum allowed FWHM for constraining the fit (pixels).
fwhm_max : float, optional
The maximum allowed FWHM for constraining the fit (pixels).
pos_delta_max : float, optional
The maximum allowed positional offset for constraining the fit (pixels).
        This ensures that the fitter doesn't wander off to a bad pixel.
"""
cutout_obj = Cutout2D(img, coords, boxsize, mode='strict')
cutout = cutout_obj.data
x1d = np.arange(0, cutout.shape[0])
y1d = np.arange(0, cutout.shape[1])
x2d, y2d = np.meshgrid(x1d, y1d)
# Setup our model with some initial guess
x_init = boxsize/2.0
y_init = boxsize/2.0
stddev_init = fwhm_to_stddev(fwhm_min)
g2d_init = models.Gaussian2D(x_mean = x_init,
y_mean = y_init,
x_stddev = stddev_init,
y_stddev = stddev_init,
amplitude=cutout.max())
g2d_init += models.Const2D(amplitude=0.0)
g2d_init.x_stddev_0.min = fwhm_to_stddev(fwhm_min)
g2d_init.y_stddev_0.min = fwhm_to_stddev(fwhm_min)
g2d_init.x_stddev_0.max = fwhm_to_stddev(fwhm_max)
g2d_init.y_stddev_0.max = fwhm_to_stddev(fwhm_max)
g2d_init.x_mean_0.min = x_init - pos_delta_max
g2d_init.x_mean_0.max = x_init + pos_delta_max
g2d_init.y_mean_0.min = y_init - pos_delta_max
g2d_init.y_mean_0.max = y_init + pos_delta_max
# print(g2d_init)
# pdb.set_trace()
fit_g = fitting.LevMarLSQFitter()
g2d = fit_g(g2d_init, x2d, y2d, cutout)
if plot:
mod_img = g2d(x2d, y2d)
plt.figure(1, figsize=(15,5))
plt.clf()
plt.subplots_adjust(left=0.05, wspace=0.3)
plt.subplot(1, 3, 1)
plt.imshow(cutout, vmin=mod_img.min(), vmax=mod_img.max())
plt.colorbar()
plt.title("Original")
plt.subplot(1, 3, 2)
plt.imshow(mod_img, vmin=mod_img.min(), vmax=mod_img.max())
plt.colorbar()
plt.title("Model")
plt.subplot(1, 3, 3)
plt.imshow(cutout - mod_img)
plt.colorbar()
plt.title("Orig - Mod")
# Adjust Gaussian parameters to the original coordinates.
cutout_pos = np.array([g2d.x_mean_0.value, g2d.y_mean_0.value])
origin_pos = cutout_obj.to_original_position(cutout_pos)
g2d.x_mean_0 = origin_pos[0]
g2d.y_mean_0 = origin_pos[1]
return g2d
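`fwhm_to_stddev` is not shown in this snippet; for a Gaussian it is presumably the standard conversion, sketched here as an assumption:
import numpy as np

def fwhm_to_stddev(fwhm):
    # FWHM = 2 * sqrt(2 * ln 2) * sigma ~= 2.3548 * sigma for a Gaussian profile
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))

fwhm_to_stddev(2.3548)  # ~1.0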
|
c3e69f93fdf84c7f895f9cb01adf3e6a0aa3001d
| 3,641,490
|
def _ensure_aware(series, tz_local):
"""Convert naive datetimes to timezone-aware, or return them as-is.
    Args:
        series (pd.Series): Series of datetime values to check.
        tz_local (str, pytz.timezone, dateutil.tz.tzfile):
            Time zone that naive timestamps will be localized to.
            If the series already has timezone info, it is returned as-is.
"""
if pd.api.types.is_datetime64tz_dtype(series):
return series
return series.dt.tz_localize(tz=tz_local)
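Illustrative use with pandas:
import pandas as pd

naive = pd.Series(pd.to_datetime(["2021-01-01 12:00", "2021-01-02 12:00"]))
aware = _ensure_aware(naive, "Europe/Berlin")
# `aware` now carries the Europe/Berlin timezone; passing `aware` back in returns it unchanged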
|
fbb99be365a47507ae676fc90601d13cfa46832b
| 3,641,491
|
import os
import pickle
def one_mask(df, mask_type, sample_type, data, logger=None):
""" return a vector of booleans from the lower triangle of a matching-matrix based on 'mask_type'
:param df: pandas.DataFrame with samples as columns
:param str mask_type: A list of strings to specify matching masks, or a minimum distance to mask out
:param str sample_type: Samples can be 'wellid' or 'parcelid'
:param data: PyGEST.Data object with access to AHBA data
:param logger: A logger object to receive debug information
:return: Boolean 1-D vector to remove items (False values in mask) from any sample x sample triangle vector
"""
def handle_log(maybe_logger, severity, message):
if maybe_logger is None:
print(message)
else:
if severity == "info":
maybe_logger.info(message)
if severity == "warn":
maybe_logger.warn(message)
# If mask is a number, use it as a distance filter
try:
# Too-short values to mask out are False, keepers are True.
min_dist = float(mask_type)
distance_vector = data.distance_vector(df.columns, sample_type=sample_type)
if len(distance_vector) != (len(df.columns) * (len(df.columns) - 1)) / 2:
handle_log(logger, "warn", " MISMATCH in expr and dist!!! Some sample IDs probably not found.")
mask_vector = np.array(distance_vector > min_dist, dtype=bool)
handle_log(logger, "info", " masking out {:,} of {:,} edges closer than {}mm apart.".format(
np.count_nonzero(np.invert(mask_vector)), len(mask_vector), min_dist
))
handle_log(logger, "info", " mean dist of masked edges : {:0.2f} [{:0.2f} to {:0.2f}].".format(
np.mean(distance_vector[~mask_vector]),
np.min(distance_vector[~mask_vector]),
np.max(distance_vector[~mask_vector]),
))
handle_log(logger, "info", " mean dist of unmasked edges: {:0.2f} [{:0.2f} to {:0.2f}].".format(
np.mean(distance_vector[mask_vector]),
np.min(distance_vector[mask_vector]),
np.max(distance_vector[mask_vector]),
))
return mask_vector
except TypeError:
pass
except ValueError:
pass
# Mask is not a number, see if it's a pickled dataframe
if os.path.isfile(mask_type):
with open(mask_type, 'rb') as f:
mask_df = pickle.load(f)
if isinstance(mask_df, pd.DataFrame):
# Note what we started with so we can report after we tweak the dataframe.
# Too-variant values to mask out are False, keepers are True.
orig_vector = mask_df.values[np.tril_indices(n=mask_df.shape[0], k=-1)]
orig_falses = np.count_nonzero(~orig_vector)
orig_length = len(orig_vector)
handle_log(logger, "info", "Found {} containing {:,} x {:,} mask".format(
mask_type, mask_df.shape[0], mask_df.shape[1]
))
handle_log(logger, "info", " generating {:,}-len vector with {:,} False values to mask.".format(
orig_length, orig_falses
))
# We can only use well_ids found in BOTH df and our new mask, make shapes match.
unmasked_ids = [well_id for well_id in df.columns if well_id not in mask_df.columns]
usable_ids = [well_id for well_id in df.columns if well_id in mask_df.columns]
usable_df = mask_df.reindex(index=usable_ids, columns=usable_ids)
usable_vector = usable_df.values[np.tril_indices(n=len(usable_ids), k=-1)]
usable_falses = np.count_nonzero(~usable_vector)
usable_length = len(usable_vector)
handle_log(logger, "info", " {:,} well_ids not found in the mask; padding with Falses.".format(
len(unmasked_ids)
))
pad_rows = pd.DataFrame(np.zeros((len(unmasked_ids), len(mask_df.columns)), dtype=bool),
columns=mask_df.columns, index=unmasked_ids)
mask_df = pd.concat([mask_df, pad_rows], axis=0)
pad_cols = pd.DataFrame(np.zeros((len(mask_df.index), len(unmasked_ids)), dtype=bool),
columns=unmasked_ids, index=mask_df.index)
mask_df = pd.concat([mask_df, pad_cols], axis=1)
mask_vector = mask_df.values[np.tril_indices(n=mask_df.shape[0], k=-1)]
mask_falses = np.count_nonzero(~mask_vector)
mask_trues = np.count_nonzero(mask_vector)
handle_log(logger, "info", " padded mask matrix out to {:,} x {:,}".format(
mask_df.shape[0], mask_df.shape[1]
))
handle_log(logger, "info", " with {:,} True, {:,} False, {:,} NaNs in triangle.".format(
mask_trues, mask_falses, np.count_nonzero(np.isnan(mask_vector))
))
shaped_mask_df = mask_df.reindex(index=df.columns, columns=df.columns)
shaped_vector = shaped_mask_df.values[np.tril_indices(n=len(df.columns), k=-1)]
handle_log(logger, "info", " masking out {:,} (orig {:,}, {:,} usable) hi-var".format(
np.count_nonzero(~shaped_vector), orig_falses, usable_falses,
))
handle_log(logger, "info", " of {:,} (orig {:,}, {:,} usable) edges.".format(
len(shaped_vector), orig_length, usable_length
))
return shaped_vector
else:
handle_log(logger, "warn", "{} is a file, but not a pickled dataframe. Skipping mask.".format(mask_type))
do_nothing_mask = np.ones((len(df.columns), len(df.columns)), dtype=bool)
return do_nothing_mask[np.tril_indices(n=len(df.columns), k=-1)]
# Mask is not a number, so treat it as a matching filter
if mask_type[:4] == 'none':
items = list(df.columns)
elif mask_type[:4] == 'fine':
items = data.samples(samples=df.columns)['fine_name']
elif mask_type[:6] == 'coarse':
items = data.samples(samples=df.columns)['coarse_name']
else:
items = data.samples(samples=df.columns)['structure_name']
mask_array = np.ndarray((len(items), len(items)), dtype=bool)
# There is, potentially, a nice vectorized way to mark matching values as True, but I can't find it.
# So, looping works and is easy to read, although it might cost us a few extra ms.
for i, y in enumerate(items):
for j, x in enumerate(items):
# Generate one edge of the match matrix
mask_array[i][j] = True if mask_type == 'none' else (x != y)
mask_vector = mask_array[np.tril_indices(n=mask_array.shape[0], k=-1)]
handle_log(logger, "info", " masking out {:,} of {:,} '{}' edges.".format(
sum(np.invert(mask_vector)), len(mask_vector), mask_type
))
# if len(mask_vector) == 0:
# mask_vector = np.ones(int(len(df.columns) * (len(df.columns) - 1) / 2), dtype=bool)
return mask_vector
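The mask vectors above all follow the same lower-triangle convention; a tiny illustration of how a square matrix maps onto such a vector:
import numpy as np

m = np.array([[True,  True,  True],
              [False, True,  True],
              [True,  False, True]])
m[np.tril_indices(n=3, k=-1)]
# -> array([False,  True, False]): entries (1,0), (2,0), (2,1) in row-major order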
|
cdb0305f716c9d0b9e1ab2c1e89f8dfbeb6d5504
| 3,641,492
|
def compute_epsilon(steps):
"""Computes epsilon value for given hyperparameters."""
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = FLAGS.batch_size / NB_TRAIN
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to 1e-5 because Penn TreeBank has 60000 training points.
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
|
8c998fdcafaac3c99a87b4020ae6959e64170d36
| 3,641,493
|
def extract_tumblr_posts(client, nb_requests, search_query, before, delta_limit):
"""Extract Tumblr posts with a given emotion.
Parameters:
client: Authenticated Tumblr client with the pytumblr package.
nb_requests: Number of API request.
search_query: Emotion to search for.
before: A timestamp to search for posts before that value.
delta_limit: Maximum difference of timestamp between two queries.
Returns:
posts: List of Tumblr posts.
"""
posts = []
for i in range(nb_requests):
tagged = client.tagged(search_query, filter='text', before=before)
for elt in tagged:
timestamp = elt['timestamp']
if (abs(timestamp - before) < delta_limit):
before = timestamp
current_post = []
current_post.append(elt['id'])
current_post.append(elt['post_url'])
elt_type = elt['type']
current_post.append(elt_type)
current_post.append(timestamp)
current_post.append(elt['date'])
current_post.append(elt['tags'])
current_post.append(elt['liked'])
current_post.append(elt['note_count'])
if (elt_type == 'photo'):
# Only take the first image
current_post.append(elt['photos'][0]['original_size']['url'])
current_post.append(elt['caption'].replace('\n',' ').replace('\r',' '))
current_post.append(search_query)
posts.append(current_post)
elif (elt_type == 'text'):
current_post.append(np.nan)
current_post.append(elt['body'].replace('\n',' ').replace('\r',' '))
current_post.append(search_query)
posts.append(current_post)
return posts
|
bff51bcdc945244a47a88d32139749dddf25f0cf
| 3,641,494
|
def total_curtailment_expression_rule(mod, g, tmp):
"""
**Expression Name**: GenVar_Total_Curtailment_MW
**Defined Over**: GEN_VAR_OPR_TMPS
Available energy that was not delivered
There's an adjustment for subhourly reserve provision:
1) if downward reserves are provided, they will be called upon
occasionally, so power provision will have to decrease and additional
curtailment will be incurred;
2) if upward reserves are provided (energy is being curtailed),
they will be called upon occasionally, so power provision will have to
increase and less curtailment will be incurred
    The subhourly adjustment here is a simple linear function of reserve provision.
Assume cap factors don't incorporate availability derates,
so don't multiply capacity by Availability_Derate here (will count
as curtailment).
"""
return (
mod.Capacity_MW[g, mod.period[tmp]] * mod.gen_var_cap_factor[g, tmp]
- mod.GenVar_Provide_Power_MW[g, tmp]
+ mod.GenVar_Subhourly_Curtailment_MW[g, tmp]
- mod.GenVar_Subhourly_Energy_Delivered_MW[g, tmp]
)
|
9a1466dbbbc945b30c1df04dc86a2134b3d0659a
| 3,641,495
|
def transpose(m):
"""Compute the inverse of `m`
Args:
m (Matrix3):
Returns:
        Matrix3: the transpose
"""
return Matrix3(m[0], m[3], m[6],
m[1], m[4], m[7],
m[2], m[5], m[8])
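Assuming `Matrix3` stores its nine entries in row-major order and supports indexing, the reordering above maps index 3*r + c to 3*c + r, i.e. swaps rows and columns:
m = Matrix3(1, 2, 3,
            4, 5, 6,
            7, 8, 9)
transpose(m)  # entries become (1, 4, 7, 2, 5, 8, 3, 6, 9) - rows and columns swapped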
|
843a4b9d52f7c15772957b7abe05ef8c32c8370b
| 3,641,496
|
def reverse_string(string):
"""Solution to exercise C-4.16.
Write a short recursive Python function that takes a character string s and
outputs its reverse. For example, the reverse of "pots&pans" would be
"snap&stop".
"""
n = len(string)
def recurse(idx):
if idx == 0:
return string[0] # Base case, decremented to beginning of string
return string[idx] + recurse(idx-1)
return recurse(n-1)
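Quick checks of the recursion (note the depth grows linearly with the string length):
assert reverse_string("pots&pans") == "snap&stop"
assert reverse_string("a") == "a"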
|
6d4472fb9c042939020e8b819b4c9b705afd1e60
| 3,641,497
|
def result_to_df(model, data,
path: str = None,
prediction: str = 'prediction',
residual: str = 'residual') -> pd.DataFrame:
"""Create result data frame.
Args:
model (Union[NodeModel, StagewiseModel]): Model instance.
        data (MRData): Data object to predict on.
prediction (str, optional):
Column name of the prediction. Defaults to 'prediction'.
residual (str, optional):
Column name of the residual. Defaults to 'residual'.
path (Union[str, None], optional):
Address that save the result, include the file name.
If ``None`` do not save the result, only return the result data
frame. Defaults to None.
Returns:
pd.DataFrame: Result data frame.
"""
data._sort_by_data_id()
pred = model.predict(data)
resi = data.obs - pred
df = data.to_df()
df[prediction] = pred
df[residual] = resi
if path is not None:
df.to_csv(path)
return df
|
8b089569d628a0f89381240a133b21ae926da7f9
| 3,641,498
|
import re
def auto(frmt, minV = None, maxV = None):
"""
Generating regular expressions for integer, real, date and time.
:param format: format similar to C printf function (description below)
:param min: optional minimum value
:param max: optional maximum value
:return: regular expression for a given format
Supported formats: see :py:class:`regexpgen.integer`, :py:class:`regexpgen.real`, :py:class:`regexpgen.date`, :py:class:`regexpgen.time`
Additional information:
    Because a single %d occurs in both the integer format and the date format, the integer interpretation is preferred. To generate a regex for a single %d date, please use regexpgen.date.
Examples of use:
>>> import regexpgen
>>> regexpgen.auto("%Y-%m-%d", "2013-03-15", "2013-04-24")
'^(2013\\-03\\-(1[5-9]|2[0-9]|3[0-1])|2013\\-03\\-(0[1-9]|1[0-9]|2[0-9]|3[0-1])|2013\\-04\\-(0[1-9]|1[0-9]|2[0-9]|30)|2013\\-04\\-(0[1-9]|1[0-9]|2[0-4]))$'
>>> regexpgen.auto("%0d", -10, 10)
'^(-?([0-9]|10))$'
"""
if (frmt is None or not isinstance(frmt, str)):
raise ValueError("Bad input")
b = builder.RegexpBuilder()
integerFormats = frmt in ["%d", "%0d"] or re.match("^%0[0-9]+d$", frmt)
integerFormatsNotd = frmt in ["%0d"] or re.match("^%0[0-9]+d$", frmt)
realFormats = frmt in ["%lf", "%0lf"] or re.match("^%\.[0-9]+lf$", frmt) or re.match("^%0\.[0-9]+lf$", frmt) or re.match("^%0[1-9][0-9]*\.[0-9]+lf$", frmt) or re.match("^%[1-9][0-9]*\.[0-9]+lf$", frmt)
timeFormats = str(frmt).find("%H") >= 0 or str(frmt).find("%I") >= 0 or str(frmt).find("%M") >= 0 or str(frmt).find("%p") >= 0 or str(frmt).find("%P") >= 0 or str(frmt).find("%S") >= 0
dateFormats = str(frmt).find("%d") >= 0 or str(frmt).find("%m") >= 0 or str(frmt).find("%Y") >= 0 or str(frmt).find("%y") >= 0
if integerFormats and realFormats:
raise ValueError("Bad input")
elif integerFormatsNotd and dateFormats:
raise ValueError("Bad input")
elif integerFormats and timeFormats:
raise ValueError("Bad input")
elif realFormats and dateFormats:
raise ValueError("Bad input")
elif realFormats and timeFormats:
raise ValueError("Bad input")
elif dateFormats and timeFormats:
raise ValueError("Bad input")
elif integerFormats:
return b.createIntegerRegex(frmt, minV, maxV)
elif realFormats:
return b.createRealRegex(frmt, minV, maxV)
elif dateFormats:
return b.createDateRegex(frmt, minV, maxV)
elif timeFormats:
return b.createTimeRegex(frmt, minV, maxV)
else:
raise ValueError("Bad input")
|
a160b2c49baf875adb0a7949b8ea0e0e92dc936a
| 3,641,499
|