| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def transaction_update_spents(txs, address):
"""
    Update spent information for a list of transactions for a specific address. This method assumes the list of
    transactions is complete and up-to-date.
    It loops through all transactions and updates all transaction outputs for the given address, checks
    whether each output is spent and adds the spending transaction ID and index number to the output.
    The same list of transactions, with updated outputs, is returned.
:param txs: Complete list of transactions for given address
:type txs: list of Transaction
:param address: Address string
:type address: str
:return list of Transaction:
"""
spend_list = {}
for t in txs:
for inp in t.inputs:
if inp.address == address:
spend_list.update({(inp.prev_txid.hex(), inp.output_n_int): t})
address_inputs = list(spend_list.keys())
for t in txs:
for to in t.outputs:
if to.address != address:
continue
            spent = (t.txid, to.output_n) in address_inputs
            txs[txs.index(t)].outputs[to.output_n].spent = spent
if spent:
spending_tx = spend_list[(t.txid, to.output_n)]
spending_index_n = \
[inp for inp in txs[txs.index(spending_tx)].inputs
if inp.prev_txid.hex() == t.txid and inp.output_n_int == to.output_n][0].index_n
txs[txs.index(t)].outputs[to.output_n].spending_txid = spending_tx.txid
txs[txs.index(t)].outputs[to.output_n].spending_index_n = spending_index_n
return txs
|
6ac33306cafd5c75b37e73c405fff4bcc732226f
| 3,643,800
|
def count_tilings(n: int) -> int:
"""Returns the number of unique ways to tile a row of length n >= 1."""
if n < 5:
# handle recursive base case
return 2**(n - 1)
else:
# place each tile at end of row and recurse on remainder
return (count_tilings(n - 1) +
count_tilings(n - 2) +
count_tilings(n - 3) +
count_tilings(n - 4))
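# Note (added, not from the original source): the plain recursion above revisits the
# same subproblems exponentially often. A minimal memoized sketch with the same base
# case, assuming the intent is tiles of length 1 through 4:
from functools import lru_cache

@lru_cache(maxsize=None)
def count_tilings_memo(n: int) -> int:
    """Memoized variant of count_tilings; same results, linear time."""
    if n < 5:
        return 2 ** (n - 1)
    return sum(count_tilings_memo(n - k) for k in range(1, 5))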
|
70f9caa9a27c65c73862dd8c415d93f5a7122632
| 3,643,801
|
import math
def _meters_per_pixel(zoom, lat=0.0, tilesize=256):
"""
    Return the pixel resolution for a given Mercator tile zoom and latitude.
Parameters
----------
zoom: int
Mercator zoom level
lat: float, optional
Latitude in decimal degree (default: 0)
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Pixel resolution in meters
"""
return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / (
tilesize * 2 ** zoom
)
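# Usage sketch (added example, not in the original): ground resolution at zoom 10
# with the default 256px tiles is 2*pi*6378137 / (256 * 2**10) ~= 152.87 m/px at
# the equator, and it shrinks by cos(latitude) away from it.
res_equator = _meters_per_pixel(10)          # ~152.87
res_60deg = _meters_per_pixel(10, lat=60.0)  # ~76.44, since cos(60 deg) = 0.5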
|
467d23bd437f153345c67c8c1cab1a086fde4995
| 3,643,802
|
import time
import random
def _generate_submit_id():
"""Generates a submit id in form of <timestamp>-##### where ##### are 5 random digits."""
    timestamp = int(time.time())
return "%d-%05d" % (timestamp, random.randint(0, 99999))
|
285c975e626f0ef1ffe9482432c70b981c9bdea7
| 3,643,803
|
import numpy as np
def draw_from_simplex(ndim: int, nsample: int = 1) -> np.ndarray:
"""Draw uniformly from an n-dimensional simplex.
Args:
ndim: Dimensionality of simplex to draw from.
nsample: Number of samples to draw from the simplex.
Returns:
A matrix of shape (nsample, ndim) that sums to one along axis 1.
"""
if ndim < 1:
raise ValueError("Cannot generate less than 1D samples")
if nsample < 1:
raise ValueError("Generating less than one sample doesn't make sense")
rand = np.random.uniform(size=(nsample, ndim-1))
unsorted = np.concatenate(
[np.zeros(shape=(nsample,1)), rand, np.ones(shape=(nsample,1))],
axis=1
)
    sorted_vals = np.sort(unsorted, axis=1)
    diff_arr = np.concatenate([[-1., 1.], np.zeros(ndim-1)])
    diff_mat = np.array([np.roll(diff_arr, i) for i in range(ndim)]).T
    res = sorted_vals @ diff_mat
return res
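# Quick sanity check (assumed usage, not from the original): each row is a point on
# the simplex, so entries are non-negative and every row sums to one.
samples = draw_from_simplex(ndim=4, nsample=3)
assert samples.shape == (3, 4)
assert np.allclose(samples.sum(axis=1), 1.0)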
|
8dac53212a7ccdab7ed9e6cbbffdf437442de393
| 3,643,804
|
def manhattanDistance( xy1, xy2 ):
"""Returns the Manhattan distance between points xy1 and xy2"""
return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
|
ce0ee21237f253b1af33fbf088292405fd046fe3
| 3,643,805
|
import math
from torch import nn
def Linear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
|
38decbeda35ef9a6ab5d1397af224b77d49b3342
| 3,643,806
|
def homogeneous_type(obj):
"""
Checks that the type is "homogeneous" in that all lists are of objects of the same type, etc.
"""
return same_types(obj, obj)
|
e44a29de0651175f543cb9dc0d64a01e5a495e42
| 3,643,807
|
import numpy as np
def crosscorr(f, g):
"""
Takes two vectors of the same size, subtracts the vector elements by their
respective means, and passes one over the other to construct a
cross-correlation vector
"""
N = len(f)
r = np.array([], dtype=np.single)
r1 = np.array([], dtype=np.single)
r2 = np.array([], dtype=np.single)
f = f - np.mean(f)
g = g - np.mean(g)
for i in range(N-1):
r1i = np.dot(f[N-i-1:N], g[0:i+1])
r2i = np.dot(f[0:N-i-1], g[i+1:N])
r1 = np.append(r1, r1i)
r2 = np.append(r2, r2i)
r = np.append(r, r1)
r = np.append(r, np.dot(f, g))
r = np.append(r, r2)
return r/N
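# Design note (an assumption, not part of the original): the loop builds the full
# cross-correlation sequence of the mean-removed signals, so NumPy's vectorized
# routine should agree with it up to floating-point error.
f_demo = np.array([1.0, 2.0, 0.5, -1.0])
g_demo = np.array([0.0, 1.0, 2.0, 3.0])
vectorized = np.correlate(g_demo - g_demo.mean(), f_demo - f_demo.mean(), mode="full") / len(f_demo)
assert np.allclose(crosscorr(f_demo, g_demo), vectorized)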
|
6a4fec358404b7ca4f1df764c38518d39f635ed9
| 3,643,808
|
from itertools import product
import numpy as np
from numpy.linalg import norm
from scipy.optimize import linear_sum_assignment
from sklearn.neighbors import NearestNeighbors
def nearest_neighbors(point_cloud_A, point_cloud_B, alg='knn'):
"""Find the nearest (Euclidean) neighbor in point_cloud_B (model) for each
point in point_cloud_A (data).
Parameters
----------
point_cloud_A: Nx3 numpy array
data points
point_cloud_B: Mx3 numpy array
model points
Returns
-------
distances: (N, ) numpy array
Euclidean distances from each point in
point_cloud_A to its nearest neighbor in point_cloud_B.
indices: (N, ) numpy array
indices in point_cloud_B of each
point_cloud_A point's nearest neighbor - these are the c_i's
"""
assert 3 == point_cloud_A.shape[1] and 3 == point_cloud_B.shape[1]
n, m = point_cloud_A.shape[0], point_cloud_B.shape[0]
assert n == m
distances = np.zeros(n)
indices = np.zeros(n)
if alg == 'knn':
nbrs = NearestNeighbors(n_neighbors=1).fit(point_cloud_B)
d, ids = nbrs.kneighbors(point_cloud_A)
distances = np.array(d).flatten()
indices = np.array(ids).flatten()
elif alg == 'hungarian':
cost = np.zeros((n, m))
for i, j in product(range(n), range(m)):
cost[i,j] = norm(point_cloud_A[i,:]- point_cloud_B[j,:])
row_ids, indices = linear_sum_assignment(cost)
distances = cost[row_ids, indices]
else:
raise NotImplementedError('NN algorithm must be one of: {}'.format(NN_ALGS))
return distances, indices
|
0849c372c6358ded16c7907631a3bdd3c53385c6
| 3,643,809
|
def us_1040(form_values, year="latest"):
"""Compute US federal tax return."""
_dispatch = {
"latest": (ots_2020.us_main, data.US_1040_2020),
"2020": (ots_2020.us_main, data.US_1040_2020),
"2019": (ots_2019.us_main, data.US_1040_2019),
"2018": (ots_2018.us_main, data.US_1040_2018),
"2017": (ots_2017.us_main, data.US_1040_2017),
}
main_fn, schema = _dispatch[str(year)]
return helpers.parse_ots_return(
main_fn(helpers.generate_ots_return(form_values, schema["input_wrap"])),
schema["output_wrap"],
)
|
8056ea5dfae8698dd1e695b96680251f1fb45b63
| 3,643,810
|
def resolve_service_deps(services: list) -> dict:
"""loop through services and handle needed_by"""
needed_by = {}
for name in services:
service = services.get(name)
needs = service.get_tasks_needed_by()
for need, provides in needs.items():
needed_by[need] = list(set(needed_by.get(need, []) + provides))
for name in services:
service = services.get(name)
service.update_task_requires(needed_by)
return services
|
4979d24aa6105579c3208f2953f8bdc276ad127b
| 3,643,811
|
import numpy as np
def rolling_window(series, window_size):
"""
Transforms an array of series into an array of sliding window arrays. If
the passed in series is a matrix, each column will be transformed into an
array of sliding windows.
"""
return np.array(
[
series[i : (i + window_size)]
for i in range(0, series.shape[0] - window_size + 1)
]
)
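# Usage sketch (added example): a length-5 series with window 3 yields 3 overlapping
# windows; numpy.lib.stride_tricks.sliding_window_view offers a copy-free alternative,
# though its output layout differs for 2-D input.
windows = rolling_window(np.arange(5), 3)
# array([[0, 1, 2],
#        [1, 2, 3],
#        [2, 3, 4]])
assert windows.shape == (3, 3)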
|
dfa95d12f287aeeb2f328919979376c0c890c0eb
| 3,643,812
|
def ldns_key_set_inception(*args):
"""LDNS buffer."""
return _ldns.ldns_key_set_inception(*args)
|
0411dd40b6d61740d872f1e4ac4f50683540de57
| 3,643,813
|
def verifyIP(ip):
"""Verifies an IP is valid"""
try:
#Split ip and integer-ize it
octets = [int(x) for x in ip.split('.')]
except ValueError:
return False
#First verify length
if len(octets) != 4:
return False
#Then check octet values
for octet in octets:
if octet < 0 or octet > 255:
return False
return True
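# Alternative sketch using the standard library (an added example; it mirrors
# verifyIP above, although corner cases such as leading zeros are handled slightly
# differently by the ipaddress module on recent Python versions).
import ipaddress

def verify_ip_stdlib(ip: str) -> bool:
    """Return True if ip is a valid dotted-quad IPv4 address."""
    try:
        ipaddress.IPv4Address(ip)
        return True
    except ipaddress.AddressValueError:
        return False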
|
72c373099a75adb2a1e776c863b6a2d1cb2698df
| 3,643,814
|
from datetime import datetime
def get_datetime_now(t=None, fmt='%Y_%m%d_%H%M_%S'):
"""Return timestamp as a string; default: current time, format: YYYY_DDMM_hhmm_ss."""
if t is None:
t = datetime.now()
return t.strftime(fmt)
|
c4fc830b7ede9d6f52ee81c014c03bb2ef5552dc
| 3,643,815
|
def is_firstline(text, medicine, disease):
"""Detect if first-line treatment is mentioned with a medicine in a sentence.
Use keyword matching to detect if the keywords "first-line treatment" or "first-or second-line treatment", medicine name, and disease name all appear in the sentence.
Parameters
----------
text : str
A single sentence.
    medicine : str
        A medicine's name.
    disease : str
        A disease's name.
Returns
-------
bool
Return True if the medicine and first-line treatment are mentioned in the sentence, False otherwise.
Examples
--------
Import the module
>>> from biomarker_nlp import biomarker_extraction
Example
>>> txt = "TECENTRIQ, in combination with carboplatin and etoposide, is indicated for the first-line treatment of adult patients with extensive-stage small cell lung cancer (ES-SCLC)."
>>> medicine = "TECENTRIQ"
>>> disease = "small cell lung cancer"
>>> biomarker_extraction.is_firstline(text = txt, medicine = medicine, disease = disease)
True
"""
text = text.lower()
medicine = medicine.lower()
disease = disease.lower()
    return (medicine in text
            and ('first-line treatment' in text or 'first-or second-line treatment' in text)
            and disease in text)
|
c9f8a31c6089c4f7545780028ccb1a033372c284
| 3,643,816
|
def mac_address(addr):
""" mac_address checks that a given string is in MAC address format """
mac = addr.upper()
if not _mac_address_pattern.fullmatch(mac):
raise TypeError('{} does not match a MAC address pattern'.format(addr))
return mac
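# `_mac_address_pattern` is referenced above but not included in the snippet; a
# plausible (hypothetical) definition for colon-separated MAC addresses would be:
import re
_mac_address_pattern = re.compile(r'[0-9A-F]{2}(:[0-9A-F]{2}){5}')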
|
201d32bd73f50c2818feef7c9c9be5371739dfcf
| 3,643,817
|
def py3_classifiers():
"""Fetch the Python 3-related trove classifiers."""
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
response = urllib_request.urlopen(url)
try:
try:
status = response.status
except AttributeError: #pragma: no cover
status = response.code
if status != 200: #pragma: no cover
msg = 'PyPI responded with status {0} for {1}'.format(status, url)
raise ValueError(msg)
data = response.read()
finally:
response.close()
classifiers = data.decode('utf-8').splitlines()
base_classifier = 'Programming Language :: Python :: 3'
return (classifier for classifier in classifiers
if classifier.startswith(base_classifier))
|
70e769811758bef05a9e3d8722eca13808acd514
| 3,643,818
|
def match(i, j):
"""
returns (red, white) count,
where red is matches in color and position,
and white is a match in color but not position
"""
red_count = 0
# these are counts only of the items that are not exact matches
i_colors = [0]*6
j_colors = [0]*6
for i_c, j_c in zip(color_inds(i), color_inds(j)):
if i_c == j_c:
red_count += 1
else:
i_colors[i_c] += 1
j_colors[j_c] += 1
white_count = 0
for i_c, j_c in zip(i_colors, j_colors):
white_count += min(i_c, j_c)
return (red_count, white_count)
|
06ddf17b6de367cd9158a33834431f3bc1c9e821
| 3,643,819
|
import numpy as np
def time_delay_runge_kutta_4(fun, t_0, y_0, tau, history=None, steps=1000,
                             width=1):
"""
apply the classic Runge Kutta method to a time delay differential equation
f: t, y(t), y(t-tau) -> y'(t)
"""
width = float(width)
if not isinstance(y_0, np.ndarray):
        y_0 = np.ones((1,), dtype=float)*y_0
dim = len(y_0)
hist_steps = np.floor(tau/width)
assert tau/width == hist_steps, "tau must be a multiple of width"
hist_steps = int(hist_steps)
if history is None:
        history = np.zeros((hist_steps, dim), dtype=float)
else:
assert len(history) == hist_steps
fun_eval = np.zeros((steps+1+hist_steps, dim), dtype=y_0.dtype)
fun_eval[:hist_steps] = history
fun_eval[hist_steps] = y_0
for step in range(steps):
k_1 = fun(t_0, y_0, fun_eval[step])
k_2 = fun(t_0 + width/2, y_0 + width/2*k_1, fun_eval[step])
k_3 = fun(t_0 + width/2, y_0 + width/2*k_2, fun_eval[step])
k_4 = fun(t_0 + width, y_0 + width*k_3, fun_eval[step])
t_0 += width
y_0 += width*(k_1 + 2*k_2 + 2*k_3 + k_4)/6
fun_eval[step+1+hist_steps] = y_0
return fun_eval[hist_steps:]
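# Usage sketch (an assumed example): integrate the linear delay equation
# y'(t) = -y(t - tau) with constant history y = 1 for t < 0. Values are illustrative
# only; width is chosen so that tau is an exact multiple of it.
def decay_with_delay(t, y, y_delayed):
    return -y_delayed

trajectory = time_delay_runge_kutta_4(
    decay_with_delay, t_0=0.0, y_0=1.0, tau=1.0,
    history=np.ones((4, 1)), steps=40, width=0.25)
# trajectory has shape (41, 1): the initial value followed by the 40 steps.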
|
02905a447e07857fdacc4c6b3e34ddf15726b141
| 3,643,820
|
import numpy as np
def Vstagger_to_mass(V):
"""
V are the data on the top and bottom of a grid box
A simple conversion of the V stagger grid to the mass points.
Calculates the average of the top and bottom value of a grid box. Looping
over all rows reduces the staggered grid to the same dimensions as the
mass point.
Useful for converting V, XLAT_V, and XLONG_V to masspoints
    Difference between XLAT_V and XLAT is usually small, on order of 10e-5
(row_j1+row_j2)/2 = masspoint_inrow
Input:
Vgrid with size (##+1, ##)
Output:
V on mass points with size (##,##)
"""
    # create the first row manually to initialize the array with correct dimensions
    V_masspoint = (V[0,:]+V[1,:])/2. # average of first and second row
V_num_rows = int(V.shape[0])-1 # we want one less row than we have
# Loop through the rest of the rows
# We want the same number of rows as we have columns.
# Take the first and second row, average them, and store in first row in V_masspoint
for row in range(1,V_num_rows):
row_avg = (V[row,:]+V[row+1,:])/2.
# Stack those onto the previous for the final array
V_masspoint = np.row_stack((V_masspoint,row_avg))
return V_masspoint
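# Design note (added, not from the original): the loop only averages adjacent rows,
# so a single vectorized expression should give the same result:
V_demo = np.arange(12.0).reshape(4, 3)   # a small staggered grid with 4 rows
assert np.allclose(Vstagger_to_mass(V_demo), 0.5 * (V_demo[:-1, :] + V_demo[1:, :]))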
|
f3dbb75506f05acb9f65ff0fe0335f4fe139127b
| 3,643,821
|
import base64
def verify_l4_block_pow(hash_type: SupportedHashes, block: "l4_block_model.L4BlockModel", complexity: int = 8) -> bool:
"""Verify a level 4 block with proof of work scheme
Args:
hash_type: SupportedHashes enum type
        block: L4BlockModel with appropriate data to verify
        complexity: proof-of-work difficulty target passed to check_complexity (default 8)
    Returns:
Boolean if valid hashed block with appropriate nonce
"""
# Get hash for PoW calculation to compare
hash_bytes = hash_l4_block(hash_type, block, block.nonce)
# Make sure it matches complexity requirements
if not check_complexity(hash_bytes, complexity):
return False
# Check that the hash bytes match what the block provided
return hash_bytes == base64.b64decode(block.proof)
|
301ea1c4e74ae34fb61610a7e614ac1af437a6c3
| 3,643,822
|
from datetime import datetime
def report_reply(report_id):
"""
Replies to an existing report. The email reply is constructed and sent
to the email address that original reported the phish.
Args:
report_id - str - The urlsafe key for the EmailReport
TODO: Make this a nice template or something
"""
report = EmailReport.get_by_id(report_id)
if not report:
return json_error(404, 'Report not found', {})
sender_address = g.user.email()
response = EmailResponse.from_dict(request.get_json())
if not response:
return json_error(400, 'Invalid JSON', {})
response.responder = sender_address
response.subject = render_template_string(response.subject, report=report)
response.content = render_template_string(response.content, report=report)
try:
response_key = response.put()
report.responses.append(response_key)
if not report.date_responded:
report.date_responded = datetime.now()
event_key = EventReportResponded(
response=response, report=report).put()
report.events.append(event_key)
report.put()
email_provider.send(
to=report.reported_by,
sender=g.user.email(),
subject=response.subject,
body=response.content)
except Exception as e:
return json_error(400, str(e), {})
return jsonify(report.to_dict())
|
651bd18c60254105e16b20aed9daaa3534c75537
| 3,643,823
|
def file_reader(file_name):
"""file_reader"""
data = None
with open(file_name, "r") as f:
for line in f.readlines():
data = eval(line)
f.close()
return data
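# Safer variant sketch (an assumption about intent: the file holds one Python
# literal per line and the last one wins). ast.literal_eval only parses literals
# and avoids executing arbitrary expressions the way eval() does.
import ast

def file_reader_safe(file_name):
    """Like file_reader, but restricted to Python literals."""
    data = None
    with open(file_name, "r") as f:
        for line in f:
            data = ast.literal_eval(line)
    return data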
|
6d3d63840cc48ccfdd5beefedf0d3a60c0f44cf9
| 3,643,824
|
import os
import torch
def train(model, data, params):
""" Trains a model.
Inputs:
model (ATISModel): The model to train.
data (ATISData): The data that is used to train.
params (namespace): Training parameters.
"""
# Get the training batches.
log = Logger(os.path.join(params.logdir, params.logfile), "w")
num_train_original = atis_data.num_utterances(data.train_data)
log.put("Original number of training utterances:\t"
+ str(num_train_original))
eval_fn = evaluate_utterance_sample
trainbatch_fn = data.get_utterance_batches
trainsample_fn = data.get_random_utterances
validsample_fn = data.get_all_utterances
batch_size = params.batch_size
if params.interaction_level:
batch_size = 1
eval_fn = evaluate_interaction_sample
trainbatch_fn = data.get_interaction_batches
trainsample_fn = data.get_random_interactions
validsample_fn = data.get_all_interactions
maximum_output_length = params.train_maximum_sql_length
train_batches = trainbatch_fn(batch_size,
max_output_length=maximum_output_length,
randomize=not params.deterministic)
if params.num_train >= 0:
train_batches = train_batches[:params.num_train]
training_sample = trainsample_fn(params.train_evaluation_size,
max_output_length=maximum_output_length)
valid_examples = validsample_fn(data.valid_data,
max_output_length=maximum_output_length)
num_train_examples = sum([len(batch) for batch in train_batches])
num_steps_per_epoch = len(train_batches)
log.put(
"Actual number of used training examples:\t" +
str(num_train_examples))
log.put("(Shortened by output limit of " +
str(maximum_output_length) +
")")
log.put("Number of steps per epoch:\t" + str(num_steps_per_epoch))
log.put("Batch size:\t" + str(batch_size))
print(
"Kept " +
str(num_train_examples) +
"/" +
str(num_train_original) +
" examples")
print(
"Batch size of " +
str(batch_size) +
" gives " +
str(num_steps_per_epoch) +
" steps per epoch")
# Keeping track of things during training.
epochs = 0
patience = params.initial_patience
learning_rate_coefficient = 1.
previous_epoch_loss = float('inf')
previous_valid_acc = 0.
maximum_validation_accuracy = 0.
maximum_string_accuracy = 0.
countdown = int(patience)
if params.scheduler:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min', )
keep_training = True
step = 0
while keep_training:
log.put("Epoch:\t" + str(epochs))
model.set_dropout(params.dropout_amount)
model.train()
if not params.scheduler:
model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)
# Run a training step.
if params.interaction_level:
epoch_loss, step = train_epoch_with_interactions(
train_batches,
params,
model,
randomize=not params.deterministic,
db2id=data.db2id,
id2db=data.id2db,
step=step)
else:
epoch_loss = train_epoch_with_utterances(
train_batches,
model,
randomize=not params.deterministic)
log.put("train epoch loss:\t" + str(epoch_loss))
model.set_dropout(0.)
model.eval()
# Run an evaluation step on a sample of the training data.
train_eval_results = eval_fn(training_sample,
model,
params.train_maximum_sql_length,
name=os.path.join(params.logdir, "train-eval"),
write_results=True,
gold_forcing=True,
metrics=TRAIN_EVAL_METRICS)[0]
for name, value in train_eval_results.items():
log.put(
"train final gold-passing " +
name.name +
":\t" +
"%.2f" %
value)
# Run an evaluation step on the validation set.
valid_eval_results = eval_fn(valid_examples,
model,
params.eval_maximum_sql_length,
name=os.path.join(params.logdir, "valid-eval"),
write_results=True,
gold_forcing=True,
metrics=VALID_EVAL_METRICS)[0]
for name, value in valid_eval_results.items():
log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
valid_loss = valid_eval_results[Metrics.LOSS]
valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
if params.scheduler:
scheduler.step(valid_loss)
if valid_loss > previous_epoch_loss and valid_token_accuracy < previous_valid_acc and step >= params.warmup_step:
learning_rate_coefficient *= params.learning_rate_ratio
log.put(
"learning rate coefficient:\t" +
str(learning_rate_coefficient))
previous_epoch_loss = valid_loss
previous_valid_acc = valid_token_accuracy
saved = False
if not saved and string_accuracy > maximum_string_accuracy:
maximum_string_accuracy = string_accuracy
patience = patience * params.patience_ratio
countdown = int(patience)
last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
model.save(last_save_file)
log.put(
"maximum string accuracy:\t" +
str(maximum_string_accuracy))
log.put("patience:\t" + str(patience))
log.put("save file:\t" + str(last_save_file))
else:
log.put("still saved")
last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
model.save(last_save_file)
if countdown <= 0:
keep_training = False
countdown -= 1
log.put("countdown:\t" + str(countdown))
log.put("")
epochs += 1
log.put("Finished training!")
log.close()
return last_save_file
|
c7af3d193247561a2a13713196052f61ae82f214
| 3,643,825
|
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
account = model.authenticate(username, password)
if account is None:
return AuthResponse.no_account
if not model.hasAssignedBlock(account):
return AuthResponse.no_block
return AuthResponse.success
|
5c735f354ed56a5bc3960de96a76eacbc5a3bdd1
| 3,643,826
|
def plot_energy_ratio(
reference_power_baseline,
test_power_baseline,
wind_speed_array_baseline,
wind_direction_array_baseline,
reference_power_controlled,
test_power_controlled,
wind_speed_array_controlled,
wind_direction_array_controlled,
wind_direction_bins,
confidence=95,
n_boostrap=None,
wind_direction_bin_p_overlap=None,
axarr=None,
base_color="b",
con_color="g",
label_array=None,
label_pchange=None,
plot_simple=False,
plot_ratio_scatter=False,
marker_scale=1.0,
show_count=True,
hide_controlled_case=False,
ls="--",
marker=None,
):
"""
Plot the balanced energy ratio.
Function mainly acts as a wrapper to call
calculate_balanced_energy_ratio and plot the results.
Args:
reference_power_baseline (np.array): Array of power
of reference turbine in baseline conditions.
test_power_baseline (np.array): Array of power of
test turbine in baseline conditions.
wind_speed_array_baseline (np.array): Array of wind
speeds in baseline conditions.
wind_direction_array_baseline (np.array): Array of
wind directions in baseline case.
reference_power_controlled (np.array): Array of power
of reference turbine in controlled conditions.
test_power_controlled (np.array): Array of power of
test turbine in controlled conditions.
wind_speed_array_controlled (np.array): Array of wind
speeds in controlled conditions.
wind_direction_array_controlled (np.array): Array of
wind directions in controlled case.
wind_direction_bins (np.array): Wind directions bins.
confidence (int, optional): Confidence level to use.
Defaults to 95.
n_boostrap (int, optional): Number of bootstaps, if
none, _calculate_bootstrap_iterations is called. Defaults
to None.
wind_direction_bin_p_overlap (np.array, optional):
Percentage overlap between wind direction bin. Defaults to
None.
axarr ([axes], optional): list of axes to plot to.
Defaults to None.
base_color (str, optional): Color of baseline in
plots. Defaults to 'b'.
con_color (str, optional): Color of controlled in
plots. Defaults to 'g'.
label_array ([str], optional): List of labels to
apply Defaults to None.
        label_pchange (str, optional): Label for
            percentage change. Defaults to None.
plot_simple (bool, optional): Plot only the ratio, no
confidence. Defaults to False.
plot_ratio_scatter (bool, optional): Include scatter
plot of values, sized to indicate counts. Defaults to False.
        marker_scale (float, optional): Marker scale.
            Defaults to 1.
show_count (bool, optional): Show the counts as scatter plot
hide_controlled_case (bool, optional): Option to hide the control case from plots, for demonstration
"""
if axarr is None:
fig, axarr = plt.subplots(3, 1, sharex=True)
if label_array is None:
label_array = ["Baseline", "Controlled"]
if label_pchange is None:
label_pchange = "Energy Gain"
(
ratio_array_base,
lower_ratio_array_base,
upper_ratio_array_base,
counts_ratio_array_base,
ratio_array_con,
lower_ratio_array_con,
upper_ratio_array_con,
counts_ratio_array_con,
diff_array,
lower_diff_array,
upper_diff_array,
counts_diff_array,
p_change_array,
lower_p_change_array,
upper_p_change_array,
counts_p_change_array,
) = calculate_balanced_energy_ratio(
reference_power_baseline,
test_power_baseline,
wind_speed_array_baseline,
wind_direction_array_baseline,
reference_power_controlled,
test_power_controlled,
wind_speed_array_controlled,
wind_direction_array_controlled,
wind_direction_bins,
        confidence=confidence,
n_boostrap=n_boostrap,
wind_direction_bin_p_overlap=wind_direction_bin_p_overlap,
)
if plot_simple:
ax = axarr[0]
ax.plot(
wind_direction_bins,
ratio_array_base,
label=label_array[0],
color=base_color,
ls=ls,
marker=marker,
)
if not hide_controlled_case:
ax.plot(
wind_direction_bins,
ratio_array_con,
label=label_array[1],
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(1, color="k")
ax.set_ylabel("Energy Ratio (-)")
ax = axarr[1]
ax.plot(
wind_direction_bins,
diff_array,
label=label_pchange,
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(0, color="k")
ax.set_ylabel("Change in Energy Ratio (-)")
ax = axarr[2]
ax.plot(
wind_direction_bins,
p_change_array,
label=label_pchange,
color=con_color,
ls=ls,
marker=marker,
)
ax.axhline(0, color="k")
ax.set_ylabel("% Change in Energy Ratio (-)")
else:
ax = axarr[0]
ax.plot(
wind_direction_bins,
ratio_array_base,
label=label_array[0],
color=base_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_ratio_array_base,
upper_ratio_array_base,
alpha=0.3,
color=base_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
ratio_array_base,
s=counts_ratio_array_base * marker_scale,
label="_nolegend_",
color=base_color,
marker="o",
alpha=0.2,
)
if not hide_controlled_case:
ax.plot(
wind_direction_bins,
ratio_array_con,
label=label_array[1],
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_ratio_array_con,
upper_ratio_array_con,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
ratio_array_con,
s=counts_ratio_array_con * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(1, color="k")
ax.set_ylabel("Energy Ratio (-)")
ax = axarr[1]
ax.plot(
wind_direction_bins,
diff_array,
label=label_pchange,
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_diff_array,
upper_diff_array,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
diff_array,
s=counts_diff_array * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(0, color="k")
ax.set_ylabel("Change in Energy Ratio (-)")
ax = axarr[2]
ax.plot(
wind_direction_bins,
p_change_array,
label=label_pchange,
color=con_color,
ls="-",
marker=".",
)
ax.fill_between(
wind_direction_bins,
lower_p_change_array,
upper_p_change_array,
alpha=0.3,
color=con_color,
label="_nolegend_",
)
if show_count:
ax.scatter(
wind_direction_bins,
p_change_array,
s=counts_p_change_array * marker_scale,
label="_nolegend_",
color=con_color,
marker="o",
alpha=0.2,
)
ax.axhline(0, color="k")
ax.set_ylabel("% Change in Energy Ratio (-)")
for ax in axarr:
ax.grid(True)
ax.set_xlabel("Wind Direction (Deg)")
return diff_array
|
2ccdfa20dc8a475ab6c65086ab1f39d6db5e211f
| 3,643,827
|
import os
def dir_is_cachedir(path):
"""Determines whether the specified path is a cache directory (and
therefore should potentially be excluded from the backup) according to
the CACHEDIR.TAG protocol
(http://www.brynosaurus.com/cachedir/spec.html).
"""
tag_contents = b'Signature: 8a477f597d28d172789f06886806bc55'
tag_path = os.path.join(path, 'CACHEDIR.TAG')
try:
if os.path.exists(tag_path):
with open(tag_path, 'rb') as tag_file:
tag_data = tag_file.read(len(tag_contents))
if tag_data == tag_contents:
return True
except OSError:
pass
return False
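# Usage sketch (added example): mark a temporary directory with a CACHEDIR.TAG
# file and confirm it is detected.
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, 'CACHEDIR.TAG'), 'wb') as tag_file:
        tag_file.write(b'Signature: 8a477f597d28d172789f06886806bc55')
    assert dir_is_cachedir(tmp)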
|
b63f46ebafe6ff3c917325e19adfec551497ce68
| 3,643,828
|
def first_position():
"""Sets up two positions in the
Upper left
.X.Xo.
X.Xoo.
XXX...
......
Lower right
......
..oooo
.oooXX
.oXXX.
(X = black, o = white)
They do not overlap as the Positions are size_limit 9 or greater.
"""
def position_moves(s):
rest_of_row = '.'*(s-5)
first_three = rest_of_row.join([
'.X.Xo',
'X.Xoo',
'XXX..',''])
last_three = rest_of_row.join(['',
'.oooo',
'oooXX',
'oXXX.',])
board = first_three + '.'*s*(s-6) + last_three
position = go.Position(size=s)
moves_played = defaultdict()
for pt, symbol in enumerate(board):
if symbol == 'X':
position.move(move_pt=pt, colour=go.BLACK)
moves_played[pt] = go.BLACK
elif symbol == 'o':
position.move(move_pt=pt, colour=go.WHITE)
moves_played[pt] = go.WHITE
return position, moves_played
return position_moves
|
029e965fe20f550030ece305975e96f7d1cd9115
| 3,643,829
|
import numpy as np
import pandas as pd
def _create_teams(
pool: pd.DataFrame,
n_iterations: int = 500,
n_teams: int = 10,
n_players: int = 10,
probcol: str = 'probs'
) -> np.ndarray:
"""Creates initial set of teams
Returns:
np.ndarray of shape
axis 0 - number of iterations
axis 1 - number of teams in league
axis 2 - number of players on team
"""
# get the teams, which are represented as 3D array
# axis 0 = number of iterations (leagues)
# axis 1 = number of teams in league
# axis 2 = number of players on team
arr = _multidimensional_shifting(
elements=pool.index.values,
num_samples=n_iterations,
sample_size=n_teams * n_players,
probs=pool[probcol]
)
return arr.reshape(n_iterations, n_teams, n_players)
|
5889cc356a812c65ca7825e26c835b520cad1680
| 3,643,830
|
import numpy as np
def calculate_magnitude(data: np.ndarray) -> np.ndarray:
"""Calculates the magnitude for given (x,y,z) axes stored in numpy array"""
assert data.shape[1] == 3, f"Numpy array should have 3 axes, got {data.shape[1]}"
return np.sqrt(np.square(data).sum(axis=1))
|
6493660467154d3e45c10a7a4350e87fa73c9719
| 3,643,831
|
def clean_str(string: str) -> str:
""" Cleans strings for SQL insertion """
return string.replace('\n', ' ').replace("'", "’")
|
d3833293163114642b4762ee25ea7c8f850e9d54
| 3,643,832
|
def zeros(shape, name=None):
"""All zeros."""
return tf.get_variable(name=name, shape=shape, dtype=tf.float32,
initializer=tf.zeros_initializer())
|
2c20b960bd17a0dc752883e65f7a18e77a7cde32
| 3,643,833
|
import io
def parseTemplate(bStream):
"""Parse the Template in current byte stream, it terminates when meets an object.
:param bStream: Byte stream
:return: The template.
"""
template = Template()
eof = endPos(bStream)
while True:
currPos = bStream.tell()
        if currPos < eof:
desc = '{0:08b}'.format(readUSHORT(bStream))
bStream.seek(currPos, io.SEEK_SET)
if ComponentRole[desc[:3]] == OBJECT:
return template
else:
assert(int(desc[3])) # all components in Template must have label.
template._attrList.append(parseAttributeInTemplate(bStream))
else:
logger.warning("Encounter a Set without Objects")
break
|
716858cde357be4036b62824ac17ba60cf71eea1
| 3,643,834
|
import os
def explode(req: str):
"""Returns the exploded dependency list for a requirements file.
As requirements files can include other requirements files with the -r directive, it can be
useful to see a flattened version of all the constraints. This method unrolls a requirement file
and produces a list of strings for each constraint line in the order of inclusion.
Args:
req: path to a requirements file.
Returns:
list of lines of requirements
"""
res = []
d = os.path.dirname(req)
with open(req) as f:
for l in f.readlines():
l = l.rstrip("\n")
l = l.lstrip(" ")
if l.startswith("-r"):
include = l.lstrip(" ").lstrip("-r").lstrip(" ")
# assuming relative includes always
res += explode(os.path.join(d, include))
elif l:
res += [l]
return res
|
1a4c389537dcc9e5abd34d19ed386c48d3d6ecc6
| 3,643,835
|
import numpy as np
from mayavi import mlab
def draw_lidar(
pc,
color=None,
fig=None,
bgcolor=(0, 0, 0),
pts_scale=0.3,
pts_mode="sphere",
pts_color=None,
color_by_intensity=False,
pc_label=False,
pc_range=[],
):
""" Draw lidar points
Args:
pc: numpy array (n,3) of XYZ
color: numpy array (n) of intensity or whatever
fig: mayavi figure handler, if None create new one otherwise will use it
Returns:
fig: created or used fig
"""
xmin, xmax, ymin, ymax, zmin, zmax = pc_range
pts_mode = "point"
print("====================", pc.shape)
if fig is None:
fig = mlab.figure(
figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000)
)
if color is None:
color = pc[:, 2]
mlab.points3d(
pc[:, 0],
pc[:, 1],
pc[:, 2],
color,
color=pts_color,
mode=pts_mode,
colormap="gnuplot",
scale_factor=pts_scale,
figure=fig,
vmax=zmax,
vmin=zmin,
)
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere", scale_factor=0.2)
# draw axis
axes = np.array(
[[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]],
dtype=np.float64,
)
mlab.plot3d(
[0, axes[0, 0]],
[0, axes[0, 1]],
[0, axes[0, 2]],
color=(1, 0, 0),
tube_radius=None,
figure=fig,
)
mlab.plot3d(
[0, axes[1, 0]],
[0, axes[1, 1]],
[0, axes[1, 2]],
color=(0, 1, 0),
tube_radius=None,
figure=fig,
)
mlab.plot3d(
[0, axes[2, 0]],
[0, axes[2, 1]],
[0, axes[2, 2]],
color=(0, 0, 1),
tube_radius=None,
figure=fig,
)
# draw fov (todo: update to real sensor spec.)
a_ymin = abs(ymin)
a_ymax = abs(ymax)
fov = np.array(
[[a_ymax, a_ymax, 0.0, 0.0], [a_ymin, -a_ymin, 0.0, 0.0]], dtype=np.float64 # 45 degree
)
mlab.plot3d(
[0, fov[0, 0]],
[0, fov[0, 1]],
[0, fov[0, 2]],
color=(1, 1, 1),
tube_radius=None,
line_width=1,
figure=fig,
)
mlab.plot3d(
[0, fov[1, 0]],
[0, fov[1, 1]],
[0, fov[1, 2]],
color=(1, 1, 1),
tube_radius=None,
line_width=1,
figure=fig,
)
# draw square region
TOP_Y_MIN = ymin
TOP_Y_MAX = ymax
TOP_X_MIN = xmin
TOP_X_MAX = xmax
#TOP_Z_MIN = -2.0
#TOP_Z_MAX = 0.4
x1 = TOP_X_MIN
x2 = TOP_X_MAX
y1 = TOP_Y_MIN
y2 = TOP_Y_MAX
mlab.plot3d(
[x1, x1],
[y1, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x2, x2],
[y1, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x1, x2],
[y1, y1],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
mlab.plot3d(
[x1, x2],
[y2, y2],
[0, 0],
color=(0.5, 0.5, 0.5),
tube_radius=0.1,
line_width=1,
figure=fig,
)
# mlab.orientation_axes()
mlab.view(
azimuth=180,
elevation=70,
focalpoint=[12.0909996, -1.04700089, -2.03249991],
distance=62.0,
figure=fig,
)
return fig
|
b80aa7d51ea8e1ef3cc5498fecd25470a1a03531
| 3,643,836
|
def load_circuit(filename:str):
""" Reads a MNSensitivity cicuit file (.mc) and returns a Circuit list
(format is 1D array of tuples, the first element contains a Component
object, the 2nd a SER/PAL string).
Format of the .mc file is:
* each line contains a Component object init string (See Component class
doc string to see format) after an orientation string (SER or PAL,
specifies if the component is series or parallel to ground).
* Comments can be specified by '#'
* Blank lines are skipped
    * Components with the earliest line number are assumed closest to the source,
      the last line number closest to the load, and progressively in between.
"""
circuit = []
lnum = 0
#Open file...
with open(filename) as file:
#For each line...
while True:
#Read line...
            line = file.readline()
            lnum += 1
            if not line:
                break
#Break into tokens...
words = line.split()
if len(words) == 0:
continue
#Skip comments
if words[0] == "#" or words[0][0] == '#':
continue
if len(words) < 5:
print(f"ERROR: Fewer than 5 words on line {lnum}.")
print(words)
return []
try:
idx = line.find(" ")
new_comp = Component(line[idx+1:])
except:
print(f"Failed to interpret component string on line {lnum}.")
return []
if words[0].upper() == "SER":
circuit.append( (new_comp, "SER") )
elif words[0].upper() == "PAL":
circuit.append( (new_comp, "PAL") )
else:
unrectok = words[0]
print(f"ERROR: Unrecognized orientation token '{unrectok}' on line {lnum}. Acceptable tokens are 'SER' and 'PAL'.")
return []
return circuit
|
c77aa31f9a1c1f6803795c19de509ea967f65077
| 3,643,837
|
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"):
"""
This function handles processing/reduction of output for both
DataParallel or non-DataParallel situations.
For the case of multiple GPUs, This function will
sum all values for a certain output attribute in various batches
together.
Parameters
---------------------
:param out: Dictionary, output of model during forward pass,
:param attribute_name: str,
:param cuda_device: list or int
:param reduction: (string, optional) reduction to apply to the output. Default: 'sum'.
"""
if isinstance(cuda_device, list):
if reduction == "sum":
return out[attribute_name].sum()
elif reduction == "mean":
return out[attribute_name].sum() / float(len(out[attribute_name]))
else:
raise ValueError("invalid reduction type argument")
else:
return out[attribute_name]
|
c09ff6a3dd4ae2371b1bbec12d4617e9ed6c6e1e
| 3,643,838
|
def get_ref_aidxs(df_fs):
"""Part of the hotfix for redundant FCGs.
I did not record the occurrence id in the graphs, which was stupid.
So now I need to use the df_fs to get the information instead.
Needs to be used with fid col, which is defined in filter_out_fcgs_ffs_all.
"""
return {k: v for k, v in zip(df_fs['fid'], df_fs['_aidxf'])}
|
9b57d7297d96f6b711bb9d3c37f85a17c4ccacd5
| 3,643,839
|
def format_info(info):
""" Print info neatly """
sec_width = 64
eq = ' = '
# find key width
key_widths = []
for section, properties in info.items():
for prop_key, prop_val in properties.items():
if type(prop_val) is dict:
key_widths.append(len(max(list(prop_val.keys()), key=len)) + 4)
else:
key_widths.append(len(prop_key))
key_width = max(key_widths)
# format items
msg = []
for section, properties in info.items():
n0 = (sec_width - 2 - len(section)) // 2
n1 = n0 if n0 * 2 + 2 + len(section) == sec_width else n0 + 1
msg.append('\n' + '=' * n0 + f' {section} ' + '=' * n1)
for prop_key, prop_val in properties.items():
if type(prop_val) is dict:
msg.append((prop_key + ' ').ljust(sec_width, '_'))
for sub_key, sub_val in prop_val.items():
msg.append(' ' * 4 + sub_key.ljust(key_width - 4) +
eq + str(sub_val))
else:
msg.append(prop_key.ljust(key_width) + eq + str(prop_val))
msg.append('=' * (n0 + n1 + 2 + len(section)))
return '\n'.join(msg)
|
9dd3a6ef15909230725f2be6eb698e7ca08a2d8b
| 3,643,840
|
import itertools
import copy
def server_handle_hallu_message(
msg_output, controller, mi_info, options, curr_iter):
"""
Petridish server handles the return message of a forked
process that watches over a halluciniation job.
"""
log_dir_root = logger.get_logger_dir()
q_child = controller.q_child
model_str, model_iter, _parent_iter, search_depth = msg_output
# Record performance in the main log
jr = parse_remote_stop_file(_mi_to_dn(log_dir_root, model_iter))
if jr is None:
# job failure: reap the virtual resource and move on.
logger.info('Failed mi={}'.format(model_iter))
return curr_iter
(fp, ve, te, hallu_stats, l_op_indices, l_op_omega) = (
jr['fp'], jr['ve'], jr['te'], jr['l_stats'],
jr['l_op_indices'], jr['l_op_omega']
)
logger.info(
("HALLU : mi={} val_err={} test_err={} "
"Gflops={} hallu_stats={}").format(
model_iter, ve, te, fp * 1e-9, hallu_stats))
mi_info[model_iter].ve = ve
mi_info[model_iter].fp = fp
## compute hallucination related info in net_info
net_info = net_info_from_str(model_str)
hallu_locs = net_info.contained_hallucination() # contained
hallu_indices = net_info.sorted_hallu_indices(hallu_locs)
# feature selection based on params
l_fs_ops, l_fs_omega = feature_selection_cutoff(
l_op_indices, l_op_omega, options)
separated_hallu_info = net_info.separate_hallu_info_by_cname(
hallu_locs, hallu_indices, l_fs_ops, l_fs_omega)
## Select a subset of hallucination to add to child model
l_selected = []
# sort by -cos(grad, hallu) for the indices, 0,1,2,...,n_hallu-1.
processed_stats = [process_hallu_stats_for_critic_feat([stats]) \
for stats in hallu_stats]
logger.info('processed_stats={}'.format(processed_stats))
logger.info('separated_hallu_info={}'.format(separated_hallu_info))
# greedy select with gradient boosting
l_greedy_selected = []
if options.n_greed_select_per_init:
greedy_order = sorted(
range(len(hallu_indices)),
key=lambda i : - processed_stats[i][0])
min_select = options.n_hallus_per_select
max_select = max(min_select, len(hallu_indices) // 2)
for selected_len in range(min_select, max_select + 1):
selected = greedy_order[:selected_len]
l_greedy_selected.append(selected)
n_greedy_select = len(l_greedy_selected)
if n_greedy_select > options.n_greed_select_per_init:
# random choose
l_greedy_selected = list(np.random.choice(
l_greedy_selected,
options.n_greed_select_per_init,
replace=False))
# random select a subset
l_random_selected = []
if options.n_rand_select_per_init:
# also try some random samples
l_random_selected = online_sampling(
itertools.combinations(
range(len(hallu_indices)),
options.n_hallus_per_select
),
options.n_rand_select_per_init)
np.random.shuffle(l_random_selected)
l_selected = l_greedy_selected + l_random_selected
## for each selected subset of hallu, make a model for q_child
# since more recent ones tend to be better,
# we insert in reverse order, so greedy are inserted later.
for selected in reversed(l_selected):
# new model description
child_info = copy.deepcopy(net_info)
l_hi = [ hallu_indices[s] for s in selected ]
child_info = child_info.select_hallucination(
l_hi, separated_hallu_info)
# Compute initialization stat
stat = process_hallu_stats_for_critic_feat(
[hallu_stats[s] for s in selected])
# update mi_info
curr_iter += 1
child_str = child_info.to_str()
mi_info.append(ModelSearchInfo(
curr_iter, model_iter, search_depth+1,
None, None, child_str, stat))
controller.add_one_to_queue(
q_child, mi_info, curr_iter, child_info)
return curr_iter
|
a4dc3da855066d719ca8a798a691864ed9d04e7f
| 3,643,841
|
def pBottleneckSparse_model(inputs, train=True, norm=True, **kwargs):
"""
A pooled shallow bottleneck convolutional autoencoder model..
"""
# propagate input targets
outputs = inputs
# dropout = .5 if train else None
input_to_network = inputs['images']
shape = input_to_network.get_shape().as_list()
stride = 16
hidden_size = 2#np.ceil(shape[1]/stride)
deconv_size = 12#(shape[1]/hidden_size).astype(int)
### YOUR CODE HERE
with tf.variable_scope('conv1') as scope:
convweights = tf.get_variable(shape=[7, 7, 3, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(), name='weights')
conv = tf.nn.conv2d(input_to_network, convweights,[1, 4, 4, 1], padding='SAME')
biases = tf.get_variable(initializer=tf.constant_initializer(0),
shape=[64], dtype=tf.float32, trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
relu = tf.nn.relu(bias, name='relu')
pool = tf.nn.max_pool(value=relu, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME', name='pool')
# assign layers to output
outputs['input'] = input_to_network
outputs['conv1_kernel'] = convweights
outputs['conv1'] = relu
outputs['pool1'] = pool
outputs['convweights'] = convweights
print(outputs['input'].shape)
print(outputs['conv1'].shape)
print(outputs['pool1'].shape)
with tf.variable_scope('deconv2') as scope:
deconvweights = tf.get_variable(shape=[deconv_size, deconv_size, 3, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(), name='weights')
deconvRegularizer = tf.nn.l2_loss(deconvweights)
deconv = tf.nn.conv2d_transpose(outputs['pool1'], deconvweights,
outputs['input'].shape, [1, 12, 12, 1], padding='VALID', name=None)
# assign layers to output
outputs['deconv2'] = deconv
outputs['deconvweights'] = deconvweights
### END OF YOUR CODE
for k in ['input','conv1', 'deconv2']:
assert k in outputs, '%s was not found in outputs' % k
return outputs, {}
|
0a9609b776a9373f28bacf10f9f6aa9dcfbb17d2
| 3,643,842
|
def CoarseDropout(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None, mask=None):
"""
Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean, that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
          upsampled to ``HxW``, where ``H`` is the image height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a ``1x1`` low resolution mask, leading easily
to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
drops 2 percent of all pixels on an lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state, mask=mask)
|
c60828aa2a81459ef0a84440305f6d73939e2eb5
| 3,643,843
|
import numpy as np
def chenneling(x):
"""
This function makes the dataset suitable for training.
    In particular, grayscale images do not have a channel dimension.
    This function adds a single channel for grayscale images.
"""
# if grayscale image
if(len(x.shape) == 3):
C = 1
N, H, W = x.shape
x = np.asarray(x).reshape((N, H, W, C))
else: # color image
pass
x = x.transpose(0, 3, 1, 2)
x = x.astype(float)
return x
|
c47c1690affbb52c98343185cae7e0679bfff41a
| 3,643,844
|
import collections
def _get_ordered_label_map(label_map):
"""Gets label_map as an OrderedDict instance with ids sorted."""
if not label_map:
return label_map
ordered_label_map = collections.OrderedDict()
for idx in sorted(label_map.keys()):
ordered_label_map[idx] = label_map[idx]
return ordered_label_map
|
4c5e56789f57edda61409f0693c3bccb57ddc7cf
| 3,643,845
|
def eight_interp(x, a0, a1, a2, a3, a4, a5, a6, a7):
"""``Approximation degree = 8``
"""
return (
a0
+ a1 * x
+ a2 * (x ** 2)
+ a3 * (x ** 3)
+ a4 * (x ** 4)
+ a5 * (x ** 5)
+ a6 * (x ** 6)
+ a7 * (x ** 7)
)
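# Usage sketch (an assumption: this helper is shaped like a model function for
# scipy.optimize.curve_fit, which passes x first and the coefficients after it).
import numpy as np
from scipy.optimize import curve_fit

x_data = np.linspace(-1.0, 1.0, 50)
y_data = 1.0 + 2.0 * x_data - 0.5 * x_data ** 3
coeffs, _ = curve_fit(eight_interp, x_data, y_data)  # recovers the 8 polynomial coefficients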
|
98be2259c9e0fae214234b635a3ff55608f707d1
| 3,643,846
|
import logging
import boto3
from botocore.exceptions import ClientError
def create_ec2_instance(image_id, instance_type, keypair_name, user_data):
"""Provision and launch an EC2 instance
The method returns without waiting for the instance to reach
a running state.
:param image_id: ID of AMI to launch, such as 'ami-XXXX'
:param instance_type: string, such as 't2.micro'
    :param keypair_name: string, name of the key pair
    :param user_data: string, user data script passed to the instance at launch
    :return Dictionary containing information about the instance. If error,
returns None.
"""
# Provision and launch the EC2 instance
ec2_client = boto3.client('ec2')
try:
response = ec2_client.run_instances(ImageId=image_id,
InstanceType=instance_type,
KeyName=keypair_name,
MinCount=1,
MaxCount=1,
UserData=user_data,
SecurityGroups=[
'AllowSSHandOSB',
]
)
instance = response['Instances'][0]
except ClientError as e:
logging.error(e)
return None
    return instance
|
4c1edda4b2aed0179026aacb6f5a95a0b550ef66
| 3,643,847
|
def get_pop(state):
"""Returns the population of the passed in state
Args:
- state: state in which to get the population
"""
abbrev = get_abbrev(state)
return int(us_areas[abbrev][1]) if abbrev != '' else -1
|
0d44a033eaff65c1430aab806a93686c68f5c490
| 3,643,848
|
import requests
import json
def GitHub_post(data, url, *, headers):
"""
POST the data ``data`` to GitHub.
Returns the json response from the server, or raises on error status.
"""
r = requests.post(url, headers=headers, data=json.dumps(data))
GitHub_raise_for_status(r)
return r.json()
|
7dbdbd3beed6e39ff3e20509114a11761a05ab52
| 3,643,849
|
def subsample(inputs, factor, scope=None):
"""Subsample the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels]
with the input, either intact (if factor == 1) or subsampled
(if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
|
32df6bccbb016d572bbff227cf42aadeb07c6242
| 3,643,850
|
def password_reset(*args, **kwargs):
"""
Override view to use a custom Form
"""
kwargs['password_reset_form'] = PasswordResetFormAccounts
return password_reset_base(*args, **kwargs)
|
a2764365118cc0264fbeddf0b79457a0f7bf3c62
| 3,643,851
|
def update_tab_six_two(
var,
time_filter,
month,
hour,
data_filter,
filter_var,
min_val,
max_val,
normalize,
global_local,
df,
):
"""Update the contents of tab size. Passing in the info from the dropdown and the general info."""
df = pd.read_json(df, orient="split")
time_filter_info = [time_filter, month, hour]
data_filter_info = [data_filter, filter_var, min_val, max_val]
heat_map = custom_heatmap(df, global_local, var, time_filter_info, data_filter_info)
no_display = {"display": "none"}
if data_filter:
return (
heat_map,
{},
barchart(df, var, time_filter_info, data_filter_info, normalize),
{},
)
return heat_map, no_display, {"data": [], "layout": {}, "frames": []}, no_display
|
0ce47fc30c088eae245de1da8bc4392408f16e26
| 3,643,852
|
import json
async def blog_api(request: Request, year: int, month: int, day: int,
title: str) -> json:
"""Handle blog."""
blog_date = {"year": year, "month": month, "day": day}
req_blog = app.blog.get(xxh64(unquote(title)).hexdigest())
if req_blog:
if all(
map(lambda x: req_blog["date"][x] == blog_date[x],
req_blog["date"])):
return json(
{
"message": f"Hope you enjoy \"{unquote(title)}\"",
"status": request.headers,
"error": None,
"results": req_blog
},
status = 200)
else:
return redirect(f"/{req_blog['blog_path']}")
else:
raise BlogNotFound(f"Blog \"{unquote(title)}\" Not Found!")
|
6c497a9280c8c8a1301f407c06065846267743f8
| 3,643,853
|
import numpy as np
def coherence_score_umass(X, inv_vocabulary, top_words, normalized=False):
"""
Extrinsic UMass coherence measure
Parameter
----------
X : array-like, shape=(n_samples, n_features)
Document word matrix.
inv_vocabulary: dict
Dictionary of index and vocabulary from vectorizer.
top_words: list
List of top words for each topic-sentiment pair
normalized: bool
If true, return to NPMI
Returns
-----------
score: float
"""
wordoccurances = (X > 0).astype(int)
N = X.shape[0]
totalcnt = 0
PMI = 0
NPMI = 0
for allwords in top_words:
for word1 in allwords:
for word2 in allwords:
if word1 != word2:
ind1 = inv_vocabulary[word1]
ind2 = inv_vocabulary[word2]
if ind1 > ind2:
denominator = (np.count_nonzero(wordoccurances > 0, axis=0)[
ind1]/N) * (np.count_nonzero(wordoccurances > 0, axis=0)[ind2]/N)
numerator = (
(np.matmul(wordoccurances[:, ind1], wordoccurances[:, ind2])) + 1) / N
PMI += np.log(numerator) - np.log(denominator)
NPMI += (np.log(denominator) / np.log(numerator)) - 1
totalcnt += 1
if normalized:
score = NPMI / totalcnt
else:
score = PMI / totalcnt
return score
|
185cfa1e6df64e799ae07116c8f88ef9cd37c94b
| 3,643,854
|
def _splitaddr(addr):
"""
splits address into character and decimal
:param addr:
:return:
"""
col='';rown=0
for i in range(len(addr)):
if addr[i].isdigit():
col = addr[:i]
rown = int(addr[i:])
break
elif i==len(addr)-1:
col=addr
return col,rown
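# Usage sketch (added example): split spreadsheet-style cell addresses into a
# column string and a row number.
assert _splitaddr('AB12') == ('AB', 12)
assert _splitaddr('XFD') == ('XFD', 0)   # no digits: the whole string is the column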
|
6f4ef43ed926a468ae5ae22fc062fe2b2701a18a
| 3,643,855
|
def checksum(data):
"""
    :return: bytes (a single checksum byte)
"""
assert isinstance(data, bytes)
assert len(data) >= MINIMUM_MESSAGE_SIZE - 2
assert len(data) <= MAXIMUM_MESSAGE_SIZE - 2
__checksum = 0
for data_byte in data:
__checksum += data_byte
__checksum = -(__checksum % 256) + 256
try:
__checksum = bytes([__checksum])
except ValueError:
__checksum = bytes([0])
return __checksum
|
105bb5a9fe748ee352c080939ea33936c661e77b
| 3,643,856
|
import numpy as np
def as_character(
x,
str_dtype=str,
_na=np.nan,
):
"""Convert an object or elements of an iterable into string
Aliases `as_str` and `as_string`
Args:
x: The object
str_dtype: The string dtype to convert to
_na: How NAs should be casted. Specify np.nan will keep them unchanged.
But the dtype will be object then.
Returns:
When x is an array or a series, return x.astype(str).
When x is iterable, convert elements of it into strings
Otherwise, convert x to string.
"""
return _as_type(x, str_dtype, na=_na)
|
ed8653f5c713fd257062580e03d26d48aaac3421
| 3,643,857
|
def test_logger(request: HttpRequest) -> HttpResponse:
"""
Generate a log to test logging setup.
Use a GET parameter to specify level, default to INFO if absent. Value can be INFO, WARNING, ERROR,
EXCEPTION, UNCATCHED_EXCEPTION.
Use a GET parameter to specify message, default to "Test logger"
Example: test_logger?level=INFO&message=Test1
:param request: HttpRequest request
:return: HttpResponse web response
"""
message = request.GET.get("message", "Test logger")
level = request.GET.get("level", "INFO")
if level not in ("INFO", "WARNING", "ERROR", "EXCEPTION", "UNCATCHED_EXCEPTION"):
level = "INFO"
if level == "INFO":
logger.info(message)
elif level == "WARNING":
logger.warning(message)
elif level == "ERROR":
logger.error(message)
elif level == "EXCEPTION":
try:
raise Exception(message)
except Exception:
logger.exception("test_logger")
else:
assert level == "UNCATCHED_EXCEPTION", "should never happen"
raise Exception(message)
return HttpResponse("ok")
|
04ef0d03d85402b5005660d9a06ae6ec775cb712
| 3,643,858
|
import requests
import zipfile
import os
import pandas as pd
from urllib.error import HTTPError
def cvm_informes (year: int, mth: int) -> pd.DataFrame:
    """Downloads the daily report (informe diario) from CVM for a given month and year\n
    <b>Parameters:</b>\n
    year (int): The year of the report the function should download\n
    mth (int): The month of the report the function should download\n
    <b>Returns:</b>\n
    pd.DataFrame: Pandas dataframe with the report for the given month and year. If the year is previous to 2017, will contain data regarding the whole year
    """
    if int(year) >= 2017: #uses download process from reports after the year of 2017
        try:
            mth = f"{mth:02d}"
            year = str(year)
            #creates url using the parameters provided to the function
            url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/inf_diario_fi_'+year+mth+'.csv'
            #reads the csv returned by the link
            cotas = pd.read_csv(url, sep =';')
            cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
            try:
                #removes column present in only a few reports to avoid inconsistency when making the union of reports
                cotas.drop(columns = ['TP_FUNDO'], inplace = True)
            except KeyError:
                pass
            return cotas
        except HTTPError:
            print("there's no report for this date yet!\n")
    if int(year) < 2017:
        try:
            year = str(year)
            url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/inf_diario_fi_' + year + '.zip'
            #sends request to the url
            r = requests.get(url, stream=True, allow_redirects=True)
            with open('informe' + year + '.zip', 'wb') as fd: #writes the .zip file downloaded
                fd.write(r.content)
            zip_inf = zipfile.ZipFile('informe' + year + '.zip') #opens the .zip file
            #reads the csv files inside the zip archive
            informes = [pd.read_csv(zip_inf.open(f), sep=";") for f in zip_inf.namelist()]
            cotas = pd.concat(informes,ignore_index=True)
            cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
            zip_inf.close() #closes the zip file
            os.remove('informe' + year + '.zip') #deletes .zip file
            return cotas
        except Exception as E:
            print(E)
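A minimal usage sketch (this downloads real data from the CVM open-data portal, so network access is required; illustrative only):
informe = cvm_informes(2020, 5)
if informe is not None:
    print(informe.head())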
|
43411c39375abecb98a54482a92078bb1e8058f0
| 3,643,859
|
def remoteness(N):
"""
Compute the remoteness of N.
Parameters
----------
N : Nimber
The nimber of interest.
Returns
-------
remote : int
The remoteness of N.
"""
if N.n == 0:
return 0
remotes = {remoteness(n) for n in N.left}
if all(remote % 2 == 1 for remote in remotes):
return 1 + max(remotes)
else:
return 1 + min(remote for remote in remotes if remote % 2 == 0)
|
6ea40df2a79a2188b3d7c9db69ee9038ec2e6462
| 3,643,860
|
def breakfast_analysis_variability(in_path,identifier, date_col, time_col, min_log_num=2, min_separation=4, plot=True):
"""
Description:\n
    This function calculates the variability of loggings on good logging days by subtracting the 50% (median) breakfast time from the 5%,10%,25%,50%,75%,90%,95% quantiles of breakfast time. It can also make a histogram that represents the 90%-10% interval for all subjects.\n
    Input:\n
    - in_path (str, pandas df): input path, file in pickle, csv or panda dataframe format.
    - identifier(str) : participants' unique identifier such as id, name, etc.
    - date_col(str) : the column that represents the dates.
    - time_col(str) : the column that represents the float time.
    - min_log_num (count,int): filtration criteria on the minimum number of loggings each day.
    - min_separation(hours,int): filtration criteria on the minimum separation between the earliest and latest loggings each day.
    - plot(bool) : Whether to generate a histogram for breakfast variability. Default = True.
Return:\n
- A dataframe that contains 5%,10%,25%,50%,75%,90%,95% quantile of breakfast time minus 50% time for each subjects from the in_path file.\n
Requirements:\n
in_path file must have the following columns:\n
- unique_code\n
- date\n
- local_time\n
"""
df = universal_key(in_path)
# leave only the loggings in a good logging day
df['in_good_logging_day'] = in_good_logging_day(df, identifier, time_col, min_log_num, min_separation)
df = df[df['in_good_logging_day']==True]
breakfast_series = df.groupby(['unique_code', 'date'])['local_time'].min().groupby('unique_code').quantile([0.05, 0.10, 0.25, 0.5, 0.75, 0.90, 0.95])
breakfast_df = pd.DataFrame(breakfast_series)
all_rows = []
for index in breakfast_df.index:
tmp_dict = dict(breakfast_series[index[0]])
tmp_dict['id'] = index[0]
all_rows.append(tmp_dict)
breakfast_summary_df = pd.DataFrame(all_rows, columns = ['id', 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95])\
.rename(columns = {0.05: '5%', 0.1: '10%', 0.25: '25%', 0.5: '50%', 0.75: '75%', 0.9: '90%', 0.95: '95%'})\
.drop_duplicates().reset_index(drop = True)
breakfast_variability_df = breakfast_summary_df.copy()
for col in breakfast_variability_df.columns:
if col == 'id' or col == '50%':
continue
breakfast_variability_df[col] = breakfast_variability_df[col] - breakfast_variability_df['50%']
breakfast_variability_df['50%'] = breakfast_variability_df['50%'] - breakfast_variability_df['50%']
if plot == True:
fig, ax = plt.subplots(1, 1, figsize = (10, 10), dpi=80)
sns_plot = sns.distplot( breakfast_variability_df['90%'] - breakfast_variability_df['10%'] )
ax.set(xlabel='Variation Distribution for Breakfast (90% - 10%)', ylabel='Kernel Density Estimation')
return breakfast_variability_df
|
e174f57fd146e07d41f0fc21c028711ae581a580
| 3,643,861
|
def _sdss_wcs_to_log_wcs(old_wcs):
"""
The WCS in the SDSS files does not appear to follow the WCS standard - it
claims to be linear, but is logarithmic in base-10.
The wavelength is given by:
λ = 10^(w0 + w1 * i)
with i being the pixel index starting from 0.
    The FITS standard uses a natural log with a slightly different formulation,
see WCS Paper 3 (which discusses spectral WCS).
This function does the conversion from the SDSS WCS to FITS WCS.
"""
w0 = old_wcs.wcs.crval[0]
w1 = old_wcs.wcs.cd[0,0]
crval = 10 ** w0
cdelt = crval * w1 * np.log(10)
cunit = old_wcs.wcs.cunit[0] or Unit('Angstrom')
ctype = "WAVE-LOG"
w = WCS(naxis=1)
w.wcs.crval[0] = crval
w.wcs.cdelt[0] = cdelt
w.wcs.ctype[0] = ctype
w.wcs.cunit[0] = cunit
w.wcs.set()
return w
|
b4b4427d5563e85f80ddc2200e9c323098ad35ae
| 3,643,862
|
def request_records(request):
"""show the datacap request records"""
address = request.POST.get('address')
page_index = request.POST.get('page_index', '1')
page_size = request.POST.get('page_size', '5')
page_size = interface.handle_page(page_size, 5)
page_index = interface.handle_page(page_index, 1)
msg_code, msg_data = interface.request_record(address=address)
obj = Page(msg_data, page_size).page(page_index)
data_list = []
for i in obj.get('objects'):
msg_cid = i.msg_cid
assignee = i.assignee
comments_url = i.comments_url
data_list.append({
'assignee': assignee,
'created_at': i.created_at.strftime('%Y-%m-%d %H:%M:%S') if i.created_at else i.created_at,
'region': i.region,
'request_datacap': i.request_datacap,
'status': i.status,
'allocated_datacap': i.allocated_datacap,
'msg_cid': msg_cid,
'url': interface.get_req_url(i.comments_url),
'height': get_height(msg_cid),
'name': i.name,
'media': i.media,
'github_url': get_github_url(comments_url),
'issue_id': get_api_issue_id(comments_url),
'notary': get_notary_by_github_account(assignee),
})
return format_return(0, data={"objs": data_list, "total_page": obj.get('total_page'),
"total_count": obj.get('total_count')})
|
6eac819ab78afa6e7df00be8e47b87344a129abc
| 3,643,863
|
def extendCorrespondingAtomsDictionary(names, str1, str2):
"""
extends the pairs based on list1 & list2
"""
list1 = str1.split()
list2 = str2.split()
for i in range(1, len(list1)):
names[list1[0]][list2[0]].append([list1[i], list2[i]])
names[list2[0]][list1[0]].append([list2[i], list1[i]])
return None
|
cb586be8dcf7a21af556b332cfedbdce0be6882a
| 3,643,864
|
def _device_name(data):
"""Return name of device tracker."""
if ATTR_BEACON_ID in data:
return "{}_{}".format(BEACON_DEV_PREFIX, data['name'])
return data['device']
|
7a3dd5765d12c7f1b78c87c6188d3afefd4228ee
| 3,643,865
|
def get_share_path(
storage_server: StorageServer, storage_index: bytes, sharenum: int
) -> FilePath:
"""
Get the path to the given storage server's storage for the given share.
"""
return (
FilePath(storage_server.sharedir)
.preauthChild(storage_index_to_dir(storage_index))
.child("{}".format(sharenum))
)
|
e37566e0cb09bf6c490e6e0faf024cedf91c4576
| 3,643,866
|
import torch
import torch.nn.functional as F
def focal_loss_with_prob(prob,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""A variant of Focal Loss used in TOOD."""
target_one_hot = prob.new_zeros(len(prob), len(prob[0]) + 1)
target_one_hot = target_one_hot.scatter_(1, target.unsqueeze(1), 1)[:, :-1]
flatten_alpha = torch.empty_like(prob).fill_(1 - alpha)
flatten_alpha[target_one_hot == 1] = alpha
pt = torch.where(target_one_hot == 1, prob, 1 - prob)
ce_loss = F.binary_cross_entropy(prob, target_one_hot, reduction='none')
loss = flatten_alpha * torch.pow(1 - pt, gamma) * ce_loss
if weight is not None:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
|
0c730a1eef5487d3ce5b79c06fda5d8a0e8542a7
| 3,643,867
|
def root_key_from_seed(seed):
"""This derives your master key the given seed.
Implemented in ripple-lib as ``Seed.prototype.get_key``, and further
is described here:
https://ripple.com/wiki/Account_Family#Root_Key_.28GenerateRootDeterministicKey.29
"""
seq = 0
while True:
private_gen = from_bytes(first_half_of_sha512(
b''.join([seed, to_bytes(seq, 4)])))
seq += 1
if curves.SECP256k1.order >= private_gen:
break
public_gen = curves.SECP256k1.generator * private_gen
# Now that we have the private and public generators, we apparently
# have to calculate a secret from them that can be used as a ECDSA
# signing key.
secret = i = 0
public_gen_compressed = ecc_point_to_bytes_compressed(public_gen)
while True:
secret = from_bytes(first_half_of_sha512(
b"".join([
public_gen_compressed, to_bytes(0, 4), to_bytes(i, 4)])))
i += 1
if curves.SECP256k1.order >= secret:
break
secret = (secret + private_gen) % curves.SECP256k1.order
# The ECDSA signing key object will, given this secret, then expose
# the actual private and public key we are supposed to work with.
key = SigningKey.from_secret_exponent(secret, curves.SECP256k1)
# Attach the generators as supplemental data
key.private_gen = private_gen
key.public_gen = public_gen
return key
|
b93cfa8c31ab061f6496f8e12f5c3d7ba5f0d7a7
| 3,643,868
|
import os
import logging
def main(
path_experiment,
path_table,
path_dataset,
path_output,
path_reference=None,
path_comp_bm=None,
min_landmarks=1.,
details=True,
allow_inverse=False,
):
""" main entry point
:param str path_experiment: path to experiment folder
:param str path_table: path to assignment file (requested registration pairs)
:param str path_dataset: path to provided landmarks
:param str path_output: path to generated results
:param str|None path_reference: path to the complete landmark collection,
if None use dataset folder
:param str|None path_comp_bm: path to reference comp. benchmark
:param float min_landmarks: required number of submitted landmarks in range (0, 1),
match values in COL_PAIRED_LANDMARKS
:param bool details: exporting case details
:param bool allow_inverse: allow evaluate also inverse transformation,
warped landmarks from ref to move image
"""
path_results = os.path.join(path_experiment, ImRegBenchmark.NAME_CSV_REGISTRATION_PAIRS)
if not os.path.isfile(path_results):
raise AttributeError('Missing experiments results: %s' % path_results)
path_reference = path_dataset if not path_reference else path_reference
# drop time column from Cover which should be empty
df_overview = pd.read_csv(path_table).drop([ImRegBenchmark.COL_TIME], axis=1, errors='ignore')
df_overview = _df_drop_unnamed(df_overview)
# drop Warp* column from Cover which should be empty
df_overview = df_overview.drop(
[col for col in df_overview.columns if 'warped' in col.lower()],
axis=1,
errors='ignore',
)
df_results = pd.read_csv(path_results)
df_results = _df_drop_unnamed(df_results)
# df_results.drop(filter(lambda c: 'Unnamed' in c, df_results.columns), axis=1, inplace=True)
cols_ = list(ImRegBenchmark.COVER_COLUMNS_WRAP) + [ImRegBenchmark.COL_TIME]
df_results = df_results[[col for col in cols_ if col in df_results.columns]]
df_experiments = pd.merge(df_overview, df_results, how='left', on=ImRegBenchmark.COVER_COLUMNS)
df_experiments = swap_inverse_experiment(df_experiments, allow_inverse)
# df_experiments.drop([ImRegBenchmark.COL_IMAGE_REF_WARP, ImRegBenchmark.COL_POINTS_REF_WARP],
# axis=1, errors='ignore', inplace=True)
df_experiments.drop(filter(lambda c: 'Unnamed' in c, df_results.columns), axis=1, inplace=True)
df_experiments = replicate_missing_warped_landmarks(df_experiments, path_dataset, path_experiment)
normalize_exec_time(df_experiments, path_experiment, path_comp_bm)
# logging.info('Filter used landmarks.')
# path_filtered = os.path.join(path_output, FOLDER_FILTER_DATASET)
# create_folder(path_filtered, ok_existing=True)
# _filter_lnds = partial(filter_export_landmarks, path_output=path_filtered,
# path_dataset=path_dataset, path_reference=path_reference)
# for idx, ratio in iterate_mproc_map(_filter_lnds, df_experiments.iterrows(),
# desc='Filtering', nb_workers=nb_workers):
# df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = np.round(ratio, 2)
logging.info('Compute landmarks statistic.')
_compute_lnds_stat = partial(
ImRegBenchmark.compute_registration_statistic,
df_experiments=df_experiments,
path_dataset=path_dataset,
path_experiment=path_experiment,
path_reference=path_reference,
)
# NOTE: this has to run in SINGLE thread so there is SINGLE table instance
list(iterate_mproc_map(_compute_lnds_stat, df_experiments.iterrows(), desc='Statistic', nb_workers=1))
name_results, _ = os.path.splitext(os.path.basename(path_results))
path_results = os.path.join(path_output, name_results + '_NEW.csv')
logging.debug('exporting CSV results: %s', path_results)
df_experiments.to_csv(path_results)
path_json = export_summary_json(df_experiments, path_experiment, path_output, min_landmarks, details)
return path_json
|
1f168d176c026e7dc8d9b78c09d812b4b1dabfc2
| 3,643,869
|
def fake_login(request):
"""Contrived version of a login form."""
if getattr(request, 'limited', False):
raise RateLimitError
if request.method == 'POST':
password = request.POST.get('password', 'fail')
        if password != 'correct':
return False
return True
|
41b2621b38a302837c9f8ab1fafa0a4f45ca2c26
| 3,643,870
|
def split_to_sentences(data):
"""
Split data by linebreak "\n"
Args:
data: str
Returns:
A list of sentences
"""
sentences = data.split('\n')
    # Additional cleaning (This part is already implemented)
# - Remove leading and trailing spaces from each sentence
# - Drop sentences if they are empty strings.
sentences = [s.strip() for s in sentences]
sentences = [s for s in sentences if len(s) > 0]
return sentences
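A quick usage sketch:
data = "I have a pen.\n\n  I have an apple. \nAh\n"
print(split_to_sentences(data))
# ['I have a pen.', 'I have an apple.', 'Ah']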
|
56540da88e982615e3874ab9f6fd22229a076565
| 3,643,871
|
def read_config_file(fp: str, mode='r', encoding='utf8', prefix='#') -> dict:
"""
    Read a text file, skip blank lines and lines starting with the given prefix, and return
    the remaining key=value lines as a dict.
    :param fp: path to the config file
    :param mode: file open mode
    :param encoding: file encoding
    :param prefix: comment prefix; lines starting with it are ignored
    :return: dict of parsed parameters
"""
with open(fp, mode, encoding=encoding) as f:
ll = f.readlines()
    ll = [i for i in ll if i.strip() and not i.startswith(prefix)]
params = {i.split('=')[0].strip(): i.split('=')[1].strip() for i in ll}
print(params)
return params
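A minimal usage sketch (writes a throwaway config file first):
import os
import tempfile
cfg = "# sample config\nhost = 127.0.0.1\nport = 8080\n\n"
tmp = tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False, encoding='utf8')
tmp.write(cfg)
tmp.close()
params = read_config_file(tmp.name)   # {'host': '127.0.0.1', 'port': '8080'}
os.remove(tmp.name)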
|
94e6130de22b05ca9dd6855206ec748e63dad8ad
| 3,643,872
|
def PrepareForMakeGridData(
allowed_results, starred_iid_set, x_attr,
grid_col_values, y_attr, grid_row_values, users_by_id, all_label_values,
config, related_issues, hotlist_context_dict=None):
"""Return all data needed for EZT to render the body of the grid view."""
def IssueViewFactory(issue):
return template_helpers.EZTItem(
summary=issue.summary, local_id=issue.local_id, issue_id=issue.issue_id,
status=issue.status or issue.derived_status, starred=None, data_idx=0,
project_name=issue.project_name)
grid_data = MakeGridData(
allowed_results, x_attr, grid_col_values, y_attr, grid_row_values,
users_by_id, IssueViewFactory, all_label_values, config, related_issues,
hotlist_context_dict=hotlist_context_dict)
issue_dict = {issue.issue_id: issue for issue in allowed_results}
for grid_row in grid_data:
for grid_cell in grid_row.cells_in_row:
for tile in grid_cell.tiles:
if tile.issue_id in starred_iid_set:
tile.starred = ezt.boolean(True)
issue = issue_dict[tile.issue_id]
tile.issue_url = tracker_helpers.FormatRelativeIssueURL(
issue.project_name, urls.ISSUE_DETAIL, id=tile.local_id)
tile.issue_ref = issue.project_name + ':' + str(tile.local_id)
return grid_data
|
a8e8a70f56001398e75f1ab2e82c8e995e164203
| 3,643,873
|
def custom_address_validator(value, context):
"""
Address not required at all for this example,
skip default (required) validation.
"""
return value
|
06ec3af3b6103c06be5fc9cf30d1af28bd072193
| 3,643,874
|
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, backoff=0, debug=False):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn using the new single line for left and right lane line method.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
masked_lines = np.zeros(img.shape, dtype=np.uint8)
lane_info = draw_lines(masked_lines, lines, backoff=backoff, debug=debug)
return masked_lines, lane_info
|
7364ed62105d6e7475c658f7831fc3f9385b2b84
| 3,643,875
|
from typing import Tuple
def get_model(args) -> Tuple:
"""Choose the type of VQC to train. The normal vqc takes the latent space
data produced by a chosen auto-encoder. The hybrid vqc takes the same
data that an auto-encoder would take, since it has an encoder or a full
auto-encoder attached to it.
Args:
args: Dictionary of hyperparameters for the vqc.
Returns:
An instance of the vqc object with the given specifications (hyperparams).
"""
qdevice = get_qdevice(
args["run_type"],
wires=args["nqubits"],
backend_name=args["backend_name"],
config=args["config"],
)
if args["hybrid"]:
vqc_hybrid = VQCHybrid(qdevice, device="cpu", hpars=args)
return vqc_hybrid
vqc = VQC(qdevice, args)
return vqc
|
fb50a114efdd1f4f358edf2906aad861688056de
| 3,643,876
|
def tail_ratio(returns):
"""
Determines the ratio between the right (95%) and left tail (5%).
For example, a ratio of 0.25 means that losses are four times
as bad as profits.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
tail ratio
"""
return ep.tail_ratio(returns)
|
620fa7b5f5887f80b3fd56e2fb24077cbc3dcf86
| 3,643,877
|
def get_trajectory_for_weight(simulation_object, weight):
"""
    Find the optimal trajectory of the simulation object for a given reward weight vector.
    :param simulation_object: simulation environment exposing find_optimal_path
    :param weight: reward weight vector
    :return: dict with the weight "w", the feature vector "phi" and the optimal "controls"
"""
print(simulation_object.name+" - get trajectory for w=", weight)
controls, features, _ = simulation_object.find_optimal_path(weight)
weight = list(weight)
features = list(features)
return {"w": weight, "phi": features, "controls": controls}
|
e68827fc3631d4467ae1eb82b3c319a4e45d6a9b
| 3,643,878
|
def UnNT(X, Z, N, T, sampling_type):
"""Computes reshuffled block-wise complete U-statistic."""
return np.mean([UnN(X, Z, N, sampling_type=sampling_type)
for _ in range(T)])
|
e250de27fc9bfcd2244269630591ab8f925b29af
| 3,643,879
|
def boolean_matrix_of_image(image_mat, cutoff=0.5):
"""
Make a bool matrix from the input image_mat
:param image_mat: a 2d or 3d matrix of ints or floats
:param cutoff: The threshold to use to make the image pure black and white. Is applied to the max-normalized matrix.
    :return: 2d boolean numpy array, True where the max-normalized intensity exceeds the cutoff
"""
if not isinstance(image_mat, np.ndarray):
image_mat = np.array(image_mat)
if image_mat.ndim == 3:
image_mat = image_mat.sum(axis=2)
elif image_mat.ndim > 3 or image_mat.ndim == 1:
raise ValueError("The image_mat needs to have 2 or 3 dimensions")
if image_mat.dtype != np.dtype('bool'):
image_mat = image_mat.astype('float')
image_mat = image_mat / image_mat.max()
image_mat = image_mat > cutoff
return image_mat
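For example:
img = [[0, 50, 200],
       [10, 120, 255]]
print(boolean_matrix_of_image(img, cutoff=0.5))
# [[False False  True]
#  [False False  True]]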
|
3b23c946709cde552a8c2c2e2bee0a3c91107e85
| 3,643,880
|
import torch
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
"""Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: the pooling type to use, MAX or AVR
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors.
"""
    if mask is not None:
        # zero out padded positions element-wise instead of matrix-multiplying by the mask
        mask = mask.unsqueeze(2)
        inputs = inputs * mask
    if pooling_type == "MAX":
        output, indices = torch.max(inputs, 1, keepdim=False, out=None)
    elif pooling_type == "AVR":
        if mask is not None:
            output = torch.sum(inputs, 1, keepdim=False, dtype=None)
            num_elems = torch.sum(mask, 1, keepdim=False)
            output = torch.div(output, torch.clamp(num_elems, min=1))
else:
output = torch.mean(inputs, axis=1)
return output
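A small usage sketch with a padded batch (using the element-wise masking above):
x = torch.tensor([[[1.0, 2.0], [3.0, 4.0], [0.0, 0.0]]])   # [batch=1, length=3, dims=2]
m = torch.tensor([[1.0, 1.0, 0.0]])                        # last position is padding
print(global_pool_1d(x, "MAX", m))   # tensor([[3., 4.]])
print(global_pool_1d(x, "AVR", m))   # tensor([[2., 3.]])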
|
a8c7d51c76efaaae64a8725ae9296894fdc9b933
| 3,643,881
|
def _monte_carlo_trajectory_sampler(
time_horizon: int = None,
env: DynamicalSystem = None,
policy: BasePolicy = None,
state: np.ndarray = None,
):
"""Monte-Carlo trajectory sampler.
Args:
        time_horizon: Number of time steps to simulate.
        env: The system to sample from.
        policy: The policy applied to the system during sampling.
        state: Initial state of the system.
Returns:
A generator function that yields system observations as tuples.
"""
@sample_generator
def _sample_generator():
state_sequence = []
state_sequence.append(state)
env.state = state
time = 0
for t in range(time_horizon):
action = policy(time=time, state=env.state)
next_state, cost, done, _ = env.step(time=t, action=action)
state_sequence.append(next_state)
time += 1
yield state_sequence
return _sample_generator
|
9107289e89a37bd29bc96d2d549b74f15d3008e0
| 3,643,882
|
def pi_mult(diff: float) -> int:
"""
    Computes the integer multiplier by which 2*pi should be multiplied to compensate for a phase discontinuity
    :param diff: phase difference between two cells of the matrix
    :return: an integer
"""
return int(0.5 * (diff / pi + 1)) if diff > 0 else int(0.5 * (diff / pi - 1))
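For example (pi here is math.pi, which the function assumes is already in scope):
from math import pi
print(pi_mult(5.0))    # 1, since int(0.5 * (5.0 / pi + 1)) == 1
print(pi_mult(-5.0))   # -1
print(pi_mult(2.0))    # 0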
|
041c4740fba4b9983ec927d3fb3d8f5421e4919c
| 3,643,883
|
import warnings
def get_integer(val=None, name="value", min_value=0, default_value=0):
"""Returns integer value from input, with basic validation
Parameters
----------
val : `float` or None, default None
Value to convert to integer.
name : `str`, default "value"
What the value represents.
min_value : `float`, default 0
Minimum allowed value.
default_value : `float` , default 0
Value to be used if ``val`` is None.
Returns
-------
val : `int`
Value parsed as an integer.
"""
if val is None:
val = default_value
try:
orig = val
val = int(val)
except ValueError:
raise ValueError(f"{name} must be an integer")
else:
if val != orig:
warnings.warn(f"{name} converted to integer {val} from {orig}")
if not val >= min_value:
raise ValueError(f"{name} must be >= {min_value}")
return val
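Usage sketch:
print(get_integer(5, name="window"))        # 5
print(get_integer(None, default_value=10))  # 10
print(get_integer(3.7, name="window"))      # warns about the conversion, returns 3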
|
9c967a415eaac58a4a4778239859d1f6d0a87820
| 3,643,884
|
import os
import random
def bb_moments_raincloud(region_idx=None, parcellation='aparc', title=''):
"""Stratify regional data according to BigBrain statistical moments (authors: @caseypaquola, @saratheriver)
Parameters
----------
region_idx : ndarray, shape = (n_val,)
Indices of regions to be included in analysis.
parcellation : string, optional
Name of parcellation. Options are 'aparc', 'schaefer_100', 'schaefer_200', 'schaefer_300',
'schaefer_400', 'glasser_360'. Default is 'aparc'.
title : string, optional
Title of raincloud plot. Default is empty.
Returns
-------
figure
Raincloud plot.
"""
def prctile(x, p):
"""Matlab-like percentile function (author: someone from the internet)"""
p = np.asarray(p, dtype=float)
n = len(x)
p = (p - 50) * n / (n - 1) + 50
p = np.clip(p, 0, 100)
return np.percentile(x, p)
# Load BigBrain statistical moments (mean, skewness)
bb_pth = os.path.dirname(os.path.dirname(__file__)) + '/histology/bb_moments_' + parcellation + '.csv'
bb_moments_aparc = np.loadtxt(bb_pth, delimiter=',', dtype=float)
# Initiate figure and axes
fig, axs = plt.subplots(1, 1, figsize=(15, 5))
axs2 = [axs.twinx(), axs.twinx()]
# Plot first moment at the top
inv = [(ii + 1) * 2 for ii in reversed(range(bb_moments_aparc.shape[0]))]
# Moments colors
spec = ['#9e0142', '#66c2a5']
# Loop over BigBrain moments
for ii in range(bb_moments_aparc.shape[0]):
# for ii in range(1):
jj = inv[ii]
# Random numbers to scatter points
rando = [(random.random() * .3) + (jj - 0.15) for rr in range(bb_moments_aparc[ii, region_idx].shape[1])]
# Scatter plot
axs2[ii].scatter(bb_moments_aparc[ii, region_idx], rando, c=spec[ii], alpha=0.88,
linewidth=0.88, edgecolors='w', s=122)
# Density distribution
data = sns.distplot(bb_moments_aparc[ii, region_idx], hist=False, kde=True, ax=axs2[ii]).get_lines()[0].get_data()
axs2[ii].fill_between(data[0], (jj + 0.3), data[1] + (jj + 0.3), facecolor=spec[ii])
# In-house box plot
qr = prctile(bb_moments_aparc[ii, region_idx].flatten(), [25, 75])
rect = pat.FancyBboxPatch((qr[0] + 0.01, jj - 0.1), qr[1] - qr[0] - 0.02, 0.2, fc=spec[ii], alpha=0.41,
ec=None, boxstyle="round,pad=0.01")
rectout = pat.FancyBboxPatch((qr[0] + 0.01, jj - 0.1), qr[1] - qr[0] - 0.02, 0.2, alpha=.88,
ec='k', boxstyle="round,pad=0.01", fill=False, lw=1.5)
axs2[ii].add_patch(rect)
axs2[ii].add_patch(rectout)
# Median line
axs2[ii].plot([np.median(bb_moments_aparc[ii, region_idx]), np.median(bb_moments_aparc[ii, region_idx])],
[jj - 0.1, jj + 0.1], lw=3, color='k')
        # Detect outliers, and if any, exclude them from the whiskers
        mad = 3 * median_absolute_deviation(bb_moments_aparc[ii, region_idx], axis=1)
        if np.argwhere(np.abs(bb_moments_aparc[ii, region_idx]) > mad).shape[0] == 0:
            mini = np.nanmin(bb_moments_aparc[ii, region_idx])
            maxi = np.nanmax(bb_moments_aparc[ii, region_idx])
        else:
            mat = np.abs(bb_moments_aparc[ii, region_idx])
            mat = np.where(np.abs(mat) > mad, np.nan, mat)
            mini = np.nanmin(mat)
            maxi = np.nanmax(mat)
axs2[ii].plot([mini, qr[0]], [jj, jj], lw=1.5, color='k')
axs2[ii].plot([qr[1], maxi], [jj, jj], lw=1.5, color='k')
# Figure axes and other things to prettify
axs2[ii].set_ylim([1.5, 5.5])
axs2[ii].set_xlim([-1.6, 1.6])
fig.tight_layout()
sns.despine(fig=fig, ax=axs2[ii])
axs2[ii].axes.get_yaxis().set_ticks([])
axs2[ii].set_ylabel('')
axs.set_ylim([1.5, 5.5])
axs.tick_params(axis='y', length=0, rotation=90, labelsize=16)
axs.tick_params(axis='x', length=0, labelsize=16)
axs.set_yticks((2.75, 4.75))
axs.set_yticklabels(('Skewness', 'Mean'))
# Add title
if title:
plt.title(title)
return fig, axs, axs2
|
d70e45b9b94c4c496485bf46a7dfadbbfab860bb
| 3,643,885
|
def release(cohesin, occupied, args):
"""
    An opposite of capture - releases cohesins from CTCF
"""
if not cohesin.any("CTCF"):
return cohesin # no CTCF: no release necessary
# attempting to release either side
for side in [-1, 1]:
if (np.random.random() < args["ctcfRelease"][side].get(cohesin[side].pos, 0)) and (cohesin[side].attrs["CTCF"]):
cohesin[side].attrs["CTCF"] = False
return cohesin
|
89d0d1446f1c5ee45a8e190dff76b91ea59a3bcf
| 3,643,886
|
def cosine(u, v):
"""
d = cosine(u, v)
Computes the Cosine distance between two n-vectors u and v,
    1 - uv^T / (||u||_2 * ||v||_2).
"""
u = np.asarray(u)
v = np.asarray(v)
return (1.0 - (np.dot(u, v.T) / \
(np.sqrt(np.dot(u, u.T)) * np.sqrt(np.dot(v, v.T)))))
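A quick sanity check (numpy is assumed to be imported as np, as in the function body):
print(cosine([1, 0], [0, 1]))   # 1.0  (orthogonal)
print(cosine([1, 2], [2, 4]))   # ~0.0 (same direction, up to float error)
print(cosine([1, 0], [-1, 0]))  # 2.0  (opposite direction)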
|
139b38f674bc19e50bf37714b3593e7f055c5b7f
| 3,643,887
|
import zlib
import json
def get_data_from_redis_key(
label=None,
client=None,
host=None,
port=None,
password=None,
db=None,
key=None,
expire=None,
decompress_df=False,
serializer='json',
encoding='utf-8'):
"""get_data_from_redis_key
:param label: log tracking label
:param client: initialized redis client
    :param host: redis host, used when client is not provided
    :param port: redis port, used when client is not provided
    :param password: redis password, used when client is not provided
    :param db: redis db, used when client is not provided
    :param key: redis key to fetch
    :param expire: not used yet - redis expire
:param decompress_df: used for decompressing
``pandas.DataFrame`` automatically
:param serializer: not used yet - support for future
pickle objects in redis
:param encoding: format of the encoded key in redis
"""
decoded_data = None
data = None
rec = {
'data': data
}
res = build_result.build_result(
status=ae_consts.NOT_RUN,
err=None,
rec=rec)
log_id = label if label else 'get-data'
try:
use_client = client
if not use_client:
log.debug(
f'{log_id} - get key={key} new '
f'client={host}:{port}@{db}')
use_client = redis.Redis(
host=host,
port=port,
password=password,
db=db)
else:
log.debug(f'{log_id} - get key={key} client')
# create Redis client if not set
# https://redis-py.readthedocs.io/en/latest/index.html#redis.StrictRedis.get # noqa
raw_data = use_client.get(
name=key)
if raw_data:
if decompress_df:
try:
data = zlib.decompress(
raw_data).decode(
encoding)
rec['data'] = json.loads(data)
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as f:
if (
'while decompressing data: '
'incorrect header check') in str(f):
data = None
log.critical(
f'unable to decompress_df in redis_key={key} '
f'ex={f}')
else:
log.error(
f'failed decompress_df in redis_key={key} '
f'ex={f}')
raise f
# allow decompression failure to fallback to previous method
if not data:
log.debug(f'{log_id} - decoding key={key} encoding={encoding}')
decoded_data = raw_data.decode(encoding)
log.debug(
f'{log_id} - deserial key={key} serializer={serializer}')
if serializer == 'json':
data = json.loads(decoded_data)
elif serializer == 'df':
data = decoded_data
else:
data = decoded_data
if data:
if ae_consts.ev('DEBUG_REDIS', '0') == '1':
log.info(
f'{log_id} - found key={key} '
f'data={ae_consts.ppj(data)}')
else:
log.debug(f'{log_id} - found key={key}')
# log snippet - if data
rec['data'] = data
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
else:
log.debug(f'{log_id} - no data key={key}')
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as e:
err = (
f'{log_id} failed - redis get from decoded={decoded_data} '
f'data={data} key={key} ex={e}')
log.error(err)
res = build_result.build_result(
status=ae_consts.ERR,
err=err,
rec=rec)
# end of try/ex for getting redis data
return res
|
f294e24258700c1da6eb5e6deee1a891484b8791
| 3,643,888
|
from typing import Iterator
from typing import Tuple
from typing import Any
def _train_model(
train_iter: Iterator[DataBatch],
test_iter: Iterator[DataBatch],
model_type: str,
num_train_iterations: int = 10000,
learning_rate: float = 1e-5
) -> Tuple[Tuple[Any, Any], Tuple[onp.ndarray, onp.ndarray]]:
"""Train a model and return weights and train/test loss."""
batch = next(train_iter)
key = jax.random.PRNGKey(0)
loss_fns = _loss_fns_for_model_type(model_type)
p, s = loss_fns.init(key, batch["feats"], batch["time"])
opt = opt_base.Adam(learning_rate=learning_rate)
opt_state = opt.init(p, s)
@jax.jit
def update(opt_state, key, feats, times):
key, key1 = jax.random.split(key)
p, s = opt.get_params_state(opt_state)
value_and_grad_fn = jax.value_and_grad(loss_fns.apply, has_aux=True)
(loss, s), g = value_and_grad_fn(p, s, key1, feats, times)
next_opt_state = opt.update(opt_state, g, loss=loss, model_state=s, key=key)
return next_opt_state, key, loss
train_loss = []
test_loss = []
for i in range(num_train_iterations):
batch = next(train_iter)
opt_state, key, unused_loss = update(opt_state, key, batch["feats"],
batch["time"])
if (i < 100 and i % 10 == 0) or i % 100 == 0:
p, s = opt.get_params_state(opt_state)
train_loss.append(
onp.asarray(eval_many(p, s, key, train_iter, model_type=model_type)))
test_loss.append(
onp.asarray(eval_many(p, s, key, test_iter, model_type=model_type)))
print(i, train_loss[-1], test_loss[-1])
return (p, s), (onp.asarray(train_loss), onp.asarray(test_loss))
|
46043beaf170f164f13e91fec3a30d024ede6dc8
| 3,643,889
|
from typing import Sequence
import os
import time
def main(_args: Sequence[str]) -> int:
"""Main program."""
config = create_configuration()
generator = create_generator(config)
while True:
if os.path.exists(config.trigger_stop_file):
warning("Stopping due to existence of stop trigger file.")
return 0
debug('Generating new discovery map.')
res = generator.update_discovery_map()
if res != 0:
warning("Envoy configuration generator returned {code}", code=res)
if config.exit_on_generation_failure:
warning("Stopping due to exit-on-failure.")
return res
time.sleep(config.failure_sleep)
else:
time.sleep(config.refresh_time)
|
17d7135dcf1032308c81d184cec7dd0d1b78e36a
| 3,643,890
|
def swig_base_TRGBPixel_getMin():
"""swig_base_TRGBPixel_getMin() -> CRGBPixel"""
return _Core.swig_base_TRGBPixel_getMin()
|
454de4b9f3014b950ebe609ab80d15f0c71cd175
| 3,643,891
|
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
    for model_class in models.__dict__.values():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
|
c2c26191824edfe3d31ed5b0f321022f5bac85a5
| 3,643,892
|
from typing import TextIO
import json
def load_wavefunction(file: TextIO) -> Wavefunction:
"""Load a qubit wavefunction from a file.
Args:
file (str or file-like object): the name of the file, or a file-like object.
Returns:
wavefunction (pyquil.wavefunction.Wavefunction): the wavefunction object
"""
if isinstance(file, str):
with open(file, 'r') as f:
data = json.load(f)
else:
data = json.load(file)
wavefunction = Wavefunction(convert_dict_to_array(data['amplitudes']))
return wavefunction
|
23b38e0739f655e5625775c80baa81874b48d45f
| 3,643,893
|
import requests
def delete_alias(request, DOMAIN, ID):
"""
Delete Alias based on ID
ENDPOINT : /api/v1/alias/:domain/:id
"""
FORWARD_EMAIL_ENDPOINT = f"https://api.forwardemail.net/v1/domains/{DOMAIN}/aliases/{ID}"
res = requests.delete(FORWARD_EMAIL_ENDPOINT, auth=(USERNAME, ''))
if res.status_code == 200:
print("Deleted")
return JsonResponse(res.json())
|
ca59eccef303461b3be562c6167753959ad3eb67
| 3,643,894
|
import warnings
def _generate_input_weights(
N,
dim_input,
dist="custom_bernoulli",
connectivity=1.0,
dtype=global_dtype,
sparsity_type="csr",
seed=None,
input_bias=False,
**kwargs,
):
"""Generate input or feedback weights for a reservoir.
Weights are drawn by default from a discrete Bernoulli random variable,
i.e. are always equal to 1 or -1. Then, they can be rescaled to a specific constant
using the `input_scaling` parameter.
Warning
-------
This function is deprecated since version v0.3.1 and will be removed in future
versions. Please consider using :py:func:`bernoulli` or :py:func:`random_sparse`
instead.
Parameters
----------
N: int
Number of units in the connected reservoir.
dim_input: int
Dimension of the inputs connected to the reservoir.
    dist: str, default to "custom_bernoulli"
A distribution name from :py:mod:`scipy.stats` module, such as "norm" or
"uniform". Parameters like `loc` and `scale` can be passed to the distribution
functions as keyword arguments to this function. Usual distributions for
internal weights are :py:class:`scipy.stats.norm` with parameters `loc` and
`scale` to obtain weights following the standard normal distribution,
or :py:class:`scipy.stats.uniform` with parameters `loc=-1` and `scale=2`
to obtain weights uniformly distributed between -1 and 1.
Can also have the value "custom_bernoulli". In that case, weights will be drawn
from a Bernoulli discrete random variable alternating between -1 and 1 and
drawing 1 with a probability `p` (default `p` parameter to 0.5).
    connectivity: float, default to 1.0
Also called density of the sparse matrix.
input_scaling: float or array, optional
If defined, then will rescale the matrix using this coefficient or array
of coefficients.
input_bias: bool, optional
'input_bias' parameter is deprecated. Bias should be initialized
separately from the input matrix.
If True, will add a row to the matrix to take into
account a constant bias added to the input.
dtype : numpy.dtype, default to numpy.float64
A Numpy numerical type.
sparsity_type : {"csr", "csc", "dense"}, default to "csr"
If connectivity is inferior to 1 and shape is only 2-dimensional, then the
function will try to use one of the Scipy sparse matrix format ("csr" or "csc").
Else, a Numpy array ("dense") will be used.
seed : optional
Random generator seed. Default to the global value set with
:py:func:`reservoirpy.set_seed`.
**kwargs : optional
Arguments for the scipy.stats distribution.
Returns
-------
Numpy array or callable
If a shape is given to the initializer, then returns a matrix.
        Else, returns a function partially initialized with the given keyword parameters,
which can be called with a shape and returns a matrix.
"""
warnings.warn(
"'generate_input_weights' is deprecated since v0.3.1 and will be removed in "
"future versions. Consider using 'normal', 'uniform' or 'random_sparse'.",
DeprecationWarning,
)
if input_bias:
warnings.warn(
"'input_bias' parameter is deprecated. Bias should be initialized "
"separately from the input matrix.",
DeprecationWarning,
)
dim_input += 1
return _random_sparse(
N,
dim_input,
connectivity=connectivity,
dtype=dtype,
dist=dist,
sparsity_type=sparsity_type,
seed=seed,
**kwargs,
)
|
4201fda2f693d0ee0f189e94762de09877059b08
| 3,643,895
|
import re
def _get_variable_name(param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
|
4f6258667383c80b584054af20ac9a61cf25381f
| 3,643,896
|
def np_gather(params, indices, axis=0, batch_dims=0):
"""numpy gather"""
if batch_dims == 0:
return gather(params, indices)
result = []
if batch_dims == 1:
for p, i in zip(params, indices):
axis = axis - batch_dims if axis - batch_dims > 0 else 0
r = gather(p, i, axis=axis)
result.append(r)
return np.stack(result)
for p, i in zip(params[0], indices[0]):
r = gather(p, i, axis=axis)
result.append(r)
res = np.stack(result)
return res.reshape((1,) + res.shape)
|
9dc89cb6e48a6c8126fbee1421a4d7058f35b9e0
| 3,643,897
|
def texture(data):
"""Compute the texture of data.
Compute the texture of the data by comparing values with a 3x3 neighborhood
(based on :cite:`Gourley2007`). NaN values in the original array have
NaN textures.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
multi-dimensional array with shape (..., number of beams, number
of range bins)
Returns
------
texture : :class:`numpy:numpy.ndarray`
array of textures with the same shape as data
"""
# one-element wrap-around padding
x = np.pad(data, 1, mode='wrap')
# set first and last range elements to NaN
x[:, 0] = np.nan
x[:, -1] = np.nan
# get neighbours using views into padded array
x1 = x[..., :-2, 1:-1] # center:2
x2 = x[..., 1:-1, :-2] # 4
x3 = x[..., 2:, 1:-1] # 8
x4 = x[..., 1:-1, 2:] # 6
x5 = x[..., :-2, :-2] # 1
x6 = x[..., :-2, 2:] # 3
x7 = x[..., 2:, 2:] # 9
x8 = x[..., 2:, :-2] # 7
# stack arrays
xa = np.array([x1, x2, x3, x4, x5, x6, x7, x8])
# get count of valid neighbouring pixels
xa_valid_count = np.count_nonzero(~np.isnan(xa), axis=0)
# root mean of squared differences
rmsd = np.sqrt(np.nansum((xa - data) ** 2, axis=0) / xa_valid_count)
# reinforce that NaN values should have NaN textures
rmsd[np.isnan(data)] = np.nan
return rmsd
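A small usage sketch on a toy field (numpy assumed imported as np):
data = np.array([[1., 1., 1., 5., 1.],
                 [1., 1., 1., 1., 1.]])
tex = texture(data)
print(tex.shape)               # (2, 5) - same shape as the input
print(tex[0, 3] > tex[0, 1])   # True: the outlier bin has the largest texture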
|
e1a57e9e37a1730de5c4e919ba6fa65eaf301c79
| 3,643,898
|
from typing import Tuple
def joos_2013_monte_carlo(
runs: int = 100, t_horizon: int = 1001, **kwargs
) -> Tuple[pd.DataFrame, np.ndarray]:
"""Runs a monte carlo simulation for the Joos_2013 baseline IRF curve.
This function uses uncertainty parameters for the Joos_2013 curve calculated by
Olivie and Peters (2013): https://esd.copernicus.org/articles/4/267/2013/
Parameters
----------
runs : int
Number of runs for Monte Carlo simulation. Must be >1.
t_horizon : int
Length of the time horizon over which baseline curve is
calculated (years)
Returns
-------
summary : pd.DataFrame
        Dataframe with 'mean', '+2sigma', '-2sigma', '5th' and '95th' columns summarizing
        results of Monte Carlo simulation.
results : np.ndarray
Results from all Monte Carlo runs.
"""
if runs <= 1:
raise ValueError('number of runs must be >1')
results = np.zeros((t_horizon, runs))
# Monte Carlo simulations
# sigma and x are from Olivie and Peters (2013) Table 5 (J13 values)
# They are the covariance and mean arrays for CO2 IRF uncertainty
sigma = np.array(
[
[0.129, -0.058, 0.017, -0.042, -0.004, -0.009],
[-0.058, 0.167, -0.109, 0.072, -0.015, 0.003],
[0.017, -0.109, 0.148, -0.043, 0.013, -0.013],
[-0.042, 0.072, -0.043, 0.090, 0.009, 0.006],
[-0.004, -0.015, 0.013, 0.009, 0.082, 0.013],
[-0.009, 0.003, -0.013, 0.006, 0.013, 0.046],
]
)
x = np.array([5.479, 2.913, 0.496, 0.181, 0.401, -0.472])
p_samples = multivariate_normal.rvs(x, sigma, runs)
p_df = pd.DataFrame(p_samples, columns=['t1', 't2', 't3', 'b1', 'b2', 'b3'])
p_exp = np.exp(p_df)
a1 = p_exp['b1'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a2 = p_exp['b2'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a3 = p_exp['b3'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
tau1 = p_exp['t1']
tau2 = p_exp['t2']
tau3 = p_exp['t3']
for count in np.arange(runs):
co2_kwargs = {
'a1': a1[count],
'a2': a2[count],
'a3': a3[count],
'tau1': tau1[count],
'tau2': tau2[count],
'tau3': tau3[count],
}
irf = joos_2013(t_horizon, **co2_kwargs)
results[:, count] = irf
summary = pd.DataFrame(columns=['mean', '-2sigma', '+2sigma', '5th', '95th'])
summary['mean'] = np.mean(results, axis=1)
summary['+2sigma'] = summary['mean'] + (1.96 * np.std(results, axis=1))
summary['-2sigma'] = summary['mean'] - (1.96 * np.std(results, axis=1))
summary['5th'] = np.percentile(results, 5, axis=1)
summary['95th'] = np.percentile(results, 95, axis=1)
return summary, results
|
3fd791eae464bd1c73fcbf3fa16c7e8634dd6f80
| 3,643,899
|