content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Dict
def get_type_specs_from_feature_specs(
    feature_specs: Dict[str, common_types.FeatureSpecType]
) -> Dict[str, tf.TypeSpec]:
    """Builds `tf.TensorSpec`/`tf.SparseTensorSpec`s for the given feature specs.

    Each returned type spec carries the same dtype as its feature spec and a
    shape with a leading (unknown) batch dimension.

    Args:
        feature_specs: A TensorFlow feature spec.

    Returns:
        A dictionary from strings to `tf.TensorSpec` or `tf.SparseTensorSpec`s.

    Raises:
        ValueError: If the feature spec contains feature types not supported.
    """
    type_specs = {}
    for name, spec in feature_specs.items():
        if isinstance(spec, tf.io.FixedLenFeature):
            # Prepend an unknown batch dimension to the fixed shape.
            batched_shape = [None] + list(spec.shape)
            type_specs[name] = tf.TensorSpec(batched_shape, spec.dtype)
        elif isinstance(spec, tf.io.VarLenFeature):
            # VarLen features become rank-2 sparse tensors: [batch, max_len].
            type_specs[name] = tf.SparseTensorSpec([None, None], spec.dtype)
        elif isinstance(spec, tf.io.SparseFeature):
            # `TensorsToRecordBatchConverter` ignores `SparseFeature`s since
            # arbitrary `SparseTensor`s are not yet supported. They are handled
            # in `convert_to_arrow`.
            # TODO(b/181868576): Handle `SparseFeature`s by the converter once
            # the support is implemented.
            continue
        elif common_types.is_ragged_feature(spec):
            # Number of dimensions is number of partitions + 1 + 1 batch dimension.
            ragged_shape = [None, None]
            for partition in spec.partitions:
                is_uniform = isinstance(
                    partition, tf.io.RaggedFeature.UniformRowLength)  # pytype: disable=attribute-error
                ragged_shape.append(partition.length if is_uniform else None)
            type_specs[name] = tf.RaggedTensorSpec(
                shape=ragged_shape,
                dtype=spec.dtype,
                row_splits_dtype=spec.row_splits_dtype)
        else:
            raise ValueError('Invalid feature spec {}.'.format(spec))
    return type_specs
def read_fortran_namelist(fileobj):
    """Takes a fortran-namelist formatted file and returns appropriate
    dictionaries, followed by lines of text that do not fit this
    pattern.

    Parameters
    ----------
    fileobj : file-like
        An open, seekable text stream containing the namelist.

    Returns
    -------
    (data, extralines) : (dict, list)
        ``data`` maps lowercased namelist names (the word after '&') to
        key/value dicts; ``extralines`` holds non-empty lines found outside
        any namelist.
    """
    data = {}
    extralines = []
    indict = False
    dictname = None
    fileobj.seek(0)
    for line in fileobj.readlines():
        stripped = line.strip()
        if indict and stripped.startswith('/'):
            # End of the current namelist group.
            indict = False
        elif stripped.startswith('&'):
            # Start of a new namelist group, e.g. "&control".
            indict = True
            dictname = stripped[1:].lower()
            data[dictname] = {}
        elif (not indict) and stripped:
            extralines.append(line)
        elif indict:
            if '=' not in stripped:
                # Blank or malformed lines inside a namelist are ignored.
                continue
            # Split on the first '=' only so values containing '=' survive.
            key, value = stripped.split('=', 1)
            value = value.strip()
            if value.endswith(','):
                value = value[:-1].strip()
            try:
                # NOTE: eval() on file content is unsafe for untrusted input;
                # kept for backward compatibility (parses numbers, quoted
                # strings, tuples, ...).
                value = eval(value)
            except (SyntaxError, NameError):
                # Fortran logicals and bare words fall through here.
                value = {'.true.': True, '.false.': False}.get(value, value)
            data[dictname][key.strip()] = value
    return data, extralines
from typing import Tuple
import re
def check_token(surface: str) -> Tuple[str, str]:
    """Adopted and modified from coltekin/childes-tr/misc/parse-chat.py

    For a given surface form of the token, return (surface, clean), where
    clean is the token form without CHAT codes.
    """
    if surface is None:
        return None, None
    if re.match(TO_OMIT, surface):
        # Phonological forms (and other TO_OMIT matches) yield an empty clean form.
        return surface, ''
    # Normalise the token form by stripping unwanted CHAT markings.
    normalised = surface.replace(' ', '')
    for target, replacement in (
            ('xxx', ''),   # unintelligible, 'wxxxord' --> 'word'
            ('(', ''),
            (')', ''),
            ('0', ''),     # 0token is omitted token
            ('‡', ','),    # prefixed interactional marker
            ('„', ','),    # suffixed interactional marker
    ):
        normalised = normalised.replace(target, replacement)
    # normalised = normalised.replace('_', ' ')  # compound, uncomment to remove '_' in compounds
    at_pos = normalised.find('@')
    if at_pos != -1:
        # drop any @endings
        normalised = normalised[:at_pos]
    return surface, normalised
def remove_media_url(media_path):
    """
    Strip a leading MEDIA_URL prefix from a media file url.

    :param media_path: media file url, possibly prefixed with MEDIA_URL.
    :return: the url with the MEDIA_URL prefix removed, or the input
        unchanged when the prefix is absent.
    """
    prefix = MEDIA_URL
    if not media_path.startswith(prefix):
        return media_path
    return media_path[len(prefix):]
def _map_route_on_graph(ordered_cluster: sp.Cluster, graph: sp.Graph) -> list[sp.Segment]:
    """Build a route through the graph.

    Args:
        ordered_cluster: Cluster whose points are visited in the given order.
        graph: Graph to route through.

    Returns:
        The constructed route as a list of graph edges.
    """
    n_points = len(ordered_cluster)
    route = []  # The route is a list of graph edges.
    for index, start in enumerate(ordered_cluster):
        # Wrap around so the last point connects back to the first.
        finish = ordered_cluster[(index + 1) % n_points]
        route.extend(a_star.a_star(start, finish, graph))
    return route
from typing import Tuple
from typing import Dict
def list_violation_data(client: Client, args) -> Tuple[str, Dict, Dict]:
    """List violation data from Securonix within a time frame.

    Args:
        client: Client object with request.
        args: Usually demisto.args(); expected keys: 'from', 'to', 'query'.

    Returns:
        (human_readable, entry_context, raw_response) outputs.

    Raises:
        Exception: If the Securonix response contains an 'error' field.
    """
    from_ = args.get('from')
    to_ = args.get('to')
    query = args.get('query')
    violation_data = client.list_violation_data_request(from_, to_, query)
    # Securonix signals failures inside the payload rather than via HTTP status.
    if violation_data.get('error'):
        raise Exception(f'Failed to get violation data in the given time frame.\n'
                        f'Error from Securonix is: {violation_data.get("errorMessage")}')
    violation_events = violation_data.get('events')
    # Whitelist of event fields surfaced in the context output.
    # NOTE(review): 'Gestinationusername' looks like a typo for
    # 'Destinationusername', and 'Riskscore' appears twice — confirm against
    # the Securonix API field names before changing.
    fields_to_include = ['Accountname', 'Baseeventid', 'Category', 'Categorybehavior', 'Categoryobject',
                         'Categoryseverity', 'Destinationaddress', 'Destinationntdomain', 'Destinationuserid',
                         'Gestinationusername', 'Deviceaddress', 'Deviceeventcategory', 'Deviceexternalid',
                         'Devicehostname', 'EventID', 'Eventoutcome', 'Eventtime', 'Generationtime', 'Invalid', 'JobID',
                         'Jobstarttime', 'Message', 'Policyname', 'Resourcename', 'Rg_id', 'Rg_name', 'Riskscore',
                         'Riskthreatname', 'Sessionid', 'Sourcehostname', 'Sourcentdomain', 'Sourceuserid',
                         'Sourceusername', 'Sourceuserprivileges', 'TenantID', 'Tenantname', 'Timeline',
                         'Createdate', 'Criticality', 'Datasourceid', 'Department', 'Division',
                         'EmployeeID', 'Encrypted', 'Firstname', 'Fullname', 'ID', 'LanID', 'Lastname',
                         'Lastsynctime', 'Masked', 'Mergeuniquecode', 'Riskscore', 'Skipencryption',
                         'Status', 'Timezoneoffset', 'Title', 'Uniquecode', 'UserID', 'Workemail',
                         'Violator']
    violation_readable, violation_outputs = parse_data_arr(violation_events, fields_to_include=fields_to_include)
    headers = ['EventID', 'Eventtime', 'Message', 'Policyname', 'Accountname']
    # NOTE(review): the table title says "Activity data:" although this lists
    # violations — possibly copied from a sibling command; verify intent.
    human_readable = tableToMarkdown(name="Activity data:", t=violation_readable, headers=headers, removeNull=True)
    # Uniquecode is the de-duplication key in the context.
    entry_context = {f'Securonix.ViolationData(val.Uniquecode === obj.Uniquecode)': violation_outputs}
    return human_readable, entry_context, violation_data
def train_add_test(func=lambda a, b: a+b, results_dir=None, reg_weight=5e-2, learning_rate=1e-2, n_epochs=10001):
    """Addition of two MNIST digits with a symbolic regression network.

    Withold sums > 15 for test data.

    Args:
        func: Ground-truth function combining the two digit values
            (default: addition).
        results_dir: Directory where results are saved.
        reg_weight: Weight for the L1/2-smooth regularization penalty.
        learning_rate: Learning rate for the first training phase (the
            second, fine-tuning phase uses learning_rate/10).
        n_epochs: Number of training epochs per phase.
    """
    tf.reset_default_graph()
    # Symbolic regression network to combine the conv net outputs
    PRIMITIVE_FUNCS = [
        *[functions.Constant()] * 2,
        *[functions.Identity()] * 4,
        *[functions.Square()] * 4,
        *[functions.Sin()] * 2,
        *[functions.Exp()] * 2,
        *[functions.Sigmoid()] * 2,
        # *[functions.Product()] * 2,
    ]
    sr_net = symbolic_network.SymbolicNet(2, funcs=PRIMITIVE_FUNCS)   # Symbolic regression network
    # Overall architecture
    sym_digit_network = SymbolicDigit(sr_net=sr_net, normalize=normalize)
    # Set up regularization term and training.
    # The second assignment below overrides the first: the penalty is ramped
    # with a sin^2 schedule over the training epochs.
    penalty = regularization.l12_smooth(sr_net.get_weights())
    epoch = tf.placeholder_with_default(0.0, [])
    penalty = tf.sin(np.pi / n_epochs / 1.1 * epoch) ** 2 * regularization.l12_smooth(sr_net.get_weights())
    penalty = reg_weight * penalty
    sym_digit_network.set_training(reg=penalty)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Take up variable amount of memory on GPU
    sess = tf.Session(config=config)
    batch = batch_generator(batch_size=100)
    # Train/test split by label: sums < 15 are used for training,
    # the withheld sums (>= 15) form the test partition.
    def train_fun(y):
        return y < 15
    def test_fun(y):
        return np.logical_not(train_fun(y))
    # Train, and restart training if loss goes to NaN
    loss_i = np.nan
    while np.isnan(loss_i):
        sess.run(tf.global_variables_initializer())
        loss_i = sym_digit_network.train(sess, n_epochs, batch, func, epoch, lr_val=learning_rate, train_fun=train_fun)
        if np.isnan(loss_i):
            continue
        # Freezing weights: mask out near-zero weights and fine-tune the rest.
        sr_net_masked = symbolic_network.MaskedSymbolicNet(sess, sr_net, threshold=0.01)
        sym_digit_network = SymbolicDigitMasked(sym_digit_network, sr_net_masked, normalize=normalize)
        sym_digit_network.set_training()
        loss_i = sym_digit_network.train(sess, n_epochs, batch, func, lr_val=learning_rate/10, train_fun=train_fun)
    # Print out human-readable equation (with regularization)
    weights = sess.run(sr_net.get_weights())
    expr = pretty_print.network(weights, PRIMITIVE_FUNCS, ["z1", "z2"])
    expr = normalize(expr)
    print(expr)
    # Calculate accuracy on train and test datasets, both overall and
    # restricted to the train-sum / withheld-sum partitions.
    acc_train, error_train = sym_digit_network.calc_accuracy(X_train, y_train, func, sess)
    acc_train1, error_train1 = sym_digit_network.calc_accuracy(X_train, y_train, func, sess, filter_fun=train_fun)
    acc_train2, error_train2 = sym_digit_network.calc_accuracy(X_train, y_train, func, sess, filter_fun=test_fun)
    acc_test, error_test = sym_digit_network.calc_accuracy(X_test, y_test, func, sess)
    acc_test1, error_test1 = sym_digit_network.calc_accuracy(X_test, y_test, func, sess, filter_fun=train_fun)
    acc_test2, error_test2 = sym_digit_network.calc_accuracy(X_test, y_test, func, sess, filter_fun=test_fun)
    result_str = "Train digits overall accuracy: %.3f\ttrain sum accuracy: %.3f\t test sum accuracy: %.3f\n" \
                 "Train digits overall error: %.3f\ttrain sum error: %.3f\t test sum error: %.3f\n" \
                 "Test digits overall accuracy: %.3f\ttrain sum accuracy: %.3f\t test sum accuracy: %.3f\n" \
                 "Test digits overall error: %.3f\ttrain sum error: %.3f\t test sum error: %.3f\n" % \
                 (acc_train, acc_train1, acc_train2, error_train, error_train1, error_train2,
                  acc_test, acc_test1, acc_test2, error_test, error_test1, error_test2)
    print(result_str)
    sym_digit_network.save_result(sess, results_dir, expr, result_str)
def walk_graph(csr_matrix, labels, walk_length=40, num_walks=1, n_jobs=1):
    """Perform random walks on an adjacency matrix.

    Args:
        csr_matrix: adjacency matrix.
        labels: list of node labels where index align with CSR matrix
        walk_length: maximum length of random walk (default=40)
        num_walks: number of walks to do for each node
        n_jobs: number of cores to use (default=1)

    Returns:
        Tuple of (np.ndarray of random walks, dict mapping labels to visit
        frequencies).
    """
    # Row-normalize so entries can be used as transition probabilities.
    transition_matrix = normalize_csr_matrix(csr_matrix)
    jobs = (delayed(walk_random, has_shareable_memory)(transition_matrix, labels, walk_length)
            for _ in range(num_walks))
    results = Parallel(n_jobs=n_jobs, max_nbytes=None)(jobs)
    # Each job returns (walks, freqs); merge across jobs.
    walks, freqs = zip(*results)
    random_walks = np.concatenate(walks)
    word_freqs = np.sum(freqs, axis=0)
    return random_walks, dict(zip(labels, word_freqs))
def mutate_split(population, config):
    """
    Splitting a non-zero dose (> 0.25Gy) into 2 doses.

    With probability ``mut_prob`` per genome, a randomly chosen non-zero dose
    is split into two parts (each a multiple of the minimal dose step) and the
    parts are re-deposited at random positions that can accept them without
    exceeding the maximal dose.

    population - next population, array [population_size, element_size].
    config - dict with 'time_interval_hours', 'mutations.mutate_split',
             'step_value' (minimal dose step) and 'max_dose_value'.
    Returns the mutated population as a list of lists.
    """
    interval_in_indices = int(2 * config['time_interval_hours'])
    mutation_config = config['mutations']['mutate_split']
    min_dose = config['step_value']
    max_dose = config['max_dose_value']
    population = np.asarray(population)
    for i, genome in enumerate(population):
        if np.random.uniform() < mutation_config['mut_prob']:
            non_zero_dose_indices = np.nonzero(genome)[0]
            if non_zero_dose_indices.size:
                gene_idx = np.random.choice(non_zero_dose_indices)
                # k = how many minimal dose steps the chosen dose contains;
                # split d1/d2 so both remain multiples of min_dose.
                k = genome[gene_idx] / min_dose
                split = np.random.randint(0, k)
                d1 = split * min_dose
                d2 = genome[gene_idx] - d1
                # Re-deposit d1 at a random position that stays <= max_dose.
                # NOTE(review): the retry budget is len(population); possibly
                # len(genome) was intended — confirm.
                for _ in range(len(population)):
                    new_gene_idx = np.random.randint(len(genome))
                    if genome[new_gene_idx] + d1 <= max_dose:
                        genome[new_gene_idx] = genome[new_gene_idx] + d1
                        break
                # Same for d2; if no slot is found, the dose part is dropped.
                for _ in range(len(population)):
                    new_gene_idx = np.random.randint(len(genome))
                    if genome[new_gene_idx] + d2 <= max_dose:
                        genome[new_gene_idx] = genome[new_gene_idx] + d2
                        break
                genome[gene_idx] = 0
        # Re-apply the time-interval constraint after mutation.
        population[i] = refine_genome_around_cross_point_to_time_constraint(
            genome=population[i], interval_in_indices=interval_in_indices, config=config)
    return population.tolist()
def _copy_df(df):
""" Copy a DataFrame """
return df.copy() if df is not None else None | 263bf1cf9cbdae371ea3e4685b4638e8a5714d7f | 28,309 |
def findPossi(bo):
    """ Find all possibilities for all fields and add them to a list.

    For every empty cell (value 0) in board ``bo``, collects
    ``[possibilities, cell_number]`` pairs and returns them sorted by the
    number of possibilities (fewest first). Also stores the result on
    ``t.possibls`` as a side effect.
    """
    possis = []
    for row,rowVal in enumerate(bo):
        for col,colVal in enumerate(rowVal):
            # NOTE: possibilities are computed for every cell, but only
            # recorded for empty ones.
            localpossi=newPossiFinder(bo, col, row)
            if bo[row][col]==0:
                # Here ujson.loads(ujson.dumps()) is used because it is much faster than copy.deepcopy() to make a copy of a list.
                possis.append(ujson.loads(ujson.dumps([localpossi,rowcoltoNum(row,col)])))
    # Sort so cells with the fewest possibilities come first.
    possis.sort(key=getLen)
    t.possibls = possis
    return possis
def modernforms_exception_handler(func):
    """Decorate Modern Forms calls to handle Modern Forms exceptions.

    A decorator that wraps the passed in function, catches Modern Forms errors,
    and handles the availability of the device in the data coordinator.

    The wrapped coroutine must be a method whose instance exposes a
    ``coordinator`` attribute.
    """
    import functools  # local import keeps this snippet self-contained

    # functools.wraps preserves the wrapped coroutine's name/docstring so
    # logging and introspection still identify the original method.
    @functools.wraps(func)
    async def handler(self, *args, **kwargs):
        try:
            await func(self, *args, **kwargs)
            self.coordinator.async_update_listeners()
        except ModernFormsConnectionError as error:
            # Device unreachable: mark the coordinator data as stale.
            _LOGGER.error("Error communicating with API: %s", error)
            self.coordinator.last_update_success = False
            self.coordinator.async_update_listeners()
        except ModernFormsError as error:
            _LOGGER.error("Invalid response from API: %s", error)
    return handler
def reads_per_insertion(tnpergene_list,readpergene_list,lines):
    """It computes the reads per insertion following the formula:
    reads/(insertions-1) if the number of insertions is at least 5,
    otherwise the reads per insertion is set to 0.

    Parameters
    ----------
    tnpergene_list : list
        A list with the number of insertions per gene.
    readpergene_list : list
        A list of the reads per gene.
    lines : sized collection
        One entry per gene mapped in the reference genome; only
        ``len(lines)`` is used.
        NOTE(review): the original doc described this as an int, but the
        code calls ``len(lines)`` — confirm the intended type with callers.

    Returns
    -------
    list
        A list containing all the reads per insertions per gene. Entries
        beyond ``len(tnpergene_list)`` remain NaN.
    """
    readperinspergene_list = [np.nan]*len(lines)
    for i in range(len(tnpergene_list)):
        # ``not x < 5`` means x >= 5: require at least 5 insertions.
        if not tnpergene_list[i] < 5:
            readperinspergene_list[i] = readpergene_list[i] / (tnpergene_list[i] -1)
        else:
            readperinspergene_list[i] = 0
    return readperinspergene_list
import math
def getCurrDegreeSize(currDegree, spatialDim):
    """
    Computes the number of polynomials of degree ``currDegree`` in
    ``spatialDim`` spatial dimensions, i.e. the multiset coefficient
    C(currDegree + spatialDim - 1, currDegree).

    :param currDegree: polynomial degree (non-negative int).
    :param spatialDim: spatial dimension (positive int).
    :return: the count as a float (matching the historical return type of
        the factorial-ratio formula).
    """
    # math.comb replaces the former np.math.factorial ratio: np.math was a
    # deprecated alias for the stdlib math module and was removed in
    # NumPy 2.0. comb is exact (no overflow) and equivalent:
    # (d+s-1)! / (d! (s-1)!) == C(d+s-1, d).
    return float(math.comb(currDegree + spatialDim - 1, currDegree))
def split_last_dimension(x, n):
    """Reshape x so that the last dimension becomes two dimensions.

    The first of these two dimensions is n.

    Args:
        x: a Tensor with shape [..., m]
        n: an integer.

    Returns:
        a Tensor with shape [..., n, m/n]
    """
    old_shape = shape_list(x)
    last_dim = old_shape[-1]
    # Only verify divisibility when both sizes are statically known.
    if isinstance(last_dim, int) and isinstance(n, int):
        assert last_dim % n == 0
    new_shape = old_shape[:-1] + [n, last_dim // n]
    return tf.reshape(x, new_shape)
import os
def get_base_dir_for_individual_image(dataset,
                                      show_both_knees_in_each_image,
                                      downsample_factor_on_reload,
                                      normalization_method,
                                      seed_to_further_shuffle_train_test_val_sets,
                                      crop_to_just_the_knee):
    """
    Get the path for an image.

    Validates every preprocessing option and returns the directory under
    INDIVIDUAL_IMAGES_PATH that holds images produced with those options.
    """
    # Deprecated knob: fail fast so it is never used by accident.
    assert seed_to_further_shuffle_train_test_val_sets is None  # this is deprecated; don't let us use it accidentally.
    assert dataset in ['train', 'val', 'test', 'BLINDED_HOLD_OUT_DO_NOT_USE']
    assert show_both_knees_in_each_image in [True, False]
    assert downsample_factor_on_reload in [None, 0.7, 0.5, 0.3]
    assert normalization_method in ['imagenet_statistics', 'our_statistics', 'zscore_individually']
    assert crop_to_just_the_knee in [True, False]
    if show_both_knees_in_each_image:
        assert not crop_to_just_the_knee
    if seed_to_further_shuffle_train_test_val_sets is None:
        random_seed_suffix = ''
    else:
        # Unreachable given the assert above; kept for historical parity.
        random_seed_suffix = '_random_seed_%i' % seed_to_further_shuffle_train_test_val_sets
    if crop_to_just_the_knee:
        subdir = 'crop_to_just_the_knee_downsample_factor_%s_normalization_method_%s%s' % (
            downsample_factor_on_reload,
            normalization_method,
            random_seed_suffix)
    else:
        subdir = 'show_both_knees_%s_downsample_factor_%s_normalization_method_%s%s' % (
            show_both_knees_in_each_image,
            downsample_factor_on_reload,
            normalization_method,
            random_seed_suffix)
    return os.path.join(INDIVIDUAL_IMAGES_PATH, dataset, subdir)
def get_column_dtype(column, pd_or_sqla, index=False):
    """
    Take a column (sqlalchemy table.Column or df.Series), return its dtype in Pandas or SQLA

    If it doesn't match anything else, return String

    Args:
        column: pd.Series or SQLA.table.column
        pd_or_sqla: either 'pd' or 'sqla': which kind of type to return
        index: if True, column type cannot be boolean

    Returns:
        Type or None
        if pd_or_sqla == 'sqla':
            one of {Integer, Float, Boolean, DateTime, String, or None (for all-NaN column)}
        if pd_or_sqla == 'pd':
            one of {np.int64, np.float64, np.datetime64, np.bool_, np.str_}

    Raises:
        ValueError: if ``column`` is neither a SQLAlchemy column nor a pandas
            Series/Index, or if ``pd_or_sqla`` is not 'pd'/'sqla'.
    """
    # Dispatch on the column's origin: SQLAlchemy column vs pandas Series/Index.
    if isinstance(column, sqa.Column):
        dtype = _get_type_from_db_col(column)
    elif isinstance(column, (pd.Series, pd.Index)):
        dtype = _get_type_from_df_col(column, index=index)
    else:
        raise ValueError(f'get_column_datatype takes a column; got {type(column)}')
    if dtype is None:
        # All-NaN column: no type can be inferred.
        return None
    elif pd_or_sqla == 'sqla':
        return dtype
    elif pd_or_sqla == 'pd':
        # Internally types are tracked as SQLA types; translate on request.
        return _sqa_type2pandas_type(dtype, index=index)
    else:
        raise ValueError(f'Select pd_or_sqla must equal either "pd" or "sqla"')
def _stdlibs(tut):
    """Given a target, return the list of its standard rust libraries."""
    # Collect every static library reachable through the target's CcInfo
    # linking context.
    libs = [
        lib.static_library
        for li in tut[CcInfo].linking_context.linker_inputs.to_list()
        for lib in li.libraries
    ]
    # Anything whose basename does not contain the target's own name is
    # assumed to be a standard library rather than the target's artifact.
    stdlibs = [lib for lib in libs if (tut.label.name not in lib.basename)]
    return stdlibs
def _chebyshev(wcs_dict):
    """Returns a chebyshev model of the wavelength solution.

    Constructs a Chebyshev1D mathematical model

    Parameters
    ----------
    wcs_dict : dict
        Dictionary containing all the wcs information decoded from the header and
        necessary for constructing the Chebyshev1D model.

    Returns
    -------
    `~astropy.modeling.Model`
    """
    n_coeffs = wcs_dict['order']
    model = models.Chebyshev1D(degree=n_coeffs - 1,
                               domain=[wcs_dict['pmin'], wcs_dict['pmax']], )
    # 'fpar' holds one coefficient per order term.
    model.parameters = [wcs_dict['fpar'][index] for index in range(n_coeffs)]
    return model
def autosolve(equation):
    """
    Automatically solve an easy maths problem of the form
    "<number> <operator> <number>".

    :type equation: string
    :param equation: The equation to calculate.

    :raises ValueError: If the operator is not recognised.

    >>> autosolve("300 + 600")
    900
    """
    def _parse_number(token):
        """Parse a token as int when possible, falling back to float."""
        try:
            return int(token)
        except ValueError:
            return float(token)

    # Tokens are space-separated: operands at positions 0 and 2,
    # the operator at position 1 (case-insensitive).
    parts = equation.split(" ")
    num1 = _parse_number(parts[0])
    num2 = _parse_number(parts[2])
    operator_token = parts[1].lower()
    # Map every accepted spelling of an operator to its implementation.
    operations = {
        ("+", "plus", "add"): lambda a, b: a + b,
        ("-", "minus", "subtract"): lambda a, b: a - b,
        ("*", "times", "multiply"): lambda a, b: a * b,
        ("/", "divide", "quotient"): lambda a, b: a / b,
        ("%", "remainder", "rem"): lambda a, b: a % b,
    }
    for aliases, operation in operations.items():
        if operator_token in aliases:
            return operation(num1, num2)
    raise ValueError("Invalid operation provided.")
from bs4 import BeautifulSoup
import http
def request_champion(champion_name: str) -> BeautifulSoup:
    """
    Get http request to website with all statistics about a
    champion with html format.

    :param champion_name: champion slug as used in the leaguespy.gg URL.
    :return: parsed HTML of the champion's stats page.
    """
    # NOTE(review): the stdlib `http` module imported above has no
    # `request()` function; this call signature matches
    # urllib3.PoolManager.request — confirm the intended import. HEADERS
    # is assumed to be defined elsewhere in the module.
    request = http.request(
        'GET',
        f'https://www.leaguespy.gg/league-of-legends/champion/{champion_name}/stats',
        None,
        HEADERS
    )
    # Parse the raw response body with the lxml parser.
    return BeautifulSoup(request.data, 'lxml')
def _act_drop(grid_world, agent, env_obj, drop_loc):
    """ Private MATRX method.

    Drops the carried object.

    Parameters
    ----------
    grid_world : GridWorld
        The :class:`matrx.grid_world.GridWorld` instance in which the
        object is dropped.
    agent : AgentBody
        The :class:`matrx.objects.agent_body.AgentBody` of the agent who
        drops the object.
    env_obj : EnvObject
        The :class:`matrx.objects.env_object.EnvObject` to be dropped.
    drop_loc : [x, y]
        The drop location.

    Returns
    -------
    DropObjectResult
        The :class:`matrx.actions.action.ActionResult` depicting the
        action's expected success or failure and reason for that result.

        Returns the following results:

        * RESULT_SUCCESS: When the object is successfully dropped.
    """
    # Updating properties: detach the object from the agent on both sides
    # of the carrying relationship.
    agent.is_carrying.remove(env_obj)
    env_obj.carried_by.remove(agent.obj_id)
    # We return the object to the grid location we are standing at without registering a new ID
    env_obj.location = drop_loc
    grid_world._register_env_object(env_obj, ensure_unique_id=False)
    return DropObjectResult(DropObjectResult.RESULT_SUCCESS, True)
def get_emoticon_radar_chart(scores_list, colors, names):
    """Build a Plotly radar (polar) chart of emotion scores.

    Args:
        scores_list: iterable of per-series score sequences; each sequence
            is expected to align with the eight emotions listed below.
        colors: line colour for each series.
        names: legend name for each series.

    Returns:
        A plotly Figure with one filled Scatterpolar trace per series,
        drawn on a transparent background.
    """
    data_radars = []
    # Axis order of the radar chart; scores must follow the same order.
    emotions = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust']
    for score, color, name in zip(scores_list, colors, names):
        data = go.Scatterpolar(r=score, theta=emotions, fill='toself', line=dict(color=color), name=name)
        data_radars.append(data)
    layout = go.Layout(polar=dict(radialaxis=dict(visible=True)), showlegend=True, margin=dict(t=30),
                       paper_bgcolor='rgba(0,0,0,0)')
    fig = go.Figure(data=data_radars, layout=layout)
    return fig
def get_polling_method(meth_name=None):
    """ Grab a polling-method by string-key

    Eventually these could be auto-registered somehow;
    for now we just keep a look-up dict of them. """
    default_method = poll_game_unknowns
    if meth_name is None:
        return default_method
    registry = dict(
        poll_game_unknowns=poll_game_unknowns,
        poll_dan=poll_dan,
    )
    # Note missing-entries, other than `None`, will generate KeyErrors
    # This is on the caller to handle.
    return registry[meth_name]
def get_contour_list(image, preprocessed, MIN_FILTER=3000):
    """ Given an image and its preprocessed version, returns the cropped image and its contours.

    The return value is in the format: [(CroppedImage, Contour)]

    Parameters
    ----------
    image : opencv image
        The original unprocessed image
    preprocessed: opencv image
        The processed image
    MIN_FILTER : int
        Contours with an area lower than this value are discarded
        (the MAX_FILTER_PERCENT upper bound is currently disabled in code)

    Returns
    -------
    result : array of tuples
    """
    contours = find_contours(preprocessed) #gets contours in the preprocessed image
    result = []
    # ORB construction differs across OpenCV major versions.
    if utils.CV_V3 or utils.CV_V4:
        orb = cv2.ORB_create()
    else:
        orb = cv2.ORB()
    kp = orb.detect(image, None)
    for cnt in contours:
        c_area = cv2.contourArea(cnt)
        # Keep only contours that contain at least one ORB keypoint
        # (pointPolygonTest >= 0 means inside or on the edge).
        has_keypoint = any([cv2.pointPolygonTest(cnt, k.pt, False) > -1 for k in kp])
        if not has_keypoint:
            continue
        if(c_area > MIN_FILTER): #FILTERING MIN SIZE
            if utils.DEBUG : print(cv2.contourArea(cnt))
            (x,y),r = cv2.minEnclosingCircle(cnt)
            # Clamp the centre so the crop box below cannot start at a
            # negative coordinate.
            (x,y, r) = (int(max(r,x)), int(max(r,y)), int(r))
            #FILTERING MAX SIZE
            #if r > MAX_FILTER_PERCENT*image.shape[1] or r > MAX_FILTER_PERCENT*image.shape[0]:
            #continue
            (y1,y2,x1,x2) = (y-r,y+r,x-r,x+r)
            result.append( (image[y1:y2,x1:x2], cnt) )
    return result
import inspect
import re
def doc_signature(f):
    """Attempt to parse the signature of a function at the beginning of
    its documentation. Useful for many numpy functions.

    Returns an `inspect.Signature`, or None when the doc is missing or the
    signature line cannot be parsed into valid Python.
    """
    doc = inspect.getdoc(f)
    if not doc:
        # print(f"DD doc_signature: no doc for {qualname(f)}")
        return None
    m = re.search(re_parser.doc_sig, doc)
    if m is None:
        # if not doc.startswith("Not implemented"):
        # doc_first = "\n".join(doc.split("\n")[:1])
        # log.debug(
        # f'doc_signature: no signature found for {qualname(f)} in doc {doc_first}...'
        # )
        return None
    name = m.group(1)
    args = m.group(2)
    # Collapse whitespace, then normalise a few well-known numpy arg-list
    # spellings before trying to compile the signature.
    args = re.sub(r'\s+', ' ', args)
    replacements = {
        '(d0, d1, ..., dn)': '(d)',
        '(a1, a2, ...)': '(a)',
    }
    args = replacements.get(args, args)
    # Build a throwaway function definition whose signature mirrors the doc.
    dummy_s = f"def __dummy_f_doc_sig{args}: pass"
    # globs = dict()
    locs = dict()
    # print(f"DD doc sig: exec {dummy_s}")
    orig_dummy = dummy_s
    # The substitutions below rewrite documentation-style optional-argument
    # brackets ("[, x]") into valid Python defaults ("x=None").
    dummy_s = re.sub(r'<no value>', 'None', dummy_s)
    dummy_s = re.sub(r'\[, start\[, end\]\]', ', start=None, end=None',
                     dummy_s)
    dummy_s = re.sub(r'\[x, y\]', 'x=None, y=None', dummy_s)
    dummy_s = re.sub(r',\],', r'],', dummy_s)
    dummy_s = re.sub(r'\[(\s*,?\s*)(\w+)(\s*,?\s*)\]', r'\1\2=None\3', dummy_s)
    dummy_s = re.sub(r'\(a1, a2, \.\.\.\)', 'a', dummy_s)
    dummy_s = re.sub(r'dtype=(np\.\w+)', 'dtype="\1"', dummy_s)
    # Reordering should not break the bindings, since we pass all
    # arguments as named.
    dummy_s = re.sub(r'start=None, stop,', 'stop, start=None,', dummy_s)
    try:
        exec(dummy_s, globals(), locs)
    except Exception:
        log.warning(
            f"doc_sig: could not parse synthetic sig for {name}: '{dummy_s}' ({orig_dummy})"
        )
        return None
    dummy = locs['__dummy_f_doc_sig']
    sig = inspect.signature(dummy)
    if hasattr(f, '__self__'):
        # Bound methods never document 'self' as the first parameter.
        assert list(sig.parameters.values())[0].name != 'self'
    elif len(name.split('.')) > 1:
        # The doc is like a.cumsum(b, c): it is most probably an
        # instance method. Add self since the function does not have a
        # __self__ attribute.
        self_param = inspect.Parameter('self',
                                       inspect.Parameter.POSITIONAL_OR_KEYWORD)
        new_params = [self_param] + list(sig.parameters.values())
        sig = sig.replace(parameters=new_params)
    # print(f"DD dummy doc sig function: {dummy}: {sig}")
    return sig
import pkg_resources
import scipy
def generate_wav(pattern, tempo=120, loops=1, saveName='audiofile.wav', fs=44100,
                 dynamics=False, customSound=None):
    """
    Generate a .wav file from a pattern.

    Specify a tempo (in BPM), loops, name of the file, sampling rate,
    and decide if you want "dynamics". Dynamics adds offsets to the amplitude
    of the onsets, thus generating more naturally sounding rhythm pattern.

    Parameters
    ----------
    pattern : A rhythm pattern, shape 3x32 (rows: hihat, snare, kick;
        columns: 32nd-note grid positions over two bars).
    tempo : Tempo in BPM, default is 120.
    loops : Number of times to repeat the pattern.
    saveName : Name of the output file ('.wav' is appended if missing).
    fs : Integer, optional
        Samplerate. The default is 44100.
    dynamics : Boolean, optional
        Setting this to true adds dynamics to the audio file. The default is False.
    customSound : Optional sample set name, 'amen' or '909'; any other value
        falls back to the default samples.

    Returns
    -------
    saveName : Path and name of saved audio file.
    """
    # input check
    assert pattern.shape[0] == 3, 'Wrong shape of pattern, should be 3xn!'
    # this experimentally just adds some dynamics: fixed accent patterns per
    # instrument, repeated to cover the 32-step grid
    if dynamics:
        dynamicsHihat = np.tile([0.7, 0.5, 1, 0.5], 8)
        dynamicsSnare = np.tile([0.8, 0.7, 0.8, 0.5, 1, 0.5, 0.8, 0.5], 4)
        dynamicsKick = np.tile([1, 0.5, 0.7, 0.5, 0.8, 0.5, 0.7, 0.5], 4)
    else:
        dynamicsHihat = np.ones(32)
        dynamicsSnare = np.ones(32)
        dynamicsKick = np.ones(32)
    if saveName[-4:] != '.wav':
        saveName = saveName + '.wav'
    # read samples bundled with the package
    if customSound:
        if customSound == 'amen':
            hihatLoc = pkg_resources.resource_stream(__name__, 'samples/amenRideLong.wav')
            kickLoc = pkg_resources.resource_stream(__name__, 'samples/amenKickLong.wav')
            snareLoc = pkg_resources.resource_stream(__name__, 'samples/amenSnareLong.wav')
        elif customSound == '909':
            hihatLoc = pkg_resources.resource_stream(__name__, 'samples/909hihatStereo.wav')
            kickLoc = pkg_resources.resource_stream(__name__, 'samples/909kickStereo.wav')
            snareLoc = pkg_resources.resource_stream(__name__, 'samples/909snareStereo.wav')
    else:
        hihatLoc = pkg_resources.resource_stream(__name__, 'samples/hihat.wav')
        kickLoc = pkg_resources.resource_stream(__name__, 'samples/kick.wav')
        snareLoc = pkg_resources.resource_stream(__name__, 'samples/snare.wav')
    rate, hihatSample = scipy.io.wavfile.read(hihatLoc)
    rate, kickSample = scipy.io.wavfile.read(kickLoc)
    rate, snareSample = scipy.io.wavfile.read(snareLoc)
    # just pushing down the amplitude a bit to leave headroom when mixing
    if not customSound:
        hihatSample = hihatSample * 0.25
        kickSample = kickSample * 0.25
        snareSample = snareSample * 0.25
    maxLengthSample = max([len(hihatSample), len(snareSample), len(kickSample)])
    if rate != fs:
        print('Error: Sample rate mismatch between samples and specified sample rate')
        return
    # create three np arrays for each instrument, fill them, then merge them
    quarter = 60/tempo
    bar = 4 * quarter
    length = 2 * bar * fs  # two bars, in samples
    # figure out a way to set dtype as same as the wav-files
    hihats = np.zeros((int(length + maxLengthSample),2), dtype='int16')
    snare = np.zeros((int(length + maxLengthSample),2), dtype='int16')
    kick = np.zeros((int(length + maxLengthSample),2), dtype='int16')
    # three separate loops
    hihatEvents = pattern[0]
    snareEvents = pattern[1]
    kickEvents = pattern[2]
    # for fast tempi, need to consider that the length won't be enough.
    # hihats: place the sample at each active 32nd-note position
    for n in range(0, len(hihatEvents)):
        if hihatEvents[n] == 1:
            thisPosition = int(round((length/32) * n))
            hihats[thisPosition:thisPosition+len(hihatSample),] = hihats[thisPosition:thisPosition+len(hihatSample),] + (hihatSample * dynamicsHihat[n])
    # snare
    for n in range(0, len(snareEvents)):
        if snareEvents[n] == 1:
            thisPosition = int(round((length/32) * n))
            snare[thisPosition:thisPosition+len(snareSample),] = snare[thisPosition:thisPosition+len(snareSample),] + (snareSample * dynamicsSnare[n])
    # kick
    for n in range(0, len(kickEvents)):
        if kickEvents[n] == 1:
            thisPosition = int(round((length/32) * n))
            kick[thisPosition:thisPosition+len(kickSample),] = kick[thisPosition:thisPosition+len(kickSample),] + (kickSample * dynamicsKick[n])
    # mix together with fixed per-instrument gains
    jointSample = (hihats * 0.1) + (snare * 0.3) + (kick * 0.3)
    # ensure length
    #jointSample = jointSample[0:int(round(length)),].astype('int16')
    # add loops: repeat the two-bar mix back to back
    looped = np.zeros((int(((length) * loops + (2 * length))), 2), dtype='int16')
    #if loops > 1:
    for n in range(0, loops):
        thisPosition = n*int(length)
        looped[thisPosition:thisPosition+len(jointSample)] = looped[thisPosition:thisPosition+len(jointSample)] + jointSample
    # now trim it, keeping a tail for the last sample to ring out
    looped = looped[0:(int(round(length*loops))+maxLengthSample)]
    #else:
    # looped = jointSample[0:(int(round(length*loops))+maxLengthSample)]
    # peak-normalize to full int16 range
    normalized = np.array((looped / np.max(np.abs(looped.flatten()))) * 32767, dtype='int16')
    # write wav
    scipy.io.wavfile.write(saveName, fs, normalized)
    return saveName
def svn_fs_upgrade(*args):
    """svn_fs_upgrade(char path, apr_pool_t pool) -> svn_error_t"""
    # Thin SWIG wrapper: forwards directly to the generated C binding.
    return _fs.svn_fs_upgrade(*args)
def api_url(service: str = "IPublishedFileService",
            function: str = "QueryFiles",
            version: str = "v1") -> str:
    """
    Builds a steam web API url.

    :param service: The steam service to attach to.
    :param function: The function to call.
    :param version: The API version.
    :return: The built URL (with trailing slash).
    """
    return f"https://api.steampowered.com/{service}/{function}/{version}/"
import re
def reg_all_keywords(data):
    """
    Extract all keywords from a meta file. Keywords have the form:
        ***[:###]***
    and the ### part is returned.

    :param data: text to scan.
    :return: list of keyword strings, or None when there are none.
    """
    keywords = re.findall(r"\[:([^\[\]]+)\]", data)
    # An empty result is reported as None, matching the original contract.
    return keywords or None
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance for the TkAgg backend.

    Extra args/kwargs are forwarded to the figure class; the optional
    'FigureClass' kwarg overrides the default Figure.
    """
    # Preserve window focus while the new Tk window is created.
    _focus = windowing.FocusManager()
    FigureClass = kwargs.pop('FigureClass', Figure)
    figure = FigureClass(*args, **kwargs)
    window = Tk.Tk()
    canvas = FigureCanvasTkAgg(figure, master=window)
    figManager = FigureManagerTkAgg(canvas, num, window)
    # In interactive mode the figure is shown immediately.
    if matplotlib.is_interactive():
        figManager.show()
    return figManager
def get_platform():
    """Gets the platform (example: azure)."""
    # Thin accessor over the shared configuration store.
    return get_config_value("platform")
import sys
import difflib
def transform_command(src, show_diff=True):
"""Returns the results of firing the precommand handles."""
i = 0
limit = sys.getrecursionlimit()
lst = ""
raw = src
while src != lst:
lst = src
srcs = events.on_transform_command.fire(cmd=src)
for s in srcs:
if s != lst:
src = s
break
i += 1
if i == limit:
print_exception(
"Modifications to source input took more than "
"the recursion limit number of iterations to "
"converge."
)
debug_level = XSH.env.get("XONSH_DEBUG")
if show_diff and debug_level >= 1 and src != raw:
sys.stderr.writelines(
difflib.unified_diff(
raw.splitlines(keepends=True),
src.splitlines(keepends=True),
fromfile="before precommand event",
tofile="after precommand event",
)
)
return src | fa51d455b9b7d724e8648e8f33a770ad6fb583b4 | 28,332 |
def ascii_from_object(space, w_obj):
    """Implements builtins.ascii()"""
    # repr is guaranteed to be unicode
    w_repr = space.repr(w_obj)
    # Escape any non-ASCII characters instead of failing, then decode
    # the pure-ASCII result back to unicode.
    w_encoded = encode_object(space, w_repr, 'ascii', 'backslashreplace')
    return decode_object(space, w_encoded, 'ascii', 'strict')
import os


def project_dir(project_name=None):
    """
    Get the root path of the current project.

    :param project_name: project folder name; defaults to
        'stock-technical-analysis'
    :return: absolute path up to and including the project folder
    """
    PROJECT_NAME = 'stock-technical-analysis' if project_name is None else project_name
    project_path = os.path.abspath(os.path.dirname(__file__))
    # NOTE(review): the "\\" separator makes this Windows-only, and if
    # the project name is not found `find` returns -1, producing a
    # truncated path -- confirm callers only run this inside the
    # project tree on Windows.
    root_path = project_path[:project_path.find("{}\\".format(PROJECT_NAME)) + len("{}\\".format(PROJECT_NAME))]
    return root_path
def capture(p):
    """Return a peg that acts like p, except it adds to the values
    tuple the text that p matched."""
    # NOTE: Python 2-only syntax -- the lambda uses tuple parameter
    # unpacking `(i, vals)`, which was removed in Python 3 (PEP 3113).
    # The lambda re-runs p at position i and appends the matched slice
    # s[i:i2] to each successful result's values tuple.
    return _Peg(('capture(%r)', p),
                lambda s, far, (i, vals):
                    [(i2, vals2 + (s[i:i2],))
                     for i2, vals2 in p.run(s, far, (i, vals))])
def html_chart(df, height=1200):
    """
    Make an interactive Plotly chart: one stacked subplot per column.

    param df: input dataframe
    param height: optional plot height
    returns: plotly chart
    """
    fig = make_subplots(rows=(len(df.columns)),
                        cols=1,
                        subplot_titles=df.columns,
                        shared_xaxes=True,
                        vertical_spacing=0.007
                        )
    # One scatter trace per column, placed on consecutive rows.
    for row, column in enumerate(df.columns, start=1):
        trace = go.Scatter(
            {'x': df.index,
             'y': df[column]})
        fig.add_trace(trace, row=row, col=1)
    fig.update_layout(height=height, font_size=9)
    return fig
def build_0565_color_lookup():
    """Build the lookup table for the ARGB_0565 color format"""
    # Bit layout (MSB..LSB): RRRRR GGGGGG BBBBB -> 16 bits, 65536 codes.
    green_bits = 6
    blue_bits = 5
    red_shift = green_bits + blue_bits
    green_shift = blue_bits
    values_5bit = BITDEPTH_VALUE_LOOKUPS[5]
    values_6bit = BITDEPTH_VALUE_LOOKUPS[6]
    table = [None] * 65536
    for red_code, red in enumerate(values_5bit):
        red_offset = red_code << red_shift
        for green_code, green in enumerate(values_6bit):
            # Pre-combine the red and green contributions once per pair.
            prefix = red_offset | (green_code << green_shift)
            for blue_code, blue in enumerate(values_5bit):
                table[prefix | blue_code] = (red, green, blue, 255)
    return table
def build_que_input_from_segments(context, answer, question, tokenizer,
                                  max_input_length=1000, with_eos=True,
                                  with_labels=True):
    """ Build a sequence of input from 3 segments:
    context, answer, question.

    The result is left-padded (or the context is truncated) so that the
    built input is exactly ``max_input_length`` tokens long.

    :param context: token ids of the context segment
    :param answer: token ids of the answer segment
    :param question: token ids of the question segment
    :param tokenizer: tokenizer providing ``convert_tokens_to_ids``
    :param max_input_length: fixed total length of the built input
    :param with_eos: append the EOS token after the question
    :param with_labels: also emit LM ``labels`` (only question tokens are
        kept; everything else is masked with -1)
    :return: dict with ``input_ids``, ``token_type_ids`` and optionally
        ``labels``
    """
    bos, eos, ctx, ans, que, pad, gen = \
        tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    padded = []
    context = [bos, ctx] + context
    answer = [ans] + answer
    question = [que] + question + ([eos] if with_eos else [])
    combined = list(chain(context, answer, question))
    len_combined = len(combined)
    if len_combined > max_input_length:
        # Truncate context from the right to fit the length budget.
        # NOTE(review): if answer + question alone exceed
        # max_input_length, len_context goes negative and the slice
        # mangles the context -- confirm inputs are bounded upstream.
        len_context = max_input_length - len(answer) - len(question)
        context = context[:len_context]
    elif len_combined < max_input_length:
        len_remaining = max_input_length - len_combined
        padded = [pad] * len_remaining
    instance = {}
    instance["input_ids"] = list(chain(padded, context, answer, question))
    instance["token_type_ids"] = [pad] * len(padded) + [ctx] * len(context)\
        + [ans] * len(answer) + [que] * len(question)
    if with_labels:
        # Mask everything except the question body (the +1 masks the
        # leading question-segment token as well).
        instance["labels"] = [-1] * (len(padded) + len(context) + len(answer)
                                     + 1) + question[1:]
    return instance
import logging
import sys


def default_logging_config(logger):
    """Set up the default handler and formatter on the given logger."""
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(ColorFormatter())
    # Replace any existing handlers with the single stdout handler.
    logger.handlers = [stdout_handler]
    logger.propagate = True
    return logger
import traceback


def verify_preload(upload_id, language=None):
    """
    Continue the verification process by counting the number of geounits
    in the uploaded file and compare it to the number of geounits in the
    basest geolevel. After this step completes, the copy_to_characteristics
    method is called.
    Parameters:
        upload_id - The id of the SubjectUpload record.
        language - Optional. If provided, translate the status messages
            into the specified language (if message files are complete).
    Returns:
        A status dict with 'task_id', 'success' and 'messages' keys.
    """
    prev_lang = None
    if language is not None:
        prev_lang = get_language()
        activate(language)
    upload = SubjectUpload.objects.get(id=upload_id)
    geolevel, nunits = LegislativeLevel.get_basest_geolevel_and_count()
    # This seizes postgres -- probably small memory limits.
    #aligned_units = upload.subjectstage_set.filter(portable_id__in=permanent_units).count()
    permanent_units = geolevel.geounit_set.all().order_by(
        'portable_id').values_list(
            'portable_id', flat=True)
    temp_units = upload.subjectstage_set.all().order_by(
        'portable_id').values_list(
            'portable_id', flat=True)
    # quick check: make sure the first and last items are aligned
    ends_match = (permanent_units[0] == temp_units[0] and
                  permanent_units[permanent_units.count()
                                  - 1] == temp_units[temp_units.count() - 1])
    msg = _(
        'There are a correct number of geounits in the uploaded Subject file, '
    )
    if not ends_match:
        msg += _(
            'but the geounits do not have the same portable ids as those in the database.'
        )
    # Count the positions where both sorted sequences carry the same
    # portable_id (i.e. the mutually shared ids). Using a generator
    # keeps this correct on both Python 2 and 3 (len(filter(...)) only
    # worked on Python 2).
    aligned_units = sum(
        1 for p_id, t_id in zip(permanent_units, temp_units) if p_id == t_id)
    if ends_match and nunits != aligned_units:
        # The number of geounits in the uploaded file match, but there are some mismatches.
        mismatched = nunits - aligned_units
        msg += _n(
            'but %(count)d geounit does not match the geounits in the database.',
            'but %(count)d geounits do not match the geounits in the database.',
            mismatched) % {
                'count': mismatched
            }
    if not ends_match or nunits != aligned_units:
        logger.debug(msg)
        upload.status = 'ER'
        upload.save()
        upload.subjectstage_set.all().delete()
        status = {'task_id': None, 'success': False, 'messages': [msg]}
    else:
        try:
            # The next task will load the units into the characteristic table
            task = copy_to_characteristics.delay(
                upload_id, language=language).task_id
            status = {
                'task_id': task,
                'success': True,
                'messages': [_('Copying records to characteristic table ...')]
            }
        except Exception:
            logger.error(
                "Couldn't copy characteristics: %s" % traceback.format_exc())
            # Previously `status` was left unbound here, turning a task
            # dispatch failure into a NameError; report it instead.
            status = {
                'task_id': None,
                'success': False,
                'messages':
                [_('Could not copy records to characteristic table.')]
            }
    # reset the language back to the default
    if prev_lang is not None:
        activate(prev_lang)
    return status
def transition(field, source='*', target=None, conditions=None, custom=None):
    """
    Method decorator for mark allowed transitions
    Set target to None if current state needs to be validated and
    has not changed after the function call

    :param field: the FSM field the transition operates on
    :param source: source state (or list/tuple of states); '*' = any
    :param target: target state, or None to only validate the state
    :param conditions: optional list of condition callables
    :param custom: optional dict of custom transition properties
    """
    # The previous defaults (conditions=[], custom={}) were mutable
    # objects shared by every decorated method; normalize None here so
    # each call gets its own fresh containers.
    conditions = [] if conditions is None else conditions
    custom = {} if custom is None else custom

    def inner_transition(func):
        fsm_meta = getattr(func, '_django_fsm', None)
        if not fsm_meta:
            fsm_meta = FSMMeta(field=field, method=func)
            setattr(func, '_django_fsm', fsm_meta)

        @wraps(func)
        def _change_state(instance, *args, **kwargs):
            return fsm_meta.field.change_state(instance, func, *args, **kwargs)

        if isinstance(source, (list, tuple)):
            for state in source:
                func._django_fsm.add_transition(state, target, conditions, custom)
        else:
            func._django_fsm.add_transition(source, target, conditions, custom)
        return _change_state
    return inner_transition
def get_inspexp_frames(slice, inspexp_data, images_path):
    """
    Loads inspiration and expiration frames for the specified cine-MRI slice

    Note: the `slice` parameter shadows the builtin of the same name;
    kept for interface compatibility.

    Parameters
    ----------
    slice: CineMRISlice
       A cine-MRI slice for which to extract inspiration and expiration frames
    inspexp_data : dict
       A dictionary with inspiration / expiration frames data
    images_path : Path
       A path to the image folder in cine-MRI archive
    Returns
    -------
    insp_frame, exp_frame : ndarray
       The inspiration and expiration frames
    """
    insp_ind, exp_ind = get_insp_exp_indices(slice, inspexp_data)
    # Load the expiration frame (visceral slide is computed for the expiration frame)
    slice_path = slice.build_path(images_path)
    # Read the whole slice once, then index out the two frames.
    slice_array = sitk.GetArrayFromImage(sitk.ReadImage(str(slice_path)))
    insp_frame = slice_array[insp_ind]
    exp_frame = slice_array[exp_ind]
    return insp_frame, exp_frame
def cleaned_reviews_dataframe(reviews_df):
    """
    Strip newline "\n" characters from titles and descriptions and drop
    the "Unnamed: 0" column generated when loading a DataFrame from CSV.
    This is the only cleaning required prior to NLP preprocessing.
    INPUT: Pandas DataFrame with 'title' and 'desc' column names
    OUTPUT: Cleaned DataFrame with combined 'title_desc' column
    """
    for column in ('title', 'desc'):
        reviews_df[column] = reviews_df[column].str.replace('\n', '')
    reviews_df['title_desc'] = reviews_df['title'] + reviews_df['desc']
    if 'Unnamed: 0' in reviews_df.columns:
        reviews_df = reviews_df.drop('Unnamed: 0', axis=1)
    return reviews_df
def not_list(l):
    """Return the element wise negation of a list of booleans"""
    # Reject anything that is not strictly a bool (ints etc.).
    assert all(isinstance(item, bool) for item in l)
    return [not item for item in l]
def first_order_forward(n, zero = True):
    """Return an (n+1) x n matrix: a row of ones stacked on top of the
    first-order forward-difference operator (-I plus the superdiagonal
    identity).

    The ``zero`` argument is accepted for interface compatibility but is
    currently unused.
    """
    forward_diff = np.eye(n, k = 1) - np.eye(n)
    return np.vstack([np.ones(n), forward_diff])
def boil(config, recipe_config):
    """ Boil wort.

    Prints hop-addition instructions (first-wort, timed, flameout) and,
    when pre-boil measurements are available, predicts or back-computes
    original gravity, evaporation rate and brewhouse efficiency,
    updating recipe_config['Brew Day'] in place.

    :param config: global config dict; must contain 'unit_parser', and
        may provide fallback 'Boil Time' / 'Evaporation Rate' /
        'Brewhouse Efficiency' values.
    :param recipe_config: recipe dict ('Hops', pre-boil measurements,
        optional 'Brew Day' sub-dict).
    :return: the (config, recipe_config) pair.
    """
    up = config['unit_parser']
    if 'Hops' in recipe_config:
        hops = recipe_config['Hops']
        # First-wort hop additions (added during lautering).
        for hop in hops:
            if 'addition type' in hop and hop['addition type'] == 'fwh':
                if 'mass' in hop and 'name' in hop and 'type' in hop:
                    mass = up.convert(hop['mass'], 'ounces')
                    variety = hop['name']
                    pellets = hop['type']
                    print('Add {0:.2f}oz {1:s} {2:s} during lautering process (first wort hopping).'.format(mass, variety, pellets))
        # Timed additions, announced in descending boil-time order.
        time_additions = []
        for hop in hops:
            if 'boil_time' in hop and 'mass' in hop and 'name' in hop and 'type' in hop:
                boil_time = up.convert(hop['boil_time'], 'minutes')
                mass = up.convert(hop['mass'], 'ounces')
                variety = hop['name']
                pellets = hop['type']
                time_additions.append({'boil_time': boil_time, 'mass': mass, 'variety': variety, 'pellets': pellets})
        time_additions = sorted(time_additions, key=lambda k: k['boil_time'], reverse=True)
        for hop in time_additions:
            if hop['boil_time'] == 1:
                plural = ''
            else:
                plural = 's'
            print('Add {0:.2f}oz {2:s} {3:s} at {1:.0f} minute{4:s}.'.format(hop['mass'], hop['boil_time'], hop['variety'], hop['pellets'], plural))
        # Flameout additions.
        for hop in hops:
            if 'addition type' in hop and hop['addition type'] == 'flameout':
                if 'mass' in hop and 'name' in hop and 'type' in hop:
                    mass = up.convert(hop['mass'], 'ounces')
                    variety = hop['name']
                    pellets = hop['type']
                    print('Add {0:.2f}oz {1:s} {2:s} at flameout.'.format(mass, variety, pellets))
    # Gravity/efficiency calculations need both pre-boil measurements.
    if ('Pre-Boil Volume' not in recipe_config or
            'Pre-Boil Gravity' not in recipe_config):
        return config, recipe_config
    pre_bv = up.convert(recipe_config['Pre-Boil Volume'], 'gallons')
    pre_bg = recipe_config['Pre-Boil Gravity']
    if ('Brew Day' in recipe_config and
            'Post-Boil Volume' in recipe_config['Brew Day'] and
            'Original Gravity' in recipe_config['Brew Day']):
        # Post-boil measurements exist: back-compute actual efficiency
        # and evaporation rate from the measured values.
        if 'Pre-Boil Volume' in recipe_config['Brew Day']:
            actual_pre_bv = up.convert(recipe_config['Brew Day']['Pre-Boil Volume'], 'gallons')
        if 'Boil Time' in recipe_config:
            boil_time = up.convert(recipe_config['Boil Time'], 'hours')
        elif 'Boil Time' in config:
            boil_time = up.convert(config['Boil Time'], 'hours')
        else:
            boil_time = 1.0
        post_bv = up.convert(recipe_config['Brew Day']['Post-Boil Volume'], 'gallons')
        og = recipe_config['Brew Day']['Original Gravity']
        post_gp = specific_gravity_to_gravity_points(og, post_bv)
        pre_gp = specific_gravity_to_gravity_points(pre_bg, pre_bv)
        if 'Brewhouse Efficiency' in recipe_config:
            planned_efficiency = recipe_config['Brewhouse Efficiency']
        elif 'Brewhouse Efficiency' in config:
            planned_efficiency = config['Brewhouse Efficiency']
        else:
            planned_efficiency = 0.7
        efficiency = planned_efficiency * post_gp / pre_gp
        recipe_config['Brew Day']['Brewhouse Efficiency'] = efficiency
        # NOTE(review): actual_pre_bv is only bound when 'Pre-Boil
        # Volume' exists in 'Brew Day'; otherwise this line raises
        # NameError -- confirm upstream always records it.
        evaporation_rate = (actual_pre_bv - post_bv) / boil_time
        recipe_config['Brew Day']['Evaporation Rate'] = '{0:.06f} gallons_per_hour'.format(evaporation_rate)
        print('Actual post-boil volume: {0:.02f} gallons'.format(post_bv))
        print('Evaporation rate: {0:.02f} gallons per hour'.format(evaporation_rate))
        print('Original gravity: {0:.03f}'.format(og))
        print('Efficiency: {0:.02f}'.format(efficiency))
    elif ('Brew Day' in recipe_config
          and 'Pre-Boil Volume' in recipe_config['Brew Day']
          and 'Pre-Boil Gravity' in recipe_config['Brew Day']):
        # Only pre-boil measurements exist: predict original gravity.
        pre_boil_volume = up.convert(recipe_config['Brew Day']['Pre-Boil Volume'], 'gallons')
        pre_boil_gravity = recipe_config['Brew Day']['Pre-Boil Gravity']
        pre_gp = pre_boil_gravity - 1
        if 'Boil Time' in recipe_config:
            boil_time = up.convert(recipe_config['Boil Time'], 'hours')
        elif 'Boil Time' in config:
            boil_time = up.convert(config['Boil Time'], 'hours')
        else:
            boil_time = 1.0
        if 'Evaporation Rate' in recipe_config:
            evaporation_rate = up.convert(recipe_config['Evaporation Rate'], 'gallons_per_hour')
        elif 'Evaporation Rate' in config:
            evaporation_rate = up.convert(config['Evaporation Rate'], 'gallons_per_hour')
        else:
            evaporation_rate = 1.75
        post_boil_volume = pre_boil_volume - evaporation_rate * boil_time
        og = 1 + pre_gp * pre_boil_volume / post_boil_volume
        print('Predicted original gravity: {0:.03f}'.format(og))
        recipe_config['Brew Day']['Original Gravity'] = og
    else:
        if 'Original Gravity' in recipe_config:
            print('Predicted original gravity: {0:.03f}'.format(recipe_config['Original Gravity']))
    return config, recipe_config
def _ecdf(
    data=None,
    p=None,
    x_axis_label=None,
    y_axis_label="ECDF",
    title=None,
    plot_height=300,
    plot_width=450,
    staircase=False,
    complementary=False,
    x_axis_type="linear",
    y_axis_type="linear",
    **kwargs,
):
    """
    Create a plot of an ECDF.
    Parameters
    ----------
    data : array_like
        One-dimensional array of data. Nan's are ignored.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    x_axis_label : str, default None
        Label for the x-axis. Ignored if `p` is not None.
    y_axis_label : str, default 'ECDF' (or 'ECCDF' when `complementary`
        is True and no explicit label is given)
        Label for the y-axis. Ignored if `p` is not None.
    title : str, default None
        Title of the plot. Ignored if `p` is not None.
    plot_height : int, default 300
        Height of plot, in pixels. Ignored if `p` is not None.
    plot_width : int, default 450
        Width of plot, in pixels. Ignored if `p` is not None.
    staircase : bool, default False
        If True, make a plot of a staircase ECDF (staircase). If False,
        plot the ECDF as dots.
    complementary : bool, default False
        If True, plot the empirical complementary cumulative
        distribution functon.
    x_axis_type : str, default 'linear'
        Either 'linear' or 'log'.
    y_axis_type : str, default 'linear'
        Either 'linear' or 'log'.
    kwargs
        Any kwargs to be passed to either p.circle or p.line, for
        `staircase` being False or True, respectively.
    Returns
    -------
    output : bokeh.plotting.Figure instance
        Plot populated with ECDF.
    """
    # Check data to make sure legit
    data = utils._convert_data(data)
    # Data points on ECDF
    x, y = _ecdf_vals(data, staircase, complementary)
    # Instantiate Bokeh plot if not already passed in
    if p is None:
        # Previously the parameter was shadowed by a kwargs.pop that
        # could never see it (named parameters never land in **kwargs),
        # so a caller-supplied y_axis_label was silently ignored. Honor
        # it, and only upgrade the default label for complementary plots.
        if complementary and y_axis_label == "ECDF":
            y_axis_label = "ECCDF"
        p = bokeh.plotting.figure(
            plot_height=plot_height,
            plot_width=plot_width,
            x_axis_label=x_axis_label,
            y_axis_label=y_axis_label,
            x_axis_type=x_axis_type,
            y_axis_type=y_axis_type,
            title=title,
        )
    if staircase:
        # Line of steps
        p.line(x, y, **kwargs)
        # Rays for ends
        if complementary:
            p.ray(x=x[0], y=1, length=0, angle=np.pi, **kwargs)
            p.ray(x=x[-1], y=0, length=0, angle=0, **kwargs)
        else:
            p.ray(x=x[0], y=0, length=0, angle=np.pi, **kwargs)
            p.ray(x=x[-1], y=1, length=0, angle=0, **kwargs)
    else:
        p.circle(x, y, **kwargs)
    return p
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes


def _get_cipher(key: bytes) -> Cipher:
    """Return a DES3 (Triple DES) cipher in CBC mode for *key*."""
    algorithm = algorithms.TripleDES(key)
    # NOTE(review): the IV is the first 8 bytes of the key itself.
    # Reusing key material as a static IV weakens CBC; presumably this
    # is required for compatibility with a peer protocol -- confirm
    # before changing.
    cipher = Cipher(algorithm, modes.CBC(key[:8]), backend=default_backend())
    return cipher
def get_real_dist2rim(x_dist, radius_cut, radius_sphere):
    """
    Get the real distance to rim: rescale an in-plane distance by the
    spherical-cap height over the cut radius.

    :param x_dist: distance measured in the cut plane
    :param radius_cut: radius of the circular cut
    :param radius_sphere: radius of the sphere
    :return: the transformed distance
    """
    cap_height = radius_sphere - np.sqrt(radius_sphere ** 2 - radius_cut ** 2)
    return x_dist * cap_height / radius_cut
import os
import codecs
import json
import traceback


def read_json(filename):
    """Read and parse a JSON file from this module's ``data`` directory.

    :param filename: file name relative to ``<module dir>/data``
    :return: the decoded JSON object
    :raises Exception: if the file cannot be read (the original cause is
        chained and a traceback is printed)
    """
    basedir = os.path.dirname(os.path.abspath(__file__))
    indir = os.path.join(basedir, "data")
    readfilename = os.path.join(indir, filename)
    try:
        # Context manager guarantees the handle is closed; json.load
        # avoids materializing the file as an intermediate string.
        with open(readfilename, 'r', encoding="utf-8") as f:
            jsonData = json.load(f)
    except IOError as e:
        traceback.print_exc()
        raise Exception(str(e)) from e
    return jsonData
def get_workers_stats(worker_class=None):
    """Get the RQ workers stats.
    Args:
        worker_class (type): RQ Worker class
    Returns:
        list: List of worker stats as a dict {name, queues, state}
    Raises:
        redis.exceptions.RedisError: On Redis connection errors
    """
    if worker_class is None:
        worker_class = Worker
    stats = []
    for worker in worker_class.all():
        stats.append({
            "name": worker.name,
            "queues": worker.queue_names(),
            "state": worker.get_state(),
        })
    return stats
def _get_trafo(cmatrix: cairo.Matrix) -> qc3const.TrafoType:
"""Converts cairo matrix to trafo list
:param cmatrix: (cairo.Matrix) cairo transformation matrix
:return: (qc3const.TrafoType) transformation matrix
"""
return [i for i in cmatrix] | ea9c3c3b8466a7025fce5f48ceb02baa5ae9d319 | 28,352 |
import bokeh
from bokeh.plotting import output_file, ColumnDataSource, show, figure
from bokeh.models import HoverTool, CategoricalColorMapper, LinearColorMapper, Legend, LegendItem, ColorBar
from bokeh.palettes import Category20


def mousover_plot(datadict, attr_x, attr_y, attr_color=None, attr_size=None, save_file=None, plot_title="",
                  point_transparency=0.5, point_size=20, default_color="#2222aa", hidden_keys=None):
    """ Produces dynamic scatter plot that can be interacted with by mousing over each point to see its label
    Args:
        datadict (dict): keys contain attributes, values of lists of data from each attribute to plot (each list index corresponds to datapoint).
            The values of all extra keys in this dict are considered (string) labels to assign to datapoints when they are moused over.
            Apply _formatDict() to any entries in datadict which are themselves dicts.
        attr_x (str): name of column in dataframe whose values are shown on x-axis (eg. 'latency'). Can be categorical or numeric values
        attr_y (str): name of column in dataframe whose values are shown on y-axis (eg. 'validation performance'). Must be numeric values.
        attr_size (str): name of column in dataframe whose values determine size of dots (eg. 'memory consumption'). Must be numeric values.
        attr_color (str): name of column in dataframe whose values determine color of dots (eg. one of the hyperparameters). Can be categorical or numeric values
        point_labels (list): list of strings describing the label for each dot (must be in same order as rows of dataframe)
        save_file (str): where to save plot to (html) file (if None, plot is not saved)
        plot_title (str): Title of plot and html file
        point_transparency (float): alpha value of points, lower = more transparent
        point_size (int): size of points, higher = larger
        hidden_keys (list[str]): which keys of datadict NOT to show labels for. Defaults to none.
    """
    # The previous mutable default (hidden_keys=[]) was appended to
    # below, leaking entries across calls; normalize and copy so the
    # caller's list is never mutated either.
    hidden_keys = [] if hidden_keys is None else list(hidden_keys)
    # NOTE(review): bokeh is imported at module level, so an ImportError
    # here is unlikely to fire; preserved as-is from the original.
    try:
        with warning_filter():
            bokeh_imported = True
    except ImportError:
        bokeh_imported = False
    if not bokeh_imported:
        warnings.warn('AutoGluon summary plots cannot be created because bokeh is not installed. To see plots, please do: "pip install bokeh==2.0.1"')
        return None
    n = len(datadict[attr_x])
    for key in datadict.keys():  # Check lengths are all the same
        if len(datadict[key]) != n:
            raise ValueError("Key %s in datadict has different length than %s" % (key, attr_x))
    attr_x_is_string = any([type(val) == str for val in datadict[attr_x]])
    if attr_x_is_string:
        attr_x_levels = list(set(datadict[attr_x]))  # use this to translate between int-indices and x-values
        og_x_vals = datadict[attr_x][:]
        attr_x2 = attr_x + "___"  # this key must not already be in datadict.
        hidden_keys.append(attr_x2)
        datadict[attr_x2] = [attr_x_levels.index(category) for category in og_x_vals]  # convert to ints
    legend = None
    if attr_color is not None:
        attr_color_is_string = any([type(val) == str for val in datadict[attr_color]])
        color_datavals = datadict[attr_color]
        if attr_color_is_string:
            attr_color_levels = list(set(color_datavals))
            colorpalette = Category20[20]
            color_mapper = CategoricalColorMapper(factors=attr_color_levels, palette=[colorpalette[2*i % len(colorpalette)] for i in range(len(attr_color_levels))])
            legend = attr_color
        else:
            color_mapper = LinearColorMapper(palette='Magma256', low=min(datadict[attr_color]), high=max(datadict[attr_color])*1.25)
        default_color = {'field': attr_color, 'transform': color_mapper}
    if attr_size is not None:  # different size for each point, ensure mean-size == point_size
        attr_size2 = attr_size + "____"
        hidden_keys.append(attr_size2)
        og_sizevals = np.array(datadict[attr_size])
        sizevals = point_size + (og_sizevals - np.mean(og_sizevals))/np.std(og_sizevals) * (point_size/2)
        if np.min(sizevals) < 0:
            sizevals = -np.min(sizevals) + sizevals + 1.0
        datadict[attr_size2] = list(sizevals)
        point_size = attr_size2
    if save_file is not None:
        output_file(save_file, title=plot_title)
        print("Plot summary of models saved to file: %s" % save_file)
    source = ColumnDataSource(datadict)
    TOOLS = "crosshair,pan,wheel_zoom,box_zoom,reset,hover,save"
    p = figure(title=plot_title, tools=TOOLS)
    if attr_x_is_string:
        circ = p.circle(attr_x2, attr_y, line_color=default_color, line_alpha=point_transparency,
                        fill_color=default_color, fill_alpha=point_transparency, size=point_size, source=source)
    else:
        circ = p.circle(attr_x, attr_y, line_color=default_color, line_alpha=point_transparency,
                        fill_color=default_color, fill_alpha=point_transparency, size=point_size, source=source)
    hover = p.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([(key, '@'+key+'{safe}') for key in datadict.keys() if key not in hidden_keys])
    # Format axes:
    p.xaxis.axis_label = attr_x
    p.yaxis.axis_label = attr_y
    if attr_x_is_string:  # add x-ticks:
        p.xaxis.ticker = list(range(len(attr_x_levels)))
        p.xaxis.major_label_overrides = {i: attr_x_levels[i] for i in range(len(attr_x_levels))}
    # Legend additions:
    if attr_color is not None and attr_color_is_string:
        legend_it = []
        for i in range(len(attr_color_levels)):
            legend_it.append(LegendItem(label=attr_color_levels[i], renderers=[circ], index=datadict[attr_color].index(attr_color_levels[i])))
        legend = Legend(items=legend_it, location=(0, 0))
        p.add_layout(legend, 'right')
    if attr_color is not None and not attr_color_is_string:
        color_bar = ColorBar(color_mapper=color_mapper, title=attr_color,
                             label_standoff=12, border_line_color=None, location=(0, 0))
        p.add_layout(color_bar, 'right')
    if attr_size is not None:
        p.add_layout(Legend(items=[LegendItem(label='Size of points based on "'+attr_size + '"')]), 'below')
    show(p)
import numpy


def globalInequalityChanges(Y, fieldNames, outFile, permutations=9999):
    """Global inequality change test
    This function tests whether global inequality has significantly changed
    for the Theil statistic over the period t to t+k. For more information on
    this function see [Rey_Sastre2010] (this function recreates Table 2 in
    that paper).
    Layer.inequality('globalInequalityChanges', var, outFile, <permutations>)
    :keyword var: List with variables to be analyzed; e.g: ['Y1978', 'Y1979', 'Y1980', 'Y1981']
    :type var: list
    :keyword outFile: Name for the output file; e.g.: "regionsDifferenceTest.csv"
    :type outFile: string
    :keyword permutations: Number of random spatial permutations. Default value permutations = 9999.
    :type permutations: integer
    **Example 1** ::
        import clusterpy
        china = clusterpy.importArcData("clusterpy/data_examples/china")
        result = china.inequality('globalInequalityChanges',['Y1978', 'Y1979', 'Y1980', 'Y1981'], "interregional_inequality_differences.csv")
    """
    def getVar(Y, possition):
        # Slice out a single time period (column `possition`) per area.
        result = {}
        for k in Y:
            result[k] = [Y[k][possition]]
        return result
    def shufflePeriods(Y,pos1,pos2):
        # Randomly pick, per area, one of the two periods' values.
        result = {}
        for k in Y:
            possibilities = [Y[k][pos1],Y[k][pos2]]
            result[k] = [possibilities[numpy.random.randint(0,2)]]
        return result
    print("Creating global Inequality Changes [Rey_Sastre2010 - Table 2]")
    results = {}
    r2a = list(range(len(Y)))
    for nv1, var1 in enumerate(fieldNames):
        var = getVar(Y,nv1)
        t1,tb1,tw1 = theil(var,r2a)
        # Diagonal stores the Theil statistic of the period itself;
        # (var1, var2) above the diagonal stores t2 - t1; (var2, var1)
        # below the diagonal stores the permutation pseudo p-value.
        results[(var1,var1)] = t1
        for nv2, var2 in enumerate(fieldNames[nv1+1:]):
            var = getVar(Y,nv1+nv2+1)
            t2,tb2,tw2 = theil(var,r2a)
            results[(var1,var2)] = t2 - t1
            numerator = 1
            for iter in range(permutations):
                var = shufflePeriods(Y,nv1,nv1 + nv2 + 1)
                t3,tb3,tw3 = theil(var,r2a)
                if abs(t2-t1) < abs(t3-t1):
                    numerator += 1
            results[(var2,var1)] = numerator/float(permutations+1)
    if outFile:
        # NOTE(review): file handle is not closed on exceptions; a
        # `with open(...)` block would be safer.
        fout = open(outFile,"w")
        # Build CSV rows by stripping list-repr punctuation.
        aux = str(fieldNames).replace("[","")
        aux = aux.replace("]","")
        aux = aux.replace("'","")
        line = "".join([",",aux])
        fout.write("".join([line,"\n"]))
        for var1 in fieldNames:
            line = [var1]
            for var2 in fieldNames:
                line += [results[(var1,var2)]]
            line = str(line).replace("[","")
            line = line.replace("]","")
            line = line.replace("'","")
            fout.write("".join([line,"\n"]))
        fout.close()
    print("global Inequality Changes created!")
    return results
import re


def generate_gisaid_fasta_df(fname, rtype="nuc", ambiguous_tol=0.01, len_tol=0.9):
    """
    Generate pandas dataframe for sequences downloaded from GISAID.

    Sequences with more than `ambiguous_tol` ambiguous residues, or
    shorter than `len_tol` of the expected gene length, are discarded.
    NOTE(review): relies on module-level `pd`, `parsefasta` and
    `toYearFraction` helpers -- confirm they are imported/defined.
    """
    fdat_df = []
    # Canonical segment order for influenza genes.
    standardise_gene_name = {"PB2":1, "PB1":2, "PA":3, "HA":4, "NP":5, "NA":6, "MP":7, "NS":8}
    # Expected gene lengths (nt) per influenza type.
    subtype_to_influenza_gene_len = {"A":{'1-PB2': 2280, '2-PB1': 2274, '3-PA': 2151, '4-HA': 1701, '5-NP': 1497, '6-NA': 1410, '7-M': 982, '8-NS': 838}, "B":{'1-PB2': 2259, '2-PB1': 2313, '3-PA': 2178, '4-HA': 1749, '5-NP': 1683, '6-NA': 1398, '7-M': 1076, '8-NS': 1024}}
    fasta_dat = parsefasta(fname, rtype=rtype)
    print ("Number of input sequences: %i"%(len(fasta_dat)))
    amb_count = 0
    len_count = 0
    for header, sequence in fasta_dat.items():
        # GISAID header fields are pipe-delimited.
        sname, gene, iid, date, passage, subtype = header.split("|")
        gene = "%i-%s"%(standardise_gene_name[gene], gene)
        flu_type = re.search("^(A|B)", subtype).group()
        # uncount sequences with > amb_tol of amb res
        amb_res = "n" if rtype == "nuc" else "X"
        if sequence.count(amb_res)/len(sequence) > ambiguous_tol:
            amb_count += 1
            continue
        # min sequence length
        if len(sequence) < len_tol*subtype_to_influenza_gene_len[flu_type][gene]:
            len_count += 1
            continue
        date = toYearFraction(date)
        # Normalize strain names: lowercase, strip subtype tags and
        # any character outside [a-z0-9-./_].
        sname = re.sub("(\(h\dn\d\)|[^a-z0-9\-\.\/_])", "", sname.lower())
        fdat_df.append({"sname":sname, "gene":gene, "iid":iid, "subtype":subtype, "date":date, "passage":passage, "seq":sequence})
    fdat_df = pd.DataFrame.from_dict(fdat_df).set_index("iid")
    print ("Number of output sequences: %i"%(len(fdat_df)))
    print ("Removed because AMM(<%.2f)/LEN(>%.2f) = %i/%i"%(ambiguous_tol, len_tol, amb_count, len_count))
    print ("Number of unique iid: %i"%(len(set(fdat_df.index))))
    return fdat_df
def radial_trajectory(base_resolution,
                      views=1,
                      phases=None,
                      ordering='linear',
                      angle_range='full',
                      tiny_number=7,
                      readout_os=2.0):
    """Calculate a radial trajectory.
    This function supports the following 2D ordering methods:
    * **linear**: Uniformly spaced radial views. Views are interleaved if there
      are multiple phases.
    * **golden**: Consecutive views are spaced by the golden angle (222.49
      degrees if `angle_range` is `'full'` and 111.25 degrees if `angle_range` is
      `'half'`) [1]_.
    * **golden_half**: Variant of `'golden'` in which views are spaced by 111.25
      degrees even if `angle_range` is `'full'` [1]_.
    * **tiny**: Consecutive views are spaced by the n-th tiny golden angle, where
      `n` is given by `tiny_number` [2]_. The default tiny number is 7 (47.26
      degrees if `angle_range` is `'full'` and 23.63 degrees if `angle_range` is
      `'half'`).
    * **tiny_half**: Variant of `'tiny'` in which views are spaced by a half angle
      even if `angle_range` is `'full'` [2]_ (23.63 degrees for `tiny_number`
      equal to 7).
    * **sorted**: Like `golden`, but views within each phase are sorted by their
      angle in ascending order. Can be an alternative to `'tiny'` ordering in
      applications where small angle increments are required.
    This function also supports the following 3D ordering methods:
    * **sphere_archimedean**: 3D radial trajectory ("koosh-ball"). The starting
      points of consecutive views trace an Archimedean spiral trajectory along
      the surface of a sphere, if `angle_range` is `'full'`, or a hemisphere, if
      `angle_range` is `'half'` [3]_. Views are interleaved if there are multiple
      phases.
    Args:
      base_resolution: An `int`. The base resolution, or number of pixels in the
        readout dimension.
      views: An `int`. The number of radial views per phase.
      phases: An `int`. The number of phases for cine acquisitions. If `None`,
        this is assumed to be a non-cine acquisition with no time dimension.
      ordering: A `string`. The ordering type. Must be one of: `{'linear',
        'golden', 'tiny', 'sorted', 'sphere_archimedean'}`.
      angle_range: A `string`. The range of the rotation angle. Must be one of:
        `{'full', 'half'}`. If `angle_range` is `'full'`, the full circle/sphere
        is included in the range. If `angle_range` is `'half'`, only a
        semicircle/hemisphere is included.
      tiny_number: An `int`. The tiny golden angle number. Only used if `ordering`
        is `'tiny'` or `'tiny_half'`. Must be >= 2. Defaults to 7.
      readout_os: A `float`. The readout oversampling factor. Defaults to 2.0.
    Returns:
      A `Tensor` of type `float32` and shape `[views, samples, 2]` if `phases` is
      `None`, or of shape `[phases, views, samples, 2]` if `phases` is not `None`.
      `samples` is equal to `base_resolution * readout_os`. The units are
      radians/voxel, ie, values are in the range `[-pi, pi]`.
    References:
      .. [1] Winkelmann, S., Schaeffter, T., Koehler, T., Eggers, H. and
        Doessel, O. (2007), An optimal radial profile order based on the golden
        ratio for time-resolved MRI. IEEE Transactions on Medical Imaging,
        26(1): 68-76, https://doi.org/10.1109/TMI.2006.885337
      .. [2] Wundrak, S., Paul, J., Ulrici, J., Hell, E., Geibel, M.-A.,
        Bernhardt, P., Rottbauer, W. and Rasche, V. (2016), Golden ratio sparse
        MRI using tiny golden angles. Magn. Reson. Med., 75: 2372-2378.
        https://doi.org/10.1002/mrm.25831
      .. [3] Wong, S.T.S. and Roos, M.S. (1994), A strategy for sampling on a
        sphere applied to 3D selective RF pulse design. Magn. Reson. Med.,
        32: 778-784. https://doi.org/10.1002/mrm.1910320614
    """
    # All trajectory types share a single builder; dispatch on the type
    # string and forward the waveform/ordering parameters unchanged.
    return _kspace_trajectory('radial',
                              {'base_resolution': base_resolution,
                               'readout_os': readout_os},
                              views=views,
                              phases=phases,
                              ordering=ordering,
                              angle_range=angle_range,
                              tiny_number=tiny_number)
def highlight_threshold(image, img_data, threshold, color=(255, 0, 0)):
    """
    Given an array of values for an image, highlights pixels whose value is greater than the given threshold.

    :param image: The image pixels (a sequence of pixel values, e.g. RGB tuples)
    :param img_data: The per-pixel values to compare against the threshold
    :param threshold: The threshold above which pixels should be highlighted
    :param color: The color to highlight pixels with
    :return: A new list of pixels, with high-value pixels replaced by ``color``
    """
    # Pair each pixel with its value; replace the pixel when the value
    # exceeds the threshold, otherwise keep it unchanged. zip() also makes
    # this robust to the two sequences differing slightly in length.
    return [color if value > threshold else pixel
            for pixel, value in zip(image, img_data)]
import numpy
def error_norm(q_numerical, q_exact, dx, p=2):
    """
    Discrete p-norm of the difference between two solutions.

    Parameters
    ----------
    q_numerical : numpy vector
        The numerical solution, an array size (N,) or (N,1)
    q_exact : numpy vector
        The exact solution, whose size matches q_numerical
    dx : float
        The relevant grid spacing
    p : int or 'inf', optional
        The norm. The default is 2.

    Returns
    -------
    error_value : float
        (dx * sum(|q_n - q_e|**p))**(1/p), or max(|q_n - q_e|) when
        p == 'inf'.
    """
    abs_diff = numpy.abs(q_numerical - q_exact)
    if p == 'inf':
        # Infinity norm: largest pointwise discrepancy, no dx weighting.
        return numpy.max(abs_diff)
    return (dx * numpy.sum(abs_diff ** p)) ** (1 / p)
def _validate_voter(request, end_field):
    """Returns: voter, election, denied_reason, denied_detail (all optional).

    Resolves the Voter for the current election either from a ``?token=``
    query parameter or from the logged-in user. ``end_field`` names the
    election attribute holding the relevant deadline; past that time the
    function behaves as if there were no election at all.
    """
    # TODO: Deprecate use of token here; can auto-generate log-in tokens instead.
    token = request.GET.get("token")
    election = get_current_election()
    voter = None
    if not election:
        # No active election: nothing to deny, nothing to validate.
        return voter, election, None, None
    now = timezone.now()
    if now > getattr(election, end_field, now):
        # The period named by end_field is over; treat as no election.
        # (If the attribute is missing, getattr defaults to `now`, so the
        # comparison is False and we fall through.)
        return None, None, None, None
    if token:
        try:
            voter = Voter.objects.get(token=token)
        except Voter.DoesNotExist:
            return None, election, "Bad Token", "Voter not found."
        # A token belonging to a different account must not work while
        # someone else is logged in.
        if request.user.is_authenticated and voter.user != request.user:
            return None, election, "Bad Token", "Token is not for the logged in user."
        if voter.election != election:
            return None, election, "Bad Token", "URL is not for this election."
    elif request.user.is_authenticated:  # logged in, no token
        try:
            voter = Voter.objects.get(election=election, user=request.user)
        except Voter.DoesNotExist:
            return None, election, "Not a Registered Voter", None
    else:
        return None, election, "Must be logged in or have a token.", None
    return voter, election, None, None
def flip(xyz_img):
    """
    Take an xyz_img and flip its world from LPS / RAS to
    RAS / LPS.
    >>> data = np.random.standard_normal((30,40,50,5))
    >>> metadata = {'name':'John Doe'}
    >>> lps_im = XYZImage(data, np.diag([3,4,5,1]), 'ijkt', metadata)
    >>> lps_im.xyz_transform
    XYZTransform(
       function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
       function_range=CoordinateSystem(coord_names=('x+LR', 'y+PA', 'z+SI'), name='world', coord_dtype=float64),
       affine=array([[ 3.,  0.,  0.,  0.],
                     [ 0.,  4.,  0.,  0.],
                     [ 0.,  0.,  5.,  0.],
                     [ 0.,  0.,  0.,  1.]])
    )
    >>> ras_im = flip(lps_im)
    >>> ras_im.xyz_transform
    XYZTransform(
       function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
       function_range=CoordinateSystem(coord_names=('x+RL', 'y+AP', 'z+SI'), name='world', coord_dtype=float64),
       affine=array([[-3.,  0.,  0.,  0.],
                     [ 0., -4.,  0.,  0.],
                     [ 0.,  0.,  5.,  0.],
                     [ 0.,  0.,  0.,  1.]])
    )
    >>> print np.allclose(ras_im.get_data(), lps_im.get_data())
    True
    >>> print ras_im.metadata == lps_im.metadata
    True
    >>>
    >>> print flip(ras_im) == lps_im
    True
    >>>
    """
    # If the image is currently LPS, the flipped output is RAS (lps=False),
    # and vice versa.
    if xyz_img.reference.coord_names == lps_output_coordnames:
        flipped_lps = False
    else:
        flipped_lps = True
    xyz_transform = xyz_img.xyz_transform
    # Negating the first two rows of the affine flips the x and y world
    # axes (L<->R, P<->A); the z axis and voxel data are left unchanged.
    new_xyz_transform_matrix = np.dot(np.diag([-1,-1,1,1]),
                                      xyz_transform.affine)
    return XYZImage(xyz_img._data, new_xyz_transform_matrix,
                    xyz_img.axes.coord_names,
                    metadata=xyz_img.metadata,
                    lps=flipped_lps)
def pivoting_remove(z, rule):
    """Choose which active constraint will be replaced.

    :param z: 1-D array of values (e.g. Lagrange multipliers); negative
        entries are candidates for removal.
    :param rule: ``None`` for the most-negative entry (Dantzig-style
        pivoting), or ``'bland'`` for the first negative entry (Bland's
        anti-cycling rule).
    :return: Index of the constraint to remove.
    :raises ValueError: If ``rule`` is not recognised. (Previously this
        tried to ``raise`` a plain string, which is itself a TypeError
        on Python 3.)
    """
    if rule is None:
        # Most negative entry.
        k = np.argmin(z)
    elif rule.lower() == 'bland':
        # Smallest index whose entry is negative (prevents cycling).
        k = np.min(np.nonzero(z < 0)[0])
    else:
        raise ValueError('Undefined pivoting rule')
    return k
from tqdm import tqdm
def train_network(model, optimizer, train_loader, lss_fc):
    """Train the network for one epoch and return the mean training loss.

    Note: the original ``import tqdm`` made ``tqdm(...)`` call the module
    object (a TypeError); importing the class fixes that. The original
    ``-> None`` annotation was also wrong — this function returns the
    average loss tensor.

    :param model: Network to train; inputs are moved to CUDA before the call.
    :param optimizer: Optimizer updating the model parameters.
    :param train_loader: Iterable of ``(input_tensor, original)`` batches.
    :param lss_fc: Loss function forwarded to ``crop_mse`` as ``mse``.
    :return: Mean of the detached per-batch losses over the epoch.
    """
    train_losses = []
    for batch in tqdm(train_loader, total=len(train_loader)):
        optimizer.zero_grad()
        input_tensor, original = batch
        input_tensor = input_tensor.to('cuda')
        out = model(input_tensor)
        # crop_mse masks the loss using channel 1 of the input tensor.
        loss = crop_mse(original=original,
                        out=out,
                        mask=input_tensor[:, 1],
                        mse=lss_fc)
        loss.backward()
        optimizer.step()
        train_losses.append(loss.detach())
    return sum(train_losses) / len(train_losses)
def get_class(cls):
    """Return the TestmoduleVisitor report for *cls*'s defining module."""
    visitor = TestmoduleVisitor()
    visitor.visit(get_ast(cls.__module__))
    return visitor._classes[cls.__name__]
def show_system_timezone(
    enode,
    _shell='vtysh',
    _shell_args=None
):
    """
    Display system timezone information

    This function runs the following vtysh command:

    ::

        # show system timezone

    :param enode: Engine node to run the command on.
    :param str _shell: shell to be selected
    :param dict _shell_args: low-level shell API arguments; defaults to
     ``{'matches': None, 'newline': True, 'timeout': None,
     'connection': None}``
    :return: A dictionary as returned by
     :func:`topology_lib_vtysh.parser.parse_show_system_timezone`
    """
    # Build the default per call instead of using a mutable default
    # argument, which would be shared (and mutable) across all calls.
    if _shell_args is None:
        _shell_args = {
            'matches': None,
            'newline': True,
            'timeout': None,
            'connection': None
        }

    cmd = [
        'show system timezone'
    ]

    shell = enode.get_shell(_shell)
    shell.send_command(
        (' '.join(cmd)).format(**locals()), **_shell_args
    )

    result = shell.get_response(
        connection=_shell_args.get('connection', None)
    )

    return parse_show_system_timezone(result)
import numpy
def summarize_list(values):
    """
    Takes a list of integers such as [1,2,3,4,6,7,8] and summarises it as a string "1-4,6-8"

    :param values: iterable of integers (any order, runs may be broken)
    :return: comma-separated string of runs, single values left as-is
    """
    ordered = numpy.array(sorted(values))
    # Break the sorted sequence wherever two neighbours differ by more
    # than one; each resulting piece is a maximal consecutive run.
    break_points = numpy.where(numpy.diff(ordered) > 1)[0] + 1
    pieces = []
    for run in numpy.split(ordered, break_points):
        if not len(run):
            continue
        if len(run) > 1:
            pieces.append(f'{run[0]}-{run[-1]}')
        else:
            pieces.append(f'{run[0]}')
    return ','.join(pieces)
import torch
def enforce_size(img, depth, instances, new_w, new_h):
    """ Ensures that the image is the given size without distorting aspect ratio.

    The image (and depth map / instance masks) is resized to fit inside
    (new_h, new_w) preserving aspect ratio, then zero-padded on the right
    and bottom up to exactly (new_h, new_w).

    :param img: tensor of shape (C, H, W) — shape taken from ``img.size()``
    :param depth: depth map resized/padded the same way as ``img``
        (presumably (C', H, W) — confirm with callers)
    :param instances: dict with 'masks' (one channel per object) and
        'boxes' ((N, 4) in x1,y1,x2,y2 order, judging by the [0,2]/[1,3]
        scaling below)
    :param new_w: target width
    :param new_h: target height
    :return: (img, depth, instances) at the requested size
    """
    # NOTE(review): `F` is presumably torch.nn.functional, imported
    # elsewhere in this module — confirm.
    with torch.no_grad():
        _, h, w = img.size()

        if h == new_h and w == new_w:
            return img, depth, instances

        # Resize the image so that it fits within new_w, new_h
        w_prime = new_w
        h_prime = h * new_w / w

        if h_prime > new_h:
            w_prime *= new_h / h_prime
            h_prime = new_h

        w_prime = int(w_prime)
        h_prime = int(h_prime)

        # Do all the resizing
        img = F.interpolate(img.unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
        img.squeeze_(0)

        depth = F.interpolate(depth.unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
        depth.squeeze_(0)

        # Act like each object is a color channel
        instances['masks'] = F.interpolate(instances['masks'].unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
        instances['masks'].squeeze_(0)

        # Scale bounding boxes (this will put them in the top left corner in the case of padding)
        instances['boxes'][:, [0, 2]] *= (w_prime / new_w)
        instances['boxes'][:, [1, 3]] *= (h_prime / new_h)

        # Finally, pad everything to be the new_w, new_h
        pad_dims = (0, new_w - w_prime, 0, new_h - h_prime)
        img = F.pad( img, pad_dims, mode='constant', value=0)
        depth = F.pad(depth, pad_dims, mode='constant', value=0)
        instances['masks'] = F.pad(instances['masks'], pad_dims, mode='constant', value=0)

        return img, depth, instances
from scipy import stats as scipy_stats
def update_V_softmax(V,B,T,O,R,gamma,eps=None,PBVI_temps=None,
    max_iter=100,verbose=False,n_samps=100,seed=False):
    """
    Softmax (temperature-smoothed) point-based value iteration update.

    Fixes vs the original: ``import stat`` (the stdlib file-mode module)
    was shadowing the intended ``scipy.stats`` — ``stat.norm.logpdf`` can
    only be SciPy; and the non-convergence warning applied ``%`` to a
    format string with no placeholder (a TypeError when verbose=True).

    inputs:
        V (list):
            V[0]: n_B x n_S array of alpha-vector values for each belief
            V[1]: n_B array, denoting which action generated each alpha-vector
        B: n_B x n_S array of belief states to be updated
        T: n_S x n_S x n_A transition model
        O: 2 x O_dims x n_S x n_A array; O[0] holds observation means and
           O[1] observation standard deviations (Gaussian observation model)
        R: n_S x n_A reward model
        gamma: discount factor
    optional inputs:
        eps: convergence tolerance on the alpha-vectors; defaults to 0.01*n_S
        PBVI_temps: list of 3 softmax temperatures; defaults to [.01, .01, .01]
        max_iter: maximum number of update sweeps
        verbose: print a warning when the update did not converge
        n_samps: number of observation samples drawn per (state, action)
        seed: if True, fix the numpy RNG seed (for testing)
    outputs:
        V (same as input), updated
    """
    if PBVI_temps is None:
        temp1=.01
        temp2=.01
        temp3=.01
    else:
        temp1 = PBVI_temps[0]
        temp2 = PBVI_temps[1]
        temp3 = PBVI_temps[2]
    if seed: #testing
        np.random.seed(711)
    n_B = np.shape(B)[0]
    n_V = np.shape(B)[0]
    n_A = np.shape(R)[1]
    n_S = np.shape(R)[0]
    O_dims = np.shape(O)[1]
    O_means = O[0]; O_sds = O[1] #O_dims,n_S,n_A
    if eps is None:
        eps = 0.01*n_S
    #### no reason to resample O each iteration; so sample obs beforehand and cache
    O_samps = np.random.normal(0,1,(n_samps,O_dims,n_S,n_A)) #K x D x S x A
    O_samps = O_means + O_sds*O_samps
    #precompute and cache b^ao for sampled observations...
    O_logprob = np.sum(scipy_stats.norm.logpdf(O_samps[:,:,:,:,None], #K x D x S x A x 1
        np.transpose(O_means,[0,2,1])[:,None,:,:],
        np.transpose(O_sds,[0,2,1])[:,None,:,:],),1)
    #K: # samples drawn
    log_B = np.log(B+1e-16) # B x S
    log_T = np.log(T+1e-16) #S' x S x A
    log_TB = logsumexp(log_B[None,:,:,None] + log_T[:,None,:,:],2)# S' x S
    log_bao = np.transpose(O_logprob[:,:,None,:,:] + log_TB[None,:,:,:,None],[2,0,3,1,4])
    b_ao = np.exp(log_bao - logsumexp(log_bao,4)[:,:,:,:,None]) #B x K x A x S' x S
    for ct in range(max_iter):
        old_V = np.array(V[0],copy=True)
        alpha_bao = np.einsum('ab,cdefb->acdef',V[0],b_ao)/temp1 #V x B x K x A x S'
        #softmax
        exp_alpha_bao = np.exp(alpha_bao - np.max(alpha_bao,0)) #V x B x K x A x S'
        alpha_bao_probs = exp_alpha_bao/np.sum(exp_alpha_bao,0) #V x B x K x A x S'
        #soft mean
        prob_meta_obs = np.mean(alpha_bao_probs,axis=2) #V x B x A x S'
        alpha_aO_alpha2 = np.einsum('ab,bcd,efdb->efdac',V[0],T,prob_meta_obs) #V' x B x A x V x S
        B_alpha_aO_alpha2 = np.einsum('ab,cadeb->cade',B,alpha_aO_alpha2)/temp2 #V' x B x A x V
        #softmax
        exp_aB = np.exp(B_alpha_aO_alpha2 - np.max(B_alpha_aO_alpha2,3)[:,:,:,None]) #V' x B x A x V
        aB_probs = exp_aB/np.sum(exp_aB,3)[:,:,:,None] #V' x B x A x V
        #soft mean
        avg_B_alpha_aO_alpha2 = np.sum(alpha_aO_alpha2 * aB_probs[:,:,:,:,None], axis=3) #V' x B x A x S
        alpha_ab = R.T + gamma*np.einsum('abcd->bcd',avg_B_alpha_aO_alpha2) #B x A x S
        alpha_ab_B = np.einsum('ab,acb->ac',B,alpha_ab)/temp3 #B x A
        #softmax
        exp_alpha_ab_B = np.exp(alpha_ab_B - np.max(alpha_ab_B,1)[:,None]) #B x A
        alpha_ab_B_probs = exp_alpha_ab_B/np.sum(exp_alpha_ab_B,1)[:,None] #B x A
        #soft mean
        avg_alpha_abB = np.sum(alpha_ab * alpha_ab_B_probs[:,:,None], 1) #B x S
        V[0] = avg_alpha_abB #B x S; alpha-vecs
        V[1] = alpha_ab_B_probs #B x A; action probs for each alpha-vec
        diff = np.sum(np.abs(V[0]-old_V))
        #check for convergence
        if diff < eps:
            return V
    if verbose:
        print("didn't converge during update :( diff: %f" % np.sum(np.abs(V[0]-old_V)))
    return V
def scaled_location_plot(yname, yopt, scaled_res):
    """
    Plot the scaled location, given the dependant values and scaled residuals.

    :param str yname: Name of the Y axis
    :param ndarray yopt: Estimated values
    :param ndarray scaled_res: Scaled residuals

    :returns: the handles for the data and the smoothed curve
    """
    # NOTE(review): plot/title/xlabel/etc. and sqrt/abs/arange/gamma/pi are
    # presumably pulled in by a pylab-style star import elsewhere — confirm.
    # sqrt(|residual|) is the conventional y-axis of a scale-location plot.
    scr = sqrt(abs(scaled_res))
    p_scaled = plot(yopt, scr, '+')[0]
    # Non-parametric smoother of the residual spread vs the fitted values.
    av = NonParamRegression(yopt, scr)
    av.fit()
    xr = arange(yopt.min(), yopt.max(), (yopt.max() - yopt.min()) / 1024)
    rr = av(xr)
    p_smooth = plot(xr, rr, 'g')[0]
    # Reference line: E[sqrt(|N(0,1)|)] = 2^(1/4) * Gamma(3/4) / sqrt(pi).
    expected_mean = 2 ** (1 / 4) * gamma(3 / 4) / sqrt(pi)
    plot([yopt.min(), yopt.max()], [expected_mean, expected_mean], 'r--')
    title('Scale-location')
    xlabel(yname)
    ylabel('$|$Normalized residuals$|^{1/2}$')
    gca().set_yticks([0, 1, 2])
    return [p_scaled, p_smooth]
def importNoiseTerms(filename):
    """ Imports noise data from an FWH file; the returned data is a list of length
    nProbes filled with (nTime,3) arrays

    Header lines look like ``# x <v> <v> ...`` (and ``y``, ``z``,
    ``deltaT``); data lines hold the time followed by 3 or 5
    parenthesised values per probe.

    :param filename: path to the FWH noise file
    :return: tuple ``(time, field, locations, deltaT)``: an (nTime,) time
        array, a list of nProbes arrays of shape (nTime, 3) or (nTime, 5),
        an (nProbes, 3) array of probe coordinates, and the per-probe
        deltaT array.
    """
    deltaT = []
    # 'with' guarantees the file is closed even if a malformed line raises.
    with open(filename, 'r') as f:
        while True:
            line = f.readline()  # read line by line
            if line == '':  # check for EoF
                break
            if (line != '\n'):  # skip empty lines
                if line.split()[1] == 'x':
                    # get the number of probes and allocate memory
                    noProbes = len(line.split())-2
                    time = np.array([])
                    field = -1
                    # process the probe x-locations
                    x = np.array([float(i) for i in line.split()[2:noProbes+2]])
                elif line.split()[1] == 'y':
                    y = np.array([float(i) for i in line.split()[2:noProbes+2]])
                elif line.split()[1] == 'z':
                    z = np.array([float(i) for i in line.split()[2:noProbes+2]])
                elif line.split()[1] == 'deltaT':
                    deltaT = np.array([float(i) for i in line.split()[2:noProbes+2]])
                elif line.split()[0] != '#':
                    line = (line.replace('(',' ').replace(')',' ').replace(',',' ')).split()
                    # check if saving 3 or 5 terms (decided on the first data line)
                    if field == -1 and ((len(line)-1)/noProbes)%3 == 0:
                        field = [np.zeros((0,3)) for i in range(noProbes)]
                    elif field == -1 and ((len(line)-1)/noProbes)%5 == 0:
                        field = [np.zeros((0,5)) for i in range(noProbes)]
                    # convert all to floats
                    vals = [float(s) for s in line]
                    time = np.append(time, vals[0])
                    for i in range(0,noProbes):
                        if ((len(line)-1)/noProbes)%3 == 0:
                            field[i] = np.vstack([field[i],np.array([vals[(i*3+1) : (i*3+1+3)]])])
                        elif ((len(line)-1)/noProbes)%5 == 0:
                            field[i] = np.vstack([field[i],np.array([vals[(i*5+1) : (i*5+1+5)]])])
    return time, field, np.transpose(np.vstack([x,y,z])), deltaT
def line_edit_style_factory(txt_color='white', tgt_layer_color='white',
                            bg_color='#232323'):
    """Generates a string of a qss style sheet for a line edit. Colors can be
    supplied as strings of color name or hex value. If a color arg receives
    a tuple we assume it is either an rgb or rgba tuple.

    :param txt_color: Color the text of the line edit should be.
    :param tgt_layer_color: The color of the current target layer.
    :param bg_color: The color that will fill the background of the line eidit.
    :return: string of qss
    """

    def handle_rgb(color_tuple):
        """Assumes the tuple is rgba or rgb (len 4 or 3)"""
        val = ','.join([str(i) for i in color_tuple])
        if len(color_tuple) == 4:
            rgb = 'rgba({})'.format(val)
        else:
            rgb = 'rgb({})'.format(val)
        return rgb

    # Convert every tuple-valued color to its rgb()/rgba() form — the
    # docstring promises this for all three args, not only bg_color.
    if isinstance(bg_color, tuple):
        bg_color = handle_rgb(bg_color)
    if isinstance(txt_color, tuple):
        txt_color = handle_rgb(txt_color)
    if isinstance(tgt_layer_color, tuple):
        tgt_layer_color = handle_rgb(tgt_layer_color)

    style = '''
            QTextEdit,
            QLineEdit {
                border-radius: 11px;
                border: 1px solid transparent;
                background-color: %s;
                color: %s
            }

            QTextEdit:hover,
            QLineEdit:hover {
                border: 1px solid %s
            }

            QTextEdit:focus,
            QLineEdit:focus {
                border: 2px solid %s
            }
            ''' % (bg_color, txt_color, tgt_layer_color,
                   tgt_layer_color)
    return style
import struct
import random
import secrets
def create_key(key_len):
    """ Generates a random hexadecimal key.

    Rewritten to use the ``secrets`` module, which draws from the OS
    CSPRNG and is the recommended source for security-sensitive tokens.
    The original Python 2 code (``.encode('hex')``, float division from
    ``key_len/2``) no longer runs on Python 3, and ``random`` is not a
    cryptographically secure generator.

    - key_len -- desired length of the returned hex string
    :return: a string of exactly ``key_len`` lowercase hex characters
    """
    # token_hex(n) yields 2*n hex chars; generate enough for odd lengths
    # and truncate to the requested size.
    return secrets.token_hex((key_len + 1) // 2)[:key_len]
import os
import logging
def connectToPostgres():
    """
    Connect to the 'rulemonitor' PostgreSQL database and return the
    open connection. Host, user and password come from the
    POSTGRES_HOST / POSTGRES_USER / POSTGRES_PASSWORD environment
    variables, with hard-coded fallbacks.

    If rulemonitor database does not exist yet:
    $ initdb /home/rulemonitor/postgres/data
    $ pg_ctl -D /home/rulemonitor/postgres/data -l /home/rulemonitor/postgres/log
    $ createdb rulemonitor
    """
    # NOTE(review): default credentials are hard-coded here; consider
    # requiring the environment variables instead of shipping a password.
    pghost = os.getenv("POSTGRES_HOST", "192.168.5.83")
    pguser = os.getenv("POSTGRES_USER", "rulemonitor")
    pgpass = os.getenv("POSTGRES_PASSWORD", "BSJYngTW4k")
    # psycopg2 is presumably imported elsewhere in this module.
    conn = psycopg2.connect(dbname='rulemonitor', user=pguser, host=pghost, password=pgpass)
    logging.info("Connected to Postgres")
    return conn
import re
def ipv6_from_string(string: str) -> netaddr.IPSet:
    """
    Takes a string and extracts all valid IPv6 Addresses as a SET of Strings
    Uses the validate_ip helper function to achieve.

    :param string: arbitrary text possibly containing IPv6 addresses
    :return: a netaddr.IPSet built from the candidates that pass validate_ip
    """
    # Monolithic pattern covering the abbreviated IPv6 forms (full,
    # '::'-compressed, IPv4-mapped, optional zone index and prefix length).
    ipv6_regex = re.compile(
        '(?<![a-zA-Z\d\.])((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?(\/[\d][\d]?[\d]?|1([01][0-9]|2[0-8]))?|(\.(\d{1,3}))(?<![a-zA-Z\d])')
    potential_ipv6s = re.findall(ipv6_regex, string)
    valid_ipv6s = []
    for ipv6 in potential_ipv6s:
        # NOTE(review): findall returns a tuple of all capture groups; this
        # joins group 0 (whole address) with group 75 (a trailing '.ddd'
        # fragment). The magic index 75 is tied to the exact group layout of
        # the pattern above — re-verify it if the regex is ever edited.
        ipv6 = ipv6[0] + ipv6[75]
        if validate_ip(ipv6) is True:
            valid_ipv6s.append(ipv6)
    return netaddr.IPSet(valid_ipv6s)
from typing import Tuple
from typing import List
def get_feature_location(
        feature_text: str) -> Tuple[int, int, str, List, bool, bool]:
    """
    Parse the location of a GenBank feature (e.g. ``complement(join(...))``)
    into start/end/strand plus per-region detail.

    Args:
        feature_text: endswith '\n'
            For example:
            '     CDS             complement(join(<360626..360849,360919..360948,
            '                     361067..361220,361292..361470,361523..>361555))
            '                     /gene="rIIA"
            '                     /locus_tag="T4p001"
            '                     /db_xref="GeneID:1258593"
    Returns:
        start: int
            The example would be 12
        end: int
            The example would be 2189
        strand: str, '+' or '-'
            The example would be '-'
        regions: list of tuple (int, int, str)
            Indicating start, end, strand of each region (intron)
            The example would be [(12, 2189, '-')]
        partial_start: bool
        partial_end: bool
    """
    locstr = get_location_string(feature_text)
    # An outer complement(...) flips the default strand of every region.
    if locstr.startswith('complement('):
        # Remove 'complement(' and ')'
        locstr = locstr[len('complement('):-1]
        all_complement = True
    else:
        all_complement = False
    if locstr.startswith('join('):
        # Remove 'join(' and ')'
        locstr = locstr[len('join('):-1]
    if locstr.startswith('order('):
        # Remove 'order(' and ')'
        locstr = locstr[len('order('):-1]
    # loclist = list of strings
    #   e.g. ["complement(2853..2990)", "complement(2458..2802))"]
    loclist = locstr.split(',')
    partial_start, partial_end = False, False
    regions = []  # e.g. [(100, 200, '-'), (<300, 400, '+'), (500, >600, '+')]
    for i, s in enumerate(loclist):
        # Tell the strand: a per-region complement(...) wins, otherwise the
        # outer complement (if any), otherwise '+'.
        if s.startswith('complement('):
            # Remove 'complement(' and ')'
            s = s[len('complement('):-1]
            c = '-'  # c = strand
        elif all_complement:
            c = '-'
        else:
            c = '+'
        a, b = s.split('..') if ('..' in s) else (s, s)  # a is start, b is end
        # First start has '<' --> partial start
        if i == 0 and a.startswith('<'):
            partial_start = True
        # Last end has '>' --> partial end
        if i == len(loclist) - 1 and b.startswith('>'):
            partial_end = True
        # Strip any remaining partial markers before integer conversion.
        if a.startswith('<') or a.startswith('>'):
            a = a[1:]
        if b.startswith('<') or b.startswith('>'):
            b = b[1:]
        a, b = int(a), int(b)
        if a > b:
            a, b = b, a  # a must be < b
        regions.append((a, b, c))
    # Overall span: first region's start to last region's end; strand is
    # taken from the first region.
    start, end, strand = regions[0][0], regions[-1][1], regions[0][2]
    return start, end, strand, regions, partial_start, partial_end
import stat
import os
def isdir(path):
    """Like os.path.isdir, but raises an exception on error."""
    # os.stat raises (e.g. FileNotFoundError) instead of swallowing errors
    # the way os.path.isdir does.
    mode = os.stat(path).st_mode
    return bool(stat.S_ISDIR(mode))
import json
import logging
def _update_port_rate_limits_v1(port_name, broadcast_limit=None, broadcast_units=None,
                                multicast_limit=None, multicast_units=None, unknown_unicast_limit=None,
                                unknown_unicast_units=None, **kwargs):
    """
    Perform GET and PUT calls to update a Port's rate limits

    :param port_name: Alphanumeric name of the Port
    :param broadcast_limit: Rate limit for broadcast ingress traffic
    :param broadcast_units: Units for broadcast rate limit; should be either "kbps" (kilobits/second) or
        "pps" (packets/second)
    :param multicast_limit: Rate limit in pps for multicast ingress traffic
    :param multicast_units: Units for multicast rate limit; should be either "kbps" (kilobits/second) or
        "pps" (packets/second)
    :param unknown_unicast_limit: Rate limit in pps for unknown_unicast ingress traffic
    :param unknown_unicast_units: Units for unknown unicast rate limit; should be either "kbps" (kilobits/second) or
        "pps" (packets/second)
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: True if successful, False otherwise
    """
    port_name_percents = common_ops._replace_special_characters(port_name)
    # Fetch current config, then overwrite the whole rate_limits sub-dict:
    # any limit not passed in is dropped from the port.
    port_data = port.get_port(port_name, depth=0, selector="configuration", **kwargs)

    port_data['rate_limits'] = {}

    # Each limit is only applied when both the value and its units are given.
    if broadcast_limit is not None and broadcast_units is not None:
        port_data['rate_limits']['broadcast'] = broadcast_limit
        port_data['rate_limits']['broadcast_units'] = broadcast_units

    if multicast_limit is not None and multicast_units is not None:
        port_data['rate_limits']['multicast'] = multicast_limit
        port_data['rate_limits']['multicast_units'] = multicast_units

    if unknown_unicast_limit is not None and unknown_unicast_units is not None:
        port_data['rate_limits']['unknown-unicast'] = unknown_unicast_limit
        port_data['rate_limits']['unknown-unicast_units'] = unknown_unicast_units

    # must remove these fields from the data since they can't be modified
    port_data.pop('name', None)
    port_data.pop('origin', None)

    target_url = kwargs["url"] + "system/ports/%s" % port_name_percents
    put_data = json.dumps(port_data, sort_keys=True, indent=4)

    response = kwargs["s"].put(target_url, data=put_data, verify=False)

    if not common_ops._response_ok(response, "PUT"):
        logging.warning("FAIL: Updating rate limits for Port '%s' failed with status code %d: %s"
              % (port_name, response.status_code, response.text))
        return False
    else:
        logging.info("SUCCESS: Updating rate limits for Port '%s' succeeded"
              % port_name)
        return True
def random_geom_sum(pmf, p, low_mem=False):
    """Calculates the distribution of Z = X_1 + X_2 + ... + X_N.

    Parameters
    ----------
    pmf : array
        Probability distribution of X such that pmf[x] = Pr(X = x).
    p : float
        Probability such that N ~ geom(p), i.e. Pr(N = n) = p(1-p)^{n-1}.
    low_mem : boolean
        When True, skip storing the per-length intermediate pmfs to save
        memory; the second element of the returned tuple is then None.
        Note that the Werner-parameter calculation needs those
        intermediates, so it requires low_mem=False.

    Returns
    -------
    Tuple (pmf_out, pmfs_given_N)
        pmf_out[z] = Pr(sum^N X = z) = Pr(Z = z).
        pmf_given_N[n,z] = Pr(sum^n X = z) = Pr(Z = z | N = n),
        or None when low_mem is True.
    """
    if low_mem:
        # Memory-lean path: no intermediate table is kept.
        return get_pmf_after_prob_length_low_memory(pmf, p), None
    pmfs_given_N = get_pmfs_after_fixed_lengths(pmf)
    return get_pmf_after_prob_length(pmfs_given_N, p), pmfs_given_N
def noto_tools(default=""):
    """Local path to nototools git repo. If this is called, we require config
    to be set up."""
    path = _values.get("noto_tools", default)
    if not path:
        # Neither the config nor the caller supplied a usable path.
        raise Exception(_ERR_MSG)
    return path
def fdc_windtur_west(timestamp, sonicU, sonicV, sonicW, heading,
                     rateX, rateY, rateZ, accX, accY, accZ, lat):
    """
    Description:

        Calculates the L1 windspeed data product WINDTUR-VLW_L1 from the FDCHP
        instrument, which collects 20 minutes of data every hour. The L1 data
        consists of these values less 30 seconds from both the beginning and
        end of each 12000 point dataset.

    Implemented by:

        2014-11-17: Russell Desiderio. Initial Code
        2015-01-29: Russell Desiderio. Removed temperature from calling arguments.

    Usage:

        wind_west = fdc_windtur_west(timestamp, sonicU, sonicV, sonicW, heading,
                                     rateX, rateY, rateZ, accX, accY, accZ, lat)

            where

        wind_west = windspeed West WINDTUR-VLW_L1 [m/s], UNcorrected for magnetic variation
        timestamp = data date and time values [seconds since 1900-01-01]
        sonicU = WINDTUR-U_L0 [cm/s]; u-component of windspeed measured in the buoy
                 frame of reference
        sonicV = WINDTUR-V_L0 [cm/s]; v-component of windspeed measured in the buoy
                 frame of reference
        sonicW = WINDTUR-W_L0 [cm/s]; w-component of windspeed measured in the buoy
                 frame of reference
        heading = MOTFLUX-YAW_L0 [radians] measured by the magnetometer (NOT msrd by the gyro).
        ***NOT USED***  roll: MOTFLUX-ROLL_L0 [radians]   ***NOT USED***
        ***NOT USED***  pitch: MOTFLUX-PITCH_L0 [radians] ***NOT USED***
        rateX = MOTFLUX-ROLL_RATE_L0 [radians/s] measured by the gyro
        rateY = MOTFLUX-PITCH_RATE_L0 [radians/s] measured by the gyro
        rateZ = MOTFLUX-YAW_RATE_L0 [radians/s] measured by the gyro
        accX = MOTFLUX-ACX_L0 [9.80665 m^2/s^2] x-component of platform linear acceleration
        accY = MOTFLUX-ACY_L0 [9.80665 m^2/s^2] y-component of platform linear acceleration
        accZ = MOTFLUX-ACZ_L0 [9.80665 m^2/s^2] z-component of platform linear acceleration
        lat = latitude of instrument in decimal degrees

    References:

        OOI (2014). Data Product Specification for FDCHP Data Products. Document
            Control Number 1341-00280. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-00280_Data_Product_Spec_FDCHP_OOI.pdf)
    """
    # this data product is temperature independent
    sonicT = sonicW * np.nan
    # fdc_flux_and_wind (defined elsewhere) returns (fluxes, windspeeds);
    # only the windspeed components are needed here.
    _, windspeeds = fdc_flux_and_wind(timestamp, sonicU, sonicV, sonicW, sonicT,
                                      heading, rateX, rateY, rateZ, accX, accY,
                                      accZ, lat)
    # Second element of windspeeds is the westward component.
    wind_west = np.asarray(windspeeds[1]).flatten()

    return wind_west
def reports():
    """View reports

    Renders the static reports page template; no view data is passed in.
    """
    return render_template("reports.html")
def otr_statusbar_cb(data, item, window):
    """Update the statusbar.

    WeeChat bar-item callback: builds the OTR status string (encrypted /
    authenticated / logged) for the buffer shown in `window`, using the
    user's configured labels and colors. Returns '' for non-private
    buffers.
    """
    if window:
        buf = weechat.window_get_pointer(window, 'buffer')
    else:
        # If the bar item is in a root bar that is not in a window, window
        # will be empty.
        buf = weechat.current_buffer()

    result = ''
    if buffer_is_private(buf):
        # Look up the OTR context for this local-user/remote-user pair.
        local_user = irc_user(
            buffer_get_string(buf, 'localvar_nick'),
            buffer_get_string(buf, 'localvar_server'))

        remote_user = irc_user(
            buffer_get_string(buf, 'localvar_channel'),
            buffer_get_string(buf, 'localvar_server'))

        context = ACCOUNTS[local_user].getContext(remote_user)

        # User-configurable labels; an empty label suppresses that segment.
        encrypted_str = config_string('look.bar.state.encrypted')
        unencrypted_str = config_string('look.bar.state.unencrypted')
        authenticated_str = config_string('look.bar.state.authenticated')
        unauthenticated_str = config_string('look.bar.state.unauthenticated')
        logged_str = config_string('look.bar.state.logged')
        notlogged_str = config_string('look.bar.state.notlogged')

        bar_parts = []

        if context.is_encrypted():
            if encrypted_str:
                bar_parts.append(''.join([
                    config_color('status.encrypted'),
                    encrypted_str,
                    config_color('status.default')]))

            # Verification and logging states only apply to encrypted chats.
            if context.is_verified():
                if authenticated_str:
                    bar_parts.append(''.join([
                        config_color('status.authenticated'),
                        authenticated_str,
                        config_color('status.default')]))
            elif unauthenticated_str:
                bar_parts.append(''.join([
                    config_color('status.unauthenticated'),
                    unauthenticated_str,
                    config_color('status.default')]))

            if context.is_logged():
                if logged_str:
                    bar_parts.append(''.join([
                        config_color('status.logged'),
                        logged_str,
                        config_color('status.default')]))
            elif notlogged_str:
                bar_parts.append(''.join([
                    config_color('status.notlogged'),
                    notlogged_str,
                    config_color('status.default')]))

        elif unencrypted_str:
            bar_parts.append(''.join([
                config_color('status.unencrypted'),
                unencrypted_str,
                config_color('status.default')]))

        result = config_string('look.bar.state.separator').join(bar_parts)

        if result:
            result = '{color}{prefix}{result}'.format(
                color=config_color('status.default'),
                prefix=config_string('look.bar.prefix'),
                result=result)

    return result
def rotate_ellipse_NS(time_deg, datastruc, const):
    """Rotate ellipse major/minor axis to north/south orientation.

    Returns the (u, v) current components for tidal constituent `const` at
    phase angle(s) `time_deg` (degrees), built from the constituent's
    major/minor amplitudes, phase and inclination.
    """
    # Construct major and minor
    major, minor, pha, inc = get_constituent(const, datastruc)
    # construct current at this time
    try:
        major_current = major*np.cos(np.deg2rad(time_deg - pha))
        minor_current = minor*np.cos(np.deg2rad(time_deg - pha) - np.pi/2)
    except ValueError:
        # Broadcasting failed: add a trailing axis to time_deg and retry.
        # NOTE(review): assumes the mismatch is exactly one missing trailing
        # dimension — confirm against the caller's array shapes.
        time_deg = np.expand_dims(time_deg, 2)
        major_current = major*np.cos(np.deg2rad(time_deg - pha))
        minor_current = minor*np.cos(np.deg2rad(time_deg - pha) - np.pi/2)
    # Rotate to u and v: multiplying by exp(i*inc) rotates the (major,
    # minor) pair by the inclination angle; the real part is the first
    # (east-ish) component, the imaginary part the second.
    rotated_current = ((major_current + 1j*minor_current)
                       * np.exp(1j*np.deg2rad(inc)))
    u = np.real(rotated_current)
    v = np.imag(rotated_current)
    return u, v
import requests
def img_lookup(pid):
    """Query for object type and return correct JPG location.

    For a simple Islandora object (JPG datastream exists, HTTP 200) the
    resolved URL is returned directly. For a paged object (HTTP 404) the
    pages listing is scraped and the first page's JPG URL is built.
    Returns None when no JPG location can be determined.
    """
    r = requests.get("https://fsu.digital.flvc.org/islandora/object/{0}/datastream/JPG/view".format(pid))
    if r.status_code == 200:
        return r.url
    elif r.status_code == 404:
        # No direct JPG: assume a paged object and scrape its pages grid.
        # NOTE(review): bs4 is presumably imported elsewhere in this module.
        r2 = requests.get("https://fsu.digital.flvc.org/islandora/object/{0}/pages".format(pid))
        soup = bs4.BeautifulSoup(r2.text, 'lxml')
        # First grid item corresponds to the first page of the object.
        div = soup.find_all('div', class_="islandora-objects-grid-item")[0]
        dd = div.find('dd')
        a = dd.find('a')
        if a is not None:
            return "https://fsu.digital.flvc.org{0}/datastream/JPG/view".format(a['href'])
        else:
            return None
    else:
        return None
from datetime import datetime
def _get_midnight_date(date):
"""Return midnight date for the specified date.
Effectively, this function returns the start of the day for the
specified date.
Arguments:
date -- An arbitrary date (type: datetime.datetime)
Return: Midnight date (type: datetime.datetime)
"""
return datetime.datetime(date.year, date.month, date.day) | 165a884fd12e79f167c9818126e1e31a3b2dc8b3 | 28,384 |
def get_workflow_entrypoint(definition_class, workflow_name, workflow_version):
    """Get the entry point information from *workflow_class*.

    Convenience wrapper extracting the parameters to be returned by the
    *get_workflow* argument of :py:class:`~.GenericWorkflowWorker`.

    :param definition_class: Class which defines the workflow
    :type definition_class: child class of botoflow.workflow_definition.WorkflowDefinition
    :param str workflow_name: The name of the workflow
    :param str workflow_version: The version of the workflow

    :return: Tuple of (*definition_class*, *workflow_type*, *entrypoint_func_name*)
    """
    workflows = extract_workflows_dict([definition_class])
    return workflows[(workflow_name, workflow_version)]
def expect_types(*_pos, **named):
    """
    Preprocessing decorator that verifies inputs have expected types.

    Usage
    -----
    >>> @expect_types(x=int, y=str)
    ... def foo(x, y):
    ...    return x, y
    ...
    >>> foo(2, '3')
    (2, '3')
    >>> foo(2.0, '3')
    Traceback (most recent call last):
       ...
    TypeError: foo() expected an argument of type 'int' for argument 'x', but got float instead.  # noqa
    """
    # Expected types must be supplied as keywords (x=int); positional
    # arguments are a usage error.
    if _pos:
        raise TypeError("expect_types() only takes keyword arguments.")

    for name, type_ in iteritems(named):
        # Each expected type must be usable with isinstance(): a type or a
        # tuple of types.
        if not isinstance(type_, (type, tuple)):
            raise TypeError(
                "expect_types() expected a type or tuple of types for "
                "argument '{name}', but got {type_} instead.".format(
                    name=name, type_=type_,
                )
            )

    # valmap maps each expected type through _expect_type (a per-argument
    # validator factory defined elsewhere), and preprocess installs the
    # resulting checks on the decorated function's arguments.
    return preprocess(**valmap(_expect_type, named))
def squash_by(child_parent_ids, *attributes):
    """Squash a child-parent relationship.

    Arguments
    ---------
    child_parent_ids - array of ids (unique values that identify the parent)
    *attributes - other arrays that need to follow the sorting of ids

    Returns
    -------
    child_parents_idx - an array of len(child) which points to the index of
                        parent
    parent_ids - len(parent) of the ids
    *parent_attrs - len(parent) of the other attributes
    """
    # np.unique gives the sorted parent ids, the index of each parent's
    # first occurrence, and for every child the index of its parent.
    parent_ids, first_occurrence, child_parents_idx = np.unique(
        child_parent_ids, return_index=True, return_inverse=True)
    parent_attrs = [attribute[first_occurrence] for attribute in attributes]
    return child_parents_idx, parent_ids, parent_attrs
import random
import math
def _generate_quantsets(num_vars, num_qsets, ratio):
    """
    _generate_quantsets(num_vars : int,
                        num_qsets : int,
                        ratio : float)
    return (quantsets : list)

    Generate ``num_qsets`` random, strictly alternating quantifier sets over
    the variables ``1..num_vars`` and return them as a list of lists, each of
    the form ``[quantifier, var, var, ...]``.

    ``ratio`` is the desired fraction of existential variables; ``None``
    means a random split is chosen. As a side effect every variable is
    registered in the module-level caches: ``_vcache[var]`` holds
    ``[quantifier, scope level, occurrence count]`` and each variable is
    appended to ``_qcache[quantifier][UNUSED]``.
    """
    global _qcache, _vcache, _options
    quantsets = []
    quantifiers = [UNIVERSAL, EXISTENTIAL]
    # Per-quantifier bookkeeping: number of sets, total variables, and
    # variables still to be placed.
    num_sets = {EXISTENTIAL : 0, UNIVERSAL: 0}
    _num_vars = {EXISTENTIAL : 0, UNIVERSAL : 0}
    rem_vars = {EXISTENTIAL : 0, UNIVERSAL : 0}
    # prevent universal quantset at innermost scope, would be removed anyway
    # by applying forall reduction
    if _options.reduce:
        # number of quantifier sets is even -> start with UNIVERSAL
        # otherwise with EXISTENTIAL
        qindex = num_qsets % 2
    else:
        qindex = random.randint(0, 1)
    # special case
    if ratio != None:
        # if all variables have to be universal -> only one universal
        # quantifier set exists
        if ratio == 0.0:
            qindex = 0
        # if all variables have to be existential -> only one existential
        # quantifier set exists
        elif ratio == 1.0:
            qindex = 1
    # if only one quantifier set is given, change ratio in order to have only
    # existential or universal variables
    if num_qsets == 1:
        if qindex == 1:
            ratio = 1.0
        else:
            ratio = 0.0
    # calculate number of existential and universal quantifier sets
    if num_qsets % 2 == 0: # even number of quantifier sets
        # NOTE(review): "/" yields a float on Python 3; the "== 1" checks and
        # random.randint() calls below suggest integer division ("//") was
        # intended — verify against the target Python version.
        num_sets[EXISTENTIAL] = num_sets[UNIVERSAL] = num_qsets / 2
    else:
        # Odd count: the starting quantifier gets the extra set.
        if quantifiers[qindex] == EXISTENTIAL:
            num_sets[EXISTENTIAL] = math.floor(num_qsets / 2) + 1
            num_sets[UNIVERSAL] = num_sets[EXISTENTIAL] - 1
        else:
            num_sets[UNIVERSAL] = math.floor(num_qsets / 2) + 1
            num_sets[EXISTENTIAL] = num_sets[UNIVERSAL] - 1
    assert(num_sets[EXISTENTIAL] > 0 or num_sets[UNIVERSAL] > 0)
    assert(num_sets[EXISTENTIAL] + num_sets[UNIVERSAL] == num_qsets)
    # calculate number of existential and universal variables
    if ratio != None:
        if ratio > 0.0 and ratio < 1.0:
            # there has to be at least 1 existential variable if given ratio is
            # greater 0.0 and less than 1.0
            _num_vars[EXISTENTIAL] = max(1, math.floor(num_vars * ratio))
        else:
            # special case: ratio is 0.0 or 1.0 -> all variables are either
            # existential or universal
            _num_vars[EXISTENTIAL] = math.floor(num_vars * ratio)
    # just use a random number of existential variables
    else:
        # we need at least num_sets[EXISTENTIAL] and at most num_sets[UNIVERSAL]
        # existential variables in order to be sure that we always have enough
        # variables for the specified amount of quantifier sets
        _num_vars[EXISTENTIAL] = random.randint(num_sets[EXISTENTIAL],
                                                num_vars - num_sets[UNIVERSAL])
    # remaining number of variables are universal
    _num_vars[UNIVERSAL] = num_vars - _num_vars[EXISTENTIAL]
    rem_vars = _num_vars.copy()
    assert(_num_vars[EXISTENTIAL] + _num_vars[UNIVERSAL] == num_vars)
    assert(num_sets[EXISTENTIAL] + num_sets[UNIVERSAL] == num_qsets)
    # variables not yet used in quantifier sets
    vars = [v for v in range(1, num_vars + 1)]
    # Build the sets outermost-first, alternating quantifiers each round.
    while num_sets[EXISTENTIAL] > 0 or num_sets[UNIVERSAL] > 0:
        qset = []
        quantifier = quantifiers[qindex]
        # add quantifier to set
        qset.append(quantifier)
        # determine number of variables of new quantifier set
        if num_sets[quantifier] == 1: # last quantifier set
            vars_per_qset = rem_vars[quantifier]
        else:
            vars_per_qset = random.randint(1, int(rem_vars[quantifier] /
                                                  num_sets[quantifier]))
        rem_vars[quantifier] -= vars_per_qset
        num_sets[quantifier] -= 1
        assert(rem_vars[quantifier] >= 0)
        # add random variables to quantifier set
        for i in range(vars_per_qset):
            assert(len(vars) > 0)
            rand_index = random.randint(0, len(vars) - 1) % len(vars)
            assert(rand_index >= 0)
            assert(rand_index < len(vars))
            var = vars.pop(rand_index)
            # cache variable information (quantifier, scope level, occurrences)
            _vcache[var] = [quantifier, len(quantsets), 0]
            # mark variable as not used yet
            _qcache[quantifier][UNUSED].append(var)
            # add variable to quantifier set
            qset.append(var)
        quantsets.append(qset)
        # set next quantifier
        qindex = (qindex + 1) & 1
    # All variables and sets must have been consumed exactly.
    assert(rem_vars[EXISTENTIAL] == 0)
    assert(rem_vars[UNIVERSAL] == 0)
    assert(num_sets[EXISTENTIAL] == 0)
    assert(num_sets[UNIVERSAL] == 0)
    assert(len(vars) == 0)
    assert(len(quantsets) == num_qsets)
    assert(len(_vcache) == num_vars)
    assert(len(_qcache[EXISTENTIAL][UNUSED]) + \
           len(_qcache[UNIVERSAL][UNUSED]) == num_vars)
    return quantsets
import argparse
def get_parser():
    """
    Build the command line argument parser for the Modbus slave endpoint.

    :returns: a parser with command line arguments
    :rtype: argparse.ArgumentParser
    """
    arg_parser = argparse.ArgumentParser(description="Modbus Slave Endpoint.")
    # Valid ports are the serial ports detected on this machine plus the
    # two network transports.
    available_ports = SerialPort.list_serial_ports() + ['tcp:502', 'udp:5020']
    arg_parser.add_argument('-t', '--template', dest='template',
                            default='DEFAULT',
                            help="the template file to use")
    arg_parser.add_argument('-p', '--port', dest='port',
                            default=PORT_DEFAULT, choices=available_ports,
                            help="tcp:502, udp:5020, or a USB/serial port name")
    arg_parser.add_argument('-b', '--baud', dest='baudrate',
                            default=9600, type=int,
                            choices=[2400, 4800, 9600, 19200, 38400, 57600,
                                     115200],
                            help="baud rate (``int`` default 9600)",
                            metavar="{2400..115200}")
    arg_parser.add_argument('-m', '--mode', dest='mode',
                            default=None, choices=['rtu', 'ascii', 'tcp'],
                            help="Modbus framing mode RTU, ASCII or TCP")
    arg_parser.add_argument('--logfile', default=None,
                            help="the log file name with optional extension (default extension .log)")
    arg_parser.add_argument('--logsize', type=int, default=5,
                            help="the maximum log file size, in MB (default 5 MB)")
    arg_parser.add_argument('--debug', action='store_true',
                            help="enable tick_log debug logging (default OFF)")
    return arg_parser
def encoder(src_embedding, src_sequence_length):
    """Encoder: bidirectional GRU.

    Runs one forward and one reversed GRU pass over the source embeddings,
    then concatenates the per-step outputs along the feature axis and the
    final states along the hidden axis.
    """
    def _run_direction(reverse):
        # A fresh GRU cell per direction, both with the same hidden size.
        cell = layers.GRUCell(hidden_size=hidden_dim)
        return layers.rnn(
            cell=cell,
            inputs=src_embedding,
            sequence_length=src_sequence_length,
            time_major=False,
            is_reverse=reverse)

    fwd_output, fwd_state = _run_direction(False)
    bwd_output, bwd_state = _run_direction(True)
    encoder_output = layers.concat(
        input=[fwd_output, bwd_output], axis=2)
    encoder_state = layers.concat(input=[fwd_state, bwd_state], axis=1)
    return encoder_output, encoder_state
def _class_search_post_url_from(absolute_url, form):
    """Resolve the absolute URL to which the form's POST request is sent.

    Raises ValueError when the form does not declare the POST method.
    """
    submit_method = form.get(HTTP_METHOD)
    if submit_method != POST:
        raise ValueError("Expected POST form submission method; Got "+repr(submit_method))
    # The form's action may be relative; resolve it against the page URL.
    return urljoin(absolute_url, form.get(ACTION))
def pcmh_2_2d__3_5_6_7_8():
    """Huddles, Meetings & Trainings.

    web2py controller action: builds signed download links for the huddle,
    meeting and training sign-in templates, registers the three upload
    prompts, and returns the document links for the view.
    """
    # Signed download URL for the daily huddle sheet template; the HMAC
    # signature covers "app_id" so the link cannot be reused across apps.
    huddle_sheet_url = URL('init', 'word', 'huddle_sheet.doc', vars=dict(**request.get_vars), hmac_key=MY_KEY,
                           salt=session.MY_SALT, hash_vars=["app_id"])
    # NOTE(review): MultiQNA arguments appear to be
    # (min_entries, max_entries, required, name, prompt) — confirm against
    # MultiQNA's definition.
    huddle_sheet = MultiQNA(
        5, float('inf'), True,
        'huddle_sheet',
        "Please upload a minimum of 5 days' worth of <a href='{url}'>daily huddle sheets</a>. The huddles must filled "
        "out every morning discussing tasks / reminders regarding a particular patient or a population of "
        "patients.".format(url=huddle_sheet_url)
    )
    huddle_sheet.set_template("{choose_file}")
    # Shared prompt template for the sign-in sheets below. The "%s" slots are
    # filled per sheet type; "{practice}" is resolved immediately (it lives in
    # the second literal, which is the only one ".format" binds to); "{url}"
    # stays in the first literal and is formatted per sheet.
    temp = "Please have all staff sign <a href='{url}'>this %s sign-in sheet</a> the next time " + \
           "{practice} conducts a %s.".format(practice=APP.practice_name)
    # meeting_sheet
    meeting_sheet_url = URL('init', 'word', 'signin_sheet.doc', args=["meeting_signin_sheet"],
                            vars=dict(type="meeting", **request.get_vars),
                            hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
    # Exactly one meeting sign-in sheet is expected.
    meeting_sheet = MultiQNA(
        1, 1, True,
        'meeting_sheet',
        (temp % ("meeting", "meeting to discuss practice functioning")).format(
            url=meeting_sheet_url)
    )
    meeting_sheet.set_template("{choose_file}")
    # training_sheet
    training_sheet_url = URL('init', 'word', 'signin_sheet.doc', args=["training_signin_sheet"],
                             vars=dict(type="training", **request.get_vars),
                             hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
    # One or more training sign-in sheets may be uploaded.
    training_sheet = MultiQNA(
        1, float('inf'), True,
        'training_sheet',
        (temp % ("training", "training / re-training regarding patient and population management")).format(
            url=training_sheet_url)
    )
    training_sheet.set_template("{choose_file}")
    # Expose the template download links to the view; all links are
    # restricted to team members.
    return dict(documents=[
        dict(
            description="Daily Huddle Sheet",
            url=huddle_sheet_url,
            permissions=["IS_TEAM"]
        ),
        dict(
            description="Training Sign-in Sheet",
            url=training_sheet_url,
            permissions=["IS_TEAM"]
        ),
        dict(
            description="Meeting Sign-in Sheet",
            url=meeting_sheet_url,
            permissions=["IS_TEAM"]
        ),
    ])
def CallCountsToMockFunctions(mock_function):
    """A decorator that passes a zero-based call count as the first argument
    to the function it decorates.

    Examples:
      @CallCountsToMockFunctions
      def foo(call_count):
        return call_count
      ...
      ...
      [foo(), foo(), foo()]
      [0, 1, 2]
    """
    call_count = 0

    def wrapper(*args, **kwargs):
        nonlocal call_count
        # Advance the counter *before* invoking the target so that a mock
        # which raises (e.g. under unittest.TestCase.assertRaises) still
        # consumes its slot, and recursive mocks each see their own index.
        current = call_count
        call_count += 1
        return mock_function(current, *args, **kwargs)

    return wrapper
from typing import Callable
from typing import Tuple
import scipy
def multi_start_maximise(objective_function: Callable,
                         initial_points: ndarray, **kwargs) -> Tuple[ndarray, float]:
    """Run multi-start maximisation of the given objective function.

    Warnings
    --------
    This is a hack to take advantage of fast vectorised computation and avoid
    expensive python loops, and there may be some issues with it. All start
    points are concatenated into one flat vector, the scalar objective is the
    sum of the per-start values and the Jacobian is the concatenation of the
    per-start Jacobians, so the whole batch of restarts becomes a single
    high-dimensional optimisation. The stopping condition is therefore joint:
    the solver keeps going while *any* restart is far from an optimum, even
    if the others have converged. In practice this still performs well.

    Parameters
    ----------
    objective_function
        Vectorised function returning both values and Jacobians. Must accept
        a 2D array of points of shape (num_points, num_dimensions) and return
        a 1D array of values and a 2D array of Jacobians.
    initial_points
        Start locations, 2D array of shape (num_points, num_dimensions).
    **kwargs
        Extra entries merged into the 'options' dict handed to the scipy
        optimiser.

    Returns
    -------
    ndarray
        The location of the found maximum.
    float
        The value of the objective function at the found maximum.
    """
    minimizer_kwargs = DEFAULT_MINIMIZER_KWARGS.copy()
    # Merge caller-supplied options over the defaults.
    minimizer_kwargs['options'] = {**minimizer_kwargs['options'], **kwargs}
    num_starts, num_dims = np.shape(initial_points)

    def negated_batch_objective(flat_x, *inner_args, **inner_kwargs):
        # Undo scipy's flattening and evaluate every start point at once;
        # negate because we maximise via a minimiser.
        points = np.reshape(flat_x, (num_starts, num_dims))
        values, jacobians = objective_function(points, *inner_args, **inner_kwargs)
        total = -values.sum()
        gradient = -jacobians.ravel()
        if not np.isfinite(total) or not np.all(np.isfinite(gradient)):
            raise FloatingPointError("Objective function for multi-start optimisation returned NaN or infinity.")
        return total, gradient

    solution = scipy.optimize.minimize(negated_batch_objective, initial_points,
                                       **minimizer_kwargs)
    # Re-evaluate all converged candidates and keep the best one.
    candidates = solution.x.reshape(num_starts, num_dims)
    candidate_values, _ = objective_function(candidates)
    best = np.argmax(candidate_values)
    return candidates[best, :], candidate_values[best]
async def ping():
    """Handle the ``.ping`` command by responding with ``"pong"``."""
    return "pong"
from statsmodels.tsa.stattools import adfuller
def stationarity_check(TS,plot=True,col=None):
    """Run an Augmented Dickey-Fuller stationarity test and print the result.

    Adapted from: https://learn.co/tracks/data-science-career-v2/module-4-a-complete-data-science-project-using-multiple-regression/working-with-time-series-data/time-series-decomposition

    Args:
        TS: time series to test (pd.Series, or pd.DataFrame when `col` is
            given).
        plot: if True, plot the series together with its rolling mean and
            rolling std (window=8) as a visual stationarity check.
        col: optional column name to test when TS is a DataFrame.

    Returns:
        pd.Series holding the ADF test statistic, p-value, lags used,
        number of observations used, and the critical values.
    """
    # Run the ADF test on the requested column, or on the series itself.
    if col is not None:
        dftest = adfuller(TS[col])
    else:
        dftest = adfuller(TS)
    if plot:
        # Rolling statistics give a visual check for drifting mean/variance.
        rolmean = TS.rolling(window = 8, center = False).mean()
        rolstd = TS.rolling(window = 8, center = False).std()
        plt.figure(figsize=(12,6))
        plt.plot(TS, color='blue',label='Original')
        plt.plot(rolmean, color='red', label='Rolling Mean')
        plt.plot(rolstd, color='black', label = 'Rolling Std')
        plt.legend(loc='best')
        plt.title('Rolling Mean & Standard Deviation')
    # Print Dickey-Fuller test results
    print ('Results of Dickey-Fuller Test:')
    dfoutput = pd.Series(dftest[0:4],
                         index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    sig = dfoutput['p-value']<.05
    print (dfoutput)
    print()
    # BUG FIX: the ADF null hypothesis is that the series has a unit root
    # (i.e. is NON-stationary). Rejecting it (p < .05) therefore means the
    # series IS stationary — the original messages had this inverted.
    if sig:
        print(f"[i] p-val {dfoutput['p-value'].round(4)} is <.05, so we reject the null hypothesis.")
        print('\tThe time series IS stationary.')
    else:
        print(f"[i] p-val {dfoutput['p-value'].round(4)} is >.05, therefore we support the null hypothesis.")
        print("\tThe time series is NOT stationary.")
    return dfoutput
import re
def generate_modelname(tpl,
                       nlayers: int = -1,
                       nhid: int = -1,
                       nagts=-1,
                       bptt: int = -1,
                       pre: bool = False,
                       arch: str = None):
    """
    Build a model name from its hyper-parameters.

    The resulting pattern is ``<tpl>[-<nagts>]-<arch>-<bptt>`` where ``arch``
    is ``<nlayers>-<nhid>`` when not given explicitly. Pre-models are
    prefixed with ``pre-`` and always carry a BPTT part of ``--1``.

    :param tpl: base template name
    :type tpl:
    :param nlayers: number of layers (used only when `arch` is not given)
    :param nhid: hidden size (used only when `arch` is not given)
    :param nagts: number of agents; omitted from the name when -1
    :param bptt: BPTT length
    :param pre: whether this names a pre-model
    :param arch: explicit architecture string, pattern ``<nlayers>-<nhid>``
    :return: the formatted model name
    :raises ValueError: when neither `arch` nor both `nlayers` and `nhid`
        are set
    """
    if arch is None:
        if nlayers == -1 or nhid == -1:
            raise ValueError("`arch` or `nlayers` and `nhid` must be set.")
        arch = "{}-{}".format(nlayers, nhid)
    suffix = "{}-{}".format(arch, bptt)
    if pre:
        # RNN pre-models always have a BPTT of -1 by design, so force the
        # trailing "--<x>" part of the name to "--1".
        return re.sub("--.", "--1", "pre-{}-{}".format(tpl, suffix))
    if nagts != -1:
        return "{}-{}-{}".format(tpl, nagts, suffix)
    return "{}-{}".format(tpl, suffix)
def file_exists(session, ds_browser, ds_path, file_name):
    """Check if the file exists on the datastore.

    Launches a SearchDatastore_Task scoped to `ds_path`, waits for it, and
    reports whether `file_name` was found.
    """
    client_factory = session._get_vim().client.factory
    spec = vm_util.search_datastore_spec(client_factory, file_name)
    search_task = session._call_method(session._get_vim(),
                                       "SearchDatastore_Task",
                                       ds_browser,
                                       datastorePath=ds_path,
                                       searchSpec=spec)
    try:
        task_info = session._wait_for_task(search_task)
    except error_util.FileNotFoundException:
        # The datastore path itself does not exist.
        return False
    found = getattr(task_info.result, 'file', False)
    return found and task_info.result.file[0].path == file_name
def convert_binary_to_unicode(binary_input):
    """
    Decodes a bit string into text, 18 bits per character: each consecutive
    18-bit slice of the input is interpreted as one character's code point.

    :param binary_input: String of '0'/'1' digits, processed in chunks of 18
    :return: String
    """
    chunks = (binary_input[pos:pos + 18]
              for pos in range(0, len(binary_input), 18))
    return ''.join(chr(int(chunk, 2)) for chunk in chunks)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.