content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def main(dns_name, host_names: str):
    """Console script for route53dyn.

    :param dns_name: DNS record name handed through to ``monitor``.
    :param host_names: comma-separated list of host names.
    """
    # str.split(',') always yields at least one element, so the original
    # ``or [host_names]`` fallback was dead code; instead drop empty
    # entries produced by stray commas or a blank input.
    host_names_list = [name.strip() for name in host_names.split(',') if name.strip()]
    monitor(dns_name, host_names_list)
def Modelo(Mags, Phi, Me, alpha):
    """Schechter-type model (in magnitudes) to be fitted.

    Parameters
    ----------
    Mags : list
        Observed magnitudes.
    Phi, Me, alpha : float, float, float
        Model parameters (normalisation, characteristic magnitude, slope).

    Returns
    -------
    F : list
        Model values, one per input magnitude.
    """
    import numpy as np
    # F = f1 * f2 * f3 per magnitude:
    #   f1: normalisation, f2: power-law term, f3: exponential cutoff.
    f1 = 0.4 * np.log(10) * Phi  # loop-invariant: hoisted out of the loop
    F = []
    for M in Mags:
        f2 = 10 ** (-0.4 * (M - Me) * (alpha + 1))
        f3 = np.exp(-10 ** (-0.4 * (M - Me)))
        F.append(f1 * f2 * f3)
    return F
def totient(n):
    """
    Calculate Euler's totient phi(n): the count of integers in [1, n]
    coprime to n.

    Uses ``math.gcd`` because ``fractions.gcd`` was deprecated in 3.5 and
    removed in Python 3.9.

    :param n: positive integer
    :return: phi(n)
    """
    import math
    if n == 1:
        return 1  # phi(1) = 1 by definition; the loop below would yield 0
    count = 0
    for i in range(1, n):
        if math.gcd(n, i) == 1:
            count += 1
    return count
def read_img_pkl(path):
    """Read an image from a pickle (.pkl) file.

    :param path: the file path
    :type path: str
    :return: the unpickled image
    :rtype: tuple
    """
    # NOTE: pickle.load can execute arbitrary code -- only load trusted files.
    with open(path, "rb") as pkl_file:
        image = pickle.load(pkl_file)
    return image
def test_required():
    """Catch dataframe missing required columns."""
    d = pd.DataFrame()
    with raises(ValueError) as exc:
        CyclingData.validate_dataframe(d)
    # str(exc) is the ExceptionInfo repr (file/line info), not the raised
    # message; the exception's text lives on exc.value.
    assert 'missing a required column' in str(exc.value)
def create_new_session(connection_handler, session_tablename="session"):
    """
    Insert a new session record into the session datatable.

    :param connection_handler: DB-API connection used to obtain a cursor
    :param session_tablename: the session tablename (default: session)
    :return: last inserted row id, -1 if an exception is thrown
    """
    try:
        # Record the session start as a formatted timestamp string.
        started_at = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        insert_stmt = f"INSERT INTO {session_tablename}(start_timestamp)VALUES(?)"
        db_cursor = connection_handler.cursor()
        db_cursor.execute(insert_stmt, (started_at,))
        return db_cursor.lastrowid
    except Exception as e:
        logger.error(f"Exception: {str(e)}")
        return -1
def splitData(y, tx, ratios=(0.4, 0.1)):
    """Randomly split the dataset into train, validation and test sets.

    Parameters
    ----------
    y : ndarray
        Labels.
    tx : ndarray
        Features; first axis aligned with ``y``.
    ratios : sequence of two floats
        Fractions used for the train and validation sets; the remainder
        becomes the test set.  (Tuple default avoids the shared
        mutable-default-argument pitfall of the original ``[0.4, 0.1]``.)

    Returns
    -------
    tuple
        (tX_train, y_train, tX_validation, y_validation, tX_test, y_test)
    """
    indices = np.arange(len(y))
    np.random.shuffle(indices)
    # Turn fractional ratios into cumulative index split points.
    splits = (np.array(ratios) * len(y)).astype(int).cumsum()
    training_indices, validation_indices, test_indices = np.split(indices, splits)
    tX_train = tx[training_indices]
    y_train = y[training_indices]
    tX_validation = tx[validation_indices]
    y_validation = y[validation_indices]
    tX_test = tx[test_indices]
    y_test = y[test_indices]
    return tX_train, y_train, tX_validation, y_validation, tX_test, y_test
def test_salt_and_pepper_attack_in_batch():
    """
    Salt and pepper attack method unit test in batch.
    """
    batch_size = 32
    np.random.seed(123)
    # Build the target network and the model wrapper around it.
    net = SimpleNet()
    inputs = np.random.rand(batch_size * 2, 10)
    model = ModelToBeAttacked(net)
    # One-hot float32 labels for two batches worth of samples.
    raw_labels = np.random.randint(low=0, high=10, size=batch_size * 2)
    labels = np.eye(10)[raw_labels].astype(np.float32)
    attack = SaltAndPepperNoiseAttack(model, sparse=False)
    adv_data = attack.batch_generate(inputs, labels, batch_size=32)
    # Adversarial output must differ from the original input.
    assert np.any(adv_data[0] != inputs[0]), ('Salt and pepper attack method: '
                                              'generate value must not be equal'
                                              ' to original value.')
def conv_compare(node1, node2):
    """Compare two conv_general_dilated nodes for structural equality.

    Nodes match when the lengths of their convolution parameters agree,
    their dimension-number specs have equal ranks, and they agree on
    whether feature/batch grouping is in use.
    """
    assert node1["op"] == node2["op"] == "conv_general_dilated"
    params1, params2 = node1["eqn"].params, node2["eqn"].params
    length_keys = ("window_strides", "padding", "lhs_dilation", "rhs_dilation",
                   "lhs_shape", "rhs_shape")
    if any(len(params1[k]) != len(params2[k]) for k in length_keys):
        return False
    dn1 = params1["dimension_numbers"]
    dn2 = params2["dimension_numbers"]
    for spec_name in ("lhs_spec", "rhs_spec", "out_spec"):
        if len(getattr(dn1, spec_name)) != len(getattr(dn2, spec_name)):
            return False
    for count_key in ("feature_group_count", "batch_group_count"):
        if (params1[count_key] > 1) != (params2[count_key] > 1):
            return False
    return True
def main():
    """Train a flow-based neural density estimator with maximum likelihood estimation"""
    make_logger()
    logger = logging.getLogger("tf")
    np.set_printoptions(precision=3)
    tf.reset_default_graph()  # start from a clean TF1.x default graph
    # load a config file whose contents are added to globals(), making them easily accessible elsewhere
    config = make_config()
    train_dp, val_dp = load_data_providers_and_update_conf(config)
    # create a dictionary whose keys are tensorflow operations that can be accessed like attributes e.g graph.operation
    graph = build_flow_graph(config)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Checkpoint only the flow's variables (scope 'flow'); keep 2 checkpoints.
        flow_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='flow')
        saver = tf.train.Saver(var_list=flow_vars, max_to_keep=2, save_relative_paths=True)
        if config.restore_model:
            # Checkpoint directory is derived from the config file's relative path.
            rel_path = "saved_models/{}/model/".format("/".join(config["config_path"].split("/")[:-1]))
            saver.restore(sess, tf.train.latest_checkpoint(project_root + rel_path))
            logger.info("Model restored!")
            # Evaluate the restored model on all training data before any (re)training.
            eval_model(graph, sess, train_dp, val_dp, config, all_train_data=True)
        if not config.only_sample:
            train(graph, sess, train_dp, val_dp, saver, config)
        sample_and_assess_diagnostics(graph, sess, train_dp, config)
        save_config(config)
        logger.info("Finished!")
def render_zones(zones: dict):
    """Render the zones based on the request's Accept header.

    Supports JSON, HTML (via the ``zones`` template), CSV, and a
    plain-text fallback; also sets the response Content-Type.

    :param zones: mapping of timezone name -> UTC offset
    :return: rendered response body
    """
    # headers.get may return None when no Accept header was sent; default
    # to "" so the substring checks below cannot raise TypeError.
    requested_types = bottle.request.headers.get("Accept") or ""
    if "application/json" in requested_types:
        output = json.dumps(zones)
        content_type = "application/json"
    elif "text/html" in requested_types:
        output = bottle.template("zones", zones=zones)
        content_type = "text/html"
    elif "text/csv" in requested_types:
        output = '"timezone","UTC offset"\n' + "\n".join(f'"{k}","{v}"' for k, v in zones.items()) + "\n"
        content_type = "text/csv"
    else:
        output = "\n".join([f"{k}: {v}" for k, v in zones.items()])
        content_type = "text/plain"
    bottle.response.set_header("Content-Type", f"{content_type}; charset=UTF-8")
    return output
def less(left: Tensor, right: Union[Tensor, np.ndarray,numbers.Number],dtype=Dtype.float32,name='less'):
    """Elementwise 'less' comparison of two tensors. Result is 1 if left < right else 0.
    Args:
        left: left side tensor
        right: right side tensor
        dtype (dtype): output tensor dtype.
            NOTE(review): this argument is currently ignored -- the result is
            always cast to tf.float32 below; confirm intended behavior.
        name(str):op name
    Returns:
        Result is 1 if left < right else 0.
    Examples:
    >>> less(to_tensor([41., 42., 43.]), to_tensor([42., 42., 42.]))
    <Tensor: shape=(3,), dtype=float32, numpy=array([1.0000e+00, 0.0000e+00, 0.0000e+00], dtype=float32)>
    >>> less(to_tensor([-1,0,1]), 0)
    <Tensor: shape=(3,), dtype=float32, numpy=array([1.0000e+00, 0.0000e+00, 0.0000e+00], dtype=float32)>
    """
    # Boolean comparison, then cast to float32 (hardcoded; see NOTE above).
    return tf.cast(tf.less(left, right,name=name), tf.float32,name='cast')
def load_sample_image(image_name):
    """Load the numpy array of a single sample image
    Read more in the :ref:`User Guide <sample_images>`.
    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded
    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color
    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    images = load_sample_images()
    # Return the first bundled image whose filename ends with the request.
    for position, filename in enumerate(images.filenames):
        if filename.endswith(image_name):
            return images.images[position]
    raise AttributeError("Cannot find sample image: %s" % image_name)
def assert_within(parent, child):
    """
    Assert that a path is within another.

    :param parent: path expected to contain ``child``
    :param child: path expected to live under ``parent``
    :raises AssertionError: if ``child`` is not within ``parent``
        (like any ``assert``, this is a no-op under ``python -O``)
    """
    assert is_within(parent, child), f"Path {child} is not within {parent}"
def read_date_from_GPM(infile, radar_lat, radar_lon):
    """
    Extract datetime from GPM HDF5 files.
    Parameters:
    ===========
        infile: str
            Satellite data filename.
        radar_lat: float
            Latitude of ground radar
        radar_lon: float
            Longitude of ground radar
    Returns:
    ========
        gpm_date: datetime
            Datetime of satellite data at ground radar position.
        min_dist: float
            Minimal distance between satellite swath and ground radar, i.e.
            is satellite swath are in ground radar domain?
    """
    with h5py.File(infile, 'r') as file_id:
        obj_id = file_id['NS']
        # Read GPM lat/lon.  Dataset slicing ([:]) replaces the `.value`
        # attribute, which was deprecated and removed in h5py 3.0.
        latitude = obj_id['Latitude'][:]
        longitude = obj_id['Longitude'][:]
        # Read time data
        mem_id = obj_id['ScanTime']
        year = mem_id['Year'][:]
        month = mem_id['Month'][:]
        day = mem_id['DayOfMonth'][:]
        hour = mem_id['Hour'][:]
        minute = mem_id['Minute'][:]
        second = mem_id['Second'][:]
        # Using distance (in degree space), find the scan closest to the radar.
        dist = np.sqrt((latitude - radar_lat)**2 + (longitude - radar_lon)**2)
        dist_atrack = np.amin(dist, axis=1)  # Min distance along track axis
        radar_center = np.argmin(dist_atrack)
        min_dist = np.amin(dist_atrack)
        # int() casts guard against numpy integer scalars in datetime().
        gpm_date = datetime.datetime(int(year[radar_center]), int(month[radar_center]),
                                     int(day[radar_center]), int(hour[radar_center]),
                                     int(minute[radar_center]), int(second[radar_center]))
    return gpm_date, min_dist
def build_cluster_endpoint(
    domain_key: DomainKey,
    custom_endpoint: Optional[CustomEndpoint] = None,
    engine_type: EngineType = EngineType.OpenSearch,
    preferred_port: Optional[int] = None,
) -> str:
    """
    Builds the cluster endpoint from and optional custom_endpoint and the localstack opensearch config. Example
    values:
    - my-domain.us-east-1.opensearch.localhost.localstack.cloud:4566 (endpoint strategy = domain (default))
    - localhost:4566/us-east-1/my-domain (endpoint strategy = path)
    - localhost:[port-from-range] (endpoint strategy = port (or deprecated 'off'))
    - my.domain:443/foo (arbitrary endpoints (technically not allowed by AWS, but there are no rules in localstack))
    If preferred_port is not None, it is tried to reserve the given port. If the port is already bound, another port
    will be used.
    """
    # If we have a CustomEndpoint, we directly take its endpoint.
    if custom_endpoint and custom_endpoint.enabled:
        return custom_endpoint.endpoint
    # different endpoints based on engine type
    engine_domain = "opensearch" if engine_type == EngineType.OpenSearch else "es"
    # Otherwise, the endpoint is either routed through the edge proxy via a sub-path (localhost:4566/opensearch/...)
    if config.OPENSEARCH_ENDPOINT_STRATEGY == "port":
        if preferred_port is not None:
            try:
                # if the preferred port is given, we explicitly try to reserve it
                assigned_port = external_service_ports.reserve_port(preferred_port)
            except PortNotAvailableException:
                # Fall back to any free port from the managed range.
                LOG.warning(
                    f"Preferred port {preferred_port} is not available, trying to reserve another port."
                )
                assigned_port = external_service_ports.reserve_port()
        else:
            assigned_port = external_service_ports.reserve_port()
        return f"{config.LOCALSTACK_HOSTNAME}:{assigned_port}"
    if config.OPENSEARCH_ENDPOINT_STRATEGY == "path":
        # Path strategy: single edge port, domain encoded in the URL path.
        return f"{config.LOCALSTACK_HOSTNAME}:{config.EDGE_PORT}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}"
    # or through a subdomain (domain-name.region.opensearch.localhost.localstack.cloud)
    return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT}"
def get_community_pools():
    """Get community pool coins

    Returns:
        List[dict]: A list of dicts which consists of following keys:
        denom, amount
    """
    url = f"{BLUZELLE_PRIVATE_TESTNET_URL}:{BLUZELLE_API_PORT}/cosmos/distribution/v1beta1/community_pool"
    result = requests.get(url)
    if result.status_code != 200:
        returnReqError(url, result)
        return None
    pool_list = []
    for pool in result.json()["pool"]:
        # Map the raw denom to its display symbol where applicable.
        denom = BLZ_SYMBOL if pool["denom"] == BLZ_DENOM else pool["denom"]
        # Split the converted amount into integer / "." / fraction parts.
        int_part, dot, frac_part = str(float(pool["amount"]) / BLZ_UBNT_RATIO).partition(".")
        # Insert thousands separators into the integer part only.
        grouped = re.sub(r"(?<!^)(?=(\d{3})+$)", r",", int_part)
        pool_list.append(
            {
                "denom": denom,
                "amount": f"{grouped}{dot}{frac_part}",
            }
        )
    return pool_list
def rgb2bgr(x):
    """Convert an RGB image array into its BGR representation.

    Swapping the first and third channels is symmetric, so this simply
    delegates to ``bgr2rgb``.
    """
    return bgr2rgb(x)
def test_confirmation(test_client, login, confirmation, user):
    """
    Test the course sign-up flow: the redirect link is protected against
    tampering, and the alert shown afterwards has category 'success'.
    """
    response = test_client.get('/')
    assert response.status_code == 200
    assert b'success' in response.data
    assert b'Index page' in response.data
    # Signing up must have attached at least one course to the user.
    assert user.courses != []
def draw_des1_plot(date, plot_A, plot_B):
    """Draw the DES 1 chart: two time series on one xkcd-styled axis.

    :param date: column name for the x-axis (dates)
    :param plot_A: column name of the first series
    :param plot_B: column name of the second series
    :return: the matplotlib figure
    """
    # Hard-coded demo data for the plot.
    demo = pd.DataFrame({'date': np.array([datetime.datetime(2020, 1, i+1)
                                           for i in range(12)]),
                         'Worldwide': [3, 4, 4, 7, 8, 9, 14, 17, 12, 8, 8, 13],
                         'Malaysia': [1, 1, 2, 3, 3, 3, 4, 3, 2, 3, 4, 7]})
    plt.xkcd()  # comic style function
    fig = plt.figure(figsize=(9, 6), dpi=35)  # define the size of the figure
    fig.suptitle('Monthly new cases')  # title of the chart
    ax = fig.add_subplot(111)
    # Two time-series plots on the same axis.
    ax.plot(demo[date], demo[plot_A], label=plot_A, linewidth=3)
    ax.plot(demo[date], demo[plot_B], color='red', label=plot_B, linewidth=3)
    ax.legend()  # legend for the two series
    ax.set_xlabel('Date')  # define x axis label
    ax.set_ylabel('Cases per million people')  # define y axis label
    return fig
def kolmogorov_smirnov_rank_test(gene_set, gene_list, adj_corr, plot=False):
    """
    Rank test used in the GSEA method. It measures dispersion of genes from
    gene_set over a gene_list. Every gene from gene_list has its weight
    specified by adj_corr, where adj_corr are gene weights (correlation
    with fenotype) already raised to the power of parameter p, changing
    weights importance. ``plot`` defines whether the method should return a
    list of ES values for each position in the ranking; if plot=False
    (default) the second returned object is None.
    Reference: http://www.pnas.org/content/102/43/15545.full
    """
    # O(1) membership tests instead of O(len(gene_set)) per gene.
    gene_set = set(gene_set)
    cval = 0
    Dn = 0
    Nr = 0
    N = len(gene_list)
    Nh = 0
    # First pass: total hit weight (Nr) and hit count (Nh).
    for i in range(N):
        if gene_list[i] in gene_set:
            Nr += adj_corr[i]
            Nh += 1
    # Penalty subtracted for every gene outside the set.
    if N == Nh:
        miss_pen = 1.
    else:
        miss_pen = float(1) / (N - Nh)
    # (The original assigned stat_plot = N * [None] unconditionally and then
    # immediately overwrote it -- dead code, removed.)
    stat_plot = N * [None] if plot else None
    # Second pass: running enrichment score; Dn is the extremum.
    for i in range(N):
        if gene_list[i] in gene_set:
            cval += adj_corr[i] / Nr
        else:
            cval -= miss_pen
        if plot:
            stat_plot[i] = cval
        if abs(cval) > abs(Dn):
            Dn = cval
    return (Dn, stat_plot)
def test_symlink_missing_src(file, source):
    """
    Creating a symlink whose source is missing should still create the link.
    """
    link_path = source.parent / "symlink.lnk"
    dangling_src = source.parent / "missing.txt"
    try:
        file.symlink(str(dangling_src), str(link_path))
        assert salt.utils.path.islink(str(link_path))
    finally:
        # Always clean up the created link.
        link_path.unlink()
async def segment_url(request: schemas.UrlSegmentationRequest) -> schemas.SegmentationResponse:
    """ This endpoint accepts the URL of an image, and returns a SegmentationResponse.
    The endpoint will try to download the image at the given URL.
    Note: not all servers allow for non-browser user agents to download images.
    """
    try:
        # Validate explicitly rather than with `assert`, which is stripped
        # when Python runs with -O; the ValueError is still turned into an
        # error response by the handler below.
        if not re.match(config.URL_REGEX, request.image_url):
            raise ValueError(f"Invalid image URL: {request.image_url}")
        image = utils.download_image(request.image_url)
        segments = pipeline.segment_image(image)
        return schemas.SegmentationResponse(status_code=0,
                                            error_message="",
                                            segment_count=len(segments),
                                            segments=segments)
    except Exception as e:
        return error_response(e)
def generate_answers(session, model, word2id, qn_uuid_data, context_token_data, qn_token_data):
    """
    Given a model, and a set of (context, question) pairs, each with a unique ID,
    use the model to generate an answer for each pair, and return a dictionary mapping
    each unique ID to the generated answer.
    Inputs:
      session: TensorFlow session
      model: QAModel
      word2id: dictionary mapping word (string) to word id (int)
      qn_uuid_data, context_token_data, qn_token_data: lists
    Outputs:
      uuid2ans: dictionary mapping uuid (string) to predicted answer (string; detokenized)
    NOTE(review): Python 2 source (print statements; integer division
    below) -- do not run under Python 3 without porting.
    """
    uuid2ans = {} # maps uuid to string containing predicted answer
    data_size = len(qn_uuid_data)
    # Ceil-division via Python 2 integer `/`.
    num_batches = ((data_size-1) / model.FLAGS.batch_size) + 1
    batch_num = 0
    detokenizer = MosesDetokenizer()
    print "Generating answers..."
    for batch in get_batch_generator(word2id, qn_uuid_data, context_token_data, qn_token_data, model.FLAGS.batch_size, model.FLAGS.context_len, model.FLAGS.question_len, model.FLAGS.num_feats, model.FLAGS.word_len, model.mcids_dict):
        # Get the predicted spans
        pred_start_batch, pred_end_batch = model.get_start_end_pos(session, batch, model.FLAGS.max_span)
        # Convert pred_start_batch and pred_end_batch to lists length batch_size
        pred_start_batch = pred_start_batch.tolist()
        pred_end_batch = pred_end_batch.tolist()
        # For each example in the batch:
        for ex_idx, (pred_start, pred_end) in enumerate(zip(pred_start_batch, pred_end_batch)):
            # Original context tokens (no UNKs or padding) for this example
            context_tokens = batch.context_tokens[ex_idx] # list of strings
            # Check the predicted span is in range
            assert pred_start in range(len(context_tokens))
            assert pred_end in range(len(context_tokens))
            # Predicted answer tokens (span end is inclusive, hence the +1)
            pred_ans_tokens = context_tokens[pred_start : pred_end +1] # list of strings
            # Detokenize and add to dict
            uuid = batch.uuids[ex_idx]
            uuid2ans[uuid] = detokenizer.detokenize(pred_ans_tokens, return_str=True)
        batch_num += 1
        if batch_num % 10 == 0:
            print "Generated answers for %i/%i batches = %.2f%%" % (batch_num, num_batches, batch_num*100.0/num_batches)
    print "Finished generating answers for dataset."
    return uuid2ans
def _add_agline_to_dict(geo, line, d=None, idx=0, mesh_size=1e-2, n_elements=0, bc=None):
    """Draw a new Air Gap line and add it to GMSH dictionary if it does not exist
    Parameters
    ----------
    geo : Model
        GMSH Model objet
    line : Object
        Line Object
    d : Dictionary
        GMSH dictionary (a fresh empty dict is created when None; the
        previous ``d={}`` default was a mutable default argument shared
        across calls)
    idx : int
        Surface index it belongs to
    mesh_size : float
        Points mesh size
    n_elements : int
        Number of elements on the line for meshing control
    bc : str
        Boundary condition name stored with the line (may be None)
    Returns
    -------
    None
    """
    # TO-DO: Allow repeated points for the rotor and stator sliding bands
    if d is None:
        d = {}
    dlines = list()
    ltag = None
    # Reuse existing begin/end points when already present in the dictionary.
    btag, bx, by = _find_point_tag(d, line.get_begin())
    etag, ex, ey = _find_point_tag(d, line.get_end())
    if btag is None:
        btag = geo.addPoint(bx, by, 0, meshSize=mesh_size, tag=-1)
    else:
        dlines.extend(_find_lines_from_point(d, btag))
    if etag is None:
        etag = geo.addPoint(ex, ey, 0, meshSize=mesh_size, tag=-1)
    else:
        dlines.extend(_find_lines_from_point(d, etag))
    if isinstance(line, Arc):
        ctag, cx, cy = _find_point_tag(d, line.get_center())
        if ctag is None:
            ctag = geo.addPoint(cx, cy, 0, meshSize=mesh_size, tag=-1)
        else:
            dlines.extend(_find_lines_from_point(d, ctag))
        # Reuse an existing arc if one already joins these points
        # (negative tag encodes reversed orientation).
        if len(dlines) > 0:
            for iline in dlines:
                p = _find_points_from_line(d, iline)
                if p[0] == btag and p[1] == etag and p[2] == ctag:
                    ltag = iline
                    break
                elif p[0] == etag and p[1] == btag and p[2] == ctag:
                    ltag = -iline
                    break
                else:
                    pass
            if ltag is None:
                ltag = geo.addCircleArc(btag, ctag, etag, tag=-1)
                if n_elements > 0:
                    geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression")
        else:
            ltag = geo.addCircleArc(btag, ctag, etag, tag=-1)
            if n_elements > 0:
                geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression")
        # To avoid fill the dictionary with repeated lines
        repeated = False
        for lvalues in d[idx].values():
            if type(lvalues) is not dict:
                continue
            else:
                if lvalues["tag"] == ltag:
                    repeated = True
        if not repeated:
            nline = len(d[idx]) - 2
            arc_angle = cmath.phase(complex(ex, ey)) - cmath.phase(complex(bx, by))
            d[idx].update(
                {
                    nline: {
                        "tag": ltag,
                        "n_elements": n_elements,
                        "bc_name": bc,
                        "begin": {"tag": btag, "coord": complex(bx, by)},
                        "end": {"tag": etag, "coord": complex(ex, ey)},
                        "cent": {"tag": ctag, "coord": complex(cx, cy)},
                        "arc_angle": arc_angle,
                        "line_angle": None,
                    }
                }
            )
    else:
        # Straight segment: same reuse logic, without a center point.
        if len(dlines) > 0:
            for iline in dlines:
                p = _find_points_from_line(d, iline)
                if p[0] == btag and p[1] == etag:
                    ltag = iline
                    break
                elif p[0] == etag and p[1] == btag:
                    ltag = -iline
                    break
                else:
                    pass
            if ltag is None:
                ltag = geo.addLine(btag, etag, tag=-1)
                if n_elements > 0:
                    geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression")
        else:
            ltag = geo.addLine(btag, etag, tag=-1)
            if n_elements > 0:
                geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression")
        # To avoid fill the dictionary with repeated lines
        repeated = False
        for lvalues in d[idx].values():
            if type(lvalues) is not dict:
                continue
            else:
                if lvalues["tag"] == ltag:
                    repeated = True
        if not repeated:
            nline = len(d[idx]) - 2
            line_angle = 0.5 * (
                cmath.phase(complex(ex, ey)) + cmath.phase(complex(bx, by))
            )
            d[idx].update(
                {
                    nline: {
                        "tag": ltag,
                        "n_elements": n_elements,
                        "bc_name": bc,
                        "begin": {"tag": btag, "coord": complex(bx, by)},
                        "end": {"tag": etag, "coord": complex(ex, ey)},
                        "arc_angle": None,
                        "line_angle": line_angle,
                    }
                }
            )
    return None
async def test_abort_if_no_url_available(hass, flow_handler, local_impl):
    """Check no_url_available generating authorization url."""
    flow_handler.async_register_implementation(hass, local_impl)
    config_flow = flow_handler()
    config_flow.hass = hass
    # Force authorize-URL generation to fail so the flow has to abort.
    patched_impl = patch.object(
        local_impl, "async_generate_authorize_url", side_effect=NoURLAvailableError
    )
    with patched_impl:
        result = await config_flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "no_url_available"
def bisect_driver(bisect_stage, execargs):
    """Call appropriate bisection stage according to value in bisect_stage."""
    bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
    # Dispatch table instead of an if/elif chain; lambdas defer evaluation.
    stage_actions = {
        'POPULATE_GOOD': lambda: bisect_populate(execargs, bisect_dir, 'good'),
        'POPULATE_BAD': lambda: bisect_populate(execargs, bisect_dir, 'bad'),
        'TRIAGE': lambda: bisect_triage(execargs, bisect_dir),
    }
    action = stage_actions.get(bisect_stage)
    if action is None:
        raise ValueError('wrong value for BISECT_STAGE: %s' % bisect_stage)
    action()
def create_game(gm):
    """
    Configure and create a game.
    Creates a game with base settings equivalent to one of the default presets.
    Allows user to customize the settings before starting the game.
    Parameters
    ----------
    gm : int
        Game type to replicate:
        0: Normal mode.
        1: Advanced mode.
        (any other value: testing preset -- see TODO below)
    Returns
    -------
    BattleshipGame
        Game instance with user-chosen settings.
    """
    print('\n' * PAD_AMOUNT)  # Pad previous output.
    # Choose and print default settings.
    if gm == 0:
        Utils.box_string('Normal Mode', print_string=True)
        settings = normal_mode_preset
    elif gm == 1:
        Utils.box_string('Advanced Mode', print_string=True)
        settings = advanced_mode_preset
    else:  # TODO: REMOVE TESTING MODE
        Utils.box_string('Testing Mode', print_string=True)
        settings = testing_preset
    # Print current settings.
    Utils.print_settings(settings)
    # Change settings, if applicable.
    if Utils.num_input('Would you like to change the settings?', 'No', 'Yes') == 1:
        while True:
            # Determine which setting group to modify.
            setting = Utils.num_input('Settings', 'Grid Size', 'Ship Amount', 'Special Abilities', 'Game Type', 'Exit')
            # Modify setting groups.
            if setting == 0:  # Grid Size
                # Take grid dimensions (regex restricts values to 5-26).
                settings['width'] = int(Utils.string_input('Grid Width (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$'))
                settings['height'] = int(Utils.string_input('Grid Height (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$'))
            elif setting == 1:  # Ship Amount
                while True:
                    # Take ship amounts.
                    settings['5_ships'] = int(Utils.string_input('5-Long Ships (0-9)', condition=r'[0-9]'))
                    settings['4_ships'] = int(Utils.string_input('4-Long Ships (0-9)', condition=r'[0-9]'))
                    settings['3_ships'] = int(Utils.string_input('3-Long Ships (0-9)', condition=r'[0-9]'))
                    settings['2_ships'] = int(Utils.string_input('2-Long Ships (0-9)', condition=r'[0-9]'))
                    settings['1_ships'] = int(Utils.string_input('1-Long Ships (0-9)', condition=r'[0-9]'))
                    # Test if ship amounts are valid (non-zero, <= 26, and fit on the board).
                    count = settings['5_ships'] + settings['4_ships'] + settings['3_ships'] + settings['2_ships'] + settings['1_ships']
                    if count == 0:
                        Utils.box_string('You must have at least one ship!', print_string=True)
                    elif count > 26:
                        Utils.box_string('You have put in too many ships! (max 26)', print_string=True)
                    elif settings['5_ships'] * 5 + settings['4_ships'] * 4 + settings['3_ships'] * 3 + settings['2_ships'] * 2 + settings['1_ships'] > settings['width'] * settings['height']:
                        Utils.box_string('Your ships will not fit inside of the board!', print_string=True)
                    else:
                        break
            elif setting == 2:  # Special Abilities
                # Take abilities; mines are only offered when moving is enabled.
                settings['allow_moves'] = Utils.num_input('Ship Moving', 'Enable', 'Disable') == 0
                if settings['allow_moves']:
                    settings['allow_mines'] = Utils.num_input('Mines', 'Enable', 'Disable') == 0
                    settings['mine_turns'] = int(Utils.string_input('Turns Between Mines', condition=r'\d+')) if settings['allow_mines'] else None
            elif setting == 3:  # Game Type
                # Take game type.
                settings['p_type'] = ['CPU', 'Player'][Utils.num_input('Game Type', 'CPU', 'Player')]
            # Print updated settings.
            Utils.print_settings(settings)
            if setting == 4:  # Exit
                break
    return BattleshipGame(settings)
def mat_toeplitz_2d(h, x):
    """
    Constructs a Toeplitz matrix for 2D convolutions
    Parameters
    ----------
    h: list[list]
        A matrix of scalar values representing the filter
    x: list[list]
        A matrix of scalar values representing the signal
    Returns
    -------
    list[list]
        A doubly block Toeplitz matrix T such that y = T(h) * x
    """
    # Calculate the dimensions of the arrays
    Nh, Mh = mat_dim(h)
    Nx, Mx = mat_dim(x)
    # Full-convolution output size.
    Ny, My = Nh + Nx - 1, Mh + Mx - 1
    # Pad the filter, if needed (padm is currently unused; kept for symmetry)
    padn, padm = Ny - Nh, My - Mh
    # Dimensions of a Toeplitz matrix
    Trows, Tcols = My, Mx
    # Dimension of the block Toeplitz matrix (BTM)
    BTrows, BTcols = Ny, Nx
    # Dimension of the doubly block Toeplitz matrix (DBTM)
    DTrows, DTcols = BTrows * Trows, BTcols * Tcols
    # Create the Toeplitz matrices, one per filter row (bottom row first)
    Tlist = []
    for row in reversed(h):
        t = mat_toeplitz_1d(row, x[0])
        Tlist.append(t)
    # Padding the blocks, if needed.  The block index i = row - col below
    # may be negative or exceed Nh-1; both cases must resolve to a zero
    # block.  Appending `padn` Nones makes negative indices (which Python
    # wraps to the end of the list) and indices >= Nh both land on None.
    Tlist += [None] * padn
    # Construct the DBTM: block (row, col) is Tlist[row - col]
    DBTM = mat_new(DTrows, DTcols)
    for col in range(BTcols):
        for row in range(BTrows):
            i = row - col
            offset = (row * Trows, col * Tcols)
            block = Tlist[i]
            if block:
                mat_submat_copy(DBTM, block, offset)
    return DBTM
def list_open_orders(args):
    """List open orders.

    When ``args.txid`` is set, queries those specific orders; otherwise
    lists all open orders.  Orders are filtered by currency pair (when
    given), sorted by price within buy/sell, and printed as CSV or a table.
    """
    # Parameters to pass to the API
    api_params = {
        # TODO
    }
    if args.txid:
        api_params.update({'txid': args.txid})
        res_ol = query_api('private', 'QueryOrders', api_params, args)
    else:
        res = query_api('private', 'OpenOrders', api_params, args)
        # extract list of orders from API results
        res_ol = res['open']
    # the parsing is done in an helper function
    ol = parse_order_res(res_ol, ['open'])
    # filter and sort orders by price in each category ('buy'/'sell')
    for otype in ol:
        # filter orders based on currency pair ('all' keeps everything)
        if 'pair' in args and args.pair:
            ol[otype] = [odict for odict in ol[otype]
                         if (odict['pair'] in [args.pair, asset_pair_short(args.pair)] or args.pair == 'all')]
        # sort orders by price (Decimal avoids float-string comparison issues)
        ol[otype] = sorted(ol[otype], key=lambda odict: Decimal(odict['price']))
    # final list is concatenation of buy orders followed by sell orders
    ol_all = ol['buy'] + ol['sell']
    if not ol_all:
        return  # nothing to print
    if args.csv:
        print(csv(ol_all, headers="keys"))
    else:
        print(tabulate(ol_all, headers="keys"))
def fix_text_segment(
    text,
    *,
    fix_entities='auto',
    remove_terminal_escapes=True,
    fix_encoding=True,
    fix_latin_ligatures=True,
    fix_character_width=True,
    uncurl_quotes=True,
    fix_line_breaks=True,
    fix_surrogates=True,
    remove_control_chars=True,
    remove_bom=True,
    normalization='NFC'
):
    """
    Apply fixes to text in a single chunk. This could be a line of text
    within a larger run of `fix_text`, or it could be a larger amount
    of text that you are certain is in a consistent encoding.
    See `fix_text` for a description of the parameters.
    """
    if isinstance(text, bytes):
        raise UnicodeError(fixes.BYTES_ERROR_TEXT)
    # In 'auto' mode, leave HTML entities alone when the text looks like
    # actual markup (contains both '<' and '>').
    if fix_entities == 'auto' and '<' in text and '>' in text:
        fix_entities = False
    # Apply the enabled fixes repeatedly until a fixpoint is reached,
    # i.e. a full pass that changes nothing.
    while True:
        origtext = text
        if remove_terminal_escapes:
            text = fixes.remove_terminal_escapes(text)
        if fix_encoding:
            text = fixes.fix_encoding(text)
        if fix_entities:
            text = fixes.unescape_html(text)
        if fix_latin_ligatures:
            text = fixes.fix_latin_ligatures(text)
        if fix_character_width:
            text = fixes.fix_character_width(text)
        if uncurl_quotes:
            text = fixes.uncurl_quotes(text)
        if fix_line_breaks:
            text = fixes.fix_line_breaks(text)
        if fix_surrogates:
            text = fixes.fix_surrogates(text)
        if remove_control_chars:
            text = fixes.remove_control_chars(text)
        if remove_bom and not remove_control_chars:
            # Skip this step if we've already done `remove_control_chars`,
            # because it would be redundant.
            text = fixes.remove_bom(text)
        if normalization is not None:
            text = unicodedata.normalize(normalization, text)
        if text == origtext:
            return text
def SSValues(MPKa,Rfa,r):
    """
    Steady-State Values (Numerical solutions Linear)
    Input: Annual MPK and Rf Rates, r (repetition index)
    Output: Annual MPK and Rf Rates (Input), mu, gamma, SS Capital, SS Wage, SS Investment, Value function

    NOTE(review): relies on many module-level globals not visible here
    (years, sigma, alpha, beta, l, tau, tauL, phiret, delta, pi, nodes,
    weights, WSS0list, periods, drop, exp, W_func, rK_func, foc,
    least_squares) -- confirm their definitions before modifying.
    """
    #Compute Parameters: annualized rates compounded over `years`
    MPK = pow(MPKa/100+1,years)
    Rf = pow(Rfa/100+1,years)
    #Eq(19)
    gamma = (np.log(MPK) - np.log(Rf))/(sigma*sigma)
    #Eq(18)
    mu = np.log(MPK) - np.log(alpha) - (sigma*sigma)/2
    Xs = exp(np.sqrt(2)*sigma*nodes+mu) #Gauss-Hermite
    #Initialize Model
    i = 0 #Reset period
    X = l*WSS0list[r] #Non-stochastic endowment = l*WSS
    tr = tau*beta*(1+l)*WSS0list[r] #Non-stochastic transfer = tau*(1+l)*ISS
    K = beta*(1+l)*WSS0list[r] #Initial value for K
    W = WSS0list[r] #Initial value for W
    I = beta*(1+l)*WSS0list[r] #Initial value for I
    #Create Empty lists
    Klist=[]
    cylist=[]
    Ecolist=[]
    Wlist=[]
    Ilist=[]
    while i != periods:
        #Current Random Shock (re-seeded per period for reproducibility;
        #NOTE(review): `random.seed` appears unused here -- only np.random
        #is drawn from in this block)
        random.seed(i)
        np.random.seed(i)
        z = np.random.lognormal(mu, sigma)
        #Old: expected utility term via Gauss-Hermite quadrature
        Eco = pow(pi,-1/2) * sum(weights * pow(I * (1 + rK_func(Xs,K) - delta) + phiret* tauL * W_func(Xs,K) + tr, 1-gamma))
        #Current Wage
        W = W_func(z,K)
        #Young Optimal Investment Decision (bounded least-squares on the FOC)
        I = least_squares(foc, (beta*(1+l)*W), bounds = (0,W*(1-tauL)-tr+X), args=(K,W,Xs,gamma,X,tr,))
        I = round(I.x[0],50)
        cy = W*(1-tauL) -tr - I + X
        #Capital Motion
        K = (1 - delta) * K + I
        #Build Lists
        Klist.append(K)
        cylist.append(cy)
        Ecolist.append(Eco)
        Wlist.append(W)
        Ilist.append(I)
        i += 1
    #Compute SS values as post-burn-in averages (first `drop` periods dropped)
    KSS = round(np.mean(Klist[drop:]),50)
    WSS = round(np.mean(Wlist[drop:]),50)
    ISS = round(np.mean(Ilist[drop:]),50)
    #Compute Value function
    cylist = [1] + cylist #Fix consumption for '1st generation' of old when were young to 1
    cylist = cylist[:-1] #Remove last consumption young to make it consistent
    Vlong = (1-beta)*np.log(np.asarray(cylist)) + beta / (1-gamma) * np.log(np.asarray(Ecolist))
    V = np.mean(Vlong[drop:])
    return (round(MPKa,5), round(Rfa,5), round(mu,2), round(gamma,2), round(KSS,5), round(WSS,5), round(ISS,5), round(V,5))
def simplify_junctures(graph, epsilon=5):
    """Simplify clumps by replacing them with a single juncture node.

    For each clump, any nodes within epsilon of the clump are deleted.
    Remaining nodes are connected back to the simplified junctures.

    :param graph: graph whose nodes are (x, y) tuples
    :param epsilon: distance threshold for clump detection and deletion
    :return: a simplified copy of the graph (the input is not mutated)
    """
    graph = graph.copy()
    max_quadrance = epsilon * epsilon
    clumps = find_clumps(graph, epsilon)
    for clump in clumps:
        # Collect nodes close to any juncture of the clump.
        to_delete = set()
        for node in graph.nodes_iter():  # NOTE: networkx 1.x API
            if any(quadrance(node, juncture) < max_quadrance for juncture in clump):
                to_delete.add(node)
        # Surviving neighbors must be re-attached to the merged juncture.
        to_join = set()
        for node in to_delete:
            for neighbor in nx.all_neighbors(graph, node):
                if neighbor not in to_delete:
                    to_join.add(neighbor)
        # Replace the clump by its centroid.
        clump_center = (sum(j[0] for j in clump) / len(clump),
                        sum(j[1] for j in clump) / len(clump))
        for node in to_delete:
            graph.remove_node(node)
        for node in to_join:
            graph.add_edge(node, clump_center)
    return graph
def sample_truncated_norm(clip_low, clip_high, mean, std):
    """
    Draw one sample from normal(mean, std) truncated to [clip_low, clip_high],
    returned as an int (truncated toward zero).
    """
    # truncnorm expects the clip bounds expressed in standard-normal units.
    lo = (clip_low - mean) / std
    hi = (clip_high - mean) / std
    return int(truncnorm.rvs(lo, hi, loc=mean, scale=std))
def f(x):
    """Approximated function: the linear map x @ w_target + b_target[0].

    NOTE(review): relies on module-level ``w_target`` and ``b_target``
    (torch tensors) -- confirm their shapes at the call site.
    """
    return x.mm(w_target)+b_target[0]
def get_urls():
    """Fetch all sci-hub torrent URLs.

    Scrapes the repository index page and returns the absolute URL of every
    ``.torrent`` link found. On any error an (possibly empty) partial list is
    returned after printing the error.

    :return: list of torrent URL strings
    """
    source_url = 'http://gen.lib.rus.ec/scimag/repository_torrent/'
    urls_list = []
    try:
        # Timeout keeps a stalled server from hanging the scrape forever.
        req = requests.get(source_url, timeout=30)
        soups = BeautifulSoup(req.text, 'lxml').find_all('a')
        for soup in soups:
            if '.torrent' not in soup.text:
                continue
            url = source_url + soup.text
            print(url)
            urls_list.append(url)
    except Exception as error:
        print(error)
    # Plain return (not `return` in `finally`): a `finally: return` would
    # silently swallow even BaseExceptions such as KeyboardInterrupt.
    return urls_list
def getVariablesForCookie(request=None):
    """ returns dict with variables for cookie
    The cookie name embeds an md5 of the site URL so each portal gets its
    own distinct comment-author cookie; the path is site-wide ('/').
    """
    cookie_path = '/'
    portalurl = absoluteURL(getSite(), request)
    # NOTE(review): hashlib.md5 requires bytes on Python 3; this works as-is
    # only if running under Python 2 or if md5 here is a wrapper — confirm.
    cookie_name = "%s%s"%('__zojax_comment_author_', md5(portalurl).hexdigest())
    return dict(name=cookie_name, path=cookie_path)
def pg_compare(target_db_url, other_db_url):
    """Compare target DB to other DB using PostgreSQL settings.

    Prints a table of settings whose values differ between the two servers
    (values truncated to 50 chars), or "No differences." when they match.
    Errors out when both URLs point at the same host.
    """
    conn_a, runner_a = _conn(target_db_url)
    conn_b, runner_b = _conn(other_db_url)
    params_a = PostgreSQLParameter.all_settings(runner_a)
    params_b = PostgreSQLParameter.all_settings(runner_b)
    host_a = conn_a.get_dsn_parameters()["host"]
    host_b = conn_b.get_dsn_parameters()["host"]
    if host_a == host_b:
        _error("Target database and other database are the same database.")
    table = PrettyTable(["Name", host_a, host_b, "Unit"])
    # Index the other DB's settings by name once instead of the previous
    # O(n^2) nested scan (PostgreSQL setting names are unique per server).
    params_b_by_name = {param.name(): param for param in params_b}
    diff = 0
    for a in params_a:
        b = params_b_by_name.get(a.name())
        if b is not None and a != b:
            diff += 1
            table.add_row(
                [a.name(), a.value()[:50], b.value()[:50], a.unit().lower()]
            )
    if diff == 0:
        _result("No differences.")
    else:
        print(table)
def logsumexp(x, dim):
    """Numerically stable log-sum-exp reduction along dimension `dim`.

    Subtracts the per-slice maximum before exponentiating so large inputs
    cannot overflow, then adds it back after the log.
    """
    shift, _ = x.max(dim=dim)
    centered = x - shift.unsqueeze(dim)
    return shift + centered.exp().sum(dim=dim).log()
def check_numeric_problems(RMs, freq_array,theta):
    """Checks for conditions that might cause numeric instability in the
    time-integration, and warns the user if there might be concerns.
    Specifically, checks for extreme jumps in polarization angle between
    timesteps (will cause integration errorrs),
    and for extreme depolarization (high liklihood of large errors).
    Args:
        RMs (array): ionospheric RMs per time step
        freq_array (array): channel frequencies (in Hz)
        theta (array): ionospheric modulation per channel.
    """
    import warnings
    #Check for large jumps in RM/polarization angle between steps.
    #These can cause the numeric integrator to not catch angle wraps.
    longest_l2=(C/np.min(freq_array))**2
    # Use the absolute step size: a large *negative* RM step causes angle
    # wraps just as much as a positive one, but max(diff) alone would miss it.
    max_deltaRM=np.max(np.abs(np.diff(RMs)))
    max_delta_polangle=longest_l2*max_deltaRM #in radians
    if max_delta_polangle > 0.5:
        warnings.warn(("\nLarge variations in RM between points, which may "
                       "introduce numerical errors.\n"
                       "Consider trying a smaller timestep."))
    #Warn about very low values of theta (very strong depolarization)
    # as these can probably not be corrected reliably.
    if np.min(np.abs(theta)) < 0.02:
        warnings.warn(("\nExtreme depolarization predicted (>98%). "
                       "Corrected polarization will almost certainly not "
                       "be trustworthy in affected channels."))
    elif np.min(np.abs(theta)) < 0.1:
        warnings.warn(("\nSignificant depolarization predicted (>90%). "
                       "Errors in corrected polarization are likely to be "
                       "very large in some channels."))
def make_request(session, verb, endpoint, data=None,
                 timeoutInSeconds=REQUEST_TIMEOUT_IN_SECONDS,
                 max_retries=MAX_RETRIES):
    """ Make a REST request.

    :param session: requests.Session (or compatible) used to issue the call
    :param verb: RequestVerb member selecting the HTTP method
    :param endpoint: full URL to hit
    :param data: JSON-serializable body (used by POST/PUT/PATCH)
    :param timeoutInSeconds: per-request timeout
    :param max_retries: remaining retries on connection errors
    :return: the Response on an accepted status, None on an unexpected
        status or unsupported verb, or the caught exception object on error
    """
    # Preserve the historical default body ({}) without a mutable default arg.
    if data is None:
        data = {}
    ok = requests.codes.ok
    # verb -> (request thunk, accepted status codes, error-message label).
    # The accepted codes intentionally differ per verb (e.g. POST also
    # accepts 201 Created; PATCH/GET also accept 204 No Content).
    dispatch = {
        RequestVerb.post: (
            lambda: session.post(url=endpoint, json=data, timeout=timeoutInSeconds),
            (ok, requests.codes.created), 'Posting to'),
        RequestVerb.delete: (
            lambda: session.delete(url=endpoint, timeout=timeoutInSeconds),
            (ok,), 'Deleting'),
        RequestVerb.put: (
            lambda: session.put(url=endpoint, json=data, timeout=timeoutInSeconds),
            (ok,), 'Putting'),
        RequestVerb.patch: (
            lambda: session.patch(url=endpoint, json=data, timeout=timeoutInSeconds),
            (ok, requests.codes.no_content), 'Patching'),
        RequestVerb.get: (
            lambda: session.get(url=endpoint, timeout=timeoutInSeconds),
            (ok, requests.codes.no_content), 'Getting'),
    }
    if verb not in dispatch:
        print('Make request verb not supported: ' + str(verb))
        return None
    send, accepted, label = dispatch[verb]
    try:
        r = send()
        if r.status_code in accepted:
            return r
        print('Error: ' + str(r.status_code) + ' ' + label + ' Endpoint: ' + str(endpoint))
        return None
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err} Request Verb: {str(verb)} Endpoint: {str(endpoint)}')
        return http_err
    except requests.ConnectionError as err:
        if max_retries > 0:
            # Brief pause, then retry with the budget decremented.
            sleep(0.25)
            return make_request(session, verb, endpoint, data, timeoutInSeconds, max_retries - 1)
        print('Connection Error, will not retry')
        return err
    except Exception as err:
        print(f'Other error occurred: {err}' + ' Request Verb: ' + str(verb) + ' Endpoint: ' + str(endpoint))
        return err
def certificate_managed(
    name, days_remaining=90, append_certs=None, managed_private_key=None, **kwargs
):
    """
    Manage a Certificate
    name
        Path to the certificate
    days_remaining : 90
        Recreate the certificate if the number of days remaining on it
        are less than this number. The value should be less than
        ``days_valid``, otherwise the certificate will be recreated
        every time the state is run. A value of 0 disables automatic
        renewal.
    append_certs:
        A list of certificates to be appended to the managed file.
        They must be valid PEM files, otherwise an error will be thrown.
    managed_private_key:
        Has no effect since v2016.11 and will be removed in Salt Aluminium.
        Use a separate x509.private_key_managed call instead.
    kwargs:
        Any arguments supported by :py:func:`x509.create_certificate
        <salt.modules.x509.create_certificate>` or :py:func:`file.managed
        <salt.states.file.managed>` are supported.
    not_before:
        Initial validity date for the certificate. This date must be specified
        in the format '%Y-%m-%d %H:%M:%S'.
        .. versionadded:: 3001
    not_after:
        Final validity date for the certificate. This date must be specified in
        the format '%Y-%m-%d %H:%M:%S'.
        .. versionadded:: 3001
    Examples:
    .. code-block:: yaml
        /etc/pki/ca.crt:
          x509.certificate_managed:
            - signing_private_key: /etc/pki/ca.key
            - CN: ca.example.com
            - C: US
            - ST: Utah
            - L: Salt Lake City
            - basicConstraints: "critical CA:true"
            - keyUsage: "critical cRLSign, keyCertSign"
            - subjectKeyIdentifier: hash
            - authorityKeyIdentifier: keyid,issuer:always
            - days_valid: 3650
            - days_remaining: 0
            - backup: True
    .. code-block:: yaml
        /etc/ssl/www.crt:
          x509.certificate_managed:
            - ca_server: pki
            - signing_policy: www
            - public_key: /etc/ssl/www.key
            - CN: www.example.com
            - days_valid: 90
            - days_remaining: 30
            - backup: True
    """
    # "path" is accepted as an alias for "name".
    if "path" in kwargs:
        name = kwargs.pop("path")
    # Argument validation: a remote CA needs a signing policy, and at least
    # one source of key material must be supplied.
    if "ca_server" in kwargs and "signing_policy" not in kwargs:
        raise salt.exceptions.SaltInvocationError(
            "signing_policy must be specified if ca_server is."
        )
    if (
        "public_key" not in kwargs
        and "signing_private_key" not in kwargs
        and "csr" not in kwargs
    ):
        raise salt.exceptions.SaltInvocationError(
            "public_key, signing_private_key, or csr must be specified."
        )
    if managed_private_key:
        salt.utils.versions.warn_until(
            "Aluminium",
            "Passing 'managed_private_key' to x509.certificate_managed has no effect and "
            "will be removed Salt Aluminium. Use a separate x509.private_key_managed call instead.",
        )
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    is_valid, invalid_reason, current_cert_info = _certificate_is_valid(
        name, days_remaining, append_certs, **kwargs
    )
    # Existing certificate is still valid: only manage the file itself.
    if is_valid:
        file_args, extra_args = _get_file_args(name, **kwargs)
        return _certificate_file_managed(ret, file_args)
    if __opts__["test"]:
        file_args, extra_args = _get_file_args(name, **kwargs)
        # Use empty contents for file.managed in test mode.
        # We don't want generate a new certificate, even in memory,
        # for security reasons.
        # Using an empty string instead of omitting it will at least
        # show the old certificate in the diff.
        file_args["contents"] = ""
        ret = _certificate_file_managed(ret, file_args)
        ret["result"] = None
        ret["comment"] = "Certificate {} will be created".format(name)
        ret["changes"]["Status"] = {
            "Old": invalid_reason,
            "New": "Certificate will be valid and up to date",
        }
        return ret
    contents = __salt__["x509.create_certificate"](text=True, **kwargs)
    # Check the module actually returned a cert and not an error message as a string
    try:
        __salt__["x509.read_certificate"](contents)
    except salt.exceptions.SaltInvocationError as e:
        ret["result"] = False
        ret[
            "comment"
        ] = "An error occurred creating the certificate {}. The result returned from x509.create_certificate is not a valid PEM file:\n{}".format(
            name, str(e)
        )
        return ret
    # Append any extra PEM certificates, validating each one first.
    if not append_certs:
        append_certs = []
    for append_file in append_certs:
        try:
            append_file_contents = __salt__["x509.get_pem_entry"](
                append_file, pem_type="CERTIFICATE"
            )
            contents += append_file_contents
        except salt.exceptions.SaltInvocationError as e:
            ret["result"] = False
            ret[
                "comment"
            ] = "{} is not a valid certificate file, cannot append it to the certificate {}.\nThe error returned by the x509 module was:\n{}".format(
                append_file, name, str(e)
            )
            return ret
    # Write the new certificate to disk and record the old/new cert diff.
    file_args, extra_args = _get_file_args(name, **kwargs)
    file_args["contents"] = contents
    ret = _certificate_file_managed(ret, file_args)
    if ret["result"]:
        ret["changes"]["Certificate"] = {
            "Old": current_cert_info,
            "New": __salt__["x509.read_certificate"](certificate=name),
        }
        ret["changes"]["Status"] = {
            "Old": invalid_reason,
            "New": "Certificate is valid and up to date",
        }
    return ret
def _unique_field_to_col_matching(
rules: Iterable[Rule], field_to_matching_cols: Dict[str, List[int]]
) -> Dict[str, int]:
"""
Given a potential field to column matching this functions tries to determine a unique 1-to-1
matching.
Returns a dictionary in which each key is a name of a filed and the value is the index of the
best matching column.
"""
# This method works by elimination - we give higher priority to fields with less potential
# matches. We determine their best matching column and then we can eliminate that column for
# other fields.
sorted_rules = sorted(rules, key=lambda r: len(field_to_matching_cols[r.field_name]))
unallocated_columns = set(range(len(rules)))
field_to_col = {}
for r in sorted_rules:
col = [c for c in field_to_matching_cols[r.field_name] if c in unallocated_columns][0]
field_to_col[r.field_name] = col
unallocated_columns.remove(col)
return field_to_col | 5,325,642 |
def minimal_config():
    """Return YAML parsing result for (somatic) configuration"""
    # round_trip_load (ruamel.yaml) preserves ordering so tests can compare
    # the parsed structure faithfully.
    return yaml.round_trip_load(
        textwrap.dedent(
            r"""
        static_data_config:
          reference:
            path: /path/to/ref.fa
          dbsnp:
            path: /path/to/dbsnp.vcf.gz
        step_config:
          ngs_mapping:
            tools:
              dna: ['bwa']
            compute_coverage_bed: true
            path_target_regions: /path/to/regions.bed
            bwa:
              path_index: /path/to/bwa/index.fa
          targeted_seq_cnv_calling:
            tools:
              - xhmm
              - gcnv
            xhmm:
              path_target_interval_list_mapping:
                - pattern: "Agilent SureSelect Human All Exon V6.*"
                  name: "Agilent_SureSelect_Human_All_Exon_V6"
                  path: /path/to/Agilent/SureSelect_Human_All_Exon_V6_r2/GRCh37/Exons.bed
            gcnv:
              path_target_interval_list_mapping:
                - pattern: "Agilent SureSelect Human All Exon V6.*"
                  name: "Agilent_SureSelect_Human_All_Exon_V6"
                  path: /path/to/Agilent/SureSelect_Human_All_Exon_V6_r2/GRCh37/Exons.bed
              path_uniquely_mapable_bed: /path/to/uniquely/mappable/variable/GRCh37/file.bed.gz
        data_sets:
          first_batch:
            file: sheet.tsv
            search_patterns:
              - {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'}
            search_paths: ['/path']
            type: germline_variants
            naming_scheme: only_secondary_id
        """
        ).lstrip()
    )
def plot_bboxes(bboxes, img, map_class_name=None):
    """
    description: Plots bounding boxes on an image (modified in place), one
        rectangle per box plus a filled label banner with class and confidence.
    param:
        bboxes (np.ndarray): bounding boxes, one per row as
            [x1, y1, x2, y2, confidence, class_id]; may be None (no-op)
        img (np.ndarray): an image object, drawn on in place
        map_class_name (dict): optional map from class id to class name;
            when omitted the numeric class id is shown instead
    """
    if bboxes is None:
        return
    # generate color for unique class
    color = Colors()
    for bbox in bboxes:
        class_id = int(bbox[5])
        # Corner points of the box (top-left, bottom-right).
        c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3]))
        cv2.rectangle(img,
                      c1,
                      c2,
                      color(class_id, True),
                      thickness=2,
                      lineType=cv2.LINE_AA)
        if map_class_name is not None:
            class_name = map_class_name[class_id]
        else:
            class_name = str(class_id)
        text = 'class: ' + class_name + ' conf: {:.2f}'.format(bbox[4])
        # Size the label banner to the rendered text and draw it filled
        # just above the box's top-left corner.
        t_size = cv2.getTextSize(text, 0, fontScale=0.5, thickness=1)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color(class_id, True), -1, cv2.LINE_AA)
        cv2.putText(
            img,
            text,
            (c1[0], c1[1] - 2),
            0,
            0.5,
            [225, 255, 255],
            thickness=1,
            lineType=cv2.LINE_AA,
        )
def data_prep_pipeline(
    root,
    input_filenames_or_glob,
    output_filename,
    data_prep_behavior,
    beam_params,
    suffix,
):
    """Set up beam data prep pipeline based on `data_prep_behavior`."""
    # Arguments shared by every pipeline flavor.
    shared_kwargs = dict(
        input_filenames=input_filenames_or_glob,
        output_filename=output_filename,
        suffix=suffix,
    )
    if data_prep_behavior == 'many_models':
        make_many_models_beam_pipeline(
            root,
            **shared_kwargs,
            **beam_params)
    elif data_prep_behavior == 'many_embeddings_single_model':
        multiple_embeddings_from_single_model_pipeline(
            root,
            # Specific args.
            chunk_len=FLAGS.chunk_len,
            embedding_length=FLAGS.embedding_length,
            **shared_kwargs,
            **beam_params)
    elif data_prep_behavior == 'chunked_audio':
        precompute_chunked_audio_pipeline(
            root,
            # Specific args.
            chunk_len=FLAGS.chunk_len,
            embedding_length=FLAGS.embedding_length,
            compute_embeddings_on_chunked_audio=FLAGS.compute_embeddings_on_chunked_audio,  # pylint:disable=line-too-long
            **shared_kwargs,
            **beam_params)
    elif data_prep_behavior == 'batched_single_model':
        batched_chunked_single_model_pipeline(
            root,
            # Specific args.
            chunk_len=FLAGS.chunk_len,
            embedding_length=FLAGS.embedding_length,
            batch_size=FLAGS.batch_size,
            **shared_kwargs,
            **beam_params)
    else:
        raise ValueError(
            f'data_prep_behavior not recognized: {data_prep_behavior}')
def walk(n=1000, mu=0, sigma=1, alpha=0.01, s0=NaN):
    """
    Mean reverting random walk.
    Returns an array of n-1 steps in the following process::
        s[i] = s[i-1] + alpha*(mu-s[i-1]) + e[i]
    with e ~ N(0,sigma).
    The parameters are::
        *n* walk length
        *s0* starting value, defaults to N(mu,sigma)
        *mu* target mean, defaults to 0
        *sigma* volatility
        *alpha* in [0,1] reversion rate
    Use alpha=0 for a pure Gaussian random walk or alpha=1 independent
    samples about the mean.
    If *mu* is a vector, multiple streams are run in parallel. In this
    case *s0*, *sigma* and *alpha* can either be scalars or vectors.
    If *mu* is an array, the target value is non-stationary, and the
    parameter *n* is ignored.
    Note: the default starting value should be selected from a distribution
    whose width depends on alpha. N(mu,sigma) is too narrow. This
    effect is illustrated in :function:`demo`, where the following choices
    of sigma and alpha give approximately the same histogram::
        sigma = [0.138, 0.31, 0.45, 0.85, 1]
        alpha = [0.01, 0.05, 0.1, 0.5, 1]
    """
    s0, mu, sigma, alpha = [asarray(v) for v in (s0, mu, sigma, alpha)]
    # One chain per entry of a vector mu; scalar mu runs a single chain.
    nchains = mu.shape[0] if mu.ndim > 0 else 1
    if mu.ndim < 2:
        # NOTE(review): isnan(s0) assumes a scalar s0; a vector s0 would make
        # this truth value ambiguous — confirm callers only pass scalars.
        if isnan(s0):
            s0 = mu + util.rng.randn(nchains)*sigma
        s = [s0*ones_like(mu)]
        for i in range(n-1):
            s.append(s[-1] + alpha*(mu-s[-1]) + sigma*util.rng.randn(nchains))
    elif mu.ndim == 2:
        if isnan(s0):
            s0 = mu[0] + util.rng.randn(nchains)*sigma
        s = [s0*ones_like(mu[0])]
        # NOTE(review): loops over mu.shape[1] but indexes rows mu[i]; if mu is
        # laid out (time, chains) this looks like it should be mu.shape[0] —
        # confirm intended orientation before relying on the 2D branch.
        for i in range(mu.shape[1]):
            s.append(s[-1] + alpha*(mu[i]-s[-1])
                     + sigma*util.rng.randn(nchains))
    else:
        raise ValueError("mu must be scalar, vector or 2D array")
    return asarray(s)
def GetCLIInfoMgr():
    """ Get the vmomi type manager """
    # Returns the module-level singleton initialized elsewhere in this module.
    return _gCLIInfoMgr
def pretty_date(time=False):
    """
    Get a datetime object or an int()/float() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.

    Falsy input (the default) yields 'just now'; a time in the future
    yields ''. Any other type raises TypeError.
    """
    from datetime import datetime
    now = datetime.now()
    # bool is excluded: True/False are ints but not meaningful timestamps.
    if isinstance(time, (int, float)) and not isinstance(time, bool):
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    else:
        # Previously unsupported types fell through and crashed with a
        # confusing NameError; fail with an explicit error instead.
        raise TypeError('unsupported type for time: %r' % (time,))
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        return ''
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            # Floor division: plain / on Python 3 produced '2.5 minutes ago'.
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
def per_cpu_times():
    """Return system CPU times as a list of named tuples, one per CPU."""
    return [scputimes(user, nice, system, idle)
            for user, nice, system, idle in cext.per_cpu_times()]
def cooldown(rate, per, type=commands.BucketType.default):
    """See `commands.cooldown` docs"""
    def wrapper(target):
        bucket = Cooldown(rate, per, type)
        if isinstance(target, Command):
            # Command objects store cooldowns in their bucket mapping.
            target._buckets = CooldownMapping(bucket)
        else:
            # Plain callables carry the cooldown until command creation.
            target.__commands_cooldown__ = bucket
        return target
    return wrapper
def construct_psi_k2(theta, y, X, kappa = 30):
    """
    Kappa-based filter for time-varying autoregressive component, based on
    Platteau (2021)

    :param theta: parameter vector (omega, alpha, beta)
    :param y: observed series (pandas Series / indexable by .iloc)
    :param X: regressor series (pandas Series / indexable by .iloc)
    :param kappa: rolling-window length for the regression filter
    :return: (psi, 0, 1) where psi is the filtered autoregressive component
    """
    #get parameter vector
    T = len(y)
    omega = theta[0]
    alpha = theta[1]
    beta = theta[2]
    #Filter Volatility
    psi = np.zeros(T)
    #initialize volatility at unconditional variance
    psi[0] = omega/(1-alpha)
    #initialise the regression filter values
    t = 0
    # Rolling cross-moment lists: x*y and x^2 over the current window.
    xylist = [X.iloc[t]*(y.iloc[t])]
    x2list = [X.iloc[t]**2]
    xysum = sum(xylist)
    x2sum = sum(x2list)
    #do the first filtering
    psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum )
    #Continue filtering, as long as kappa not reached, use all available elements
    for t in range(1,kappa):
        xylist.append(X.iloc[t]*(y.iloc[t]))
        x2list.append(X.iloc[t]**2)
        xysum = sum(xylist)
        x2sum = sum(x2list)
        psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum )
    #When kappa is reached, also drop the first instance in each iteration
    for t in range(kappa -1,T-1):
        xylist.append(X.iloc[t]*(y.iloc[t]))
        x2list.append(X.iloc[t]**2)
        xylist.pop(0)
        x2list.pop(0)
        # Sums are recomputed from scratch each step (O(kappa)); a running
        # sum would change floating-point accumulation order.
        xysum = sum(xylist)
        x2sum = sum(x2list)
        psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum )
    #return the autoregressive component
    return psi, 0, 1
def calHoahaoSancai(tian_ge, ren_ge, di_ge):
    """
    Sancai (Three Talents) five-element luck calculation.

    :param tian_ge: "heaven" grid number of the name
    :param ren_ge: "person" grid number of the name
    :param di_ge: "earth" grid number of the name
    :return: (sancai five-element string, result rating, evaluation text);
        when the combination is not in the lookup table the rating is
        constants.RESULT_UNKNOWN and the evaluation is None.
    """
    # Concatenate the five-element symbol of each of the three grids.
    sancai = getSancaiWuxing(tian_ge) + getSancaiWuxing(ren_ge) + getSancaiWuxing(di_ge)
    if sancai in g_sancai_wuxing_dict:
        data = g_sancai_wuxing_dict[sancai]
        return sancai, data['result'], data['evaluate']
    else:
        return sancai, constants.RESULT_UNKNOWN, None
def schema_downgrades():
    """schema downgrade migrations go here."""
    # Reverse of the matching upgrade: drop the installed_payloads table.
    op.drop_table('installed_payloads')
def load_coeff_swarm_mio_internal(path):
    """Load internal model coefficients and other parameters
    from a Swarm MIO_SHA_2* product file.
    """
    with open(path, encoding="ascii") as source:
        parsed = parse_swarm_mio_file(source)
    coefficients = SparseSHCoefficientsMIO(
        parsed["nm"],
        parsed["gh"],
        ps_extent=(parsed["pmin"], parsed["pmax"], parsed["smin"], parsed["smax"]),
        is_internal=True,
    )
    return coefficients, parsed
def verify_variable_with_environment(var, var_name, env_name):
    """Reconcile a user-supplied value with its environment variable.

    When `var` is given, the environment variable `env_name` is forced to
    match it (logging a warning if it previously disagreed) and `var` is
    returned. When `var` is None, the value is taken from the environment
    via define_variable_from_environment (which exits if undefined there too).

    Args:
        var: user-specified value, or None to fall back to the environment.
        var_name: human-readable variable name, used only for messages.
        env_name: name of the environment variable backing this value.
    Returns:
        The reconciled value.
    """
    log = get_logger()
    if var is None:
        return define_variable_from_environment(env_name, var_name)
    if env_name in os.environ and var != os.environ[env_name]:
        previous = os.environ[env_name]
        log.warning(f"Warning, overwriting what the environment variable is for {env_name}")
        log.info(f"\tOld {env_name}: {previous}")
        log.info(f"\tNew {env_name}: {var}")
    os.environ[env_name] = var
    return var
def cli_frontend(ctx, verbose, test_env):
    """
    Optimus is a static site builder using Jinja2, webassets and Babel.
    """
    # A verbosity of 0 means "quiet": log at the lowest level, no printing.
    show_output = verbose != 0
    if not show_output:
        verbose = 1
    # Verbosity counts in the opposite direction of logging levels.
    level_names = list(OPTIMUS_LOGGER_CONF)
    level_names.reverse()
    # Configure the root logger from the selected level.
    root_logger = init_logger(level_names[verbose], printout=show_output)
    # Default context handed down to subcommands.
    ctx.obj = {
        "verbosity": verbose,
        "logger": root_logger,
        "test_env": test_env,
    }
def group_n(gen, n):
    """
    Chunk the data source into consecutive groups of exactly `n` rows,
    yielding each group as a float ndarray. A trailing partial group
    (fewer than n rows) is silently dropped.

    :param gen: iterable data source of row-like sequences
    :param n: group size
    """
    batch = []
    for row in gen:
        batch.append(np.asarray(row, dtype=float))
        if len(batch) == n:
            yield np.asarray(batch)
            batch = []
def plot_trajectory_from_data(X : np.ndarray, y : np.ndarray, sample_n = 0, excludeY=True, ylabel=None, xlabel=None):
    """
    Plots trajectory from data.

    :param X: data of shape (samples, time steps, dims); one line is drawn
        per dimension of the selected sample
    :param y: per-sample target values, appended as the final point of each
        line when excludeY is False
    :param sample_n: sample index
    :param excludeY: when True, only X values are plotted
    :param ylabel: y-axis label (defaults to "State")
    :param xlabel: x-axis label (defaults to "Time steps")
    :return: (fig, ax) matplotlib figure and axes
    """
    fig, ax = plt.subplots()
    dim = X.shape[2]
    for d in range(dim):
        trajectory = list(X[sample_n,:,d].squeeze())
        if not excludeY: trajectory.append(y[sample_n])
        # x values are 1-based time steps.
        ax.plot(list(range(1,len(trajectory)+1)) ,trajectory, alpha=0.9)
    if not xlabel:
        ax.set_xlabel("Time steps")
    else:
        ax.set_xlabel(xlabel)
    if not ylabel:
        ax.set_ylabel("State")
    else:
        ax.set_ylabel(ylabel)
    # Cosmetic styling: hide top/right spines, thicken the visible axes.
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.spines['bottom'].set_linewidth(3)
    ax.spines['left'].set_linewidth(3)
    return fig, ax
def ls(directory, create=False):
    """
    List the plain files directly inside a directory, optionally creating
    the directory first.
    If create is falsy and the directory does not exist, the underlying
    os call raises.
    """
    if create and not os.path.exists(directory):
        os.mkdir(directory)
    files = []
    for entry in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, entry)):
            files.append(entry)
    return files
def test_listing_log_subdir_files_for_day(dispatcher, filesystem, mocker):
    """Test --list flag on log lists all files"""
    # Patch builtins.print so the listing output can be asserted on; the
    # bare `print` below refers to this mock.
    mocker.patch('builtins.print')
    log_base = filesystem.get_log_base_path()
    day_path = log_base / '2020/01-january/2020-01-01'
    day_path.mkdir(parents=True)
    (day_path / 'index.md').touch()  # this will be ignored
    (day_path / 'journal').mkdir()
    (day_path / 'journal' / 'morning.md').touch()
    (day_path / 'journal' / 'evening.md').touch()
    # For LogDate the path is in the file, but must end with a slash
    dispatcher.run(['today', 'journal/', '--list'])
    # With any_order=False we make sure they are printed in this order too
    print.assert_has_calls([
        call('evening'),
        call('morning')],
        any_order=False)
async def get_prices(database, match_id):
    """Get market prices for a match, oldest first, as a list of dicts."""
    query = """
        select
            timestamp::interval(0), extract(epoch from timestamp)::integer as timestamp_secs,
            round((food + (food * .3)) * 100) as buy_food, round((wood + (wood * .3)) * 100) as buy_wood, round((stone + (stone * .3)) * 100) as buy_stone,
            round((food - (food * .3)) * 100) as sell_food, round((wood - (wood * .3)) * 100) as sell_wood, round((stone - (stone * .3)) * 100) as sell_stone
        from market
        where match_id=:match_id
        order by timestamp
    """
    rows = await database.fetch_all(query, values={"match_id": match_id})
    return [dict(row) for row in rows]
def ete_database_data():
    """ Return path to ete3 database json, creating an empty registry if absent.

    The file lives at $HOME/.mtsv/ete_databases.json and is initialized to
    an empty JSON object on first use.
    """
    user = os.environ.get('HOME', '/')
    fp = os.path.join(user, ".mtsv/ete_databases.json")
    # Ensure ~/.mtsv exists: without this, the open() below raised
    # FileNotFoundError on a fresh home directory.
    os.makedirs(os.path.dirname(fp), exist_ok=True)
    if not os.path.isfile(fp):
        with open(fp, 'w') as outfile:
            outfile.write("{}")
    return fp
def link(srcPath, destPath):
    """Create a hard link at destPath referring to the file at srcPath."""
    import os
    os.link(srcPath, destPath)
def isChinese():
    """
    Determine whether the current system language is Chinese,
    i.e. whether the detected system locale equals 'zh_CN'.
    """
    return SYSTEM_LANGUAGE == 'zh_CN'
def load_db(db):
    """
    Load a database file as a DataFrame indexed by (user, session),
    extracting the matching zip archive first when necessary.
    """
    if db in (DEV_GENUINE, DEV_IMPOSTOR):
        extract_dev_db()
    if db in (GENUINE, UNKNOWN):
        extract_test_db()
    return pd.read_csv(db, index_col=[0, 1])
def fetch_rrlyrae_mags(data_home=None, download_if_missing=True):
    """Loader for RR-Lyrae data
    Parameters
    ----------
    data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data'.
    download_if_missing : optional, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.
    Returns
    -------
    data : recarray, shape = (483,)
        record array containing imaging data
    Examples
    --------
    >>> from astroML.datasets import fetch_rrlyrae_mags
    >>> data = fetch_rrlyrae_mags()  # doctest: +IGNORE_OUTPUT
    >>> data.shape  # number of objects in dataset
    (483,)
    Notes
    -----
    This data is from table 1 of Sesar et al 2010 ApJ 708:717
    """
    # fits is an optional dependency: don't import globally
    from astropy.io import fits
    data_home = get_data_home(data_home)
    archive_file = os.path.join(data_home, os.path.basename(DATA_URL))
    if not os.path.exists(archive_file):
        if not download_if_missing:
            raise IOError('data not present on disk. '
                          'set download_if_missing=True to download')
        fitsdata = download_with_progress_bar(DATA_URL)
        # Context manager so the handle is closed (and data flushed) even if
        # the write fails; the previous bare open() leaked the file object.
        with open(archive_file, 'wb') as fh:
            fh.write(fitsdata)
    # Close the FITS file deterministically after copying the table out.
    with fits.open(archive_file) as hdulist:
        return np.asarray(hdulist[1].data)
def is_implemented_in_notebook(cls):
    """Check if the remote class is implemented in environments like a
    notebook (e.g., ipython, notebook), i.e. its defining module has no
    backing source file.

    Args:
        cls: class
    """
    assert inspect.isclass(cls)
    module_name = getattr(cls, '__module__', None)
    if module_name is None:
        return True
    defining_module = sys.modules.get(module_name)
    if getattr(defining_module, '__file__', None):
        return False
    return True
def publish_layer(layer_name: str, desc: str) -> None:
    """Publish the packaged layer zip (LAYER_ZIP) as a new AWS Lambda layer
    version compatible with the configured Python runtime.

    :param layer_name: Name of the layer.
    :param desc: Description of the layer.
    :raises RuntimeError: If the publish_layer_version call fails.
    """
    # Read the whole archive up front; the API wants raw zip bytes.
    with open(LAYER_ZIP, 'rb') as archive:
        zip_payload = archive.read()
    try:
        response = CLIENT.publish_layer_version(
            LayerName=layer_name,
            Description=desc,
            Content={'ZipFile': zip_payload},
            CompatibleRuntimes=[PYTHON_VERSION],
        )
    except Exception as err:
        raise RuntimeError('Publish layer FAILED.') from err
    logger.info(response)
def _pool_tags(hash, name):
"""Return a dict with "hidden" tags to add to the given cluster."""
return dict(__mrjob_pool_hash=hash, __mrjob_pool_name=name) | 5,325,669 |
def main():
    """ Simple test of phylotree functions.

    With a filename argument, parses that phylotree file and prints each
    haplogroup with its variants; otherwise runs the built-in example and
    compares it against a hard-coded expected mapping.
    """
    if len(sys.argv) > 1:
        # Parse the user-supplied phylotree file.
        phy_fn = sys.argv[1]
        with open(phy_fn, 'r') as phy_in:
            phy = Phylotree(phy_in, anon_haps=True)
        for hap in phy.hap_var:
            print(hap, ','.join(phy.hap_var[hap]))
    else:
        # No file given: exercise the example tree against expected output.
        phy = example()
        hap_var = dict({'A':['A1G', 'A2T', 'A4T'],
                        'B':['A1G', 'A3T', 'A5T', 'A6T', 'A8T'],
                        'C':['A1G', 'A3T', 'T5A', 'A6T'],
                        'D':['A1G', 'A3T', 'A5T', 'A7T', 'A9T'],
                        'E':['A1G', 'A3T', 'A4T', 'A5T', 'A7T'],
                        'F':['A1G', 'A3T', 'A5T', 'A6T'],
                        'G':['A1G', 'A3T', 'A5T', 'A7T'],
                        'H':['A1G', 'A3T', 'A5T'],
                        'I':['A1G']})
        for hap in sorted(phy.hap_var):
            print(hap, phy.hap_var[hap], hap_var[hap])
        print(phy.variants)
        phy.root.dump()
    return 0
def clusters_dictionary():
    """
    Read the column 'label' from final_dataframe.tsv' and return the clusters as a dictionary.
    If the column 'label' is not in final_dataframe.tsv', call k_means_clustering and perform
    the clustering.
    :return: a dictionary, where the key is the cluster id and the value is a list of Areas
    """
    # Open sample
    df = pd.read_csv(Path("dataframes/") / 'final_dataframe.tsv', sep='\t', skiprows=0, encoding='utf-8',
                     dtype={'Postal code': object})
    if 'label' not in df.columns:
        # No precomputed labels: run the clustering and use its dataframe.
        _, df = k_means_clustering()
    cluster_dic = {}
    # NOTE(review): iterating list(set(...)) makes the key insertion order
    # arbitrary; callers must not depend on dict ordering here.
    for i in list(set(df['label'].to_list())):
        cluster_dic[i] = df[df.label == i][['Postal code', 'Area']].values
    return cluster_dic
def get_valid_blendable_attribute_types():
    """
    Returns a list of valid blendable attribute types in current DCC
    :return: list(str)
    :raises NotImplementedError: always; a DCC-specific implementation
        must override this function.
    """
    raise NotImplementedError()
def test_mmb_predict_sample_from_extracted_vba_df():
    """
    Test ensures the mmb_predict function can make a prediction from a single vba_sample.
    """
    # Reset shared test state before building the model from the fixture paths.
    resetTest()
    mmb = MaliciousMacroBot(benign_path,
                            malicious_path,
                            model_path, retain_sample_contents=False)
    result = mmb.mmb_init_model(modelRebuild=True)
    # Predict directly on raw VBA text rather than an extracted file.
    samplevba = 'MsgBox "this is vba"'
    predresult = mmb.mmb_predict(samplevba, datatype='vba')
    predicted_label = predresult.iloc[0]['prediction']
    logging.info('predicted label: {}'.format(predicted_label))
    # Only the label vocabulary is asserted, not the specific class.
    assert(predicted_label == 'benign' or predicted_label == 'malicious')
def set_random_state(state_dict):
    """Resets the random state for experiment restore.

    :param state_dict: dict with keys "random", "numpy" and "torch" holding
        states captured via random.getstate(), np.random.get_state() and
        torch.get_rng_state() respectively.
    """
    random.setstate(state_dict["random"])
    np.random.set_state(state_dict["numpy"])
    # `torch.torch.set_rng_state` relied on an accidental self-reference of
    # the torch package; call the documented top-level function directly.
    torch.set_rng_state(state_dict["torch"])
def normalize_medians_for_batch(expression_matrix, meta_data, **kwargs):
    """
    Calculate the median UMI count per cell for each batch. Transform all batches by dividing by a size correction
    factor, so that all batches have the same median UMI count (which is the median batch median UMI count)
    :param expression_matrix: pd.DataFrame
        cells x genes expression counts
    :param meta_data: pd.DataFrame
        per-cell metadata, row-aligned with expression_matrix
    :param batch_factor_column: str
        Which meta data column should be used to determine batches
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    utils.Debug.vprint('Normalizing median counts between batches ... ')
    # Get UMI counts for each cell
    umi = expression_matrix.sum(axis=1)
    # Create a new dataframe with the UMI counts and the factor to batch correct on
    umi = pd.DataFrame({'umi': umi, batch_factor_column: meta_data[batch_factor_column]})
    # Group and take the median UMI count for each batch
    median_umi = umi.groupby(batch_factor_column).agg('median')
    # Convert to a correction factor based on the median of the medians
    median_umi = median_umi / median_umi['umi'].median()
    # Broadcast each batch's factor back onto its member cells.
    umi = umi.join(median_umi, on=batch_factor_column, how="left", rsuffix="_mod")
    # Apply the correction factor to all the data
    return expression_matrix.divide(umi['umi_mod'], axis=0), meta_data
def drbd_stop(resource):
    """Stop drbd resource

    Demotes the resource to secondary, waits briefly for the demotion to
    take effect, then brings the resource down.

    :param resource: drbd resource name passed to drbdadm
    :raises subprocess.CalledProcessError: re-raised after logging when
        either drbdadm invocation fails
    """
    try:
        subprocess.check_call(["drbdadm", "secondary", resource],
                              stdout=DEVNULL)
        # Allow time for demotion to be processed
        time.sleep(1)
        subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to stop drbd %s" % resource)
        raise
def apply_wet_day_frequency_correction(ds, process):
    """
    Apply the wet-day frequency correction from Cannon et al. (2015).

    Parameters
    ----------
    ds : xr.Dataset
    process : {"pre", "post"}
        "pre" replaces exact zeros with a small random value below the
        wet-day threshold; "post" zeroes everything below the threshold.

    Returns
    -------
    xr.Dataset

    Notes
    -------
    [1] A.J. Cannon, S.R. Sobie, & T.Q. Murdock, "Bias correction of GCM precipitation by quantile mapping: How well do methods preserve changes in quantiles and extremes?", Journal of Climate, vol. 28, Issue 7, pp. 6938-6959.
    """
    threshold = 0.05  # mm/day
    low = 1e-16
    if process == "pre":
        # NOTE(review): np.random.uniform(...) returns a single scalar here,
        # so every zero entry receives the *same* random fill value -- confirm
        # whether per-element random draws were intended.
        return ds.where(ds != 0.0, np.random.uniform(low=low, high=threshold))
    if process == "post":
        return ds.where(ds >= threshold, 0.0)
    raise ValueError("this processing option is not implemented")
def get_nb_build_nodes_and_entities(city, print_out=False):
    """
    Returns number of building nodes and building entities in city

    Parameters
    ----------
    city : object
        City object of pycity_calc
    print_out : bool, optional
        Print out results (default: False)

    Returns
    -------
    res_tuple : tuple
        Results tuple with number of building nodes (int) and
        number of building entities
        (nb_b_nodes, nb_buildings)

    Annotations
    -----------
    building node might also be PV- or wind-farm (not only building entity)
    """
    # Entity kinds that count as a "building node"
    node_entity_kinds = ('building', 'windenergyconverter', 'pv')
    nb_b_nodes = 0
    nb_buildings = 0
    for node_id in city.nodes():
        node_data = city.nodes[node_id]
        # Only nodes tagged as building nodes are of interest
        if node_data.get('node_type') != 'building':
            continue
        if 'entity' not in node_data:
            continue
        kind = node_data['entity']._kind
        if kind == 'building':
            nb_buildings += 1
        if kind in node_entity_kinds:
            nb_b_nodes += 1
    if print_out:  # pragma: no cover
        print('Number of building nodes (Buildings, Wind- and PV-Farms):')
        print(nb_b_nodes)
        print()
        print('Number of buildings: ', nb_buildings)
        print()
    return (nb_b_nodes, nb_buildings)
def generate_pairs(agoals, props):
    """Forms all the pairs that are applicable to the current goals"""
    all_pairs = []
    # Unordered pairs of goals, including each goal paired with itself
    for idx, first_goal in enumerate(agoals):
        for second_goal in agoals[idx:]:
            all_pairs.extend(form_pairs(first_goal, second_goal, props))
    if props.sort_distinct_pos:
        # Order by the number of distinct positions remaining after the pair
        all_pairs.sort(key=lambda pair: distinct_pos(
            set.union(*agoals).difference(pair[1][0]) | pair[1][1]))
    return all_pairs
def construct_aircraft_data(args):
    """
    create the set of aircraft data
    :param args: parser argument class
    :return: aircraft_name(string), aircraft_data(list)
    """
    # Attribute names gathered from the parser, in the order the
    # downstream consumers expect them.
    field_names = ('passenger_number', 'overall_length', 'width', 'height',
                   'fuselage_width', 'fuselage_height', 'max_takeoff_weight',
                   'max_landing_weight', 'max_zero_fuel_weight',
                   'cargo_volume', 'cruise_mach', 'cruise_altitude',
                   'cruise_range', 'lift_by_drag', 'wing_area',
                   'aspect_ratio', 'rectangle_angle',
                   'ratio_of_thickness_and_chord', 'vertical_wing_width',
                   'horizontal_wing_width')
    aircraft_data = [getattr(args, name) for name in field_names]
    return args.aircraft_name, aircraft_data
def Oplus_simple(ne):
    """Return ``ne`` unchanged.

    NOTE(review): presumably a trivial O+ density model that takes the O+
    density equal to the electron density ``ne`` -- confirm against callers.
    """
    return ne
def save_update(self, entity_manager = None):
    """
    Persists the current instance (create or update) through the
    data source configured in the entity manager.

    :type entity_manager: EntityManager
    :param entity_manager: The optional entity manager
    reference to be used.
    """
    # fall back to the default "embedded" entity manager
    # when no explicit manager is provided
    target_manager = entity_manager if entity_manager else self._entity_manager
    # refresh the instance with the current context values
    # (this is a no-op when no context is available)
    self.set_context(entity_manager = target_manager)
    # delegate the actual save-or-update to the entity manager
    target_manager.save_update(self)
def test_export_filename_template_1():
    """Export photos using a --filename template and check the resulting
    file names against the expected list."""
    import glob
    import locale
    import os
    import os.path

    from osxphotos.__main__ import export

    locale.setlocale(locale.LC_ALL, "en_US")
    runner = CliRunner()
    cwd = os.getcwd()
    # pylint: disable=not-context-manager
    with runner.isolated_filesystem():
        result = runner.invoke(
            export,
            [
                os.path.join(cwd, CLI_PHOTOS_DB),
                ".",
                "-V",
                "--filename",
                "{created.year}-{original_name}",
            ],
        )
        assert result.exit_code == 0
        # isolated_filesystem() cd'ed into the temp dir, so glob is relative
        files = glob.glob("*.*")
        assert sorted(files) == sorted(CLI_EXPORTED_FILENAME_TEMPLATE_FILENAMES1)
def lin_exploit(version):
    """
    Look up a known local privilege-escalation exploit for a Linux kernel
    version string (e.g. '2.6.32' or '4.4.0-21-generic').

    :param version: kernel release string, as reported by ``uname -r``
    :return: ``(name, cve, src)`` for the first matching exploit,
        ``None`` when the kernel family is known but nothing matches,
        or a message string when the kernel family is unknown.
    """
    import re

    def version_key(ver):
        # Compare kernel versions numerically instead of lexicographically
        # ('2.6.9' < '2.6.11' must hold, which plain string comparison
        # breaks). Suffixes such as '-rc1' or '-21-generic' contribute their
        # numeric parts as extra tuple components, keeping a total order.
        return tuple(int(part) for part in re.findall(r'\d+', ver))

    kernel = version
    exploits_2_0 = {
        'Segment Limit Privilege Escalation': {'min': '2.0.37', 'max': '2.0.38', 'cve': ' CVE-1999-1166', 'src': 'https://www.exploit-db.com/exploits/19419/'}
    }
    exploits_2_2 = {
        'ptrace kmod Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'},
        'mremap Privilege Escalation': {'min': '2.2.0', 'max': '2.2.26', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'ptrace setuid Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'},
        'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'},
        'Privileged Process Hijacking Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'},
        'Sendmail Capabilities Privilege Escalation': {'min': '2.2.0', 'max': '2.2.16', 'cve': 'CVE-2000-0506', 'src': 'https://www.exploit-db.com/exploits/20001/'}
    }
    exploits_2_4 = {
        'ptrace kmod Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'},
        'do_brk Privilege Escalation': {'min': '2.4.0', 'max': '2.4.23', 'cve': 'CVE-2003-0961', 'src': 'https://www.exploit-db.com/exploits/131/'},
        'do_mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.24', 'cve': ' CVE-2003-0985', 'src': 'https://www.exploit-db.com/exploits/145/'},
        'mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.25', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'uselib Privilege Escalation': {'min': '2.4.0', 'max': '2.4.29-rc2', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'},
        'bluez Privilege Escalation': {'min': '2.4.6', 'max': '2.4.30-rc2', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'},
        'System Call Emulation Privilege Escalation': {'min': '2.4.0', 'max': '2.4.37.10', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'},
        'ptrace setuid Privilege Escalation': {'min': '2.4.0', 'max': '2.4.10', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'},
        'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.4.0', 'max': '2.4.4', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'},
        'Privileged Process Hijacking Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'},
        'sock_sendpage Privilege Escalation': {'min': '2.4.4', 'max': '2.4.37.4', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'},
        'pipe.c Privilege Escalation': {'min': '2.4.1', 'max': '2.4.37', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/9844/'},
        'Ptrace Privilege Escalation': {'min': '2.4.0', 'max': '2.4.35.3', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'}
    }
    exploits_2_6 = {
        'mremap Privilege Escalation': {'min': '2.6.0', 'max': '2.6.2', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'uselib Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'},
        'bluez Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'},
        'SYS_EPoll_Wait Privilege Escalation': {'min': '2.6.0', 'max': '2.6.12', 'cve': 'CVE-2005-0736', 'src': 'https://www.exploit-db.com/exploits/1397/'},
        'logrotate prctl Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2031/'},
        'proc Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2013/'},
        'System Call Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'},
        'BlueTooth Stack Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/4756/'},
        'vmsplice Privilege Escalation': {'min': '2.6.17', 'max': '2.6.24.1', 'cve': 'CVE-2008-0600', 'src': 'https://www.exploit-db.com/exploits/5092/'},
        'ftruncate()/open() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22', 'cve': 'CVE-2008-4210', 'src': 'https://www.exploit-db.com/exploits/6851/'},
        'exit_notify() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.30-rc1', 'cve': 'CVE-2009-1337', 'src': 'https://www.exploit-db.com/exploits/8369/'},
        'UDEV Privilege Escalation': {'min': '2.6.0', 'max': '2.6.40', 'cve': 'CVE-2009-1185', 'src': 'https://www.exploit-db.com/exploits/8478/'},
        'ptrace_attach() Race Condition': {'min': '2.6.0', 'max': '2.6.30-rc4', 'cve': 'CVE-2009-1527', 'src': 'https://www.exploit-db.com/exploits/8673/'},
        'Samba Share Privilege Escalation': {'min': '2.6.0', 'max': '2.6.39', 'cve': 'CVE-2004-0186', 'src': 'https://www.exploit-db.com/exploits/23674/'},
        'ReiserFS xattr Privilege Escalation': {'min': '2.6.0', 'max': '2.6.35', 'cve': 'CVE-2010-1146', 'src': 'https://www.exploit-db.com/exploits/12130/'},
        'sock_sendpage Privilege Escalation': {'min': '2.6.6', 'max': '2.6.30.5', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'},
        'pipe.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-rc6', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/33322/'},
        'Sys_Tee Privilege Escalation': {'min': '2.6.0', 'max': '2.6.17.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/29714/'},
        'Linux Kernel Privilege Escalation': {'min': '2.6.18', 'max': '2.6.18-20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/10613/'},
        'Dirty COW': {'min': '2.6.22', 'max': '4.8.3', 'cve': 'CVE-2016-5195', 'src': 'https://www.exploit-db.com/exploits/40616/'},
        'compat Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36', 'cve': 'CVE-2010-3081', 'src': 'https://www.exploit-db.com/exploits/15024/'},
        'DEC Alpha Linux - Privilege Escalation': {'min': '2.6.28', 'max': '3.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/17391/'},
        'SELinux (RHEL 5) - Privilege Escalation': {'min': '2.6.30', 'max': '2.6.31', 'cve': 'CVE-2009-1897', 'src': 'https://www.exploit-db.com/exploits/9191/'},
        'proc Handling SUID Privilege Escalation': {'min': '2.6.0', 'max': '2.6.38', 'cve': 'CVE-2011-1020', 'src': 'https://www.exploit-db.com/exploits/41770/'},
        'PERF_EVENTS Privilege Escalation': {'min': '2.6.32', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/25444/'},
        'RDS Protocol Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc8', 'cve': 'CVE-2010-3904', 'src': 'https://www.exploit-db.com/exploits/15285/'},
        'Full-Nelson.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37', 'cve': 'CVE-2010-4258', 'src': 'https://www.exploit-db.com/exploits/15704/'},
        'Mempodipper Privilege Escalation': {'min': '2.6.39', 'max': '3.2.2', 'cve': 'CVE-2012-0056', 'src': 'https://www.exploit-db.com/exploits/35161/'},
        'Ext4 move extents ioctl Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-git6', 'cve': 'CVE-2009-4131', 'src': 'https://www.exploit-db.com/exploits/33395/'},
        'Ptrace Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'},
        'udp_sendmsg Privilege Escalation': {'min': '2.6.0', 'max': '2.6.19', 'cve': 'CVE-2009-2698', 'src': 'https://www.exploit-db.com/exploits/9575/'},
        'fasync_helper() Privilege Escalation': {'min': '2.6.28', 'max': '2.6.33-rc4-git1', 'cve': 'CVE-2009-4141', 'src': 'https://www.exploit-db.com/exploits/33523/'},
        'CAP_SYS_ADMIN Privilege Escalation': {'min': '2.6.34', 'max': '2.6.40', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/15916/'},
        'CAN BCM Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc1', 'cve': 'CVE-2010-2959', 'src': 'https://www.exploit-db.com/exploits/14814/'},
        'ia32syscall Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc4-git2', 'cve': 'CVE-2010-3301', 'src': 'https://www.exploit-db.com/exploits/15023/'},
        'Half-Nelson.c Econet Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36.2', 'cve': 'CVE-2010-3848', 'src': 'https://www.exploit-db.com/exploits/17787/'},
        'ACPI custom_method Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37-rc2', 'cve': 'CVE-2010-4347', 'src': 'https://www.exploit-db.com/exploits/15774/'},
        'SGID Privilege Escalation': {'min': '2.6.32.62', 'max': '3.14.8', 'cve': 'CVE-2014-4014', 'src': 'https://www.exploit-db.com/exploits/33824/'},
        'libfutex Privilege Escalation': {'min': '2.6.4', 'max': '3.14.6', 'cve': 'CVE-2014-3153', 'src': 'https://www.exploit-db.com/exploits/35370/'},
        'perf_swevent_init Privilege Escalation': {'min': '2.6.37', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/26131/'},
        'MSR Driver Privilege Escalation': {'min': '2.6', 'max': '3.7.6', 'cve': 'CVE-2013-0268', 'src': 'https://www.exploit-db.com/exploits/27297/'}
    }
    exploits_3 = {
        'overlayfs Privilege Escalation': {'min': '3.0.0', 'max': '3.19.0', 'cve': 'CVE-2015-1328', 'src': 'https://www.exploit-db.com/exploits/37292/'},
        'CLONE_NEWUSER|CLONE_FS Privilege Escalation': {'min': '3.0', 'max': '3.3.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/38390/'},
        'SO_SNDBUFFORCE & SO_RCVBUFFORCE Local Privilege Escalation': {'min': '3.5', 'max': '4.8.14', 'cve': 'CVE-2016-9793', 'src': 'https://www.exploit-db.com/exploits/41995/'},
        'Raw Mode PTY Echo Race Condition Privilege Escalation': {'min': '3.14-rc1', 'max': '3.16', 'cve': 'CVE-2014-0196', 'src': 'https://www.exploit-db.com/exploits/33516/'},
        'sock_diag_handlers() Privilege Escalation': {'min': '3.3.0', 'max': '3.7.10', 'cve': 'CVE-2013-1763', 'src': 'https://www.exploit-db.com/exploits/24555/'},
        'b43 Wireless Driver Privilege Escalation': {'min': '3.0', 'max': '3.9.4', 'cve': 'CVE-2013-2852', 'src': 'https://www.exploit-db.com/exploits/38559/'},
        'CONFIG_X86_X32=y Privilege Escalation': {'min': '3.4', 'max': '3.13.2', 'cve': 'CVE-2014-0038', 'src': 'https://www.exploit-db.com/exploits/31347/'},
        'Double-free usb-midi SMEP Local Privilege Escalation': {'min': '3.0', 'max': '4.5', 'cve': 'CVE-2016-2384', 'src': 'https://www.exploit-db.com/exploits/41999/'},
        'Remount FUSE Privilege Escalation': {'min': '3.2', 'max': '3.16.1', 'cve': 'CVE-2014-5207', 'src': 'https://www.exploit-db.com/exploits/34923/'},
        'ptrace/sysret Privilege Escalation': {'min': '3.0', 'max': '3.15.4', 'cve': 'CVE-2014-4699', 'src': 'https://www.exploit-db.com/exploits/34134/'},
        'open-time Capability file_ns_capable() Privilege Escalation': {'min': '3.0', 'max': '3.8.9', 'cve': 'CVE-2013-1959', 'src': 'https://www.exploit-db.com/exploits/25450/'},
        'REFCOUNT Overflow/Use-After-Free in Keyrings Privilege Escalation': {'min': '3.8.0', 'max': '4.4.1', 'cve': 'CVE-2016-0728', 'src': 'https://www.exploit-db.com/exploits/39277/'}
    }
    exploits_4 = {
        'overlayfs Privilege Escalation': {'min': '4.0', 'max': '4.3.3', 'cve': 'CVE-2015-8660', 'src': 'https://www.exploit-db.com/exploits/39166/'},
        'BPF Privilege Escalation': {'min': '4.4.0', 'max': '4.5.5', 'cve': 'CVE-2016-4557', 'src': 'https://www.exploit-db.com/exploits/39772/'},
        'AF_PACKET Race Condition Privilege Escalation': {'min': '4.2.0', 'max': '4.9.0-2', 'cve': 'CVE-2016-8655', 'src': 'https://www.exploit-db.com/exploits/40871/'},
        'DCCP Double-Free Privilege Escalation': {'min': '4.4.0', 'max': '4.9.11', 'cve': 'CVE-2017-6074', 'src': 'https://www.exploit-db.com/exploits/41458/'},
        'Netfilter target_offset Out-of-Bounds Privilege Escalation': {'min': '4.4.0-21-generic', 'max': '4.4.0-31-generic', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/40049/'},
        'IP6T_SO_SET_REPLACE Privilege Escalation': {'min': '4.6.2', 'max': '4.6.3', 'cve': 'CVE-2016-4997', 'src': 'https://www.exploit-db.com/exploits/40489/'},
        'Packet Socket Local Privilege Escalation': {'min': '4.8.0', 'max': '4.10.6', 'cve': 'CVE-2017-7308', 'src': 'https://www.exploit-db.com/exploits/41994/'},
        'UDEV < 232 - Privilege Escalation': {'min': '4.8.0', 'max': '4.9.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/41886/'}
    }
    # The prefixes are mutually exclusive, so scan order is irrelevant.
    family_tables = (
        ('2.0', exploits_2_0),
        ('2.2', exploits_2_2),
        ('2.4', exploits_2_4),
        ('2.6', exploits_2_6),
        ('3', exploits_3),
        ('4', exploits_4),
    )
    kernel_key = version_key(kernel)
    for prefix, table in family_tables:
        if kernel.startswith(prefix):
            for name, exploit in table.items():  # iterate over exploits dict
                if version_key(exploit['min']) <= kernel_key < version_key(exploit['max']):
                    return name, exploit['cve'], exploit['src']
            # Known kernel family, but no exploit range matched
            # (preserves the original behaviour of returning None here).
            return None
    return 'No exploits found for this kernel version'
async def get_device(
    hass: HomeAssistant,
    config_entry_id: str,
    device_category: str,
    device_type: str,
    vin: str,
):
    """Get a tesla Device for a Config Entry ID.

    Returns the first device of the given category whose type and VIN
    match, or ``None`` when no such device exists.
    """
    entry_data = hass.data[TESLA_DOMAIN][config_entry_id]
    candidates = entry_data["devices"].get(device_category, [])
    return next(
        (
            candidate
            for candidate in candidates
            if candidate.type == device_type and candidate.vin() == vin
        ),
        None,
    )
def cartesian2complex(real, imag):
    """
    Build the complex number from its cartesian form: z = z' + i * z".

    Args:
        real (float|np.ndarray): The real part z' of the complex number.
        imag (float|np.ndarray): The imaginary part z" of the complex number.

    Returns:
        z (complex|np.ndarray): The complex number: z = z' + i * z".
    """
    return imag * 1j + real
def compare_maps(ra_id, method_id, type_id, method_comp=None, type_comp=None):
    """Function to compare maps / or just print off a given map

    Builds the first map from ``GPVal`` rows matching ``(ra_id, type_id,
    method_id)``.  When both ``method_comp`` and ``type_comp`` are given, a
    second map is fetched and each grid-point value is divided by the
    matching comparison value before rendering; otherwise the first map is
    rendered as-is.

    :param ra_id: analysis id used to filter ``GPVal`` rows
    :param method_id: method id of the primary map
    :param type_id: type id of the primary map
    :param method_comp: optional method id of the comparison map
    :param type_comp: optional type id of the comparison map
    :return: a PDB-format string built from the non-zero grid points
    """
    # Get the map
    map_one = GPVal.objects.filter(my_anal_id=ra_id,
                                   type_id=type_id,
                                   method_id=method_id)
    if method_comp and type_comp:
        map_two = GPVal.objects.filter(my_anal_id=ra_id,
                                       type_id=type_comp,
                                       method_id=method_comp)
        # Now do the comparison
        for gpval in map_one:
            comp_gp = map_two.filter(gp_id=gpval.gp_id)
            if comp_gp:
                if gpval.value != 0.0 and comp_gp[0].value != 0.0:
                    # Ratio of primary to comparison value at this grid point
                    gpval.out_val = gpval.value / comp_gp[0].value
                else:
                    gpval.out_val = 0.0
            else:
                # No matching comparison grid point: keep the raw value
                gpval.out_val = gpval.value
    # Now render this data
    # NOTE(review): this second iteration over ``map_one`` relies on Django's
    # queryset result cache keeping the same Python objects (and hence the
    # ``out_val`` attributes set above) -- confirm the queryset is not
    # re-evaluated here.
    out_m = ""
    for my_p in map_one:
        if method_comp and type_comp:
            my_mol = Chem.MolFromPDBBlock(str(my_p.pdb_info))
            if my_p.out_val != 0.0:
                atm = my_mol.GetAtomWithIdx(0)
                # Stash the comparison ratio in the PDB temp-factor column
                atm.GetPDBResidueInfo().SetTempFactor(my_p.out_val)
                out_m += Chem.MolToPDBBlock(my_mol)
        else:
            if my_p.value != 0.0:
                out_m += my_p.pdb_info
    return out_m
def test_determinism_even_sequences():
    """
    Check that EvenSequencesSubsetIterator deterministically visits
    entries of a dataset of sequence data.
    """
    def visit_order(data, batch_size):
        # For every dataset index, record the batch in which it was visited.
        visited = [0] * len(data)
        iterator = EvenSequencesSubsetIterator(data, batch_size)
        for b_ind, ind_list in enumerate(iterator):
            # All sequences within one batch must share the same length.
            # (The original asserted a list literal, which is always truthy
            # for a non-empty batch and never actually checked the lengths.)
            assert all(len(data[i]) == len(data[ind_list[0]])
                       for i in ind_list)
            for i in ind_list:
                visited[i] = b_ind
        return visited

    rng = np.random.RandomState(123)
    lengths = rng.randint(1, 10, 100)
    data = [['w'] * l for l in lengths]
    batch_size = 5
    # Two passes over the same data must visit entries in the same batches.
    assert visit_order(data, batch_size) == visit_order(data, batch_size)
def on_connect(unused_client, unused_userdata, unused_flags, rc):
    """Callback for when a device connects.

    Follows the paho-mqtt ``on_connect`` signature; only the connection
    result code ``rc`` is used.  Marks the shared gateway state as
    connected as a side effect.
    """
    print('on_connect', mqtt.connack_string(rc))
    # Module-level state shared with the rest of the gateway.
    gateway_state.connected = True
def _row_reduce_list(mat, rows, cols, one, iszerofunc, simpfunc,
                     normalize_last=True, normalize=True, zero_above=True,
                     dotprodsimp=None):
    """Row reduce a flat list representation of a matrix and return a tuple
    (rref_matrix, pivot_cols, swaps) where ``rref_matrix`` is a flat list,
    ``pivot_cols`` are the pivot columns and ``swaps`` are any row swaps that
    were used in the process of row reduction.
    Parameters
    ==========
    mat : list
        list of matrix elements, must be ``rows`` * ``cols`` in length
    rows, cols : integer
        number of rows and columns in flat list representation
    one : SymPy object
        represents the value one, from ``Matrix.one``
    iszerofunc : determines if an entry can be used as a pivot
    simpfunc : used to simplify elements and test if they are
        zero if ``iszerofunc`` returns `None`
    normalize_last : indicates where all row reduction should
        happen in a fraction-free manner and then the rows are
        normalized (so that the pivots are 1), or whether
        rows should be normalized along the way (like the naive
        row reduction algorithm)
    normalize : whether pivot rows should be normalized so that
        the pivot value is 1
    zero_above : whether entries above the pivot should be zeroed.
        If ``zero_above=False``, an echelon matrix will be returned.
    dotprodsimp : bool, optional
        Specifies whether intermediate term algebraic simplification is used
        during matrix multiplications to control expression blowup and thus
        speed up calculation.
    """
    def get_col(i):
        # Column ``i`` extracted from the flat list via striding.
        return mat[i::cols]
    def row_swap(i, j):
        # Exchange rows ``i`` and ``j`` in place (slice assignment on the
        # flat list).
        mat[i*cols:(i + 1)*cols], mat[j*cols:(j + 1)*cols] = \
            mat[j*cols:(j + 1)*cols], mat[i*cols:(i + 1)*cols]
    def cross_cancel(a, i, b, j):
        """Does the row op row[i] = a*row[i] - b*row[j]"""
        q = (j - i)*cols
        for p in range(i*cols, (i + 1)*cols):
            mat[p] = dps(a*mat[p] - b*mat[p + q])
    # Optional per-term simplification hook; identity when disabled.
    dps = _dotprodsimp if dotprodsimp else lambda e: e
    piv_row, piv_col = 0, 0
    pivot_cols = []
    swaps = []
    # use a fraction free method to zero above and below each pivot
    while piv_col < cols and piv_row < rows:
        pivot_offset, pivot_val, \
            assumed_nonzero, newly_determined = _find_reasonable_pivot(
                get_col(piv_col)[piv_row:], iszerofunc, simpfunc)
        # _find_reasonable_pivot may have simplified some things
        # in the process. Let's not let them go to waste
        for (offset, val) in newly_determined:
            offset += piv_row
            mat[offset*cols + piv_col] = val
        if pivot_offset is None:
            # Column has no usable pivot; move on without advancing the row.
            piv_col += 1
            continue
        pivot_cols.append(piv_col)
        if pivot_offset != 0:
            row_swap(piv_row, pivot_offset + piv_row)
            swaps.append((piv_row, pivot_offset + piv_row))
        # if we aren't normalizing last, we normalize
        # before we zero the other rows
        if normalize_last is False:
            i, j = piv_row, piv_col
            mat[i*cols + j] = one
            for p in range(i*cols + j + 1, (i + 1)*cols):
                mat[p] = dps(mat[p] / pivot_val)
            # after normalizing, the pivot value is 1
            pivot_val = one
        # zero above and below the pivot
        for row in range(rows):
            # don't zero our current row
            if row == piv_row:
                continue
            # don't zero above the pivot unless we're told.
            if zero_above is False and row < piv_row:
                continue
            # if we're already a zero, don't do anything
            val = mat[row*cols + piv_col]
            if iszerofunc(val):
                continue
            cross_cancel(pivot_val, row, val, piv_row)
        piv_row += 1
    # normalize each row
    if normalize_last is True and normalize is True:
        for piv_i, piv_j in enumerate(pivot_cols):
            pivot_val = mat[piv_i*cols + piv_j]
            mat[piv_i*cols + piv_j] = one
            for p in range(piv_i*cols + piv_j + 1, (piv_i + 1)*cols):
                mat[p] = dps(mat[p] / pivot_val)
    return mat, tuple(pivot_cols), tuple(swaps)
def download_corpus(corpus: str, saving_directory: str = None) -> None:
    """Function that downloads a corpus from AWS.

    :param corpus: Name of the corpus to be loaded.
    :param saving_directory: RELATIVE path to the saving folder
        (defaults to ``data/``).
    :raises Exception: If ``corpus`` is not a known corpus name.
    """
    corpus = corpus.lower()
    if not saving_directory:
        saving_directory = "data/"
    if not os.path.exists(saving_directory):
        os.makedirs(saving_directory)
    # Join with os.path.join so the path is correct even when the caller
    # passes a directory without a trailing separator (plain string
    # concatenation would silently produce e.g. "outcorpus").
    corpus_path = os.path.join(saving_directory, corpus)
    if os.path.isdir(corpus_path):
        click.secho(f"{corpus} is already in cache.", fg="yellow")
        return
    elif corpus in corpus2download:
        download_file_maybe_extract(
            corpus2download[corpus],
            directory=saving_directory,
        )
    else:
        raise Exception(f"{corpus} is not a valid corpus!")
    click.secho("Download succeeded.", fg="yellow")
    # Remove the downloaded archive once it has been extracted.
    if os.path.exists(corpus_path + ".zip"):
        os.remove(corpus_path + ".zip")
    elif os.path.exists(corpus_path + ".tar.gz"):
        os.remove(corpus_path + ".tar.gz")
    else:
        click.secho("Fail to delete compressed file.", fg="red")
def _remove_sources_list(ctx, config, remote):
    """
    Removes the builder project's package repository file
    (e.g. /etc/yum.repos.d/{proj}.repo) from the remote host.

    :param ctx: the teuthology run context
    :param config: task configuration used to resolve the builder project
    :param remote: the teuthology.orchestra.remote.Remote object
    """
    builder = _get_builder_project(ctx, remote, config)
    builder.remove_repo()
    if remote.os.name not in ['opensuse', 'sle']:
        # openSUSE/SLE hosts do not use yum, so there is no
        # check_obsoletes setting to undo there.
        _yum_unset_check_obsoletes(remote)
def get_action_class(class_str):
    """Imports the action class.

    Args:
        class_str (str): Fully qualified class path, e.g. ``pkg.mod.MyAction``.

    Returns:
        Action: A child class of Action.

    Raises:
        ActionImportError: If the module or the class doesn't exist.
    """
    (module_name, class_name) = class_str.rsplit('.', 1)
    try:
        module = importlib.import_module(module_name)
        action_class = getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        # AttributeError covers the "module exists but class doesn't" case,
        # matching the documented contract.
        raise ActionImportError(e)
    return action_class
def rate_comments(request):
    """ Render a bloom page where respondents can rate comments by others.

    :param request: the incoming HTTP request
    :return: the rendered ``rate-comments.html`` response
    """
    return render(request, 'rate-comments.html')
def gaussian2d(size=(32, 32), sigma=0.5):
    """
    Generate a Gaussian kernel (not normalized).

    :param size: k x m size of the returned kernel
    :param sigma: standard deviation of the returned Gaussian
    :return: A tensor with the Gaussian kernel
    """
    # Coordinate grid over [-1, 1] x [-1, 1]
    xs = tf.linspace(-1.0, 1.0, size[0])
    ys = tf.linspace(-1.0, 1.0, size[1])
    grid_x, grid_y = tf.meshgrid(xs, ys)
    # Squared distance from the origin at each grid point
    squared_distance = grid_x * grid_x + grid_y * grid_y
    return tf.exp(-squared_distance / (2.0 * (sigma ** 2.0)))
def home():
    """
    List all users or add new user.

    Renders ``home.html`` with every ``User`` row from the database.
    """
    users = User.query.all()
    return render_template('home.html', users=users)
def test_show_avg_silhouette_scores_3d():
    """Tests the show avg silhouette scores function in K Means Clustering for 3d config."""
    clustering = KMeansClustering(is_3d=True)
    # The returned object must be a matplotlib Figure.
    figure = clustering.show_avg_silhouette_scores(return_fig=True)
    assert isinstance(figure, Figure)
def download_projects():
    """Downloads all existing projects to disk so they can be used
    for autocompletion"""
    # Map "(customer) project" display names to their project ids.
    project_map = {
        "(%s) %s" % (project['customerName'], project['name']): project['projectID']
        for project in kimai.get_projects()
    }
    config.set('Projects', project_map)
    print_success('Successfully downloaded projects.')
def test_coherency_regularized():
    """
    Tests that the regularized coherency algorithm runs smoothly, using the
    different csd routines and that the result is symmetrical:
    """
    for method in methods:
        f, c = tsa.coherency_regularized(tseries, 0.05, 1000,
                                         csd_method=method)
        # Coherency must be Hermitian: c[i, j] == conj(c[j, i]).
        npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.