content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def coords_to_id(traversed):
    """Re-key a traversed tree by level-order id.

    Each node's id comes from ``full_tree_id`` applied to its
    coordinates; the parent id is the id of the node whose coordinates
    equal this node's coordinates minus the last element.  If no such
    node has been inserted yet (or several match), parent_id is -1.

    Args:
        traversed: traversed tree as a list of node dicts, each with a
            'coords' entry.

    Returns:
        dict mapping level-order id -> node dict; every node gains a
        'parent_id' entry.
    """
    nodes_by_id = {}
    for node in traversed:
        node_id = full_tree_id(node['coords'])
        nodes_by_id[node_id] = node
        parent_coords = node['coords'][:-1]
        # Look the parent up among the nodes inserted so far.
        candidates = [nid for nid, item in nodes_by_id.items()
                      if item['coords'] == parent_coords]
        node['parent_id'] = candidates[0] if len(candidates) == 1 else -1
    return nodes_by_id
def dataframe_to_ipy_image(df, f=None, **kwargs):
    """Render a dataframe as an IPython Image (via a PIL Image).

    Args:
        df: dataframe to render.
        f: optional post-processing applied to the intermediate PIL
            Image, e.g. ``f=lambda img: img.rotate(-90, expand=True)``.
        kwargs: forwarded to ``IPython.display.Image`` (e.g. ``width``,
            ``height`` for html display).

    Returns:
        The IPython Image produced by ``pil_to_ipy_image``.
    """
    pil_image = dataframe_to_pil_image(df)
    # `f is not None` is the idiomatic form of the old `not f is None`.
    if f is not None:
        pil_image = f(pil_image)
    return pil_to_ipy_image(pil_image=pil_image, **kwargs)
import os
import yaml
def get_yaml_file_info(file_name):
    """Parse a YAML file into a dictionary.

    Args:
        file_name (str): path of the YAML file to load
    Returns:
        The parsed YAML content.
    Raises:
        ProgramError: if ``file_name`` does not exist.
    """
    if not os.path.isfile(file_name):
        print("Cannot find file %s" % file_name)
        raise ProgramError(1)
    with open(file_name) as fdesc:
        return yaml.load(fdesc, Loader=yaml.FullLoader)
def read_dmarkov(columns, rows, D, symbolization_type, division_order, suffix=("normal", "gaussian002")):
    """
    Reads the result files for the D-Markov algorithm. The function requires a configuration for the parameters of
    the D-Markov. The suffix parameter indicates if the non-modified files should be loaded ("normal"), the noisy
    files should be loaded ("gaussian002") or both.
    :param columns: Number of columns in the division.
    :param rows: Number of rows in the division.
    :param D: Number of previous symbols to take into account (Markov property).
    :param symbolization_type: Type of symbolization. It should be an Enum of type SymbolizationType (observations_set.py)
        (see EqualWidthLimits, EqualFrequencyLimits and EqualFrequencyLimitsNoBounds in observations_set.py).
    :param division_order: Only for EqualFrequencyLimits and EqualFrequencyLimitsNoBounds. Should we do a row-first
        or column-first division? It should be an Enum of type DivisionOrder (observations_set.py)
    :param suffix: Load non-modified ("normal"), noisy ("gaussian002") or both file results. A single string or a
        sequence of strings. (The default is a tuple rather than a list to avoid a mutable default argument.)
    :return: A pandas dataframe with the information in the result files. Also, an "Anomalous" column is created, which
        is False for the "normal" result files and True for the "gaussian002" files.
    """
    if isinstance(suffix, str):
        suffix = [suffix]
    basename = 'results/DMarkovMachine/Type{:d}/DMarkovMachine_{:d}_{:d}_{:d}_{}_{}.csv'
    if symbolization_type == SymbolizationType.EQUAL_WIDTH:
        symb_str = "EW"
    else:
        # Equal-frequency variants also encode the division order in the name.
        if symbolization_type == SymbolizationType.EQUAL_FREQUENCY:
            symb_str = "EF"
        elif symbolization_type == SymbolizationType.EQUAL_FREQUENCY_NO_BOUNDS:
            symb_str = "EFNB"
        if division_order == DivisionOrder.ROWS_THEN_COLUMNS:
            symb_str += "_RC"
        elif division_order == DivisionOrder.COLUMNS_THEN_ROWS:
            symb_str += "_CR"
    frames = []
    for type_idx in range(1, 37):
        for s in suffix:
            result_name = basename.format(type_idx, rows, columns, D, symb_str, s)
            file_df = pd.read_csv(result_name, dtype={'Name': 'object', 'AnomalyScore': 'float64'})
            if s == "normal":
                file_df["Anomalous"] = False
            elif s == "gaussian002":
                file_df["Anomalous"] = True
            frames.append(file_df)
    # DataFrame.append was deprecated and removed in pandas 2.0, and calling
    # it in a loop copies the accumulated frame each time (quadratic).
    # Collect the frames and concatenate once; index behavior matches append.
    return pd.concat(frames) if frames else pd.DataFrame()
import torch
def compute_output_shape(observation_space, layers):
    """Compute the per-sample shape obtained by passing one observation
    sampled from ``observation_space`` through ``layers`` in order."""
    # [None] prepends a batch dimension of size 1 to the sampled observation.
    batched_obs = torch.tensor(observation_space.sample()[None])
    with torch.no_grad():
        out = preprocess_obs(batched_obs, observation_space,
                             normalize_images=True)
        # Forward-propagate through every layer to learn the final size.
        for module in layers:
            out = module(out)
    # The batch axis must survive the forward pass unchanged.
    assert out.shape[0] == batched_obs.shape[0]
    # Strip the batch axis and return the remaining dimensions.
    return out.shape[1:]
def tenure_type():
    """RESTful CRUD controller."""
    # No rheader is passed (s3db.stdm_rheader was considered but left out).
    return s3_rest_controller()
def _format_port(port):
"""
compute the right port type str
Arguments
-------
port: input/output port object
Returns
-------
list
a list of ports with name and type
"""
all_ports = []
for key in port:
one_port = {}
one_port['name'] = key
port_type = port[key]['type']
if isinstance(port_type, list):
types = []
for t in port_type:
type_name = t.__module__+'.'+t.__name__
types.append(type_name)
one_port['type'] = types
else:
type_name = port_type.__module__+'.'+port_type.__name__
one_port['type'] = [type_name]
all_ports.append(one_port)
return all_ports | 2fa65686b6b764afc97a200a02baec65645c9879 | 25,606 |
import os
def getTileName(minfo, ti, xIndex, yIndex, level=-1):
    """
    Create the tile file name for the tile at (xIndex, yIndex).

    The name is the source map's base name plus zero-padded row/column
    indices; the padding width is taken from the larger of the two tile
    counts.  When the global ``UseDirForEachRow`` is set, every row gets
    its own sub-directory, created on the first tile of a new row (the
    global ``LastRowIndx`` tracks the last row seen).  The extension is
    either the global ``Extension`` or the source file's own.
    """
    global LastRowIndx
    # Padding width from the larger tile count (was shadowing builtin `max`).
    tile_count = max(ti.countTilesX, ti.countTilesY)
    countDigits = len(str(tile_count))
    parts = os.path.splitext(os.path.basename(minfo.filename))
    if parts[0][0] == "@":  # remove possible leading "@"
        parts = (parts[0][1:], parts[1])
    index_part = "_%0" + str(countDigits) + "i" + "_%0" + str(countDigits) + "i"
    if UseDirForEachRow:
        # Hoisted: getTargetDir(level)+row was recomputed three times before.
        row_dir = getTargetDir(level) + str(yIndex)
        fmt = row_dir + os.sep + parts[0] + index_part
        # See if there was a switch in the row; if so, create a dir for it.
        if LastRowIndx < yIndex:
            LastRowIndx = yIndex
            if not os.path.exists(row_dir):
                os.mkdir(row_dir)
    else:
        fmt = getTargetDir(level) + parts[0] + index_part
    # Check for the extension that should be used.
    if Extension is None:
        fmt = fmt + parts[1]
    else:
        fmt = fmt + "." + Extension
    return fmt % (yIndex, xIndex)
import io
def proc_cgroups(proc='self'):
    """Read a process' cgroups

    :returns:
        ``dict`` - Dictionary of all the process' subsystem and cgroups.
    """
    # `proc` is either a pid (int) or a name like 'self'; reject paths.
    assert isinstance(proc, int) or '/' not in proc
    with io.open(_PROC_CGROUP.format(proc), 'r') as cgroup_file:
        # Each /proc/<pid>/cgroup line looks like "<id>:<subsys>:<path>".
        entries = (line.strip().split(':', 2) for line in cgroup_file)
        return {subsys: path for _id, subsys, path in entries}
import csv
def load_csv_data(
    data_file_name,
    *,
    data_module=DATA_MODULE,
    descr_file_name=None,
    descr_module=DESCR_MODULE,
):
    """Loads `data_file_name` from `data_module` with `importlib.resources`.

    The csv's first row holds the sample count, the feature count and the
    class names; every other row holds the feature values followed by the
    integer target.

    Parameters
    ----------
    data_file_name : str
        Name of csv file to be loaded from `data_module/data_file_name`.
        For example `'wine_data.csv'`.
    data_module : str or module, default='sklearn.datasets.data'
        Module where data lives. The default is `'sklearn.datasets.data'`.
    descr_file_name : str, default=None
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`. See also :func:`load_descr`.
        If not None, also returns the corresponding description of
        the dataset.
    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. See also :func:`load_descr`.
        The default is `'sklearn.datasets.descr'`.

    Returns
    -------
    data : ndarray of shape (n_samples, n_features)
        One row per sample, one column per feature.
    target : ndarray of shape (n_samples,)
        Integer target of each sample.
    target_names : ndarray
        Names of the classifications; ``target_names[target[i]]`` is the
        class name of sample ``i``.
    descr : str, optional
        Content of `descr_file_name`; only returned if `descr_file_name`
        is not None.
    """
    with resources.open_text(data_module, data_file_name) as csv_file:
        reader = csv.reader(csv_file)
        header = next(reader)
        n_samples, n_features = int(header[0]), int(header[1])
        target_names = np.array(header[2:])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=int)
        for row_idx, row in enumerate(reader):
            data[row_idx] = np.asarray(row[:-1], dtype=np.float64)
            target[row_idx] = np.asarray(row[-1], dtype=int)
    if descr_file_name is None:
        return data, target, target_names
    assert descr_module is not None
    descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
    return data, target, target_names, descr
def ptFromSudakov(sudakovValue):
    """Invert the Sudakov factor: return the pt for which
    Sudakov(pt) == sudakovValue (for 0 < sudakovValue < 1).
    """
    # Sudakov = exp(-alphas * norm * L^2) with norm = 2*CA/pi, so
    #   log(Sudakov) = -alphas * norm * L^2
    #   L^2          = log(Sudakov) / (-alphas * norm)
    #   pt           = ptHigh * exp(-L)
    norm = 2 * CA / pi
    L2 = log(sudakovValue) / (-alphas * norm)
    return ptHigh * exp(-sqrt(L2))
def _add_lod2_surface(composite_surface, boundary_surface, coords,
                      surface_id, surface_factory):
    """Append one polygon to the composite surface and register the
    matching thematic boundary surface.

    Parameters
    ----------
    composite_surface : gml.CompositeSurface
        geometric surface collection to extend
    boundary_surface : list of bldg.BoundarySurfacePropertyType
        thematic surface list, extended in place
    coords : list
        polygon corner coordinates
    surface_id : str
        gml id given to both the geometric and the thematic surface
    surface_factory : callable
        constructor of the thematic surface type, e.g.
        ``bldg.FloorSurface``, ``bldg.RoofSurface`` or
        ``bldg.WallSurface``

    Returns
    -------
    gml.CompositeSurface
        the updated composite surface
    """
    composite_surface = _add_surface(composite_surface, coords)
    composite_surface.surfaceMember[-1].Surface.id = surface_id
    boundary_surface.append(bldg.BoundarySurfacePropertyType())
    boundary_surface[-1].BoundarySurface = surface_factory()
    boundary_surface[-1].BoundarySurface = _add_gml_boundary(
        boundary_surface[-1].BoundarySurface, surface_id)
    return composite_surface


def _set_lod_2(gml_bldg, length, width, height, bldg_center):
    """Adds a LOD 2 representation of the building based on building length,
    width and height.

    alternative way to handle building position

    Parameters
    ----------
    gml_bldg : bldg.Building() object
        A building object, where bldg is a reference to
        `pyxb.bundles.opengis.citygml.building`.
    length : float
        length of the building
    width : float
        width of the building
    height : float
        height of the building
    bldg_center : list
        coordinates in the reference system of the building center
        (NOTE: mutated in place — shifted to the lower-left corner)

    Returns
    -------
    gml_bldg : bldg.Building() object
        Returns the modified building object
    """
    boundary_surface = []
    lod_2_solid = gml.SolidPropertyType()
    lod_2_solid.Solid = gml.Solid_()
    exterior_solid = gml.SurfacePropertyType()
    composite_surface = gml.CompositeSurface()
    # Shift the center to the building's lower-left corner (in-place,
    # matching the original behavior callers may rely on).
    bldg_center[0] -= length / 2
    bldg_center[1] -= width / 2
    x0, y0, z0 = bldg_center[0], bldg_center[1], bldg_center[2]
    x1, y1, z1 = x0 + length, y0 + width, z0 + height
    base_name = gml_bldg.name[0].value()
    # (id suffix, thematic surface type, corner coordinates) for the
    # ground, roof and four wall surfaces of the box.
    surfaces = [
        ("_ground", bldg.FloorSurface,
         [[x0, y0, z0], [x1, y0, z0], [x1, y1, z0], [x0, y1, z0]]),
        ("_roof", bldg.RoofSurface,
         [[x0, y0, z1], [x1, y0, z1], [x1, y1, z1], [x0, y1, z1]]),
        ("_a", bldg.WallSurface,
         [[x0, y0, z0], [x1, y0, z0], [x1, y0, z1], [x0, y0, z1]]),
        ("_b", bldg.WallSurface,
         [[x0, y1, z0], [x1, y1, z0], [x1, y1, z1], [x0, y1, z1]]),
        ("_c", bldg.WallSurface,
         [[x0, y0, z0], [x0, y1, z0], [x0, y1, z1], [x0, y0, z1]]),
        ("_d", bldg.WallSurface,
         [[x1, y0, z0], [x1, y1, z0], [x1, y1, z1], [x1, y0, z1]]),
    ]
    for suffix, surface_factory, coords in surfaces:
        composite_surface = _add_lod2_surface(
            composite_surface, boundary_surface, coords,
            base_name + suffix, surface_factory)
    exterior_solid.Surface = composite_surface
    lod_2_solid.Solid.exterior = exterior_solid
    gml_bldg.lod2Solid = lod_2_solid
    gml_bldg.boundedBy_ = boundary_surface
    return gml_bldg
def load_data(filename: str):
    """
    Load and preprocess the Agoda booking cancellation dataset.

    Parameters
    ----------
    filename: str
        Path to the Agoda bookings csv file

    Returns
    -------
    Tuple of:
        data_frame_features : pd.DataFrame - scaled feature matrix for all rows
        labels : pd.Series of bool - True where the booking was cancelled
        regression_data : pd.DataFrame - feature rows of cancelled bookings only
        regression_labels : pd.Series - days between booking and cancellation
        cancellation_date : pd.Series - cancellation date per row (NaT if none)
        feature_columns : pd.Index - column names of the feature matrix
    """
    # TODO - replace below code with any desired preprocessing
    full_data = pd.read_csv(filename).drop_duplicates()
    # Drop rows whose cancellation happens on/after check-in.
    # NOTE(review): this compares the raw string columns lexicographically —
    # presumably both are ISO-formatted dates; confirm against the data.
    indx_drop = full_data[full_data["checkin_date"] <= full_data["cancellation_datetime"]].index
    full_data.drop(indx_drop, inplace=True)
    # Drop rows booked on/after their cancellation date (inconsistent records).
    indx_drop = full_data[pd.to_datetime(full_data["booking_datetime"]).dt.date >= pd.to_datetime(
        full_data["cancellation_datetime"])].index
    full_data.drop(indx_drop, inplace=True)
    full_data.reset_index(inplace=True)
    # Select the raw feature columns used downstream.
    features = full_data[[
        # "hotel_id",
        "accommadation_type_name",
        "original_selling_amount",
        "booking_datetime",
        "checkin_date",
        "checkout_date",
        "charge_option",
        "original_payment_method",
        "original_payment_type",
        "hotel_star_rating",
        "hotel_country_code",
        "customer_nationality",
        "guest_is_not_the_customer",
        "guest_nationality_country_name",
        "no_of_adults",
        "no_of_children",
        "no_of_extra_bed",
        "no_of_room",
        "origin_country_code",
        "original_payment_currency",
        "is_user_logged_in",
        "request_nonesmoke",
        "request_latecheckin",
        "request_highfloor",
        "request_largebed",
        "request_twinbeds",
        "request_airport",
        "request_earlycheckin",
        "cancellation_policy_code",
        "h_customer_id"
    ]]
    # NOTE(review): `features` is a slice of `full_data`; the in-place edits
    # below may trigger pandas SettingWithCopy warnings — verify intent.
    process_dates(features)
    # Cancellation processing
    cancellation_policy_processing(features)
    # days_to_end_of_cancellation_week = (features["booking_datetime"] - pd.to_datetime('2020-12-13')).dt.days
    # Clean: missing special-request flags mean "not requested" -> 0.
    features.loc[:, "request_nonesmoke"] = features[
        "request_nonesmoke"].fillna(0)
    features.loc[:, "request_latecheckin"] = features[
        "request_latecheckin"].fillna(0)
    features.loc[:, "request_highfloor"] = features[
        "request_highfloor"].fillna(0)
    features.loc[:, "request_largebed"] = features["request_largebed"].fillna(
        0)
    features.loc[:, "request_twinbeds"] = features["request_twinbeds"].fillna(
        0)
    features.loc[:, "request_airport"] = features["request_airport"].fillna(0)
    features.loc[:, "request_earlycheckin"] = features[
        "request_earlycheckin"].fillna(0)
    features.loc[:, "is_user_logged_in"] = features[
        "is_user_logged_in"].astype(int)
    # Dummies: one-hot encode categoricals, keeping only values seen >100
    # times (rarer values are mapped to NaN and thus get no dummy column).
    dummies_columns = ["accommadation_type_name", "charge_option",
                       "original_payment_type", "original_payment_method",
                       "original_payment_currency", "hotel_country_code",
                       "customer_nationality",
                       "guest_nationality_country_name",
                       "origin_country_code", "h_customer_id"]
    for d in dummies_columns:
        top_values = features[d].value_counts()
        top_values = top_values[top_values > 100]
        features[d] = [x if x in top_values else np.nan for x in features[d]]
    features = pd.get_dummies(features, columns=dummies_columns)
    # NOTE(review): debug leftover — prints the first encoded row.
    print(features.iloc[0])
    # A row is a positive (cancelled) sample iff it has a cancellation date.
    labels = ~full_data['cancellation_datetime'].isnull()
    features.drop("booking_datetime", inplace=True, axis=1)
    features.drop("checkin_date", inplace=True, axis=1)
    features.drop("checkout_date", inplace=True, axis=1)
    # Check correlation
    # correlation_heatmap(features, labels)
    scaler = MinMaxScaler()
    scaled_features = scaler.fit_transform(features)
    data_frame_features = pd.DataFrame(scaled_features, columns=features.columns)
    # Regression task: only cancelled bookings; the target is the number of
    # days between booking and cancellation.
    regression_data = data_frame_features[labels]
    cancellation_date = pd.to_datetime(full_data["cancellation_datetime"]).dt.date
    booking_date = pd.to_datetime(full_data["booking_datetime"]).dt.date
    cancellation_date_regression = cancellation_date[labels]
    booking_date_regression = booking_date[labels]
    regression_labels = (cancellation_date_regression - booking_date_regression).dt.days
    checkein_date = pd.to_datetime(full_data["checkin_date"]).dt.date
    ##
    # regression_data["cancellation_minus_booking"] = (cancellation_date - booking_date).dt.days
    # regression_data["checkin_minus_cancellation"] = (checkein_date - cancellation_date).dt.days
    # regression_data = scaler.fit_transform(regression_data)
    ##
    data_frame_features["booking_date"] = booking_date
    # labels["cancellation_date"] = cancellation_date
    feature_columns = data_frame_features.columns
    return data_frame_features, labels, regression_data, regression_labels, cancellation_date, feature_columns
def send_mail(subject, message, from_email, recipient_list, html_message='',
              scheduled_time=None, headers=None, priority=PRIORITY.medium):
    """
    Add a new message to the mail queue. This is a replacement for Django's
    ``send_mail`` core email method.

    One ``OutgoingEmail`` is created per recipient.  Emails with priority
    ``now`` are dispatched immediately; all others stay queued.
    """
    subject = force_text(subject)
    # A `now` priority bypasses the queue, so no queued status is recorded.
    queued_status = None if priority == PRIORITY.now else STATUS.queued
    emails = [
        OutgoingEmail.objects.create(
            from_email=from_email, to=recipient, subject=subject,
            message=message, html_message=html_message, status=queued_status,
            headers=headers, priority=priority, scheduled_time=scheduled_time,
        )
        for recipient in recipient_list
    ]
    if priority == PRIORITY.now:
        for email in emails:
            email.dispatch()
    return emails
def proxy_a_distance(source_X, target_X):
    """
    Compute the Proxy-A-Distance of a source/target representation.

    A linear SVM is trained to tell source samples (label 0) from target
    samples (label 1); the PAD is then 2 * (1 - 2 * err), where err is
    the classifier's error on the training data itself.
    """
    n_source = np.shape(source_X)[0]
    n_target = np.shape(target_X)[0]
    train_X = np.vstack((source_X, target_X))
    train_Y = np.hstack((np.zeros(n_source, dtype=int),
                         np.ones(n_target, dtype=int)))
    classifier = svm.LinearSVC(random_state=0)
    classifier.fit(train_X, train_Y)
    error = metrics.mean_absolute_error(train_Y, classifier.predict(train_X))
    return 2 * (1 - 2 * error)
import json
def load_metadata(stock_model_name="BlackScholes", time_id=None):
    """
    load the metadata of a dataset specified by its name and id
    :return: dict (with hyperparams of the dataset)
    """
    # Resolve a missing/partial time id to the dataset's actual one.
    time_id = _get_time_id(stock_model_name=stock_model_name, time_id=time_id)
    path = '{}{}-{}/'.format(training_data_path, stock_model_name, int(time_id))
    with open('{}metadata.txt'.format(path), 'r') as metadata_file:
        return json.load(metadata_file)
from typing import List
import torch
def evaluate(env: AlfEnvironment, algorithm: RLAlgorithm,
             num_episodes: int) -> List[alf.metrics.StepMetric]:
    """Perform one round of evaluation.

    Plays the (possibly batched) environment until ``num_episodes``
    episodes have finished, accumulating return/length/info metrics.

    Args:
        env: the environment
        algorithm: the training algorithm
        num_episodes: number of episodes to evaluate
    Returns:
        a list of metrics from the evaluation
    """
    batch_size = env.batch_size
    env.reset()
    time_step = common.get_initial_time_step(env)
    algorithm.eval()
    policy_state = algorithm.get_initial_predict_state(env.batch_size)
    trans_state = algorithm.get_initial_transform_state(env.batch_size)
    # Ceiling division: each parallel environment contributes this many
    # episodes so the total reaches at least num_episodes.
    episodes_per_env = (num_episodes + batch_size - 1) // batch_size
    # Per-environment count of finished episodes.
    env_episodes = torch.zeros(batch_size, dtype=torch.int32)
    episodes = 0
    metrics = [
        alf.metrics.AverageReturnMetric(
            buffer_size=num_episodes, example_time_step=time_step),
        alf.metrics.AverageEpisodeLengthMetric(
            example_time_step=time_step, buffer_size=num_episodes),
        alf.metrics.AverageEnvInfoMetric(
            example_time_step=time_step, buffer_size=num_episodes),
        alf.metrics.AverageDiscountedReturnMetric(
            buffer_size=num_episodes, example_time_step=time_step)
    ]
    time_step = common.get_initial_time_step(env)
    while episodes < num_episodes:
        # For parallel play, we cannot naively pick the first finished `num_episodes`
        # episodes to estimate the average return (or other statitics) as it can be
        # biased towards short episodes. Instead, we stick to using the first
        # episodes_per_env episodes from each environment to calculate the
        # statistics and ignore the potentially extra episodes from each environment.
        invalid = env_episodes >= episodes_per_env
        # Force the step_type of the extra episodes to be StepType.FIRST so that
        # these time steps do not affect metrics as the metrics are only updated
        # at StepType.LAST. The metric computation uses cpu version of time_step.
        time_step.cpu().step_type[invalid] = StepType.FIRST
        next_time_step, policy_step, trans_state = policy_trainer._step(
            algorithm=algorithm,
            env=env,
            time_step=time_step,
            policy_state=policy_state,
            trans_state=trans_state,
            metrics=metrics)
        # Same masking on the (possibly GPU-resident) time_step itself.
        time_step.step_type[invalid] = StepType.FIRST
        # Count episodes that ended at this step, per environment.
        for i in range(batch_size):
            if time_step.step_type[i] == StepType.LAST:
                env_episodes[i] += 1
                episodes += 1
        policy_state = policy_step.state
        time_step = next_time_step
    env.reset()
    return metrics
from pagure.hooks import BaseHook
def get_plugin_names(blacklist=None, without_backref=False):
    """Return the list of plugins names.

    :arg blacklist: name or list of names to not return
    :type blacklist: string or list of strings
    :arg without_backref: whether or not to include hooks that
        have backref "None"
    :type without_backref: bool
    :return: list of plugin names (strings)
    """
    plugins = load("pagure.hooks", subclasses=BaseHook)
    # Normalize the blacklist into a set of excluded names.
    if not blacklist:
        excluded = set()
    elif isinstance(blacklist, list):
        excluded = set(blacklist)
    else:
        excluded = {blacklist}
    names = [
        plugin.name
        for plugin in plugins
        if plugin.name not in excluded and (plugin.backref or without_backref)
    ]
    # The default hook is not one we show
    if "default" in names:
        names.remove("default")
    return sorted(names)
def is_autosync(*args):
    """
    is_autosync(name, type) -> bool
    is_autosync(name, tif) -> bool
    Is the specified idb type automatically synchronized?
    @param name (C++: const char *)
    @param type (C++: const type_t *)
    """
    # Thin SWIG wrapper: forwards all arguments unchanged to the native
    # IDA type-info implementation.
    return _ida_typeinf.is_autosync(*args)
def scanboards(dirpath):
    """Scans the directory for board files and returns an array"""
    print("Scanning for JSON board data files...", end="")
    # Keep .json files, but skip index.json (it is metadata, not a board).
    files = []
    for candidate in subfiles(dirpath):
        if candidate.endswith(".json") and not candidate.endswith("index.json"):
            files.append(candidate)
    print("Found {} in \"{}\"".format(len(files), dirpath))
    return files
def router_get_notification() -> dict:
    """List every configuration setting of the Telegram BOT notifier."""
    logger.log('LOG ROTA', "Chamada rota /get_all.")
    current_config = TelegramNotifier.make_current_cfg_dict()
    return {"configuracoes": current_config}
def script_filter_maximum_value(config):
    """ The scripting version of `filter_maximum_value`. This
    function applies the filter to the entire directory (or single
    file). It also adds the tags to the header file of each fits file
    indicating the number of pixels filtered for this filter.

    Parameters
    ----------
    config : ConfigObj
        The configuration object that is to be used for this
        function.

    Returns
    -------
    None
    """
    def cfg(*keys):
        # Shorthand for pulling a single entry out of the configuration.
        return core.config.extract_configuration(
            config_object=config, keys=list(keys))

    # Global configuration parameters, including the directory.
    data_directory = cfg('data_directory')
    subfolder = cfg('subfolder')
    filter_tag_name = cfg('filter_tag_name')
    # Run flag and parameters specific to this filter.
    run_flag = cfg('filter', 'run_filter_maximum_value')
    maximum_value = cfg('filter', 'maximum_value')
    # Create the filters from the directory using the maximum-value mask.
    mask.base.create_directory_filter_files(
        data_directory=data_directory,
        filter_function=mask.filter_maximum_value,
        filter_arguments={'maximum_value': maximum_value},
        filter_file_tag=filter_tag_name,
        subfolder=subfolder,
        run=run_flag)
    # All done.
    return None
import time
def pretty_date(d):
    """ returns a html formatted pretty date """
    # Ordinal suffix: 1st/2nd/3rd (also 21st/22nd/23rd and 31st); all
    # other days of the month take "th".
    day_of_month = d.tm_mday
    if day_of_month in (1, 21, 31):
        ordinal = "st"
    elif day_of_month in (2, 22):
        ordinal = "nd"
    elif day_of_month in (3, 23):
        ordinal = "rd"
    else:
        ordinal = "th"
    weekday = time.strftime("%A", d)
    month = time.strftime("%B", d)
    return f"{weekday} the {day_of_month}<sup>{ordinal}</sup> of {month}, {d.tm_year}"
from datetime import datetime

import dateutil
import dateutil.parser
def update(model, gcs_bucket, gcs_object):
    """Updates the given GCS object with new data from the given model.

    Uses last_modified to determine the date to get items from. Bases the
    identity of entities in the GCS object on their 'id' field -- existing
    entities for which new data is found will be replaced.

    Parameters
    ----------
    model : ndb.Model
    gcs_bucket : str
    gcs_object : str
    """
    # If file doesn't exist, just dump
    if not cloud_storage.exists(gcs_bucket, gcs_object):
        LOG.info('No object to update, calling dump(...)')
        return dump(model, gcs_bucket, gcs_object)
    # Get preexisting items
    transferred_items = cloud_storage.download_object(gcs_bucket, gcs_object)
    LOG.info('{} items exist'.format(len(transferred_items)))
    # Find the most recently modified one.
    # FIX: the module does `from datetime import datetime`, so the class is
    # called directly -- the old `datetime.datetime(1, 1, 1)` raised
    # AttributeError.
    last_date = datetime(1, 1, 1)
    for item in transferred_items:
        # dateutil.parser must be imported explicitly (a bare
        # `import dateutil` does not load the submodule).
        modified_date = dateutil.parser.parse(item['last_modified'])
        if modified_date > last_date:
            last_date = modified_date
    # Get all items modified after that date
    LOG.info('Last date on record: {}'.format(last_date.isoformat()))
    new_items_iter = model.query(model.last_modified > last_date).iter()
    new_items = tuple(item.serializable() for item in new_items_iter)
    new_items_by_id = {i['id']: i for i in new_items}
    if new_items:  # Found new items -- update existing items in GCS
        items_by_id = {i['id']: i for i in transferred_items}
        items_by_id.update(new_items_by_id)
        items = items_by_id.values()
        LOG.info("Updating {n} items in {m} to {o}".format(n=len(new_items),
                                                           m=model._get_kind(),
                                                           o=gcs_object))
        cloud_storage.upload_data(items, gcs_bucket, gcs_object)
    else:  # Nothing to update with.
        LOG.info("No new items in {m} to append to {o}".format(m=model._get_kind(),
                                                               o=gcs_object))
import logging
def setup_logging(
    logger: logging.Logger = logging.getLogger(__name__),
    verbose: bool = False,
    debug: bool = False,
) -> logging.Logger:
    """Configure logging.

    Sets the logger level from the flags (``debug`` wins over
    ``verbose``, otherwise WARNING) and attaches a stream handler with a
    timestamped format.

    Args:
        logger: the logger to configure; defaults to this module's logger.
        verbose: emit INFO-level messages.
        debug: emit DEBUG-level messages (takes precedence over verbose).

    Returns:
        The configured logger.
    """
    if debug:
        logger.setLevel(logging.DEBUG)
    elif verbose:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)
    # FIX: only attach a handler the first time -- the old code added a new
    # StreamHandler on every call, duplicating each emitted record.
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "[%(asctime)-15s: %(levelname)-8s @ %(funcName)s] %(message)s"
        ))
        logger.addHandler(handler)
    return logger
import os
def train_protocol():
    """ train the model with model.fit() """
    model = create_cnn_model(tf_print=True)
    model.compile(optimizer='adam', loss='mean_squared_error')
    inputs, targets = load_data_batch(num_images_total=30000)
    # 12.5% of the shuffled data is held out for validation.
    model.fit(x=inputs, y=targets, batch_size=128, epochs=50, verbose=1,
              validation_split=0.125, shuffle=True)
    # Save the trained weights under a timestamped name.
    save_name = os.path.join(path_model_save,
                             'model_{}.h5'.format(gen_time_str()))
    model.save(filepath=save_name)
    return model
def member_requests_list(context, data_dict):
    """ Show request access check """
    # context and data_dict are part of the auth-function signature and are
    # intentionally unused: access is granted to any registered user.
    return _only_registered_user()
def get_conductivity(sw_tdep,mesh,rvec,ham_r,ndegen,avec,fill,temp_max,temp_min,tstep,sw_tau,idelta=1e-3,tau0=100):
    """
    this function calculates conductivity at tau==1 from Boltzmann equation in metal

    It diagonalizes the Wannier Hamiltonian on an MPI-distributed k-mesh,
    writes the transport distribution function to 'tdf.dat' (rank 0 only),
    then for each temperature prints sigma, kappa, sigmaS, Seebeck, Peltier,
    Lorenz and power-factor matrices.  Relies on module-level globals
    (sw_unit, comm, rank, MPI, mass, with_spin, gen_klist, get_ham, get_vec,
    calc_mu, np, sclin, scconst).
    """
    def calc_Kn(eig,veloc,temp,mu,tau):
        # -df/de of the Fermi function, written via tanh for stability.
        dfermi=0.25*(1.-np.tanh(0.5*(eig-mu)/temp)**2)/temp
        #Kn=sum_k(v*v*(e-mu)^n*(-df/de))
        K0=np.array([[(vk1*vk2*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
        K1=np.array([[(vk1*vk2*(eig-mu)*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
        K2=np.array([[(vk1*vk2*(eig-mu)**2*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
        # Sum the partial k-sums over all MPI ranks.
        K0=comm.allreduce(K0,MPI.SUM)
        K1=comm.allreduce(K1,MPI.SUM)
        K2=comm.allreduce(K2,MPI.SUM)
        return(K0,K1,K2)
    if sw_unit:
        kb=scconst.physical_constants['Boltzmann constant in eV/K'][0] #the unit of temp is kBT[eV], so it need to convert eV>K
        eC=scconst.e #electron charge, it need to convert eV>J (1eV=eCJ)
        tau_u=1.e-15 #unit of tau is sec. default of tau is 1fs
    else:
        kb=1.
        eC=1.
        tau_u=1.
    itau0=1./tau0
    gsp=(1.0 if with_spin else 2.0) #spin weight
    Nk,count,k_mpi=gen_klist(mesh)
    Vuc=sclin.det(avec)*1e-30 #unit is AA^3. Nk*Vuc is Volume of system.
    # Band energies and velocities on this rank's share of the k-mesh.
    ham=np.array([get_ham(k,rvec,ham_r,ndegen) for k in k_mpi])
    eig=np.array([sclin.eigvalsh(h) for h in ham]).T/mass
    veloc=np.array([get_vec(k,rvec,ham_r,ndegen,avec) for k in k_mpi])/mass
    emin=comm.allreduce(eig.min(),MPI.MIN)
    emax=comm.allreduce(eig.max(),MPI.MAX)
    # Transport distribution function sampled on 300 energies, using a
    # Lorentzian of width idelta around each band energy.
    wlength=np.linspace(emin,emax,300)
    tdf=np.array([[[(v1*v2*tau_u/((w-eig)**2+idelta**2)).sum() for w in wlength]
                   for v1 in veloc.T] for v2 in veloc.T])
    tdf=gsp*comm.allreduce(tdf,MPI.SUM)/Nk
    if rank==0:
        # Write the lower triangle of the TDF tensor per energy point.
        f=open('tdf.dat','w')
        for w,td in zip(wlength,tdf.T):
            f.write('%7.3f '%w)
            for i,d in enumerate(td):
                for dd in d[:i+1]:
                    f.write('%10.3e '%(dd))
            f.write('\n')
        f.close()
    if sw_tdep:
        temp0=np.linspace(temp_min,temp_max,tstep)
    else:
        temp0=[temp_max]
    for temp in temp0:
        mu=calc_mu(eig,Nk,fill,temp)
        # Relaxation time model: constant (sw_tau==0) or energy dependent.
        if sw_tau==0:
            tauw=eig*0+1.
        elif sw_tau==1:
            tauw=1./(itau0+(eig-mu)**2)
        K0,K1,K2=calc_Kn(eig,veloc,temp,mu,tauw)
        sigma=gsp*tau_u*eC*K0/(Nk*Vuc) #sigma=e^2K0 (A/Vm) :1eC is cannceled with eV>J
        #kappa=gsp*tau_u*kb*eC*K2/(temp*Nk*Vuc) #kappa=K2/T (W/Km) :eC(kb) appears with converting eV>J(eV>K)
        kappa=gsp*tau_u*kb*eC*(K2-K1.dot(sclin.inv(K0).dot(K1)))/(temp*Nk*Vuc)
        sigmaS=gsp*tau_u*kb*eC*K1/(temp*Nk*Vuc) #sigmaS=eK1/T (A/mK)
        Seebeck=-kb*sclin.inv(K0).dot(K1)/temp #S=K0^(-1)K1/eT (V/K) :kb appears with converting eV>K
        Pertier=K1.dot(sclin.inv(K0)) #pi=K1K0^(-1)/e (V:J/C) :eC is cannceled with eV>J
        PF=sigmaS.dot(Seebeck)
        if rank==0:
            '''
            sigma,kappa,sigmaS consistent with boltzwann in cartesian coordinate.
            but S is sign inverted. should we multiply by a minus?
            Lorenz number of free electron is 2.44e-8(WOhmK^-2)
            O(L)~1e-8
            '''
            print('temperature = %4.0d[K]'%int(temp/kb))
            print('mu = %7.3f'%mu)
            print('sigma matrix')
            print(sigma.round(10))
            print('kappa matrix')
            print(kappa.round(10))
            print('sigmaS matrix')
            print(sigmaS.round(10))
            print('Seebeck matrix')
            print(Seebeck.round(10))
            print('Pertier matrix')
            print(Pertier.round(13))
            print('Lorenz matrix')
            print(kb*kappa/(sigma*temp))
            print('Power Factor')
            print(PF.round(10))
from typing import List
from typing import Union
import time
def time_match(
    data: List,
    times: Union[List[str], List[int], int, str],
    conv_codes: List[str],
    strptime_attr: str,
    name: str,
) -> np.ndarray:
    """
    Match times by applying conversion codes to filtering list.

    Parameters
    ----------
    data
        Input data to perform filtering on
    times
        Times to match
    conv_codes
        If :obj:`times` contains strings, conversion codes to try passing to
        :func:`time.strptime` to convert :obj:`times` to :class:`datetime.datetime`
    strptime_attr
        If :obj:`times` contains strings, the :class:`datetime.datetime` attribute to
        finalize the conversion of strings to integers
    name
        Name of the part of a datetime to extract, used to produce useful error
        messages.

    Returns
    -------
    :class:`numpy.ndarray` of :obj:`bool`
        Array where ``True`` indicates a match

    Raises
    ------
    ValueError
        If input times cannot be understood or if input strings do not lead to
        increasing integers (i.e. "Nov-Feb" will not work, one must use
        ["Nov-Dec", "Jan-Feb"] instead)
    """
    times_list = [times] if isinstance(times, (int, str)) else times

    def conv_strs(strs_to_convert, conv_codes, name):
        # Try each conversion code in turn; first one that parses wins.
        res = None
        for conv_code in conv_codes:
            try:
                res = [
                    getattr(time.strptime(t, conv_code), strptime_attr)
                    for t in strs_to_convert
                ]
                break
            except ValueError:
                continue
        if res is None:
            error_msg = "Could not convert {} '{}' to integer".format(
                name, strs_to_convert
            )
            raise ValueError(error_msg)
        return res

    if isinstance(times_list[0], str):
        to_delete = []
        to_append = []  # type: List
        for i, timeset in enumerate(times_list):
            # ignore type as already established we're looking at strings
            if "-" in timeset:  # type: ignore
                ints = conv_strs(timeset.split("-"), conv_codes, name)  # type: ignore
                if ints[0] > ints[1]:
                    error_msg = (
                        "string ranges must lead to increasing integer ranges,"
                        " {} becomes {}".format(timeset, ints)
                    )
                    raise ValueError(error_msg)
                # + 1 to include last month
                to_append += [j for j in range(ints[0], ints[1] + 1)]
                to_delete.append(i)
        # Bug fix: delete from the highest index down. Deleting in ascending
        # order shifted the remaining indices, so with more than one range
        # string the wrong elements were removed.
        for i in reversed(to_delete):
            del times_list[i]
        times_list = conv_strs(times_list, conv_codes, name)
        times_list += to_append
    return is_in(data, times_list)
def find_best_input_size(sizes=[40]):
    """Train N models per candidate input size and report accuracy statistics.

    Args:
        sizes: candidate input sizes to evaluate (the list is never mutated).

    Returns:
        (accuracies, sigma, time): per-size mean accuracy, per-size standard
        deviation of the accuracy, and per-size mean training time.
    """
    accuracies = []
    t = []
    sigma = []
    time = []
    for size in sizes:
        accuracy = []
        N = 20
        for j in range(N):
            # Bug fix: clear_session was referenced but never called (missing
            # parentheses), so stale graphs accumulated across training runs.
            tf.keras.backend.clear_session()
            accuracy_, _, _, _, _, _, t_ = train_model(size=size)
            accuracy.append(accuracy_)
            t.append(t_)
        # NOTE(review): t accumulates across sizes, so time[i] averages all
        # runs seen so far (not just this size's) — confirm this is intended.
        time.append(np.average(t))
        accuracies.append(np.average(accuracy))
        sigma.append(np.std(accuracy))
        print("Average accuracy: " + str(np.average(accuracy)))
        print("Standard deviation: " + str(np.std(accuracy)))
    return accuracies, sigma, time
import torch
def eval_acc(trainer, dataset="val"):
    """Evaluate top-1 classification accuracy on the trainer's validation set.

    Puts the model in eval mode, counts argmax hits over all validation
    batches, logs the accuracy via the trainer's logger and returns it.
    """
    trainer.model.eval()
    with torch.no_grad():
        correct = 0
        seen = 0
        for inputs, targets in trainer.val_dataset():
            inputs = nested_to_cuda(inputs, trainer.device)
            targets = nested_to_cuda(targets, trainer.device)
            outputs = trainer.model(inputs)
            logits = outputs[0]
            hits = (logits.argmax(1) == targets[0].view(-1)).sum()
            correct += hits.item()
            seen += targets[0].size(0)
        acc = correct / seen
        trainer.logger.info("acc:%f" % acc)
        return acc
import os
import sys
def which(program):
    """
    Locate *program* on PATH and return its full path, or None if absent.
    From: http://stackoverflow.com/q/377017/
    """
    def is_exe(fpath):
        ok = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
        if not ok and sys.platform == 'win32':
            # Windows: retry with the implicit .exe extension.
            fpath = fpath + ".exe"
            ok = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
        return ok

    head, _ = os.path.split(program)
    if head:
        # Explicit path given: check it directly, do not search PATH.
        if is_exe(program):
            logger.debug("found executable: " + str(program))
            return program
        return None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory.strip('"'), program)
        if is_exe(candidate):
            logger.debug("found executable: " + str(candidate))
            return candidate
    return None
def _floor(n, base=1):
"""Floor `n` to a multiple of `base`"""
return n // base * base | 49019e4aa925b4f77a7f13f9919d36948bd132cc | 25,632 |
import os
def record_pid(ptype: str, running: bool = True) -> int:
    """
    Record the PID of the running program in the config store.

    Args:
        ptype: which PID slot to update — 'bpid' or 'fpid'; any other value
            records nothing (the pid is still returned).
        running: when False, -1 is stored instead of the current PID.

    Returns:
        The PID value that was recorded (or -1).
    """
    pid = os.getpid() if running else -1
    # The update_config return value was previously bound to an unused local.
    if ptype == 'bpid':
        dao.update_config([ConfigVO(const.Key.Run.BPID.value, pid)], True)
    elif ptype == 'fpid':
        dao.update_config([ConfigVO(const.Key.Run.FPID.value, pid)], True)
    return pid
def fixed_timezone(offset):  # type: (int) -> _FixedTimezone
    """
    Return a (cached) Timezone instance for the given offset in seconds.
    """
    try:
        return _tz_cache[offset]
    except KeyError:
        tz = _FixedTimezone(offset)
        _tz_cache[offset] = tz
        return tz
def getFactoriesInfo():
    """
    Map each sensor object type to the name of the factory that creates it.
    """
    factories = {}
    factories['Stitcher'] = {'factory': 'createStitcher'}
    return factories
from typing import Dict
from typing import List
from typing import Optional
def pending_observations_as_array(
    pending_observations: Dict[str, List[ObservationFeatures]],
    outcome_names: List[str],
    param_names: List[str],
) -> Optional[List[np.ndarray]]:
    """Re-format pending observations.

    Args:
        pending_observations: Mapping from metric name to the list of pending
            observation features for that metric.
        outcome_names: List of outcome names.
        param_names: List of fitted param names.

    Returns:
        Filtered pending observations data, by outcome and param names, or
        None when there are no pending observations.
    """
    # Truthiness check is the idiomatic form of `len(...) == 0`.
    if not pending_observations:
        return None
    pending_array: List[np.ndarray] = [np.array([]) for _ in outcome_names]
    for metric_name, po_list in pending_observations.items():
        # It is possible that some metrics attached to the experiment should
        # not be included in pending features for a given model. For example,
        # if a model is fit to initial data that is missing some of the
        # metrics on the experiment, or if a model just should not be fit for
        # some of the metrics attached to the experiment. So metrics that
        # appear in pending_observations (drawn from an experiment) but not
        # in outcome_names (metrics expected by the model) are filtered out.
        if metric_name not in outcome_names:
            continue
        pending_array[outcome_names.index(metric_name)] = np.array(
            [[po.parameters[p] for p in param_names] for po in po_list]
        )
    return pending_array
import os
def sauv_record_jeu(pseudo, collec, numero, score):
    """
    Save a new record (best score) to the records file.

    :param pseudo: Nickname of the player who set the new record
    :param collec: Collection of the puzzle on which the record was set
    :param numero: Number of the puzzle on which the record was set
    :param score: Score of the new record
    :return: True if the record was saved, otherwise False
    """
    # One record line: "<puzzle id>;<score>;<player>"
    ligne = outil.puzzle_xsb(collec, numero) + ';' + str(score) + ';' + pseudo + '\n'
    chemin = os.path.join("sauvegardes", "records.txt")
    if os.path.isfile(chemin):
        with open(chemin, 'r', encoding='UTF-8') as lect_fichier:
            contenu_record = lect_fichier.readlines()
            record = ''.join(contenu_record)
        # NOTE(review): substring membership — could falsely match when one
        # puzzle identifier is contained in another; confirm identifiers are
        # unambiguous.
        if outil.puzzle_xsb(collec, numero) not in record:
            record += ligne
            with open(chemin, 'w', encoding='UTF-8') as ecriture_fichier_ajout:
                ecriture_fichier_ajout.write(record)
            return True
        else:
            record = record.split('\n')
            for i in range(len(record)):
                if record[i].split(';')[0] == outil.puzzle_xsb(collec, numero):
                    puzzle, val_record, nom_joueur = record[i].split(';')
                    # Keep the existing record when it is already better (lower).
                    if int(val_record) < score:
                        return False
                    else:
                        val_record = ';' + str(score) + ';'
                        nom_joueur = pseudo
                        tup = (puzzle, val_record, nom_joueur)
                        record[i] = ''.join(tup)
            record = '\n'.join(record)
            with open(chemin, 'w', encoding='UTF-8') as ecriture_fichier_modif:
                ecriture_fichier_modif.write(record)
            return True
    else:
        # First record ever: create the file.
        # NOTE(review): this branch falls through without returning, so it
        # yields None although the docstring promises True — confirm whether
        # a `return True` is missing here.
        with open(chemin, 'w', encoding='UTF-8') as creation_fichier:
            creation_fichier.write(ligne) | 4b1e4176898eadc4fff965d0f9d4c2da37a7859a | 25,637
def inverted_conditional_planar(input_dim, context_dim, hidden_dims=None):
    """
    Helper that builds an
    :class:`~pyro.distributions.transforms.ConditionalPlanar` (inverted
    variant), constructing the dense hypernetwork with the correct
    input/output dimensions.

    :param input_dim: Dimension of input variable
    :type input_dim: int
    :param context_dim: Dimension of context variable
    :type context_dim: int
    :param hidden_dims: Desired hidden dimensions of the dense network.
        Defaults to [input_dim * 10, input_dim * 10].
    :type hidden_dims: list[int]
    """
    if hidden_dims is None:
        hidden_dims = [10 * input_dim, 10 * input_dim]
    hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
    return InvertedConditionalPlanar(hypernet)
def create_category_index(categories):
    """Creates dictionary of COCO compatible categories keyed by category id.

    Args:
      categories: a list of dicts, each of which has the following keys:
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name
          e.g., 'cat', 'dog'.

    Returns:
      category_index: a dict containing the same entries as categories, but keyed
        by the 'id' field of each category.
    """
    # Bug fix: key on the category's own 'id' (as the docstring promises),
    # not on the enumeration position, which silently renumbered categories.
    return {cat['id']: cat for cat in categories}
import torch
def n_step_returns(q_values, rewards, kls, discount=0.99):
    """
    Calculates all n-step returns.

    Args:
        q_values (torch.Tensor): the Q-value estimates at each time step [time_steps+1, batch_size, 1]
        rewards (torch.Tensor): the rewards at each time step [time_steps, batch_size, 1]
        kls (torch.Tensor): the scaled kl divergences at each time step [time_steps, batch_size, 1]
        discount (float): the temporal discount factor

    Returns:
        torch.Tensor: n-step return estimates [time_steps+1, batch_size, 1]
    """
    # discounts[i] == discount ** i, broadcast to the batch shape.
    discounts = torch.cat([(discount * torch.ones_like(q_values[:1])) ** i
                           for i in range(rewards.shape[0])], 0)
    # Bug fix: apply the KL penalty on a copy — the original wrote through
    # `rewards` in place, mutating the caller's tensor as a side effect.
    penalized = rewards.clone()
    penalized[1:] = penalized[1:] - kls[:-1]
    discounted_returns = torch.cumsum(discounts * penalized, dim=0)
    terminal_values = discount * discounts * (q_values[1:] - kls)
    return torch.cat([q_values[:1], discounted_returns + terminal_values], dim=0)
import os
def user(*args: str) -> str:
    """
    Creates an absolute path from the specified relative components within the
    user's Cauldron app data folder.

    :param args:
        Relative components of the path relative to the root package
    :return:
        The absolute path
    """
    parts = ('~', '.cauldron') + args
    return clean(os.path.join(*parts))
import random
def random_split_exact(iterable, split_fractions=None):
    """Randomly splits items into multiple sample lists according to the given
    split fractions.

    The number of items in each sample list is given exactly by the specified
    fractions.

    Args:
        iterable: a finite iterable
        split_fractions: an optional list of split fractions, which should sum
            to 1. By default, [0.5, 0.5] is used

    Returns:
        sample_lists: a list of lists, of the same length as `split_fractions`.
            Each sub-list contains items from the original iterable
    """
    split_fractions = _validate_split_fractions(split_fractions)
    pool = list(iterable)
    random.shuffle(pool)
    return _split_in_order(pool, split_fractions)
def calc_output_coords(source_dataset, config, model_profile):
    """Construct the coordinates for the dataset containing the extracted variable(s).

    The returned coordinates container has the following mapping of attributes to
    coordinate names:

    * :kbd:`time`: :kbd:`time`
    * :kbd:`depth`: :kbd:`depth`
    * :kbd:`y_index`: :kbd:`gridY`
    * :kbd:`x_index`: :kbd:`gridx`

    :param source_dataset: Dataset from which variables are being extracted.
    :type source_dataset: :py:class:`xarray.Dataset`

    :param dict config: Extraction processing configuration dictionary.

    :param dict model_profile: Model profile dictionary.

    :return: Mapping of coordinate names to their data arrays.
    :rtype: dict
    """
    # Human-readable examples interpolated into the time coord's comment attr.
    time_examples = {
        "day": (
            "e.g. the field average values for 8 February 2022 have "
            "a time value of 2022-02-08 12:00:00Z"
        ),
        "hour": (
            "e.g. the field average values for the first hour of 8 February 2022 have "
            "a time value of 2022-02-08 00:30:00Z"
        ),
    }
    time_interval = config.get("selection", {}).get("time interval", 1)
    # stop=None in slice() means the length of the array without having to know what that is
    time_selector = {model_profile["time coord"]["name"]: slice(0, None, time_interval)}
    extract_time_origin = model_profile["extraction time origin"]
    # NOTE(review): the "hour" arm and the wildcard arm yield the same offset;
    # the explicit "hour" arm is redundant but documents the supported bases.
    match config["dataset"]["time base"]:
        case "day":
            time_offset = "12:00:00"
        case "hour":
            time_offset = "00:30:00"
        case _:
            time_offset = "00:30:00"
    times = create_dataarray(
        "time",
        source_dataset[model_profile["time coord"]["name"]].isel(time_selector),
        attrs={
            "standard_name": "time",
            "long_name": "Time Axis",
            "time_origin": f"{extract_time_origin} {time_offset}",
            "comment": (
                f"time values are UTC at the centre of the intervals over which the "
                f"calculated model results are averaged; "
                f"{time_examples[config['dataset']['time base']]}"
            ),
            # time_origin and units are provided by encoding when dataset is written to netCDF file
        },
    )
    logger.debug("extraction time coordinate", time=times)
    # There are 2 special cases for which there is no depth coordinate:
    # 1. The dataset does not have a depth coordinate; e.g. HRDPS surface forcing fields
    # 2. The variable does not have a depth coordinate; e.g. sea surface height
    # in SalishSeaCast grid_T dataset
    if "depth" not in model_profile["chunk size"]:
        # Dataset does not have a depth coordinate; e.g. HRDPS surface forcing fields
        include_depth_coord = False
        depths = None
    else:
        datasets = model_profile["results archive"]["datasets"]
        time_base = config["dataset"]["time base"]
        vars_group = config["dataset"]["variables group"]
        depth_coord = datasets[time_base][vars_group]["depth coord"]
        include_depth_coord = any(
            [depth_coord in var.coords for var in source_dataset.data_vars.values()]
        )
        if not include_depth_coord:
            # Variable does not have a depth coordinate; e.g. sea surface height in SalishSeaCast
            # grid_T dataset
            depths = None
        else:
            # At least 1 variable has a depth coordinate, so include depth in output dataset
            # coordinates
            depth_min = config.get("selection", {}).get("depth", {}).get("depth min", 0)
            depth_max = (
                config.get("selection", {}).get("depth", {}).get("depth max", None)
            )
            depth_interval = (
                config.get("selection", {}).get("depth", {}).get("depth interval", 1)
            )
            depth_selector = slice(depth_min, depth_max, depth_interval)
            depths = create_dataarray(
                "depth",
                source_dataset[depth_coord].isel({depth_coord: depth_selector}),
                attrs={
                    "standard_name": "sea_floor_depth",
                    "long_name": "Sea Floor Depth",
                    "units": "metres",
                    "positive": "down",
                },
            )
            logger.debug("extraction depth coordinate", depth=depths)
    y_min = config.get("selection", {}).get("grid y", {}).get("y min", 0)
    y_max = config.get("selection", {}).get("grid y", {}).get("y max", None)
    y_interval = config.get("selection", {}).get("grid y", {}).get("y interval", 1)
    y_selector = slice(y_min, y_max, y_interval)
    y_coord = model_profile["y coord"]["name"]
    y_indices = create_dataarray(
        "gridY",
        source_dataset[y_coord].isel({y_coord: y_selector}).astype(int),
        attrs={
            "standard_name": "y",
            "long_name": "Grid Y",
            "units": model_profile["y coord"].get("units", "count"),
            "comment": model_profile["y coord"].get(
                "comment", "gridY values are grid indices in the model y-direction"
            ),
        },
    )
    logger.debug("extraction y coordinate", y_index=y_indices)
    x_min = config.get("selection", {}).get("grid x", {}).get("x min", 0)
    x_max = config.get("selection", {}).get("grid x", {}).get("x max", None)
    x_interval = config.get("selection", {}).get("grid x", {}).get("x interval", 1)
    x_selector = slice(x_min, x_max, x_interval)
    x_coord = model_profile["x coord"]["name"]
    x_indices = create_dataarray(
        "gridX",
        source_dataset[x_coord].isel({x_coord: x_selector}).astype(int),
        attrs={
            "standard_name": "x",
            "long_name": "Grid X",
            "units": model_profile["x coord"].get("units", "count"),
            "comment": model_profile["x coord"].get(
                "comment", "gridX values are grid indices in the model x-direction"
            ),
        },
    )
    logger.debug("extraction x coordinate", x_index=x_indices)
    return (
        {"time": times, "depth": depths, "gridY": y_indices, "gridX": x_indices}
        if include_depth_coord
        else {"time": times, "gridY": y_indices, "gridX": x_indices}
    ) | 22759d734f5356bc0597db5528eac864df23d39c | 25,643
def computeMaskIntra(inputFilename, outputFilename, m=0.2, M=0.9, cc=1):
    """Deprecated: use :func:`compute_mask_intra` instead."""
    # Bug fix: the old body contained a Python-2 `print "here we are"`
    # statement (a SyntaxError under Python 3) left over from debugging.
    # Emit a proper deprecation warning instead.
    import warnings  # local import: only needed on this deprecated path
    warnings.warn(
        "computeMaskIntra is deprecated; use compute_mask_intra",
        DeprecationWarning,
        stacklevel=2,
    )
    return compute_mask_intra(inputFilename, outputFilename,
                              m=m, M=M, cc=cc)
def read_space_delimited(filename, skiprows=None, class_labels=True):
    """Read a space-delimited file into (data, target) arrays.

    skiprows: rows to skip when reading the file. Automatic comment detection
        is not used because `#` characters also appear as data labels.
    class_labels: when True, the last column is treated as the class label;
        otherwise target is an all-zero vector.
    """
    with open(filename, 'r') as handle:
        frame = pd.read_csv(handle, skiprows=skiprows, skip_blank_lines=True,
                            comment=None, header=None, sep=' ', dtype=str)
    if class_labels:
        # Targets are the last column; data is everything else.
        target = frame.iloc[:, -1].values
        data = frame.iloc[:, :-1].values
    else:
        data = frame.values
        target = np.zeros(data.shape[0])
    return data, target
def get_gas_price(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get network gas price

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Network gas price

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmy_gasPrice'
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        # Result is a hex string, e.g. "0x3b9aca00".
        return int(reply['result'], 16)
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def function_3():
    """This is a Function prototype in Python"""
    message = "Printing Docs String"
    print(message)
    return 0
def client():
    """Test-client fixture wrapping a freshly started microservice app.

    NOTE(review): start_service() is invoked on every call, so each client
    gets its own app instance; presumably `testing.TestClient` comes from the
    web framework's test helpers — confirm against the test setup.
    """
    return testing.TestClient(app=service.microservice.start_service(), headers=CLIENT_HEADERS) | ea9997f9904057f0ffdc3175f081acb7e21e719d | 25,648
def get_character_bullet(index: int) -> str:
    """Convert a zero-based index to a spreadsheet-style letter bullet.

    0 -> 'a', 1 -> 'b', ..., 25 -> 'z', 26 -> 'aa', 27 -> 'ab', ...

    Bug fix: the previous loop used a modulus of 25 (instead of 26) for the
    higher-order letters, corrupting every bullet past 'az' (e.g. index 701
    produced 'aaz' instead of 'zz'); the old docstring's "27 -> 'aa'" example
    was also off by one.
    """
    # Bijective base-26: the least-significant letter is plain mod-26; each
    # higher-order digit is (value - 1) mod 26 because there is no zero digit.
    result = chr(ord('a') + index % 26)
    index //= 26
    while index > 0:
        result = chr(ord('a') + (index - 1) % 26) + result
        index = (index - 1) // 26
    return result
import os
def get_masks_path(base_dir, trial, prune_iter):
    """Builds the mask save path"""
    trial_part = "trial_{:02d}".format(trial)
    iter_part = "prune_iter_{:02d}".format(prune_iter)
    return os.path.join(base_dir, trial_part, iter_part, "masks")
def update_det_cov(
        res: OptResult,
        jacobian: JacobianValue):
    """Calculates the inv hessian of the deterministic variables.

    Note that this modifies res (its hess_inv mapping is updated in place).
    """
    covars = res.hess_inv
    for var_name, grads in jacobian.items():
        for det_name, jac in grads.items():
            propagated = propagate_uncertainty(covars[var_name], jac)
            covars[det_name] = covars.get(det_name, 0.) + propagated
    return res
def simplex_init_modified(A, b, c):
    """
    Attempt to find a basic feasible vector for the linear program

        max: c*x
        ST:  Ax=b
             x>=0,

    where A is a (m,n) matrix, via a phase-I simplex with artificial variables.

    Input Parameters:
        A - (n,m) constraint matrix
        b - (m,1) vector appearing in the constraint equation above
        c - (1,n) vector giving the coefficients of the objective function

    Output Parameters:
        istatus - integer parameter reporting the condition of the result
            istatus = 0  indicates a basic feasible vector was found
            istatus = 4  indicates that the initialization procedure failed
            istatus = 16 indicates that the problem is infeasible
        iB - integer vector of length m specifying the indices of the basic
            variables
        iN - integer vector of length n-m specifying the indices of the nonbasic
            variables
        xB - vector of length m specifying the values of the basic variables

    NOTE(review): the success paths actually return a 5-tuple
    (istatus, iB, iN, xB, tableau) while the failure paths return a 4-tuple —
    callers must handle both arities; confirm the intended contract.
    """
    # NOTE(review): A_new/b_new alias A and b, so the sign flips below mutate
    # the caller's arrays in place — confirm callers expect this.
    A_new, b_new = A, b
    A_new[find_negative_index(b)] = -A[find_negative_index(b)]
    b_new[find_negative_index(b)] = -b[find_negative_index(b)]
    # Append an identity block of artificial variables.
    A_new = np.hstack((A_new, np.eye(b.shape[0])))
    # problem setup
    c_phase_I = np.zeros(A_new.shape[1]).reshape(1, -1)
    c_phase_I[0, c.shape[1]:] = np.ones(b.shape[0])
    iB = np.arange(c.shape[1], c.shape[1] + b.shape[0]) + 1  # index begin with 1 for input
    iN = np.arange(0, c.shape[1]) + 1
    xB = np.matrix(np.copy(b))
    istatus_step = 1000
    while istatus_step != -1:
        try:
            istatus_step, iB, iN, xB, Binv = simplex_step(A_new, b_new, c_phase_I, iB, iN, xB, irule=0)
        except np.linalg.LinAlgError:
            raise ValueError("iB cannot form a basis!")
        if istatus_step == 16:
            istatus, iB, iN, xB, tableau = 4, None, None, None, None
            return istatus, iB, iN, xB
    # Convert indices back to 0-based for internal use.
    iB = iB - 1
    # NOTE(review): reshape(1, -2) — presumably -1 was intended; NumPy treats
    # a single negative entry as the inferred dimension, but -1 is the
    # documented form.
    optimal_cost = np.matmul(c_phase_I[0, iB].reshape(1, -2), xB)
    if optimal_cost > 0:
        # Phase-I optimum > 0 means no feasible point exists.
        istatus, iB, iN, xB, tableau = 16, None, None, None, None
        return istatus, iB, iN, xB
    if optimal_cost == 0:
        #print("optimal basis is found!")
        istatus = 0
        artificial_idx = np.arange(c.shape[1], c.shape[1] + b.shape[0])
        artificial_in_basis = np.intersect1d(artificial_idx, iB)
        tableau = np.matmul(Binv, A_new)
        #c_new = np.concatenate((c, np.zeros(A.shape[0]).reshape(1, -1)), axis=1)
        #reduced_cost = c - np.matmul(np.matmul(c_new[0, iB], Binv), A)
        if len(artificial_in_basis) == 0:
            #print("no artificial variable in the final basis")
            return istatus, iB+1, iN, xB, tableau[:, 0:A.shape[1]]
        else:
            #print("artificial variable in the final basis")
            # Drive redundant artificial variables out of the basis by
            # dropping all-zero rows.
            for xl in artificial_in_basis:
                row_l = tableau[np.where(iB == xl), :c.shape[1]]
                if np.sum(row_l) == 0:
                    tableau = np.delete(tableau, np.where(iB == xl), axis=0)
                    xB = np.delete(xB, np.where(iB == xl))
                    iB = np.delete(iB, np.where(iB == xl))
            iN = np.setdiff1d(range(c.shape[1]), iB)
            iB = iB + 1
            iN = iN + 1
            xB = xB.reshape(-1, 1)
            return istatus, iB, iN, xB, tableau[:, 0:A.shape[1]] | fd415eedaec1138812fb054656c45450a53535b8 | 25,652
import os
def get_all_user_dir():
    """Get the root user dir. This is the dir where all users are stored.

    Returns:
        str: path (base dir joined with the all-users directory name)
    """
    return os.path.join(get_base_dir(), ALL_USER_DIR_NAME) | 225c30e1d956722c63facde6d876f757dc8c40d5 | 25,653
def LeakyRelu(
    alpha: float,
    do_stabilize: bool = False) -> InternalLayer:
  """Leaky ReLU nonlinearity, i.e. `alpha * min(x, 0) + max(x, 0)`.

  Args:
    alpha: slope for `x < 0`.
    do_stabilize: set to `True` for very deep networks.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # Leaky ReLU is the a=alpha, b=1 special case of ABRelu.
  return ABRelu(alpha, 1, do_stabilize) | 93a9f103c42979e5107291f818d387eb06feb41b | 25,654
def load_interp2d(xz_data_path: str, y: list):
    """
    Setup 2D interpolation.

    Example:
        x1, y1, z1, x2, y2, z2\n
        1, 3, 5, 1, 4, 6\n
        2, 3, 6, 2, 4, 7\n
        3, 3, 7, 3, 4, 8\n

        xy_data_path will lead to a file such as:
        1,5,6\n
        2,6,7\n
        3,7,8\n

        y will be: [3, 4]

    :param xz_data_path: path to csv file with columnated data, e.g. 'x1,z1,z2,...,zn'
    :param y: list of *constant* values for the second independent variable
    :return: initialized interp2d instance
    :raises ValueError: if the number of z columns does not match len(y)
    """
    data = np.genfromtxt(xz_data_path, delimiter=',')
    _, num_col = data.shape
    num_series = num_col - 1
    # Bug fix: the ValueError was previously constructed but never raised, so
    # mismatched inputs fell through to an opaque shape error inside interp2d.
    if num_series != len(y):
        raise ValueError("Number of columns in '{}' inconsistent with 'y'".format(xz_data_path))
    x = data[:, 0]
    z = [data[:, idx] for idx in range(1, num_series + 1)]
    # NOTE(review): scipy.interpolate.interp2d is deprecated (removed in
    # SciPy 1.14); consider RegularGridInterpolator when upgrading SciPy.
    return interp2d(x, y, z)
def save_new_party(json_data):
    """saves a new party in the database

    Args:
        json_data (json) : party details

    Returns:
        json : api endpoint response (a (body, status) tuple: 201 on success,
        400 on validation error, 409 when the party name is already taken)
    """
    # Deserialize the data input against the party schema
    # check if input values throw validation errors
    try:
        data = party_schema.load(json_data)
    except ValidationError as e:
        return jsonify({
            "status": 400,
            "error": e.messages
        }), 400
    party_name = data['party_name']
    hq_address = data['hq_address']
    logo_url = data['logo_url']
    # Query database for party by name
    party_by_name = Party.get_party_by_name(party_name)
    party = db().get_single_row(*party_by_name)
    # NOTE(review): the uniqueness check and the insert below are not atomic
    # (TOCTOU); confirm a DB unique constraint backs the party name.
    if party is None:
        # if name is not taken
        new_party = Party(
            party_name=party_name,
            hq_address=hq_address,
            logo_url=logo_url
        )
        save_changes(new_party)
        # 1. serialize the input for response
        # 2. return serialized and proper format json to api endpoint
        party_saved = db().get_single_row(*party_by_name)
        response = party_schema.dump(party_saved)
        response_object = jsonify({
            "status": 201,
            "data": [response]
        })
        return response_object, 201
    # default response When name is taken
    return jsonify({
        "status": 409,
        "error": "Try a different Party name, Provided name is taken."
    }), 409 | e6a11646c1aa13bfceabb1c143308fc985b3f59d | 25,656
import sys
def _system_path_separator():
"""
System dependent character for element separation in PATH variable
:rtype: str
"""
if sys.platform == 'win32':
return ';'
else:
return ':' | b89a77d5b444a1b806a75e9f024f2084e3cfc93f | 25,657 |
def cost_function(theta, X, y, lamda=0.01, regularized=False):
    """
    Compute the logistic-regression cost, optionally with L2 regularization.

    (``lamda`` is used instead of ``lambda`` because of the keyword conflict.)

    :param theta: array-like of shape (n,1)
        Weights; a non-ndarray sequence is converted to a column vector
    :param X: numpy array of shape (m,n)
        Training data
    :param y: numpy array of shape (m,1)
        Training predictions
    :param lamda: Floating point value
        Regularization parameter
    :param regularized: Bool (Default: False)
        If True, the returned cost includes the L2 penalty
    :return J: Cost of the theta values for the given dataset
        (note: only the cost is returned — no gradient)
    """
    m = y.size
    # Exact-type check (not isinstance) preserved so np.matrix inputs are
    # still converted to a plain ndarray column vector.
    if type(theta) is not np.ndarray:
        theta = np.array(theta).reshape([-1, 1])
    h = sigmoid(X @ theta)
    J = (-(y.T @ np.log(h)) - ((1 - y.T) @ np.log(1 - h))) / m
    if regularized:
        # theta[0] (the intercept) is not penalized, by convention.
        J = J + ((theta[1:].T @ theta[1:]) * (lamda / (2 * m)))
    return J
from pathlib import Path
import torch
def read_image_numpy(input_filename: Path) -> torch.Tensor:
    """
    Read a NumPy ``.npy`` file and return its contents as a torch.Tensor.

    :param input_filename: Source image file path.
    :return: torch.Tensor of shape (C, H, W).
    """
    return torch.from_numpy(np.load(input_filename))
import argparse
def init_argparse():
    """Parses the required arguments file name and source database type and returns a parser object"""
    parser = argparse.ArgumentParser(
        usage="%(prog)s --filename 'test.dtsx' --source 'postgres'",
        description="Creates a configuration file in the output directory based on the SQLs in the dtsx file."
    )
    # Both arguments are mandatory plain-store string options.
    required_options = (
        ("-f", "--filename", 'Input DTSX file name'),
        ("-s", "--source", 'Type of the source database (sqlserver, postgres)'),
    )
    for short_flag, long_flag, help_text in required_options:
        parser.add_argument(short_flag, long_flag, action='store', help=help_text, required=True)
    return parser
def solution(n: int = 4000000) -> int:
    """Returns the sum of all fibonacci sequence even elements that are lower
    or equals to n.

    >>> solution(10)
    10
    >>> solution(15)
    10
    >>> solution(2)
    2
    >>> solution(1)
    0
    >>> solution(34)
    44
    """
    # Walk the sequence with two rolling values instead of materializing the
    # whole list (the original kept every term in memory and re-scanned it).
    previous, current = 0, 1
    total = 0
    while previous <= n:
        if previous % 2 == 0:
            total += previous
        previous, current = current, previous + current
    return total
def triu(m: ndarray,
         k: int = 0) -> ndarray:
    """
    Upper triangle of an array.

    NOTE(review): the diagonal-offset parameter ``k`` is accepted for
    numpy-compatibility but is currently ignored — af.data.upper is always
    applied relative to the main diagonal. Confirm whether callers rely on
    k != 0.
    """
    af_array = af.data.upper(m._af_array, is_unit_diag=False)
    return ndarray(af_array) | 00b0b4a301b0b59214a53d8b741c7417bac95f3d | 25,662
def generate_annotation(overlay_path, img_dim, ext):
    """
    Generate custom annotation for one image from its DDSM overlay.

    Args:
    ----------
    overlay_path: string
        Overlay file path
    img_dim: tuple
        (img_height, img_width)
    ext: string
        Image file format

    Returns:
    ----------
    pandas.DataFrame
        columns: ['NAME','FEATURE','SEVERITY','X1','Y1','X2','Y2','HEIGHT','WIDTH']
        NAME    <--> image filename with extension
        FEATURE <--> lesion_type: mass or calcifications
        X1,Y1,X2,Y2 <--> xyrb bounding box
        HEIGHT  <--> image height
        WIDTH   <--> image width
    """
    my_columns = ["NAME","FEATURE","SEVERITY","X1","Y1","X2","Y2","HEIGHT","WIDTH"]
    H, W = img_dim
    overlay = get_overlay_info(overlay_path)
    name = str(overlay["name"]) + ext
    rows = []
    for i in range(1, overlay["total_abnormalities"] + 1):
        abnormality = overlay[i]
        lesion_type = '_'.join(abnormality["lesion_type"])
        pathology_type = abnormality["pathology_type"][0]
        # xyrb bounding box around the lesion boundary
        x, y, w, h = cv2.boundingRect(abnormality["boundary"])
        rows.append([str(name), str(lesion_type), str(pathology_type),
                     int(x), int(y), int(x + w), int(y + h), H, W])
    # DataFrame.append was removed in pandas 2.0; collect all rows first and
    # build the frame once (also O(n) instead of O(n^2)).
    return pd.DataFrame(rows, columns=my_columns)
from datetime import datetime
def parse_episode_page(loc, contents):
    """Parse a page describing a single podcast episode.

    @param loc: The URL of this page.
    @type loc: basestring

    @param contents: The raw HTML contents of the episode page from which
        episode information should be parsed.
    @type contents: basestring

    @return: Dictionary describing the episode. Contains keys name (str value),
        date (datetime.date), loc (url - str value), duration (seconds - int),
        and orig_tags (tags applied to episode - list of str)
    @rtype: dict
    """
    soup = bs4.BeautifulSoup(contents)
    header = soup.find(class_='centerPosts')
    title = header.find('strong').contents[0]
    date_str = soup.find(class_='pdateS').find('em').contents[0]
    # Assumes a date like "Feb 8,2022" (no space after the comma) — with a
    # space the split would produce an empty token; TODO confirm site format.
    date_components = date_str.replace(',', ' ').split(' ')
    year = int(date_components[2])
    month = common.MONTH_ABBRV[date_components[0]]
    day = int(date_components[1])
    # NOTE(review): with `from datetime import datetime` in scope this call
    # would fail — `datetime.date(y, m, d)` needs the *module* (`import
    # datetime`); confirm the module-level import.
    episode_date = datetime.date(year, month, day)
    # De-duplicate and sort the tag labels.
    tags = sorted(set(map(
        lambda x: x.contents[0], soup.findAll('a', rel='tag')
    )))
    duration_str = soup.find(class_='podpress_mediafile_dursize').contents[0]
    duration_str_clean = duration_str.replace('[ ', '').replace(' ]', '')
    duration = common.interpret_duration(duration_str_clean)
    return {
        'title': title,
        'date': episode_date,
        'tags': tags,
        'loc': loc,
        'duration': duration
    } | 805e466c15741ee004059817efa70da66e470871 | 25,664
def _bitarray_to_message(barr):
    """Decode a bitarray (length a multiple of 5) into bytes, dropping any zero padding."""
    pad = len(barr) % 8
    if not pad:
        return barr.bytes
    # Strip the trailing padding bits before converting to whole bytes.
    return bitstring.Bits(bin=barr.bin[:-pad]).bytes
import os
import subprocess
import logging
def join(kmerfile, codonfile, minhashfile, dtemp):
    """Externally join with built-in GNU Coreutils `join` in the order
    label, kmers, codons, minhash.

    Args:
        kmerfile (str): Kmer csv file
        codonfile (str): Codon csv file
        minhashfile (str): Minhash csv file
        dtemp (str): the path to a temporary directory

    Returns:
        (str) the path of the merged file created, or None if joining failed

    References:
        GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html
    """
    kcfile = os.path.join(dtemp, "kcfile.csv")
    mergefile = os.path.join(dtemp, "mergefile.csv")
    try:
        # First join kmers with codons on the first field of each file.
        with open(kcfile, 'w') as kcf:
            subprocess.run(['join', '-t', ',', '-1', '1', '-2', '1',
                            kmerfile, codonfile], check=True, stdout=kcf)
        # Then join the intermediate result with the minhash file.
        with open(mergefile, "w") as mf:
            subprocess.run(['join', '-t', ',', '-1', '1', '-2', '1',
                            kcfile, minhashfile], check=True, stdout=mf)
        return mergefile
    except subprocess.CalledProcessError:
        # BUG FIX: subprocess.run(check=True) raises CalledProcessError,
        # not RuntimeError, so failures previously propagated unlogged.
        logging.exception("Could not merge csv files using unix join command")
    finally:
        # Remove the intermediate file on success and failure alike.
        if os.path.exists(kcfile):
            os.remove(kcfile)
def align_address_to_page(address: int) -> int:
    """Round the (already sanitised) address down to the start of its page."""
    shift = DEFAULT_PAGE_ALIGN_SHIFT
    # Clearing the low `shift` bits aligns to the page boundary.
    return (align_address(address) >> shift) << shift
from typing import List
from sys import path
def virtual_entities(entity: AnyText, kind: int = Kind.HATCHES) -> EntityQuery:
    """Convert the text content of DXF entities TEXT and ATTRIB into virtual
    SPLINE and 3D POLYLINE entities or approximated LWPOLYLINE entities
    as outlines, or as HATCH entities as fillings.

    Args:
        entity: TEXT or ATTRIB entity
        kind: kind of entities to create as bit flags, see enum :class:`Kind`

    Returns:
        the virtual DXF entities as an :class:`~ezdxf.query.EntityQuery` object
    """
    check_entity_type(entity)
    attribs = entity.graphic_properties()
    result: List[DXFGraphic] = []

    if kind & Kind.HATCHES:
        result.extend(make_hatches_from_entity(entity))

    # Outline variants share the same path extraction step.
    if kind & (Kind.SPLINES + Kind.LWPOLYLINES):
        paths = make_paths_from_entity(entity)
        if kind & Kind.SPLINES:
            result.extend(
                path.to_splines_and_polylines(paths, dxfattribs=attribs)
            )
        if kind & Kind.LWPOLYLINES:
            result.extend(
                path.to_lwpolylines(
                    paths, extrusion=entity.dxf.extrusion, dxfattribs=attribs
                )
            )
    return EntityQuery(result)
def getNamespace(modelName):
    """Extract the namespace portion of a rig top node name.

    Args:
        modelName (str): Rig top node name, possibly namespace-qualified.

    Returns:
        str: The namespace without the trailing colon, or "" if none.
    """
    if not modelName:
        return ""
    parts = modelName.split(":")
    # Everything before the final component is the namespace.
    return ":".join(parts[:-1]) if len(parts) >= 2 else ""
import re
def compile_rules(environment):
    """Compile the environment's delimiter configuration into a list of
    (token, escaped-pattern) rules, longest delimiter first."""
    e = re.escape
    # Each rule carries its delimiter length so ties are broken deterministically.
    rules = [
        (len(environment.comment_start_string), TOKEN_COMMENT_BEGIN,
         e(environment.comment_start_string)),
        (len(environment.block_start_string), TOKEN_BLOCK_BEGIN,
         e(environment.block_start_string)),
        (len(environment.variable_start_string), TOKEN_VARIABLE_BEGIN,
         e(environment.variable_start_string)),
    ]

    if environment.line_statement_prefix is not None:
        rules.append(
            (len(environment.line_statement_prefix), TOKEN_LINESTATEMENT_BEGIN,
             r"^[ \t\v]*" + e(environment.line_statement_prefix))
        )
    if environment.line_comment_prefix is not None:
        rules.append(
            (len(environment.line_comment_prefix), TOKEN_LINECOMMENT_BEGIN,
             r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix))
        )

    # Longest delimiters must match first; drop the length before returning.
    rules.sort(reverse=True)
    return [rule[1:] for rule in rules]
import os
def get_output(db, output_id):
    """
    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param output_id: ID of an Output object
    :returns: (ds_key, calc_id, dirname)
    """
    row = db('SELECT output.*, ds_calc_dir FROM output, job '
             'WHERE oq_job_id=job.id AND output.id=?x', output_id, one=True)
    # The datastore directory is the parent of the calculation directory.
    calc_dirname = os.path.dirname(row.ds_calc_dir)
    return row.ds_key, row.oq_job_id, calc_dirname
def compute_window_based_feature(seq,
                                 sample_freq,
                                 func_handle,
                                 window_length,
                                 window_stride,
                                 verbose=False,
                                 **kwargs):
    """Apply *func_handle* inside a sliding window over a sequence.

    Parameters
    ----------
    seq : 1D array like object
        e.g. a blood volume pulse sequence, a continuous blood pressure
        sequence, a heart rate sequence etc.
    sample_freq : float
        the (constant) sampling frequency of the sequence
    func_handle : function handle
        the function to apply on each window; called as
        ``func_handle(window, sample_freq, verbose=True, **kwargs)`` and
        expected to return a tuple whose first item is the metric value
    window_length : float
        the length of each window in seconds
    window_stride : float
        the stride between two consecutive windows in seconds
    verbose : bool, optional
        if True, also return per-window intermediate results, by default False

    Returns
    -------
    numpy.ndarray of shape [n,]
        the extracted metric value for each of the n windows
    dict with keys {'w_data', 'w_masks'}
        per-window intermediate results and the boolean masks used to
        extract each window; only returned when verbose is True
    """
    seq = np.squeeze(seq)
    # Timestamps in seconds for every sample.
    timestamps = np.arange(0, len(seq)) * 1 / sample_freq

    values = []
    details = {'w_data': [], 'w_masks': []}

    window_start = 0
    window_idx = 0
    # Optional per-window reference heart rates (one entry per window).
    ref_hr_bpm = kwargs.pop('ref_hr_bpm', None)
    if ref_hr_bpm is not None:
        ref_hr_bpm = np.squeeze(ref_hr_bpm)

    while window_start + window_length <= timestamps[-1]:
        mask = (timestamps >= window_start) & (timestamps < window_start + window_length)
        if ref_hr_bpm is not None:
            kwargs['ref_hr_bpm'] = ref_hr_bpm[window_idx]
        result = func_handle(seq[mask], sample_freq, verbose=True, **kwargs)
        values.append(result[0])
        if verbose:
            details['w_data'].append(result[1])
            details['w_masks'].append(mask)
        window_start += window_stride
        window_idx += 1

    if verbose:
        return np.asarray(values), details
    return np.asarray(values)
def read_cif(filename):
    """
    read the cif, mainly for pyxtal cif output
    Be cautious in using it to read other cif files

    Args:
        filename: path of the structure file

    Return:
        (lattice, sites): the parsed Lattice object and the list of
        atom_site objects
    """
    species = []   # element symbol per parsed atom row
    coords = []    # fractional [x, y, z] per parsed atom row
    with open(filename, 'r') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            if line.startswith('_symmetry_Int_Tables_number'):
                sg = int(line.split()[-1])  # space-group number
            elif line.startswith('_cell_length_a'):
                # The five following lines are assumed to hold b, c, alpha,
                # beta, gamma in that fixed order (pyxtal output layout).
                a = float(lines[i].split()[-1])
                b = float(lines[i+1].split()[-1])
                c = float(lines[i+2].split()[-1])
                alpha = float(lines[i+3].split()[-1])
                beta = float(lines[i+4].split()[-1])
                gamma = float(lines[i+5].split()[-1])
            elif line.startswith('_symmetry_cell_setting'):
                lat_type = line.split()[-1]  # e.g. monoclinic, triclinic
            elif line.startswith('_symmetry_space_group_name_H-M '):
                # The symbol is written quoted; eval() strips the quotes.
                # NOTE(review): eval on file content -- safe only for trusted
                # pyxtal output; these settings use a non-standard (diagonal)
                # cell choice.
                symbol = line.split()[-1]
                if eval(symbol) in ["Pn", "P21/n", "C2/n"]:
                    diag = True
                else:
                    diag = False
            elif line.find('_atom_site') >= 0:
                # Skip the remaining _atom_site header lines, then read data
                # rows until a line with <= 3 fields ends the loop block.
                s = i
                while True:
                    s += 1
                    if lines[s].find('_atom_site') >= 0:
                        pass
                    elif len(lines[s].split()) <= 3:
                        break
                    else:
                        # Columns from the right: x, y, z, occupancy.
                        tmp = lines[s].split()
                        pos = [float(tmp[-4]), float(tmp[-3]), float(tmp[-2])]
                        species.append(tmp[0])
                        coords.append(pos)
                break
    # NOTE(review): sg, lat_type and diag are only bound if the matching tags
    # were present in the file; a malformed cif raises NameError here.
    wp0 = Group(sg)[0]  # general Wyckoff position of the space group
    lattice = Lattice.from_para(a, b, c, alpha, beta, gamma, lat_type)
    sites = []
    for specie, coord in zip(species, coords):
        # Merge the coordinate onto the appropriate Wyckoff position.
        pt, wp, _ = WP_merge(coord, lattice.matrix, wp0, tol=0.1)
        sites.append(atom_site(wp, pt, specie, diag))
    return lattice, sites
import random
def custom_data_splits(src_sents, trg_sents, val_samples=3000, seed=SEED):
    """
    splits data based on custom number of validation/test samples

    :param src_sents: the source sentences
    :param trg_sents: the target sentences
    :param val_samples: number of samples used for validation AND for test
    :param seed: the random seed used for shuffling
    :return: training, validation, test and preview-sample splits, each as
        a (sources, targets) pair of tuples
    """
    assert len(src_sents) == len(trg_sents)
    pairs = list(zip(src_sents, trg_sents))
    print("Total samples: ", len(pairs))
    print("Shuffling data....")
    random.seed(seed)
    random.shuffle(pairs)

    # First val_samples -> validation, next val_samples -> test, rest -> train.
    val_set = pairs[:val_samples]
    test_set = pairs[val_samples:2 * val_samples]
    train_set = pairs[2 * val_samples:]

    print("Total train:", len(train_set))
    print("Total validation:", len(val_set))
    print("Total test:", len(test_set))
    print("All:", len(test_set) + len(train_set) + len(val_set))

    # Keep a few examples from each split for quick inspection.
    samples = train_set[:5] + val_set[:5] + test_set[:5]

    return (list(zip(*train_set)), list(zip(*val_set)),
            list(zip(*test_set)), list(zip(*samples)))
def normalize(output):
    """Return the output unchanged, substituting '暂无' ("none available") for null/empty values."""
    return output if output else '暂无'
def sgd(args):
    """ Wrapper of torch.optim.SGD (PyTorch >= 1.0.0).
    Implements stochastic gradient descent (optionally with momentum).

    A sentinel value of -1 on args.lr / args.weight_decay / args.momentum /
    args.dampening means "use the default" (0.01, 0, 0, 0 respectively);
    args is updated in place with the resolved values.

    Returns:
        callable: takes param_groups and returns a configured torch.optim.SGD.
    """
    args.lr = 0.01 if args.lr == -1 else args.lr
    args.weight_decay = 0 if args.weight_decay == -1 else args.weight_decay
    args.momentum = 0 if args.momentum == -1 else args.momentum
    args.dampening = 0 if args.dampening == -1 else args.dampening
    # BUG FIX: removed "args.nesterov = False if args.nesterov == False else
    # args.nesterov" -- a tautological self-assignment with no effect.

    def sgd_wrapper(param_groups):
        # Verify the PyTorch version lazily, when the optimizer is built.
        pytorch_support(required_version='1.0.0', info_str='Optimizer - SGD')
        return optim.SGD(
            param_groups,
            lr=args.lr, momentum=args.momentum, dampening=args.dampening,
            weight_decay=args.weight_decay, nesterov=args.nesterov)
    return sgd_wrapper
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
  """Posterior Normal distribution with conjugate prior on the mean.

  This model assumes that `n` observations (with sum `s`) come from a
  Normal with unknown mean `loc` (described by the Normal `prior`)
  and known variance `scale**2`. The "known scale posterior" is
  the distribution of the unknown `loc`.

  Accepts a prior Normal distribution object, having parameters
  `loc0` and `scale0`, as well as known `scale` values of the predictive
  distribution(s) (also assumed Normal),
  and statistical estimates `s` (the sum(s) of the observations) and
  `n` (the number(s) of observations).

  Returns a posterior (also Normal) distribution object, with parameters
  `(loc', scale'**2)`, where:

  ```
  mu ~ N(mu', sigma'**2)
  sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
  mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
  ```

  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`.
  will broadcast in the case of multidimensional sets of parameters.

  Args:
    prior: `Normal` object of type `dtype`:
      the prior distribution having parameters `(loc0, scale0)`.
    scale: tensor of type `dtype`, taking values `scale > 0`.
      The known stddev parameter(s).
    s: Tensor of type `dtype`. The sum(s) of observations.
    n: Tensor of type `int`. The number(s) of observations.

  Returns:
    A new Normal posterior distribution object for the unknown observation
    mean `loc`.

  Raises:
    TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
      Normal object.
  """
  if not isinstance(prior, normal.Normal):
    raise TypeError("Expected prior to be an instance of type Normal")

  if s.dtype != prior.dtype:
    raise TypeError(
        "Observation sum s.dtype does not match prior dtype: %s vs. %s"
        % (s.dtype, prior.dtype))

  n = math_ops.cast(n, prior.dtype)  # cast count so it broadcasts with floats
  scale0_2 = math_ops.square(prior.scale)  # prior variance sigma0**2
  scale_2 = math_ops.square(scale)  # known observation variance sigma**2
  scalep_2 = 1.0/(1/scale0_2 + n/scale_2)  # posterior variance sigma'**2
  return normal.Normal(
      # Precision-weighted combination of prior mean and observation sum.
      loc=(prior.loc/scale0_2 + s/scale_2) * scalep_2,
      scale=math_ops.sqrt(scalep_2))
def make_matrix(num_rows, num_cols, entry_fn):
    """Build a num_rows x num_cols matrix whose (i, j) entry is entry_fn(i, j)."""
    matrix = []
    for i in range(num_rows):
        # One list per row: [entry_fn(i, 0), ..., entry_fn(i, num_cols - 1)]
        matrix.append([entry_fn(i, j) for j in range(num_cols)])
    return matrix
def add_relationtoforeignsign(request):
    """Add a new relationtoforeignsign instance.

    On a valid POST, creates a RelationToForeignSign for the gloss identified
    by the ``sourceid`` form field and redirects back to that gloss's admin
    detail view.  An unknown gloss or an invalid form yields a 400 response;
    any non-POST request falls through to a redirect to '/'.
    """
    if request.method == "POST":
        form = RelationToForeignSignForm(request.POST)
        if form.is_valid():
            # Validated form fields.
            sourceid = form.cleaned_data['sourceid']
            loan = form.cleaned_data['loan']
            other_lang = form.cleaned_data['other_lang']
            other_lang_gloss = form.cleaned_data['other_lang_gloss']
            try:
                gloss = Gloss.objects.get(pk=int(sourceid))
            except Gloss.DoesNotExist:
                # Translators: HttpResponseBadRequest
                return HttpResponseBadRequest(_("Source gloss not found."), content_type='text/plain')
            rel = RelationToForeignSign(gloss=gloss, loan=loan, other_lang=other_lang,
                                        other_lang_gloss=other_lang_gloss)
            rel.save()
            # '?editrelforeign' re-opens the relation editor on the detail page.
            return HttpResponseRedirect(
                reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}) + '?editrelforeign')
        else:
            print(form)  # NOTE(review): debug output left in place -- consider logging instead
            # Translators: HttpResponseBadRequest
            return HttpResponseBadRequest(_("Form not valid"), content_type='text/plain')
    # fallback to redirecting to the requesting page
    return HttpResponseRedirect('/')
def check_url(url):
    """
    Check if a URL exists without downloading the whole file.
    We only check the URL header.

    Returns True if the server answers 200 OK, 302 Found or
    301 Moved Permanently.
    """
    # BUG FIX: `httplib` is Python 2 only; the constants live in
    # http.client on Python 3 (imported locally to avoid touching
    # the module's import block).
    from http.client import OK, FOUND, MOVED_PERMANENTLY
    good_codes = [OK, FOUND, MOVED_PERMANENTLY]
    return get_server_status_code(url) in good_codes
def table(df, sortable=False, last_row_is_footer=False, col_format=None):
    """ generate an HTML table from a pandas data frame

    Args:
        df (df): pandas DataFrame
        sortable (bool): render as a sortable bootstrap "bs-table"
        last_row_is_footer (bool): render the final row inside <tfoot>
        col_format (dict): per-column formatting; keys are column names
            (or "*" as a wildcard for all columns) mapping to dicts with any
            of "align" ("right"/"left"), "decimal_places" (int),
            "commas" (bool), "width" (px, currently unused)

    Returns:
        HTML (str)
    """
    if col_format is None:
        col_format = {}
    row_count = len(df)
    # default column formatting
    default_format = {"align": "right", "decimal_places": 2, "commas": True, "width": None}

    def get_format(col, attribute):
        """ helper function to get column formatting

        Args:
            col: column name (key in the col_format)
            attribute (str)

        Returns:
            format value for the specified col
        """
        # BUG FIX: this helper previously indexed col_format with the
        # enclosing loop variable `col_name` instead of its own `col`
        # parameter; it only worked because callers always passed col_name.
        if col in col_format and attribute in col_format[col]:
            value = col_format[col][attribute]
        elif "*" in col_format and attribute in col_format["*"]:
            value = col_format["*"][attribute]
        else:
            value = default_format[attribute]
        return value

    # create header
    items = []
    # if there's a hierarchical index for the columns, span the top level; only supports 2 levels
    if isinstance(df.columns[0], tuple):
        headers = []
        prev_header = df.columns[0][0]
        span = 0
        for i, col_name in enumerate(df.columns):
            h1 = col_name[0]
            h2 = col_name[1]
            if h1 == prev_header:
                span += 1
                if i == (len(df.columns) - 1):
                    headers.append(htmltag.th(h1, colspan=span, _class="centered"))
            else:
                headers.append(htmltag.th(prev_header, colspan=span, _class="centered"))
                if i == (len(df.columns) - 1):
                    headers.append(htmltag.th(h1, colspan=1))
                else:
                    prev_header = h1
                    span = 1
            if get_format(col_name, "align") == "right":
                items.append(htmltag.th(h2, _class="alignRight"))
            else:
                items.append(htmltag.th(h2))
        thead = htmltag.thead(htmltag.tr(*headers), htmltag.tr(*items))
    else:
        for col_name in df.columns:
            if get_format(col_name, "align") == "right":
                if sortable:
                    items.append(htmltag.th(col_name, **{"class": "alignRight", "data-sortable": "true"}))
                else:
                    items.append(htmltag.th(col_name, _class="alignRight"))
            else:
                if sortable:
                    items.append(htmltag.th(col_name, **{"data-sortable": "true"}))
                else:
                    items.append(htmltag.th(col_name))
        thead = htmltag.thead(htmltag.tr(*items))

    # create body (and optionally footer)
    tfoot = ""
    rows = []
    # BUG FIX: iterate positionally so footer detection works for any index
    # type; the old code compared the index *label* against row_count - 1,
    # which only works for a default RangeIndex.
    for pos, (_, row) in enumerate(df.iterrows()):
        values = row.tolist()
        items = []
        for j, v in enumerate(values):
            col_name = df.columns[j]
            if is_numeric(v):
                decimal_places = get_format(col_name, "decimal_places")
                if get_format(col_name, "commas"):
                    pattern = "{:,." + str(decimal_places) + "f}"
                else:
                    pattern = "{:." + str(decimal_places) + "f}"
                v = pattern.format(v)
            if get_format(col_name, "align") == "right":
                items.append(htmltag.td(v, _class="alignRight"))
                # TODO - need to implement width control via get_format(col_name, 'width')
            else:
                items.append(htmltag.td(v))
        if last_row_is_footer and pos == row_count - 1:
            tfoot = htmltag.tfoot(htmltag.tr(*items))
        else:
            rows.append(htmltag.tr(*items))
    tbody = htmltag.tbody(*rows)

    # if sortable, apply the bootstrap-table tab, bs-table
    if sortable:
        if row_count > 15:
            return htmltag.table(thead, tbody, tfoot, **{"class": "bs-table", "data-striped": "true", "data-height": "600"})
        else:
            return htmltag.table(thead, tbody, tfoot, **{"class": "bs-table", "data-striped": "true"})
    else:
        return htmltag.table(thead, tbody, tfoot, **{"class": "table-striped"})
from datetime import datetime
def ap_time_filter(value):
    """
    Converts a datetime or string in hh:mm format into AP style
    (e.g. "9:05" rather than "09:05").
    """
    # BUG FIX: `basestring` is Python 2 only; use str on Python 3.
    if isinstance(value, str):
        value = datetime.strptime(value, '%I:%M')
    value_tz = _set_timezone(value)
    # Pin the year so strftime works for pre-1900 placeholder dates.
    value_year = value_tz.replace(year=2016)
    return value_year.strftime('%-I:%M')
def make_signal(time, amplitude=1, phase=0, period=1):
    """
    Make an arbitrary sinusoidal signal with given amplitude, phase and period
    over a specific time interval.

    Parameters
    ----------
    time : np.ndarray
        Time series in number of days.
    amplitude : float, optional
        Peak amplitude of the sine wave (defaults to 1).
    phase : float, optional
        Phase offset in degrees (defaults to 0).
    period : float, optional
        Period of the sine wave, in the same units as `time` (defaults to 1).

    Returns
    -------
    signal : np.ndarray
        The sinusoid evaluated at each time point.
    """
    # Shift so the wave starts at the first time point.
    elapsed = time - np.min(time)
    angle = 2 * np.pi * 1 / period * elapsed + np.deg2rad(phase)
    return amplitude * np.sin(angle)
def train_data(X, y):
    """
    Fit an ordinary least-squares linear regression to the given data.

    :param X: numpy array for date(0-5), school_id
    :param y: output for the data provided
    :return: the fitted linear regression model
    """
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model
import re
def clean_text(s, stemmer, lemmatiser):
    """
    Takes a string as input and cleans it by removing non-ascii characters,
    lowercasing it, removing punctuation/digits/stopwords and
    lemmatising/stemming it.

    - Input:
        * s (string)
        * stemmer (object that stems a token, or falsy to skip stemming)
        * lemmatiser (object that lemmatises a token, or falsy to skip)
    - Output:
        * text (string)
    """
    stop_words = set(stopwords.words('english'))
    text = removeNonAscii(s).lower()
    # Delete punctuation and digit characters in a single pass.
    text = text.translate(str.maketrans('', '', '#|*.,;!:0123456789'))
    # Normalise whitespace: newlines to spaces, then collapse runs of spaces.
    text = re.sub(' +', ' ', text.replace('\n', ' '))
    # Apply stemmer/lemmatiser (both are applied if both are given).
    kept = []
    for token in word_tokenize(text):
        if token in stop_words:
            continue
        if stemmer:
            kept.append(stemmer.stem(token))
        if lemmatiser:
            kept.append(lemmatiser.lemmatize(token))
    return ' '.join(kept)
def make_rst_sample_table(data):
    """Render the sample rows as an ASCII table; empty string when data is None."""
    if data is None:
        return ""
    sample_table = tt.Texttable()
    sample_table.set_precision(2)
    sample_table.add_rows(data)
    return sample_table.draw()
def is_thunk(space, w_obj):
    """Check if an object is a thunk that has not been computed yet.

    Follows the chain of thunk aliases: returns space.w_True when the chain
    ends in the not-yet-computed marker, space.w_False when it ends in None.
    """
    alias = w_obj.w_thunkalias
    while alias is not None:
        if alias is w_NOT_COMPUTED_THUNK:
            return space.w_True
        w_obj = alias
        alias = w_obj.w_thunkalias
    return space.w_False
def plot_roc_curve(
    fpr,
    tpr,
    roc_auc=None,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots a receiver operating characteristic (ROC) curve.

    Args:
        fpr: an array of false postive rates
        tpr: an array of true postive rates
        roc_auc (None): the area under the ROC curve
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``plot()``

    Returns:
        a matplotlib figure
    """
    with plt.style.context(style):
        curve = skm.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
        curve.plot(ax=ax, **kwargs)
        if figsize is not None:
            width, height = figsize
            curve.figure_.set_size_inches(width, height)
        return curve.figure_
def my_place_or_yours(our_address: Address, partner_address: Address) -> Address:
    """Deterministically pick one of two distinct addresses.

    Both peers make the same choice: the lexicographically smaller address.

    Raises:
        ValueError: if the two addresses are equal.
    """
    if our_address == partner_address:
        raise ValueError("Addresses to compare must differ")
    return min(our_address, partner_address)
def get_dataset_json(met, version):
    """Build the HySDS dataset JSON document from the met JSON."""
    dataset = {"version": version}
    dataset["label"] = met['data_product_name']
    dataset["starttime"] = met['sensingStart']
    return dataset
def hlmoft_SEOB_dict(P,Lmax=2):
    """
    Generate the TD h_lm -2-spin-weighted spherical harmonic modes of a GW
    with parameters P. Returns a dictionary of modes keyed by (l, m) tuples.

    Just for SEOBNRv4HM, SEOBNRv2, SEOBNRv1, SEOBNRv4(_opt), EOBNRv2 and the
    TEOB variants; returns None for any other approximant. Uses the
    aligned-spin trick to get the (2,2) and (2,-2) modes from h(t) evaluated
    face-on. A hack. Works for any aligned-spin time-domain waveform with
    only (2, +/-2) modes though.

    WARNING: in the non-SEOBNRv4HM path, P itself is mutated
    (phiref/psi/incl are zeroed).
    """
    if P.approx == lalSEOBNRv4HM:
        extra_params = P.to_lal_dict()  # NOTE(review): unused -- presumably meant for the lalsim call; confirm
        nqcCoeffsInput=lal.CreateREAL8Vector(10)
        # NOTE(review): the long positional tail (41, zeros, 1., 1., ...) matches
        # the SimIMRSpinAlignedEOBModes signature for this lalsuite version -- verify on upgrade.
        hlm_struct, dyn, dynHi = lalsim.SimIMRSpinAlignedEOBModes(P.deltaT, P.m1, P.m2, P.fmin, P.dist, P.s1z, P.s2z,41, 0., 0., 0.,0.,0.,0.,0.,0.,1.,1.,nqcCoeffsInput, 0)
        hlms = SphHarmTimeSeries_to_dict(hlm_struct,Lmax)
        mode_list_orig = list(hlms.keys()) # force a cast and therefore a copy - some python versions not playing safely here
        for mode in mode_list_orig:
            # Add zero padding if requested time period too short
            if not (P.deltaF is None):
                TDlen = int(1./P.deltaF * 1./P.deltaT)
                if TDlen > hlms[mode].data.length:
                    hlms[mode] = lal.ResizeCOMPLEX16TimeSeries(hlms[mode],0,TDlen)
            # Should only populate positive modes; create negative modes
            mode_conj = (mode[0],-mode[1])
            if not mode_conj in hlms:
                hC = hlms[mode]
                hC2 = lal.CreateCOMPLEX16TimeSeries("Complex h(t)", hC.epoch, hC.f0,
                    hC.deltaT, lsu_DimensionlessUnit, hC.data.length)
                hC2.data.data = (-1.)**mode[1] * np.conj(hC.data.data) # h(l,-m) = (-1)^m hlm^* for reflection symmetry
                hlms[mode_conj] = hC2
        return hlms
    if not (P.approx == lalsim.SEOBNRv2 or P.approx==lalsim.SEOBNRv1 or P.approx == lalSEOBv4 or P.approx == lalsim.SEOBNRv4_opt or P.approx==lalsim.EOBNRv2 or P.approx == lalTEOBv2 or P.approx==lalTEOBv4):
        return None
    # Remember, we have a fiducial orientation for the h22.
    # WARNING THIS MAY BE DESTRUCTIVE: P is modified in place below.
    # P2 = P.manual_copy()
    P.phiref=0.
    P.psi=0.
    P.incl = 0
    hC = complex_hoft(P) # pad as needed
    hC.epoch = hC.epoch - P.tref # need to CORRECT the event time: hoft adds an epoch
    if rosDebugMessagesContainer[0]:
        print( " SEOB hlm trick: epoch of hC ", hC.epoch)
    # Divide out the face-on harmonic factor |Y_{2,2}(0,0)| = sqrt(5/pi)/2
    # so that hC becomes the bare h22 mode.
    fac = np.sqrt(5./np.pi)/2
    hC.data.data *=1./fac #lal.SpinWeightedSphericalHarmonic(0.,0., -2,2,2)
    # Copy...but I don't trust lalsuite copy
    hC2 = lal.CreateCOMPLEX16TimeSeries("Complex h(t)", hC.epoch, hC.f0,
        hC.deltaT, lsu_DimensionlessUnit, hC.data.length)
    hC2.data.data =np.conj(hC.data.data)  # (2,-2) from reflection symmetry
    hlm_dict={}
    hlm_dict[(2,2)] = hC
    hlm_dict[(2,-2)] = hC2
    return hlm_dict
def encode(value):
    """
    Encode a value/document for mongo storage. Same as pyg_base.encoder
    except that bson.objectid.ObjectId values (used by mongodb for the
    document _id) are left unencoded.

    Parameters
    ----------
    value : value/document to be encoded

    Returns
    -------
    encoded value/document
    """
    encoded = encode_(value, ObjectId)
    return encoded
def locate_line_segments(isolated_edges):
    """
    Extract line segments from observed lane edges using the probabilistic
    Hough transform.

    :param isolated_edges: Lane edges returned from isolated_lane_edges()
    :return: Line segments extracted by HoughLinesP()
    """
    distance_resolution = 1           # rho, in pixels
    angle_resolution = np.pi / 180    # theta, in radians
    vote_threshold = 10
    min_length = 8                    # shortest accepted segment, pixels
    max_gap = 4                       # largest bridged gap, pixels
    return cv2.HoughLinesP(isolated_edges, distance_resolution, angle_resolution,
                           vote_threshold, np.array([]), min_length, max_gap)
def proj(A, B):
    """Return the projection of A onto the hyper-plane with normal B
    (i.e. A with its component along B removed)."""
    component_along_B = (A * B).sum()
    return A - component_along_B * B / (B ** 2).sum()
def v6_multimax(iterable):
    """Return a list of all maximum values.

    Bonus 2: works with lazy iterables -- a single pass keeps a running list
    of the maximums seen so far, rebuilding it whenever a strictly larger
    item appears.
    """
    winners = []
    for candidate in iterable:
        if not winners or candidate == winners[0]:
            winners.append(candidate)
        elif candidate > winners[0]:
            # A new maximum invalidates everything collected so far.
            winners = [candidate]
    return winners
import traceback
def format_traceback_string(exception):
    """Format exception traceback as a single string.

    Args:
        exception: Exception object.

    Returns:
        Full exception traceback as a string.
    """
    tb = traceback.TracebackException.from_exception(exception)
    return '\n'.join(tb.format())
def get_stored_file(file_id):
    """Return a JSON response with the "stored file" summary for *file_id*."""
    stored = StoredFile.objects(id=ObjectId(file_id)).first()
    return JsonResponse(stored)
def _compute_array_job_index():
    # type () -> int
    """
    Computes the absolute index of the current array job by summing the
    compute-environment-specific index variable (named by
    BATCH_JOB_ARRAY_INDEX_VAR_NAME) and the offset in
    BATCH_JOB_ARRAY_INDEX_OFFSET, if one is set. The offset is used when the
    user requests that the job runs in fewer slots than the size of the input.
    :rtype: int
    """
    offset_str = _os.environ.get("BATCH_JOB_ARRAY_INDEX_OFFSET")
    offset = int(offset_str) if offset_str else 0
    index_var_name = _os.environ.get("BATCH_JOB_ARRAY_INDEX_VAR_NAME")
    return offset + int(_os.environ.get(index_var_name))
def make_transpose(transpose_name, input_name, input_type, perm):
    """Makes a transpose node.

    Args:
        transpose_name: name of the transpose op.
        input_name: name of the op to be the transpose op's input.
        input_type: type of the input node.
        perm: permutation array, e.g. [0, 2, 3, 1] for NCHW to NHWC.

    Returns:
        A (transpose, permutation) pair of NodeDefs to be added to a GraphDef.
    """
    # Build the constant node holding the permutation tensor.
    perm_node = tf.compat.v1.NodeDef()
    text_format.Merge(PERMUTE_TMPL % (transpose_name + '/perm', len(perm)),
                      perm_node)
    perm_node.attr['value'].tensor.tensor_content = \
        np.array(perm, dtype=np.int32).tobytes()

    # Build the transpose node wired to the input and the permutation const.
    transpose_node = tf.compat.v1.NodeDef()
    text_format.Merge(TRANSPOSE_TMPL % (transpose_name, input_name, perm_node.name),
                      transpose_node)
    transpose_node.attr['T'].type = input_type
    return transpose_node, perm_node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.