content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def turn_xyz_into_llh(x, y, z, system):
    """Convert 3D Cartesian x, y, z into Lat, Long and Height.

    See http://www.ordnancesurvey.co.uk/gps/docs/convertingcoordinates3D.pdf

    :param x, y, z: Cartesian coordinates of the point.
    :param system: key into the module-level ``abe_values`` table giving the
        ellipsoid parameters (a, b, e^2) for the datum.
    :return: [lat, long, height] with lat/long in degrees.
    """
    a = abe_values[system][0]
    e2 = abe_values[system][2]
    p = math.sqrt(x * x + y * y)
    # atan2 handles x == 0 and picks the correct quadrant; the previous
    # atan(y/x) raised ZeroDivisionError on the Z axis and was wrong for
    # points with negative x.
    lon = math.atan2(y, x)
    # Single-iteration approximation of geodetic latitude (see OS paper).
    lat_init = math.atan2(z, p * (1.0 - e2))
    v = a / math.sqrt(1.0 - e2 * (math.sin(lat_init) * math.sin(lat_init)))
    lat = math.atan2(z + e2 * v * math.sin(lat_init), p)
    height = (p / math.cos(lat)) - v  # Ignore if a bit out
    # Turn from radians back into degrees
    return [math.degrees(lat), math.degrees(lon), height]
def _TileGrad(op, grad):
    """Gradient for Tile: sum-reduces `grad` along the tiled dimensions.

    op.inputs[0] is the original (untiled) tensor and op.inputs[1] the
    `multiples` vector; the gradient w.r.t. `multiples` is None.
    """
    input_shape = array_ops.shape(op.inputs[0])
    # We interleave multiples and input_shape to get split_shape,
    # reshape grad to split_shape, and reduce along all even
    # dimensions (the tiled dimensions) to get the result
    # with shape input_shape. For example
    #   input_shape = [20, 30, 40]
    #   multiples = [2, 3, 4]
    #   split_shape = [2, 20, 3, 30, 4, 40]
    #   axes = [0, 2, 4]
    split_shape = array_ops.reshape(
        array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
    axes = math_ops.range(0, array_ops.size(split_shape), 2)
    # Sum reduces grad along the first dimension for IndexedSlices
    if isinstance(grad, ops.IndexedSlices):
        grad = math_ops.unsorted_segment_sum(
            grad.values,
            math_ops.mod(grad.indices, input_shape[0]),
            input_shape[0])
        # First dimension is already reduced, so only keep a size-1 slot for it.
        split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
    input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
    # Fix shape inference (graph mode only; eager tensors have concrete shapes)
    if not context.executing_eagerly():
        input_grad.set_shape(op.inputs[0].get_shape())
    return [input_grad, None]
def obj_spatial_error_sum_and_naturalness_jac(s, data):
    """Jacobian of the error function: a combination of the analytic
    gradient for the motion primitive model (GMM log-likelihood) and a
    numerical gradient for the kinematic error.

    :param s: current parameter vector being optimized.
    :param data: tuple; data[0] exposes get_gaussian_mixture_model(),
        data[-2] is the quality scale and data[-1] the error scale
        (trailing entries are forwarded to obj_spatial_error_sum).
    """
    # Extract relevant parameters from data tuple.
    # Note other parameters are used for calling obj_error_sum
    gmm = data[0].get_gaussian_mixture_model()
    error_scale = data[-1]
    quality_scale = data[-2]
    # Per-component log densities of s under the mixture.
    logLikelihoods = _estimate_log_gaussian_prob(s, gmm.means_, gmm.precisions_cholesky_, 'full')
    logLikelihoods = np.ravel(logLikelihoods)
    # Analytic gradient of log p(s):
    #   sum_i w_i N_i(s) Sigma_i^{-1} (s - mu_i) / p(s)
    numerator = 0
    n_models = len(gmm.weights_)
    for i in range(n_models):
        # NOTE(review): uses gmm.covars_ here but gmm.precisions_cholesky_
        # above; covars_ is the pre-0.18 sklearn attribute name — confirm the
        # GMM object actually exposes both.
        numerator += np.exp(logLikelihoods[i]) * gmm.weights_[i] * np.dot(np.linalg.inv(gmm.covars_[i]), (s - gmm.means_[i]))
    denominator = np.exp(gmm.score([s])[0])
    logLikelihood_jac = numerator / denominator
    # Finite-difference gradient of the kinematic error term (step 1e-7).
    kinematic_jac = approx_fprime(s, obj_spatial_error_sum, 1e-7, data[-2:])  # ignore the kinematic factor and quality factor
    jac = logLikelihood_jac * quality_scale + kinematic_jac * error_scale
    return jac
def test_setup_logger_with_label(tmpdir, output):
    """When a label is given, setup_logger must create a log file and
    attach a FileHandler — whether or not an output dir is supplied."""
    if output:
        output = str(tmpdir.mkdir(output))
    logger = setup_logger(label='test', output=output)
    if output is None:
        output = '.'
    assert os.path.exists(f'{output}/test.log')
    handler_types = [type(h) for h in logger.handlers]
    assert logging.FileHandler in handler_types
def today():
    """Return today's date formatted as YYYYMMDD."""
    return datetime.date.today().strftime("%Y%m%d")
def recognize(img, lang, *, hints=None):
    """
    Recognize text in an image and return an OcrResult.

    :param img: image to recognize, a PIL.Image.Image object
    :param lang: language to recognize, as a BCP-47 format string
    :param hints: hints for the OCR engine, a list of values defined in OcrHint
    :returns: OcrResult
        OcrResult = {
            lines: Tuple[OcrLine],
            extra: Any  # extra engine-specific information
        }
        OcrLine = {
            words: Tuple[OcrWord],
            extra: Any
        }
        OcrWord = {
            text: str,
            rect: Rect,
            extra: Any
        }

    NOTE(review): this implementation is a stub — it ignores img/lang/hints
    and always returns an empty OcrResult.
    """
    from .common import OcrResult
    return OcrResult(tuple())
def check_missing_requirements():
    """Check for the required external tools (mencoder, mplayer, lame,
    and mkvmerge).

    :return: None if all requirements are in the execution path, otherwise
        the list of missing tool names.
    """
    required = ("mencoder", "mplayer", "lame", "mkvmerge")
    # Replaces four copy-pasted which() checks with a single loop.
    missing = [tool for tool in required if which(tool) is None]
    # Callers treat None as "all requirements satisfied".
    return missing if missing else None
def getExpMat(xy, shape, start, end, r, repeats=5):
    """
    Get the expected interaction contact matrix by shuffling contacts.

    xy is [[x,y]]; NOTE(review): both columns of xy are shuffled in place,
    so the caller's array is mutated — confirm that is intended.
    shape is () shape from the observed matrix; only shuffles whose matrix
    matches this shape are counted.
    r is resolution; start/end are forwarded to getObsMat.
    repeats is the number of accepted shuffles averaged over.
    """
    mat = []
    i = 0
    while i < repeats:
        # Independently shuffle both coordinate columns to break the pairing.
        a = xy[:, 0]
        b = xy[:, 1]
        np.random.shuffle(a)
        np.random.shuffle(b)
        xy[:, 0] = a
        xy[:, 1] = b
        # Keep only pairs with positive span (y > x).
        s = b - a
        s = np.where(s > 0)[0]
        nxy = xy[s, ]
        nmat = getObsMat(nxy, start, end, r)
        # Count only shuffles reproducing the observed shape.
        # NOTE(review): if the shape never matches, this loop never terminates.
        if nmat.shape == shape:
            mat.append(nmat)
            i += 1
    mat = np.array(mat)
    # Element-wise mean over the accepted shuffled matrices.
    return mat.mean(axis=0)
async def test_save_timestamped_image(hass, mock_image, mock_detections, mock_now):
    """Save a processed image and verify a timestamped copy is written."""
    # Enable the save-file folder and timestamped-file options on top of the
    # known-good base config.
    valid_config_save_ts_file = deepcopy(VALID_CONFIG)
    valid_config_save_ts_file[ip.DOMAIN].update({sh.CONF_SAVE_FILE_FOLDER: TEST_DIR})
    valid_config_save_ts_file[ip.DOMAIN].update({sh.CONF_SAVE_TIMESTAMPTED_FILE: True})
    await async_setup_component(hass, ip.DOMAIN, valid_config_save_ts_file)
    assert hass.states.get(VALID_ENTITY_ID)
    with mock.patch(
        "homeassistant.components.sighthound.image_processing.Image.open"
    ) as pil_img_open:
        pil_img = pil_img_open.return_value
        pil_img = pil_img.convert.return_value
        data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
        await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
        await hass.async_block_till_done()
        state = hass.states.get(VALID_ENTITY_ID)
        assert state.state == "2"
        # One save for the latest image plus one for the timestamped copy.
        assert pil_img.save.call_count == 2
        directory = Path(TEST_DIR)
        # Timestamp comes from the mock_now fixture — fixed test clock.
        timestamp_save_path = (
            directory / "sighthound_demo_camera_2020-02-20_10:05:03.jpg"
        )
        assert pil_img.save.call_args_list[1] == mock.call(timestamp_save_path)
async def student_decline_offer(uid: str, username=Depends(auth_handler.auth_wrapper)):
    """
    Student declines the offer in an ApplicationForm.
    Require: Student-self or Admin-write
    """
    logger.debug(f"{username} trying to decline the offer in an ApplicationForm")
    return student_update_application_form_status(
        AF_uid=uid,
        username=username,
        new_status=AFStatus.DECLINE,
        pre_status=[AFStatus.OFFER],
    )
def test_constraint_compatibility():
    """
    Backward-compatibility test for constraint parsing (v1.6.1 and earlier).
    """
    column = DdlParseColumn(
        name='Col',
        data_type_array={'type_name': ['INT']},
    )
    column.constraint = "PRIMARY KEY COMMENT 'foo'"
    # A primary key implies NOT NULL; unique stays False and the inline
    # COMMENT text is extracted.
    assert column.not_null is True
    assert column.primary_key is True
    assert column.unique is False
    assert column.comment == 'foo'
def main():
    """Run this module's tests (currently doctests only)."""
    from crds.tests import test_table_effects, tstmod
    return tstmod(test_table_effects)
def get_relative_path(full_path: str) -> str:
    """
    Extract the path relative to the data folder.
    :param full_path: The full path
    :return: Relative path from data folder
    >>> get_relative_path(path.join(DATA_PATH, 'MIDI/001-001.mid'))
    'MIDI/001-001.mid'
    """
    relative_path = path.relpath(full_path, DATA_PATH)
    return relative_path
def etminan(C, Cpi, F2x=3.71, scale_F2x=True):
    """Calculate the radiative forcing from CO2, CH4 and N2O.

    Uses the updated formulas of Etminan et al. (2016), which include the
    band overlaps between CO2, methane and nitrous oxide.
    Reference: Etminan et al, 2016, JGR, doi: 10.1002/2016GL071930

    Inputs:
        C: [CO2, CH4, N2O] concentrations, [ppm, ppb, ppb]
        Cpi: pre-industrial [CO2, CH4, N2O] concentrations
    Keywords:
        F2x: radiative forcing from a doubling of CO2.
        scale_F2x: boolean. Scale the calculated value to the specified F2x?
    Returns:
        3-element array of radiative forcing: [F_CO2, F_CH4, F_N2O]
    """
    co2, ch4, n2o = C[0], C[1], C[2]
    co2_pi, ch4_pi, n2o_pi = Cpi[0], Cpi[1], Cpi[2]

    # Mean of present-day and pre-industrial concentration for each gas.
    Cbar = 0.5 * (co2 + co2_pi)
    Mbar = 0.5 * (ch4 + ch4_pi)
    Nbar = 0.5 * (n2o + n2o_pi)

    # Tune the CO2 coefficient so a doubling of pre-industrial CO2 yields
    # exactly F2x (the unscaled Etminan value is ~3.801 W m-2).
    scaleCO2 = 1
    if scale_F2x:
        F2x_etminan = (
            -2.4e-7 * co2_pi ** 2 + 7.2e-4 * co2_pi
            - 2.1e-4 * n2o_pi + 5.36) * np.log(2)
        scaleCO2 = F2x / F2x_etminan

    d_co2 = co2 - co2_pi
    f_co2 = (-2.4e-7 * d_co2 ** 2 + 7.2e-4 * np.fabs(d_co2) -
             2.1e-4 * Nbar + 5.36) * np.log(co2 / co2_pi) * scaleCO2
    f_ch4 = (-1.3e-6 * Mbar - 8.2e-6 * Nbar + 0.043) * (
        np.sqrt(ch4) - np.sqrt(ch4_pi))
    f_n2o = (-8.0e-6 * Cbar + 4.2e-6 * Nbar - 4.9e-6 * Mbar + 0.117) * (
        np.sqrt(n2o) - np.sqrt(n2o_pi))
    return np.array([f_co2, f_ch4, f_n2o])
def render_manage_data_store_pages(request, html_file):
    """
    Generate management pages for data_stores.

    :param request: HTTP request object.
    :param html_file: template filename under streamflow_prediction_tool/.
    :return: rendered response listing the data stores.
    """
    # initialize session
    session_maker = app.get_persistent_store_database('main_db',
                                                      as_sessionmaker=True)
    session = session_maker()
    # NOTE(review): id > 1 deliberately skips the first data store row —
    # confirm why that record is excluded.
    data_stores = session.query(DataStore) \
        .filter(DataStore.id > 1) \
        .order_by(DataStore.name) \
        .all()
    context = {
        'data_stores': data_stores,
    }
    table_html = \
        render(request,
               'streamflow_prediction_tool/{}'.format(html_file),
               context)
    # in order to close the session, the request needed to be rendered first
    session.close()
    return table_html
def validate_password_form(p1, p2, is_open, btn, sp1, sp2):
    """Validate the password form.

    Returns
        Output('password1', 'invalid'),
        Output('password2', 'invalid'),
        Output('password1', 'title'),
        Output('password2', 'title'),
    """
    # Start from the current invalid states; titles default to cleared.
    invalid = {'p1': sp1, 'p2': sp2}
    title = {'p1': None, 'p2': None}
    ctx = dash.callback_context
    if ctx.triggered:
        btn_id = ctx.triggered[0]['prop_id'].split('.')[0]
        # Opening the modal or pressing clear resets the validation state.
        if btn_id == 'modal' or btn_id == 'clear':
            return False, False, None, None
    if p1:
        pwd_check = password_check(p1)
        if not pwd_check['ok']:
            invalid['p1'] = True
            # Report only the first failed rule.
            if pwd_check['length_error']:
                title['p1'] = _(
                    'The password must be at least 8 characters long.'
                )
            elif pwd_check['digit_error']:
                title['p1'] = _('The password must have numbers.')
            elif pwd_check['uppercase_error'] or pwd_check['lowercase_error']:
                # Typo fix in user-facing message: "haver" -> "have"
                # (update the gettext catalog entry to match).
                title['p1'] = _(
                    'The password must have uppercase and lowercase letters.'
                )
            elif pwd_check['symbol_error']:
                title['p1'] = _('The password must have special symbols.')
        else:
            invalid['p1'] = False
    if p2:
        # The confirmation field is only valid when it matches a filled p1.
        if not p1:
            invalid['p2'] = True
            title['p2'] = _('Fill password field.')
        elif not p1 == p2:
            invalid['p2'] = True
            title['p2'] = _('Passwords don\'t match.')
        else:
            invalid['p2'] = False
    return invalid['p1'], invalid['p2'], title['p1'], title['p2']
def cardinal_spline(points, tension=0.5):
    """Path instructions for a cardinal spline interpolating the control points.

    Args:
        points (list of 2-tuples): The control points for the cardinal spline.
        tension (float, optional): Tension of the spline in the range [0,1].
            Defaults to 0.5.

    Returns:
        string: Ipe path instructions
    """
    # "x0 y0 m" starts the path, then each remaining point's coordinates,
    # finished by the tension value and the cardinal-spline operator.
    parts = [str(points[0][0]), str(points[0][1]), 'm']
    for px, py in points[1:]:
        parts.append(str(px))
        parts.append(str(py))
    parts.append(str(tension))
    parts.append('C ')
    return ' '.join(parts)
def extract(x, *keys):
    """
    Pull the given keys out of a dict, or out of each dict in a list.

    Args:
        x (dict or list): dict or list of dicts
    Returns:
        (tuple): tuple with the elements of the dict or the dicts of the list
    """
    if isinstance(x, dict):
        return tuple(x[key] for key in keys)
    if isinstance(x, list):
        return tuple([item[key] for item in x] for key in keys)
    # Any other container type is unsupported.
    raise NotImplementedError
def get_source_config_from_ctx(_ctx,
                               group_name=None,
                               hostname=None,
                               host_config=None,
                               sources=None):
    """Generate a source config from CTX.

    :param _ctx: Either a NodeInstance or a RelationshipInstance ctx.
    :param group_name: User's override value, like 'webservers'.
    :param hostname: User's override value, like 'web'.
    :param host_config: User's override value. Like:
        {
            'ansible_host': '127.0.0.1',
            'ansible_user': 'ubuntu',
        }
    :param sources: User's sources override value.
    :return: the AnsibleSource config built from the resolved values.
    """
    sources = sources or {}
    # Non-compute node instances that already carry SOURCES in their runtime
    # properties are returned as-is.
    if _ctx.type == NODE_INSTANCE and \
            'cloudify.nodes.Compute' not in _ctx.node.type_hierarchy and \
            _ctx.instance.runtime_properties.get(SOURCES):
        return AnsibleSource(_ctx.instance.runtime_properties[SOURCES]).config
    elif _ctx.type == RELATIONSHIP_INSTANCE:
        # Relationship context: derive host config, group/host names and any
        # additional groups from the relationship's target node.
        host_config = host_config or \
            get_host_config_from_compute_node(_ctx.target)
        group_name, hostname = \
            get_group_name_and_hostname(
                _ctx.target, group_name, hostname)
        additional_node_groups = get_additional_node_groups(
            _ctx.target.node.name, _ctx.deployment.id)
    else:
        # Node-instance context: derive the same values from the node itself.
        host_config = host_config or \
            get_host_config_from_compute_node(_ctx)
        group_name, hostname = \
            get_group_name_and_hostname(
                _ctx, group_name, hostname)
        additional_node_groups = get_additional_node_groups(
            _get_node(_ctx).name, _ctx.deployment.id)
    # Warn (but do not fail) when automatic host-key approval is not set up.
    if '-o StrictHostKeyChecking=no' not in \
            host_config.get('ansible_ssh_common_args', ''):
        _ctx.logger.warn(
            'This value {0} is not included in Ansible Configuration. '
            'This is required for automating host key approval.'.format(
                {'ansible_ssh_common_args': '-o StrictHostKeyChecking=no'}))
    hosts = {
        hostname: host_config
    }
    sources[group_name] = {
        HOSTS: hosts
    }
    # Register the host (with no per-host overrides) in any additional groups.
    for additional_group in additional_node_groups:
        sources[additional_group] = {HOSTS: {hostname: None}}
    return AnsibleSource(sources).config
def index(request):
    """Render the welcome page for the sorting quiz."""
    template = loader.get_template("ggpoll/index.html")
    rendered = template.render({}, request)
    return HttpResponse(rendered)
def cnn_dropout_mnist(args):
    """
    Train and evaluate a dropout-regularized CNN on MNIST.

    :param args: argparse-style namespace holding all run options (dropout
        rates, layer sizes, optimizer choice, learning rate, epochs, batch
        size, reproducibility seed, plotting/saving flags, ...).
    :return: the trained Keras Model.
    """
    # %%
    # IMPORTS
    # code repository sub-package imports
    from artificial_neural_networks.utils.download_mnist import download_mnist
    from artificial_neural_networks.utils.generic_utils import save_classif_model
    from artificial_neural_networks.utils.vis_utils import plot_confusion_matrix, epoch_plot
    # %%
    if args.verbose > 0:
        print(args)
    # For reproducibility: pin all RNG seeds and bind a fresh TF session.
    if args.reproducible:
        os.environ['PYTHONHASHSEED'] = '0'
        np.random.seed(args.seed)
        rn.seed(args.seed)
        tf.set_random_seed(args.seed)
        sess = tf.Session(graph=tf.get_default_graph())
        K.set_session(sess)
        # print(hash("keras"))
    # %%
    # Load the MNIST dataset
    mnist_path = download_mnist()
    mnist = np.load(mnist_path)
    train_x = mnist['x_train'].astype(np.float32)
    train_y = mnist['y_train'].astype(np.int32)
    test_x = mnist['x_test'].astype(np.float32)
    test_y = mnist['y_test'].astype(np.int32)
    mnist.close()
    # %%
    # PREPROCESSING STEP
    scaling_factor = args.scaling_factor
    translation = args.translation
    img_width = train_x.shape[1]
    img_height = train_x.shape[2]
    n_train = train_x.shape[0]  # number of training examples/samples
    n_test = test_x.shape[0]  # number of test examples/samples
    n_in = img_width * img_height  # number of features / dimensions
    n_out = np.unique(train_y).shape[0]  # number of classes/labels
    # Reshape training and test sets to (n, H, W, 1) single-channel images
    train_x = train_x.reshape(n_train, img_width, img_height, 1)
    test_x = test_x.reshape(n_test, img_width, img_height, 1)
    # Apply preprocessing (affine rescale of pixel values)
    train_x = scaling_factor * (train_x - translation)
    test_x = scaling_factor * (test_x - translation)
    one_hot = False  # It works exactly the same for both True and False
    # Convert class vectors to binary class matrices (i.e. One hot encoding)
    if one_hot:
        train_y = to_categorical(train_y, n_out)
        test_y = to_categorical(test_y, n_out)
    # %%
    # Model hyperparameters and ANN Architecture
    N = []
    N.append(n_in)  # input layer
    if args.same_size:
        # All hidden dense layers share the same width.
        n_layers = args.n_layers
        for i in range(n_layers):
            N.append(args.layer_size)  # hidden layer i
    else:
        # Per-layer widths given explicitly.
        n_layers = len(args.explicit_layer_sizes)
        for i in range(n_layers):
            N.append(args.explicit_layer_sizes[i])  # hidden layer i
    N.append(n_out)  # output layer
    # ANN Architecture: two conv+pool stages, then the dense stack from N
    L = len(N) - 1
    x = Input(shape=(img_width, img_height, 1))  # input layer
    h = Dropout(rate=args.dropout_rate_input)(x)
    h = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = Dropout(rate=args.dropout_rate_conv)(h)
    h = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = Dropout(rate=args.dropout_rate_conv)(h)
    h = Flatten()(h)
    for i in range(1, L):
        h = Dense(units=N[i], activation='relu')(h)  # hidden layer i
        h = Dropout(rate=args.dropout_rate_hidden)(h)
    out = Dense(units=n_out, activation='softmax')(h)  # output layer
    model = Model(inputs=x, outputs=out)
    if args.verbose > 0:
        model.summary()
    if one_hot:
        loss_function = 'categorical_crossentropy'
    else:
        loss_function = 'sparse_categorical_crossentropy'
    metrics = ['accuracy']
    # NOTE(review): attribute name looks misspelled ("lrearning_rate") —
    # confirm it matches the argparse definition before renaming.
    lr = args.lrearning_rate
    epsilon = args.epsilon
    optimizer_selection = {
        'Adadelta':
            optimizers.Adadelta(lr=lr, rho=0.95, epsilon=epsilon, decay=0.0),
        'Adagrad':
            optimizers.Adagrad(lr=lr, epsilon=epsilon, decay=0.0),
        'Adam':
            optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0, amsgrad=False),
        'Adamax':
            optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0),
        'Nadam':
            optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, schedule_decay=0.004),
        'RMSprop':
            optimizers.RMSprop(lr=lr, rho=0.9, epsilon=epsilon, decay=0.0),
        'SGD':
            optimizers.SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False)
    }
    optimizer = optimizer_selection[args.optimizer]
    model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics)
    # %%
    # Save trained models for every epoch
    models_path = r'artificial_neural_networks/trained_models/'
    model_name = 'mnist_cnn_dropout'
    weights_path = models_path + model_name + '_weights'
    model_path = models_path + model_name + '_model'
    file_suffix = '_{epoch:04d}_{val_acc:.4f}_{val_loss:.4f}'
    if args.save_weights_only:
        file_path = weights_path
    else:
        file_path = model_path
    file_path += file_suffix
    # monitor = 'val_loss'
    monitor = 'val_acc'
    if args.save_models:
        checkpoint = ModelCheckpoint(
            file_path + '.h5',
            monitor=monitor,
            verbose=args.verbose,
            save_best_only=args.save_best_only,
            mode='auto',
            save_weights_only=args.save_weights_only)
        callbacks = [checkpoint]
    else:
        callbacks = []
    # %%
    # TRAINING PHASE
    if args.time_training:
        start = timer()
    model_history = model.fit(
        x=train_x,
        y=train_y,
        validation_data=(test_x, test_y),
        batch_size=args.batch_size,
        epochs=args.n_epochs,
        verbose=args.verbose,
        callbacks=callbacks)
    if args.time_training:
        end = timer()
        duration = end - start
        print('Total time for training (in seconds):')
        print(duration)
    # %%
    # TESTING PHASE
    train_y_pred = np.argmax(model.predict(train_x), axis=1)
    test_y_pred = np.argmax(model.predict(test_x), axis=1)
    train_score = model.evaluate(x=train_x, y=train_y, verbose=args.verbose)
    train_dict = {'loss': train_score[0], 'acc': train_score[1]}
    test_score = model.evaluate(x=test_x, y=test_y, verbose=args.verbose)
    test_dict = {'val_loss': test_score[0], 'val_acc': test_score[1]}
    if args.verbose > 0:
        print('Train loss:', train_dict['loss'])
        print('Train accuracy:', train_dict['acc'])
        print('Test loss:', test_dict['val_loss'])
        print('Test accuracy:', test_dict['val_acc'])
    # %%
    # Data Visualization
    if args.plot:
        # Confusion matrices
        classes = list(range(n_out))
        train_cm = confusion_matrix(train_y, train_y_pred)
        plot_confusion_matrix(train_cm, classes=classes, title='Confusion matrix for training set')
        test_cm = confusion_matrix(test_y, test_y_pred)
        plot_confusion_matrix(test_cm, classes=classes, title='Confusion matrix for test set')
        # Loss vs epoch
        epoch_axis = range(1, args.n_epochs + 1)
        train_loss = model_history.history['loss']
        test_loss = model_history.history['val_loss']
        epoch_plot(epoch_axis, train_loss, test_loss, 'Loss')
        # Accuracy vs epoch
        train_acc = model_history.history['acc']
        test_acc = model_history.history['val_acc']
        epoch_plot(epoch_axis, train_acc, test_acc, 'Accuracy')
    # %%
    # Save the architecture and the lastly trained model
    save_classif_model(model, models_path, model_name, weights_path, model_path, file_suffix,
                       test_dict, args)
    # %%
    return model
def _download(
        out_path
):
    """Download and extract all data and results related to this tool's paper.

    :param out_path: directory to extract into; created if missing.
    :raises requests.HTTPError: if the download request fails.
    """
    out_path = Path(out_path)
    out_path.mkdir(parents=True, exist_ok=True)
    logging.info(f"Downloading and extracting datasets and models to {str(out_path)}.")
    r = requests.get(URLs["tango_reproduction_package"])
    # Fail loudly on HTTP errors instead of trying to unzip an error page.
    r.raise_for_status()
    # Context manager guarantees the archive handle is closed after extraction.
    with zipfile.ZipFile(io.BytesIO(r.content)) as z:
        z.extractall(out_path)
def get_all_random_experiment_histories_from_files(experiment_path_prefix, net_count):
    """Read the history-arrays for nets 0..net_count-1 from their npz-files
    and stack them into a single ExperimentHistories object."""
    assert net_count > 0, f"'net_count' needs to be greater than 0, but is {net_count}."
    combined = get_random_experiment_histories_from_file(experiment_path_prefix, 0)
    for index in range(1, net_count):
        next_histories = get_random_experiment_histories_from_file(experiment_path_prefix, index)
        combined = combined.stack_histories(next_histories)
    return combined
def is_gcloud_oauth2_token_cached():
    """Returns false if 'gcloud auth login' needs to be run."""
    creds_path = os.path.join(
        os.path.expanduser('~'), '.config', 'gcloud', 'credentials')
    try:
        with open(creds_path) as creds_file:
            credentials = json.load(creds_file)
        return len(credentials['data']) != 0
    except (KeyError, IOError, OSError, ValueError):
        # Missing/unreadable file, malformed JSON, or absent 'data' key all
        # mean no cached token.
        return False
def convert_index_to_indices(index_ls, shape):
    """
    Convert a coordinate list in flat-index format (index_ls) into the
    indices_ls format (per-coordinate rows).

    :param index_ls: array of flat indices into an array of the given shape;
        cannot address more elements than the shape holds.
    :param shape: shape of the target array.
    :return: the rows of the full indices list selected by index_ls.
    """
    assert index_ls.size <= np.prod(shape)
    # Enumerate every position of a `shape`-sized array via np.where on an
    # always-true mask, then select the requested rows by flat index.
    source = np.zeros(shape=shape)
    zip_indices = np.where(source >= 0)
    indices_ls = convert.zip_type_to_indices(zip_indices=zip_indices)
    indices_ls = indices_ls[index_ls]
    return indices_ls
def get_funnels_list(connector: MixpanelAPI) -> pd.DataFrame:
    """
    Return the whole list of funnels as a table of funnel IDs and names.

    :param connector: the connector to the Mixpanel service
    :return: a pandas DataFrame indexed by funnel_id
    """
    # TODO: change dataframe to simple dict
    raw_funnels = connector.request(["funnels/list"], {})
    funnels = pd.DataFrame(raw_funnels)
    funnels.set_index('funnel_id', inplace=True)
    return funnels
def _gen_off_list(sindx):
"""
Given a starting index and size, return a list of numbered
links in that range.
"""
def _gen_link_olist(osize):
return list(range(sindx, sindx + osize))
return _gen_link_olist | 5,329,626 |
def report_operation_log_list(request):
    """
    Return the log list for routine operations.

    :param request: HTTP request, forwarded to the administrator module.
    :return: the administrator module's operation-log-list response.
    """
    return administrator.report_operation_log_list(request)
def connect_redis(redis_host, redis_port, redis_db):
    """Connect to redis, returning a module-level singleton connection.

    :param redis_host: Redis server hostname.
    :param redis_port: Redis server port.
    :param redis_db: Redis database number.
    :return: the shared redis.Redis connection (created on first call).
    """
    global _conn
    if _conn is None:
        # Fixed: Python-2 print statement -> print() function, matching the
        # Python-3 syntax (f-strings) used elsewhere in this file.
        print("connect redis %s (%s)" % ("%s:%s" % (redis_host, redis_port),
                                         os.getpid()))
        _conn = redis.Redis(host=redis_host, port=redis_port,
                            db=redis_db)
    return _conn
def debug(*args: Any) -> None:
    """Print args to the console only when the "debug" setting is enabled."""
    if not log_debug:
        return
    printf(*args)
def test_caching_policy_strict_mod(openshift, api_client, service):
"""
Tests:
- if response with valid credentials as before have status_code == 200
- sets backend-listener to the starting value
- sets the auth caching policy of an API to None.
- promotes the configuration to the staging once again.
- sets replicas of backend listener to 0.
- the "none" mode disables caching.
-this mode is useful if you want the policy to remain active, but do not want to use caching.
- if response with valid credentials as before have status_code == 403
"""
response = api_client().get("/")
assert response.status_code == 200
caching_type = service.proxy.list().policies.list()
caching_type['policies_config'][1]['configuration']['caching_type'] = "none"
# caching type None is supposed to disable caching, policy remains active, no caching
caching_type.update()
# before updating service.proxy.list(), make sure to update caching_type
service.proxy.list().update(caching_type)
# promotes the configuration to the staging once again
with openshift().scaler.scale("backend-listener", 0):
response = api_client().get("/")
assert response.status_code == 403 | 5,329,630 |
def deploy_blog(blog: BST) -> None:
    """Write each blog entry as a JSON content file under ./public/blog."""
    base = Path("./public/blog")
    for entry in blog:
        target = base / "blog" / str(entry["blog_path"])
        os.makedirs(target.parent.as_posix(), exist_ok=True)
        with target.open("w") as handle:
            json.dump(entry, handle)
def species_production_reaction(data_dir, spe='OH', top_n=50, norm=False):
    """
    Count species production reactions in the pathway, weighted by the
    accurate pathway probability (thin wrapper around ps module).

    :param data_dir: directory holding the pathway data.
    :param spe: species name to count production reactions for.
    :param top_n: number of top reactions to keep.
    :param norm: whether to normalize the counts.
    """
    ps.species_production_reaction(data_dir, spe=spe, top_n=top_n, norm=norm)
def custom_static1(filename):
    """Request to access specific files in the backup directory.

    .. :quickref: Get backup files; Get a specific file from a directory in
       the DAQBroker backup directory

    :param filename: name of the file to serve from the temp folder; files
        whose first extension segment is ``zip`` are sent as a bundled
        "downloaded_files.zip" attachment.
    """
    scoped = daqbrokerSettings.getScoped()
    session = scoped()
    # NOTE(review): the scoped session is created twice; the first session
    # object is immediately shadowed — confirm whether one pair of lines can
    # be removed.
    scoped = daqbrokerSettings.getScoped()
    session = scoped()
    # Latest Global settings row (row with the maximum clock value).
    globalsObj = session.query(
        daqbrokerSettings.Global).filter_by(
        clock=session.query(
            func.max(
                daqbrokerSettings.Global.clock))).first()
    if globalsObj:
        globals = {}
        for field in globalsObj.__dict__:
            if not field.startswith('_'):
                globals[field] = getattr(globalsObj, field)
        if 'remarks' in globals:
            globals["remarks"] = json.loads(globals["remarks"])
    else:
        # Fall back to default settings when no Global row exists yet.
        globals = {
            'clock': time.time(),
            'version': '0.1',
            'backupfolder': 'backups',
            'importfolder': 'uploads',
            'tempfolder': 'temp',
            'ntp': None,
            'commport': 9090,
            'logport': 9092,
            'remarks': {}}
    # NOTE(review): split('.')[1] inspects only the first extension segment,
    # so "a.tar.zip" is NOT treated as a zip — confirm intended.
    if filename.split('.')[1] == 'zip':
        return send_file(os.path.join(globals['tempfolder'], filename), mimetype="zip", attachment_filename="downloaded_files.zip", as_attachment=True)
    else:
        return send_from_directory(globals['tempfolder'], filename)
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None,
           renamer=None, **kwargs):
    """
    Load a delimited text file to a numpy record array.
    Basically, this function calls loadSVcols and combines columns returned by
    that function into a numpy ndarray with stuctured dtype. Also uses and
    returns metadata including column names, formats, coloring, &c. if these
    items are determined during the loading process.
    **Parameters**
            **fname** : string or file object
                    Path (or file object) corresponding to a separated variable
                    (CSV) text file.
            **names** : list of strings
                    Sets the names of the columns of the resulting tabarray. If
                    not specified, `names` value is determined first by looking for
                    metadata in the header of the file, and if that is not found,
                    are assigned by NumPy's `f0, f1, ... fn` convention. See
                    **namesinheader** parameter below.
            **formats** : string or list of strings
                    Sets the datatypes of the columns. The value of `formats` can
                    be a list or comma-delimited string of values describing values
                    for each column (e.g. "str,str,int,float" or
                    ["str", "str", "int", "float"]), a single value to apply to all
                    columns, or anything that can be used in numpy.rec.array
                    constructor.
                    If the **formats** (or **dtype**) parameter are not specified,
                    typing is done by inference. See **typer** parameter below.
            **dtype** : numpy dtype object
                    Sets the numpy dtype of the resulting tabarray, combining column
                    format and column name information. If dtype is set, any
                    **names** and **formats** specifications will be overriden. If
                    the **dtype** (or **formats**) parameter are not specified,
                    typing is done by inference. See **typer** parameter below.
            **renamer** : callable, optional
                    Function applied to the inferred column names; its result is
                    used only if it produces a unique name per column (original
                    names remain available in the returned metadata).
            The **names**, **formats** and **dtype** parameters duplicate
            parameters of the NumPy record array creation inferface. Additional
            paramters of the NumPy inferface that are passed through are
            **shape**, **titles**, **byteorder** and **aligned** (see NumPy
            documentation for more information.)
    **kwargs**: keyword argument dictionary of variable length
            Contains various parameters to be passed down to loadSVcols. These may
            include **skiprows**, **comments**, **delimiter**, **lineterminator**,
            **uselines**, **usecols**, **excludecols**, **metametadata**,
            **namesinheader**,**headerlines**, **valuefixer**, **linefixer**,
            **colfixer**, **delimiter_regex**, **inflines**, **typer**,
            **missingvalues**, **fillingvalues**, **verbosity**, and various CSV
            module parameters like **escapechar**, **quoting**, **quotechar**,
            **doublequote**, **skipinitialspace**.
    **Returns**
            **R** : numpy record array
                    Record array constructed from data in the SV file
            **metadata** : dictionary
                    Metadata read and constructed during process of reading file.
    **See Also:**
            :func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`,
            :func:`tabular.io.DEFAULT_TYPEINFERER`
    """
    [columns, metadata] = loadSVcols(fname, **kwargs)
    # Pull names/formats/dtype out of the metadata when the loader found them.
    if 'names' in metadata.keys():
        names = metadata['names']
    else:
        names = None
    if 'formats' in metadata.keys():
        formats = metadata['formats']
    else:
        formats = None
    if 'dtype' in metadata.keys():
        dtype = metadata['dtype']
    else:
        dtype = None
    # Apply the user's renamer only if it yields one unique name per column.
    if renamer is not None:
        print 'Trying user-given renamer ...'
        renamed = renamer(names)
        if len(renamed) == len(uniqify(renamed)):
            names = renamed
            print '''... using renamed names (original names will be in return
            metadata)'''
        else:
            print '... renamer failed to produce unique names, not using.'
    # Duplicate names would make an invalid record dtype; fall back to f0..fn.
    if names and len(names) != len(uniqify(names)):
        print 'Names are not unique, reverting to default naming scheme.'
        names = None
    return [utils.fromarrays(columns, type=np.ndarray, dtype=dtype,
                             shape=shape, formats=formats, names=names,
                             titles=titles, aligned=aligned,
                             byteorder=byteorder), metadata]
def write_int8_8_by_8_not_aligned():
    """
    Doctest-only demo: writing int8 chunks into a BitStream whose write
    position is not byte-aligned (it already holds 4 bits).

    >>> import numpy
    >>> int8s = ones(44100 * 2 * 2, int8)
    >>> stream = BitStream(4 * [True])
    >>> for i in range(len(int8s) / 8):
    ...     stream.write(int8s[8*i:8*(i+1)])
    """
def rationalize_quotes_from_table(table, rationalizeBase=10000):
    """
    Retrieve all quote rows from the given SQLite-backed table, rescaled so
    that every price series starts at rationalizeBase.

    :param table: one of the Quote table models (Open, High, Low, or Close).
    :param rationalizeBase: value each series is normalized to at its first row.
    :return: tuple of lists (indices, dates, bull_1x, bear_1x, bull_3x, bear_3x)
    """
    # Scale factors chosen so the first row of each series equals the base.
    first = table.select().limit(1).get()
    scale_bull_1x = rationalizeBase / first.bull_1x_price
    scale_bear_1x = rationalizeBase / first.bear_1x_price
    scale_bull_3x = rationalizeBase / first.bull_3x_price
    scale_bear_3x = rationalizeBase / first.bear_3x_price

    indices = []
    dates = []
    bull_1x_prices = []
    bear_1x_prices = []
    bull_3x_prices = []
    bear_3x_prices = []
    for row in table.select():
        indices.append(row.id)
        dates.append(row.date)
        bull_1x_prices.append(row.bull_1x_price * scale_bull_1x)
        bear_1x_prices.append(row.bear_1x_price * scale_bear_1x)
        bull_3x_prices.append(row.bull_3x_price * scale_bull_3x)
        bear_3x_prices.append(row.bear_3x_price * scale_bear_3x)
    return indices, dates, bull_1x_prices, bear_1x_prices, bull_3x_prices, bear_3x_prices
def test_serder_suber():
    """
    Test SerderSuber LMDBer sub database class: put/get/rem/pin semantics
    and ordered iteration over items, using key-event Serder values.
    """
    with dbing.openLMDB() as db:
        assert isinstance(db, dbing.LMDBer)
        assert db.name == "test"
        assert db.opened
        sdb = subing.SerderSuber(db=db, subkey='bags.')
        assert isinstance(sdb, subing.SerderSuber)
        assert not sdb.sdb.flags()["dupsort"]
        pre = "BWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc"
        srdr0 = eventing.incept(keys=[pre])
        keys = (pre, srdr0.said)
        # Basic put/get round-trip followed by removal.
        sdb.put(keys=keys, val=srdr0)
        actual = sdb.get(keys=keys)
        assert isinstance(actual, coring.Serder)
        assert actual.said == srdr0.said
        sdb.rem(keys)
        actual = sdb.get(keys=keys)
        assert actual is None
        sdb.put(keys=keys, val=srdr0)
        actual = sdb.get(keys=keys)
        assert isinstance(actual, coring.Serder)
        assert actual.said == srdr0.said
        srdr1 = eventing.rotate(pre=pre, keys=[pre], dig=srdr0.said)
        # put must not overwrite an existing entry ...
        result = sdb.put(keys=keys, val=srdr1)
        assert not result
        assert isinstance(actual, coring.Serder)
        assert actual.said == srdr0.said
        # ... but pin force-overwrites it.
        result = sdb.pin(keys=keys, val=srdr1)
        assert result
        actual = sdb.get(keys=keys)
        assert isinstance(actual, coring.Serder)
        assert actual.said == srdr1.said
        # test with keys as string not tuple
        keys = "{}.{}".format(pre, srdr1.said)
        sdb.put(keys=keys, val=srdr1)
        actual = sdb.get(keys=keys)
        assert isinstance(actual, coring.Serder)
        assert actual.said == srdr1.said
        sdb.rem(keys)
        actual = sdb.get(keys=keys)
        assert actual is None
        # test missing entry at keys
        badkey = "badkey"
        actual = sdb.get(badkey)
        assert actual is None
        # test iteritems
        sdb = subing.SerderSuber(db=db, subkey='pugs.')
        assert isinstance(sdb, subing.SerderSuber)
        sdb.put(keys=("a", "1"), val=srdr0)
        sdb.put(keys=("a", "2"), val=srdr1)
        items = [(keys, srdr.said) for keys, srdr in sdb.getItemIter()]
        assert items == [(('a', '1'), srdr0.said),
                         (('a', '2'), srdr1.said)]
        # Iteration restricted to a key prefix ("b." only, excluding "bc.").
        assert sdb.put(keys=("b", "1"), val=srdr0)
        assert sdb.put(keys=("b", "2"), val=srdr1)
        assert sdb.put(keys=("bc", "1"), val=srdr0)
        topkeys = ("b", "")  # append empty str to force trailing .sep
        items = [(keys, srdr.said) for keys, srdr in sdb.getItemIter(keys=topkeys)]
        assert items == [(('b', '1'), srdr0.said),
                         (('b', '2'), srdr1.said)]
    # Leaving the context closes and removes the temporary database.
    assert not os.path.exists(db.path)
    assert not db.opened
def task_figures():
    """Make all figures. Each figure is a sub-task.

    Generator of pydoit task dicts, one per paper figure.  Relies on the
    module-level ``fname`` filename helper and the ``subjects``,
    ``conditions`` and ``freq_bands`` study-config globals -- presumably
    defined elsewhere in this file (not visible here); verify against the
    analysis config.
    """
    # Make figure 1: plot of the CSD matrices.
    yield dict(
        name='csd',
        task_dep=['connectivity_stats'],
        file_dep=[fname.epo(subject=subjects[0]),
                  fname.csd(subject=subjects[0], condition='face')],
        targets=['../paper/figures/csd.pdf'],
        actions=['python figure_csd.py'],
    )

    # Make figure 2: plot of the source space and forward model.
    yield dict(
        name='forward',
        file_dep=[fname.fwd(subject=subjects[0]),
                  fname.fwd_r(subject=subjects[0]),
                  fname.trans(subject=subjects[0])],
        targets=['../paper/figures/forward1.png',
                 '../paper/figures/forward2.png'],
        actions=['python figure_forward.py'],
    )

    # Make figure 3: grand average power maps (per condition and hemisphere,
    # plus one contrast map per frequency band).
    file_dep = [fname.ga_power_hemi(condition=cond, hemi='lh') for cond in conditions]
    file_dep += [fname.ga_power_hemi(condition=cond, hemi='rh') for cond in conditions]
    targets = ['../paper/figures/power_face_lh.png',
               '../paper/figures/power_face_rh.png',
               '../paper/figures/power_scrambled_lh.png',
               '../paper/figures/power_scrambled_rh.png']
    targets += ['../paper/figures/power_contrast_%s-%s-lh.png' % (freq[0], freq[1]) for freq in freq_bands]
    yield dict(
        name='power',
        file_dep=file_dep,
        targets=targets,
        actions=['python figure_power.py'],
    )

    # Make figure 4: plot of the functional connectivity.
    yield dict(
        name='connectivity',
        file_dep=[fname.ga_con(condition='pruned'),
                  fname.ga_con(condition='parcelled')],
        targets=['../paper/figures/degree_lh.png',
                 '../paper/figures/degree_rh.png',
                 '../paper/figures/squircle.pdf'],
        actions=['python figure_connectivity.py'],
    )
def k(func):
    """Decorator that prints the wrapped function's name before each call."""
    def wrapper(*args, **kwargs):
        print('call %s():' % func.__name__)
        return func(*args, **kwargs)
    return wrapper
def get_playlist_object(playlist_url, access_token):
    """
    Fetch a Spotify playlist object.

    playlist_url : url of a Spotify playlist (the playlist id is its last
        path segment)
    access_token : access token obtained via client-credentials authorization
    Returns the decoded JSON playlist object containing the playlist tracks.
    """
    playlist_id = playlist_url.rsplit("/", 1)[-1]
    endpoint = f"https://api.spotify.com/v1/playlists/{playlist_id}"
    headers = {"Authorization": "Bearer " + access_token}
    # API request
    response = requests.get(endpoint, headers=headers)
    return response.json()
def guess_identifiers(fuzzy_base_name: str) -> Tuple[str, str]:
    """
    Fuzzy-match a base name and resolve it to an (item ID, base name) pair.

    :param fuzzy_base_name: The approximate base name to look up.
    :return: The (item_id, base_name) pair whose base name matches best.
    """
    all_names = _conn.execute('SELECT base_name FROM item_info').fetchall()
    best_match = process.extractOne(fuzzy_base_name, all_names)[0][0]
    row = _conn.execute(
        'SELECT item_id FROM item_info WHERE base_name = ?', (best_match,)
    ).fetchone()
    return row[0], best_match
def test_clean_skip_false(mocker):
    """
    When skip is False, _clean must delete the existing output directory
    from disk and then create a fresh one.
    """
    mocked_makedirs = mocker.patch('os.makedirs')
    mocked_rmtree = mocker.patch('shutil.rmtree')
    target = 'find/me/NOW'
    cli._clean(logger, False, {'output_dir': target})
    mocked_rmtree.assert_called_with(target, ignore_errors=True)
    mocked_makedirs.assert_called_with(target)
def part_2(filename: Path):
    """Part two of day 13: fold the paper fully and print the code."""
    dots, folds = read_instruction(filename)
    dots = fold(dots, folds)
    xs, ys = zip(*dots)
    width, height = max(xs) + 1, max(ys) + 1
    canvas = [[0] * width for _ in range(height)]
    for x, y in dots:
        canvas[y][x] = 1
    for line in canvas:
        print("".join(" #"[cell] for cell in line))
def make_image(center=(.1,-.4),dpi=500,X_cut_min = -.59 -xcut_offset,Y_cut_max = 1.61
+ ycut_offset,X_cut_max = .12-xcut_offset,Y_cut_min = .00 +ycut_offset,bands=23 ):
    """make visual count it by area then have hist values for normalization wih movement data
    to be exported and then can be counted
    PARAMS
    ------------
    center : tuple
        where beacon is
    dpi : int
        dots per inch - resolution - if changed can mess up pixel count
    X_cut,Y_cut : int
        points of rectagle, same as used for cutting of rears - floor of arena
    bands : int
        amount of circles fittign inthe rectangle - max is 23
    Returns
    ------------
    Histogram and appropriate bins made by the histogram
    Used for area estimation later on
    """
    # Draw concentric grayscale rings around the beacon, clipped to the
    # arena rectangle; the saved image's gray-level histogram then gives
    # the pixel area of each distance band.
    fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=dpi,)
    fig.patch.set_visible(False)
    rectangle = patches.Rectangle((X_cut_min,Y_cut_min), (abs(X_cut_min)+abs(X_cut_max)),abs(Y_cut_min)+abs(Y_cut_max) , color="white")
    ax1.add_patch(rectangle)
    #plt.plot(center[0],center[1], "ro")
    # One gray level per band, from black (innermost) to near-white; drawn
    # outermost-first so inner rings paint over the outer ones.
    color = np.linspace(0,.99,bands+1)
    for i in reversed(range(bands)):
        c=color[i]
        patch = patches.Circle((center[0],center[1]), radius=.075*i,color=str(c))
        ax1.add_patch(patch)
        patch.set_clip_path(rectangle)  # restrict the ring to the arena floor
    ax1.axis("equal")
    ax1.axis("off")
    fig.savefig('norm_graph.png', dpi=dpi, transparent=True)
    # Rasterize the current canvas and histogram its gray levels: one bin
    # per band; range upper bound 249 excludes the white background.
    img= Image.frombytes('RGB',fig.canvas.get_width_height(),fig.canvas.tostring_rgb())
    image_array = np.asarray(img)
    hist, bins = np.histogram(image_array,bins=bands,range=(0,249))
    #plt.show()
    # NOTE(review): `width` is unused and `center` is rebound here, shadowing
    # the beacon parameter -- leftover from the commented-out bar plot.
    width = 0.7 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    #plt.bar(center, hist, align='center', width=width)
    return hist,bins
def read_answer_patterns(pattern_file_path):
    """Load answer patterns into a qid -> [patterns] dictionary.

    Each line of the file is "<qid>\t<pattern>".  Patterns for the same qid
    are grouped together regardless of line order (the original required
    them to be contiguous, and crashed with a NameError -- or stored a
    ``None`` key -- on an empty file).

    :param pattern_file_path: path to the tab-separated pattern file
    :return: dict mapping each qid to the list of its patterns; an empty
        dict for an empty file
    """
    qid2patterns = {}
    with open(pattern_file_path) as f:
        for line in f:
            qid, pattern = line.strip().split("\t")
            # setdefault groups patterns per qid in a single pass.
            qid2patterns.setdefault(qid, []).append(pattern)
    return qid2patterns
def fitness(coords, solution):
    """
    Return the total length of the tour described by ``solution``.

    ``solution`` is a sequence of indices into ``coords``; the tour is
    closed, i.e. the last city connects back to the first.
    """
    n = len(coords)
    return sum(
        dist(coords[solution[i % n]], coords[solution[(i + 1) % n]])
        for i in range(n)
    )
def f_of_sigma(sigma, A=0.186, a=1.47, b=2.57, c=1.19):
    """
    Halo mass-function prefactor f(sigma), parametrized as in
    Tinker et al. 2008 (ApJ 679, 1218).  The default parameter values
    correspond to a mean halo density of 200 times the background
    (table 2 of that paper).

    Parameters
    ----------
    sigma : float
        Standard deviation of the linear power spectrum.
    A, a, b, c : float, optional
        Fit parameters; A is 0.186 by default.

    Returns
    -------
    f : float
        Value of f(sigma).
    """
    import numpy as np
    shape_term = (sigma / b) ** (-a) + 1.0
    return A * shape_term * np.exp(-c / (sigma * sigma))
def create_role(
    role_name: str,
    base_session: boto3.Session,
    region: str,
    auto_trust_caller_identity=True,
    allowed_services: Sequence[str] = [],
    allowed_aws_entities: Sequence[str] = [],
    external_id: str = None,
):
    """
    Creates an IAM role that the specified services / AWS entities may assume.

    :param role_name: name for the new role
    :param base_session: boto3 session used to create the IAM client
    :param region: region passed to get_caller_identity
    :param auto_trust_caller_identity: when True, the caller's own identity
        is added to the trusted entities
    :param allowed_services: service principals allowed to assume the role
    :param allowed_aws_entities: additional AWS entities to trust
    :param external_id: optional ExternalId condition for the trust policy
    :return: The newly created role (as returned by iam.create_role).
    :raises ClientError: re-raised after logging when role creation fails.

    NOTE(review): uses mutable default arguments ([]) -- harmless here since
    they are never mutated, but worth confirming against project style.
    """
    iam = base_session.client("iam")
    trusted_entities = set(allowed_aws_entities)
    if auto_trust_caller_identity:
        trusted_entities.add(get_caller_identity(base_session, region))
    try:
        # Retry on throttling/transient IAM failures (project helper).
        role = exponential_retry(
            iam.create_role,
            ["AccessDenied", "ServiceFailureException"],
            RoleName=role_name,
            AssumeRolePolicyDocument=_get_trust_policy(allowed_services, trusted_entities, external_id),
            MaxSessionDuration=MAX_ASSUME_ROLE_DURATION,
        )
        # IAM is eventually consistent: wait for the role to exist before
        # returning, falling back to a fixed sleep on old botocore versions
        # that lack the 'role_exists' waiter.
        if "role_exists" in iam.waiter_names:
            iam.get_waiter("role_exists").wait(RoleName=role_name)
            time.sleep(3)
        else:
            time.sleep(15)  # give some time for IAM propagation
        module_logger.info(f"Created role {role_name} for new services {allowed_services} and entities {trusted_entities}")
    except ClientError as ex:
        module_logger.exception("Couldn't create role %s. Exception: %s", role_name, str(ex))
        raise
    else:
        return role
def get_auto_switch_state(conn):
    """Return the device's current auto-switch enabled/disabled state.

    Raises ChecksumError when the response packet fails validation.
    """
    packet = _request(conn, GET_AUTO_SWITCH_STATE)
    if _validate_packet(packet):
        return _decode_toggle(packet)
    raise ChecksumError()
def SGD(X, y, lmd, gradient, n_epochs, M, opt = "SGD", eta0 = None, eta_type = 'schedule', t0=5, t1=50, momentum = 0., rho = 0.9, b1 = 0.9, b2 = 0.999):
    """Stochastic Gradient Descent Algorithm
    Args:
        - X (array): design matrix (training data)
        - y (array): output dataset (training data)
        - lmd (float): regularization parameter passed through to `gradient`
        - gradient (function): function to compute the gradient
        - n_epochs (int): number of epochs
        - M (int): size of minibatches
        - opt (string): "SGD", "ADAGRAD", "RMS", "ADAM" - different optimizers
        - eta0 (float): learning rate if 'static' or 'invscaling'
        - eta_type = 'static', 'schedule', 'invscaling', 'hessian' - different methods for evaluating the learning rate
        - t0 (float): initial paramenter to compute the learning rate in 'schedule'
        - t1 (float): sequential paramenter to compute the learning rate in 'schedule'
        - momentum, rho, b1, b2 (float): parameters for different optimizers
    Returns:
        beta/theta-values
    """
    if opt not in optimizers:
        raise ValueError("Optimizer must be defined in "+str(optimizers))
    if eta_type not in eta_types:
        raise ValueError("Learning rate type must be defined within "+str(eta_types))
    theta = np.random.randn(X.shape[1])
    m = int(X.shape[0]/M)
    v = np.zeros(X.shape[1])   # velocity (momentum) / squared-gradient accumulator (ADAGRAD, RMS)
    ma = np.zeros(X.shape[1])  # first-moment accumulator for ADAM
    delta = 1e-1               # fuzz term to avoid division by zero
    # The Hessian of the OLS cost depends only on X, not on theta or the
    # minibatch, so compute it (and the stability bound on eta) once instead
    # of once per minibatch iteration as before.
    H = (2.0/X.shape[0])* (X.T @ X)
    eigvalues, eigvects = np.linalg.eig(H)
    eta_opt = 1.0/np.max(eigvalues)
    if not eta0:
        eta0 = eta_opt
    for epoch in range(n_epochs):
        for i in range(m):
            random_index = M*np.random.randint(m)
            Xi = X[random_index:random_index + M]
            yi = y[random_index:random_index + M]
            gradients = gradient(Xi, yi, theta, lmd)
            eta = eta_opt  # 'hessian' keeps this value
            if eta_type == 'static':
                eta = eta0
            elif eta_type == 'schedule':
                eta = learning_schedule(epoch*m+i, t0=t0, t1=t1)
            elif eta_type == 'invscaling':
                # NOTE(review): diverges (ZeroDivisionError) at epoch 0, i 0,
                # exactly as the original did -- confirm intended step offset.
                power_t = 0.25
                eta = eta0 / pow(epoch*m+i, power_t)
            if opt == "SGD":  # BUG FIX: was "SDG", so plain/momentum SGD never updated theta
                v = momentum * v - eta * gradients
                theta = theta + v
            elif opt == "ADAGRAD":
                v = v + np.multiply(gradients, gradients)
                theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients)
            elif opt == "RMS":
                v = rho * v + (1. - rho) * np.multiply(gradients, gradients)
                theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients)
            elif opt == "ADAM":
                ma = b1 * ma + (1. - b1) * gradients
                v = b2 * v + (1. - b2) * np.multiply(gradients, gradients)
                ma = ma / (1. - b1)
                v = v / (1. - b2)
                theta = theta - np.multiply(eta / np.sqrt(v+delta), ma)
    return theta
def get_last_oplog_entry(client):
    """
    Return the most recent oplog entry from the given pymongo.MongoClient.

    Raises ValueError if the oplog contains no entries.
    """
    collection = client['local']['oplog.rs']
    cursor = collection.find().sort('$natural', pymongo.DESCENDING).limit(1)
    entries = list(cursor)
    if not entries:
        raise ValueError("oplog has no entries!")
    return entries[0]
def schmidt_quasi_norm(size):
    """
    Return a 2-D array of Schmidt quasi-normalisation factors.

    Entry [m][n] is filled for m <= n; per the original docstring the
    layout is symmetrical about the diagonal.
    """
    schmidt = square_array(size)
    for n in range(size):
        # The double factorial depends only on n, so compute it once per row.
        dbl = 1 if n == 0 else double_factorial(2 * n - 1)
        for m in range(n + 1):
            norm = math.sqrt(
                ((2 - kronecker_delta(0, m)) * math.factorial(n - m))
                / math.factorial(n + m)
            )
            schmidt[m][n] = norm * dbl / math.factorial(n - m)
    return schmidt
def _merge_jamos(initial, medial, final=None):
    """Combine jamo components into one precomposed Hangul syllable.

    Raises:
        AssertionError: If ``initial``, ``medial``, or ``final`` is not in
            ``INITIALS``, ``MEDIALS``, or ``FINALS`` respectively.
    """
    assert initial in INITIALS
    assert medial in MEDIALS
    if final is None:
        final = "∅"
    assert final in FINALS
    offset = (588 * _INITIALS_IDX[initial]
              + 28 * _MEDIALS_IDX[medial]
              + _FINALS_IDX[final])
    return chr(0xAC00 + offset)
def azimuth(wspr_data):
    """Display the contacts azimut / distance.

    Renders a polar scatter plot (bearing vs. distance, point size by
    contact density) of the WSPR contacts and saves it as azimuth.png in
    the configured target directory.
    """
    filename = os.path.join(Config.target, 'azimuth.png')
    logging.info('Drawing azimuth to %s', filename)

    data = []
    # Quantize bearing to Config.granularity degrees and distance to 50 km
    # buckets so identical contacts can be counted for the density sizing.
    for node in wspr_data:
        data.append((math.radians(int(node.azimuth/Config.granularity) * Config.granularity),
                     (node.distance / 50) * 50))

    dist_count = collections.defaultdict(int)
    for elem in data:
        dist_count[elem] += 1

    theta = []
    distance = []
    density = []
    for key, cnt in dist_count.items():
        theta.append(key[0])
        distance.append(key[1])
        density.append(cnt * 3)  # scale marker size with contact count

    fig = plt.figure(figsize=(8, 8))
    fig.text(.01, .02, ('http://github.com/0x9900/wspr - Distance & direction - '
                        'Time span: %sH - Band: %s') % (Config.timespan, Config.band))
    fig.suptitle('[{}] WSPR Stats'.format(Config.callsign), fontsize=14, fontweight='bold')

    # Compass convention: 0 degrees at North, angles increasing clockwise.
    ax_ = fig.add_subplot(111, projection="polar")
    ax_.set_theta_zero_location("N")
    ax_.set_theta_direction(-1)
    ax_.scatter(theta, distance, s=density, c=theta, cmap='PiYG', alpha=0.8)

    plt.savefig(filename)
    plt.close()
def get_feature_set(eq, features):
    """Resolve feature spec strings into a flat feature list for an equity.

    Arguments:
        eq {Equity} -- equity to build around
        features {string array} -- features and params to use

    Returns:
        list -- flattened list of ndarrays, one per resolved sub-feature
    """
    feature_set = []
    for spec in features:
        feature_set.extend(get_feature(eq, spec))
    return feature_set
def class_http_endpoint(methods: METHODS, rule_string: str, side_effect: Optional[HTTP_SIDE_EFFECT] = None, **kwargs):
    """
    Create an HTTP endpoint template.  Declare this as a class variable in a
    webserver subclass to automatically add the endpoint to all instances.
    Can also be used as a decorator.

    Args:
        methods: forwarded to MockHTTPEndpoint
        rule_string: forwarded to MockHTTPEndpoint
        side_effect: forwarded to MockHTTPEndpoint
        **kwargs: forwarded to MockHTTPEndpoint

    Returns:
        A new http endpoint template (or a decorator producing one).
    """
    def decorator(side_effect_method):
        return HTTPEndpointTemplate(methods, rule_string, side_effect_method=side_effect_method, **kwargs)

    return decorator if side_effect is None else decorator(side_effect)
def value(iterable, key=None, position=1):
    """Generic value getter. Returns the contained value.

    With ``key`` given, index by key; otherwise index iterables by
    ``position`` and return non-iterable objects unchanged.
    """
    if key is not None:
        return iterable[key]
    if hasattr(iterable, '__iter__'):
        return iterable[position]
    return iterable
def test_derive_course_code_from_course_on_save():
    """Creating a DocumentInfo must copy the course code from its course FK."""
    course = CourseFactory(course_code='TMA4130')
    exam = DocumentInfo.objects.create(course=course)
    assert exam.course_code == 'TMA4130'
def find_aa_seqs(
    aa_seq: str,
    var_sites: str,
    n_flanking: int = 7
):
    """Return the flanking AA window around each variant site in a protein.

    Args:
        aa_seq: Protein sequence string.
        var_sites: Delimited string of 1-indexed site positions.
        n_flanking: Number of flanking AAs to grab on each side of a site.

    Returns: Delimited string of windows, each centered on a site and padded
        with '_' where the window extends past either end of the sequence.
    """
    pad = '_'
    window = 2 * n_flanking + 1
    positions = [max(int(tok.strip()) - 1, 0)
                 for tok in var_sites.split(var_site_delimiter)]
    windows = []
    for pos in positions:
        n = int(pos)
        if len(aa_seq) < n:
            return pad * window
        lo = max(n - n_flanking, 0)
        hi = min(len(aa_seq), n + n_flanking + 1)
        left = pad * max(n_flanking - n, 0)
        right = pad * max(n + n_flanking + 1 - len(aa_seq), 0)
        windows.append(left + aa_seq[lo:hi] + right)
    return var_site_delimiter.join(windows)
def test_by_id_with_invalid_param(get_user, user_repository_spy, fake_user):
    """
    Exercise the by_id method with an invalid value for the user_id
    parameter: the repository must never be queried, and the response
    must report failure (success False) with no data.
    """
    response = get_user.by_id(user_id=fake_user.name)
    # Input side: the repository spy received no query parameters.
    assert not user_repository_spy.get_user_params
    # Output side: negative success flag and empty payload.
    assert response["success"] is False
    assert response["data"] is None
def main(fn, tmp=False):
    """Sort the lines of file ``fn`` and write them to a new file.

    The output lands next to the input with a ``_sorted`` suffix unless
    ``tmp`` is true, in which case it goes into the TMP directory under
    the same base name.  Returns the path of the file written.
    """
    if tmp:
        target = os.path.join(TMP, os.path.basename(fn))
    else:
        root, ext = os.path.splitext(fn)
        target = root + '_sorted' + ext
    with open(fn) as source, open(target, "w") as sink:
        sink.writelines(sorted(source))
    return target
def compact_interval_string(value_list):
    """Compact a list of integers into a comma-separated string of intervals.

    Args:
        value_list: A list of sortable integers such as a list of numbers

    Returns:
        A compact string representation, such as "1-5,8,12-15"
    """
    if not value_list:
        return ''
    # Work on a sorted copy: the original sorted in place, surprising
    # callers that still needed their list in its original order.
    values = sorted(value_list)

    # Build up a list of separate contiguous (first, last) intervals.
    interval_list = []
    start = end = values[0]
    for val in values[1:]:
        if val > end + 1:
            interval_list.append((start, end))
            start = val
        end = val
    interval_list.append((start, end))

    # Collapse each interval to "first-last", or just "first" if equal.
    return ','.join(
        '{}-{}'.format(lo, hi) if lo != hi else str(lo)
        for lo, hi in interval_list
    )
def _mk_asm() -> str:
    """
    Generate the assembly that programs every allocated translation table.
    """
    out = []
    for n, t in enumerate(table.Table._allocated):
        out.append(_mk_table(n, t))
        pending = sorted(t.entries)
        while pending:
            idx = pending[0]
            entry = t.entries[idx]
            if type(entry) is Region:
                out.append(_mk_blocks(n, t, idx, entry))
                # A region covers num_contig consecutive slots.
                for covered in range(idx, idx + entry.num_contig):
                    pending.remove(covered)
            else:
                out.append(_mk_next_level_table(n, idx, entry))
                pending.remove(idx)
    return "".join(out)
def getNuitkaModules():
    """ Create a list of all modules known to Nuitka.

    Notes:
        This will be executed at most once: on the first time when a module
        is encountered and cannot be found in the recorded calls (JSON array).

    Returns:
        List of all modules, in first-seen order and without duplicates.
    """
    mlist = []
    # A set gives O(1) de-duplication; the original scanned mlist with
    # `in` for every module, which is O(n) per check (quadratic overall).
    # NOTE(review): assumes module objects are hashable -- confirm.
    seen = set()
    for source in (getRootModules(), done_modules,
                   uncompiled_modules, active_modules):
        for m in source:
            if m not in seen:
                seen.add(m)
                mlist.append(m)
    return mlist
def run_model(network):
    """
    Instantiate, train (for 20 epochs, on GPU) and return a model built
    with the module's pre-defined hyperparameters.
    """
    epochs = 20
    model = network(vocab_size + 1, EMBEDDING_SIZE)
    model.cuda()
    train_model(model, train, epochs=epochs, echo=False)
    return model
def is_prime(num):
    """Return True iff ``num`` is prime.

    Generalized from the original, which assumed ``num > 3``: now also
    correct for 2 (prime) and for values < 2 and even numbers (not prime).
    Uses trial division by odd candidates up to sqrt(num).
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for p in range(3, int(num**0.5)+1, 2):  # Jumps of 2 to skip even numbers
        if num % p == 0:
            return False
    return True
def test_nonregistered_units_raise_error():
    """Requesting a unit that was never registered must raise UnitError."""
    units = Unit.unit_factory(__name__)
    with pytest.raises(exceptions.UnitError):
        units("not_registered")
def verify_password(email_or_token, password):
    """
    HTTP authentication verification callback.

    E-mail and password are verified by existing methods on the User model;
    when the credentials are valid this callback returns True.  The
    authenticated user is stored on Flask's global ``g`` object so that
    view functions can access it.

    Note: for anonymous access this returns True and assigns an
    AnoymousUser instance (Flask-Login style) to ``g.current_user``.

    :param email_or_token: e-mail address, auth token, or '' for anonymous
    :param password: password, or '' when email_or_token carries a token
    :return: True when authentication succeeds
    """
    if email_or_token == '':
        # The API blueprint supports anonymous access; the client sends an
        # empty e-mail field, which we treat as an anonymous user.
        g.current_user = AnoymousUser()
        return True
    if password == '':
        # Empty password: assume email_or_token carries a token and
        # authenticate by token.
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # Both fields non-empty: regular e-mail address + password authentication.
    user = User.query.filter_by(email=email_or_token).first()
    if not user:
        return False
    g.current_user = user
    g.token_used = False
    return user.verify_password(password)
def get_charges_from_recent_text():
    """This function parces the recent text field and extracts the listed charges.

    Pulls Airtable records that still need charge extraction and, depending
    on which jail the record came from, parses either the saved HTML table
    or the plain recent_text into charges / bond / fine fields, writing the
    results back to Airtable.
    """
    t0, i = time.time(), 0
    needs_charges_formula = "AND(charges_updated = '', html != '', recent_text != '', hours_since_verification < 72, DONT_DELETE != 'no charges')"
    records = airtab.get_all(formula=needs_charges_formula)
    for record in records:
        this_dict = {}
        if record["fields"]["jail"] == "lcdc":
            # LCDC pages render charges in an HTML table; the optional tfoot
            # row carries the bond/fine totals.
            charges = []
            bond_ammounts = []
            fine_ammounts = []
            soup = BeautifulSoup(record["fields"]["html"], "html.parser").tbody
            rows = soup.find_all("tr")
            if soup.tfoot:
                goods = rows[: len(rows) - 1]
                this_dict["intake_bond_cash"] = soup.tfoot.find_all("td")[2].b.string.strip()
                this_dict["intake_fine_ammount"] = soup.tfoot.find_all("td")[3].b.string.strip()
            else:
                goods = rows
            for row in goods:
                cells = row.find_all("td")
                if cells[0].string.strip():
                    # Quote charges containing commas so the comma-joined
                    # Airtable field stays parseable.
                    if "," in cells[0].string.strip():
                        charges.append('"' + cells[0].string.strip() + '"')
                    else:
                        charges.append(cells[0].string.strip())
                if cells[2].string.strip():
                    bond_ammounts.append(
                        cells[2].string.strip().replace(",", ""))
                if cells[3].string.strip():
                    fine_ammounts.append(
                        cells[3].string.strip().replace(",", ""))
            if charges:
                this_dict["charges"] = ", ".join(charges)
            if bond_ammounts:
                this_dict["bond_ammounts"] = "\n".join(bond_ammounts)
            if fine_ammounts:
                this_dict["fine_ammounts"] = "\n".join(fine_ammounts)
            airtab.update(record["id"], this_dict, typecast=True)
            i += 1
        elif record["fields"]["jail"] == "kcdc":
            # KCDC: charges are the lines between "Charges:" and "Note:".
            charges = []
            text = record["fields"]["recent_text"]
            goods = text[text.find("Charges:"): text.find("Note:")].splitlines()
            if len(goods) > 1:
                # NOTE(review): `charges` is built here but the update uses
                # goods[1:] directly, so the quoting loop has no effect --
                # confirm which behavior is intended.
                for good in goods:
                    if "," in good:
                        charges.append('"' + good.strip() + '"')
                    else:
                        charges.append(good)
                this_dict["charges"] = ", ".join(goods[1:])
                airtab.update(record["id"], this_dict)
                i += 1
        elif record["fields"]["jail"] in {"ccdc", "tcdc", "jcdc"}:
            # These jails share a layout: charges sit between "\nCharges:"
            # and "\nBond:" in the plain text.
            charges = []
            text = record["fields"]["recent_text"]
            x = text.find("\nCharges:") + 9
            y = text.find("\nBond:")
            goods = text[x:y].strip().splitlines()
            for line in goods:
                if "," in line:
                    charges.append('"' + line + '"')
                else:
                    charges.append(line)
            this_dict["charges"] = ", ".join(charges)
            airtab.update(record["id"], this_dict)
            i += 1
        elif record["fields"]["jail"] == "hcdc":
            # HCDC lists up to four fixed "Charge N" labels; the value is on
            # the following line.  Placeholder "Felony / Misd" rows are skipped.
            messy = []
            goods = []
            data = record["fields"]["recent_text"].splitlines()
            messy.append(data[data.index("Charge 1") + 1].strip())
            messy.append(data[data.index("Charge 2") + 1].strip())
            messy.append(data[data.index("Charge 3") + 1].strip())
            messy.append(data[data.index("Charge 4") + 1].strip())
            for x in messy:
                if not x.startswith("Felony / Misd"):
                    if "," in x:
                        goods.append('"' + x + '"')
                    else:
                        goods.append(x)
            this_dict["charges"] = ", ".join(goods)
            airtab.update(record["id"], this_dict)
            i += 1
    wrap_it_up(t0, new=i, total=len(records), function='get_charges_from_recent_text')
def is_valid_orcid_id(orcid_id: str):
    """Validate an ORCID iD checksum (ISO 7064 MOD 11-2).

    Adapted from stdnum.iso7064.mod_11_2.checksum(); expects the bare
    16-character identifier (digits, possibly ending in 'X'), no dashes.
    """
    total = 0
    for ch in orcid_id:
        digit = 10 if ch == "X" else int(ch)
        total = (total * 2 + digit) % 11
    return total == 1
def train_model(args, model):
    """ Train the model

    PaddlePaddle training loop with VisualDL logging, gradient
    accumulation, optional AMP (fp16), warmup LR scheduling, periodic
    evaluation and best-checkpoint saving.  Runs until args.num_steps
    optimizer steps have been taken.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    writer = LogWriter(logdir=os.path.join("logs", args.name))

    # The effective batch per step is train_batch_size; split it across
    # gradient-accumulation micro-batches.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    # Prepare dataset
    train_loader, test_loader = get_loader(args)

    # Prepare optimizer and scheduler
    t_total = args.num_steps
    if args.decay_type == "cosine":
        scheduler = WarmupCosineSchedule(args.learning_rate, warmup_steps=args.warmup_steps, t_total=t_total)
    else:
        scheduler = WarmupLinearSchedule(args.learning_rate, warmup_steps=args.warmup_steps, t_total=t_total)
    optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
                                          learning_rate=scheduler,
                                          momentum=0.9,
                                          weight_decay=args.weight_decay,
                                          grad_clip=paddle.nn.ClipGradByGlobalNorm(clip_norm=args.max_grad_norm))
    # Resume from a checkpoint if one was given; keep the scheduler in sync.
    if args.resume_model is not None:
        start_iter = resume(args, model, optimizer, args.resume_model)
        scheduler.last_epoch = start_iter
    else:
        start_iter = 0

    if args.fp16:
        # Step 1: create a GradScaler that scales the loss to avoid
        # floating-point underflow/overflow under mixed precision.
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.loss_scale)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Total optimization steps = %d", args.num_steps)
    logger.info("  Instantaneous batch size per GPU = %d", args.train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * args.n_gpu)
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)

    optimizer.clear_grad()
    set_seed(args.seed)  # Added here for reproducibility (even between python 2 and 3)
    losses = AverageMeter()
    global_step = start_iter
    best_acc = 0
    while True:
        model.train()
        for step, batch in enumerate(train_loader):
            x, y = batch
            if args.fp16:
                # Step 2: run the forward pass inside the AMP autocast
                # context so eligible ops execute in fp16.
                with paddle.amp.auto_cast():
                    loss = model(x, y)
            else:
                loss = model(x, y)

            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                # Step 3: scale the loss with the GradScaler from Step 1
                # and back-propagate the scaled loss.
                scaled = scaler.scale(loss)
                scaled.backward()
            else:
                loss.backward()

            # Only take an optimizer step once per accumulation window.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                losses.update(loss.item()*args.gradient_accumulation_steps)
                if args.fp16:
                    # Unscale gradients and apply the update.
                    scaler.minimize(optimizer, scaled)
                else:
                    optimizer.step()
                scheduler.step()
                optimizer.clear_grad()
                global_step += 1

                if global_step%10==0:
                    print("Training (%d / %d Steps) (loss=%2.5f(%2.5f))" % (global_step, t_total, losses.val, losses.avg))
                writer.add_scalar("train/loss", value=losses.val, step=global_step)
                writer.add_scalar("train/lr", value=optimizer.get_lr(), step=global_step)
                # Periodic evaluation; keep only the best checkpoint flagged.
                if global_step % args.eval_every == 0:
                    accuracy, valid_loss = valid(args, model, test_loader, global_step)
                    writer.add_scalar("test/accuracy", value=accuracy, step=global_step)
                    writer.add_scalar("test/loss", value=valid_loss, step=global_step)
                    print("Accuracy:",accuracy)
                    if best_acc < accuracy:
                        save_model(args, model, optimizer, global_step, is_best=True)
                        best_acc = accuracy
                    else:
                        save_model(args, model, optimizer, global_step, is_best=False)
                    model.train()

                if global_step % t_total == 0:
                    break
        losses.reset()
        if global_step % t_total == 0:
            break

    writer.close()
    logger.info("Best Accuracy: \t%f" % best_acc)
    logger.info("End Training!")
    print("Best Accuracy:",best_acc)
def copy_file(src, dest):
    """Copy the contents of the file at ``src`` to ``dest``.

    Args:
        src (string): path of the file to copy from
        dest (string): path of the file to copy to
    """
    shutil.copyfile(src, dest)
def extract_entity(entities, entity_type=_PERSON):
    """
    Extract names of the given entity type from an entities JSON structure.

    :param entities: dict with an _ENTITIES list; each entry may carry
        _TYPE, _NAME and _METADATA keys
    :param entity_type: entity type to extract (e.g. PERSON, ORGANIZATION)
    :raises ValueError: if entity_type is falsy or entities has no
        _ENTITIES key
    :return: list of extracted names; implicitly None when a KeyError is
        hit mid-extraction (it is logged and swallowed) -- callers should
        handle both.
    """
    if not entity_type:
        raise ValueError('Invalid entity type')
    if _ENTITIES not in entities:
        raise ValueError('No entities format')
    try:
        extracted_entities = []
        log.info('extract_entity() Searching for %s in %r', entity_type,
                 entities)
        for entity in entities[_ENTITIES]:
            # Extract entity (PERSON, ORGANIZATION)
            if _TYPE in entity:
                if entity[_TYPE] == entity_type:
                    entity_name = entity[_NAME]
                    log.info('extract_entity() Extracting %s from entity %s',
                             entity_type, entity_name)
                    # Only consider capitalized names.
                    if entity_name[0].isupper():
                        # Entities with metadata are accepted outright;
                        # ones without are filtered against a stop-word
                        # dictionary (extract_filter) first.
                        if entity[_METADATA]:
                            log.info('extract_entity() | Insert %s: %s | %s ',
                                     entity_type, entity_name,
                                     entity[_METADATA])
                            extracted_entities.append(entity[_NAME])
                        else:
                            # Filter entity name by discarding dictionary of
                            # words.
                            if not set(extract_filter()) & set(
                                    entity_name.lower().split()):
                                log.info('extract_entity() | Insert %s %s ',
                                         entity_type, entity_name)
                                extracted_entities.append(entity[_NAME])
        return extracted_entities
    except KeyError as e:
        # NOTE(review): missing _NAME/_METADATA keys are logged and the
        # function falls through, returning None -- confirm callers expect
        # None rather than a partial list.
        log.exception(e)
def ReadBDAFile(bdafile):
    """ Read BDA file
    :param str bdafile: file name
    :return: natm - number of atoms
    :return: molnam - name of molecule
    :return: frgdat - [bdalst,frgnamlst,frgatmdic,frgattribdic]
             bdalst:[[bda#,baa#,bda atmnam,baa atmnam,
                         bda resdat,baa resdat],...]
             frgnamlst: [frgnam1,frgnam2,...]
             frgatmdic:{frgnam:atmlst,...}
             frgattribdic:{frgnam:[charge,layer,active],..}

    On any parse error a message box is shown and (molnam,resnam,natm,[])
    is returned instead of the full frgdat.
    """
    def ErrorMessage(line,s):
        # Report the offending line number and raw text to the user.
        mess='Error at line='+str(line)+'\n'
        mess=mess+'s='+s
        lib.MessageBoxOK(mess,'rwfile.ReadBDAFile')
    bdalst=[]; natm=-1; molnam=''; resnam=''; nbda=-1
    frgnamlst=[]; frgatmdic={}; frgattribdic={}
    if not os.path.exists(bdafile):
        mess='file not found. file='+bdafile
        lib.MessageBoxOK(mess,'rwfile.ReadBDAFile')
        return molnam,resnam,natm,[]
    head,tail=os.path.split(bdafile)
    bdanam,ext=os.path.splitext(tail)
    name=lib.GetResDatFromFileName(bdanam)
    if name is not None: bdanam=name
    f=open(bdafile,'r')
    line=0
    for s in f.readlines():
        line += 1; ss=s
        s=s.strip()
        if len(s) <= 0: continue
        # Strip trailing '#' comments.
        nc=s.find('#')
        if nc >= 0: s=s[:nc].strip()
        if len(s) <= 0: continue
        if s.startswith('MOLNAM',0,6):
            key,molnam=lib.GetKeyAndValue(s)
            continue
        elif s.startswith('RESNAM',0,6):
            key,resnam=lib.GetKeyAndValue(s)
            if resnam[-1] == ':': resnam=resnam+' '
            continue
        elif s.startswith('NATM',0,4):
            key,natm=lib.GetKeyAndValue(s)
            continue
        elif s.startswith('NBDA',0,4):
            key,nbda=lib.GetKeyAndValue(s)
            continue
        elif s.startswith('BDABAA',0,6):
            # "BDABAA = <bda#> <baa#> ["bdaatm" "baaatm" ["bdares" "baares"]]"
            key,s0=lib.GetKeyAndValue(s,conv=False)
            s1=s0; s2=''; nc=s0.find('"')
            if nc >= 0:
                s1=s0[:nc]; s2=s0[nc:]
            items=lib.SplitStringAtSpacesOrCommas(s1)
            try:
                bda=int(items[1])-1; baa=int(items[2])-1
            except:
                ErrorMessage(line,ss)
                return molnam,resnam,natm,[]
            #if not bdadic.has_key(bdanam): bdadic[bdanam]=[]
            bdaatm=None; baaatm=None; bdares=None; baares=None
            items=lib.GetStringBetweenQuotation(s2)
            if len(items) >= 2:
                bdaatm=items[0]; baaatm=items[1]
            if len(items) >= 4:
                bdares=items[2]; baares=items[3]
            bdalst.append([bda,baa,bdaatm,baaatm,bdares,baares])
        elif s.startswith('FRAGMENT',0,8):
            # "FRAGMENT <name> ... charge layer active spin [atom list]"
            key,s1=lib.GetKeyAndValue(s,conv=False)
            items=s1.split('[',1)
            try:
                dat1=items[0]; dat2=items[1]
                dat2=dat2.strip(); dat2=dat2[:-1]
            except:
                ErrorMessage(line,ss)
                return molnam,resnam,natm,[]
            items=lib.SplitStringAtSpacesOrCommas(dat1)
            frgnam=items[1]; charge=int(items[3]); layer=int(items[4])
            active=int(items[5]); spin=int(items[6])
            frgnamlst.append(frgnam)
            frgattribdic[frgnam]=[charge,layer,active,spin]
            try: atmlst=lib.StringToInteger(dat2)
            except:
                # BUG FIX: was ErrorMessage(lin,ss) -- 'lin' is undefined, so
                # any malformed atom list raised NameError instead of showing
                # the error dialog.
                ErrorMessage(line,ss)
                return molnam,resnam,natm,[]
            atmlst=[x-1 for x in atmlst]  # convert to 0-based indices
            #const.CONSOLEMESSAGE('atmlst='+str(atmlst))
            frgatmdic[frgnam]=atmlst
        else: pass
    f.close()
    frgdat=[bdalst,frgnamlst,frgatmdic,frgattribdic]
    return molnam,resnam,natm,frgdat
def reconstruct_from_patches(img_arr, org_img_size, stride=None, size=None):
    """Stitch square patches back into full-size images.

    Args:
        img_arr (numpy.ndarray): patch stack of shape (n_patches, size,
            size, channels); a single 3-D patch array is also accepted.
        org_img_size (tuple): (height, width) of the original image.
        stride (int, optional): stride used when the patches were cut;
            defaults to ``size``.
        size (int, optional): patch edge length; defaults to the patch
            array's second dimension.

    Raises:
        ValueError: if ``org_img_size`` is not a tuple.

    Returns:
        numpy.ndarray: stack of reconstructed images, cropped to
        ``org_img_size``.
    """
    if type(org_img_size) is not tuple:
        raise ValueError("org_image_size must be a tuple")

    if img_arr.ndim == 3:
        img_arr = np.expand_dims(img_arr, axis=0)
    if size is None:
        size = img_arr.shape[1]
    if stride is None:
        stride = size

    n_channels = img_arr.shape[3]

    def _n_steps(extent):
        # Number of patch rows/cols; add one partial step when the stride
        # grid does not cover the full extent.
        steps = extent // stride
        return steps + 1 if steps * stride < extent else steps

    i_max = _n_steps(org_img_size[0])
    j_max = _n_steps(org_img_size[1])
    n_images = img_arr.shape[0] // (i_max * j_max)

    images = []
    patch_idx = 0
    for _ in range(n_images):
        canvas = np.zeros((i_max * stride, j_max * stride, n_channels),
                          dtype=img_arr[0].dtype)
        for i in range(i_max):
            for j in range(j_max):
                for ch in range(n_channels):
                    canvas[i * stride: i * stride + size,
                           j * stride: j * stride + size,
                           ch] = img_arr[patch_idx, :, :, ch]
                patch_idx += 1
        # Crop the working canvas down to the original image size.
        images.append(canvas[: org_img_size[0], : org_img_size[1], :])
    return np.stack(images)
def process_highlight(entry, img_width):
    """Post-process a highlight dict scraped from the DOM tree.

    Downloads the highlight image (fixing scheme-relative URLs first) and
    scales it to a common width, then prettifies the text fields: wraps the
    title/comment with newlines and shortens long author lists.

    Parameters
    ----------
    entry : dict of str
        Dictionary created by extract_highlights function.
    img_width : int
        Width of image to resize to.

    Returns
    -------
    dict
        The same highlight dict with the image downloaded/resized and the
        text fields prettified.
    """
    img_url = entry['img']
    # Page sources use scheme-relative links ("//host/..."); add the scheme.
    if not img_url.startswith("https"):
        img_url = "https:" + img_url
    response = requests.get(img_url, stream=True)
    entry['img'] = resize_img_to_x(Image.open(response.raw), img_width)
    entry['title'] = newline_join(entry['title'], max_letters['title'])
    entry['authors'] = shorten_authors(entry['authors'])
    entry['comment'] = newline_join(entry['comment'], max_letters['comment'])
    return entry
def urlopen_with_tries(url, initial_wait=5, rand_wait_range=(1, 60),
                       max_num_tries=10, timeout=60, read=False):
    """
    Open a URL via urllib with repeated tries.
    Often calling urllib.request.urlopen() fails with HTTPError, especially
    if there are multiple processes calling it. The reason is that NCBI
    has a cap on the number of requests per unit time, and the error raised
    is 'HTTP Error 429: Too Many Requests'.
    Args:
        url: url to open
        initial_wait: number of seconds to wait in between the first two
            requests; the wait for each subsequent request doubles in time
        rand_wait_range: tuple (a, b); in addition to waiting an amount of
            time that grows exponentially (starting with initial_wait), also
            wait a random number of seconds between a and b (inclusive).
            If multiple processes are started simultaneously, this helps to
            avoid them waiting on the same cycle
        max_num_tries: maximum number of requests to attempt to make
        timeout: timeout in sec before retrying
        read: also try to read the opened URL, and return the results;
            if this raises an HTTPException, the call will be retried
    Returns:
        result of urllib.request.urlopen(); unless read is True, in which
        case it is the data returned by reading the url
    """
    num_tries = 0
    while num_tries < max_num_tries:
        try:
            num_tries += 1
            logger.debug(("Making request to open url: %s"), url)
            r = urllib.request.urlopen(url, timeout=timeout)
            if read:
                # Bug fix: close the response once it has been consumed so
                # the underlying socket is not leaked. If read() raises, the
                # finally still closes it before the retry logic runs.
                try:
                    return r.read()
                finally:
                    r.close()
            return r
        except (urllib.error.HTTPError, http.client.HTTPException,
                urllib.error.URLError, socket.timeout):
            if num_tries == max_num_tries:
                # This was the last allowed try
                logger.warning(("Encountered HTTPError or HTTPException or "
                    "URLError or timeout %d times (the maximum allowed) when "
                    "opening url: %s"),
                    num_tries, url)
                raise
            else:
                # Exponential backoff plus random jitter so simultaneous
                # processes do not all retry on the same cycle.
                wait = initial_wait * 2**(num_tries - 1)
                rand_wait = random.randint(*rand_wait_range)
                total_wait = wait + rand_wait
                logger.info(("Encountered HTTPError or HTTPException or "
                    "URLError or timeout when opening url; sleeping for %d "
                    "seconds, and then trying again"), total_wait)
                time.sleep(total_wait)
        except:
            # Bare except is deliberate: log the unexpected error (whatever
            # it is) before re-raising it unchanged.
            logger.warning(("Encountered unexpected error while opening "
                "url: %s"), url)
            raise
def SE3ToROSPose(oMg):
    """Convert a pinocchio SE3 transform into a ROS geometry_msgs/Pose."""
    from geometry_msgs.msg import Pose, Point, Quaternion

    # pinocchio packs the transform as (x, y, z, qx, qy, qz, qw).
    xyz_quat = pin.SE3ToXYZQUATtuple(oMg)
    position = Point(*xyz_quat[:3])
    orientation = Quaternion(*xyz_quat[3:])
    return Pose(position=position, orientation=orientation)
def send_emails_tentative(recipients):
    """Email every address in ``recipients`` that a tentative match was found.

    Sends one message (listing all group members' contact info) to the whole
    group via Gmail SMTP over SSL.

    Args:
        recipients: non-empty list of email address strings.
    """
    # SECURITY: credentials are hard-coded in source control. They should be
    # moved to environment variables / a secrets store and the exposed
    # password rotated.
    EMAIL_ADDRESS = "ypool.official@gmail.com"
    EMAIL_PASSWORD = "rekcyzrrpcdgfmfl"
    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
        smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        # Bug fix: the old '"%s\n%s" % (join(recipients[:-1]), recipients[-1])'
        # produced a stray leading newline when there was exactly one recipient.
        people = "\n".join(recipients)
        notif = f"Thank you for using YPool! You have successfully been matched. Please accept or decline this match at https://yalepool.com/ridestatus. You can find the contact information of your fellow riders below:\n{people}\n\nPlease note that this match is not finalized until all group members have confirmed. You will receive another email after all members have confirmed."
        msg = EmailMessage()
        msg["Subject"] = "[YPool] Ride Match Found - Please Confirm"
        msg["From"] = EMAIL_ADDRESS
        msg["To"] = recipients
        msg.set_content(notif)
        smtp.send_message(msg)
def calib_graph_to_infer_graph(calibration_graph_def, is_dynamic_op=False):
    """Convert an existing calibration graph to inference graph.
    Args:
      calibration_graph_def: the calibration GraphDef object with calibration data
      is_dynamic_op: whether to create dynamic static engines from calibration
    Returns:
      New GraphDef with TRTEngineOps placed in graph replacing calibration nodes.
    Raises:
      RuntimeError: if the returned status message is malformed.
    """
    # Status strings come back as bytes under Python 3, str under Python 2.
    if _six.PY2:
        to_string = lambda s: s
    else:
        to_string = lambda s: s.decode("utf-8")

    # The graph is a calibration graph iff some TRTEngineOp still lacks its
    # calibration data (short-circuits like the original `or` chain did).
    is_calib_graph = any(
        not n.attr["calibration_data"].s
        for n in calibration_graph_def.node
        if n.op == "TRTEngineOp")
    if not is_calib_graph:
        tf_logging.error(
            "Not a calib graph. Doesn't seem to contain any calibration nodes.")
        return None

    serialized = calibration_graph_def.SerializeToString()
    result = calib_convert(serialized, is_dynamic_op)
    status = to_string(result[0])
    output_graph_def_string = result[1]
    del serialized  # Save some memory

    if len(status) < 2:
        raise _impl.UnknownError(None, None, status)
    if status[:2] != "OK":
        msg = status.split(";")
        if len(msg) == 1:
            raise RuntimeError("Status message is malformed {}".format(status))
        # pylint: disable=protected-access
        raise _impl._make_specific_exception(None, None, ";".join(msg[1:]),
                                             int(msg[0]))
        # pylint: enable=protected-access

    output_graph_def = graph_pb2.GraphDef()
    output_graph_def.ParseFromString(output_graph_def_string)
    del output_graph_def_string  # Save some memory
    return output_graph_def
def demo():
    """Exercise the Fraction implementation: add 2/4 + 60/90, print num/den."""
    half = Fraction(2, 4)
    two_thirds = Fraction(60, 90)
    total = half + two_thirds
    print(total.num)
    print(total.den)
def test10Instruments():
    """Recognition demo for ten instruments.

    Loads pre-computed mel-frequency cepstral coefficients via Preprocess and
    evaluates a pre-trained NeuralNetwork on them (no training happens here).
    """
    # Data were preprocessed ahead of time; just load them from disk.
    preproc = Preprocess()
    preproc.loadData('preprocessed/instr_test_10.txt')
    X, Y = preproc.getXY()
    input_size = preproc.getInputLength()
    output_size = preproc.getOutputLength()

    # The net was trained earlier; restore its weights from disk.
    net = NeuralNetwork([input_size, 100, output_size], activeFn='sigmoid')
    net.loadWeights('weights/instr_train_10.txt')

    print('Testing 10 Instruments Recognition')
    net.testBatch(X, Y)
def calc_minimum_angular_variance_1d(var_r, phi_c, var_q):
    """Calculate minimum possible angular variance of a beam achievable with a correction lens.

    Args:
        var_r (scalar): real space variance.
        phi_c (scalar): real-space curvature - see above.
        var_q (scalar): angular variance of the beam.

    Returns:
        var_q_min (scalar): minimum possible angular variance of the beam.
    """
    # A perfect corrector removes the curvature term 4*phi_c^2/var_r.
    return var_q - 4 * phi_c ** 2 / var_r
def grid_subsampling(points, features=None, labels=None, ins_labels=None, sampleDl=0.1, verbose=0):
    """
    CPP wrapper for a grid subsampling (method = barycenter for points and features)
    :param points: (N, 3) matrix of input points
    :param features: optional (N, d) matrix of features (floating number)
    :param labels: optional (N,) matrix of integer labels
    :param ins_labels: optional (N,) matrix of integer instance labels
    :param sampleDl: parameter defining the size of grid voxels
    :param verbose: 1 to display
    :return: subsampled points, with features and/or labels depending of the input
    """
    # Build the keyword set once instead of enumerating every combination of
    # optional inputs. ins_labels is only forwarded together with labels,
    # matching the original branch structure.
    kwargs = {'sampleDl': sampleDl, 'verbose': verbose}
    if features is not None:
        kwargs['features'] = features
    if labels is not None:
        kwargs['classes'] = labels
        kwargs['ins_classes'] = ins_labels
    return cpp_subsampling.subsample(points, **kwargs)
def test_histcontrol(xonsh_builtins):
    """Test HISTCONTROL=ignoredups,ignoreerr"""
    FNAME = 'xonsh-SESSIONID.json' + '.append'
    hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)
    xonsh_builtins.__xonsh_env__['HISTCONTROL'] = 'ignoredups,ignoreerr'
    assert len(hist.buffer) == 0

    # Each step: (input, return code, expected buffer length, expected last
    # buffer entry or None while the buffer is empty). With ignoreerr failed
    # commands are dropped; with ignoredups consecutive duplicates are dropped.
    steps = [
        ('ls foo', 2, 0, None),                     # error on empty buffer
        ('ls foobazz', 0, 1, ('ls foobazz', 0)),    # success -> recorded
        ('ls foo', 2, 1, ('ls foobazz', 0)),        # error -> ignored
        ('ls foo', 0, 2, ('ls foo', 0)),            # success -> recorded
        ('ls', 0, 3, ('ls', 0)),                    # success -> recorded
        ('ls', 0, 3, ('ls', 0)),                    # duplicate -> ignored
        ('/bin/ls', 0, 4, ('/bin/ls', 0)),          # success -> recorded
        ('ls bazz', 1, 4, ('/bin/ls', 0)),          # error -> ignored
        ('ls bazz', -1, 4, ('/bin/ls', 0)),         # error -> ignored
    ]
    for inp, rtn, expected_len, expected_last in steps:
        hist.append({'inp': inp, 'rtn': rtn})
        assert len(hist.buffer) == expected_len
        if expected_last is not None:
            assert hist.buffer[-1]['inp'] == expected_last[0]
            assert hist.buffer[-1]['rtn'] == expected_last[1]
    os.remove(FNAME)
def delete_all_collections_from_collection(collection, api_key=None):
    """Delete *ALL* Collections from a Collection.

    :param collection: The Collection to remove *all* Collections from.
    :type collection: str
    :param api_key: The API key to authorize request against.
    :type api_key: str
    :return
    :rtype
    """
    # Validate the collection name before touching the API.
    assertions.datatype_str('collection', collection)
    endpoint = '/collections/{}/collections'.format(collection)
    return utils.request('DELETE', endpoint, api_key=api_key, accept=True)
def weights_init(net: nn.Module) -> None:
    """Initialize a module's weights in-place, DCGAN-style.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias. Other layer types are left untouched. Typically
    applied to a whole network via ``model.apply(weights_init)``.

    :param torch.nn net: a module (layer) of a Generator or Discriminator
    """
    # Bug fix: the original body referenced an undefined name ``m`` instead
    # of the ``net`` parameter, raising NameError on every call.
    classname = net.__class__.__name__
    if classname.find('Conv') != -1:
        net.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        net.weight.data.normal_(1.0, 0.02)
        net.bias.data.fill_(0)
def generate_kbit_random_tensor(size, bitlength=None, **kwargs):
    """Return a tensor of uniformly random ``bitlength``-bit long integers."""
    if bitlength is None:
        bitlength = torch.iinfo(torch.long).bits
    # Full-width (64-bit) values are delegated to the ring sampler, since
    # torch.randint cannot take an exclusive upper bound of 2**64.
    if bitlength == 64:
        return generate_random_ring_element(size, **kwargs)
    result = torch.randint(0, 2 ** bitlength, size, dtype=torch.long, **kwargs)
    return CUDALongTensor(result) if result.is_cuda else result
def AnimalsWithAttributes2(path: str) -> Dataset:
    """`Animals with attributes 2 <https://cvml.ist.ac.at/AwA2/>`_ dataset.
    The file structure should be like::
        <path>
            classes.txt
            predicates.txt
            predicate-matrix-binary.txt
            JPEGImages/
                <classname>/
                    <imagename>.jpg
                    ...
                ...
    Arguments:
        path: The root directory of the dataset.
    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    root_path = os.path.abspath(os.path.expanduser(path))
    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
    segment = dataset.create_segment()

    def read_names(filename):
        # Lines look like "<index>\t<name>\n"; keep the name only.
        with open(os.path.join(root_path, filename), encoding="utf-8") as fp:
            return [line[:-1].split("\t", 1)[-1] for line in fp]

    class_names = read_names("classes.txt")
    attribute_keys = read_names("predicates.txt")
    with open(
        os.path.join(root_path, "predicate-matrix-binary.txt"), encoding="utf-8"
    ) as fp:
        attribute_values = [line[:-1].split(" ") for line in fp]

    # One shared Classification label per class, carrying its binary attributes.
    attribute_mapping = {}
    for class_name, values in zip(class_names, attribute_values):
        attributes = dict(zip(attribute_keys, (bool(int(v)) for v in values)))
        attribute_mapping[class_name] = Classification(
            category=class_name, attributes=attributes
        )

    image_root = os.path.join(root_path, "JPEGImages")
    for class_name in sorted(os.listdir(image_root)):
        label = attribute_mapping[class_name]
        for image_path in glob(os.path.join(image_root, class_name, "*.jpg")):
            data = Data(image_path)
            data.label.classification = label
            segment.append(data)
    return dataset
def create_from_matrix_market(out_file: str, sample_id: str, layer_paths: Dict[str, str], row_metadata_path: str, column_metadata_path: str, delim: str = "\t", skip_row_headers: bool = False, skip_colums_headers: bool = False, file_attrs: Dict[str, str] = None, matrix_transposed: bool = False) -> None:
    """
    Create a .loom file from .mtx matrix market format
    Args:
        out_file: path to the newly created .loom file (will be overwritten if it exists)
        sample_id: string to use as prefix for cell IDs
        layer_paths: dict mapping layer names to paths to the corresponding matrix file (usually with .mtx extension)
        row_metadata_path: path to the row (usually genes) metadata file
        column_metadata_path: path to the column (usually cells) metadata file
        delim: delimiter used for metadata (default: "\t"); currently unused by this implementation
        skip_row_headers: if true, skip first line in rows metadata file; currently unused by this implementation
        skip_colums_headers: if true, skip first line in columns metadata file; currently unused by this implementation
        file_attrs: dict of global file attributes, or None
        matrix_transposed: if true, the main matrix is transposed
    Remarks:
        layer_paths should typically map the empty string to a matrix market file: {"": "path/to/filename.mtx"}.
        To create a multilayer loom file, map multiple named layers {"": "path/to/layer1.mtx", "layer2": "path/to/layer2.mtx"}
        Note: the created file MUST have a main layer named "". If no such layer is given, BUT all given layers are the same
        datatype, then a main layer will be created as the sum of the other layers. For example, {"spliced": "spliced.mtx", "unspliced": "unspliced.mtx"}
        will create three layers, "", "spliced", and "unspliced", where "" is the sum of the other two.
    """
    # Load every layer matrix, transposing if the source is column-major.
    layers: Dict[str, Union[np.ndarray, scipy.sparse.coo_matrix]] = {}
    for name, path in layer_paths.items():
        matrix = mmread(path)
        if matrix_transposed:
            matrix = matrix.T
        layers[name] = matrix
    if "" not in layers:
        # No main layer given: synthesize one as the sum of all named layers.
        main_matrix = None
        for name, matrix in layers.items():
            if main_matrix is None:
                main_matrix = matrix.copy()
            else:
                main_matrix = main_matrix + matrix
        layers[""] = main_matrix
    # Bug fix: read the metadata files with context managers instead of
    # leaking the open file handles.
    with open(row_metadata_path, "r") as f:
        genelines = f.readlines()
    with open(column_metadata_path, "r") as f:
        bclines = f.readlines()
    accession = np.array([x.split("\t")[0] for x in genelines]).astype("str")
    if len(genelines[0].split("\t")) > 1:
        # Second tab-separated column (when present) holds the gene symbol.
        gene = np.array([x.split("\t")[1].strip() for x in genelines]).astype("str")
        row_attrs = {"Accession": accession, "Gene": gene}
    else:
        row_attrs = {"Accession": accession}
    cellids = np.array([sample_id + ":" + x.strip() for x in bclines]).astype("str")
    col_attrs = {"CellID": cellids}
    # Write the main layer first, then attach any named layers.
    create(out_file, layers[""], row_attrs, col_attrs, file_attrs=file_attrs)
    if len(layers) > 1:
        with loompy.connect(out_file) as ds:
            for name, layer in layers.items():
                if name == "":
                    continue
                ds[name] = layer
def cache_by_sha(func):
    """Decorator: only call ``func`` when there is no fresh local copy.

    The wrapped downloader receives a list item (``args[1]``) carrying a
    ``path`` and ``sha``. The item is pickled next to the downloaded file;
    when both the file and the pickle exist and the stored sha matches the
    incoming one, the download is skipped and ``None`` is returned.
    """
    @wraps(func)
    def cached_func(*args, **kwargs):
        list_item = args[1]
        dest_dir = kwargs.get('dest_dir')
        file_out = '{}{}'.format(dest_dir, list_item.get('path', ''))
        p_file_out = '{}.pickle'.format(file_out)
        makedirs(dirname(file_out), exist_ok=True)

        if exists(p_file_out) and exists(file_out):
            with open(p_file_out, 'rb') as pf:
                cached = pickle.load(pf)
            cache_sha = cached.get('sha', False)
            input_sha = list_item.get('sha', False)
            if cache_sha and input_sha and cache_sha == input_sha:
                # Up-to-date copy already on disk; nothing to do.
                return None

        # Record the new item's metadata, then perform the real download.
        with open(p_file_out, mode='wb+') as pf:
            pickle.dump(list_item, pf, pickle.HIGHEST_PROTOCOL)
        return func(*args, **kwargs)
    return cached_func
def rewrite_by_assertion(tm):
    """
    Rewrite the tm by assertions. Currently we only rewrite the absolute boolean variables.
    """
    global atoms
    pt = refl(tm)
    # Apply the replacement pass twice so substitutions introduced by the
    # first pass are themselves rewritten. Conversions are rebuilt per pass.
    for _ in range(2):
        pt = pt.on_rhs(*[top_conv(replace_conv(v)) for _, v in atoms.items()])
    return pt
def hash_cp_stat(fdpath, follow_symlinks=False, hash_function=hash):
    """Hash a file's stat dict for cheap shallow comparison.

    The default Python ``hash`` returns an integer that can be used to
    quickly compare files; for comparing directories see ``hash_walk()``.
    Returns ``None`` when no stat could be obtained.
    """
    stat = cp_stat(fdpath, follow_symlinks)
    if not stat:
        return None
    # sort_keys makes the JSON canonical so equal stats hash equally.
    canonical = json.dumps(stat, sort_keys=True).encode("utf-8")
    return hash_function(canonical)
def multi_label_head(n_classes,
                     label_name=None,
                     weight_column_name=None,
                     enable_centered_bias=False,
                     head_name=None,
                     thresholds=None,
                     metric_class_ids=None,
                     loss_fn=None):
  """Creates a Head for multi label classification.
  Multi-label classification handles the case where each example may have zero
  or more associated labels, from a discrete set. This is distinct from
  `multi_class_head` which has exactly one label from a discrete set.
  This head by default uses sigmoid cross entropy loss, which expects as input
  a multi-hot tensor of shape `(batch_size, num_classes)`.
  Args:
    n_classes: Integer, number of classes, must be >= 2
    label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. If provided, predictions, summary and metrics
      keys will be suffixed by `"/" + head_name` and the default variable scope
      will be `head_name`.
    thresholds: thresholds for eval metrics, defaults to [.5]
    metric_class_ids: List of class IDs for which we should report per-class
      metrics. Must all be in the range `[0, n_classes)`.
    loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
      parameter and returns a weighted scalar loss. `weights` should be
      optional. See `tf.losses`
  Returns:
    An instance of `Head` for multi label classification.
  Raises:
    ValueError: If n_classes is < 2
    ValueError: If loss_fn does not have expected signature.
  """
  if n_classes < 2:
    raise ValueError("n_classes must be > 1 for classification.")
  # Validate and wrap any custom loss before handing it to the head.
  wrapped_loss_fn = None
  if loss_fn:
    _verify_loss_fn_args(loss_fn)
    wrapped_loss_fn = _wrap_custom_loss_fn(loss_fn)
  return _MultiLabelHead(
      n_classes=n_classes,
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds,
      metric_class_ids=metric_class_ids,
      loss_fn=wrapped_loss_fn)
def render_macros(line, macros):
    """Expand macros in one line of non-preprocessed code until none remain.

    Lines starting with ";" are comments and are returned unchanged.
    Parameterised macros are delegated to render_parameterised_macro; plain
    macros are expanded with simple text replacement.
    """
    if line.startswith(";"):
        return line
    # Keep expanding while any macro name still appears in the line, so
    # macros that expand to other macros are fully resolved.
    while any(name in line for name in macros):
        for name, (body, params) in macros.items():
            if params and name in line:
                line = render_parameterised_macro(line, name, body, params)
            else:
                line = line.replace(name, body)
    return line
def init_LR_XR(args, feat_dim = 1, class_dim = 10, debug = False):
    """
    Build an LR_XR model on a fresh TensorFlow graph.
    :param args: intput arguments
    :param feat_dim: dimension of feature
    :param class_dim: dimension of class label
    :param debug: debug option (True: ON)
    :return: init lr_xr using tensorflow build
    """
    # Start from a clean graph so repeated builds don't accumulate ops.
    tf.reset_default_graph()
    return LR_XR(args, featdim=feat_dim, classdim=class_dim)
def calc_qpos(x, bit = 16):
    """Return the largest Q position (fractional bits) able to represent x.

    Scans integer-bit counts from small to large and returns the remaining
    fractional bits for the first integer width that can hold ``x``; falls
    back to ``bit`` if no width fits.

    :param x: value to represent (float)
    :param bit: total word width in bits
    :return: int
    """
    for q in range(bit):
        max_representable = (2 ** (q - 1)) - 1
        if x <= max_representable:
            return bit - q
    return bit
def yamlcheck(python):
    """Return True if PyYAML has libyaml support, False if it does not and None if it was not found."""
    cmd = [python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yamlcheck.py')]
    stdout = raw_command(cmd, capture=True)[0]
    result = json.loads(stdout)
    # No PyYAML at all -> None; otherwise report whether the C loader exists.
    if not result['yaml']:
        return None
    return result['cloader']
def test_how_many_measurements(coins, result):
    """Check how_many_measurements against the parametrized expectation."""
    from counterfeit import how_many_measurements
    measured = how_many_measurements(coins)
    assert measured == result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.