| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def categorical_onehot_binarizer(feature, feature_scale=None, prefix='columns', dtype='int8'):
"""Transform between iterable of iterables and a multilabel format, sample is simple categories.
Args:
feature: pd.Series, sample feature.
feature_scale: list, feature categories list.
prefix: String to append DataFrame column names.
dtype: default np.uint8. Data type for new columns. Only a single dtype is allowed.
Returns:
Dataframe for onehot binarizer.
"""
assert not any(feature.isnull()), "`feature' should be not contains NaN"
scale = feature.drop_duplicates().tolist()
if feature_scale is not None:
t = pd.get_dummies(feature.replace({i:'temp_str' for i in set.difference(set(scale), set(feature_scale))}), prefix=prefix, dtype=dtype)
if prefix+'_temp_str' in t.columns:
t = t.drop([prefix+'_temp_str'], axis=1)
for i in set.difference(set(feature_scale), set(scale)):
if prefix+'_'+str(i) not in t.columns:
t[prefix+'_'+str(i)] = 0
scale = feature_scale
t = t[[prefix+'_'+str(i) for i in feature_scale]]
else:
t = pd.get_dummies(feature, prefix=prefix, dtype=dtype)
t = t[[prefix+'_'+str(i) for i in scale]]
return t, scale
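# Usage sketch (illustrative addition, not part of the original snippet); assumes
# pandas is imported as pd and uses only the function defined above.
example_feature = pd.Series(['a', 'b', 'a', 'c'])
onehot, scale = categorical_onehot_binarizer(example_feature, prefix='color')
# onehot has the columns ['color_a', 'color_b', 'color_c']; scale == ['a', 'b', 'c']
# Reusing `scale` encodes new data with the same columns: the unseen category 'd'
# is dropped and the missing categories 'b' and 'c' become all-zero columns.
onehot_new, _ = categorical_onehot_binarizer(pd.Series(['a', 'd']), feature_scale=scale, prefix='color')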
| 17,000
|
def mtp_file_list2():
"""
Returns the output of 'mtp-files' as a Python list.
Uses subprocess.
"""
cmd_str = "sudo mtp-files"
    try:
        result = subprocess.check_output(shlex.split(cmd_str))
    except subprocess.CalledProcessError as e:
log.error("Could not execute: %s" % str(e))
return False, None
the_files = parse_files(result)
return True, the_files
| 17,001
|
def test_get_unexisting_hotel_01():
""" Test getting an unexisting hotel. """
last_hotel = client.get("/hotels/last/").json()
hotel_id = last_hotel["hotel"]["id"] + 1
expect = {
'detail': "Hotel not found",
}
response = client.get(f"/hotels/{hotel_id}")
assert response.status_code == 404
assert response.json() == expect
assert response.json() is not None
| 17,002
|
def test_harman_political():
"""Test module harman_political.py by downloading
harman_political.csv and testing shape of
extracted data has 8 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = harman_political(test_path)
try:
assert x_train.shape == (8, 8)
    except Exception:
        shutil.rmtree(test_path)
        raise
| 17,003
|
def add_selfloops(adj_matrix: sp.csr_matrix, fill_weight=1.0):
"""add selfloops for adjacency matrix.
>>>add_selfloops(adj, fill_weight=1.0) # return an adjacency matrix with selfloops
# return a list of adjacency matrices with selfloops
>>>add_selfloops(adj, adj, fill_weight=[1.0, 2.0])
Parameters
----------
adj_matrix: Scipy matrix or Numpy array or a list of them
Single or a list of Scipy sparse matrices or Numpy arrays.
fill_weight: float scalar, optional.
weight of self loops for the adjacency matrix.
Returns
-------
Single or a list of Scipy sparse matrix or Numpy matrices.
See also
----------
graphgallery.functional.AddSelfloops
"""
def _add_selfloops(adj, w):
adj = eliminate_selfloops(adj)
if w:
return adj + w * sp.eye(adj.shape[0], dtype=adj.dtype, format='csr')
else:
return adj
if gg.is_listlike(fill_weight):
return tuple(_add_selfloops(adj_matrix, w) for w in fill_weight)
else:
return _add_selfloops(adj_matrix, fill_weight)
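# Illustrative call (added example, not from the source); assumes numpy as np and
# scipy.sparse as sp, plus the graphgallery helpers (gg, eliminate_selfloops) used above.
example_adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
with_loops = add_selfloops(example_adj, fill_weight=1.0)            # one matrix, diagonal set to 1
two_variants = add_selfloops(example_adj, fill_weight=[1.0, 2.0])   # tuple, one matrix per weight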
| 17,004
|
def create_new_profile(ctx):
"""Create a configuration profile"""
create_profile(ctx.obj.config_dir)
config_files = load_config_profiles(ctx.obj.config_dir)
config = choose_profile(config_files)
es_client = setup_elasticsearch_client(config["okta_url"])
# Update the Dorothy class object with the values from the chosen configuration profile
ctx.obj.base_url = config["okta_url"]
ctx.obj.api_token = config["api_token"]
ctx.obj.profile_id = config["id"]
ctx.obj.es_client = es_client
| 17,005
|
def handle(event, _ctxt):
""" Handle the Lambda Invocation """
response = {
'message': '',
'event': event
}
ssm = boto3.client('ssm')
vpc_ids = ssm.get_parameter(Name=f'{PARAM_BASE}/vpc_ids')['Parameter']['Value']
vpc_ids = vpc_ids.split(',')
args = {
'vpc_ids': vpc_ids
}
try:
sg_name = ssm.get_parameter(Name=f'{PARAM_BASE}/secgrp_name')['Parameter']['Value']
args['managed_sg_name'] = sg_name
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'ParameterNotFound':
pass
else:
print(ex)
return response
run(**args)
return response
| 17,006
|
def _get_indice_map(chisqr_set):
"""Find element with lowest chisqr at each voxel """
#make chisqr array of dims [x,y,z,0,rcvr,chisqr]
chisqr_arr = np.stack(chisqr_set,axis=5)
indice_arr = np.argmin(chisqr_arr,axis=5)
return indice_arr
| 17,007
|
def _train(params, fpath, hyperopt=False):
"""
:param params: hyperparameters. Its structure is consistent with how search space is defined. See below.
:param fpath: Path or URL for the training data used with the model.
:param hyperopt: Use hyperopt for hyperparameter search during training.
:return: dict with fields 'loss' (scalar loss) and 'status' (success/failure status of run)
"""
max_depth, max_features, n_estimators = params
max_depth, max_features, n_estimators = (int(max_depth), float(max_features), int(n_estimators))
# Log all of our training parameters for this run.
pyver = sys.version_info
mlparams = {
'cudf_version': str(cudf.__version__),
'cuml_version': str(cuml.__version__),
'max_depth': str(max_depth),
'max_features': str(max_features),
'n_estimators': str(n_estimators),
'python_version': f"{pyver[0]}.{pyver[1]}.{pyver[2]}.{pyver[3]}",
}
mlflow.log_params(mlparams)
X_train, X_test, y_train, y_test = load_data(fpath)
mod = RandomForestClassifier(
max_depth=max_depth, max_features=max_features, n_estimators=n_estimators
)
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
acc = accuracy_score(y_test, preds)
mlflow.log_metric("accuracy", acc)
mlflow.sklearn.log_model(mod, "saved_models")
if not hyperopt:
return mod
return {"loss": acc, "status": STATUS_OK}
| 17,008
|
def noct_synthesis(spectrum, freqs, fmin, fmax, n=3, G=10, fr=1000):
"""Adapt input spectrum to nth-octave band spectrum
Convert the input spectrum to third-octave band spectrum
between "fc_min" and "fc_max".
Parameters
----------
spectrum : numpy.ndarray
amplitude rms of the one-sided spectrum of the signal, size (nperseg, nseg).
    freqs : numpy.ndarray
        Input frequency axis, size (nperseg) or (nperseg, nseg).
fmin : float
Min frequency band [Hz].
fmax : float
Max frequency band [Hz].
n : int
        Number of bands per octave.
G : int
System for specifying the exact geometric mean frequencies.
Can be base 2 or base 10.
fr : int
Reference frequency. Shall be set to 1 kHz for audible frequency
range, to 1 Hz for infrasonic range (f < 20 Hz) and to 1 MHz for
ultrasonic range (f > 31.5 kHz).
Outputs
-------
spec : numpy.ndarray
Third octave band spectrum of signal sig [dB re.2e-5 Pa], size (nbands, nseg).
fpref : numpy.ndarray
Corresponding preferred third octave band center frequencies, size (nbands).
"""
# Get filters center frequencies
fc_vec, fpref = _center_freq(fmin=fmin, fmax=fmax, n=n, G=G, fr=fr)
nband = len(fpref)
if len(spectrum.shape) > 1:
nseg = spectrum.shape[1]
spec = np.zeros((nband, nseg))
if len(freqs.shape) == 1:
freqs = np.tile(freqs, (nseg, 1)).T
else:
nseg = 1
spec = np.zeros((nband))
# Frequency resolution
# df = freqs[1:] - freqs[:-1]
# df = np.concatenate((df, [df[-1]]))
# Get upper and lower frequencies
fu = fc_vec * 2**(1/(2*n))
fl = fc_vec / 2**(1/(2*n))
for s in range(nseg):
for i in range(nband):
if len(spectrum.shape) > 1:
# index of the frequencies within the band
idx = np.where((freqs[:, s] >= fl[i]) & (freqs[:, s] < fu[i]))
spec[i, s] = np.sqrt(
np.sum(np.power(np.abs(spectrum[idx,s]), 2)))
else:
# index of the frequencies within the band
idx = np.where((freqs >= fl[i]) & (freqs < fu[i]))
spec[i] = np.sqrt(np.sum(np.abs(spectrum[idx])**2))
return spec, fpref
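# Sketch of a call (illustrative addition; assumes numpy as np and the `_center_freq`
# helper used above): a flat one-sided amplitude spectrum on a linear frequency axis,
# reduced to third-octave bands between 24 Hz and 12.5 kHz.
freqs = np.linspace(0, 20000, 4097)
spectrum = np.full_like(freqs, 2e-5)
spec, fpref = noct_synthesis(spectrum, freqs, fmin=24, fmax=12500, n=3)
# spec.shape == (len(fpref),): one rms value per preferred band centre frequency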
| 17,009
|
def get_model_prediction(model_input, stub, model_name='amazon_review', signature_name='serving_default'):
""" no error handling at all, just poc"""
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = signature_name
request.inputs['input_input'].CopyFrom(tf.make_tensor_proto(model_input))
response = stub.Predict.future(request, 5.0) # 5 seconds
return response.result().outputs["output"].float_val
| 17,010
|
def test_organization_4(base_settings):
"""No. 4 tests collection for Organization.
Test File: organization-example-mihealth.json
"""
filename = base_settings["unittest_data_dir"] / "organization-example-mihealth.json"
inst = organization.Organization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Organization" == inst.resource_type
impl_organization_4(inst)
    # test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "Organization" == data["resourceType"]
inst2 = organization.Organization(**data)
impl_organization_4(inst2)
| 17,011
|
def edit_image_data(item):
"""Edit image item data to file"""
try:
# Spyder 2
from spyderlib.widgets.objecteditor import oedit
except ImportError:
# Spyder 3
from spyder.widgets.variableexplorer.objecteditor import oedit
oedit(item.data)
| 17,012
|
def _make_feature_stats_proto(
common_stats, feature_name,
q_combiner,
num_values_histogram_buckets,
is_categorical, has_weights
):
"""Convert the partial common stats into a FeatureNameStatistics proto.
Args:
common_stats: The partial common stats associated with a feature.
feature_name: The name of the feature.
q_combiner: The quantiles combiner used to construct the quantiles
histogram for the number of values in the feature.
num_values_histogram_buckets: Number of buckets in the quantiles
histogram for the number of values per feature.
is_categorical: A boolean indicating whether the feature is categorical.
has_weights: A boolean indicating whether a weight feature is specified.
Returns:
A statistics_pb2.FeatureNameStatistics proto.
"""
common_stats_proto = statistics_pb2.CommonStatistics()
common_stats_proto.num_non_missing = common_stats.num_non_missing
common_stats_proto.num_missing = common_stats.num_missing
common_stats_proto.tot_num_values = common_stats.total_num_values
if common_stats.num_non_missing > 0:
common_stats_proto.min_num_values = common_stats.min_num_values
common_stats_proto.max_num_values = common_stats.max_num_values
common_stats_proto.avg_num_values = (
common_stats.total_num_values / common_stats.num_non_missing)
# Add num_values_histogram to the common stats proto.
num_values_quantiles = q_combiner.extract_output(
common_stats.num_values_summary)
histogram = quantiles_util.generate_quantiles_histogram(
num_values_quantiles, common_stats.min_num_values,
common_stats.max_num_values, common_stats.num_non_missing,
num_values_histogram_buckets)
common_stats_proto.num_values_histogram.CopyFrom(histogram)
# Add weighted common stats to the proto.
if has_weights:
weighted_common_stats_proto = statistics_pb2.WeightedCommonStatistics(
num_non_missing=common_stats.weighted_num_non_missing,
num_missing=common_stats.weighted_num_missing,
tot_num_values=common_stats.weighted_total_num_values)
if common_stats.weighted_num_non_missing > 0:
weighted_common_stats_proto.avg_num_values = (
common_stats.weighted_total_num_values /
common_stats.weighted_num_non_missing)
common_stats_proto.weighted_common_stats.CopyFrom(
weighted_common_stats_proto)
# Create a new FeatureNameStatistics proto.
result = statistics_pb2.FeatureNameStatistics()
result.name = feature_name
# Set the feature type.
# If we have a categorical feature, we preserve the type to be the original
# INT type. Currently we don't set the type if we cannot infer it, which
# happens when all the values are missing. We need to add an UNKNOWN type
# to the stats proto to handle this case.
if is_categorical:
result.type = statistics_pb2.FeatureNameStatistics.INT
elif common_stats.type is None:
# If a feature is completely missing, we assume the type to be STRING.
result.type = statistics_pb2.FeatureNameStatistics.STRING
else:
result.type = common_stats.type
# Copy the common stats into appropriate numeric/string stats.
# If the type is not set, we currently wrap the common stats
# within numeric stats.
if (result.type == statistics_pb2.FeatureNameStatistics.STRING or
is_categorical):
# Add the common stats into string stats.
string_stats_proto = statistics_pb2.StringStatistics()
string_stats_proto.common_stats.CopyFrom(common_stats_proto)
result.string_stats.CopyFrom(string_stats_proto)
else:
# Add the common stats into numeric stats.
numeric_stats_proto = statistics_pb2.NumericStatistics()
numeric_stats_proto.common_stats.CopyFrom(common_stats_proto)
result.num_stats.CopyFrom(numeric_stats_proto)
return result
| 17,013
|
def serial_chunked_download(
d_obj: Download,
end_action: Optional[Action] = None,
session: Optional[requests.Session] = None,
*,
progress_data: Optional[DownloadProgressSave] = None,
start: int = 0,
end: int = 0,
chunk_id: Optional[int] = 0,
) -> bool:
"""Downloads a file using a single connection getting a chunk at a time
"""
splits = None
if start == 0 and end == 0:
if progress_data is None:
# new download
d_obj.init_size()
d_obj.init_file([Chunk(0, d_obj.size - 1, -1)])
nb_split: int = 0
# TODO: ugly here
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(d_obj.size - 1, nb_split)
else:
d_obj.init_file()
# TODO: ugly here
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(d_obj.size - 1, nb_split,
progress_data.chunks[0].last)
else:
        # coming from serial_parallel_chunked
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(end, nb_split, start)
for split in splits:
get_chunk(d_obj.url, split, d_obj, chunk_id, session)
if d_obj.has_error or d_obj.is_stopped():
return False
if not d_obj.is_paused():
if end_action is not None:
end_action()
if end == 0 and start == 0:
d_obj.finish()
return True
| 17,014
|
def get_device():
"""
Returns the id of the current device.
"""
c_dev = c_int_t(0)
safe_call(backend.get().af_get_device(c_pointer(c_dev)))
return c_dev.value
| 17,015
|
def plotLatentsSweep(yhat,nmodels=1):
"""plotLatentsSweep(yhat):
plots model latents and a subset of the corresponding stimuli,
generated from sweepCircleLatents()
---e.g.,---
yhat, x = sweepCircleLatents(vae)
    plotLatentsSweep(yhat)
alternatively,
plotLatentsSweep(sweepCircleLatents(vae))
"""
# Initialization
if type(yhat) is tuple:
yhat = yhat[0]
# Start a-plottin'
fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col')
for latentdim in range(4):
if nmodels > 1:
for imodel in range(nmodels):
plt.sca(ax[imodel,latentdim])
plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy())
# ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio())
ax[imodel,latentdim].spines['top'].set_visible(False)
ax[imodel,latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[imodel,latentdim].spines['left'].set_visible(False)
# ax[imodel,latentdim].set_yticklabels([])
ax[imodel,latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[imodel,latentdim].spines['bottom'].set_visible(False)
ax[imodel,latentdim].set_xticklabels([])
ax[imodel,latentdim].tick_params(axis='x', length=0)
else:
imodel=0
plt.sca(ax[latentdim])
plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy())
ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio())
ax[latentdim].spines['top'].set_visible(False)
ax[latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[latentdim].spines['left'].set_visible(False)
ax[latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[latentdim].spines['bottom'].set_visible(False)
ax[latentdim].set_xticklabels([])
ax[latentdim].tick_params(axis='x', length=0)
return fig, ax
| 17,016
|
def calc_lipophilicity(seq, method="mean"):
""" Calculates the average hydrophobicity of a sequence according to the Hessa biological scale.
Hessa T, Kim H, Bihlmaier K, Lundin C, Boekel J, Andersson H, Nilsson I, White SH, von Heijne G. Nature. 2005 Jan 27;433(7024):377-81
The Hessa scale has been calculated empirically, using the glycosylation assay of TMD insertion.
Negative values indicate hydrophobic amino acids with favourable membrane insertion.
Other hydrophobicity scales are in the settings folder. They can be generated as follows.
hydrophob_scale_path = r"D:\korbinian\korbinian\settings\hydrophobicity_scales.xlsx"
df_hs = pd.read_excel(hydrophob_scale_path, skiprows=2)
df_hs.set_index("1aa", inplace=True)
dict_hs = df_hs.Hessa.to_dict()
hessa_scale = np.array([value for (key, value) in sorted(dict_hs.items())])
['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
'W', 'Y']
Parameters:
-----------
seq : string
Sequence to be analysed. Gaps (-) and unknown amino acids (x) should be ignored.
method : string
Method to be used to average the hydrophobicity values over the whole sequence.
The hydrophobicity score is positive for polar/charged aa, negative for hydrophobic aa.
"sum" will return the sum of the hydrophobicity scores over the sequence
"mean" will return the mean of the hydrophobicity scores over the sequence
Returns:
--------
mean hydrophobicity value for the sequence entered
Usage:
------
from korbinian.utils import calc_lipophilicity
# for a single sequence
s = "SAESVGEVYIKSTETGQYLAG"
calc_lipophilicity(s)
# for a series of sequences
TMD_ser = df2.TM01_SW_match_seq.dropna()
hydro = TMD_ser.apply(lambda x : calc_lipophilicity(x))
Notes:
------
%timeit results:
for a 20aa seq: 136 µs per loop
for a pandas series with 852 tmds: 118 ms per loop
"""
# hydrophobicity scale
hessa_scale = np.array([0.11, -0.13, 3.49, 2.68, -0.32, 0.74, 2.06, -0.6, 2.71,
-0.55, -0.1, 2.05, 2.23, 2.36, 2.58, 0.84, 0.52, -0.31,
0.3, 0.68])
# convert to biopython analysis object
analysed_seq = ProteinAnalysis(seq)
# biopython count_amino_acids returns a dictionary.
aa_counts_dict = analysed_seq.count_amino_acids()
# get the number of AA residues used to calculated the hydrophobicity
# this is not simply the sequence length, as the sequence could include gaps or non-natural AA
aa_counts_excluding_gaps = np.array(list(aa_counts_dict.values()))
number_of_residues = aa_counts_excluding_gaps.sum()
# if there are no residues, don't attempt to calculate a mean. Return np.nan.
if number_of_residues == 0:
return np.nan
# convert dictionary to array, sorted by aa
aa_counts_arr = np.array([value for (key, value) in sorted(aa_counts_dict.items())])
multiplied = aa_counts_arr * hessa_scale
sum_of_multiplied = multiplied.sum()
if method == "mean":
return sum_of_multiplied / number_of_residues
if method == "sum":
return sum_of_multiplied
| 17,017
|
def get_enrollments(username, include_inactive=False):
"""Retrieves all the courses a user is enrolled in.
    Takes a user and retrieves all relative enrollments. Includes information regarding how the user is enrolled
    in the course.
Args:
username: The username of the user we want to retrieve course enrollment information for.
include_inactive (bool): Determines whether inactive enrollments will be included
Returns:
A list of enrollment information for the given user.
Examples:
>>> get_enrollments("Bob")
[
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
},
{
"created": "2014-10-25T20:18:00Z",
"mode": "verified",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/edX-Insider/2014T2",
"course_name": "edX Insider Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": True
}
}
]
"""
return _data_api().get_course_enrollments(username, include_inactive)
| 17,018
|
async def connections_accept_request(request: web.BaseRequest):
"""
Request handler for accepting a stored connection request.
Args:
request: aiohttp request object
Returns:
The resulting connection record details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
connection_id = request.match_info["id"]
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
raise web.HTTPNotFound()
connection_mgr = ConnectionManager(context)
my_endpoint = request.query.get("my_endpoint") or None
request = await connection_mgr.create_response(connection, my_endpoint)
await outbound_handler(request, connection_id=connection.connection_id)
return web.json_response(connection.serialize())
| 17,019
|
def test_list_of_tasks():
"""
Test that a list of tasks can be set as a switch condition
"""
with Flow(name="test") as flow:
condition = Condition()
true_branch = [SuccessTask(), SuccessTask()]
false_branch = SuccessTask()
ifelse(condition, true_branch, false_branch)
with prefect.context(CONDITION=True):
state = flow.run()
for t in true_branch:
assert isinstance(state.result[t], Success)
assert isinstance(state.result[false_branch], Skipped)
with prefect.context(CONDITION=False):
state = flow.run()
for t in true_branch:
            # the tasks in the list ran because they have no upstream dependencies.
assert isinstance(state.result[t], Success)
list_task = next(
t for t in flow.tasks if isinstance(t, prefect.tasks.core.collections.List)
)
# but the list itself skipped
assert isinstance(state.result[list_task], Skipped)
assert isinstance(state.result[false_branch], Success)
| 17,020
|
def nearest(x, base=1.):
"""
Round the inputs to the nearest base. Beware, due to the nature of
    floating point arithmetic, this may not work as you expect.
    INPUTS
    x : input value or array
OPTIONS
base : number to which x should be rounded
"""
return np.round(x/base)*base
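# Worked example (illustrative; assumes numpy as np): rounding to the nearest 0.5 and 0.25.
print(nearest(2.7, base=0.5))                    # np.round(5.4) * 0.5 -> 2.5
print(nearest(np.array([0.2, 1.3]), base=0.25))  # -> [0.25 1.25]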
| 17,021
|
def sort_nesting(list1, list2):
"""Takes a list of start points and end points and sorts the second list according to nesting"""
temp_list = []
while list2 != temp_list:
temp_list = list2[:] # Make a copy of list2 instead of reference
for i in range(1, len(list1)):
if list2[i] > list2[i-1] and list1[i] < list2[i-1]:
list2[i-1], list2[i] = list2[i], list2[i-1]
return list2
| 17,022
|
def confusion_matrix(Y_hat, Y, norm=None):
"""
Calculate confusion matrix.
Parameters
----------
Y_hat : array-like
List of data labels.
Y : array-like
List of target truth labels.
norm : {'label', 'target', 'all', None}, default=None
Normalization on resulting matrix. Must be one of:
- 'label' : normalize on labels (columns).
- 'target' : normalize on targets (rows).
- 'all' : normalize on the entire matrix.
- None : No normalization.
Returns
-------
matrix : ndarray, shape=(target_classes, label_classes)
Confusion matrix with target classes as rows and
label classes as columns. Classes are in sorted order.
"""
target_classes = sorted(set(Y))
label_classes = sorted(set(Y_hat))
target_dict = {target_classes[k]: k for k in range(len(target_classes))}
label_dict = {label_classes[k]: k for k in range(len(label_classes))}
matrix = np.zeros((len(target_classes), len(label_classes)))
for label, target in zip(Y_hat, Y):
matrix[target_dict[target],label_dict[label]] += 1
if norm == 'label':
matrix /= np.max(matrix, axis=0).reshape((1,matrix.shape[1]))
elif norm == 'target':
matrix /= np.max(matrix, axis=1).reshape((matrix.shape[0],1))
elif norm == 'all':
matrix /= np.max(matrix)
elif norm is not None:
raise ValueError("Norm must be one of {'label', 'target', 'all', None}")
    if norm is None:
        return matrix.astype(int)
    return matrix
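# Small worked example (illustrative; assumes numpy as np): rows are targets,
# columns are predicted labels, classes in sorted order.
y_true = [0, 0, 1, 1]
y_pred = [0, 1, 1, 1]
print(confusion_matrix(y_pred, y_true))
# [[1 1]
#  [0 2]]   one true 0 predicted as 0 and one as 1; both true 1s predicted as 1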
| 17,023
|
def reroot(original_node: Tree, new_node: Tree):
"""
:param original_node: the node in the original tree
:param new_node: the new node to give children
    new_node should have as children the relations of original_node, except for new_node's parent
"""
new_node.children = [
Tree(relation.label)
for relation in original_node.relations
if relation.label != new_node.parent_label
]
for relation in new_node.children:
reroot(
original_node.find(relation.label),
relation,
)
return new_node
| 17,024
|
def test_precursormz_match_tolerance2_array():
"""Test with array and tolerance=2."""
spectrum_1 = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"precursor_mz": 100.0})
spectrum_2 = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"precursor_mz": 101.0})
spectrum_a = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"precursor_mz": 99.0})
spectrum_b = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"precursor_mz": 98.0})
similarity_score = PrecursorMzMatch(tolerance=2.0)
scores = similarity_score.matrix([spectrum_1, spectrum_2],
[spectrum_a, spectrum_b])
assert numpy.all(scores == numpy.array([[True, True],
[True, False]])), "Expected different scores."
| 17,025
|
def thue_morse_sub(n):
""" generate Thue-Morse sequence using substitution system:
0 -> 01
1 -> 10
See http://mathworld.wolfram.com/Thue-MorseSequence.html"""
pass
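# One possible implementation of the substitution system described in the docstring
# (a sketch added for illustration, not the author's original code): start from [0]
# and replace 0 -> 0 1 and 1 -> 1 0 for n iterations.
def thue_morse_sub_sketch(n):
    """Return the Thue-Morse sequence after n substitution steps as a list of bits."""
    seq = [0]
    for _ in range(n):
        seq = [bit for symbol in seq for bit in ((0, 1) if symbol == 0 else (1, 0))]
    return seq

# thue_morse_sub_sketch(3) -> [0, 1, 1, 0, 1, 0, 0, 1]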
| 17,026
|
def test_calculate_results(scoring, expected_results_columns):
"""Test the calculation of the experiment's results."""
MSCV.set_params(scoring=scoring)
scoring_cols = _define_binary_experiment_parameters(MSCV)[1]
results = _calculate_results(MSCV, DATASETS, scoring_cols)
assert len(results) == len(ParameterGrid(MSCV.param_grids)) * len(DATASETS)
assert expected_results_columns == results.columns.get_level_values(0).tolist()
| 17,027
|
def update_users():
"""Sync LDAP users with local users in the DB."""
log_uuid = str(uuid.uuid4())
start_time = time.time()
patron_cls = current_app_ils.patron_cls
patron_indexer = PatronBaseIndexer()
invenio_users_updated_count = 0
invenio_users_added_count = 0
# get all CERN users from LDAP
ldap_client = LdapClient()
ldap_users = ldap_client.get_primary_accounts()
_log_info(
log_uuid,
"users_fetched_from_ldap",
dict(users_fetched=len(ldap_users)),
)
if not ldap_users:
return 0, 0, 0
# create a map by employeeID for fast lookup
ldap_users_emails = set()
ldap_users_map = {}
for ldap_user in ldap_users:
if "mail" not in ldap_user:
_log_info(
log_uuid,
"missing_email",
dict(employee_id=ldap_user_get(ldap_user, "employeeID")),
is_error=True,
)
continue
email = ldap_user_get_email(ldap_user)
if email not in ldap_users_emails:
ldap_person_id = ldap_user_get(ldap_user, "employeeID")
ldap_users_map[ldap_person_id] = ldap_user
ldap_users_emails.add(email)
_log_info(
log_uuid,
"users_cached",
)
remote_accounts = RemoteAccount.query.all()
_log_info(
log_uuid,
"users_fetched_from_invenio",
dict(users_fetched=len(remote_accounts)),
)
# get all Invenio remote accounts and prepare a list with needed info
invenio_users = []
for remote_account in remote_accounts:
invenio_users.append(
dict(
remote_account_id=remote_account.id,
remote_account_person_id=remote_account.extra_data[
"person_id"
],
remote_account_department=remote_account.extra_data.get(
"department"
),
user_id=remote_account.user_id,
)
)
_log_info(
log_uuid,
"invenio_users_prepared",
)
# STEP 1
# iterate on all Invenio users first, to update outdated info from LDAP
# or delete users if not found in LDAP.
#
# Note: cannot iterate on the db query here, because when a user is
# deleted, db session will expire, causing a DetachedInstanceError when
# fetching the user on the next iteration
for invenio_user in invenio_users:
# use `dict.pop` to remove from `ldap_users_map` the users found
# in Invenio, so the remaining will be the ones to be added later on
ldap_user = ldap_users_map.pop(
invenio_user["remote_account_person_id"], None
)
if ldap_user:
# the imported LDAP user is already in the Invenio db
ldap_user_display_name = ldap_user_get(ldap_user, "displayName")
user_id = invenio_user["user_id"]
user_profile = UserProfile.query.filter_by(
user_id=user_id
).one()
invenio_full_name = user_profile.full_name
ldap_user_department = ldap_user_get(ldap_user, "department")
invenio_user_department = invenio_user["remote_account_department"]
user = User.query.filter_by(id=user_id).one()
ldap_user_email = ldap_user_get_email(ldap_user)
invenio_user_email = user.email
has_changed = (
ldap_user_display_name != invenio_full_name
or ldap_user_department != invenio_user_department
or ldap_user_email != invenio_user_email
)
if has_changed:
_update_invenio_user(
invenio_remote_account_id=invenio_user[
"remote_account_id"
],
invenio_user_profile=user_profile,
invenio_user=user,
ldap_user=ldap_user,
)
_log_info(
log_uuid,
"department_updated",
dict(
user_id=invenio_user["user_id"],
previous_department=invenio_user_department,
new_department=ldap_user_department,
),
)
# re-index modified patron
patron_indexer.index(patron_cls(invenio_user["user_id"]))
invenio_users_updated_count += 1
db.session.commit()
_log_info(
log_uuid,
"invenio_users_updated_and_deleted",
)
# STEP 2
# Import any new LDAP user not in Invenio yet, the remaining
new_ldap_users = ldap_users_map.values()
if new_ldap_users:
importer = LdapUserImporter()
for ldap_user in new_ldap_users:
user_id = importer.import_user(ldap_user)
email = ldap_user_get_email(ldap_user)
employee_id = ldap_user_get(ldap_user, "employeeID")
_log_info(
log_uuid,
"user_added",
dict(email=email, employee_id=employee_id),
)
# index newly added patron
patron_indexer.index(patron_cls(user_id))
invenio_users_added_count += 1
db.session.commit()
_log_info(
log_uuid,
"invenio_users_created",
)
total_time = time.time() - start_time
_log_info(log_uuid, "task_completed", dict(time=total_time))
return (
len(ldap_users),
invenio_users_updated_count,
invenio_users_added_count,
)
| 17,028
|
def GenerateDictionary(input_file, ngraph_size):
"""Generates the G2P (G2NG) dictionary."""
words = set()
with io.open(input_file, mode='rt', encoding='utf-8') as text:
for line in text:
for word in line.split():
words.add(word)
words = list(words)
words.sort()
    if 'SENTENCE-END' in words:
        words.remove('SENTENCE-END')
    for word in words:
        word = word.replace('_', '')
phoneme = SimpleG2P('_%s_' % word, ngraph_size)
stdout.write('%s\t%s\n' % (word.lower(), phoneme.lower()))
| 17,029
|
def gen_maven_artifact(
name,
artifact_name,
artifact_id,
artifact_target,
javadoc_srcs,
packaging = "jar",
artifact_target_libs = [],
is_extension = False):
"""Generates files required for a maven artifact.
Args:
name: The name associated with various output
      artifact_name: The name of the generated artifact in maven, e.g. "Google Guice Core Library".
artifact_id: The id of the generated artifact in maven, e.g. "guice".
artifact_target: The target containing the actual maven target.
artifact_target_libs: The list of dependencies that should be packaged together with artifact_target,
corresponding to the list of targets exported by artifact_target.
javadoc_srcs: Source files used to generate the Javadoc maven artifact.
packaging: The packaging used for the artifact, default is "jar".
is_extension: Whether the maven artifact is a Guice extension or not.
"""
_validate_target_libs_rule(
name = name + "_validate_target_libs",
target = artifact_target,
actual_target_libs = artifact_target_libs,
)
group_id = "com.google.inject"
if is_extension:
group_id = "com.google.inject.extensions"
    # TODO: get artifact_target_libs from bazel and remove the need to pass this in explicitly.
artifact_targets = [artifact_target] + artifact_target_libs
pom_file(
name = "pom",
targets = artifact_targets,
preferred_group_ids = [
"com.google.inject",
"com.google",
],
template_file = "//:pom-template.xml",
substitutions = {
"{artifact_name}": artifact_name,
"{artifact_id}": artifact_id,
"{artifact_group_id}": group_id,
"{packaging}": packaging,
},
)
if packaging == "jar":
jarjar_library(
name = artifact_id,
jars = artifact_targets,
)
jarjar_library(
name = artifact_id + "-src",
jars = [_src_jar(dep) for dep in artifact_targets],
)
if javadoc_srcs:
javadoc_library(
name = artifact_id + "-javadoc",
srcs = javadoc_srcs,
testonly = 1,
deps = artifact_targets,
)
| 17,030
|
def main():
"""
Main - entry point for service node
"""
log.info("Service node initializing")
app = create_app()
# run app using either socket or tcp
sn_socket = config.getCmdLineArg("sn_socket")
if sn_socket:
# use a unix domain socket path
# first, make sure the socket does not already exist
log.info(f"Using socket {sn_socket}")
try:
os.unlink(sn_socket)
except OSError:
if os.path.exists(sn_socket):
raise
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(sn_socket)
try:
run_app(app, sock=s, handle_signals=True)
except KeyboardInterrupt:
print("got keyboard interrupt")
except SystemExit:
print("got system exit")
except Exception as e:
print(f"got exception: {e}s")
# loop = asyncio.get_event_loop()
# loop.run_until_complete(release_http_client(app))
log.info("run_app done")
# close socket?
else:
# Use TCP connection
port = int(config.get("sn_port"))
log.info(f"run_app on port: {port}")
run_app(app, port=port)
log.info("Service node exiting")
| 17,031
|
def test_help(tool_cls):
""" Check that all command line tools have a --help option that explains the usage.
As per argparse default, this help text always starts with `usage:`.
"""
tool = tool_cls()
try:
tool.run("--help")
except SystemExit as e:
pass
if not hasattr(sys.stdout, "getvalue"):
raise Exception('stdout not captured in test.')
output = sys.stdout.getvalue().strip()
assert output.startswith('usage:')
| 17,032
|
def opt(dfs, col='new', a=1, b=3, rlprior=None, clprior=None):
"""Returns maximum likelihood estimates of the model parameters `r` and `c`.
The optimised parameters `r` and `c` refer to the failure count of the
model's negative binomial likelihood function and the variance factor
introduced by each predictive prior, respectively.
Args:
dfs: a data frame or list/tuple of data frames containing counts.
col: the column containing daily new infection counts.
a, b: parameters of the initial predictive beta prime prior.
rlprior, clprior: log density functions to be used as priors on `r` and
`c` (uniform by default).
"""
def f(r):
return _optc(dfs, r, col, a, b, rlprior, clprior, copy=False)[1]
if not isinstance(dfs, list) and not isinstance(dfs, tuple):
dfs = [dfs]
dfs = [df.copy() for df in dfs] # create copies once, before optimising.
# We double r until we pass a local minimum, and then optimize the two
# regions that might contain that minimum separately.
p, r = 1, 2
while f(p) > f(r):
p, r = r, 2*r
r1, l1 = _cvxsearch(f, p//2, p)
r2, l2 = _cvxsearch(f, p, r)
if l1 <= l2:
return r1, _optc(dfs, r1, col, a, b, rlprior, clprior, copy=False)[0]
else:
return r2, _optc(dfs, r2, col, a, b, rlprior, clprior, copy=False)[0]
| 17,033
|
def unpackage_datasets(dirname, dataobject_format=False):
"""
    This function unpackages all sub-packages (i.e. train, valid, test).
You should use this function if you want everything
args:
dirname: directory path that has the train, valid, test folders in it
dataobject_format: used for dataobject format
"""
with open(join(dirname, 'room-data.json')) as f:
lm = json.load(f)['Landmarks']
res = {s: unpackage_dataset(join(dirname, s), dataobject_format) for s in ['train', 'valid', 'test']}
res['landmarks'] = lm
return res
| 17,034
|
def test_parse_entry_brackets1() -> None:
""" """
entry = "{house,building, skyscraper, booth, hovel, tower, grandstand}"
classes = parse_entry(entry)
gt_classes = [
"house",
"building",
"skyscraper",
"booth",
"hovel",
"tower",
"grandstand",
]
assert classes == gt_classes
| 17,035
|
def get_command_view(
is_running: bool = False,
stop_requested: bool = False,
commands_by_id: Sequence[Tuple[str, cmd.Command]] = (),
) -> CommandView:
"""Get a command view test subject."""
state = CommandState(
is_running=is_running,
stop_requested=stop_requested,
commands_by_id=OrderedDict(commands_by_id),
)
return CommandView(state=state)
| 17,036
|
def test_read_python_code(mock_bytes_to_intel_hex, mock_read_flash):
"""."""
python_code_hex = "\n".join(
[
":020000040003F7",
":10E000004D509600232041646420796F7572205032",
":10E010007974686F6E20636F646520686572652E21",
":10E0200020452E672E0A66726F6D206D6963726FD0",
":10E0300062697420696D706F7274202A0A7768694A",
":10E040006C6520547275653A0A202020206469733B",
":10E05000706C61792E7363726F6C6C282748656CE5",
":10E060006C6F2C20576F726C642127290A202020A6",
":10E0700020646973706C61792E73686F7728496DBD",
":10E080006167652E4845415254290A20202020739B",
":10E090006C656570283230303029000000000000C7",
":10E0A000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF80",
]
)
python_code = "\n".join(
[
"# Add your Python code here. E.g.",
"from microbit import *",
"while True:",
" display.scroll('Hello, World!')",
" display.show(Image.HEART)",
" sleep(2000)",
]
)
mock_bytes_to_intel_hex.return_value = python_code_hex
result = cmds.read_python_code()
assert result == python_code
| 17,037
|
def _split_by_size(in_fastq, split_size, out_dir):
"""Split FASTQ files by a specified number of records.
"""
existing = _find_current_split(in_fastq, out_dir)
if len(existing) > 0:
return existing
def new_handle(num):
base, ext = os.path.splitext(os.path.basename(in_fastq))
fname = os.path.join(out_dir, "{base}_{num}{ext}".format(
base=base, num=num, ext=ext))
return fname, open(fname, "w")
cur_index = 0
cur_count = 0
out_fname, out_handle = new_handle(cur_index)
out_files = [out_fname]
with open(in_fastq) as in_handle:
for name, seq, qual in FastqGeneralIterator(in_handle):
if cur_count < split_size:
cur_count += 1
else:
cur_count = 0
cur_index += 1
out_handle.close()
out_fname, out_handle = new_handle(cur_index)
out_files.append(out_fname)
out_handle.write("@%s\n%s\n+\n%s\n" % (name, seq, qual))
out_handle.close()
return out_files
| 17,038
|
def get_node_ip_addresses(ipkind):
"""
Gets a dictionary of required IP addresses for all nodes
Args:
ipkind: ExternalIP or InternalIP or Hostname
Returns:
        dict: Internal or External IP addresses keyed off of node name
"""
ocp = OCP(kind=constants.NODE)
masternodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
workernodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
nodes = masternodes + workernodes
return {
node["metadata"]["name"]: each["address"]
for node in nodes
for each in node["status"]["addresses"]
if each["type"] == ipkind
}
| 17,039
|
def code_block(
code: str = None,
path: str = None,
language_id: str = None,
title: str = None,
caption: str = None
):
"""
Adds a block of syntax highlighted code to the display from either
the supplied code argument, or from the code file specified
by the path argument.
:param code:
A string containing the code to be added to the display
:param path:
A path to a file containing code to be added to the display
:param language_id:
The language identifier that indicates what language should
be used by the syntax highlighter. Valid values are any of the
languages supported by the Pygments highlighter.
:param title:
If specified, the code block will include a title bar with the
value of this argument
:param caption:
If specified, the code block will include a caption box below the code
that contains the value of this argument
"""
environ.abort_thread()
r = _get_report()
r.append_body(render.code_block(
block=code,
path=path,
language=language_id,
title=title,
caption=caption
))
r.stdout_interceptor.write_source('{}\n'.format(code))
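# Example call (illustrative addition; relies on the report machinery used above being
# active in a Cauldron-style step): display an inline snippet with a title and caption.
code_block(
    code="print('hello, world')",
    language_id="python",
    title="Hello",
    caption="A one-line example rendered with syntax highlighting.",
)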
| 17,040
|
def fill_cache(msg="Fetching cache"):
"""Fill the cache with the packages."""
import os # pylint: disable=import-outside-toplevel
import requests # pylint: disable=import-outside-toplevel
from rich.progress import Progress # pylint: disable=import-outside-toplevel
all_packages_url = f"{base_url}/simple/"
cache_path = os.path.join(os.path.dirname(__file__), "cache")
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(os.path.dirname(__file__), "cache", "packages.txt")
with Progress(transient=True) as progress:
response = requests.get(all_packages_url, stream=True)
response_data = ""
content_length = response.headers.get("content-length")
if content_length is not None:
total_length = int(content_length)
task = progress.add_task(msg, total=total_length)
downloaded = 0
for data in response.iter_content(chunk_size=32768):
downloaded += len(data)
response_data += data.decode("utf-8")
                progress.advance(task, len(data))
else:
response_data = response.content.decode("utf-8")
import re # pylint: disable=import-outside-toplevel
packages = re.findall(r"<a[^>]*>([^<]+)<\/a>", response_data)
with open(cache_file, "w", encoding="utf-8") as cache_file:
cache_file.write("\n".join(packages))
return packages
| 17,041
|
def house_filter(size, low, high):
"""
Function that returns the "gold standard" filter.
This window is designed to produce low sidelobes
for Fourier filters.
In essence it resembles a sigmoid function that
smoothly goes between zero and one, from short
to long time.
"""
filt = np.zeros(size)
def eval_filter(rf, c1, c2, c3, c4):
r1 = 1. - rf**2.
r2 = r1**2.
r3 = r2 * r1
filt = c1 + c2*r1 + c3*r2 + c4*r3
return filt
coefficients = {
"c1": 0.074,
"c2": 0.302,
"c3": 0.233,
"c4": 0.390
}
denom = (high - low + 1.0) / 2.
if denom < 0.:
raise ZeroDivisionError
for i in range(int(low), int(high)):
rf = (i + 1) / denom
if rf > 1.5:
filt[i] = 1.
else:
temp = eval_filter(rf, **coefficients)
if temp < 0.:
filt[i] = 1.
else:
filt[i] = 1. - temp
filt[int(high):] = 1.
return filt
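# Illustrative call (assumes numpy as np, as used above): a 256-point window that is
# zero below index `low`, rises smoothly across the passband, and is one from `high` on.
filt = house_filter(256, low=20, high=120)
# filt[:20] == 0.0, filt[120:] == 1.0, with a smooth monotonic transition in between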
| 17,042
|
def PToData(inGFA, data, err):
"""
Copy host array to data
Copys data from GPUFArray locked host array to data
* inFA = input Python GPUFArray
* data = FArray containing data array
* err = Obit error/message stack
"""
################################################################
    return Obit.GPUFArrayToData (inGFA.me, data.me, err.me)
| 17,043
|
def download_vendor_image(image):
""" Downloads specified vendor binary image
Args:
image (str): Path of image filename to begin downloading
    Returns:
        The file response for the requested image
    """
# TODO Prevent sending hidden files
return send_from_directory(os.path.join(_AEON_TOPDIR, 'vendor_images'), image)
| 17,044
|
def test_atomic_string_length_4_nistxml_sv_iv_atomic_string_length_5_1(mode, save_output, output_format):
"""
Type atomic/string is restricted by facet length with value 1000.
"""
assert_bindings(
schema="nistData/atomic/string/Schema+Instance/NISTSchema-SV-IV-atomic-string-length-5.xsd",
instance="nistData/atomic/string/Schema+Instance/NISTXML-SV-IV-atomic-string-length-5-1.xml",
class_name="NistschemaSvIvAtomicStringLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 17,045
|
def maker(sql_connection, echo=False):
"""
    Get a sessionmaker object from a sql_connection.
"""
engine = get_engine(sql_connection, echo=echo)
m = orm.sessionmaker(bind=engine, autocommit=True, expire_on_commit=False)
return m
| 17,046
|
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
| 17,047
|
def test_auth(request):
"""Tests authentication worked successfuly."""
return Response({"message": "You successfuly authenticated!"})
| 17,048
|
def streak_condition_block() -> Block:
"""
Create block with 'streak' condition, when rotation probability is low and
target orientation repeats continuously in 1-8 trials.
:return: 'Streak' condition block.
"""
return Block(configuration.STREAK_CONDITION_NAME,
streak_rotations_generator)
| 17,049
|
def resolve_appinstance(request,
appinstanceid,
permission='base.change_resourcebase',
msg=_PERMISSION_MSG_GENERIC,
**kwargs):
"""
    Resolve the app instance by the provided primary key
and check the optional permission.
"""
return resolve_object(
request,
AppInstance, {'pk': appinstanceid},
permission=permission,
permission_msg=msg,
**kwargs)
| 17,050
|
def test_star(genome, force):
"""Create star index."""
assert os.path.exists(genome.filename)
force = True if force == "overwrite" else False
if cmd_ok("STAR"):
p = StarPlugin()
p.after_genome_download(genome)
dirname = os.path.dirname(genome.filename)
index_dir = os.path.join(dirname, "index", "star")
fname = os.path.join(index_dir, "SA")
assert os.path.exists(index_dir)
assert os.path.exists(fname)
force_test(p, fname, genome, force)
| 17,051
|
def statementTVM(pReact):
"""Use this funciton to produce the TVM statemet"""
T,V,mass = pReact.T,pReact.volume,pReact.mass
statement="\n{}: T: {:0.2f} K, V: {:0.2f} m^3, mass: {:0.2f} kg".format(pReact.name,T,V,mass)
return statement
| 17,052
|
def fill_NaNs_with_nearest_neighbour(data, lons, lats):
"""At each depth level and time, fill in NaN values with nearest lateral
neighbour. If the entire depth level is NaN, fill with values from level
above. The last two dimensions of data are the lateral dimensions.
lons.shape and lats.shape = (data.shape[-2], data.shape[-1])
:arg data: the data to be filled
:type data: 4D numpy array
:arg lons: longitude points
:type lons: 2D numpy array
:arg lats: latitude points
:type lats: 2D numpy array
:returns: a 4D numpy array
"""
filled = data.copy()
for t in range(data.shape[0]):
for k in range(data.shape[1]):
subdata = data[t, k, :, :]
mask = np.isnan(subdata)
points = np.array([lons[~mask], lats[~mask]]).T
valid_data = subdata[~mask]
try:
filled[t, k, mask] = interpolate.griddata(
points, valid_data, (lons[mask], lats[mask]),
method='nearest'
)
except ValueError:
# if the whole depth level is NaN,
# set it equal to the level above
filled[t, k, :, :] = filled[t, k - 1, :, :]
return filled
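# Minimal sketch of a call (illustrative; assumes numpy as np and scipy's `interpolate`
# module, both used above): a (time, depth, y, x) field with one NaN hole per slice.
lons, lats = np.meshgrid(np.linspace(0.0, 1.0, 5), np.linspace(45.0, 46.0, 4))
field = np.random.rand(2, 3, 4, 5)
field[:, :, 1, 2] = np.nan
filled = fill_NaNs_with_nearest_neighbour(field, lons, lats)
assert not np.isnan(filled).any()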
| 17,053
|
def transform_type_postorder(type_signature, transform_fn):
"""Walks type tree of `type_signature` postorder, calling `transform_fn`.
Args:
type_signature: Instance of `computation_types.Type` to transform
recursively.
transform_fn: Transformation function to apply to each node in the type tree
of `type_signature`. Must be instance of Python function type.
Returns:
A possibly transformed version of `type_signature`, with each node in its
tree the result of applying `transform_fn` to the corresponding node in
`type_signature`.
Raises:
TypeError: If the types don't match the specification above.
"""
# TODO(b/134525440): Investigate unifying the recursive methods in type_utils,
# rather than proliferating them.
# TODO(b/134595038): Revisit the change here to add a mutated flag.
py_typecheck.check_type(type_signature, computation_types.Type)
py_typecheck.check_callable(transform_fn)
if isinstance(type_signature, computation_types.FederatedType):
transformed_member, member_mutated = transform_type_postorder(
type_signature.member, transform_fn)
if member_mutated:
type_signature = computation_types.FederatedType(transformed_member,
type_signature.placement,
type_signature.all_equal)
fed_type_signature, type_signature_mutated = transform_fn(type_signature)
return fed_type_signature, type_signature_mutated or member_mutated
elif isinstance(type_signature, computation_types.SequenceType):
transformed_element, element_mutated = transform_type_postorder(
type_signature.element, transform_fn)
if element_mutated:
type_signature = computation_types.SequenceType(transformed_element)
seq_type_signature, type_signature_mutated = transform_fn(type_signature)
return seq_type_signature, type_signature_mutated or element_mutated
elif isinstance(type_signature, computation_types.FunctionType):
transformed_param, param_mutated = transform_type_postorder(
type_signature.parameter, transform_fn)
transformed_result, result_mutated = transform_type_postorder(
type_signature.result, transform_fn)
if param_mutated or result_mutated:
type_signature = computation_types.FunctionType(transformed_param,
transformed_result)
fn_type_signature, fn_mutated = transform_fn(type_signature)
return fn_type_signature, fn_mutated or param_mutated or result_mutated
elif isinstance(type_signature, computation_types.NamedTupleType):
elems = []
elems_mutated = False
for element in anonymous_tuple.iter_elements(type_signature):
transformed_element, element_mutated = transform_type_postorder(
element[1], transform_fn)
elems_mutated = elems_mutated or element_mutated
elems.append((element[0], transformed_element))
if elems_mutated:
if isinstance(type_signature,
computation_types.NamedTupleTypeWithPyContainerType):
type_signature = computation_types.NamedTupleTypeWithPyContainerType(
elems,
computation_types.NamedTupleTypeWithPyContainerType
.get_container_type(type_signature))
else:
type_signature = computation_types.NamedTupleType(elems)
tuple_type_signature, tuple_mutated = transform_fn(type_signature)
return tuple_type_signature, elems_mutated or tuple_mutated
elif isinstance(type_signature,
(computation_types.AbstractType, computation_types.TensorType,
computation_types.PlacementType)):
return transform_fn(type_signature)
| 17,054
|
def get_job_output(job_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
view: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobResult]:
"""
    Gets the state of the specified Cloud Dataflow job. To get the state of a job, we recommend using `projects.locations.jobs.get` with a [regional endpoint](https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.get` is not recommended, as you can only get the state of jobs that are running in `us-central1`.
"""
...
| 17,055
|
def check_stack_feature(stack_feature, stack_version):
"""
Given a stack_feature and a specific stack_version, it validates that the feature is supported by the stack_version.
:param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
:param stack_version: Version of the stack
:return: Will return True if successful, otherwise, False.
"""
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import compare_versions
stack_features_config = default("/configurations/cluster-env/stack_features", None)
data = _DEFAULT_STACK_FEATURES
if not stack_version:
return False
if stack_features_config:
data = json.loads(stack_features_config)
for feature in data["stack_features"]:
if feature["name"] == stack_feature:
if "min_version" in feature:
min_version = feature["min_version"]
if compare_versions(stack_version, min_version, format = True) < 0:
return False
if "max_version" in feature:
max_version = feature["max_version"]
if compare_versions(stack_version, max_version, format = True) >= 0:
return False
return True
return False
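# Illustrative call (the feature name comes from the docstring example; the version string
# is made up, and the call assumes the Ambari `resource_management` environment above).
if check_stack_feature("rolling_upgrade", "2.3.0.0"):
    print("rolling_upgrade is supported by this stack version")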
| 17,056
|
def test_publish_does_not_exist(client, mocked_apis):
"""
Test behavior if a release is published and one of the summary rows
do not exist
"""
db = boto3.resource('dynamodb')
release_table = db.Table('release-summary')
study_table = db.Table('study-summary')
with patch('requests.get') as mock_request:
mock_request.side_effect = partial(mocked_apis, version='0.0.3')
r = release_summary.run('TA_00000000', 'RE_00000000')
assert release_table.item_count == 1
assert study_table.item_count == 1
# Now delete the summaries, as if it never existed
release_table.delete_item(Key={
'release_id': 'RE_00000000'
})
study_table.delete_item(Key={
'release_id': 'RE_00000000',
'study_id': 'SD_00000000'
})
assert release_table.item_count == 0
assert study_table.item_count == 0
# Publish the release
mock_request.side_effect = partial(mocked_apis, version='0.1.0')
r = release_summary.publish('RE_00000000')
# There should still be no summary rows
assert release_table.item_count == 0
assert study_table.item_count == 0
| 17,057
|
def get_random_action_weights():
"""Get random weights for each action.
e.g. [0.23, 0.57, 0.19, 0.92]"""
return np.random.random((1, NUM_ACTIONS))
| 17,058
|
def rpg_radar2nc(data, path, larda_git_path, **kwargs):
"""
This routine generates a daily NetCDF4 file for the RPG 94 GHz FMCW radar 'LIMRAD94'.
Args:
data (dict): dictionary of larda containers
        path (string): path where the NetCDF file is stored
        larda_git_path (string): path to the local pyLARDA git repository, used to record the commit hash
"""
dt_start = h.ts_to_dt(data['Ze']['ts'][0])
h.make_dir(path)
site_name = kwargs['site'] if 'site' in kwargs else 'no-site'
    cn_version = kwargs['version'] if 'version' in kwargs else 'python'
ds_name = f'{path}/{h.ts_to_dt(data["Ze"]["ts"][0]):%Y%m%d}-{site_name}-limrad94.nc'
ncvers = '4'
repo = git.Repo(larda_git_path)
sha = repo.head.object.hexsha
with netCDF4.Dataset(ds_name, 'w', format=f'NETCDF{ncvers}') as ds:
ds.Convention = 'CF-1.0'
ds.location = data['Ze']['paraminfo']['location']
ds.system = data['Ze']['paraminfo']['system']
ds.version = f'Variable names and dimensions prepared for Cloudnet {kwargs["version"]} version'
ds.title = 'LIMRAD94 (SLDR) Doppler Cloud Radar, calibrated Input for Cloudnet'
ds.institution = 'Leipzig Institute for Meteorology (LIM), Leipzig, Germany'
ds.source = '94 GHz Cloud Radar LIMRAD94\nRadar type: Frequency Modulated Continuous Wave,\nTransmitter power 1.5 W typical (solid state ' \
'amplifier)\nAntenna Type: Bi-static Cassegrain with 500 mm aperture\nBeam width: 0.48deg FWHM'
ds.reference = 'W Band Cloud Radar LIMRAD94\nDocumentation and User Manual provided by manufacturer RPG Radiometer Physics GmbH\n' \
'Information about system also available at https://www.radiometer-physics.de/'
ds.calibrations = f'remove Precip. ghost: {kwargs["ghost_echo_1"]}\n, remove curtain ghost: {kwargs["ghost_echo_2"]}\n' \
f'despeckle: {kwargs["despeckle"]}\n, number of standard deviations above noise: {kwargs["NF"]}\n'
ds.git_description = f'pyLARDA commit ID {sha}'
ds.description = 'Concatenated data files of LIMRAD 94GHz - FMCW Radar, used as input for Cloudnet processing, ' \
'filters applied: ghost-echo, despeckle, use only main peak'
ds.history = 'Created ' + time.ctime(time.time())
ds._FillValue = data['Ze']['paraminfo']['fill_value']
ds.day = dt_start.day
ds.month = dt_start.month
ds.year = dt_start.year
# ds.commit_id = subprocess.check_output(["git", "describe", "--always"]) .rstrip()
ds.history = 'Created ' + time.ctime(time.time()) + '\nfilters applied: ghost-echo, despeckle, main peak only'
Ze_str = 'Zh' if cn_version == 'python' else 'Ze'
vel_str = 'v' if cn_version == 'python' else 'vm'
width_str = 'width' if cn_version == 'python' else 'sigma'
dim_tupel = ('time', 'range') if cn_version == 'python' else ('range', 'time')
n_chirps = len(data['no_av'])
ds.createDimension('chirp', n_chirps)
ds.createDimension('time', data['Ze']['ts'].size)
ds.createDimension('range', data['Ze']['rg'].size)
if cn_version == 'matlab':
for ivar in ['Ze', 'VEL', 'sw', 'ldr', 'kurt', 'skew']:
data[ivar]['var'] = data[ivar]['var'].T
# coordinates
nc_add_variable(
ds,
val=94.0,
dimension=(),
var_name='frequency',
type=np.float32,
long_name='Radar frequency',
units='GHz'
)
nc_add_variable(
ds,
val=256,
dimension=(),
var_name='Numfft',
type=np.float32,
long_name='Number of points in FFT',
units=''
)
nc_add_variable(
ds,
val=np.mean(data['MaxVel']['var']),
dimension=(),
var_name='NyquistVelocity',
type=np.float32,
long_name='Mean (over all chirps) Unambiguous Doppler velocity (+/-)',
units='m s-1'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['altitude'],
dimension=(),
var_name='altitude',
type=np.float32,
long_name='Height of instrument above mean sea level',
units='m'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['coordinates'][0],
dimension=(),
var_name='latitude',
type=np.float32,
long_name='latitude',
units='degrees_north'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['coordinates'][1],
dimension=(),
var_name='longitude',
type=np.float32,
long_name='longitude',
units='degrees_east'
)
if 'version' in kwargs and cn_version == 'python':
nc_add_variable(
ds,
val=data['no_av'],
dimension=('chirp',),
var_name='NumSpectraAveraged',
type=np.float32,
long_name='Number of spectral averages',
units=''
)
# time and range variable
# convert to time since midnight
if cn_version == 'python':
ts = np.subtract(data['Ze']['ts'], datetime.datetime(dt_start.year, dt_start.month, dt_start.day, 0, 0, 0, tzinfo=timezone.utc).timestamp()) / 3600
ts_str = 'Decimal hours from midnight UTC to the middle of each day'
ts_unit = f'hours since {dt_start:%Y-%m-%d} 00:00:00 +00:00 (UTC)'
rg = data['Ze']['rg'] / 1000.0
elif cn_version == 'matlab':
ts = np.subtract(data['Ze']['ts'], datetime.datetime(2001, 1, 1, 0, 0, 0, tzinfo=timezone.utc).timestamp())
ts_str = 'Seconds since 1st January 2001 00:00 UTC'
ts_unit = 'sec'
rg = data['Ze']['rg']
else:
        raise ValueError('Wrong version selected! Set version to "matlab" or "python"!')
nc_add_variable(ds, val=ts, dimension=('time',), var_name='time', type=np.float64, long_name=ts_str, units=ts_unit)
nc_add_variable(ds, val=rg, dimension=('range',), var_name='range', type=np.float32,
long_name='Range from antenna to the centre of each range gate', units='km')
nc_add_variable(ds, val=data['Azm']['var'], dimension=('time',), var_name='azimuth', type=np.float32,
long_name='Azimuth angle from north', units='degree')
nc_add_variable(ds, val=data['Elv']['var'], dimension=('time',), var_name='elevation', type=np.float32,
long_name='elevation angle. 90 degree is vertical direction.', units='degree')
# chirp dependent variables
nc_add_variable(ds, val=data['MaxVel']['var'][0], dimension=('chirp',),
var_name='DoppMax', type=np.float32, long_name='Unambiguous Doppler velocity (+/-)', units='m s-1')
# index plus (1 to n) for Matlab indexing
nc_add_variable(ds, val=data['rg_offsets'], dimension=('chirp',),
var_name='range_offsets', type=np.int32,
long_name='chirp sequences start index array in altitude layer array', units='-')
# 1D variables
nc_add_variable(ds, val=data['bt']['var'], dimension=('time',),
var_name='bt', type=np.float32, long_name='Direct detection brightness temperature', units='K')
nc_add_variable(ds, val=data['LWP']['var'], dimension=('time',),
var_name='lwp', type=np.float32, long_name='Liquid water path', units='g m-2')
nc_add_variable(ds, val=data['rr']['var'], dimension=('time',),
var_name='rain', type=np.float32, long_name='Rain rate from weather station', units='mm h-1')
nc_add_variable(ds, val=data['SurfRelHum']['var'], dimension=('time',),
var_name='SurfRelHum', type=np.float32, long_name='Relative humidity from weather station', units='%')
# 2D variables
nc_add_variable(ds, val=data['Ze']['var'], dimension=dim_tupel, var_name=Ze_str, type=np.float32,
long_name='Radar reflectivity factor', units='mm6 m-3', plot_range=data['Ze']['var_lims'], plot_scale='linear',
comment='Calibrated reflectivity. Calibration convention: in the absence of attenuation, '
'a cloud at 273 K containing one million 100-micron droplets per cubic metre will '
'have a reflectivity of 0 dBZ at all frequencies.')
nc_add_variable(ds, val=data['VEL']['var'], dimension=dim_tupel, plot_range=data['VEL']['var_lims'], plot_scale='linear',
var_name=vel_str, type=np.float32, long_name='Doppler velocity', units='m s-1', unit_html='m s<sup>-1</sup>',
                    comment='This parameter is the radial component of the velocity, with positive velocities directed away from the radar.',
folding_velocity=data['MaxVel']['var'].max())
    nc_add_variable(ds, val=data['sw']['var'], dimension=dim_tupel, plot_range=data['sw']['var_lims'], plot_scale='logarithmic',
var_name=width_str, type=np.float32, long_name='Spectral width', units='m s-1', unit_html='m s<sup>-1</sup>',
comment='This parameter is the standard deviation of the reflectivity-weighted velocities in the radar pulse volume.')
nc_add_variable(ds, val=data['ldr']['var'], dimension=dim_tupel, plot_range=[-30.0, 0.0],
var_name='ldr', type=np.float32, long_name='Linear depolarisation ratio', units='dB',
comment='This parameter is the ratio of cross-polar to co-polar reflectivity.')
nc_add_variable(ds, val=data['kurt']['var'], dimension=dim_tupel, plot_range=data['kurt']['var_lims'],
var_name='kurt', type=np.float32, long_name='Kurtosis', units='linear')
nc_add_variable(ds, val=data['skew']['var'], dimension=dim_tupel, plot_range=data['skew']['var_lims'],
var_name='Skew', type=np.float32, long_name='Skewness', units='linear')
print('save calibrated to :: ', ds_name)
return 0
| 17,059
|
def load_ssl_user_from_request(request):
"""
Loads SSL user from current request.
SSL_CLIENT_VERIFY and SSL_CLIENT_S_DN needs to be set in
request.environ. This is set by frontend httpd mod_ssl module.
"""
ssl_client_verify = request.environ.get('SSL_CLIENT_VERIFY')
if ssl_client_verify != 'SUCCESS':
raise Unauthorized('Cannot verify client: %s' % ssl_client_verify)
username = request.environ.get('SSL_CLIENT_S_DN')
if not username:
raise Unauthorized('Unable to get user information (DN) from client certificate')
user = User.find_user_by_name(username)
if not user:
user = User.create_user(username=username)
g.groups = []
g.user = user
return user
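# Editor's sketch (not part of the original module): a minimal example that mimics
# the two environ keys the frontend httpd mod_ssl module would set. `request` is a
# hypothetical Flask request object, e.g. obtained from app.test_request_context().
def _example_ssl_login(request):
    request.environ['SSL_CLIENT_VERIFY'] = 'SUCCESS'
    request.environ['SSL_CLIENT_S_DN'] = 'CN=alice,O=Example Org'
    return load_ssl_user_from_request(request)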
| 17,060
|
def redirect_stream_via_fd(stream, to=os.devnull):
""" Redirects given stream to another at file descriptor level. """
assert stream is not None
stream_fd = _fileno(stream)
# copy stream_fd before it is overwritten
# NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stream_fd), 'wb') as copied:
stream.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(_fileno(to), stream_fd) # $ exec >&to
except ValueError: # filename
with open(to, 'wb') as to_file:
os.dup2(to_file.fileno(), stream_fd) # $ exec > to
try:
yield stream # allow code to be run with the redirected stdout
finally:
# restore stdout to its previous value
# NOTE: dup2 makes stdout_fd inheritable unconditionally
stream.flush()
os.dup2(copied.fileno(), stream_fd)
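# Editor's sketch: the generator above is presumably meant to be used as a context
# manager (e.g. decorated with contextlib.contextmanager in the original module);
# the wrapping below only illustrates that assumption.
import contextlib
import os
import sys

_redirect_stream = contextlib.contextmanager(redirect_stream_via_fd)

def _example_silence_stdout():
    with _redirect_stream(sys.stdout, os.devnull):
        print("suppressed at the file-descriptor level")
    print("back on the original stdout")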
| 17,061
|
def test_to_wkt():
"""
    Equivalent to test_from_wkt()
"""
with pytest.raises(Exception):
geometry.to_wkt({"coordinates": [10.2, 56.1], "type": "Punkt"})
| 17,062
|
def get_all_users():
"""Gets all users"""
response = user_info.get_all_users()
return jsonify({'Users' : response}), 200
| 17,063
|
def open_mailbox_maildir(directory, create=False):
""" There is a mailbox here.
"""
return lazyMaildir(directory, create=create)
| 17,064
|
def TANH(*args) -> Function:
"""
Returns the hyperbolic tangent of any real number.
    Learn more: https://support.google.com/docs/answer/3093755
"""
return Function("TANH", args)
| 17,065
|
def update_item_orders(begin_order, t_tasks, projects, api, cmd_count):
    """Shift the item_order of tasks whose order is at least `begin_order`."""
for task in t_tasks.values():
if is_in_the_same_proj(task, projects) and task['item_order'] >= begin_order:
api.items.get_by_id(task['id']).update(item_order=task['item_order']+1)
update_cmd_count(api)
| 17,066
|
def value_frequencies_chart_from_blocking_rules(
blocking_rules: list, df: DataFrame, spark: SparkSession, top_n=20, bottom_n=10
):
"""Produce value frequency charts for the provided blocking rules
Args:
blocking_rules (list): A list of blocking rules as specified in a Splink
settings dictionary
df (DataFrame): Dataframe to profile
spark (SparkSession): SparkSession object
top_n (int, optional): Number of values with the highest frequencies to display. Defaults to 20.
bottom_n (int, optional): Number of values with the lowest frequencies to display. Defaults to 10.
Returns:
Chart: If Altair is installed, return a chart. If not, then it returns the
vega lite chart spec as a dictionary
"""
col_combinations = blocking_rules_to_column_combinations(blocking_rules)
return column_combination_value_frequencies_chart(
col_combinations, df, spark, top_n, bottom_n
)
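# Editor's sketch: the blocking rules below follow the SQL-style strings used in
# Splink settings dictionaries; `spark` and `df` are assumed to be an existing
# SparkSession and the DataFrame to profile.
def _example_value_frequencies(df, spark):
    blocking_rules = [
        "l.first_name = r.first_name and l.surname = r.surname",
        "l.dob = r.dob",
    ]
    return value_frequencies_chart_from_blocking_rules(
        blocking_rules, df, spark, top_n=20, bottom_n=10
    )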
| 17,067
|
def getOfflineStockDataManifest():
"""Returns manifest for the available offline data.
If manifest is not found, creates an empty one.
Returns:
A dict with the manifest. For example:
{'STOCK_1':
{'first_available_date': datetime(2016, 1, 1),
'last_available_date': datetime(2017, 2, 28)},
'STOCK_2':
{'first_available_date': datetime(2014, 2, 4),
'last_available_date': datetime(2016, 6, 15)}}
"""
if exists(offlineStockDataManifestPath):
with open(offlineStockDataManifestPath) as manifest_file:
return JSON.openJson(manifest_file)
else:
manifest = {}
updateOfflineStockDataManifest(manifest)
return manifest
| 17,068
|
def count_tags(request, t, t_old, tag_dict):
""" при сохранении документа пересчитать кол-во тегов в облаке t-скорее всего тег
вызывается в save_tags
1) получает документ где хранится облако тегов
2) идем по тегам и если есть
"""
    tags = request.db.conf.find_one({"_id":'tags_'+tag_dict[4:]}) # fetch the document that stores the tag cloud
tags = tags['tags'][cur_lang(request)] if tags and 'tags' in tags and cur_lang(request) in tags['tags'] else []
tags_d = dict(tags)
for res in t:
        if res not in tags_d: tags_d[res] = 1  # if the tag is not yet in the cloud, add it with a count of 1
else: tags_d[res] += 1
for res in t_old:
if res in tags_d:
tags_d[res] -= 1
if tags_d[res] == 0: del tags_d[res]
tags = [ (res, tags_d[res]) for res in tags_d]
request.db.conf.save( {"_id":'tags_'+tag_dict[4:],"tags":{cur_lang(request):tags}} )
| 17,069
|
def test_dbt_build_select(profiles_file, dbt_project_file, model_files):
"""Test execution of DbtBuildOperator selecting all models."""
op = DbtBuildOperator(
task_id="dbt_task",
project_dir=dbt_project_file.parent,
profiles_dir=profiles_file.parent,
select=[str(m.stem) for m in model_files],
do_xcom_push=True,
)
execution_results = op.execute({})
build_result = execution_results["results"][0]
assert build_result["status"] == RunStatus.Success
| 17,070
|
def align_buf(buf: bytes, sample_width: int):
    """If the buffer length is not a multiple of sample_width, pad it with zero bytes."""
remainder = len(buf) % sample_width
if remainder != 0:
buf += b'\0' * (sample_width - remainder)
return buf
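# Quick sanity check of the padding behaviour: a 5-byte buffer aligned to a 2-byte
# sample width gains a single trailing null byte, an aligned buffer is unchanged.
assert align_buf(b"\x01\x02\x03\x04\x05", 2) == b"\x01\x02\x03\x04\x05\x00"
assert align_buf(b"\x01\x02", 2) == b"\x01\x02"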
| 17,071
|
def update_alert():
""" Make Rest API call to security graph to update an alert """
if flask.request.method == 'POST':
flask.session.pop('UpdateAlertData', None)
result = flask.request.form
flask.session['VIEW_DATA'].clear()
alert_data = {_: result[_] for _ in result} # Iterate over html form POST from Graph.html
if alert_data.get('AlertId'): # Id form was not empty
alert_data['AlertId'] = alert_data.get('AlertId').strip(' ')
else:
flask.session['VIEW_DATA']['UpdateAlertError'] = "Please enter valid alert Id"
return flask.redirect(flask.url_for('homepage'))
alert_id = alert_data['AlertId']
old_alert = get_alert_by_id(alert_id) # store old alert before updating it
if not old_alert: # alert not found
flask.session['VIEW_DATA']['UpdateAlertError'] = "No alert matching this ID " + alert_id + " was found"
return flask.redirect(flask.url_for('homepage'))
else:
flask.session['VIEW_DATA']['OldAlert'] = old_alert
properties_to_update = {}
properties_to_update["assignedTo"] = flask.session['email']
if alert_data.get("SelectStatusToUpdate") != "Unknown":
properties_to_update["status"] = alert_data.get("SelectStatusToUpdate")
if alert_data.get("SelectFeedbackToUpdate") != "Unknown":
properties_to_update["feedback"] = alert_data.get("SelectFeedbackToUpdate")
if alert_data.get("Comments") != "":
comments = old_alert.get("comments")
new_comment = alert_data.get("Comments")
comments.append(new_comment)
properties_to_update["comments"] = comments
# include the required vendor information in the body of the PATCH
properties_to_update["vendorInformation"] = old_alert.get("vendorInformation")
# update the alert
update_security_alert(alert_id, properties_to_update)
# make another call to graph to get the updated alert
updated_alert = get_alert_by_id(alert_id)
# store the alert to be rendered in the table in Graph.html
flask.session['VIEW_DATA']['UpdateAlertResults'] = updated_alert
flask.session['VIEW_DATA']['UpdateQueryDetails'] = "REST query PATCH: '" \
+ config.SECURITYAPI_URL \
+ "alerts/" \
+ alert_id \
+ "'"
flask.session['VIEW_DATA']['UpdateQueryBody'] = "Request Body: " \
+ json.dumps(properties_to_update,
sort_keys=True,
indent=4,
separators=(',', ': '))
flask.session['UpdateAlertData'] = alert_data
return flask.redirect(flask.url_for('homepage'))
| 17,072
|
def simulate_bet(odds, stake):
"""
Simulate the bet taking place assuming the odds accurately represent the probability of the event
:param odds: numeric: the odds given for the event
:param stake: numeric: the amount of money being staked
:return: decimal: the returns from the bet
"""
probability = odds_to_prob(odds)
if np.random.rand() <= probability:
return stake * (1 + odds)
else:
return 0
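# Editor's sketch: `odds_to_prob` is assumed to be defined elsewhere in the module.
# With odds of 3.0 and a stake of 10, a winning simulation returns 10 * (1 + 3.0)
# = 40.0, otherwise 0.
def _example_simulate_bets(n_bets=1000):
    return sum(simulate_bet(odds=3.0, stake=10) for _ in range(n_bets))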
| 17,073
|
def lrcn(num_classes, lrcn_time_steps, lstm_hidden_size=200, lstm_num_layers=2):
"""
Args:
num_classes (int):
Returns:
torch.nn.modules.module.Module
"""
class TimeDistributed(nn.Module):
def __init__(self, layer, time_steps):
super(TimeDistributed, self).__init__()
# self.layers = nn.ModuleList([layer for _ in range(time_steps)])
self.layers = nn.ModuleList([nn.Linear(10, 10) for _ in range(time_steps)])
def forward(self, x):
batch_size, time_steps, *_ = x.size()
# outputs = list()
for i, layer in enumerate(self.layers):
x = layer(x)
# output_t = layer(x[:, i])
# if i == 0:
# output = output_t.unsqueeze(1)
# else:
# output = torch.cat((output, output_t.unsqueeze(1)), 1)
# outputs.append(output_t)
# output = torch.stack(outputs, dim=1)
# return output
return x
class BiLSTMHidden2Dense(nn.Module):
def __init__(self):
super(BiLSTMHidden2Dense, self).__init__()
def forward(self, x):
lstm_output, (hn, cn) = x
lstm_last_hidden_state = hn[-2:].transpose(0, 1).contiguous().view(hn.size(1), -1)
return lstm_last_hidden_state
cnn_model = squeezenet1_1(pretrained=False, progress=True)
model = nn.Sequential(OrderedDict([
('timedistributed_cnn', TimeDistributed(nn.Conv2d(3, 60, (1, 1)), time_steps=lrcn_time_steps)),
# ('timedistributed_cnn', TimeDistributed(cnn_model, time_steps=lrcn_time_steps)),
# ('bidirectional_stacked_lstm', nn.LSTM(input_size=1000, hidden_size=lstm_hidden_size, num_layers=lstm_num_layers,
# batch_first=True, dropout=0.2, bidirectional=True)),
# ('hidden2dense', BiLSTMHidden2Dense()),
# ('dense', nn.Linear(in_features=2*lstm_hidden_size, out_features=lstm_hidden_size)),
# ('norm', nn.BatchNorm1d(num_features=lstm_hidden_size)),
# ('relu', nn.ReLU()),
# ('dropout', nn.Dropout(p=0.25)),
# ('last', nn.Linear(in_features=lstm_hidden_size, out_features=num_classes))
]))
return model
| 17,074
|
def index_document(connection, doc_id, content):
"""对document建立反向索引"""
words = tokenize(content)
pipe = connection.pipeline(True)
for word in words:
pipe.sadd('idx:' + word, doc_id)
return len(pipe.execute())
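# Editor's sketch: `tokenize` is assumed to be defined elsewhere in the module,
# and the connection is a hypothetical local redis.Redis instance.
def _example_index_document():
    import redis
    conn = redis.Redis(host='localhost', port=6379)
    return index_document(conn, 'doc:1', 'redis inverted index demo content')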
| 17,075
|
def get_incar_magmoms(incarpath,poscarpath):
"""
Read in the magnetic moments in the INCAR
Args:
incarpath (string): path to INCAR
poscarpath (string): path to POSCAR
Returns:
mof_mag_list (list of floats): magnetic moments
"""
mof_mag_list = []
init_mof = read(poscarpath)
with open(incarpath,'r') as incarfile:
for line in incarfile:
line = line.strip()
if 'MAGMOM' in line:
mag_line = line.split('= ')[1:][0].split(' ')
for val in mag_line:
mag = float(val.split('*')[1])
num = int(val.split('*')[0])
mof_mag_list.extend([mag]*num)
if not bool(mof_mag_list):
mof_mag_list = np.zeros(len(init_mof))
    if len(mof_mag_list) != len(init_mof):
raise ValueError('Error reading INCAR magnetic moments')
return mof_mag_list
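# Editor's sketch: the paths below are hypothetical; `read` is expected to come
# from ase.io in the surrounding module.
def _example_read_magmoms():
    magmoms = get_incar_magmoms('calc/INCAR', 'calc/POSCAR')
    print('%d magnetic moments read' % len(magmoms))
    return magmoms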
| 17,076
|
def add_security_groups(t):
"""Given a template will add database and web server security groups.
These security groups will be able accessibly externally and from private subnets."""
t.add_resource(
SecurityGroup(
'LinuxServer',
GroupDescription='Enable SSH access via port 22',
SecurityGroupIngress=[
SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp=IP_HOME_LOCATION),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp="10.100.0.0/16"),
SecurityGroupRule(
IpProtocol='udp',
FromPort='123',
ToPort='123',
CidrIp=IP_HOME_LOCATION)],
VpcId=Ref('VPC'),
))
t.add_resource(
SecurityGroup(
'WebServer',
GroupDescription='Enable web SSH and ping',
SecurityGroupIngress=[
SecurityGroupRule(
IpProtocol='tcp',
FromPort='80',
ToPort='80',
CidrIp='0.0.0.0/0'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='443',
ToPort='443',
CidrIp='0.0.0.0/0'),
SecurityGroupRule(
IpProtocol='icmp',
FromPort='-1',
ToPort='-1',
CidrIp='0.0.0.0/0')],
VpcId=Ref('VPC'),
))
# Create a list of all the Private subnet zones that need Web access to the NAT
private_zone_rules = []
for port in ('80', '443'):
private_zone_rules.append(SecurityGroupRule(
IpProtocol='tcp',
FromPort=port,
ToPort=port,
CidrIp=f"10.100.0.0/16"),
)
t.add_resource(
SecurityGroup(
'NATSG',
GroupDescription='For NAT instances',
SecurityGroupIngress=[
SecurityGroupRule(
IpProtocol='tcp',
FromPort='80',
ToPort='80',
CidrIp='0.0.0.0/0'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='443',
ToPort='443',
CidrIp='0.0.0.0/0'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp=IP_HOME_LOCATION),
            ] + private_zone_rules, # Port 80/443 rules for private subnets
VpcId=Ref('VPC'),
))
t.add_resource(
SecurityGroup(
'BastionSG',
GroupDescription='For Bastion instances only accessible from home location',
SecurityGroupIngress=[
SecurityGroupRule(
IpProtocol='tcp',
FromPort='80',
ToPort='80',
CidrIp=IP_HOME_LOCATION),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='80',
ToPort='80',
CidrIp='10.100.0.0/16'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='443',
ToPort='443',
CidrIp=IP_HOME_LOCATION),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='443',
ToPort='443',
CidrIp='10.100.0.0/16'),
SecurityGroupRule(
IpProtocol='icmp',
FromPort='-1',
ToPort='-1',
CidrIp=IP_HOME_LOCATION),
SecurityGroupRule(
IpProtocol='icmp',
FromPort='-1',
ToPort='-1',
CidrIp='10.100.0.0/16'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp='10.100.0.0/16'),
SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp=IP_HOME_LOCATION),
            ] + private_zone_rules, # Port 80/443 rules for private subnets
VpcId=Ref('VPC'),
))
t.add_resource(
SecurityGroup(
'RDSPostgres',
GroupDescription='Enable postgres access',
SecurityGroupIngress=[
SecurityGroupRule(
IpProtocol='tcp',
FromPort='5432',
ToPort='5432',
CidrIp='0.0.0.0/0'),
],
VpcId=Ref('VPC'),
))
| 17,077
|
def main(config_path=None):
""" The main entry point for the unix version of dogstatsd. """
# Deprecation notice
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
COMMANDS_START_DOGSTATSD = [
'start',
'stop',
'restart',
'status'
]
parser = optparse.OptionParser("%prog [start|stop|restart|status]")
parser.add_option('-u', '--use-local-forwarder', action='store_true',
dest="use_forwarder", default=False)
opts, args = parser.parse_args()
if not args or args[0] in COMMANDS_START_DOGSTATSD:
reporter, server, cnf = init(config_path, use_watchdog=True, use_forwarder=opts.use_forwarder, args=args)
daemon = Dogstatsd(PidFile(PID_NAME, PID_DIR).get_path(), server, reporter,
cnf.get('autorestart', False))
# If no args were passed in, run the server in the foreground.
if not args:
daemon.start(foreground=True)
return 0
        # Otherwise, process the daemon command.
else:
command = args[0]
if command == 'start':
daemon.start()
elif command == 'stop':
daemon.stop()
elif command == 'restart':
daemon.restart()
elif command == 'status':
daemon.status()
elif command == 'info':
return Dogstatsd.info()
else:
sys.stderr.write("Unknown command: %s\n\n" % command)
parser.print_help()
return 1
return 0
| 17,078
|
def test_get_file_succeeds_with_valid_resource_path(client, request_headers):
"""
Tests that response is okay when valid resource path is requested.
Args:
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(
f'{base_url}/dist/bundle.min.js',
headers=request_headers,
follow_redirects=True
)
assert res.status_code == 200
assert b'function' in res.data
| 17,079
|
def remove_report_from_plist(plist_file_obj, skip_handler):
"""
Parse the original plist content provided by the analyzer
and return a new plist content where reports were removed
if they should be skipped. If the remove failed for some reason None
will be returned.
WARN !!!!
If the 'files' array in the plist is modified all of the
diagnostic section (control, event ...) nodes should be
    re-indexed to use the proper file array indexes!!!
"""
report_data = None
try:
report_data = parse_plist(plist_file_obj)
if not report_data:
return
except Exception as ex:
LOG.error("Plist parsing error")
LOG.error(ex)
return
file_ids_to_remove = []
try:
for i, f in enumerate(report_data['files']):
if skip_handler.should_skip(f):
file_ids_to_remove.append(i)
kept_diagnostics, kept_files = get_kept_report_data(report_data,
file_ids_to_remove)
report_data['diagnostics'] = kept_diagnostics
report_data['files'] = kept_files if kept_diagnostics else []
return plistlib.dumps(report_data)
except KeyError:
LOG.error("Failed to modify plist content, "
"keeping the original version")
return
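# Editor's sketch: `skip_handler` is assumed to expose should_skip(path); plistlib
# expects the report to be opened in binary mode, and a None result signals that
# the original file should be left untouched.
def _example_filter_plist(plist_path, skip_handler):
    with open(plist_path, 'rb') as plist_file:
        new_content = remove_report_from_plist(plist_file, skip_handler)
    if new_content is not None:
        with open(plist_path, 'wb') as plist_file:
            plist_file.write(new_content)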
| 17,080
|
def writeAllSynchronous(
tagPaths, # type: List[String]
values, # type: List[Any]
timeout=45000, # type: Optional[int]
):
# type: (...) -> None
"""Performs a synchronous write to multiple tags.
Synchronous means that execution will not continue until this
function has completed, so you will know that a write has been
attempted on the provided tags. The first write to fail or time out
will throw an exception, but any subsequent tags in the provided
list will still be attempted. This function cannot be called from
the event dispatch thread, meaning it cannot be called directly from
a GUI event like a button press without creating a new thread with a
call to system.util.invokeAsynchronous. You can call this from
project event scripts like timer scripts.
Note that the order of the tags listed in the tagPaths parameter
determines the order that the writes will occur.
Args:
tagPaths: The paths of the tags to write to.
values: The values to write.
timeout: How long to wait in milliseconds before timing out
pending writes. The default is 45000 milliseconds. Optional.
"""
print(tagPaths, values, timeout)
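# Editor's sketch with illustrative tag paths; in a real Ignition project this stub
# stands in for system.tag.writeAllSynchronous and must not be called from the
# event dispatch thread.
def _example_write_setpoints():
    writeAllSynchronous(
        ['[default]Pump/Speed', '[default]Pump/Enabled'],
        [1200, True],
        timeout=45000,
    )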
| 17,081
|
def assert_allclose(
actual: Tuple[float, numpy.float64], desired: Tuple[float, numpy.float64]
):
"""
usage.statsmodels: 2
"""
...
| 17,082
|
def _resize_event(event, params):
"""Handle resize event."""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size, set_env=False)
_layout_figure(params)
| 17,083
|
def propagate_memlets_sdfg(sdfg):
""" Propagates memlets throughout an entire given SDFG.
:note: This is an in-place operation on the SDFG.
"""
# Reset previous annotations first
reset_state_annotations(sdfg)
for state in sdfg.nodes():
propagate_memlets_state(sdfg, state)
propagate_states(sdfg)
| 17,084
|
def doctest_profile():
"""Test for profile.
>>> @profilehooks.profile
... def sample_fn(x, y, z):
... print("%s %s %s" % (x, y, z))
... return x + y * z
You can call that function normally
>>> r = sample_fn(1, 2, z=3)
1 2 3
>>> r
7
and do that more than once
>>> sample_fn(3, 2, 1)
3 2 1
5
When you exit, the profile is printed to stdout
>>> run_exitfuncs()
<BLANKLINE>
*** PROFILER RESULTS ***
sample_fn (<doctest test_profilehooks.doctest_profile[0]>:1)
function called 2 times
...
"""
| 17,085
|
def print_arguments(args):
"""none"""
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).items()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
| 17,086
|
def isTask(item): # pragma: no cover
"""Is the given item an OmniFocus task?"""
return item.isKindOfClass_(taskClass)
| 17,087
|
def add2grouppvalue_real1():
"""
add2grouppvalue_real1
description:
Uses the raw data from real_data_1.csv to compute p-values for 2 group comparisons on a bunch of pairs of
groups and using all 3 stats tests
Test fails if there are any errors or if the shape of any of the following stats entries are incorrect:
dset.stats['{stats_test}_..._raw'].shape = (773,)
returns:
(bool) -- test pass (True) or fail (False)
"""
dset = Dataset(os.path.join(os.path.dirname(__file__), 'real_data_1.csv'))
dset.assign_groups({
'Par': [0, 1, 2, 3],
'Dap2': [4, 5, 6, 7],
'Dal2': [8, 9, 10, 11],
'Van4': [12, 13, 14, 15],
'Van8': [16, 17, 18, 19]
})
# pairs of groups to compute log2fc on
pairs = [
['Par', 'Dap2'],
['Par', 'Dal2'],
['Par', 'Van4'],
['Par', 'Van8']
]
for pair in pairs:
for stats_test in ['students', 'welchs', 'mann-whitney']:
#print('testing {} with {}'.format(pair, stats_test))
add_2group_pvalue(dset, pair, stats_test)
stest_abbrev = {'students': 'studentsP', 'welchs': 'welchsP', 'mann-whitney': 'mannwhitP'}[stats_test]
if dset.stats['{}_{}_raw'.format(stest_abbrev, '-'.join(pair))].shape != (773,):
m = 'add2grouppvalue_real1: "{}_..._raw" should have shape (773,), has shape: {}'
                raise RuntimeError(m.format(stest_abbrev, dset.stats['{}_{}_raw'.format(stest_abbrev, '-'.join(pair))].shape))
# diagnostic printing stuff
"""
print(dset)
for s, w, m in zip(dset.stats["studentsP_Par-Dap2_raw"] <= 0.05,
dset.stats["welchsP_Par-Dap2_raw"] <= 0.05,
dset.stats["mannwhitP_Par-Dap2_raw"] <= 0.05):
if s and w and m:
print(True)
elif not s and not w and not m:
print(False)
else:
print(s, w, m)
"""
return True
| 17,088
|
def cox_cc_loss(g_case: Tensor, g_control: Tensor, shrink : float = 0.,
clamp: Tuple[float, float] = (-3e+38, 80.)) -> Tensor:
"""Torch loss function for the Cox case-control models.
For only one control, see `cox_cc_loss_single_ctrl` instead.
Arguments:
g_case {torch.Tensor} -- Result of net(input_case)
g_control {torch.Tensor} -- Results of [net(input_ctrl1), net(input_ctrl2), ...]
Keyword Arguments:
        shrink {float} -- Shrinkage that encourages the net to give g_case and g_control
closer to zero (a regularizer in a sense). (default: {0.})
clamp {tuple} -- See code (default: {(-3e+38, 80.)})
Returns:
        torch.Tensor -- Mean loss over the batch plus the shrinkage penalty.
"""
control_sum = 0.
shrink_control = 0.
if g_case.shape != g_control[0].shape:
raise ValueError(f"Need `g_case` and `g_control[0]` to have same shape. Got {g_case.shape}"+
f" and {g_control[0].shape}")
for ctr in g_control:
shrink_control += ctr.abs().mean()
ctr = ctr - g_case
ctr = torch.clamp(ctr, *clamp) # Kills grads for very bad cases (should instead cap grads!!!).
control_sum += torch.exp(ctr)
loss = torch.log(1. + control_sum)
shrink_zero = shrink * (g_case.abs().mean() + shrink_control) / len(g_control)
return torch.mean(loss) + shrink_zero.abs()
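# Editor's sketch: shapes are illustrative. g_case holds the network output for the
# event cases and g_control is a list of outputs for the sampled controls;
# requires_grad is set so the loss can be backpropagated.
def _example_cox_cc_loss():
    import torch
    g_case = torch.randn(16, 1, requires_grad=True)
    g_control = [torch.randn(16, 1, requires_grad=True) for _ in range(2)]
    loss = cox_cc_loss(g_case, g_control, shrink=0.1)
    loss.backward()
    return loss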
| 17,089
|
def vector_field(mesh, v):
"""
Returns a np.array with values specified by `v`, where `v` should
    be an iterable of length 3, or a function that returns an iterable of
    length 3 when given the coordinates of a cell of `mesh`.
"""
return field(mesh, v, dim=3)
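# Editor's sketch: `mesh` is assumed to be a mesh object accepted by the
# module-level `field` helper; the callable form returns a position-dependent
# vector, here a uniform field pointing along z.
def _example_uniform_z_field(mesh):
    return vector_field(mesh, lambda pos: (0.0, 0.0, 1.0))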
| 17,090
|
def is_fav_recipe(request):
"""
Handles the requests from /ajax/is_fav_recipe/
Checks if a :model:`matega.recipe` is a saved recipe for a :model:'matega.user'
**Data**
Boolean if :model:`matega.recipe` is a saved recipe for :model:'matega.user'
"""
user_id = int(request.GET.get('user_id', None))
recipe_id = int(request.GET.get('recipe_id', None))
is_fav = False
user = User.objects.get(pk=user_id)
for rec in user.saved_recipes.values_list():
if rec[0] == recipe_id:
is_fav = True
data = {
'is_fav': is_fav
}
return JsonResponse(data)
| 17,091
|
def people_interp():
"""
<enumeratedValueSet variable="People"> <value value="500"/> </enumeratedValueSet>
Integer between 1 and 500
"""
    return '<enumeratedValueSet variable="People"> <value value="%s"/> </enumeratedValueSet>'
| 17,092
|
def check_lint(path, lint_name="md"):
"""lint命令及检测信息提取,同时删除中间文档xxx_lint.md或者xxx_lint.py"""
error_infos = []
lint_ext = "_lint.md" if lint_name == "md" else "_lint.py"
check_command = "mdl -s mdrules.rb" if lint_name == "md" else "pylint -j 4"
if lint_name == "md":
convert_to_markdown(path)
if lint_name == "py":
convert_to_py(path)
check_path = path.replace(".ipynb", lint_ext)
if lint_ext == "_lint.md":
math_f_info = check_mathematical_formula(check_path)
error_infos.extend(math_f_info)
cmd = "{} {}".format(check_command, check_path)
res = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8",)
info = res.stdout.read()
if info is not None:
info_list = info.split("\n")[:-1]
for i in info_list:
try:
location_, error_info = re.findall(":([0-9]+):(.*)", i)[0]
file_name = check_path.replace(lint_ext, ".ipynb")
error_infos.append((file_name, location_, error_info.strip()))
except IndexError:
pass
finally:
pass
os.remove(check_path)
return error_infos
| 17,093
|
def through_omas_s3(ods, method=['function', 'class_method'][1]):
"""
Test save and load S3
:param ods: ods
:return: ods
"""
filename = 'test.pkl'
if method == 'function':
save_omas_s3(ods, filename, user='omas_test')
ods1 = load_omas_s3(filename, user='omas_test')
else:
ods.save('s3', filename=filename, user='omas_test')
ods1 = ODS().load('s3', filename=filename, user='omas_test')
return ods1
| 17,094
|
def read_packages(filename):
"""Return a python list of tuples (repository, branch), given a file
containing one package (and branch) per line.
Comments are excluded
"""
lines = load_order_file(filename)
packages = []
for line in lines:
if "," in line: # user specified a branch
path, branch = [k.strip() for k in line.split(",", 1)]
packages.append((path, branch))
else:
packages.append((line, "master"))
return packages
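# Editor's sketch: 'packages.txt' is a hypothetical order file where each line is
# "repository[, branch]"; comment handling is delegated to load_order_file.
def _example_print_packages():
    for repo, branch in read_packages('packages.txt'):
        print('%s @ %s' % (repo, branch))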
| 17,095
|
def find_min(x0, capacities):
"""
(int list, int list) --> (int list, int)
Find the schedule that minimizes the passenger wait time with the given capacity distribution
Uses a mixture of Local beam search and Genetic Algorithm
Returns the min result
"""
scores_and_schedules = []
# Generate 199 neighbouring schedules using the input schedule x0
init_neighbours = find_neighbours(199, 10, x0)
min_score = all_trains(x0, capacities, passengers)
min_sched = x0
heapq.heappush(scores_and_schedules,(min_score, x0))
# Add them all to the list, as well as the input schedule
for i in init_neighbours:
score = all_trains(i, capacities, passengers)
heapq.heappush(scores_and_schedules,(score, i))
if score < min_score:
min_score, min_sched = score, i
local_min_counter = 0
# Perform the genetic algorithm for optimization
while local_min_counter < 500:
scores_and_schedules = best_n(scores_and_schedules, capacities, 5)
if scores_and_schedules[0][0] < min_score:
min_score, min_sched = scores_and_schedules[0]
local_min_counter = 0
else:
local_min_counter += 1
return min_sched, min_score
| 17,096
|
def test_spectrum_getters_return_copies():
"""Test if getters return (deep)copies so that edits won't change the original entries."""
spectrum = Spectrum(mz=numpy.array([100.0, 101.0], dtype="float"),
intensities=numpy.array([0.4, 0.5], dtype="float"),
metadata={"testdata": 1})
# Get entries and modify
testdata = spectrum.get("testdata")
testdata += 1
assert spectrum.get("testdata") == 1, "Expected different entry"
peaks_mz = spectrum.peaks.mz
peaks_mz += 100.0
assert numpy.all(spectrum.peaks.mz == numpy.array([100.0, 101.0])), "Expected different peaks.mz"
metadata = spectrum.metadata
metadata["added_info"] = "this"
assert spectrum.metadata == {'testdata': 1}, "Expected metadata to remain unchanged"
| 17,097
|
def delta_eta_with_gaussian(analysis: "correlations.Correlations") -> None:
""" Plot the subtracted delta eta near-side. """
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
for (attribute_name, width_obj), (correlation_attribute_name, correlation) in \
zip(analysis.widths_delta_eta, analysis.correlation_hists_delta_eta_subtracted):
# Setup
# Sanity check
if attribute_name != correlation_attribute_name:
raise ValueError(
"Issue extracting width and hist together."
f"Width obj name: {attribute_name}, hist obj name: {correlation_attribute_name}"
)
# Plot only the near side for now because the away-side doesn't have a gaussian shape
if attribute_name == "away_side":
continue
# Plot the data.
h = correlation.hist
ax.errorbar(
h.x, h.y, yerr = h.errors,
marker = "o", linestyle = "",
label = f"{correlation.type.display_str()}",
)
# Plot the fit
gauss = width_obj.fit_object(h.x, **width_obj.fit_result.values_at_minimum)
fit_plot = ax.plot(
h.x, gauss,
label = fr"Gaussian fit: $\mu = $ {width_obj.mean:.2f}, $\sigma = $ {width_obj.width:.2f}",
)
# Fill in the error band.
error = width_obj.fit_object.calculate_errors(x = h.x)
ax.fill_between(
h.x, gauss - error, gauss + error,
facecolor = fit_plot[0].get_color(), alpha = 0.5,
)
# Labels.
ax.set_xlabel(labels.make_valid_latex_string(correlation.axis.display_str()))
ax.set_ylabel(labels.make_valid_latex_string(labels.delta_eta_axis_label()))
jet_pt_label = labels.jet_pt_range_string(analysis.jet_pt)
track_pt_label = labels.track_pt_range_string(analysis.track_pt)
ax.set_title(fr"Subtracted 1D ${correlation.axis.display_str()}$,"
f" {analysis.reaction_plane_orientation.display_str()} event plane orient.,"
f" {jet_pt_label}, {track_pt_label}")
ax.legend(loc = "upper right")
# Final adjustments
fig.tight_layout()
# Save plot and cleanup
plot_base.save_plot(analysis.output_info, fig,
f"jetH_delta_eta_{analysis.identifier}_width_{attribute_name}_fit")
# Reset for the next iteration of the loop
ax.clear()
# Final cleanup.
plt.close(fig)
| 17,098
|
def mult_by_scalar_func(
raster_path, scalar, target_nodata, target_path):
"""Multiply raster by scalar."""
nodata = pygeoprocessing.get_raster_info(raster_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(raster_path, 1), (scalar, 'raw'), (nodata, 'raw'),
(target_nodata, 'raw')], _mult_by_scalar_op, target_path,
gdal.GDT_Float32, target_nodata)
| 17,099
|