content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def _init_basemap(border_colour):
    """Initializes basemap.

    :param border_colour: Colour (in any format accepted by matplotlib) of
        political borders.
    :return: narr_row_limits: length-2 numpy array of (min, max) NARR rows to
        plot.
    :return: narr_column_limits: length-2 numpy array of (min, max) NARR columns
        to plot.
    :return: axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
    :return: basemap_object: Instance of `mpl_toolkits.basemap.Basemap`.
    """
    # Convert the module-level lat/lng bounding box into NARR grid coordinates.
    (narr_row_limits, narr_column_limits
    ) = nwp_plotting.latlng_limits_to_rowcol_limits(
        min_latitude_deg=MIN_LATITUDE_DEG, max_latitude_deg=MAX_LATITUDE_DEG,
        min_longitude_deg=MIN_LONGITUDE_DEG,
        max_longitude_deg=MAX_LONGITUDE_DEG,
        model_name=nwp_model_utils.NARR_MODEL_NAME)

    # First returned value (figure object) is not needed here.
    _, axes_object, basemap_object = nwp_plotting.init_basemap(
        model_name=nwp_model_utils.NARR_MODEL_NAME,
        first_row_in_full_grid=narr_row_limits[0],
        last_row_in_full_grid=narr_row_limits[1],
        first_column_in_full_grid=narr_column_limits[0],
        last_column_in_full_grid=narr_column_limits[1])

    # Draw geographic/political reference features in the requested colour.
    plotting_utils.plot_coastlines(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=border_colour)
    plotting_utils.plot_countries(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=border_colour)
    plotting_utils.plot_states_and_provinces(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=border_colour)

    # Parallels/meridians are clipped to the map extent by basemap itself, so
    # the full global ranges are passed here.
    plotting_utils.plot_parallels(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lat_deg=-90., upper_right_lat_deg=90.,
        parallel_spacing_deg=PARALLEL_SPACING_DEG)
    plotting_utils.plot_meridians(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lng_deg=0., upper_right_lng_deg=360.,
        meridian_spacing_deg=MERIDIAN_SPACING_DEG)

    return narr_row_limits, narr_column_limits, axes_object, basemap_object
def makeFigure():
    """Get a list of the axis objects and create a figure.

    Runs the three epitope-selection methods (CITE_RIDGE and the weighted /
    unweighted distance-metric scatter), collects their selected epitopes into
    a DataFrame, prints it, and returns the figure.

    :return: matplotlib figure created by ``getSetup``.
    """
    ax, f = getSetup((9, 12), (5, 2))
    cellTarget = "Treg"

    # Collect rows in a list and build the DataFrame once at the end:
    # DataFrame.append was deprecated and removed in pandas 2.0, and the
    # original `columns={...}` set gave a nondeterministic column order.
    rows = []

    posCorrs1, negCorrs = CITE_RIDGE(ax[4], cellTarget)
    rows.extend({"Classifier": "CITE_RIDGE", "Epitope": x} for x in posCorrs1)

    possCorrs2 = distMetricScatt(ax[6:8], cellTarget, 10, weight=False)
    rows.extend({"Classifier": "distMetricF", "Epitope": x} for x in possCorrs2)

    possCorrs3 = distMetricScatt(ax[8:10], cellTarget, 10, weight=True)
    rows.extend({"Classifier": "distMetricT", "Epitope": x} for x in possCorrs3)

    epitopesDF = pd.DataFrame(rows, columns=["Classifier", "Epitope"])
    print(epitopesDF)

    # TODO: do for Cite_SVM; get abundance and affinity data; use minSelect
    # function; feed into bispec binding model; optimize using minSelect.
    return f
def get_data_frame(binary_tables, all_inputs):
    """
    Gets a data frame that needs QM reduction and further logic.
    Also removes the all_inputs from the DataFrame.

    :param binary_tables: contains a tables with True and False outputs.
    :param all_inputs: columns
    :return: tuple of (Pandas DataFrame, dict mapping input -> [min, max]).
    """
    columns = all_inputs + [KEYWORDS[OUTPUT]]
    df = from_dict_to_data_frame(binary_tables, columns)

    for an_input in all_inputs:
        # DataFrame.sort was removed from pandas (0.20+); sort_values is the
        # supported equivalent of sort([col], ascending=[1]).
        df = df.sort_values(by=[an_input], ascending=[True])
        best_df = get_dataframe_duplicates(df, an_input)
        # only takes unique values.
        variables = set(helper.get_variables(best_df, an_input))
        df = add_empty_columns(df, variables)
        df = add_boolean_table(df, variables, an_input)

    # before dropping all_inputs columns, will record their range.
    input_ranges = {}
    for the_input in all_inputs:
        input_ranges[the_input] = [min(list(df[the_input])), max(list(df[the_input]))]

    df.drop(all_inputs, inplace=True, axis=1)
    df.drop_duplicates(keep='first', inplace=True)
    return df, input_ranges
def compute_shift_delay_samples(params_delays, vector_seconds_ref, freq_sample, seconds_frame, pair_st_so, data_type=0,
                                front_time=None, cache_rates=None, cache_delays=None):
    """
    Compute number of samples to shift signal (always positive since reference station is closest to source).

    Parameters
    ----------
    params_delays
        delay model ini file.
    vector_seconds_ref : list of float
        seconds for delay information (start time polynomials).
    freq_sample : float
        sampling frequency [Hz].
    seconds_frame : float
        seconds corresponding to the frame to be processed.
    pair_st_so
        station-source pair identifier.
    data_type : int
        0 for real, 1 for complex.
    front_time : float, optional
        frontier time, i.e. time corresponding to the start of the integration
        period (takes priority over the seconds of the frame).
    cache_rates : list, optional
        temporary information on delays to avoid reprocessing of the input
        files (see lib_ini_files.get_rates_delays()). Defaults to a fresh
        empty list.
    cache_delays : list, optional
        [seconds_fr_nearest, pair_st_so, delay] from previous computation.
        Defaults to a fresh empty list.

    Returns
    -------
    list
        [shift_int, delay, fractional_sample_delay, error_out, cache_delays]:
        shift_int is the number of sample components to offset (integer
        delay); delay is the total delay; error_out is 0 on success, 1 on
        error (e.g. accumulation period not found in ini file); cache_delays
        is the updated cache.

    Notes
    -----
    Currently assuming single source (source_id always zero).

    TODO: simplify code, no need for params_delays nor find_nearest().
    """
    # Mutable default arguments ([]) are shared across calls in Python, which
    # would make the caches leak between unrelated invocations; create
    # per-call lists instead.
    if cache_rates is None:
        cache_rates = []
    if cache_delays is None:
        cache_delays = []

    seconds_fr_nearest = get_seconds_fr_front(front_time, vector_seconds_ref, seconds_frame)
    if front_time is None:
        seconds_fr_nearest = find_nearest_seconds(vector_seconds_ref, seconds_frame)

    if seconds_fr_nearest >= -1:
        try:
            [delay, cache_delays] = get_delay_cache(seconds_fr_nearest, pair_st_so, params_delays, cache_delays)
        except ValueError:
            print("zM\tWarning: could not get delay for pair " + pair_st_so + ", " + str(seconds_fr_nearest) + ", skipping frame")
            # Sentinel: marks this frame as skipped for the branch below.
            seconds_fr_nearest = -2

    if seconds_fr_nearest >= -1:
        [shift_int, fractional_sample_delay] = get_delay_shift_frac(delay, freq_sample, data_type)
        error_out = 0
    else:
        shift_int = -1
        delay = -1
        fractional_sample_delay = -1
        error_out = 1

    return [shift_int, delay, fractional_sample_delay, error_out, cache_delays]
from typing import Dict
from typing import Tuple
def classification_metrics_function(
    logits: jnp.ndarray,
    batch: base_model.Batch,
    target_is_onehot: bool = False,
    metrics: base_model.MetricNormalizerFnDict = _CLASSIFICATION_METRICS,
) -> Dict[str, Tuple[float, int]]:
    """Calculates metrics for the classification task.

    Each metric function is assumed to follow the API
    ``metric_fn(logits, targets, weights)`` and return an array of shape
    [batch_size]. Aggregate metrics are obtained by summing across all
    batches and dividing by the total number of samples; the caller is
    responsible for dividing by the returned normalizer when computing the
    mean of each metric.

    Args:
        logits: Output of model in shape [batch, length, num_classes].
        batch: Batch of data that has 'label' and optionally 'batch_mask'.
        target_is_onehot: If the target is a one-hot vector.
        metrics: The classification metrics to evaluate; maps metric name to
            a (metric_fn, normalizer_fn) pair.

    Returns:
        A dict keyed by metric name whose values are (metric, normalizer)
        tuples.
    """
    if target_is_onehot:
        one_hot_targets = batch['label']
    else:
        one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])

    weights = batch.get('batch_mask')  # batch_mask might not be defined

    # The psum is required for correct multihost evaluation: only host 0
    # reports metrics, so values must be aggregated across all hosts. It maps
    # [n_global_devices, batch_size] -> [batch_size] by summing over devices;
    # the outer sum then reduces over the batch dimension.
    return {
        name: model_utils.psum_metric_normalizer(
            (fns[0](logits, one_hot_targets, weights),
             fns[1](logits, one_hot_targets, weights)))
        for name, fns in metrics.items()
    }
import _io
def create_validator_delegation_withdrawal(
    params: DeployParameters,
    amount: int,
    public_key_of_delegator: PublicKey,
    public_key_of_validator: PublicKey,
    path_to_wasm: str
) -> Deploy:
    """Returns a standard withdraw delegation deploy.

    :param params: Standard parameters used when creating a deploy.
    :param amount: Amount in motes to be undelegated.
    :param public_key_of_delegator: Public key of delegator.
    :param public_key_of_validator: Public key of validator.
    :param path_to_wasm: Path to compiled delegate.wasm.
    :returns: A standard delegation-withdrawal deploy.
    """
    payment = create_standard_payment(
        constants.STANDARD_PAYMENT_FOR_DELEGATION_WITHDRAWAL
    )
    # Bug fix: the stdlib ``_io`` module has no ``read_wasm`` attribute, so
    # the original ``_io.read_wasm(path_to_wasm)`` raised AttributeError.
    # Read the compiled session wasm bytes directly from disk instead.
    with open(path_to_wasm, "rb") as wasm_file:
        wasm_bytes = wasm_file.read()
    session = ExecutableDeployItem_ModuleBytes(
        module_bytes=wasm_bytes,
        args=[
            create_deploy_argument(
                "amount",
                amount,
                create_cl_type_of_simple(CLTypeKey.U512)
            ),
            create_deploy_argument(
                "delegator",
                public_key_of_delegator,
                create_cl_type_of_simple(CLTypeKey.PUBLIC_KEY)
            ),
            create_deploy_argument(
                "validator",
                public_key_of_validator,
                create_cl_type_of_simple(CLTypeKey.PUBLIC_KEY)
            ),
        ]
    )
    return create_deploy(params, payment, session)
def get_host_finding_vulnerabilities_hr(vulnerabilities):
    """
    Prepare human readable json for "risksense-get-host-finding-detail" command.
    Including vulnerabilities details.

    :param vulnerabilities: vulnerabilities details from response.
    :return: list of dict
    """
    hr_rows = []
    for vuln in vulnerabilities:
        hr_rows.append({
            'Name': vuln.get('cve', ''),
            'V2/Score': vuln.get('baseScore', ''),
            'Threat Count': vuln.get('threatCount', ''),
            'Attack Vector': vuln.get('attackVector', ''),
            'Access Complexity': vuln.get('accessComplexity', ''),
            'Authentication': vuln.get('authentication', ''),
        })
    # Pad single-row output with an empty dict so the table renders
    # horizontally instead of as a vertical key/value list.
    if len(vulnerabilities) == 1:
        hr_rows.append({})
    return hr_rows
from typing import Optional
from typing import List
import copy
def train_on_file_dataset(
    train_dataset_path: str,
    valid_dataset_path: Optional[str],
    feature_ids: List[str],
    label_id: str,
    weight_id: Optional[str],
    model_id: str,
    learner: str,
    task: Optional[TaskType] = Task.CLASSIFICATION,
    generic_hparms: Optional[hyperparameter_pb2.GenericHyperParameters] = None,
    ranking_group: Optional[str] = None,
    uplift_treatment: Optional[str] = None,
    training_config: Optional[abstract_learner_pb2.TrainingConfig] = None,
    deployment_config: Optional[abstract_learner_pb2.DeploymentConfig] = None,
    guide: Optional[data_spec_pb2.DataSpecificationGuide] = None,
    model_dir: Optional[str] = None,
    keep_model_in_resource: Optional[bool] = True,
    working_cache_path: Optional[str] = None,
    distribution_config: Optional[DistributionConfiguration] = None,
    try_resume_training: Optional[bool] = False) -> tf.Operation:
    """Trains a model on dataset stored on file.

    The input arguments and overall logic of this OP is similar to the ":train"
    CLI or the "learner->Train()" method of Yggdrasil Decision Forests (in fact,
    this OP simply calls "learner->Train()").

    Similarly as the `train` method, the implementation of the learning
    algorithm should be added as a dependency to the binary. Similarly, the
    implementation of the dataset format should be added as a dependency to
    the binary.

    In the case of distributed training, `train_on_file_dataset` should only be
    called by the `chief` process, and `deployment_config` should contain the
    address of the workers.

    Args:
      train_dataset_path: Path to the training dataset.
      valid_dataset_path: Path to the validation dataset.
      feature_ids: Ids/names of the input features.
      label_id: Id/name of the label feature.
      weight_id: Id/name of the weight feature.
      model_id: Id of the model.
      learner: Key of the learner.
      task: Task to solve.
      generic_hparms: Hyper-parameter of the learner.
      ranking_group: Id of the ranking group feature. Only for ranking.
      uplift_treatment: Id of the uplift treatment group feature. Only for
        uplift.
      training_config: Training configuration.
      deployment_config: Deployment configuration (e.g. where to train the
        model).
      guide: Dataset specification guide.
      model_dir: If specified, export the trained model into this directory.
      keep_model_in_resource: If true, keep the model as a training model
        resource.
      working_cache_path: Path to the working cache directory. If set, and if
        the training is distributed, all the workers should have write access
        to this cache.
      distribution_config: Socket addresses of the workers for distributed
        training.
      try_resume_training: Try to resume the training from the
        "working_cache_path" directory. If the "working_cache_path" does not
        contain any checkpoint, start the training from the start.

    Returns:
      The OP that triggers the training.
    """
    if generic_hparms is None:
        generic_hparms = hyperparameter_pb2.GenericHyperParameters()

    # Deep-copy caller-supplied protos so the in-place edits below do not
    # mutate the caller's objects.
    if training_config is None:
        training_config = abstract_learner_pb2.TrainingConfig()
    else:
        training_config = copy.deepcopy(training_config)

    if deployment_config is None:
        deployment_config = abstract_learner_pb2.DeploymentConfig()
    else:
        deployment_config = copy.deepcopy(deployment_config)

    if guide is None:
        guide = data_spec_pb2.DataSpecificationGuide()

    if ranking_group is not None:
        training_config.ranking_group = ranking_group
    if uplift_treatment is not None:
        training_config.uplift_treatment = uplift_treatment

    # Set the method argument into the proto configs.
    if learner:
        training_config.learner = learner
    training_config.task = task
    training_config.label = label_id
    if weight_id is not None:
        training_config.weight_definition.attribute = weight_id
        training_config.weight_definition.numerical.SetInParent()
    for feature_id in feature_ids:
        training_config.features.append(normalize_inputs_regexp(feature_id))

    if working_cache_path is not None:
        deployment_config.cache_path = working_cache_path

    if try_resume_training:
        # Resuming requires a persistent location for checkpoints.
        if working_cache_path is None:
            raise ValueError("Cannot train a model with `try_resume_training=True` "
                             "without a working cache directory.")
        deployment_config.try_resume_training = True

    if distribution_config is not None:
        # Distributed training implies checkpoint-based resumption.
        deployment_config.try_resume_training = True
        deployment_config.distribute.implementation_key = "TF_DIST"
        if distribution_config.workers_addresses is not None:
            dst_addresses = deployment_config.distribute.Extensions[
                tf_distribution_pb2.tf_distribution].addresses
            dst_addresses.addresses[:] = distribution_config.workers_addresses
        else:
            # Assume the worker paths are provided through the env.
            deployment_config.distribute.Extensions[
                tf_distribution_pb2.tf_distribution].environment_variable.SetInParent(
                )

    # Protos are passed to the OP as serialized strings.
    return training_op.SimpleMLModelTrainerOnFile(
        train_dataset_path=train_dataset_path,
        valid_dataset_path=valid_dataset_path if valid_dataset_path else "",
        model_id=model_id if keep_model_in_resource else "",
        model_dir=model_dir or "",
        hparams=generic_hparms.SerializeToString(),
        training_config=training_config.SerializeToString(),
        deployment_config=deployment_config.SerializeToString(),
        guide=guide.SerializeToString())
def db_fixture():
    """Return the module-level ``db`` object for use in tests.

    :return: the shared ``db`` handle defined elsewhere in this module.
    """
    return db
def login_required(route_function):
    """
    Decorator that restricts a route to logged-in users.

    This looks convoluted at first sight, but it is the standard pattern for
    implementing a decorator: the wrapper resolves the current user from the
    request and either redirects anonymous users to /login or forwards the
    request to the wrapped route function.
    """
    def f(request):
        u = current_user(request)
        if u is None:
            log('非登录用户')
            return redirect('/login')
        else:
            return route_function(request)
    return f
def cuTypeConverter(cuType):
    """ Converts calendar user types to OD type names """
    # Returns a (key, value) pair: the key is always "recordType"; the value
    # is the record type resolved from the calendar-user type by the mixin.
    return "recordType", CalendarDirectoryRecordMixin.fromCUType(cuType)
def add_viz_sphere(
    sim: habitat_sim.Simulator, radius: float, pos: mn.Vector3
) -> habitat_sim.physics.ManagedRigidObject:
    """
    Add a visualization-only sphere to the world at a global position.
    Returns the new object.
    """
    obj_attr_mgr = sim.get_object_template_manager()
    # Base the marker on the built-in wireframe icosphere asset.
    sphere_template = obj_attr_mgr.get_template_by_handle(
        obj_attr_mgr.get_template_handles("icosphereWireframe")[0]
    )
    sphere_template.scale = mn.Vector3(radius)
    # Register a dedicated template so repeated calls reuse the same handle.
    obj_attr_mgr.register_template(sphere_template, "viz_sphere")
    new_object = sim.get_rigid_object_manager().add_object_by_template_handle(
        "viz_sphere"
    )
    # Kinematic and non-collidable: the marker never disturbs the physics sim.
    new_object.motion_type = habitat_sim.physics.MotionType.KINEMATIC
    new_object.collidable = False
    new_object.translation = pos
    return new_object
import torch
def get_normalize_layer(dataset: str) -> torch.nn.Module:
    """Return the dataset's normalization layer.

    :param dataset: dataset name; one of "imagenet" or "cifar10".
    :return: a ``NormalizeLayer`` configured with the dataset's statistics.
    :raises ValueError: if the dataset is unknown. (The original silently
        returned ``None``, which only failed later at use time.)
    """
    if dataset == "imagenet":
        return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
    elif dataset == "cifar10":
        return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
    raise ValueError(f"unknown dataset: {dataset!r}")
def summer_69(arr):
    """
    Return the sum of the numbers in the array, except ignore sections of
    numbers starting with a 6 and extending to the next 9 (every 6 will be
    followed by at least one 9). Return 0 for no numbers.

    :param arr: list of integers
    :return: int
    """
    total = 0
    skipping = False
    for value in arr:
        if skipping:
            # Inside a 6..9 section: drop values until the closing 9
            # (the 9 itself is also excluded from the sum).
            if value == 9:
                skipping = False
        elif value == 6:
            skipping = True
        else:
            total += value
    return total
def get_notebook_title(nb_json, default=None):
    """Determine a suitable title for the notebook.

    Returns the source text of the first heading cell; if the notebook has no
    heading cells, returns *default*.
    """
    for cell in nb_json['cells']:
        if cell['cell_type'] == 'heading':
            return cell['source']
    return default
import os
import sys
def get_local_server_dir(subdir = None):
    """
    Get the directory at the root of the venv.

    :param subdir: optional sub-directory name to append to the root.
    :return: absolute path string.
    """
    # Resolve three levels up from the interpreter binary
    # (<root>/<venv>/bin/python -> <root>).
    root_dir = os.path.abspath(os.path.join(sys.executable, '..', '..', '..'))
    return root_dir if subdir is None else os.path.join(root_dir, subdir)
import torch
def extract_sequence(sent,
                     annotations,
                     sources,
                     label_indices):
    """
    Convert the annotations of a spacy document into an array of observations of shape
    (nb_sources, nb_bio_labels)
    """
    # One row per token, one column per source, one channel per BIO label.
    # Channel 0 is the "O" (no-label) channel and is the default for every
    # token until an annotation overrides it.
    sequence = torch.zeros([len(sent), len(sources), len(label_indices)], dtype=torch.float)
    for i, source in enumerate(sources):
        sequence[:, i, 0] = 1.0
        # NOTE(review): the assert's message expression calls logger.error,
        # which returns None — on failure the message is logged but the
        # AssertionError itself carries no text; asserts are also stripped
        # under ``python -O``. Consider raising explicitly.
        assert source in annotations, logger.error(f"source name {source} is not included in the data")
        for (start, end), vals in annotations[source].items():
            for label, conf in vals:
                if start >= len(sent):
                    # Span starts beyond the sentence: drop it entirely.
                    logger.warning("Encountered incorrect annotation boundary")
                    continue
                elif end > len(sent):
                    # Span overruns the sentence: clip it to the sentence end.
                    logger.warning("Encountered incorrect annotation boundary")
                    end = len(sent)
                # Clear the "O" channel for the covered tokens, then write the
                # confidence into B- for the first token and I- for the rest.
                sequence[start:end, i, 0] = 0.0
                sequence[start, i, label_indices["B-%s" % label]] = conf
                if end - start > 1:
                    sequence[start + 1: end, i, label_indices["I-%s" % label]] = conf
    return sequence
def is_valid_password_1(password):
    """
    A password is valid when it has at least one pair of identical adjacent
    characters and its characters never decrease left to right.

    >>> is_valid_password_1("111111")
    True
    >>> is_valid_password_1("223450")
    False
    >>> is_valid_password_1("123789")
    False
    """
    adjacent_pairs = list(zip(password, password[1:]))
    has_double = any(a == b for a, b in adjacent_pairs)
    never_decreases = all(a <= b for a, b in adjacent_pairs)
    return has_double and never_decreases
def mean(x, axis=None, keepdims=False):
    """Mean of a tensor, alongside the specified axis.

    Parameters
    ----------
    x: A tensor or variable.
    axis: A list of integer. Axes to compute the mean.
    keepdims: A boolean, whether to keep the dimensions or not.
        If keepdims is False, the rank of the tensor is reduced
        by 1 for each entry in axis. If keepdims is True,
        the reduced dimensions are retained with length 1.

    Returns
    -------
    A tensor with the mean of elements of x.
    """
    axis = _normalize_axis(axis, get_ndim(x))
    # Booleans cannot be averaged directly; promote to float first.
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, tf.float32)
    # NOTE(review): ``keep_dims`` is the TF1 argument name; TF2 renamed it to
    # ``keepdims`` — confirm which TensorFlow version this backend targets.
    return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
def frame_pass_valid_sample_criteria(frame, image_type):
    """Returns whether a frame matches type criteria.

    Currently delegates entirely to ``frame_image_type_match``.
    """
    return frame_image_type_match(frame, image_type)
def embed(tokenizer, text):
    """
    Embeds a text sequence using BERT tokenizer

    :param tokenizer: tokenizer exposing ``tokenize`` and
        ``convert_tokens_to_ids``.
    :param text: text to be embedded
    :return: embedded sequence (text -> tokens -> ids)
    """
    tokens = tokenizer.tokenize(text)
    return tokenizer.convert_tokens_to_ids(tokens)
def exponent_fmt(x, pos):
    """ The two args are the value and tick position.

    The axis carries log10 values; the label shows 10**x rounded to an
    integer (pos is unused but required by the FuncFormatter API).
    """
    return f"{10 ** x:.0f}"
def _unpack(arr, extent, order='C'):
"""
This is a helper method that handles the initial unpacking of a data array.
ParaView and VTK use Fortran packing so this is convert data saved in
C packing to Fortran packing.
"""
n1,n2,n3 = extent[0],extent[1],extent[2]
if order == 'C':
arr = np.reshape(arr, (n1,n2,n3))
arr = np.swapaxes(arr,0,2)
extent = np.shape(arr)
elif order == 'F':
# effectively doing nothing
#arr = np.reshape(arr, (n3,n2,n1))
return arr.flatten(), extent
return arr.flatten(), extent | 2d7054da8ffc5773bfd151973bf3b06c84c2e735 | 25,822 |
import torch
def unzip(list):
    """Unzip a list of (tensor, loss) tuples.

    Args:
        list: iterable of (segmented_tensor, loss_tensor) pairs.

    Returns:
        Tuple of (all segment tensors concatenated, mean of the concatenated
        losses).
    """
    tensors, losses = zip(*list)
    return torch.cat(tensors), torch.cat(losses).mean()
def ergs_to_lsun(luminosity):
    """
    Convert luminosity from erg/s to solar luminosities (Lsun).

    :param luminosity: value(s) interpreted as erg/s (plain numbers are
        wrapped; an astropy Quantity is converted).
    :return: astropy Quantity in units of L_sun.
    """
    lum = u.Quantity(luminosity, u.erg / u.s)
    return lum.to(u.L_sun)
def decode(code, P):
    """
    Decode an RNS representation array into decimal number

    :param code: residues, ordered to match ``P``.
    :param P: list of moduli in order from bigger to smaller [pn, .., p2, p1, p0]

    >>> decode(code=[5, 3, 1], P=[7,6,5])
    201
    """
    # lcms[i] = lcm of all moduli from position i (inclusive) to the end,
    # built right-to-left via accumulate, then reversed back.
    lcms = np.fromiter(accumulate(P[::-1], np.lcm), int)[::-1]
    # Start from the residue of the smallest modulus.
    n = code[-1] % P[-1]
    for i in range(1, len(P)):
        # Adding bottom_p keeps all already-satisfied residues unchanged.
        bottom_p = lcms[-i]
        per_diff = bottom_p % P[-i - 1]  # residue step per bottom_p added
        current_next = n % P[-i - 1]
        wanted_next = code[-i - 1] % P[-i - 1]
        if wanted_next < current_next:
            wanted_next = wanted_next + P[-i - 1]
        distance = wanted_next - current_next
        distance = distance % P[-i - 1]
        if distance > 0:
            # Number of bottom_p multiples needed so the next residue matches
            # (modular linear equation solved by the project's solve()).
            bottomp_scroll_count = solve(a=per_diff, m=P[-i - 1], k=distance, allow_zero=True)
            n = n + bottomp_scroll_count * bottom_p
    return n
def is_collection(obj):
    """
    Check if a object is iterable.

    Strings are explicitly excluded even though they are iterable.

    :return: Result of check.
    :rtype: bool
    """
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
import hashlib
def file_md5(fpath):
    """Return the MD5 digest for the given file"""
    digest = hashlib.md5()
    with open(fpath, 'rb') as fh:
        # Stream in 4 KiB chunks so large files are never fully in memory.
        for chunk in iter(lambda: fh.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
import os
def strip_path(full_path):
    """Returns the filename part of full_path with any directory path removed.

    :meta private:
    """
    _, filename = os.path.split(full_path)
    return filename
import os
def update():
    """
    Updates the Database

    Shells out to ``git submodule update --recursive --remote`` to pull the
    latest data submodules; requires git and must run inside the repository.

    Returns
    -------
    None.
    """
    os.system("git submodule update --recursive --remote")
    return None
# %% Load USA data | 3c9b7817c6512fd7fe018d3642fe6e9106d85b7c | 25,829 |
def move(column, player):
    """Apply player move to the given column.

    Drops ``player`` into the first empty (None) slot found by ``_index_of``.
    Returns True on success; prints a message and returns False when the
    column is full.
    """
    slot = _index_of(column, None)
    if slot >= 0:
        column[slot] = player
        return True
    print('Entire column is occupied')
    return False
def calculate_costs(points, centric_point):
    """ Returns the accumulated costs of all point in `points` from the centric_point """
    if len(points) == 1:
        return points[0].hyp()
    squared_offsets = (points - centric_point) ** 2
    return np.array([offset.hyp() for offset in squared_offsets]).sum()
def formatUs(time):
    """Format human readable time (input in us)."""
    value = time
    # Step up through the units, dividing by 1000 each time the value is
    # still too large; seconds is the final fallback.
    for unit in ("us", "ms"):
        if value < 1000:
            return f"{value:.2f} {unit}"
        value = value / 1000
    return f"{value:.2f} s"
from typing import Dict
from typing import List
from typing import Union
def parse_annotations(ann_filepath: str) -> Dict[int, List[Label]]:
    """Parse annotation file into List of Scalabel Label type per frame."""
    outputs = defaultdict(list)
    for line in load_file_as_list(ann_filepath):
        # Comma-separated record; per the indexing below: fields 0-1 are
        # frame id and instance id, 2-5 the box, 7 the class id, 8 visibility.
        gt = line.strip().split(",")
        class_id = gt[7]
        # Skip classes without a known mapping or explicitly ignored ones.
        if class_id not in NAME_MAPPING:
            continue
        class_name = NAME_MAPPING[class_id]
        if class_name in IGNORE:
            continue
        frame_id, ins_id = (int(x) for x in gt[:2])
        box2d = bbox_to_box2d([float(x) for x in gt[2:6]])
        attrs: Dict[str, Union[bool, float, str]] = dict(
            visibility=float(gt[8])
        )
        ann = Label(
            category=class_name,
            id=ins_id,
            box2d=box2d,
            attributes=attrs,
        )
        outputs[frame_id].append(ann)
    return outputs
import os
def bel_graph_loader(from_dir: str) -> BELGraph:
    """Obtains a combined BELGraph from all the BEL documents in one folder.

    :param from_dir: The folder with the BEL documents.
    :return: A corresponding BEL Graph.
    """
    logger.info("Loading BEL Graph.")
    # Bug fix: ``os.path`` has no ``listdir`` — directory listing lives on the
    # ``os`` module itself.
    files = [
        os.path.join(from_dir, file)
        for file
        in os.listdir(from_dir)
        if os.path.isfile(os.path.join(from_dir, file))
    ]
    bel_files = [file for file in files if file[-4:].lower() == '.bel']
    # Bug fix: ``os.path`` has no ``from_path`` either — this is the BEL
    # document loader (presumably ``pybel.from_path``; confirm the import).
    bel_graphs = [from_path(file) for file in bel_files]
    return union(bel_graphs)
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from typing import Any
def apply(lang1: Dict[List[str], float], lang2: Dict[List[str], float], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> float:
    """
    Calculates the EMD distance between the two stochastic languages

    Parameters
    -------------
    lang1
        First language
    lang2
        Second language
    parameters
        Parameters of the algorithm, including:
        - Parameters.STRING_DISTANCE: function that accepts two strings and returns a distance

    Returns
    ---------------
    emd_dist
        EMD distance
    """
    if parameters is None:
        parameters = {}
    # Pluggable trace distance; defaults to normalized Levenshtein.
    distance_function = exec_utils.get_param_value(Parameters.STRING_DISTANCE, parameters, normalized_levensthein)
    # Encode both languages over a shared alphabet so traces are comparable.
    enc1, enc2 = encode_two_languages(lang1, lang2, parameters=parameters)
    # transform everything into a numpy array: x[0] is the encoded trace,
    # x[1] its probability mass.
    first_histogram = np.array([x[1] for x in enc1])
    second_histogram = np.array([x[1] for x in enc2])
    # including a distance matrix that includes the distance between
    # the traces
    distance_matrix = []
    for x in enc1:
        distance_matrix.append([])
        for y in enc2:
            # calculates the (normalized) distance between the strings
            dist = distance_function(x[0], y[0])
            distance_matrix[-1].append(float(dist))
    distance_matrix = np.array(distance_matrix)
    ret = emd(first_histogram, second_histogram, distance_matrix)
    return ret
import collections
def reInpainting(image, ground_truth, teethColor):
    """
    If a pixel carries the pink teeth marker but the corresponding
    ground-truth pixel is not within the teeth colour range, fill the
    ground-truth pixel with teethColor.

    :param image: marker image; pink pixels (255, 0, 255) flag teeth regions.
    :param ground_truth: image to be corrected in place.
    :param teethColor: reference teeth colour used for the range test and fill.
    :return: the (modified) ground_truth image.
    """
    isTeeth, isNotTeeth = 0, 0
    threshold = calculateThreshhold(image, teethColor)
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            pixel = image[i][j]
            # Bug fix: the original compared Counter(pixel) == Counter(pink),
            # which is order-insensitive and would also match e.g.
            # [0, 255, 255]. Compare channels positionally instead.
            if tuple(pixel) == (255, 0, 255):
                if isTeethColor(ground_truth[i][j], teethColor, threshold):
                    isTeeth = isTeeth + 1
                else:
                    # Channels are reversed on write — teethColor appears to
                    # be RGB while ground_truth is BGR; TODO confirm.
                    ground_truth[i][j] = [teethColor[2], teethColor[1], teethColor[0]]
                    isNotTeeth = isNotTeeth + 1
    return ground_truth
def data_for_cylinder_along_z(center_x, center_y, radius, height_z):
    """
    Method for creating grid for cylinder drawing. Cylinder will be created along Z axis

    :param center_x: Euclidean 3 dimensional center of drawing on X axis
    :param center_y: Euclidean 3 dimensional center of drawing on Y axis
    :param radius: cylinder radius
    :param height_z: cylinder height
    :return: Three lists with grid coordinates for z, y, x sequentially
    """
    # 50 samples along the height and 50 around the circumference.
    heights = np.linspace(0, height_z, 50)
    angles = np.linspace(0, 2 * np.pi, 50)
    angle_grid, z_grid = np.meshgrid(angles, heights)
    x_grid = center_x + radius * np.cos(angle_grid)
    y_grid = center_y + radius * np.sin(angle_grid)
    return z_grid, y_grid, x_grid
def _configure_learning_rate(num_samples_per_epoch, global_step):
    """Configures the learning rate.

    Args:
        num_samples_per_epoch: The number of samples in each epoch of training.
        global_step: The global_step tensor.

    Returns:
        A `Tensor` representing the (exponentially decayed) learning rate.
    """
    # Decay every FLAGS.num_epochs_per_decay epochs, where one epoch is
    # num_samples / (batch_size * num_clones) steps.
    decay_steps = int(num_samples_per_epoch / (FLAGS.batch_size * FLAGS.num_clones) *
                      FLAGS.num_epochs_per_decay)
    return tf.train.exponential_decay(FLAGS.learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True,
                                      name='exponential_decay_learning_rate')
def cpm(adata: ad.AnnData) -> ad.AnnData:
    """Normalize data to counts per million.

    Normalization is performed in place by the private ``_cpm`` helper; the
    same object is returned for chaining.
    """
    _cpm(adata)
    return adata
def argon2_key(encryption_password, salt):
    """
    Generates an encryption key from a password using the Argon2id KDF.

    :param encryption_password: password string; encoded as UTF-8 before
        hashing.
    :param salt: salt bytes.
    :return: 32-byte raw key (hash_len=32), derived with the RFC 9106
        "low memory" cost parameters.
    """
    return argon2.low_level.hash_secret_raw(encryption_password.encode('utf-8'), salt,
        time_cost=RFC_9106_LOW_MEMORY.time_cost, memory_cost=RFC_9106_LOW_MEMORY.memory_cost,
        parallelism=RFC_9106_LOW_MEMORY.parallelism, hash_len=32, type=argon2.low_level.Type.ID)
def print_policy_analysis(policies, game, verbose=False):
    """Function printing policy diversity within game's known policies.

    Warning : only works with deterministic policies.

    Args:
        policies: List of list of policies (One list per game player)
        game: OpenSpiel game object.
        verbose: Whether to print policy diversity information. (True : print)

    Returns:
        List of sets of unique policy fingerprints (One set per player)
    """
    # np.infty was removed in NumPy 2.0; np.inf is the canonical spelling.
    states_dict = get_all_states.get_all_states(game, np.inf, False, False)
    unique_policies = []
    for player in range(len(policies)):
        cur_policies = policies[player]
        cur_set = set()
        for pol in cur_policies:
            # Serialize the argmax action(s) at every state owned by this
            # player; equal strings <=> equal deterministic policies.
            cur_str = ""
            for state_str in states_dict:
                if states_dict[state_str].current_player() == player:
                    pol_action_dict = pol(states_dict[state_str])
                    max_prob = max(list(pol_action_dict.values()))
                    max_prob_actions = [
                        a for a in pol_action_dict if pol_action_dict[a] == max_prob
                    ]
                    cur_str += "__" + state_str
                    for a in max_prob_actions:
                        cur_str += "-" + str(a)
            cur_set.add(cur_str)
        unique_policies.append(cur_set)
    if verbose:
        print("\n=====================================\nPolicy Diversity :")
        for player, cur_set in enumerate(unique_policies):
            print("Player {} : {} unique policies.".format(player, len(cur_set)))
        print("")
    return unique_policies
def model_fn(is_training=True, **params):
    """
    Build the classifier: a MobileNetV2 backbone (ImageNet weights, global
    average pooling) topped with a softmax Dense layer of
    ``params['n_class']`` units, wrapped in CustomModel.
    Args:
        is_training (bool): when False, the entire model is frozen.
        params: keyword arguments; must contain 'n_class'.
    """
    backbone = MobileNetV2(
        include_top=False, weights='imagenet',
        input_shape=(224, 224, 3), pooling="avg")
    classifier_head = tf.keras.layers.Dense(
        params['n_class'], activation="softmax",
        name="softmax_layer")(backbone.output)
    model = CustomModel(inputs=backbone.input, outputs=classifier_head)
    if not is_training:
        # Inference mode: freeze every layer.
        model.trainable = False
    return model
import argparse
def make_arg_parser():
    """
    Build the command-line argument parser for the WHOIS scraper.
    """
    arg_parser = argparse.ArgumentParser(description="Scrap WHOIS data.")
    arg_parser.add_argument("--config", help="uwhoisd configuration")
    arg_parser.add_argument(
        "--log",
        default="warning",
        choices=["critical", "error", "warning", "info", "debug"],
        help="Logging level",
    )
    arg_parser.add_argument(
        "--ipv4", action="store_true", help="Scrape IPv4 assignments"
    )
    # Exactly one scraping mode has to be chosen.
    mode_group = arg_parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        "--new-only",
        action="store_true",
        help="Only scrape new zones (requires config)",
    )
    mode_group.add_argument(
        "--full", action="store_true", help="Do a full zone scrape"
    )
    return arg_parser
def roundToElement(dateTime, unit):
    """ Returns a copy of dateTime rounded to given unit
    :param datetime.datetime dateTime: date time object
    :param DtUnit unit: unit
    :return: datetime.datetime
    """
    # The seven datetime fields, coarsest first, and the minimal value each
    # field takes when it is finer-grained than the requested unit
    # (months/days start at 1, the time fields at 0).
    fields = [dateTime.year, dateTime.month, dateTime.day, dateTime.hour,
              dateTime.minute, dateTime.second, dateTime.microsecond]
    resolutions = [DtUnit.YEARS, DtUnit.MONTHS, DtUnit.DAYS, DtUnit.HOURS,
                   DtUnit.MINUTES, DtUnit.SECONDS, DtUnit.MICRO_SECONDS]
    minima = [None, 1, 1, 0, 0, 0, 0]  # years are never rounded
    for pos in range(1, len(fields)):
        if unit.value < resolutions[pos].value:
            fields[pos] = minima[pos]
    return dt.datetime(*fields, tzinfo=dateTime.tzinfo)
def tuple_factory(colnames, rows):
    """
    Returns each row as a tuple
    Example::
        >>> from cassandra.query import tuple_factory
        >>> session = cluster.connect('mykeyspace')
        >>> session.row_factory = tuple_factory
        >>> rows = session.execute("SELECT name, age FROM users LIMIT 1")
        >>> print rows[0]
        ('Bob', 42)
    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    # The rows already arrive in tuple form from the protocol layer, so the
    # column names are ignored and the rows are passed through unchanged.
    return rows
def is_error(splunk_record_key):
    """Return True when the given string is the literal error key.

    :param splunk_record_key: The string to check
    :type splunk_record_key: str
    :rtype: bool
    """
    return splunk_record_key == 'error'
def parse_network_info(net_bond, response_json):
    """
    Build a human-readable summary of one bond interface for every node.

    :param net_bond: bond name, e.g. 'Bond1G' or 'Bond10G'
    :param response_json: parsed JSON reply containing result.nodes
    :return: for 'Bond10G' just the summary dict; for any other bond a
        (summary dict, list of bond addresses) tuple
    """
    out_dict = {}
    ip_list = []
    # Build individual node information. The original nested self-join over
    # the node list was O(n^2) for no benefit; a single pass is equivalent
    # since node IDs are unique.
    for node in response_json['result']['nodes']:
        node_id = str(node['nodeID'])
        n_id = "Node ID " + node_id
        net_result = node['result']['network'][net_bond]
        out_dict['------' + n_id + ' ------'] = \
            '--------------------------'
        out_dict[n_id + ' Bond name'] = net_bond
        out_dict[n_id + ' Address'] = net_result['address']
        out_dict[n_id + ' Netmask'] = net_result['netmask']
        out_dict[n_id + ' Gateway'] = net_result['gateway']
        out_dict[n_id + ' Bond mode'] = net_result['bond-mode']
        out_dict[n_id + ' MTU'] = net_result['mtu']
        out_dict[n_id + ' Link speed'] = net_result['linkSpeed']
        if net_bond == 'Bond1G':
            # DNS settings only live on the management (1G) bond.
            out_dict[n_id + ' DNS servers'] = net_result['dns-nameservers']
            out_dict[n_id + ' DNS search'] = net_result['dns-search']
        ip_list.append(net_result['address'])
    if net_bond != 'Bond10G':
        return out_dict, ip_list
    return out_dict
async def create_or_update(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 1.0.0
    Create or update a network security group.
    :param name: The name of the network security group to create.
    :param resource_group: The resource group name assigned to the
        network security group.
    :return: the security group serialized to a dict on success, or a dict
        with an ``error`` key on failure.
    CLI Example:
    .. code-block:: bash
        azurerm.network.network_security_group.create_or_update testnsg testgroup
    """
    # Default the location from the resource group when the caller did not
    # supply one explicitly.
    if "location" not in kwargs:
        rg_props = await hub.exec.azurerm.resource.group.get(
            ctx, resource_group, **kwargs
        )
        if "error" in rg_props:
            log.error("Unable to determine location from resource group specified.")
            return {
                "error": "Unable to determine location from resource group specified."
            }
        kwargs["location"] = rg_props["location"]
    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
    # Build the NetworkSecurityGroup model object from the keyword arguments.
    try:
        secgroupmodel = await hub.exec.azurerm.utils.create_object_model(
            "network", "NetworkSecurityGroup", **kwargs
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result
    try:
        # create_or_update returns a long-running-operation poller; block
        # until it completes before serializing the result.
        secgroup = netconn.network_security_groups.create_or_update(
            resource_group_name=resource_group,
            network_security_group_name=name,
            parameters=secgroupmodel,
        )
        secgroup.wait()
        secgroup_result = secgroup.result()
        result = secgroup_result.as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
        result = {"error": str(exc)}
    except SerializationError as exc:
        result = {
            "error": "The object model could not be parsed. ({0})".format(str(exc))
        }
    return result
def download_media_suite(req, domain, app_id):
    """
    Serve the application's generated media suite as a plain HTTP response.

    See Application.create_media_suite
    """
    return HttpResponse(
        req.app.create_media_suite()
    )
def non_numeric(string: str) -> str:
    """ Strip every digit character from the string and return the rest """
    return ''.join(filter(lambda ch: not ch.isdigit(), string))
import re
def prepare_xs(path, numbergroup=1):
    """Prepare the needed representation of cross-section data
    Parameters:
    -----------
    path : str
        filename of cross-section data
    numbergroup : int
        number of energies neutron multigroup
    Returns:
    --------
    energies : numpy array of length numbergroup + 1
        energy discretization by multigroups
    xslib : dict
        key : MT number (str), value : numpy array of cross-section values
    """
    def skip(ofile, number):
        # Advance the file iterator by `number` lines.
        for i in range(number):
            line = next(ofile)
    energies = np.zeros(numbergroup + 1)
    xslib = {}
    xs = []
    mtnum = ''
    with open(path, 'r') as f:
        for line in f:
            # Raw strings: "\w" / "\d" in a plain literal are invalid escape
            # sequences (DeprecationWarning today, a SyntaxError in future
            # Python releases).
            res = re.search(r"MT=\w*\d+", line)
            if res:
                mtnum = re.search(r"\d+", line).group()
                skip(f, 5)
                xs = np.zeros(numbergroup)
                # NOTE(review): `line` still holds the MT header here, so the
                # first pass of this loop parses the header line itself; this
                # relies on the specific input file format -- verify against a
                # sample data file.
                while(len(line.rstrip()) > 1):
                    dump = line.rstrip().split()
                    num = 0
                    en = 0.0
                    x = 0.0
                    # Data come in (group number, energy, cross-section)
                    # triplets laid out along the line.
                    for i, d in enumerate(dump):
                        if (i % 3 == 0):
                            num = int(d.rstrip())
                        if (i % 3 == 1):
                            en = float(d.rstrip())
                            if (num < numbergroup + 2):
                                # Record each group boundary only once.
                                if (energies[num - 1] == 0.0):
                                    energies[num - 1] = en
                        if (i % 3 == 2):
                            x = float(d.rstrip())
                            if (num < numbergroup + 1):
                                xs[num - 1] = x
                    line = next(f)
                # Keep only reactions with at least one non-zero value.
                if (sum(xs) > 0):
                    xslib[mtnum] = xs
    return energies, xslib
def shiftField(field, dz):
    """Shift the z-coordinate of every data object in *field* by *dz* (in place)."""
    for data_obj in field:
        if data_obj.ID == 'Polar Data':
            data_obj.set_RPhiZ(data_obj.r, data_obj.phi, data_obj.z + dz)
        elif data_obj.ID == 'Cartesian Data':
            data_obj.set_XYZ(data_obj.x, data_obj.y, data_obj.z + dz)
    return field
def pair_equality(dataframe, column_1, column_2, new_feature_name):
    """
    Add a binary feature that, for every row, is 1 if and only if the row
    holds equal values in the two given columns.

    :param dataframe:
        Dataframe to add feature to
    :param column_1:
        Name of first existing column
    :param column_2:
        Name of second existing column
    :param new_feature_name:
        Name of the new column to add
    :return:
        Modified version of given dataframe
    """
    equality_flags = dataframe.apply(
        lambda row: get_pair_equality(row, column_1, column_2), axis=1)
    dataframe[new_feature_name] = equality_flags
    return dataframe
import networkx
import itertools
def compute_diagram(PhenosObj, FnameJson=None, FnameImage=None, Silent=False):
    """
    todo: finish code
    todo: add unit tests
    computes the phenotype diagram from the phenotypes object obtained from :ref:`phenotypes_compute_json`.
    save the diagram as json data with *FnameJson*. useful for e.g. manually renaming nodes.
    **arguments**:
    * *PhenosObj* (dict): result of compute_json(..)
    * *FnameJson* (str): save diagram as json
    * *FnameImage* (str): generate image for diagram
    * *Silent* (bool): print infos to screen
    **returns**::
    * *Diagram* (networkx.DiGraph): the phenotype diagram
    **example**::
    >>> phenos = compute_json(attrobj, markers)
    >>> compute_diagram(phenos, FnameImage="phenos.pdf")
    created phenos.pdf
    """
    Primes = PhenosObj["primes"]
    Update = PhenosObj["update"]
    assert(Update in PyBoolNet.StateTransitionGraphs.UPDATE_STRATEGIES)
    assert(Primes)
    if not Silent:
        print("Phenotypes.compute_diagram(..)")
    diagram = networkx.DiGraph()
    # Copy all metadata from the phenotypes object onto the graph itself.
    for key in PhenosObj:
        diagram.graph[key] = PyBoolNet.Utility.Misc.copy_json_data(PhenosObj[key])
    # nodes: one candidate node per subset of phenotypes (itertools.product
    # over 0/1 inclusion flags); a node is kept only if some initial state
    # accepts exactly that subset (model-checked below).
    node_id = 0
    Flags = [[0,1]]*len(PhenosObj["phenotypes"])
    for i,flags in enumerate(itertools.product(*Flags)):
        stateformulas, names = [], []
        for j, flag in enumerate(flags):
            if flag:
                stateformulas.append(PhenosObj["phenotypes"][j]["stateformula"])
                names.append(PhenosObj["phenotypes"][j]["name"])
        stateformulas.sort()
        names = tuple(sorted(names))
        if not stateformulas:
            # Empty subset: states from which no phenotype is reachable.
            unreach = " & ".join("!EF({x})".format(x=x["stateformula"]) for x in PhenosObj["phenotypes"])
            spec = "CTLSPEC {x}".format(x=unreach)
        else:
            # States that can reach every phenotype of the subset and can
            # never leave the subset's reachability set.
            reach = ["EF({x})".format(x=x) for x in stateformulas]
            reach_all = " & ".join(reach)
            reach_some = " | ".join(reach)
            spec = "CTLSPEC {x} & AG({y})".format(x=reach_all,y=reach_some)
        init = "INIT TRUE"
        answer, accepting = PyBoolNet.ModelChecking.check_primes_with_acceptingstates(Primes, Update, init, spec)
        data = {"names": names,
                "init": init,
                "spec": spec,
                "initaccepting_size": accepting["INITACCEPTING_SIZE"],
                "initaccepting": accepting["INITACCEPTING"]}
        if data["initaccepting_size"]>0:
            if not Silent:
                print(" [{x}] = {y}".format(x=", ".join(names), y=data["initaccepting_size"]))
            diagram.add_node(node_id)
            for key, value in data.items():
                diagram.nodes[node_id][key] = value
            node_id+= 1
    # edges: an edge runs from a superset node to a subset node when some
    # accepted state of the source has a direct successor (EX) inside the
    # accepted states of the target.
    for source in diagram:
        for target in diagram:
            if source==target: continue
            sourceset = set(diagram.nodes[source]["names"])
            targetset = set(diagram.nodes[target]["names"])
            if targetset.issubset(sourceset):
                init = "INIT {x}".format(x=diagram.nodes[source]["initaccepting"])
                spec = "CTLSPEC EX({x})".format(x=diagram.nodes[target]["initaccepting"])
                answer, accepting = PyBoolNet.ModelChecking.check_primes_with_acceptingstates(Primes, Update, init, spec)
                if accepting["INITACCEPTING_SIZE"]>0:
                    data = {"init": init,
                            "spec": spec,
                            "initaccepting_size": accepting["INITACCEPTING_SIZE"],
                            "initaccepting": accepting["INITACCEPTING"]}
                    diagram.add_edge(source, target)
                    for key, value in data.items():
                        diagram.edges[source, target][key] = value
                    if not Silent:
                        print(" [{x}] --{s}--> [{y}]".format(
                            x=", ".join(diagram.nodes[source]["names"]),
                            s=data["initaccepting_size"],
                            y=", ".join(diagram.nodes[target]["names"])))
    if FnameImage:
        diagram2image(diagram, FnameImage)
    if FnameJson:
        save_diagram(diagram, FnameJson)
    return diagram
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of the 2 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    # Exactly two conv layers on the main path; the shortcut is the identity.
    filters1, filters2 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = Conv2D(filters1, kernel_size, padding = 'same',
               kernel_initializer='he_normal', name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters2, kernel_size, padding='same',
               kernel_initializer='he_normal', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    # Residual connection: add the unmodified input back in, then activate.
    x = layers.add([x, input_tensor])
    x = LeakyReLU()(x)
    return x
from typing import Optional
def find_linux_kernel_memory(
    pml4: PageTable, mem: Memory, mem_range: Interval
) -> Optional[MappedMemory]:
    """
    Return virtual and physical memory

    Scans the page-table entries of *pml4*, collects the physically
    contiguous run of mappings starting at or after ``mem_range.begin``,
    and returns that physical span mapped at the run's first virtual
    address. Returns None when no entry falls inside the range.

    NOTE(review): assumes iterating *pml4* yields entries in ascending
    virtual-address order -- confirm against the PageTable iterator.
    """
    # TODO: skip first level in page tables to speed up the search
    # i = get_index(mem_range.begin, 0)
    # pdt = page_table(mem, pml4.entries[i] & PHYS_ADDR_MASK)
    it = iter(pml4)
    first: Optional[PageTableEntry] = None
    # Find the first mapping at or above the requested range.
    for entry in it:
        if entry.virt_addr >= mem_range.begin:
            first = entry
            break
    if first is None:
        return None
    last: PageTableEntry = first
    # Extend the run while entries stay inside the range and remain
    # physically contiguous.
    for entry in it:
        if entry.virt_addr > mem_range.end:
            break
        if last.phys_addr + last.size != entry.phys_addr:
            print(
                "Kernel is not in physical-continous memory. Assuming vmsh memory allocation."
            )
            break
        last = entry
    phys_mem = mem[first.phys_addr : last.phys_addr + last.size]
    return phys_mem.map(first.virt_addr)
def cnn_encoder(inputs, is_train=True, reuse=False, name='cnnftxt', return_h3=False):
    """ 64x64 --> t_dim, for text-image mapping

    Four strided conv (+ batch-norm) stages halve the spatial size each
    time, followed by a dense embedding layer: z_dim units when the scope
    is named 'z_encoder', t_dim units otherwise.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 64  # base filter count; doubled at every downsampling stage
    with tf.variable_scope(name, reuse=reuse):
        tl.layers.set_name_reuse(True)
        net_in = InputLayer(inputs, name='/in')
        net_h0 = Conv2d(net_in, df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2),
                padding='SAME', W_init=w_init, name='cnnf/h0/conv2d')
        net_h1 = Conv2d(net_h0, df_dim*2, (4, 4), (2, 2), act=None,
                padding='SAME', W_init=w_init, b_init=None, name='cnnf/h1/conv2d')
        net_h1 = BatchNormLayer(net_h1, act=lambda x: tl.act.lrelu(x, 0.2),
                is_train=is_train, gamma_init=gamma_init, name='cnnf/h1/batch_norm')
        # if name != 'cnn': # debug for training image encoder in step 2
        #     net_h1 = DropoutLayer(net_h1, keep=0.8, is_fix=True, name='p/h1/drop')
        net_h2 = Conv2d(net_h1, df_dim*4, (4, 4), (2, 2), act=None,
                padding='SAME', W_init=w_init, b_init=None, name='cnnf/h2/conv2d')
        net_h2 = BatchNormLayer(net_h2, act=lambda x: tl.act.lrelu(x, 0.2),
                is_train=is_train, gamma_init=gamma_init, name='cnnf/h2/batch_norm')
        # if name != 'cnn': # debug for training image encoder in step 2
        #     net_h2 = DropoutLayer(net_h2, keep=0.8, is_fix=True, name='p/h2/drop')
        net_h3 = Conv2d(net_h2, df_dim*8, (4, 4), (2, 2), act=None,
                padding='SAME', W_init=w_init, b_init=None, name='cnnf/h3/conv2d')
        net_h3 = BatchNormLayer(net_h3, act=lambda x: tl.act.lrelu(x, 0.2),
                is_train=is_train, gamma_init=gamma_init, name='cnnf/h3/batch_norm')
        # if name != 'cnn': # debug for training image encoder in step 2
        #     net_h3 = DropoutLayer(net_h3, keep=0.8, is_fix=True, name='p/h3/drop')
        net_h4 = FlattenLayer(net_h3, name='cnnf/h4/flatten')
        net_h4 = DenseLayer(net_h4, n_units= (z_dim if name == 'z_encoder' else t_dim),
                act=tf.identity,
                W_init = w_init, b_init = None, name='cnnf/h4/embed')
    if return_h3:
        return net_h4, net_h3
    else:
        return net_h4
def cmp_id(cls1, cls2, idx1, idx2):
    """Count the particles shared by two clusters.
    Parameters
    ----------
    cls1,cls2: Cluster object
    idx1,idx2: Indices of detected particles in the clusters.
    Output
    ------
    The number of particle IDs present in both selections.
    """
    ids_first = cls1.gas_id[idx1]
    ids_second = cls2.gas_id[idx2]
    return len(np.intersect1d(ids_first, ids_second))
def get_clinical_cup():
    """
    Returns tuple with clinical cup description
    """
    cup_description = ("8", "2", "M", "01", 25)
    return cup_description
def to_utm_bbox(bbox: BBox) -> BBox:
    """Reproject *bbox* into the UTM zone containing its centre point.

    Bounding boxes already expressed in a UTM CRS are returned unchanged.

    :param bbox: bounding box
    :return: bounding box in UTM CRS
    """
    if CRS.is_utm(bbox.crs):
        return bbox
    center_lng, center_lat = bbox.middle
    utm_zone_crs = get_utm_crs(center_lng, center_lat, source_crs=bbox.crs)
    return bbox.transform(utm_zone_crs)
def define_actions(action):
    """
    Define the list of actions we are using.
    Args
      action: String with the passed action. Could be "all"
    Returns
      actions: List of strings of actions
    Raises
      ValueError if the action is not a known action name
    """
    actions = ["walking", "wiping", "lifting", "co-existing", "co-operating",
               "noise", "p1_1", "p1_2", "p2_1", "p5", "p7"]
    if action in actions:
        return [action]
    if action == "all":
        return actions
    if action == "all_srnn":
        return ["walking", "eating", "smoking", "discussion"]
    # The original `raise (ValueError, "... %d" % action)` raised a tuple
    # (a TypeError at runtime) and applied %d to a string; raise properly.
    raise ValueError("Unrecognized action: %s" % action)
def gen_order_history_sequence(uid, history_grouped, has_history_flag):
    """Build the per-day order-history sequence for one user.

    The sequence covers 311 days; slot i holds (as a string) the number of
    history rows with days_from_now == i, or '0' when there are none.
    """
    sequence = ['0'] * 311
    if has_history_flag == 0:
        return sequence
    df = history_grouped[uid]
    for day in df['days_from_now']:
        sequence[day] = str(df[df['days_from_now'] == day].shape[0])
    return sequence
def _get_plugin_type_ids():
    """Get the ID of each of Pulp's plugins.
    Each Pulp plugin adds one (or more?) content unit type to Pulp. Each of
    these content unit types is identified by a certain unique identifier. For
    example, the `Python type`_ has an ID of ``python_package``.
    :returns: A set of plugin IDs. For example: ``{'ostree',
        'python_package'}``.
    .. _Python type:
        http://pulp-python.readthedocs.org/en/latest/reference/python-type.html
    """
    cfg = config.get_config()
    client = api.Client(cfg, api.json_handler)
    return {plugin_type['id'] for plugin_type in client.get(PLUGIN_TYPES_PATH)}
def scale_48vcurrent(value, reverse=False, pcb_version=0):
    """
    Convert between raw register counts and Amps for the 48V current sensor.

    Raw values are hundredths of an Amp, positive only. The PCB version is
    accepted so a future board revision can change the scale/offset; no
    scaling currently depends on it.

    :param value: raw register contents (0-65535), or current in Amps
    :param reverse: True converts physical->raw instead of raw->physical
    :param pcb_version: integer PCB version number, 0-65535
    :return: current in Amps, or the raw 16-bit register value
    """
    if reverse:
        raw_counts = int(value * 100)
        return raw_counts & 0xFFFF
    return value / 100.0
import pickle
def get_graph(graph_name):
    """Return a graph, given either the file name of a pickled graph (to
    reuse a previously created one) or the graph object itself.

    :param graph_name: str file name (with or without the '.p' extension)
        or an already-built graph instance
    :return: the loaded (or passed-through) graph
    """
    if isinstance(graph_name, str):
        # Only string inputs are file names. The original ran the extension
        # check before the isinstance test, so graph objects were also put
        # through the '.p' membership test and add_extension().
        if '.p' not in graph_name:
            graph_name = add_extension(graph_name)
        graph_path = get_whole_path(graph_name, 'graphs')
        # Context manager guarantees the file handle is closed on error too.
        with open(graph_path, 'rb') as infile:
            G = pickle.load(infile)
    else:
        G = graph_name
    return G
import traceback
import sys
def max_function(context, nodeset, string):
    """
    The dyn:max function calculates the maximum value for the nodes passed as
    the first argument, where the value of each node is calculated dynamically
    using an XPath expression passed as a string as the second argument.
    http://www.exslt.org/dyn/functions/max/index.html
    """
    nodeset = nodeset.evaluate_as_nodeset(context)
    string = string.evaluate_as_string(context)
    try:
        expr = parse_xpath(string)
    except XPathError:
        # Per EXSLT, an unparsable expression is reported as a processor
        # warning and yields an empty nodeset instead of aborting.
        lines = traceback.format_exception(*sys.exc_info())
        lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
                      "lower-level traceback:\n") % {'expr': string}]
        context.processor.warning(''.join(lines))
        return datatypes.nodeset()
    # Evaluate the expression for every node and take the numeric maximum.
    return max(map(datatypes.number, _map(context, nodeset, expr)))
def get_transfer_encodings():
    """Return the supported content-transfer-encoding values.

    NOTE(review): under Python 3 ``dict.keys()`` is a view, not a list;
    callers that need list behaviour (indexing, concatenation) should wrap
    the result -- confirm which Python version this module targets.
    """
    return transfer_decoding_wrappers.keys()
def is_name_valid(name: str, rules: list) -> bool:
    """ Determine whether a name corresponds to a named rule. """
    return any(rule.name == name for rule in rules)
def fig2data(fig, imsize):
    """Render a Matplotlib figure into an RGB uint8 numpy array.

    :param fig: Matplotlib figure
    :param imsize: (width, height) of the rendered canvas in pixels
    :return: numpy array of shape (height, width, 3)
    """
    canvas = FigureCanvas(fig)
    ax = fig.gca()  # kept: ensures the figure has an axes before drawing
    canvas.draw()
    # np.frombuffer replaces np.fromstring, whose binary mode is deprecated
    # and removed in recent NumPy releases; .copy() restores the writable
    # array the old call returned (frombuffer yields a read-only view).
    image = np.frombuffer(canvas.tostring_rgb(), dtype="uint8").copy()
    width, height = imsize
    image = image.reshape((int(height), int(width), -1))
    return image
def draw_agent_trail(img, trail_data, rgb, vision):
    """ draw agent trail on the device with given color.
    Args:
        img : cv2 read image of device.
        trail_data : sequence of (x, y) trail points of the agent
        rgb : (r,g,b) tuple of rgb color
        vision : when True draw a thin (5 px) line, otherwise a thick (12 px) one
    Returns:
        img : updated image with agent trail drawn.
    """
    thickness = 5 if vision else 12
    # Connect each point to its predecessor.
    for prev_pt, cur_pt in zip(trail_data, trail_data[1:]):
        cv2.line(img, cur_pt, prev_pt, rgb, thickness)
    return img
def get_total_received_items(scorecard):
    """ Gets the total number of received shipments in the period (based on Purchase Receipts)"""
    supplier = frappe.get_doc('Supplier', scorecard.supplier)
    # Sum received quantities over all submitted (docstatus = 1) Purchase
    # Receipt Items whose parent receipt belongs to this supplier and was
    # posted within the scorecard period.
    data = frappe.db.sql("""
            SELECT
                SUM(pr_item.received_qty)
            FROM
                `tabPurchase Receipt Item` pr_item,
                `tabPurchase Receipt` pr
            WHERE
                pr.supplier = %(supplier)s
                AND pr.posting_date BETWEEN %(start_date)s AND %(end_date)s
                AND pr_item.docstatus = 1
                AND pr_item.parent = pr.name""",
            {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0)[0][0]
    # SUM() yields NULL (None) when no rows match; normalise to 0.
    if not data:
        data = 0
    return data
def generate_default_filters(dispatcher, *args, **kwargs):
    """
    Prepare filters
    :param dispatcher: for states
    :param args: already-instantiated custom filters, appended verbatim
    :param kwargs: filter name -> filter configuration; None values are skipped
    :return: list of filter instances
    """
    filters_list = []
    # Translate each named configuration into its filter class; string
    # configurations are normalised to single-element lists where needed.
    for name, filter_data in kwargs.items():
        if filter_data is None:
            # skip not setted filter names
            # Note that state by default is not None,
            # check dispatcher.storage for more information
            continue
        if name == DefaultFilters.REQUEST_TYPE:
            filters_list.append(RequestTypeFilter(filter_data))
        elif name == DefaultFilters.COMMANDS:
            if isinstance(filter_data, str):
                filters_list.append(CommandsFilter([filter_data]))
            else:
                filters_list.append(CommandsFilter(filter_data))
        elif name == DefaultFilters.STARTS_WITH:
            if isinstance(filter_data, str):
                filters_list.append(StartsWithFilter([filter_data]))
            else:
                filters_list.append(StartsWithFilter(filter_data))
        elif name == DefaultFilters.CONTAINS:
            if isinstance(filter_data, str):
                filters_list.append(ContainsFilter([filter_data]))
            else:
                filters_list.append(ContainsFilter(filter_data))
        elif name == DefaultFilters.STATE:
            # A collection of states gets the list variant of the filter.
            if isinstance(filter_data, (list, set, tuple, frozenset)):
                filters_list.append(StatesListFilter(dispatcher, filter_data))
            else:
                filters_list.append(StateFilter(dispatcher, filter_data))
        elif name == DefaultFilters.FUNC:
            filters_list.append(filter_data)
        elif name == DefaultFilters.REGEXP:
            filters_list.append(RegexpFilter(filter_data))
        elif isinstance(filter_data, Filter):
            # Pre-built Filter instances pass through under any name.
            filters_list.append(filter_data)
        else:
            log.warning('Unexpected filter with name %r of type `%r` (%s)',
                        name, type(filter_data), filter_data)
    filters_list += list(args)  # Some custom filters
    return filters_list
def linear_map(x, init_mat_params=None, init_b=None, mat_func=get_LU_map,
               trainable_A=True, trainable_b=True, irange=1e-10,
               name='linear_map'):
    """Return the linearly transformed, y^t = x^t * mat_func(mat_params) + b^t,
    log determinant of Jacobian and inverse map.
    Args:
        x: N x d real tensor of covariates to be linearly transformed.
        init_mat_params: tensor of parameters for linear map returned by
            mat_func(init_mat_params, b) (see get_LU_map above).
        init_b: d length tensor of biases.
        mat_func: function that returns matrix, log determinant, and inverse
            for linear mapping (see get_LU_map).
        trainable_A: boolean indicating whether to train matrix for linear
            map.
        trainable_b: boolean indicating whether to train bias for linear
            map.
        irange: half-width of the uniform default initializer for variables
            created in this scope; None falls back to the TF default.
        name: variable scope.
    Returns:
        z: N x d linearly transformed covariates.
        logdet: scalar, the log determinant of the Jacobian for transformation.
        invmap: function that computes the inverse transformation.
    """
    if irange is not None:
        initializer = tf.random_uniform_initializer(-irange, irange)
    else:
        initializer = None
    with tf.variable_scope(name, initializer=initializer):
        d = int(x.get_shape()[-1])
        # Matrix parameters: default-initialized to the identity so the map
        # starts out as (approximately) the identity transformation.
        if init_mat_params is None:
            # mat_params = tf.get_variable(
            #     'mat_params', dtype=tf.float32,
            #     shape=(d, d), trainable=trainable_A)
            mat_params = tf.get_variable(
                'mat_params', dtype=tf.float32,
                initializer=tf.eye(d, dtype=tf.float32),
                trainable=trainable_A)
        else:
            mat_params = tf.get_variable('mat_params', dtype=tf.float32,
                                         initializer=init_mat_params,
                                         trainable=trainable_A)
        # Bias: default-initialized to zeros.
        if init_b is None:
            # b = tf.get_variable('b', dtype=tf.float32, shape=(d,),
            #                     trainable=trainable_b)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=tf.zeros((d, ), tf.float32),
                                trainable=trainable_b)
        else:
            b = tf.get_variable('b', dtype=tf.float32, initializer=init_b,
                                trainable=trainable_b)
        A, logdet, invmap = mat_func(mat_params, b)
        z = tf.matmul(x, A) + tf.expand_dims(b, 0)
    return z, logdet, invmap
def isolated_70():
    """
    Real Name: b'Isolated 70'
    Original Eqn: b'INTEG ( isolation rate symptomatic 70+isolation rate asymptomatic 70-isolated recovery rate 70\\\\ -isolated critical case rate 70, init Isolated 70)'
    Units: b'person'
    Limits: (None, None)
    Type: component
    b''
    """
    # Auto-generated (Vensim -> PySD) stock accessor: the integration state
    # lives in the module-level _integ_isolated_70 object.
    return _integ_isolated_70()
def db_retry(using=None, tries=None, delay=None, max_delay=None, backoff=1, jitter=0, logger=logging_logger):
    """Returns a retry decorator.
    :param using: database alias from settings.DATABASES.
    :param tries: the maximum number of attempts.
                  -1 means infinite.
                  None - get from current connection.
                  default: DATABASES[using].get('MAX_RETRIES', 1).
    :param delay: initial delay between attempts.
                  None - get from current connection.
                  default: DATABASES[using].get('RETRY_DELAY_SECONDS', 0).
    :param max_delay: the maximum value of delay. default: None (no limit).
    :param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
    :param jitter: extra seconds added to delay between attempts. default: 0.
                   fixed if a number, random if a range tuple (min, max)
    :param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
                   default: retry.logging_logger. if None, logging is disabled.
    :returns: a retry decorator.
    """
    from functools import wraps
    if tries is None or delay is None:
        connection = get_connection(using=using)
        if tries is None:
            tries = connection.settings_dict.get("MAX_RETRIES", 1)
        if delay is None:
            # RETRY_DELAY_SECONDS may be present but None; coerce to 0.
            delay = connection.settings_dict.get("RETRY_DELAY_SECONDS", 0) or 0
    def wrap(f):
        @wraps(f)  # preserve the wrapped function's name/docstring/attrs
        def wrapped_f(*fargs, **fkwargs):
            # partial binds the call arguments so the retry helper can
            # re-invoke the function on every attempt.
            return __retry_internal(
                partial(f, *fargs, **fkwargs), tries, delay, max_delay,
                backoff, jitter, logger
            )
        return wrapped_f
    return wrap
from typing import Collection
def A000142(start: int = 0, limit: int = 20) -> Collection[int]:
    """Factorial numbers: n! = 1*2*3*4*...*n
    (order of symmetric group S_n, number of permutations of n letters).

    :param start: first index n to include.
    :param limit: how many consecutive terms to return.
    :return: [start!, (start+1)!, ..., (start+limit-1)!]
    """
    # The original also built unused `colors` (np.random.rand() draws, a
    # side effect on the global RNG) and `x` lists; both dropped.
    return [factorial(i) for i in range(start, start + limit)]
import idwgopt.idwgopt_default as idwgopt_default
def default(nvars):
    """ Generate default problem structure for IDW-RBF Global Optimization.
    problem=idwgopt.default(n) generates a default problem structure for
    an optimization with n variables.
    (C) 2019 by A. Bemporad.

    :param nvars: number of optimization variables.
    :return: default problem structure produced by idwgopt_default.set.
    """
    problem = idwgopt_default.set(nvars)
    return problem
from point import Point
from line import Segment
from polygon import Polygon
def convex_hull(*args):
    """
    Returns a Polygon representing the convex hull of a set of 2D points.

    NOTE(review): this implementation relies on Python 2-only constructs
    (``xrange`` and ``list.sort`` with a comparison function) and will not
    run unmodified on Python 3 -- confirm the targeted interpreter.
    Notes:
    ======
        This can only be performed on a set of non-symbolic points.
    Example:
    ========
        >>> from sympy.geometry import Point
        >>> points = [ Point(x) for x in [(1,1), (1,2), (3,1), (-5,2), (15,4)] ]
        >>> convex_hull(points)
        Polygon(Point(3, 1), Point(15, 4), Point(-5, 2), Point(1, 1))
    Description of method used:
    ===========================
        See http://en.wikipedia.org/wiki/Graham_scan.
    """
    p = args[0]
    if isinstance(p, Point):
        p = args
    # Basic checks
    if len(p) == 1:
        return p[0]
    elif len(p) == 2:
        return Segment(p[0], p[1])
    # Find lowest+rightmost point
    m = 0
    for i in xrange(1, len(p)):
        if (p[i][1] < p[m][1]) or ((p[i][1] == p[m][1]) and (p[i][0] > p[m][0])):
            m = i
    p[0], p[m] = p[m], p[0]
    # Signed double area of triangle (a, b, c); the sign gives orientation.
    def tarea(a, b, c):
        return (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
    # Radial sort of points with respect to p[0] (our pivot)
    destroy = {}
    p0 = p[0]
    def pcompare(p1, p2):
        # Comparison function for the radial sort; points collinear with the
        # pivot are recorded in `destroy` and removed after sorting.
        a = tarea(p0, p1, p2)
        if a > 0:
            return -1
        elif a < 0:
            return 1
        else:
            x = abs(p1[0] - p0[0]) - abs(p2[0] - p0[0])
            y = abs(p1[1] - p0[1]) - abs(p2[1] - p0[1])
            if (x < 0) or (y < 0):
                destroy[p1] = True
                return -1
            elif (x > 0) or (y > 0):
                destroy[p2] = True
                return 1
            else:
                destroy[p1] = True
                return 0
    p = p[1:]
    p.sort(pcompare)
    p.insert(0, p0)
    # Destroy points as found by sorting
    for i in xrange(len(p)-1, -1, -1):
        if p[i] in destroy:
            del p[i]
    # Graham scan
    def isleft(a, b, c):
        return (tarea(a, b, c) > 0)
    top = [p[0], p[1]]
    i = 2
    while i < len(p):
        p1 = top[-2]
        p2 = top[-1]
        if isleft(p1, p2, p[i]):
            top.append(p[i])
            i += 1
        else:
            top.pop()
    return Polygon(top)
from typing import Dict
def find_namespaces(tree: ElementTree) -> Dict[str, str]:
    """
    Return the prefix -> URI namespace mapping declared on the root element
    of the XML document. Namespaces declared deeper in the document are not
    collected, so documents should declare all namespaces on their root
    element to avoid entries being left out.

    :param tree: An lxml ElementTree containing the XML document from which to extract the namespaces.
    :return: A dictionary containing the mapping between short namespace and full namespace.
    """
    namespaces = tree.getroot().nsmap
    # The default (prefix-less) namespace is keyed by None; drop it.
    namespaces.pop(None, None)
    return namespaces
def filterStories(stories, triggerlist):
    """
    Takes in a list of NewsStory instances.
    Returns: a list of only the stories for which a trigger in triggerlist
    fires, preserving input order and without duplicate entries.
    """
    selected = []
    for story in stories:
        fired = any(trig.evaluate(story) for trig in triggerlist)
        if fired and story not in selected:
            selected.append(story)
    return selected
def lcs(a, b):
    """
    Return the length of the longest common subsequence of two sequences.

    Time complexity: O(len(a) * len(b))
    Space complexity: O(min(len(a), len(b)))
    """
    # Classic LCS dynamic program, kept to a single row buffer: sequence `a`
    # runs along the rows, `b` along the columns, and the row is overwritten
    # in place on every pass.
    # Ensure `b` is the shorter sequence so the buffer is as small as possible.
    if len(a) < len(b):
        a, b = b, a
    # Nothing in common with an empty sequence.
    if not b:
        return 0
    row = [0] * len(b)
    for item_a in a:
        best_left = 0   # cell to the left in the current row
        diag = 0        # cell up-and-left (previous row, previous column)
        for col, item_b in enumerate(b):
            above = row[col]  # previous row, same column
            cell = diag + 1 if item_a == item_b else max(best_left, above)
            row[col] = cell
            best_left = cell
            diag = above
    # Bottom-right cell of the (virtual) DP table.
    return row[-1]
def save(config, filename="image.img", host=None):
    """Save the Image File to the disk"""
    # Assemble the docker save command step by step, then run it.
    builder = DockerCommandBuilder(host=host)
    command = builder.save(config.getImageName()).set_output(filename).build()
    return execute(command)
from typing import Dict
from typing import Any
import importlib
import sys
def klass_from_obj_type(cm_json: Dict) -> Any:
    """Get a reference to a class (n.b. not an instance) from the dotted path
    stored under the 'obj_type' key of the given json dict.

    :param cm_json: dict whose 'obj_type' value is "package.module.ClassName".
    :return: the class object named by that path.
    :raises KeyError: if 'obj_type' is missing from cm_json.
    :raises ImportError: if the module cannot be imported.
    :raises AttributeError: if the module has no attribute of that name.
    """
    module_name, klass_path = cm_json['obj_type'].rsplit('.', 1)
    # importlib.import_module returns the already-loaded module from
    # sys.modules when available and imports it otherwise -- this resolves the
    # old TODO about classes whose module had not been imported yet, where the
    # bare sys.modules[module_name] lookup raised KeyError.
    module = importlib.import_module(module_name)
    return getattr(module, klass_path)
def lines2bars(lines, is_date):
    """Convert CSV record lines into Bar rows.

    header: date,open,high,low,close,money,volume,factor
    lines: 2022-02-10 10:06:00,16.87,16.89,16.87,16.88,4105065.000000,243200.000000,121.719130
    """
    # Accept a single record as well as a list of records.
    if isinstance(lines, str):
        lines = [lines]

    # Choose the timestamp parser once, outside the loop.
    if is_date:
        convert = lambda raw: arrow.get(raw).date()
    else:
        convert = lambda raw: arrow.get(raw).naive

    rows = []
    for line in lines:
        fields = line.split(",")
        # First field is the timestamp, the next seven are numeric columns.
        record = [convert(fields[0])]
        record.extend(float(fields[k]) for k in range(1, 8))
        rows.append(tuple(record))
    return np.array(rows, dtype=bars_dtype)
import torch
def pgd_linf_untargeted(model, X, y, epsilon=0.1, alpha=0.01, num_iter=20, randomize=False):
    """Construct untargeted PGD (projected gradient descent) L-infinity
    adversarial perturbations for the examples X.

    NOTE(review): the original docstring said "FGSM", but the num_iter loop
    with a per-step projection onto the epsilon ball is the PGD attack
    (FGSM is the single-step special case).

    :param model: classifier mapping inputs to logits (used via CrossEntropyLoss)
    :param X: batch of input examples
    :param y: true labels; the attack maximizes their cross-entropy loss
    :param epsilon: L-infinity radius of the allowed perturbation
    :param alpha: step size per iteration
    :param num_iter: number of gradient ascent steps
    :param randomize: if True, start from a uniform random point in the
        epsilon ball instead of zero
    :return: the perturbation delta (detached from the autograd graph)
    """
    if randomize:
        # Uniform random start in [-epsilon, epsilon] per element.
        delta = torch.rand_like(X, requires_grad=True)
        delta.data = delta.data * 2 * epsilon - epsilon
    else:
        delta = torch.zeros_like(X, requires_grad=True)
    for t in range(num_iter):
        loss = nn.CrossEntropyLoss()(model(X + delta), y)
        loss.backward()
        # Ascend along the sign of the gradient, then project back into the
        # L-inf ball by clamping each element to [-epsilon, epsilon].
        delta.data = (delta + alpha*delta.grad.detach().sign()).clamp(-epsilon,epsilon)
        # Clear delta's gradient so the next backward() does not accumulate.
        delta.grad.zero_()
    return delta.detach()
def cal_NB_pvalue(treatTotal, controlTotal, items):
    """Calculate the negative-binomial p-value at a position of a chromosome.

    :param treatTotal: total counts in the treatment sample
    :param controlTotal: total counts in the control sample
    :param items: (treatCount, controlCount, pos) triple for one position
    :return: (pvalue, treatCount, controlCount, pos)
    """
    treatCount, controlCount, pos = items
    # The original initialized `pvalue = 1` only to overwrite it
    # unconditionally below; that dead assignment has been removed.
    pvalue = negativeBinomail(treatCount, treatTotal, controlCount, controlTotal)
    return (pvalue, treatCount, controlCount, pos)
def handle_watches(connection, author):
    """Return an array of watches for the author."""
    collection = connection['test']['watches']
    # this should not except
    cursor = collection.find({"author": ObjectId(author)})
    return [cleanup_watch(post) for post in cursor]
def new_mm(*args, figsize, **kwargs):
    """Wrapper for plt.subplots, using figsize in millimeters
    :rtype: figure, axes
    """
    # matplotlib wants inches; 25.4 mm per inch.
    width_in = figsize[0] / 25.4
    height_in = figsize[1] / 25.4
    return plt.subplots(*args, figsize=(width_in, height_in), **kwargs)
def compute_center_of_mass(coordinates, masses):
    """
    Given coordinates and masses, return center of mass coordinates.
    Also works to compute COM translational motion (pass velocities instead
    of positions to get the COM velocity).
    Args:
        coordinates ({nparticle, ndim} ndarray): xyz (to compute COM) or velocities (COM velocity)
        masses ({nparticle,} array_like): masses
    Returns:
        ({ndim,} ndarray): center of mass coordinates
    """
    # np.average computes the mass-weighted mean along the particle axis,
    # i.e. sum_i m_i * x_i / sum_i m_i -- identical to the manual
    # reshape/sum/divide reduction, in a single vectorized call.
    weights = np.ravel(masses)
    return np.average(np.asarray(coordinates), axis=0, weights=weights)
def chars_count(word: str):
    """
    :param word: string to count the occurrences of a character symbol for.
    :return: a dictionary mapping each character found in word to the number of times it appears in it.
    """
    # Local import keeps this snippet self-contained.
    from collections import Counter
    # Counter does the tallying in optimized C code; convert back to a plain
    # dict so the documented return type is preserved exactly.
    return dict(Counter(word))
def do_expressiondelete(**kwargs):
    """
    Worker to remove an expression from the engine.

    Keyword Args:
        proexpobj: expression object; its profile_expression_id identifies
            the expression to remove.
        profileexplist: expression list object exposing delete(id).

    Returns:
        1 if the expression was deleted, 0 otherwise.
        (The original docstring claimed "0 if expression deleted", but the
        code has always returned 1 on a successful delete; the docs now
        match the actual behavior.)
    """
    proexpobj = kwargs.get('proexpobj')
    profileexplist = kwargs.get('profileexplist')
    # Normalize delete()'s truthiness to the documented 0/1 contract.
    return 1 if profileexplist.delete(proexpobj.profile_expression_id) else 0
import csv
def read_csv_as_nested_dict(filename, keyfield, separator, quote):
    """
    Inputs:
      filename  - name of CSV file
      keyfield  - field to use as key for rows
      separator - character that separates fields
      quote     - character used to optionally quote fields
    Output:
      Returns a dictionary of dictionaries where the outer dictionary
      maps the value in the key_field to the corresponding row in the
      CSV file. The inner dictionaries map the field names to the
      field values for that row.
    """
    with open(filename, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=separator, quotechar=quote)
        # Later rows with a duplicate key value overwrite earlier ones,
        # matching the original loop's behavior.
        return {row[keyfield]: row for row in reader}
def msgpackb(lis):
    """Serialize a list to msgpack bytes (list -> bytes)."""
    packed = create_msgpack(lis)
    return packed
from azureml._execution import _commands
from azureml.core.runconfig import RunConfiguration
from azureml._project.project import Project
def prepare_compute_target(experiment, source_directory, run_config):
    """Prepare the compute target.

    Installs all the required packages for an experiment run based on
    run_config and custom_run_config.

    :param experiment:
    :type experiment: azureml.core.experiment.Experiment
    :param source_directory:
    :type source_directory: str
    :param run_config: The run configuration. This can be a run configuration
        name, as string, or a azureml.core.runconfig.RunConfiguration object.
    :type run_config: str or azureml.core.runconfig.RunConfiguration
    :return: A run object
    :rtype: azureml.core.script_run.ScriptRun
    """
    # The two objects below are independent of each other.
    project = Project(experiment=experiment, directory=source_directory)
    config_object = RunConfiguration._get_run_config_object(
        path=source_directory, run_config=run_config)
    return _commands.prepare_compute_target(project, config_object)
def streaming_ndarray_agg(
    in_stream,
    ndarray_cols,
    aggregate_cols,
    value_cols=(),
    sample_cols=(),
    chunksize=30000,
    add_count_col=False,
    divide_by_count=False,
):
    """
    Takes in_stream of dataframes.

    Applies ndarray-aware groupby-sum or groupby-mean: treats ndarray_cols as
    numpy arrays, value_cols as normal values; for sample_cols takes the first
    element. Does groupby over aggregate_cols.

    If add_count_col is True, adds column "count"; if it's a string, adds a
    column with that name.
    If divide_by_count is True, divides result by column "count"; if it's a
    string, divides by that column.

    This function can be used for automatically aggregating P(s), R(s) etc.
    for a set of conformations that is so large that all P(s) won't fit in RAM,
    and when averaging needs to be done over so many parameters
    that for-loops are not an issue. Examples may include simulations in which
    a sweep over many parameters has been performed.

    Note: value_cols/sample_cols defaults are tuples, not lists -- the original
    used mutable default arguments (a classic Python pitfall); both are copied
    to fresh lists below, so the change is behavior-preserving.

    :return: aggregated DataFrame, or None if in_stream yielded nothing.
    """
    value_cols_orig = list(value_cols)
    # Work on fresh lists so callers' sequences (and the defaults) are never mutated.
    ndarray_cols, value_cols = list(ndarray_cols), list(value_cols)
    aggregate_cols, sample_cols = list(aggregate_cols), list(sample_cols)
    if add_count_col is not False:
        if add_count_col is True:
            add_count_col = "count"
        value_cols.append(add_count_col)

    def agg_one(dfs, aggregate):
        """Takes a list of DataFrames and the old aggregate;
        performs groupby and aggregation and returns the new aggregate."""
        if add_count_col is not False:
            # Each input row contributes 1 to the count column.
            for i in dfs:
                i[add_count_col] = 1
        df = pd.concat(dfs + ([aggregate] if aggregate is not None else []), sort=False)
        aggregate = ndarray_groupby_aggregate(
            df,
            ndarray_cols=ndarray_cols,
            aggregate_cols=aggregate_cols,
            value_cols=value_cols,
            sample_cols=sample_cols,
            preset="sum",
        )
        return aggregate.reset_index()

    # Accumulate incoming frames and fold them into the running aggregate
    # whenever roughly `chunksize` rows have been buffered.
    aggregate = None
    cur = []
    count = 0
    for i in in_stream:
        cur.append(i)
        count += len(i)
        if count > chunksize:
            aggregate = agg_one(cur, aggregate)
            cur = []
            count = 0
    if len(cur) > 0:
        aggregate = agg_one(cur, aggregate)

    # Optionally convert the groupby-sum into a groupby-mean.
    if divide_by_count is not False:
        if divide_by_count is True:
            divide_by_count = "count"
        for i in ndarray_cols + value_cols_orig:
            aggregate[i] = aggregate[i] / aggregate[divide_by_count]
    return aggregate
from typing import List
def get_cropped_source_data(
    stack_list: List[str], crop_origin: np.ndarray, crop_max: np.ndarray
) -> np.ndarray:
    """
    Read data from the given image files in an image stack.
    :param List[str] stack_list: List of filenames representing images in a stack
    :param np.ndarray crop_origin: Origin of region to crop, array of shape (3,)
    :param np.ndarray crop_max: Max position of region to crop, array of shape (3,)
    :return: Cropped source data as an array of shape (x,y,z)
    """
    # The third coordinate selects which slices (files) of the stack to read.
    selected_files = stack_list[crop_origin[2] : crop_max[2]]
    cropped_slices = []
    for filename in selected_files:
        frame = np.array(Image.open(filename))
        # Crop the in-plane (x, y) window from this slice.
        cropped_slices.append(
            frame[crop_origin[0] : crop_max[0], crop_origin[1] : crop_max[1]]
        )
    return np.stack(cropped_slices, axis=2)
def valid_random_four_channel_images() -> str:
    """
    Make a folder of valid 4-channel images (n_files=6, 300x300).
    :return: path to the folder
    """
    # .png is used because that format supports 4 channels (RGBA).
    options = dict(file_type='image', resolution=(300, 300), n_files=6, channels=4)
    return make_folder_with_files('.png', **options)
import functools

@functools.singledispatch
def build_template(spec) -> Template:
    """Build a template from a specification.
    The resulting template is an object that when called with a set of
    bindings (as produced by a matcher from `build_matcher`), returns
    an instance of the template with names substituted by their bound values.
    This is a generic function. Support for additional template specifications
    can be added with the `build_template.register(<type>, <handler>)` function.
    See the documentation of `functools.singledispatch` for further information.

    NOTE(review): the docstring always advertised `build_template.register`,
    but the @functools.singledispatch decorator was missing, so `.register`
    did not exist; it is now applied. The default (unregistered) behavior is
    unchanged.
    """
    # Default handler: treat any unregistered spec as a literal.
    return LiteralTemplate(spec)
def flatten_objective(expr):
    """
    Flatten an objective expression into (flat_expr, [constraints]).

    - Decision variable: Var
    - Linear: sum([Var])          (CPMpy class 'Operator', name 'sum')
              wsum([Const],[Var]) (CPMpy class 'Operator', name 'wsum')
    """
    # Already-flat variables need no extra constraints.
    if __is_flat_var(expr):
        return (expr, [])

    if isinstance(expr, Operator) and expr.name in ('sum', 'wsum'):
        if expr.name == 'wsum':
            raise NotImplementedError(expr)  # TODO, wsum
        # 'sum': flat as-is when every argument is already a flat var.
        if all(__is_flat_var(arg) for arg in expr.args):
            return (expr, [])
        # Otherwise flatten each argument and collect the constraints
        # generated along the way.
        flatvars, flatcons = zip(*[get_or_make_var(arg) for arg in expr.args])
        flattened = Operator(expr.name, flatvars)
        return (flattened, [c for cons in flatcons for c in cons])

    # any other numeric expression
    return get_or_make_var(expr)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.