content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def camelcase(key_name):
    """Convert a snake_case identifier to camelCase.

    The first fragment is kept as-is; every following fragment is
    title-cased before joining.
    """
    first, *rest = key_name.split('_')
    return first + ''.join(fragment.title() for fragment in rest)
def calculate_sigmoid(alpha_value=1, TDE=0, sigma=.1):
    """
    SOFTMAX VBDE: f(s, a, \\sigma) = \\frac{2e^{-|\\alpha TDE| / \\sigma}}{1-e^{-|\\alpha TDE|/\\sigma}}

    The tiny 1e-16 offset keeps the exponent nonzero so the denominator
    never collapses to exactly zero when TDE == 0.
    :return: value of the VBDE softmax expression
    """
    exponent = -np.abs(alpha_value * TDE + 1e-16) / sigma
    decay = np.exp(exponent)
    return 2 * decay / (1 - decay)
def getprop(obj, string):
    """Resolve a dotted attribute path on *obj*.

    For example ``getprop(obj, 'position.x')`` returns ``obj.position.x``.

    :param obj: object to start the lookup from
    :param string: dot-separated attribute path
    :return: the value at the end of the path
    :raises AttributeError: if any segment of the path is missing
    """
    value = obj
    # Renamed the loop variable: the original shadowed the builtin `str`.
    for attr_name in string.split('.'):
        value = getattr(value, attr_name)
    return value
import argparse
def get_options():
    """Parse command-line options for the Decapod dynamic inventory.

    Exactly one of --list or --host must be supplied (mutually exclusive,
    required group).
    """
    parser = argparse.ArgumentParser(
        description="Dynamic inventory for Decapod.")
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument(
        "-l", "--list",
        action="store_true",
        default=False,
        help="List all inventory.",
    )
    mode.add_argument(
        "-s", "--host",
        default=None,
        help="List host specific variables",
    )
    return parser.parse_args()
def extend(vals, inds, shape):
    """Scatter `vals` into a zero array.

    Returns an array of shape `shape`, zero everywhere except at indices
    `inds`, which receive `vals` (dtype is taken from `vals`).
    """
    result = np.zeros(shape, dtype=vals.dtype)
    result[inds] = vals
    return result
def check_model(model_name, model, model_type):
    """
    Check the type of input `model` .
    Args:
        model_name (str): Name of model.
        model (Object): Model object.
        model_type (Class): Class of model.
    Returns:
        Object, if the type of `model` is `model_type`, return `model` itself.
    Raises:
        TypeError: If model is not an instance of `model_type` .
    """
    if isinstance(model, model_type):
        return model
    msg = '{} should be an instance of {}, but got {}'.format(model_name, model_type, type(model).__name__)
    # LOGGER and TAG are module-level globals defined elsewhere in the project.
    LOGGER.error(TAG, msg)
    raise TypeError(msg)
import os
def load_score_map(input_prefix, day, epsilon=0.000000001, excluded_indices=None, restricted_indices=None):
    """Load the centrality score map for the given day.

    Reads '<input_prefix>_<day>.csv' (space separated, columns: id score)
    and returns a DataFrame indexed by node id.

    :param input_prefix: path prefix of the per-day score files
    :param day: day number appended to the prefix
    :param epsilon: added to every score so active nodes have strictly
        positive centrality; pass None to skip the shift
    :param excluded_indices: ids to drop (ignored if restricted_indices given)
    :param restricted_indices: if given, keep only these ids
    :raises IOError: if the score file does not exist
    """
    score_file_path = input_prefix + '_%i.csv' % day
    if not os.path.exists(score_file_path):
        raise IOError("File is missing: %s" % score_file_path)
    scores = pd.read_csv(score_file_path, sep=" ", names=["id", "score"])
    # Filter for indices; restriction takes precedence over exclusion.
    # (Original compared with `!= None`; use identity tests per PEP 8.)
    if restricted_indices is not None:
        scores = scores[scores["id"].isin(restricted_indices)]
    elif excluded_indices is not None:
        scores = scores[~scores["id"].isin(excluded_indices)]
    # Create dict-like structure keyed by node id.
    scores = scores.set_index("id")
    if epsilon is not None:
        scores["score"] = scores["score"] + epsilon
    return scores
def compute_tnacs( hvib ):
    """
    Computes the time-averaged nonadiabatic couplings for a given hvib.
    hvib ( list of list of matrices ): The vibronic hamiltonian for all timesteps
    returns: a matrix of time-averaged nonadiabatic couplings between all electronic states
             in meV
    """
    nstates = hvib[0].num_of_rows
    # First statistical moment (time average) of the complex matrices.
    # data_stat and units are project (Libra) modules not visible here.
    nacs = data_stat.cmat_stat2( hvib, 1)
    #nacs = data_stat.cmat_stat2( hvib, 2)
    mb_tnacs = []
    for i in range( nstates ):
        mb_tnacs.append( [] )
        for j in range( nstates ):
            # Imaginary part holds the coupling; * 1000 / ev2Ha presumably
            # converts Hartree -> meV — TODO confirm against units.ev2Ha.
            mb_tnacs[i].append( nacs.get(i,j).imag * 1000.0 / units.ev2Ha )
    return np.array( mb_tnacs )
import async_timeout
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a Samsung SyncThru printer config entry.

    Creates a polling DataUpdateCoordinator for the printer and registers
    the device. Returns False when the printer does not speak the SyncThru
    JSON API so the entry is discarded.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    hass.data.setdefault(DOMAIN, {})
    printer = SyncThru(
        entry.data[CONF_URL], session, connection_mode=ConnectionMode.API
    )
    async def async_update_data() -> SyncThru:
        """Fetch data from the printer."""
        try:
            # Bound each poll to 10 seconds so a dead printer can't hang us.
            async with async_timeout.timeout(10):
                await printer.update()
        except SyncThruAPINotSupported as api_error:
            # if an exception is thrown, printer does not support syncthru
            _LOGGER.info(
                "Configured printer at %s does not provide SyncThru JSON API",
                printer.url,
                exc_info=api_error,
            )
            raise api_error
        else:
            # if the printer is offline, we raise an UpdateFailed
            if printer.is_unknown_state():
                raise UpdateFailed(
                    f"Configured printer at {printer.url} does not respond."
                )
        return printer
    coordinator: DataUpdateCoordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=DOMAIN,
        update_method=async_update_data,
        update_interval=timedelta(seconds=30),
    )
    hass.data[DOMAIN][entry.entry_id] = coordinator
    await coordinator.async_config_entry_first_refresh()
    if isinstance(coordinator.last_exception, SyncThruAPINotSupported):
        # this means that the printer does not support the syncthru JSON API
        # and the config should simply be discarded
        return False
    # Register (or update) the printer in the device registry.
    device_registry = dr.async_get(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        configuration_url=printer.url,
        connections=device_connections(printer),
        identifiers=device_identifiers(printer),
        model=printer.model(),
        name=printer.hostname(),
    )
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def center_binned_stats(*args, **kwargs):
    """Wrap scipy.stats.binned_statistic, additionally returning bin centers.

    Identical call signature to scipy.stats.binned_statistic, but the
    return tuple gains the bin centers (same length as `statistic`)
    between the statistic and the bin edges.
    See docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html
    """
    stat, binedges, binnumber = binned_statistic(*args, **kwargs)
    bincenters = 0.5 * (binedges[:-1] + binedges[1:])
    return stat, bincenters, binedges, binnumber
def create_account():
    """
    Handles the user signing up to create an account.

    Validates the sign-up form; on success registers the user and redirects
    to the log-in page, otherwise re-renders the sign-up form.
    """
    signUpForm = forms.RegisterFormFactory()
    if signUpForm.validate_on_submit():
        netid,name,duke_email,phone_number,affiliation,password=extract_info(signUpForm)
        # everything was checked with form validators, so we can just register the user
        register_user(netid,name,duke_email,phone_number,affiliation,password)
        return redirect(url_for('rides.log_in_main'))
    return render_template('registerLogInPages/sign-up.html', form=signUpForm)
from art.classifiers import BlackBoxClassifier
from art.utils import to_categorical
import os
import json
def get_classifier_bb(defences=None):
    """
    Standard BlackBox classifier for unit testing.

    The predict function replays canned predictions from
    data/mnist/api_output.txt instead of querying a live service.
    :return: BlackBoxClassifier
    """
    # define blackbox classifier
    def predict(x):
        # Load the pre-recorded API output and return one-hot labels for
        # exactly as many samples as were requested.
        with open(os.path.join(os.path.dirname(os.path.dirname(__file__)),
                               'data/mnist', 'api_output.txt')) as json_file:
            predictions = json.load(json_file)
        return to_categorical(predictions['values'][:len(x)], nb_classes=10)
    # MNIST-shaped input (28, 28, 1), 10 classes, pixel range [0, 255].
    bbc = BlackBoxClassifier(predict, (28, 28, 1), 10, clip_values=(0, 255), defences=defences)
    return bbc
import warnings
def tracks(track):
    """
    Check if the submitted RGTs are valid.

    Arguments
    ---------
    track: ICESat-2 reference ground track (RGT); None, a single str/int,
        or a list of them.

    Returns a list of zero-padded 4-character RGT strings, or ["????"]
    when no track was given. Emits a warning for RGTs outside the valid
    range; raises TypeError for unsupported input types.
    """
    #-- string length of RGTs in granules
    track_length = 4
    #-- total number of ICESat-2 satellite RGTs is 1387
    all_tracks = {str(rgt + 1).zfill(track_length) for rgt in range(1387)}
    if track is None:
        return ["????"]
    if isinstance(track, (str, int)):
        candidates = [track]
    elif isinstance(track, list):
        candidates = track
    else:
        raise TypeError(
            "Reference Ground Track as a list or string"
        )
    track_list = []
    for t in candidates:
        assert int(t) > 0, "Reference Ground Track must be positive"
        track_list.append(str(t).zfill(track_length))
    #-- check if user-entered RGT is outside of the valid range
    if not all_tracks & set(track_list):
        warnings.filterwarnings("always")
        warnings.warn("Listed Reference Ground Track is not available")
    return track_list
def submit_spark_job(project_id, region, cluster_name, job_id_output_path,
    main_jar_file_uri=None, main_class=None, args=None, spark_job=None, job=None,
    wait_interval=30):
    """Submits a Cloud Dataproc job for running Apache Spark applications on YARN.
    Args:
        project_id (str): Required. The ID of the Google Cloud Platform project
            that the cluster belongs to.
        region (str): Required. The Cloud Dataproc region in which to handle the
            request.
        cluster_name (str): Required. The cluster to run the job.
        main_jar_file_uri (str): The HCFS URI of the jar file that contains the main class.
        main_class (str): The name of the driver's main class. The jar file that
            contains the class must be in the default CLASSPATH or specified in
            jarFileUris.
        args (list): Optional. The arguments to pass to the driver. Do not include
            arguments, such as --conf, that can be set as job properties, since a
            collision may occur that causes an incorrect job submission.
        spark_job (dict): Optional. The full payload of a [SparkJob](
            https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob).
        job (dict): Optional. The full payload of a [Dataproc job](
            https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
        wait_interval (int): The wait seconds between polling the operation.
            Defaults to 30s.
        job_id_output_path (str): Path for the ID of the created job
    Returns:
        The created job payload.
    """
    # Defaults are None instead of the original mutable [] / {} literals
    # (shared-default pitfall). `if not x` treats None and empty alike, so
    # caller-visible behavior is unchanged.
    if not spark_job:
        spark_job = {}
    if not job:
        job = {}
    if main_jar_file_uri:
        spark_job['mainJarFileUri'] = main_jar_file_uri
    if main_class:
        spark_job['mainClass'] = main_class
    if args:
        spark_job['args'] = args
    job['sparkJob'] = spark_job
    return submit_job(project_id, region, cluster_name, job, wait_interval, job_id_output_path=job_id_output_path)
import sys
def load_pickle(f):
    """Load a CSC-library pickle file and return a dictionary.

    :param f: absolute filepath to a CSC library pickle file; the pickle
        must hold either a DataFrame with a 'gRNA' column or a plain dict
        keyed by gRNA.
    :return: dictionary object (Pandas `to_dict` result, or the dict itself)

    Exits the process with status 1 for incompatible pickle contents.
    """
    # `pl` is the module-level pickle alias used by this project.
    with open(f, 'rb') as infile:
        payload = pl.load(infile, encoding='latin1')
    try:
        # DataFrame case: index by gRNA and convert to a nested dict.
        return payload.set_index('gRNA').to_dict()
    except AttributeError:
        # Use isinstance instead of the original `type(...) == dict` check.
        if isinstance(payload, dict):
            sys.stdout.write('\n%s is a dictionary object\n' % f)
            return payload
        sys.stderr.write('\n%s is incompatible pickle file\nHave pickle file be dictionary with gRNA as key and specificity string as value\n' % f)
        sys.exit(1)
def get_filter_ids(id_query):
    """Parse the `id` filter parameter from the URL query string.

    Returns None when no filter was supplied; otherwise the list of
    comma-separated ids, each checked by validate_id (which raises on
    malformed input).
    """
    if id_query is None:
        return None
    ids = id_query.split(',')
    for candidate in ids:
        validate_id(candidate)
    return ids
import random
def random_sampling(predictions, number):
    """
    Pick the indexes of the next samples to labelise, using uniformly
    random prioritisation over the unlabelled pool.
    Args:
        predictions : matrix of probabilities with all the predictions
            for the unlabelled data
        number : how many indexes to return
    Returns:
        A list of `number` distinct indexes to labelise and add to the
        training set.
    """
    candidate_indexes = range(len(predictions))
    return random.sample(candidate_indexes, number)
def natsort_key_icase(s: str) -> str:
    """Case-insensitive natural-sort key.

    Lowercases *s*, then delegates to natsort_key (defined elsewhere),
    which splits the string into numeric and non-numeric fragments.
    """
    return natsort_key(s.lower())
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    Builds a prediction-only BERT QA model (start/end span logits).
    NOTE(review): learning_rate, num_train_steps, num_warmup_steps and
    use_tpu are not referenced below — presumably kept for signature
    parity with a training variant; confirm before removing.
    """
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
        unique_ids = features["unique_ids"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # create_model is a project helper that builds the BERT graph and
        # the span-prediction heads.
        (start_logits, end_logits) = create_model(
            bert_config=bert_config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            use_one_hot_embeddings=use_one_hot_embeddings)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        tf.logging.info("*** Features3 ***")
        if init_checkpoint:
            # Map checkpoint variables onto the freshly built graph.
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        output_spec = None
        # Prediction-only spec: no loss/train_op is ever constructed here.
        predictions = {
            "unique_ids": unique_ids,
            "start_logits": start_logits,
            "end_logits": end_logits,
        }
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
        return output_spec
    return model_fn
def datetime_to_gps_format(t):
    """
    Converts from a datetime to week number and time of week format.
    NOTE: This does NOT convert between utc and gps time.  The result
      will still be in gps time (so will be off by some number of
      leap seconds).
    Parameters
    ----------
    t : np.datetime64, pd.Timestamp, datetime.datetime
      A datetime object (possibly an array) that is convertable to
      datetime64 objects using pd.to_datetime (see the pandas docs
      for more details).
    Returns
    --------
    wn_tow : dict
      Dictionary with attributes 'wn' and 'tow' corresponding to the
      week number and time of week.
    See also: tow_to_datetime
    """
    t = pd.to_datetime(t)
    # GPS_WEEK_0 and WEEK_SECS are module-level constants defined elsewhere
    # (presumably the GPS epoch 1980-01-06 and 604800 — confirm).
    delta = (t - GPS_WEEK_0)
    # compute week number
    wn = np.floor(delta.total_seconds() / WEEK_SECS).astype('int64')
    # subtract the whole weeks from timedelta and get the remaining seconds
    delta -= pd.to_timedelta(wn * WEEK_SECS, 's')
    seconds = delta.total_seconds()
    return {'wn': wn, 'tow': seconds}
def view(jinja_environment: Environment, name: str, *args, **kwargs):
    """
    Returns a Response object with HTML obtained from synchronous rendering.
    Use this when `enable_async` is set to False when calling `use_templates`.

    :param jinja_environment: configured Jinja2 environment
    :param name: logical template name; template_name() maps it to a file name
    """
    # render_template / get_response are project helpers; *args/**kwargs are
    # forwarded as the template rendering context.
    return get_response(
        render_template(
            jinja_environment.get_template(template_name(name)), *args, **kwargs
        )
    )
from typing import List
import collections
def create_preprocess_fn(
    vocab: List[str],
    num_oov_buckets: int,
    client_batch_size: int,
    client_epochs_per_round: int,
    max_sequence_length: int,
    max_elements_per_client: int,
    max_shuffle_buffer_size: int = 10000) -> tff.Computation:
    """Creates a preprocessing functions for Stack Overflow next-word-prediction.
    This function returns a `tff.Computation` which takes a dataset and returns a
    dataset, suitable for mapping over a set of unprocessed client datasets.
    Args:
      vocab: Vocabulary which defines the embedding.
      num_oov_buckets: The number of out of vocabulary buckets. Tokens that are
        not present in the `vocab` are hashed into one of these buckets.
      client_batch_size: Integer representing batch size to use on the clients.
      client_epochs_per_round: Number of epochs for which to repeat train client
        dataset. Must be a positive integer.
      max_sequence_length: Integer determining shape of padded batches. Sequences
        will be padded up to this length, and sentences longer than this will be
        truncated to this length.
      max_elements_per_client: Integer controlling the maximum number of elements
        to take per client. If -1, keeps all elements for each client. This is
        applied before repeating `client_epochs_per_round`, and is intended
        primarily to contend with the small set of clients with tens of thousands
        of examples.
      max_shuffle_buffer_size: Maximum shuffle buffer size.
    Returns:
      A `tff.Computation` taking as input a `tf.data.Dataset`, and returning a
      `tf.data.Dataset` formed by preprocessing according to the input arguments.
    Raises:
      ValueError: If any of the integer arguments is outside its valid range.
    """
    # Validate all integer arguments up front so failures happen eagerly,
    # before the tff.Computation is traced.
    if client_batch_size <= 0:
        raise ValueError('client_batch_size must be a positive integer. You have '
                         'passed {}.'.format(client_batch_size))
    elif client_epochs_per_round <= 0:
        raise ValueError('client_epochs_per_round must be a positive integer. '
                         'You have passed {}.'.format(client_epochs_per_round))
    elif max_sequence_length <= 0:
        raise ValueError('max_sequence_length must be a positive integer. You have '
                         'passed {}.'.format(max_sequence_length))
    elif max_elements_per_client == 0 or max_elements_per_client < -1:
        raise ValueError(
            'max_elements_per_client must be a positive integer or -1. You have '
            'passed {}.'.format(max_elements_per_client))
    if num_oov_buckets <= 0:
        raise ValueError('num_oov_buckets must be a positive integer. You have '
                         'passed {}.'.format(num_oov_buckets))
    # Cap the shuffle buffer at the number of elements actually kept.
    if (max_elements_per_client == -1 or
            max_elements_per_client > max_shuffle_buffer_size):
        shuffle_buffer_size = max_shuffle_buffer_size
    else:
        shuffle_buffer_size = max_elements_per_client
    # Features are intentionally sorted lexicographically by key for consistency
    # across datasets.
    feature_dtypes = collections.OrderedDict(
        creation_date=tf.string,
        score=tf.int64,
        tags=tf.string,
        title=tf.string,
        tokens=tf.string,
        type=tf.string,
    )
    @tff.tf_computation(tff.SequenceType(feature_dtypes))
    def preprocess_fn(dataset):
        # build_to_ids_fn / batch_and_split are project helpers defined
        # elsewhere in this module.
        to_ids = build_to_ids_fn(
            vocab=vocab,
            max_sequence_length=max_sequence_length,
            num_oov_buckets=num_oov_buckets)
        dataset = dataset.take(max_elements_per_client).shuffle(
            shuffle_buffer_size).repeat(client_epochs_per_round).map(
                to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        return batch_and_split(dataset, max_sequence_length, client_batch_size)
    return preprocess_fn
def get_slope_aspect(dem, get_aspect=True):
    """
    Return the instantaneous slope and aspect (if get_aspect) for dem.
    Both values derived using NumPy's gradient method by methods from:
    Ritter, Paul. "A vector-based slope and aspect generation algorithm."
    Photogrammetric Engineering and Remote Sensing 53, no. 8 (1987): 1109-1111
    Parameters
    ----------
    dem(height) (2D array): A two dimensional array of elevation data.
    get_aspect (bool): Whether to also compute and return aspect (default True)
    Returns
    -------
    slope (2D array): A two dimensional array of slope data.
    aspect (2D array, optional): A two dimensional array of aspect data.
    """
    # np.gradient yields derivatives along axis 0 (rows) then axis 1 (cols).
    dy, dx = np.gradient(dem)
    # Slope in degrees from the gradient magnitude; no windowing is done
    # (e.g. Horn's method).
    gradient_magnitude = np.sqrt(dx ** 2 + dy ** 2)
    slope = np.rad2deg(np.arctan(gradient_magnitude))
    if not get_aspect:
        return slope
    aspect = np.rad2deg(np.arctan2(-dy, dx))
    # Convert to clockwise about Y and restrict to (0, 360) from North.
    aspect = (270 + aspect) % 360
    # Flat cells have no meaningful aspect; zero them out.
    aspect[slope == 0] = 0
    return slope, aspect
def get_element_parts(
    original_list: list, splitter_character: str, split_index: int
) -> list:
    """
    Split every element of `original_list` on `splitter_character` and
    collect the fragment at `split_index`, stripped and lower-cased.
    Parameters
    ----------
    original_list : list
        List of strings to be split.
    splitter_character : str
        Character to split the strings on.
    split_index : int
        Index of the fragment to keep from each split.
    Returns
    -------
    list
        Cleaned (stripped, casefolded) fragments, one per input element.
    """
    return [
        element.rsplit(splitter_character)[split_index].strip().casefold()
        for element in original_list
    ]
from typing import Union
import os
import csv
import requests
import json
def download(tickers: list, start: Union[str, int] = None, end: Union[str, int] = None, interval: str = "1d") -> dict:
    """
    Download historical data for tickers in the list.
    Parameters
    ----------
    tickers: list
        Tickers for which to download historical information.
    start: str or int
        Start download data from this date.
    end: str or int
        End download data at this date.
    interval: str
        Frequency between data.
    Returns
    -------
    data: dict
        Dictionary including the following keys:
        - tickers: list of tickers kept after filtering;
        - dates: DatetimeIndex of the period;
        - price: array of adjusted closing prices, shape=(num stocks, length period);
        - volume: array of volumes, shape=(num stocks, length period);
        - currencies: list of per-ticker currencies;
        - exchange_rates: exchange rates versus the default currency;
        - default_currency: most common currency among the tickers;
        - sectors: dictionary of stock sector for each ticker;
        - industries: dictionary of stock industry for each ticker.
    """
    # Accept either a collection or a comma/space separated string of symbols.
    tickers = tickers if isinstance(tickers, (list, set, tuple)) else tickers.replace(',', ' ').split()
    tickers = list(set([ticker.upper() for ticker in tickers]))
    data = {}
    si_columns = ["SYMBOL", "CURRENCY", "SECTOR", "INDUSTRY"]
    si_filename = "stock_info.csv"
    if not os.path.exists(si_filename):
        # create a .csv to store stock information
        with open(si_filename, 'w') as file:
            wr = csv.writer(file)
            wr.writerow(si_columns)
    # load stock information file
    si = pd.read_csv(si_filename)
    missing_tickers = [ticker for ticker in tickers if ticker not in si['SYMBOL'].values]
    missing_si, na_si = {}, {}
    currencies = {}
    # Normalize start/end to Unix timestamps; default window is the past year.
    if end is None:
        end = int(dt.datetime.timestamp(dt.datetime.today()))
    elif type(end) is str:
        end = int(dt.datetime.timestamp(dt.datetime.strptime(end, '%Y-%m-%d')))
    if start is None:
        start = int(dt.datetime.timestamp(dt.datetime.today() - dt.timedelta(365)))
    elif type(start) is str:
        start = int(dt.datetime.timestamp(dt.datetime.strptime(start, '%Y-%m-%d')))
    @multitasking.task
    def _download_one_threaded(ticker: str, start: str, end: str, interval: str = "1d"):
        """
        Download historical data for a single ticker with multithreading. Plus, it scrapes missing stock information.
        Parameters
        ----------
        ticker: str
            Ticker for which to download historical information.
        interval: str
            Frequency between data.
        start: str
            Start download data from this date.
        end: str
            End download data at this date.
        """
        data_one = _download_one(ticker, start, end, interval)
        try:
            data_one = data_one["chart"]["result"][0]
            data[ticker] = _parse_quotes(data_one)
            if ticker in missing_tickers:
                currencies[ticker] = data_one['meta']['currency']
                try:
                    # Scrape sector/industry from the Yahoo quote page's
                    # embedded JSON. Best-effort: any failure is ignored and
                    # the ticker simply stays without sector/industry info.
                    html = requests.get(url='https://finance.yahoo.com/quote/' + ticker, headers=headers).text
                    json_str = html.split('root.App.main =')[1].split('(this)')[0].split(';\n}')[0].strip()
                    info = json.loads(json_str)['context']['dispatcher']['stores']['QuoteSummaryStore']['summaryProfile']
                    assert (len(info['sector']) > 0) and (len(info['industry']) > 0)
                    missing_si[ticker] = dict(sector=info["sector"], industry=info["industry"])
                except:
                    pass
        except:
            pass
        progress.animate()
    num_threads = min([len(tickers), multitasking.cpu_count() * 2])
    multitasking.set_max_threads(num_threads)
    progress = ProgressBar(len(tickers), 'completed')
    for ticker in tickers:
        _download_one_threaded(ticker, start, end, interval)
    multitasking.wait_for_tasks()
    progress.completed()
    if len(data) == 0:
        raise Exception("No symbol with full information is available.")
    # Combine per-ticker frames; drop columns with more than 33% missing
    # values, then fill remaining gaps backward/forward.
    data = pd.concat(data.values(), keys=data.keys(), axis=1, sort=True)
    data.drop(columns=data.columns[data.isnull().sum(0) > 0.33 * data.shape[0]], inplace=True)
    data = data.fillna(method='bfill').fillna(method='ffill').drop_duplicates()
    # Append newly scraped stock info rows to the cache file.
    info = zip(list(missing_si.keys()), [currencies[ticker] for ticker in missing_si.keys()],
               [v['sector'] for v in missing_si.values()],
               [v['industry'] for v in missing_si.values()])
    with open(si_filename, 'a+', newline='') as file:
        wr = csv.writer(file)
        for row in info:
            wr.writerow(row)
    si = pd.read_csv('stock_info.csv').set_index("SYMBOL").to_dict(orient='index')
    # Columns alternate (Adj Close, Volume) per ticker, hence the [::2] stride.
    missing_tickers = [ticker for ticker in tickers if ticker not in data.columns.get_level_values(0)[::2].tolist()]
    tickers = data.columns.get_level_values(0)[::2].tolist()
    if len(missing_tickers) > 0:
        print('\nRemoving {} from list of symbols because we could not collect full information.'.format(missing_tickers))
    # download exchange rates and convert to most common currency
    currencies = [si[ticker]['CURRENCY'] if ticker in si else currencies[ticker] for ticker in tickers]
    ucurrencies, counts = np.unique(currencies, return_counts=True)
    default_currency = ucurrencies[np.argmax(counts)]
    xrates = get_exchange_rates(currencies, default_currency, data.index, start, end, interval)
    return dict(tickers=tickers,
                dates=pd.to_datetime(data.index),
                price=data.iloc[:, data.columns.get_level_values(1) == 'Adj Close'].to_numpy().T,
                volume=data.iloc[:, data.columns.get_level_values(1) == 'Volume'].to_numpy().T,
                currencies=currencies,
                exchange_rates=xrates,
                default_currency=default_currency,
                sectors={ticker: si[ticker]['SECTOR'] if ticker in si else "NA_" + ticker for ticker in tickers},
                industries={ticker: si[ticker]['INDUSTRY'] if ticker in si else "NA_" + ticker for ticker in tickers})
def logsumexp_masked(a, mask):
    """Returns row-wise masked log sum exp of a.
    Uses the following trick for numeric stability:
    log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
    Args:
      a: 2D tensor.
      mask: 2D tensor of the same shape; nonzero entries mark valid columns.
    """
    mask = tf.cast(mask, a.dtype)
    # Row-wise max used as the stability shift; keepdims so it broadcasts.
    # NOTE(review): taking the max over a*mask assumes masked-out products
    # cannot exceed the true masked max (e.g. non-negative a) — confirm.
    a_max = tf.math.reduce_max(a * mask, axis=1, keepdims=True)
    a = a - a_max
    a_exp = tf.math.exp(a)
    # Masked sum of shifted exponentials, then undo the shift in log space.
    a_sum_exp = tf.math.reduce_sum(a_exp * mask, axis=1, keepdims=True)
    return tf.squeeze(tf.math.log(a_sum_exp) + a_max)
def mutaprop_class(display_name, gui_id=None, gui_major_version=0,
                   gui_minor_version=0):
    """ Class-level decorator. It is required for classes whose instances should
    be visible for the Mutaprop UI manager.
    :param display_name: Class name/description to be accessible by the UI API.
                        In most cases, it's not really important.
    :param gui_id: API-level identifier of a class definition.
                    In most cases, it's not really important.
    :param gui_major_version: Reserved for future use.
    :param gui_minor_version: Reserved for future use.
    """
    def decorator(cls):
        logger.debug("Registered mutaprop class: %s", cls.__name__)
        # Derive a new type mixing the decorated class with MutaPropClass and
        # stamp the UI metadata onto it via MutaPropClass.muta_attr keys.
        # The original class is kept reachable through _orig_cls.
        return type("MutaProp{0}".format(cls.__name__), (cls, MutaPropClass),
                    {MutaPropClass.muta_attr(MutaPropClass.MP_NAME):
                         display_name,
                     MutaPropClass.muta_attr(MutaPropClass.MP_GUI_ID): gui_id,
                     MutaPropClass.muta_attr(
                         MutaPropClass.MP_GUI_MAJOR_VERSION): gui_major_version,
                     MutaPropClass.muta_attr(
                         MutaPropClass.MP_GUI_MINOR_VERSION): gui_minor_version,
                     "__doc__": cls.__doc__,
                     "_orig_cls": cls})
    return decorator
def preprocess2(data: list, max_length: int, test_data: bool):
    """
    Convert raw query-pair strings into fixed-size integer matrices.

    Each input line is "query1\tquery2\n". Both queries are decomposed into
    character-level indices (char2vec), then right-padded with zeros — or
    truncated — to exactly `max_length` columns.

    :param data: list of tab-separated query-pair strings
    :param max_length: fixed length of every output row vector
    :param test_data: when True, hold out the last 3% of rows as a test split
    :return: (train1, train2, test1, test2, train_size) when `test_data`
             is True, otherwise (padded1, padded2)
    """
    queries_a = []
    queries_b = []
    for line in data:
        q1, q2 = line.split('\t')
        queries_a.append(q1)
        queries_b.append(q2.replace('\n', ''))
    # decompose_str_as_one_hot is a project helper (character decomposition).
    vectors_a = [decompose_str_as_one_hot(q, warning=False) for q in queries_a]
    vectors_b = [decompose_str_as_one_hot(q, warning=False) for q in queries_b]

    def _pad(seqs):
        # Right-pad/truncate integer sequences into an (n, max_length)
        # int32 array. Replaces four copy-pasted loops in the original.
        out = np.zeros((len(seqs), max_length), dtype=np.int32)
        for row, seq in enumerate(seqs):
            length = min(len(seq), max_length)
            out[row, :length] = np.array(seq)[:length]
        return out

    data_size = len(data)
    test_size = int(data_size * 0.03)
    train_size = data_size - test_size
    if test_data:
        return (_pad(vectors_a[:train_size]), _pad(vectors_b[:train_size]),
                _pad(vectors_a[train_size:]), _pad(vectors_b[train_size:]),
                train_size)
    return _pad(vectors_a), _pad(vectors_b)
def create_inference_metadata(object_type, boundary_image, boundary_world):
    """
    Create a metadata record for **one** detected object.
    :param object_type: Type of the object | int
    :param boundary_image: Boundary of the object in image coordinates
    :param boundary_world: Boundary of the object in GCS - shape: 2(x, y) x points | np.array
    :return: dictionary describing the detected object, with the world
             boundary encoded as a WKT POLYGON string
    """
    num_points = boundary_world.shape[1]
    vertices = [
        "{} {}".format(boundary_world[0, idx], boundary_world[1, idx])
        for idx in range(num_points)
    ]
    # Close the ring by repeating the first vertex, as WKT polygons require.
    vertices.append("{} {}".format(boundary_world[0, 0], boundary_world[1, 0]))
    return {
        "obj_type": object_type,
        "obj_boundary_image": boundary_image,
        "obj_boundary_world": "POLYGON ((" + ", ".join(vertices) + "))",
    }
def noauth_filter_factory(global_conf, forged_roles):
    """Create a NoAuth paste deploy filter.

    :param global_conf: paste.deploy global configuration (unused here)
    :param forged_roles: A space seperated list for roles to forge on requests
    """
    roles = forged_roles.split()

    def filter(app):
        return NoAuthFilter(app, roles)

    return filter
from typing import Tuple
from typing import Dict
def parse_line_protocol_stat_key(key: str) -> Tuple[str, Dict[str, str]]:
    """Parse a line-protocol-ish key into a stat prefix and label dict.

    Examples:
    SNMP_WORKER;hostname=abc.com,worker=snmp-mti
    will become:
    ("SNMP_WORKER", {"hostname": "abc.com", "worker": "snmp-mti"})

    A key without a ";" separator, or with malformed labels, is returned
    unchanged with an empty label dict.
    """
    try:
        prefix, raw_labels = key.split(";", 1)
        pairs = [item.split("=", 1) for item in raw_labels.split(",")]
        return prefix, dict(pairs)
    except ValueError:
        return key, {}
def negative_log_partial_likelihood(censor, risk):
    """Return the negative log-partial likelihood of the prediction.

    risk is the risk output from the neural network
    censor is the vector of inputs that are censored (1 - dead, 0 - censored)
    Uses the Keras backend (K) and TensorFlow to perform calculations.
    NOTE(review): assumes the inputs are already sorted by decreasing
    survival time so the cumulative sum forms the correct risk sets —
    confirm at the call site.
    """
    # calculate negative log likelihood from estimated risk
    epsilon = 0.001  # guards the log against an all-zero cumulative sum
    risk = K.reshape(risk, [-1])  # flatten
    hazard_ratio = K.exp(risk)
    # cumsum on sorted surv time accounts for concordance
    log_risk = K.log(tf.cumsum(hazard_ratio)+epsilon)
    log_risk = K.reshape(log_risk, [-1])
    uncensored_likelihood = risk - log_risk
    # apply censor mask: 1 - dead, 0 - censor
    censored_likelihood = uncensored_likelihood * censor
    num_observed_events = K.sum(censor)
    # Average over observed (uncensored) events only.
    neg_likelihood = - K.sum(censored_likelihood) / \
        tf.cast(num_observed_events, tf.float32)
    return neg_likelihood
import math
def approx_equal(x, y, tol=1e-12, rel=1e-7):
    """approx_equal(x, y [, tol [, rel]]) => True|False

    Return True if ``x`` and ``y`` are approximately equal within an
    absolute error ``tol`` or a relative error ``rel``, whichever allows
    the larger difference. Numbers which compare equal also compare
    approximately equal.

    Both ``tol`` and ``rel`` must be finite, non-negative numbers.

    >>> approx_equal(1.2589, 1.2587, tol=0.0003, rel=0)
    True
    >>> approx_equal(1.2589, 1.2587, tol=0.0001, rel=0)
    False

    Absolute error is ``abs(x - y)``; relative error is measured against
    the larger magnitude of the two, so it is the smaller of
    ``abs((x-y)/x)`` and ``abs((x-y)/y)`` when neither is zero.

    Complex numbers are not supported directly: compare real and
    imaginary parts individually. NANs always compare unequal, even with
    themselves. Infinities compare approximately equal only when they
    have the same sign; an infinity never approximately equals a finite
    number.
    """
    if tol < 0 or rel < 0:
        raise ValueError('error tolerances must be non-negative')
    # NANs never equal anything, approximately or otherwise.
    if math.isnan(x) or math.isnan(y):
        return False
    # Exact equality (including two same-signed infinities) short-circuits.
    if x == y:
        return True
    # Any infinity left here is opposite-signed or paired with a finite
    # number, so it cannot be approximately equal.
    if math.isinf(x) or math.isinf(y):
        return False
    # Two finite numbers: compare against the looser of the two tolerances.
    allowed_error = max(tol, rel * max(abs(x), abs(y)))
    return abs(x - y) <= allowed_error
import os
def get_deleted_filename(absolute):
    """Return the name of the file deleted by the AUFS whiteout file at
    ``absolute``, or None when the path does not reference a whiteout file.
    """
    base = os.path.basename(absolute)
    if base.startswith(AUFS_WHITEOUT):
        # Drop the whiteout prefix to recover the deleted file's name.
        return base[AUFS_WHITEOUT_PREFIX_LENGTH:]
    return None
import json
def get_latest_enabled_scripts():
    """The ``/scripts/latest/enabled`` endpoint requires authentication.

    It is used to get latest enabled scripts for all services submitted
    by all teams including master/organizer where the team id will be
    Null.

    The JSON response is:
        {
          "scripts" : [List of {"id" : int,
                                "type": ("exploit", "benign", "getflag",
                                         "setflag"),
                                "team_id": int or NULL (NULL if it's our
                                           exploit),
                                "service_id" : int}]
        }

    :return: a JSON dictionary that contains all latest working scripts.

    NOTE(review): relies on a module-level ``mysql`` connection object
    defined elsewhere in this module.
    """
    cursor = mysql.cursor()
    # First, we need to get the latest scripts submitted by each team for each service.
    # Union that with all the scripts of administrator i.e get_flag/set_flag
    # (those rows have team_id NULL).
    cursor.execute("""SELECT MAX(id) as id, type, team_id, service_id
                      FROM scripts
                      WHERE current_state = 'enabled'
                      AND team_id IS NOT NULL
                      GROUP BY team_id, service_id, type
                      UNION
                      SELECT id, type, team_id, service_id
                      FROM scripts
                      WHERE current_state = 'enabled'
                      AND team_id IS NULL
                      GROUP BY team_id, service_id, type""")
    return json.dumps({"scripts": cursor.fetchall()})
def hanning(shape, dtype=np.float, device=backend.cpu_device):
    """Create multi-dimensional hanning window.

    Builds a separable window: a 1-D Hann window is computed for each
    axis and broadcast-multiplied into the output.

    Args:
        shape (tuple of ints): Output shape.
        dtype (Dtype): Output data-type.
        device (Device): Output device.

    Returns:
        array: hanning filter.

    NOTE(review): ``np.float`` was removed in NumPy >= 1.24; the default
    dtype may need to become ``float`` / ``np.float64``.
    """
    device = backend.Device(device)
    xp = device.xp
    shape = _normalize_shape(shape)
    with device:
        window = xp.ones(shape, dtype=dtype)
        # Iterate axes from last to first; n counts trailing singleton
        # dims needed to broadcast the 1-D window along axis -(n+1).
        for n, i in enumerate(shape[::-1]):
            x = xp.arange(i, dtype=dtype)
            # max(1, ...) avoids division by zero for length-1 axes;
            # (i - i % 2) keeps the window symmetric for even lengths.
            w = 0.5 - 0.5 * xp.cos(2 * np.pi * x / max(1, (i - (i % 2))))
            window *= w.reshape([i] + [1] * n)
        return window
from typing import Union
def bitwise_and(x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
    """
    Computes the bitwise AND of the underlying binary representation of each element x1_i of the input array x1 with
    the respective element x2_i of the input array x2.

    Thin dispatcher: delegates to the current backend framework's
    implementation via ``_cur_framework`` (defined elsewhere).

    :param x1: first input array. Should have an integer or boolean data type.
    :param x2: second input array. Must be compatible with x1 (see Broadcasting). Should have an integer or
               boolean data type.
    :return: an array containing the element-wise results. The returned array must have a data type determined
             by Type Promotion Rules.
    """
    return _cur_framework(x1, x2).bitwise_and(x1, x2)
import os
def init_database(use_mysql=False, dbname="sbs"):
    """Initialize the database schema and return a new session.

    If ``use_mysql`` is true, connection settings are read from the
    ``MYSQL_LOGIN``, ``MYSQL_PASSWORD`` and ``MYSQL_PORT`` environment
    variables; otherwise a local SQLite file database is used.

    :param use_mysql: connect to MySQL instead of SQLite
    :param dbname: MySQL database name (ignored for SQLite)
    :return: a SQLAlchemy session bound to the initialized engine
    """
    if use_mysql:
        db_setup = dict(user=os.environ.get('MYSQL_LOGIN'),
                        password=os.environ.get('MYSQL_PASSWORD'),
                        host="127.0.0.1",
                        # Default to 3306, the standard MySQL port
                        # (previously 3006, a typo).
                        port=os.environ.get('MYSQL_PORT', 3306),
                        dbname=dbname
                        )
        mysql_setup = "mysql+mysqldb://{user}:{password}@{host}:{port}/{dbname}?charset=utf8".format(**db_setup)
        engine = create_engine(mysql_setup, echo=False)
    else:
        engine = create_engine('sqlite:///data.sqlite', echo=False)
    # Create all tables for every model module backed by this engine.
    event.Base.metadata.create_all(engine)
    gameinfomodel.Base.metadata.create_all(engine)
    playerinfo.Base.metadata.create_all(engine)
    teaminfomodel.Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
def add_without_punctuation(line, punctuation):
    """Strip the given punctuation characters from ``line``.

    Param:
        line (unicode): text to clean
        punctuation (str): characters to remove
    Returns:
        False when the line contained no punctuation,
        otherwise the corrected line
    """
    stripped = line.translate(str.maketrans('', '', punctuation))
    # Only report a corrected line when something was actually removed.
    return stripped if stripped != line else False
from typing import Callable
def there_is_zero(
    f: Callable[[float], float], head: float, tail: float, subint: int
) -> bool:
    """
    Report whether ``f`` appears to have a zero on ``[head, tail]``,
    sampling the interval at ``subint`` equally spaced points and
    checking for a sign change (or exact zero) relative to ``f(head)``.
    """
    step = (tail - head) / subint
    reference = f(head)
    t = head
    for _ in range(subint):
        t += step
        # Opposite signs (or a zero sample, or f(head) == 0) give a
        # non-positive product.
        if reference * f(t) <= 0:
            return True
    return False
def target_distribution_gen(name, parameter1, parameter2):
    """Generate a target probability distribution over the 4x4x4 outcomes.

    :param name: distribution family, one of "Fritz-visibility",
        "Renou-visibility", "Renou-localnoise", "elegant-visibility",
        "elegant-localnoise"
    :param parameter1: usually a parameter of the distribution (not always
        relevant)
    :param parameter2: usually the noise / visibility parameter
    :return: flat numpy array ``p`` of 64 outcome probabilities
        (normalization is asserted before returning)

    NOTE(review): if ``name`` matches no known family, ``p`` is never
    bound and the final assert raises UnboundLocalError. ``ids`` is
    computed in every branch but never returned.
    """
    if name=="Fritz-visibility":
        """ parameter2 is the visibility"""
        ids = np.zeros((4,4,4)).astype(str)
        p = np.zeros((4,4,4))
        # Outcomes are pairs of bits: a = ij, b = kl, c = mn (base 2).
        for i,j,k,l,m,n in product('01',repeat=6):
            a = int(i+j,2)
            b = int(k+l,2)
            c = int(m+n,2)
            temp0 = [str(a),str(b),str(c)]
            temp = [a,b,c]
            ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
            # Constants cos(pi/8)sin(pi/8), cos^2(pi/8), sin^2(pi/8).
            cspi8 = 1/(2*np.sqrt(2))
            cos2pi8 = (2 + np.sqrt(2))/4
            sin2pi8 = (2 - np.sqrt(2))/4
            if m==j and n==l:
                if n=='0':
                    if i==k:
                        p[temp[0],temp[1],temp[2]] = (1 - parameter2*(cos2pi8 - sin2pi8))/16
                    if i!=k:
                        p[temp[0],temp[1],temp[2]] = (1 + parameter2*(cos2pi8 - sin2pi8))/16
                if n=='1':
                    if m=='0':
                        if i==k:
                            p[temp[0],temp[1],temp[2]] = 1/16 - cspi8 * parameter2/8
                        if i!=k:
                            p[temp[0],temp[1],temp[2]] = 1/16 + cspi8 * parameter2/8
                    if m=='1':
                        if i==k:
                            p[temp[0],temp[1],temp[2]] = 1/16 + cspi8 * parameter2/8
                        if i!=k:
                            p[temp[0],temp[1],temp[2]] = 1/16 - cspi8 * parameter2/8
        p = p.flatten()
        ids = ids.flatten()
    if name=="Renou-visibility":
        """ Info: If param_c >~ 0.886 or <~0.464, there is no classical 3-local model."""
        """ In terms of c**2: above 0.785 or below 0.215 no classical 3-local model."""
        c = parameter1
        v = parameter2
        # Closed-form probabilities in c (state parameter) and v (visibility).
        p = np.array([
            -(-1 + v)**3/64.,-((-1 + v)*(1 + v)**2)/64.,((-1 + v)**2*(1 + v))/64.,((-1 + v)**2*(1 + v))/64.,-((-1 + v)*(1 + v)**2)/64.,-((-1 + v)*(1 + v)**2)/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
            ((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
            (1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,-((-1 + v)*(1 + v)**2)/64.,-((-1 + v)*(1 + v)**2)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
            -((-1 + v)*(1 + v)**2)/64.,-(-1 + v)**3/64.,((-1 + v)**2*(1 + v))/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
            (1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + 3*(1 - 2*c**2)**2*v + 3*(1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
            (1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,
            (1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
            (1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,
            (1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
            (1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,
            (1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
            (1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
            (1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
            (1 + 3*(1 - 2*c**2)**2*v + 3*(1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.
        ])
        ids = np.array([
            "000", "001", "002", "003", "010", "011", "012", "013", "020", "021", \
            "022", "023", "030", "031", "032", "033", "100", "101", "102", "103", \
            "110", "111", "112", "113", "120", "121", "122", "123", "130", "131", \
            "132", "133", "200", "201", "202", "203", "210", "211", "212", "213", \
            "220", "221", "222", "223", "230", "231", "232", "233", "300", "301", \
            "302", "303", "310", "311", "312", "313", "320", "321", "322", "323", \
            "330", "331", "332", "333"
        ])
    if name=="Renou-localnoise":
        """ Info: If param_c >~ 0.886 or <~0.464, there is no classical 3-local model."""
        """ In terms of c**2: above 0.785 or below 0.215 no classical 3-local model."""
        param_c = parameter1
        # NOTE(review): ``np.np.sqrt`` is a typo for ``np.sqrt`` -- this
        # branch raises AttributeError as written.
        param_s = np.np.sqrt(1-param_c**2)
        # the si and ci functions
        param2_c = {'2':param_c, '3':param_s}
        param2_s = {'2':param_s, '3':-1*param_c}
        # First create noiseless Salman distribution.
        ids = np.zeros((4,4,4)).astype(str)
        p = np.zeros((4,4,4))
        for a,b,c in product('0123',repeat=3):
            temp0 = [a,b,c]
            temp = [int(item) for item in temp0]
            ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
            # p(12vi) et al.
            if (a=='0' and b=='1' and c=='2') or (a=='1' and b=='0' and c=='3'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
            elif (c=='0' and a=='1' and b=='2') or (c=='1' and a=='0' and b=='3'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
            elif (b=='0' and c=='1' and a=='2') or (b=='1' and c=='0' and a=='3'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
            elif (a=='0' and b=='1' and c=='3') or (a=='1' and b=='0' and c=='2'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
            elif (c=='0' and a=='1' and b=='3') or (c=='1' and a=='0' and b=='2'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
            elif (b=='0' and c=='1' and a=='3') or (b=='1' and c=='0' and a=='2'):
                p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
            # p(vi vj vk) et al.
            elif a in '23' and b in '23' and c in '23':
                p[temp[0],temp[1],temp[2]] = 1/8 * (param2_c[a]*param2_c[b]*param2_c[c] + param2_s[a]*param2_s[b]*param2_s[c])**2
            else:
                p[temp[0],temp[1],temp[2]] = 0
        # Let's add local noise.
        # Each detector works with probability parameter2, otherwise it
        # outputs uniform noise; mix the corresponding marginals.
        new_values = np.zeros_like(p)
        for a,b,c in product('0123',repeat=3):
            temp0 = [a,b,c]
            temp = [int(item) for item in temp0]
            new_values[temp[0],temp[1],temp[2]] = (
                parameter2**3 * p[temp[0],temp[1],temp[2]] +
                parameter2**2*(1-parameter2) * 1/4 * ( np.sum(p,axis=2)[temp[0],temp[1]] + np.sum(p,axis=0)[temp[1],temp[2]] + np.sum(p,axis=1)[temp[0],temp[2]] ) +
                parameter2*(1-parameter2)**2 * 1/16 * ( np.sum(p,axis=(1,2))[temp[0]] + np.sum(p,axis=(0,2))[temp[1]] + np.sum(p,axis=(0,1))[temp[2]] ) +
                (1-parameter2)**3 * 1/64
            )
        p = new_values.flatten()
        ids = ids.flatten()
    if name=="elegant-visibility":
        """ Recreating the elegant distribution with visibility v (parameter2) in each singlet. """
        ids = np.zeros((4,4,4)).astype(str)
        for a,b,c in product('0123',repeat=3):
            temp0 = [a,b,c]
            temp = [int(item) for item in temp0]
            ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
        ids = ids.flatten()
        p = np.array([1/256 *(4+9 *parameter2+9 *parameter2**2+3 *parameter2**3),1/256 *(4+parameter2-3 *parameter2**2-parameter2**3),1/256 *(4+parameter2-3 *parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 
        *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3)])
    if name=="elegant-localnoise":
        """ Recreating the elegant distribution, with each detector having 1-v (1-parameter2) chance of outputting a uniformly random output, and v chance of working properly. """
        ids = np.zeros((4,4,4)).astype(str)
        p = np.zeros((4,4,4))
        for a,b,c in product('0123',repeat=3):
            temp0 = [a,b,c]
            temp = [int(item) for item in temp0]
            ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
            if (a==b) and (b==c):
                p[temp[0],temp[1],temp[2]] = 25/256
            elif (a==b and b!=c) or (b==c and c!=a) or (c==a and a!=b):
                p[temp[0],temp[1],temp[2]] = 1/256
            else:
                p[temp[0],temp[1],temp[2]] = 5/256
        # Let's add local noise.
        # Same detector-noise mixing of marginals as in Renou-localnoise.
        new_values = np.zeros_like(p)
        for a,b,c in product('0123',repeat=3):
            temp0 = [a,b,c]
            temp = [int(item) for item in temp0]
            new_values[temp[0],temp[1],temp[2]] = (
                parameter2**3 * p[temp[0],temp[1],temp[2]] +
                parameter2**2*(1-parameter2) * 1/4 * ( np.sum(p,axis=2)[temp[0],temp[1]] + np.sum(p,axis=0)[temp[1],temp[2]] + np.sum(p,axis=1)[temp[0],temp[2]] ) +
                parameter2*(1-parameter2)**2 * 1/16 * ( np.sum(p,axis=(1,2))[temp[0]] + np.sum(p,axis=(0,2))[temp[1]] + np.sum(p,axis=(0,1))[temp[2]] ) +
                (1-parameter2)**3 * 1/64
            )
        p=new_values.flatten()
        ids = ids.flatten()
    assert (np.abs(np.sum(p)-1.0) < (1E-6)),"Improperly normalized p!"
    return p
def module_code(
    sexpr,
    name: str = "<unknown>",
    filename: str = "<unknown>",
    lineno: int = 1,
    doc: str = "",
):
    """Create a module's code object from given metadata and s-expression.

    :param sexpr: s-expression to compile into the module body
    :param name: code object name
    :param filename: filename recorded in the code object
    :param lineno: first line number recorded in the code object
    :param doc: module docstring
    :return: the compiled code object

    NOTE(review): ``Builder``, ``ScopeSolver``, ``SharedState``,
    ``scheduling`` and ``make_code_obj`` are compiler internals defined
    elsewhere in this package.
    """
    module_builder = Builder(
        ScopeSolver.outermost(), [], SharedState(doc, lineno, filename)
    )
    # incompletely build instruction
    scheduling(module_builder.eval(sexpr))
    # resolve symbols, complete building requirements
    module_builder.sc.resolve()
    # complete building requirements
    instructions = module_builder.build()
    code = make_code_obj(name, filename, lineno, doc, [], [], [], instructions)
    return code
def acknowledgements():
    """Provide acknowledgements for the JRO instruments and experiments.

    Returns
    -------
    ackn : str
        String providing acknowledgement text for studies using JRO data
    """
    pieces = ["The Jicamarca Radio Observatory is a facility of the",
              "Instituto Geofisico del Peru operated with support from",
              "the NSF AGS-1433968 through Cornell University."]
    return " ".join(pieces)
def list_vmachines(vdc):
    """Return the virtual-machine entries of a VDC.

    :param vdc: object exposing ``to_dict()`` with a "vmachines" key
    Returns:
        list: vmachines info
    """
    vdc_info = vdc.to_dict()
    return vdc_info["vmachines"]
def reflection_matrix(v):
    """The reflection transformation about a plane with normal vector `v`.

    Computes the Householder form ``I - 2 * outer(v, v)``.
    NOTE(review): assumes ``v`` is a unit vector -- the result is a true
    reflection only in that case; confirm with callers.
    """
    dim = len(v)
    normal = np.asarray(v)
    return np.identity(dim) - 2.0 * np.outer(normal, normal)
import logging
def log_level(level):
    """ Setup the root logger for the script.

    Map a level name string to its ``logging`` constant, defaulting to
    INFO for unknown names.
    """
    mapping = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL,
    }
    return mapping.get(level, logging.INFO)
def is_valid_furl_or_file(furl_or_file):
    """Validate a FURL or a FURL file.

    If ``furl_or_file`` looks like a file, we simply make sure its
    directory exists and that it has a ``.furl`` file extension; the FURL
    file is not opened or read. This is useful for cases where auto
    re-connection is being used.
    """
    return bool(is_valid_furl(furl_or_file) or is_valid_furl_file(furl_or_file))
import random
def stride(input_data, input_labels):
    """
    Take an input waterfall visibility with labels and stride across
    frequency, producing folded spectral-window waterfalls.

    Cuts spectral windows of width ``2 * spw_hw`` channels out of the
    (time, 1024) input, picks a random subset of window positions, and
    reassembles them into (time, nchans) data/label arrays to be folded.

    :param input_data: 2D array (time, 1024) of visibilities
    :param input_labels: 2D array (time, 1024) of per-pixel labels
    :return: tuple (X_, X_labels) of strided data and labels
    """
    spw_hw = 32  # spectral window half width, in channels
    nchans = 1024
    # Use integer division: under Python 3, ``/`` produces floats which
    # break random.sample() and range() below (TypeError).
    fold = nchans // (2 * spw_hw)
    sample_spws = random.sample(range(0, 60), fold)
    step = (nchans - 2 * spw_hw) // 60
    x = np.array(
        [
            input_data[:, i - spw_hw: i + spw_hw]
            for i in range(spw_hw, 1024 - spw_hw, step)
        ]
    )
    x_labels = np.array(
        [
            input_labels[:, i - spw_hw: i + spw_hw]
            for i in range(spw_hw, 1024 - spw_hw, step)
        ]
    )
    # Keep only the randomly sampled windows, transposed to (chan, time).
    X = np.array([x[i].T for i in sample_spws])
    X_labels = np.array([x_labels[i].T for i in sample_spws])
    X_ = X.reshape(-1, 60).T
    X_labels = X_labels.reshape(-1, 60).T
    return X_, X_labels
import os
def abspaths(paths):
    """
    A wrapper around os.path.abspath that processes multiple paths.

    :param paths: iterable of path strings
    :return: list of absolute paths
    """
    return list(map(os.path.abspath, paths))
import DominantSparseEigenAD.Lanczos as lanczos
import torch
def chiF_sparseAD(model, k):
    """
    Compute chi_F using the DominantSparseSymeig primitive, where the matrix
    to be diagonalized is "sparse" and represented as a function.

    :param model: physics model exposing ``H``, ``Hadjoint_to_gadjoint``,
        ``g`` (a torch parameter) and ``dim``
    :param k: number of Lanczos vectors used by the dominant eigensolver
    :return: tuple (E0, psi0, chiF) -- ground-state energy, ground state,
        and the fidelity susceptibility -d^2 log F / dg^2
    """
    lanczos.setDominantSparseSymeig(model.H, model.Hadjoint_to_gadjoint)
    dominant_sparse_symeig = lanczos.DominantSparseSymeig.apply
    E0, psi0 = dominant_sparse_symeig(model.g, k, model.dim)
    # log fidelity; psi0 is detached on one side so grads flow through
    # only one factor of the overlap.
    logF = torch.log(psi0.detach().matmul(psi0))
    # Second derivative of log F w.r.t. g via double backward.
    dlogF, = torch.autograd.grad(logF, model.g, create_graph=True)
    d2logF, = torch.autograd.grad(dlogF, model.g)
    chiF = -d2logF.item()
    return E0, psi0, chiF
def sum_digits(y):
    """Sum all the digits of y.

    >>> sum_digits(10) # 1 + 0 = 1
    1
    >>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12
    12
    >>> sum_digits(1234567890)
    45
    >>> a = sum_digits(123) # make sure that you are using return rather than print
    >>> a
    6
    """
    total = 0
    while y > 0:
        # Peel off the least significant digit each iteration.
        y, digit = divmod(y, 10)
        total += digit
    return total
import scipy
def leastsq(error_func, x0, *args, **options):
    """Find the parameters that yield the best fit for the data.

    `x0` can be a sequence, array, Series, or Params.
    Positional arguments are passed along to `error_func`.
    Keyword arguments are passed to `scipy.optimize.leastsq`.

    error_func: function that computes a sequence of errors
    x0: initial guess for the best parameters
    args: passed to error_func
    options: passed to leastsq

    :returns: Params object with best_params and ModSimSeries with details
    """
    # Force full output so we get a message if something goes wrong.
    options["full_output"] = True
    best_params, cov_x, infodict, mesg, ier = scipy.optimize.leastsq(
        error_func, x0=x0, args=args, **options)
    # Pack the diagnostics into a ModSimSeries object.
    details = ModSimSeries(infodict)
    details.set(cov_x=cov_x, mesg=mesg, ier=ier)
    # Preserve the Params wrapper when the caller passed one in.
    if isinstance(x0, Params):
        best_params = Params(Series(best_params, x0.index))
    return best_params, details
def atomic_token_partition(value):
    """Partition the given value on an atomic token.

    An atomic token is one that appears resolvable, i.e. contains no
    nested tokens. The returned token includes the token syntax. If no
    complete token is found, the returned tuple contains None in all
    positions.

    :param value: text to find a token from, and partition
    :type value: str
    :return: before_token, token, after_token
    :rtype: tuple(str, str, str)
    """
    head, prefix_sep, remainder = value.rpartition(TOKEN_PREFIX)
    if not prefix_sep:
        # No token opener anywhere in the value.
        return (None, None, None)
    raw_token, suffix_sep, tail = remainder.partition(TOKEN_SUFFIX)
    if not suffix_sep:
        # Opener without a matching closer.
        return (None, None, None)
    return head, make_token_str(raw_token), tail
def add_color_bar(img, space, cv):
    """Surround an image with a constant-color border.

    args:
        img: (ndarray) in [img_rows, img_cols, channels], dtype as uint8
        space: (int) pixels of border on each side
        cv: (int) color value in [0, 255]
    return:
        padded: (ndarray) processed img with the border added
    """
    assert len(img.shape) == 3, "img should be 3D"
    rows, cols, channels = img.shape
    # Start from a solid block of the border color, then paste the image
    # into the center.
    padded = np.ones((rows + 2 * space, cols + 2 * space, channels),
                     np.uint8) * cv
    padded[space: space + rows, space: space + cols] = img
    return padded
import json
def get_cli_body_ssh(command, response, module):
    """Get response for when transport=cli.

    This is kind of a hack, mainly needed because these modules were
    originally written for NX-API and not every command supports
    "| json" over cli/ssh. If "| json" returns an XML string we assume
    the command is valid but the resource does not exist yet. Output is
    kept as a raw string for commands containing 'show run'.
    """
    first = response[0]
    if 'xml' in first or first == '\n':
        # "| json" on an unsupported object yields XML -> resource absent.
        body = []
    elif 'show run' in command:
        # Running-config output stays raw text.
        body = response
    else:
        try:
            body = [json.loads(first)]
        except ValueError:
            module.fail_json(msg='Command does not support JSON output',
                            command=command)
    return body
def zero_at(pos, size=8):
    """
    Create a size-bit int which only has one '0' bit at specific position.

    :param int pos: Position of '0' bit, counted from the most
        significant bit.
    :param int size: Length of value by bit.
    :rtype: int
    """
    assert 0 <= pos < size
    # All bits set, with the single bit at ``pos`` (MSB-first) cleared.
    return ((1 << size) - 1) ^ (1 << (size - pos - 1))
def get_authz_token(request, user=None, access_token=None):
    """Construct AuthzToken instance from session; refresh token if needed.

    :param request: Django-style request carrying the session
    :param user: optional user to build the token for
    :param access_token: optional explicit access token overriding the
        request/session tokens
    :return: an AuthzToken, or None when no valid token can be produced

    NOTE(review): the ``is_*`` helpers, ``_create_authz_token`` and
    ``authenticate`` are defined elsewhere in this module.
    """
    if access_token is not None:
        # Caller supplied a token explicitly; trust it as-is.
        return _create_authz_token(request, user=user, access_token=access_token)
    elif is_request_access_token(request):
        return _create_authz_token(request, user=user)
    elif is_session_access_token(request) and not is_session_access_token_expired(request, user=user):
        # access_token is None here, so the session token is used.
        return _create_authz_token(request, user=user, access_token=access_token)
    elif not is_refresh_token_expired(request):
        # Have backend reauthenticate the user with the refresh token
        user = authenticate(request)
        if user:
            return _create_authz_token(request, user=user)
    return None
import torch
def get_bert_model():
    """
    Load uncased HuggingFace model.

    Downloads ``bert-base-uncased`` via ``torch.hub`` on first use
    (requires network access); subsequent calls hit the local hub cache.

    :return: the loaded pytorch-transformers BERT model
    """
    bert_model = torch.hub.load('huggingface/pytorch-transformers',
                                'model',
                                'bert-base-uncased')
    return bert_model
import bio_utils.bio as bio
import tqdm
def get_bitseq_estimates(
        config,
        isoform_strategy,
        bitseq_id_field='transcript_id',
        # NOTE(review): mutable default argument; safe only because it is
        # never mutated in this function.
        strings_to_remove=['.cds-only', '.merged']):
    """ Load the bitseq abundance estimates into a single long data frame.

    Parameters
    ----------
    config: dict-like
        The configuration for the project, presumably from the yaml file

    isoform_strategy: str
        The strategy for handling transcript isoforms

    bitseq_id_field: str
        Name for the "transcript_id" field (second column) in bitseq tr file

    strings_to_remove: list of strings
        A list of strings to replace with "" in the bitseq ids

    Returns
    -------
    bitseq_estimates: pd.DataFrame
        A data frame containing the following columns

        * rpkm_{mean,var}: the bitseq estimates
        * sample: the name of the respective sample
        * type: "ribo" or "rna"
    """
    msg = "Reading the bitseq tr info file"
    logger.info(msg)
    # check which transcript file to load
    is_merged = False
    if isoform_strategy == "merged":
        is_merged = True
    # and get the file
    transcript_fasta = filenames.get_transcript_fasta(
        config['genome_base_path'],
        config['genome_name'],
        is_annotated=True,
        is_merged=is_merged,
        is_cds_only=True
    )
    tr_info = filenames.get_bitseq_transcript_info(transcript_fasta)
    bitseq_tr = bio.read_bitseq_tr_file(tr_info)
    # we need to remove all of the indicated strings from the ids
    for to_remove in strings_to_remove:
        tids = bitseq_tr['transcript_id'].str.replace(to_remove, "")
        bitseq_tr['transcript_id'] = tids
    bitseq_tr = bitseq_tr.rename(columns={'transcript_id': bitseq_id_field})
    note = config.get('note', None)
    all_dfs = []
    msg = "Reading riboseq BitSeq estimates"
    logger.info(msg)
    is_unique = 'keep_riboseq_multimappers' not in config
    it = tqdm.tqdm(config['riboseq_samples'].items())
    for name, file in it:
        # Riboseq estimates depend on the periodic read lengths/offsets
        # determined for each sample.
        lengths, offsets = get_periodic_lengths_and_offsets(
            config,
            name,
            isoform_strategy=isoform_strategy,
            is_unique=is_unique
        )
        bitseq_rpkm_mean = filenames.get_riboseq_bitseq_rpkm_mean(
            config['riboseq_data'],
            name,
            is_unique=is_unique,
            is_transcriptome=True,
            is_cds_only=True,
            length=lengths,
            offset=offsets,
            isoform_strategy=isoform_strategy,
            note=note
        )
        field_names = ['rpkm_mean', 'rpkm_var']
        bitseq_rpkm_mean_df = bio.read_bitseq_means(
            bitseq_rpkm_mean,
            names=field_names
        )
        bitseq_rpkm_mean_df['sample'] = name
        bitseq_rpkm_mean_df['type'] = 'ribo'
        # Rows in the means file line up with rows in the tr info file.
        bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field]
        all_dfs.append(bitseq_rpkm_mean_df)
    # now, the rnaseq
    msg = "Reading RNA-seq BitSeq estimates"
    logger.info(msg)
    is_unique = ('remove_rnaseq_multimappers' in config)
    it = tqdm.tqdm(config['rnaseq_samples'].items())
    for name, data in it:
        bitseq_rpkm_mean = filenames.get_rnaseq_bitseq_rpkm_mean(
            config['rnaseq_data'],
            name,
            is_unique=is_unique,
            is_transcriptome=True,
            is_cds_only=True,
            isoform_strategy=isoform_strategy,
            note=note
        )
        field_names = ['rpkm_mean', 'rpkm_var']
        bitseq_rpkm_mean_df = bio.read_bitseq_means(
            bitseq_rpkm_mean,
            names=field_names
        )
        bitseq_rpkm_mean_df['sample'] = name
        bitseq_rpkm_mean_df['type'] = 'rna'
        bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field]
        all_dfs.append(bitseq_rpkm_mean_df)
    msg = "Joining estimates into long data frame"
    logger.info(msg)
    long_df = pd.concat(all_dfs)
    long_df = long_df.reset_index(drop=True)
    return long_df
from pathlib import Path
def na_layout(na_layout_path: Path) -> ParadigmLayout:
    """
    Parse and return the NA layout read from the given path.
    """
    with na_layout_path.open(encoding="UTF-8") as handle:
        return ParadigmLayout.load(handle)
def mean_allcnnc():
    """The all convolution layer implementation of torch.mean().

    Averages over a fixed 6x6 spatial window and flattens the result
    (``flatten`` is a helper defined elsewhere in this module).
    """
    # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100
    return nn.Sequential(
        # 6x6 matches the feature-map size produced for CIFAR-100 inputs.
        nn.AvgPool2d(kernel_size=(6, 6)),
        flatten()
    )
def load_atomic_data_for_training(in_file, categories, tokenizer, max_input_length, max_output_length):
    """
    Loads an ATOMIC dataset file and converts it to padded token-ID examples.
    :param in_file: CSV ATOMIC file
    :param categories: ATOMIC category list
    :param tokenizer: LM tokenizer
    :param max_input_length: cap on the tokenized input length
    :param max_output_length: cap on the tokenized output length
    :return: dict with padded "examples" and per-example "input_lengths"
    """
    raw = load_atomic_data(in_file, categories)

    # Flatten the nested {e1: {category: [e2, ...]}} structure into
    # ("e1 <cat>", "e2 <eos>") source/target string pairs.
    pairs = []
    for e1, e1_relations in raw.items():
        for cat, e2s in e1_relations.items():
            for e2 in e2s:
                pairs.append((f"{e1} <{cat}>", f"{e2} <eos>"))

    def encode(text):
        return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

    encoded = [(encode(src), encode(tgt)) for src, tgt in pairs]

    # Shrink the padding targets to the longest sequence actually present.
    max_input_length = min(max_input_length, max(len(src) for src, _ in encoded))
    max_output_length = min(max_output_length, max(len(tgt) for _, tgt in encoded))
    max_length = max_input_length + max_output_length + 1

    input_lengths = [len(src) for src, _ in encoded]
    padded = []
    for src, tgt in encoded:
        seq = (src + tgt)[:max_length]
        padded.append(seq + [0] * (max_length - len(seq)))
    return {"examples": padded, "input_lengths": input_lengths}
def md5s_loaded(func):
    """Decorator which automatically calls load_md5s before *func* runs.

    Ensures ``self.md5_map`` is populated (by calling ``self.load_md5s()``
    once) before the wrapped method executes.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped method's name/docstring
    def newfunc(self, *args, **kwargs):
        # Identity comparison per PEP 8 (was `== None`).
        if self.md5_map is None:
            self.load_md5s()
        return func(self, *args, **kwargs)
    return newfunc
def is_dap_message(message: str) -> bool:
    """Return True if *message* contains information about a neighbour DAP."""
    # The membership test already yields a bool; no explicit branch needed.
    return "DAP" in message
import numpy as np
def PhotoMetricDistortion(
    img,
    brightness_delta=32,
    contrast_range=(0.5, 1.5),
    saturation_range=(0.5, 1.5),
    hue_delta=18):
    """Apply photometric distortion to image sequentially; every operation
    is applied with a probability of 0.5. The position of random contrast is in
    second or second to last.
    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)
    Args:
        img (np.ndarray): input image (BGR, uint8 assumed — values are
            clipped to [0, 255]).
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    Returns:
        np.ndarray: the distorted image.
    """
    # BUG FIX: the stdlib `random.randint` requires two arguments, so the
    # original `random.randint(2)` raised TypeError at runtime. This code was
    # written against numpy.random (mmcv style); use np.random throughout,
    # where randint(2) draws 0 or 1 (a fair coin).
    contrast_lower, contrast_upper = contrast_range
    saturation_lower, saturation_upper = saturation_range
    def bgr2hsv(img):
        # NOTE(review): relies on a module-level `import cv2` — confirm.
        return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    def hsv2bgr(img):
        return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    def convert(img, alpha=1, beta=0):
        """Multiply with alpha and add beta, clipping back to uint8 range."""
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)
    def brightness(img):
        """Brightness distortion (additive shift)."""
        if np.random.randint(2):
            return convert(
                img,
                beta=np.random.uniform(-brightness_delta,
                                       brightness_delta))
        return img
    def contrast(img):
        """Contrast distortion (multiplicative scale)."""
        if np.random.randint(2):
            return convert(
                img,
                alpha=np.random.uniform(contrast_lower, contrast_upper))
        return img
    def saturation(img):
        """Saturation distortion (scales the HSV S channel)."""
        if np.random.randint(2):
            img = bgr2hsv(img)
            img[:, :, 1] = convert(
                img[:, :, 1],
                alpha=np.random.uniform(saturation_lower,
                                        saturation_upper))
            img = hsv2bgr(img)
        return img
    def hue(img):
        """Hue distortion (rotates the HSV H channel, modulo 180)."""
        if np.random.randint(2):
            img = bgr2hsv(img)
            img[:, :,
                0] = (img[:, :, 0].astype(int) +
                      np.random.randint(-hue_delta, hue_delta)) % 180
            img = hsv2bgr(img)
        return img
    def distorted(img):
        """Perform the photometric distortion pipeline on one image.
        Args:
            img (np.ndarray): input image.
        Returns:
            np.ndarray: the distorted image.
        """
        # random brightness
        img = brightness(img)
        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        mode = np.random.randint(2)
        if mode == 1:
            img = contrast(img)
        # random saturation
        img = saturation(img)
        # random hue
        img = hue(img)
        # random contrast
        if mode == 0:
            img = contrast(img)
        return img
    return distorted(img)
from typing import Optional
import functools
def sharded_adafactor(
    learning_rate_fn: optax.Schedule,
    weight_decay: Optional[float] = None,
    layerwise_adaptation: bool = False,
    decay_method: str = '',
    decay_adam: float = 0.,
    decay_pow: float = 0.,
    beta1: float = 0.,
    clipping_threshold: float = 1.,
    factored: bool = True,
    epsilon1_grad_sq_reg: float = 1e-30,
    quantized_dtype: jnp.dtype = jnp.int8,
    # TODO(bf-jax) Update default value to True, once this is supported.
    respect_skip_lp_regularization: bool = False,
    per_var_learning_summary=False,
    sort_factored_second_moment_dims=False,
    # min_dim_size_to_factor is only used when
    # sort_factored_second_moment_dims=True.
    min_dim_size_to_factor: int = 128,
    multiply_by_parameter_scale: bool = False,
    epsilon2_param_scale_reg: float = 1e-3,
) -> ShardedGradientTransformation:
  """AdaFactor optimizer that supports SPMD sharding.
  Reference:
    Shazeer et al, 2018: https://arxiv.org/abs/1804.04235
  Adafactor is very similar to Adam (Kingma and Ba, 2019), the major
  differences being:
  1. For a two-dimensional AxB weight matrix, Adafactor uses only A+B auxiliary
     parameters to maintain the second-moment estimator, instead of AB.
     This is advantageous on memory-limited systems. In addition, beta1
     (momentum) is set to zero by default, saving an additional auxiliary
     parameter per weight. Variables with >=3 dimensions are treated as
     collections of two-dimensional matrices - factorization is over the final
     two dimensions.
  2. Adafactor incorporates "update-clipping" - a scale-invariant analog of
     gradient clipping. This improves stability.
  3. Adafactor does not require an external "learning rate". By default, it
     incorporates a relative-update-scale schedule, corresponding to
     inverse-square-root learning-rate-decay in Adam. We hope this works well
     for most applications.
  Args:
    learning_rate_fn: a callable that given the current training step, returns
      the learning rate to apply.
    weight_decay: an optional float tensor as decoupled weight decay value.
    layerwise_adaptation: a boolean, whether or not to use layer-wise
      adaptive moments (LAMB): https://arxiv.org/abs/1904.00962.
    decay_method: a string, deciding how decay_rate should be computed.
      Permitted values are 'adam' and 'pow'.
    decay_adam: a float, decay if decay_method == 'adam'.
    decay_pow: a float, decay if decay_method == 'pow'.
    beta1: a float value between 0 and 1 for momentum.
    clipping_threshold: an optional float >= 1
    factored: a boolean, whether or not to use factored second order momentum.
    epsilon1_grad_sq_reg: Regularization constant for squared gradient.
    quantized_dtype: type of the quantized input. Allowed options are jnp.int8,
      jnp.int16, jnp.bfloat16 and jnp.float32. If floating-point type is
      specified, accumulators are stored as such type, instead of quantized
      integers.
    respect_skip_lp_regularization: whether or not to respect lingvo
      SKIP_LP_REGULARIZATION var collection that skips decoupled weight decay.
    per_var_learning_summary: a bool, whether or not to export per-var learning
      summaries.
    sort_factored_second_moment_dims: a bool, whether to select dims to factor
      by size, for the factored second moment.
    min_dim_size_to_factor: an integer, only factor the statistics if two array
      dimensions have at least this size. NOTE: min_dim_size_to_factor is only
      used when sort_factored_second_moment_dims=True.
    multiply_by_parameter_scale: a boolean, if True, then scale learning_rate
      by parameter scale. if False provided learning_rate is absolute step
      size. NOTE: False by default.
    epsilon2_param_scale_reg: Regularization constant for parameter scale.
      Only used when multiply_by_parameter_scale is True.
  Returns:
    A `ShardedGradientTransformation`.
  """
  # TODO(bf-jax): layerwise adaptation and skip regularization.
  assert not layerwise_adaptation
  assert not respect_skip_lp_regularization
  assert decay_adam >= 0
  assert decay_pow >= 0
  assert learning_rate_fn is not None
  assert decay_method == 'adam' or decay_method == 'pow', (
      f'decay_method: {decay_method} not supported. Supported methods are '
      '"pow", or "adam".')
  # The helper owns all per-variable math; the three closures below just
  # walk the parameter tree with it.
  sharded_adafactor_helper = _ShardedAdafactorHelper(
      learning_rate_fn=learning_rate_fn,
      weight_decay=weight_decay,
      layerwise_adaptation=layerwise_adaptation,
      decay_method=decay_method,
      decay_adam=decay_adam,
      decay_pow=decay_pow,
      beta1=beta1,
      clipping_threshold=clipping_threshold,
      factored=factored,
      epsilon1_grad_sq_reg=epsilon1_grad_sq_reg,
      quantized_dtype=quantized_dtype,
      respect_skip_lp_regularization=respect_skip_lp_regularization,
      per_var_learning_summary=per_var_learning_summary,
      sort_factored_second_moment_dims=sort_factored_second_moment_dims,
      min_dim_size_to_factor=min_dim_size_to_factor,
      multiply_by_parameter_scale=multiply_by_parameter_scale,
      epsilon2_param_scale_reg=epsilon2_param_scale_reg)
  def init_fn(params):
    """Initializes the optimizer's state."""
    return sharded_adafactor_helper.to_state(
        jnp.zeros([], jnp.int32),
        jax.tree_map(sharded_adafactor_helper.init, params))
  def init_partition_spec_fn(var_params):
    """Builds partition specs mirroring the optimizer state tree."""
    var_spec_flattened, _ = jax.tree_flatten(var_params)
    assert var_spec_flattened
    first_var = var_spec_flattened[0]
    assert isinstance(first_var, py_utils.Params)
    # The scalar step counter is replicated (empty split mapping).
    device_mesh = first_var.device_mesh
    count = py_utils.weight_params(
        shape=[],
        init=None,
        dtype=jnp.int32,
        collections=None,
        device_mesh=device_mesh,
        tensor_split_dims_mapping=[])
    return sharded_adafactor_helper.to_state(
        count,
        jax.tree_map(sharded_adafactor_helper.init_partition_spec, var_params))
  def update_fn(updates, state, params=None):
    """Applies one Adafactor step; requires `params` (see error below)."""
    if params is None:
      raise ValueError(
          'You are using a transformation that requires the current value of '
          'parameters, but you are not passing `params` when calling `update`.')
    compute_var_and_slot_update_fn = functools.partial(
        sharded_adafactor_helper.compute_var_and_slot_update, state.count)
    var_names = py_utils.extract_prefixed_keys_from_nested_map(updates)
    output = jax.tree_multimap(compute_var_and_slot_update_fn, updates, state.m,
                               state.m_scale, state.vr, state.vc, state.v,
                               params, var_names)
    updates = jax.tree_map(lambda o: o.update, output)
    count_plus_one = state.count + jnp.array(1, jnp.int32)
    updated_states = sharded_adafactor_helper.to_state(count_plus_one, output)
    return updates, updated_states
  return ShardedGradientTransformation(
      init=init_fn,
      update=update_fn,
      init_partition_spec=init_partition_spec_fn)
def make_game():
    """Build and return an Apprehend game instance."""
    # 'b' (ball) updates before 'P' (player) each frame.
    sprites = {'P': PlayerSprite, 'b': BallSprite}
    return ascii_art.ascii_art_to_game(
        GAME_ART,
        what_lies_beneath=' ',
        sprites=sprites,
        update_schedule=['b', 'P'],
        nb_action=2)
import psutil
import os
import sys
def get_system_memory():
    """Return the total amount of system memory in bytes.

    Returns:
        The total amount of system memory in bytes.
    """
    # Inside a docker container this cgroup file holds the memory cap. Note
    # that the file is not docker-specific and outside a container its value
    # is often much larger than the actual amount of memory.
    cgroup_path = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
    docker_limit = None
    if os.path.exists(cgroup_path):
        with open(cgroup_path, "r") as limit_file:
            docker_limit = int(limit_file.read())

    # Prefer psutil when it is usable.
    try:
        total_memory = psutil.virtual_memory().total
    except ImportError:
        total_memory = None

    if total_memory is None:
        if sys.platform in ("linux", "linux2"):
            # Linux fallback: vmstat reports kilobytes.
            total_memory = vmstat("total memory") * 1024
        else:
            # MacOS fallback.
            total_memory = sysctl(["sysctl", "hw.memsize"])

    if docker_limit is not None:
        return min(docker_limit, total_memory)
    return total_memory
from typing import Any
from typing import Tuple
def process_makefile(
    data: Any,
    specification: Any,
    path: Tuple[str, ...] = (),
    apply_defaults: bool = True,
) -> Any:
    """Validates a makefile and applies defaults to missing keys.

    Note that default values are deep-copied before being set.

    Args:
        data: parsed makefile content to validate (may be None for empty
            YAML subtrees).
        specification: the schema; a spec object, a dict of specs, or a
            list of specs.
        path: keys leading to `data` from the makefile root; used in error
            messages.
        apply_defaults: when True, fill in missing keys from spec defaults.

    Returns:
        The validated data with defaults applied.

    Raises:
        TypeError: if the specification itself is malformed.
        MakefileError: if the data does not match the specification.
    """
    if isinstance(specification, WithoutDefaults):
        # Unwrap and validate against the inner spec, but skip default-filling.
        specification = specification.specification
        data = process_makefile(data, specification, path, apply_defaults=False)
    elif isinstance(specification, PreProcessMakefile):
        # Let the spec transform the data first, then validate the result.
        data, specification = specification(path, data)
        data = process_makefile(data, specification, path, apply_defaults)
    elif _is_spec(specification):
        # A leaf spec: validation only, no recursion.
        _instantiate_spec(specification)(path, data)
    elif isinstance(data, (dict, type(None))) and isinstance(specification, dict):
        # A limitation of YAML is that empty subtrees are equal to None;
        # this check ensures that empty subtrees to be handled properly
        if data is None:
            data = {}
        _process_default_values(data, specification, path, apply_defaults)
        # Recurse into every key present in the data.
        for cur_key in data:
            ref_key = _get_matching_spec_or_value(
                cur_key, specification, path + (cur_key,)
            )
            data[cur_key] = process_makefile(
                data[cur_key], specification[ref_key], path + (cur_key,), apply_defaults
            )
    elif isinstance(data, (list, type(None))) and isinstance(specification, list):
        if not all(map(_is_spec, specification)):
            raise TypeError(
                "Lists contains non-specification objects (%r): %r"
                % (_path_to_str(path), specification)
            )
        elif data is None:  # See comment above
            data = []
        # Every list element must match one of the listed specs.
        specification = IsListOf(*specification)
        _instantiate_spec(specification)(path, data)
    elif not isinstance(specification, (dict, list)):
        raise TypeError(
            "Unexpected type in makefile specification at %r: %r!"
            % (_path_to_str(path), specification)
        )
    else:
        raise MakefileError(
            "Inconsistency between makefile specification and "
            "current makefile at %s:\n    Expected %s, "
            "found %s %r!"
            % (
                _path_to_str(path),
                type(specification).__name__,
                type(data).__name__,
                data,
            )
        )
    return data
def load_fashion(n_data=70000):
    """Fetch the Fashion-MNIST dataset and return the desired subset.

    Args:
        n_data (int, optional): The size of the wanted subset. Defaults to 70000.

    Returns:
        tuple: (data, labels, label_names, dataset_name).
    """
    fashion = fetch_openml("Fashion-MNIST")
    subset, subset_labels = __split_data(
        fashion["data"], fashion["target"].astype(int), n_data)
    # Category names indexed by their integer label (0..9).
    label_names = [
        "T-shirt", "Trouser", "Pullover", "Dress", "Coat",
        "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot",
    ]
    return subset, subset_labels, label_names, "Fashion MNIST"
from matplotlib.animation import Animation
import os
import re
def matplotlib_scraper_multi(block, block_vars, gallery_conf, **kwargs):
    """Scrape Matplotlib images, saving both high- and low-definition copies.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery
    **kwargs : dict
        Additional keyword arguments to pass to
        :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
        The ``format`` kwarg in particular is used to set the file extension
        of the output file (currently only 'png', 'jpg', and 'svg' are
        supported).

    Returns
    -------
    rst : str
        The ReSTructuredText that will be rendered to HTML containing
        the images. This is often produced by :func:`figure_rst`.
    """
    matplotlib, plt = scrapers._import_matplotlib()
    image_path_iterator = block_vars['image_path_iterator']
    image_rsts = []
    # Check for animations
    anims = list()
    if gallery_conf.get('matplotlib_animations', False):
        for ani in block_vars['example_globals'].values():
            if isinstance(ani, Animation):
                anims.append(ani)
    # Then standard images
    for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
        if 'format' in kwargs:
            image_path = '%s.%s' % (os.path.splitext(image_path)[0],
                                    kwargs['format'])
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_num)
        # Deal with animations: emit animation rst instead of a static image.
        cont = False
        for anim in anims:
            if anim._fig is fig:
                image_rsts.append(_anim_rst(anim, image_path, gallery_conf))
                cont = True
                break
        if cont:
            continue
        # get fig titles
        fig_titles = scrapers._matplotlib_fig_titles(fig)
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        # shallow copy should be fine here, just want to avoid changing
        # "kwargs" for subsequent figures processed by the loop
        these_kwargs = kwargs.copy()
        # The high-definition variant reuses the same kwargs but at 200 dpi.
        hikwargs = kwargs.copy()
        hikwargs['dpi']=200
        # Carry over non-default face/edge colors to the savefig call.
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr) and \
                    attr not in kwargs:
                these_kwargs[attr] = fig_attr
        try:
            # The hi-dpi copy lives next to the normal image as "<name>_hidpi.<ext>".
            hipath = image_path[:-4]+'_hidpi' + image_path[-4:]
            fig.savefig(hipath, **hikwargs)
            fig.savefig(image_path, **these_kwargs)
        except Exception:
            plt.close('all')
            raise
        if 'images' in gallery_conf['compress_images']:
            optipng(image_path, gallery_conf['compress_images_args'])
        image_rsts.append(
            figure_rst([image_path], gallery_conf['src_dir'], fig_titles))
    plt.close('all')
    # Single image: emit as-is; multiple: switch to the multi-image CSS class
    # and lay them out in an hlist.
    rst = ''
    if len(image_rsts) == 1:
        rst = image_rsts[0]
    elif len(image_rsts) > 1:
        image_rsts = [re.sub(r':class: sphx-glr-single-img',
                             ':class: sphx-glr-multi-img',
                             image) for image in image_rsts]
        image_rsts = [HLIST_IMAGE_MATPLOTLIB + indent(image, u' ' * 6)
                      for image in image_rsts]
        rst = HLIST_HEADER + ''.join(image_rsts)
    return rst
def add_countdown_ago(date, event, events_dates_per_event):
    """Return the number of days since the most recent occurrence of *event*
    at or before *date*.

    Args:
        date: the reference date.
        event: key into ``events_dates_per_event``.
        events_dates_per_event: mapping from event name to a list of dates.

    Returns:
        int: days elapsed since the latest occurrence at or before *date*
        (0 if an occurrence falls on *date* itself).

    Raises:
        ValueError: if *event* has no occurrence at or before *date*.
    """
    # Deltas <= 0 are occurrences at or before the reference date; the
    # greatest (closest to zero) is the most recent one.
    past_deltas = [
        (special_date - date).days
        for special_date in events_dates_per_event[event]
        if (special_date - date).days <= 0
    ]
    return -max(past_deltas)
import json
import sys
def get_groups_from_json(json_definitions):
    """Return a dict of tango.Group objects matching the JSON definitions.

    Each non-empty string in *json_definitions* (typically the Tango device
    property "GroupDefinitions" from SKABaseDevice) is a JSON-serialised dict
    defining a group, with a "group_name" key and optional "devices" and
    "subgroups" keys, recursively::

        [ {"group_name": "<name>",
           "devices": ["<dev name>", ...],
           "subgroups": [{<nested group>}, ...]},
          ... ]

    For example, a hierarchy of racks, servers and switches::

        [ {"group_name": "servers",
           "devices": ["elt/server/1", "elt/server/2"]},
          {"group_name": "racks",
           "subgroups": [
               {"group_name": "rackA",
                "devices": ["elt/server/1", "elt/switch/A"]}]} ]

    Minimal validation only: empty and whitespace-only strings are ignored.
    This function will *NOT* attempt to verify that the devices exist in the
    Tango database, nor that they are running.

    Parameters
    ----------
    json_definitions: sequence of str
        JSON strings recursively defining the group hierarchy.

    Returns
    -------
    groups: dict
        Maps each group name to its tango.Group, e.g.
        {"<group name 1>": <tango.Group>, ...}; empty dict if no groups
        were specified.

    Raises
    ------
    GroupDefinitionsError:
        - If error parsing JSON string.
        - If missing keys in the JSON definition.
        - If invalid device name.
        - If invalid groups included.
        - If a group has multiple parent groups.
        - If a device is included multiple times in a hierarchy.
          E.g. g1:[a,b] g2:[a,c] g3:[g1,g2]
    """
    try:
        # Parse and validate user's definitions
        groups = {}
        for raw_definition in json_definitions:
            stripped = raw_definition.strip()
            if not stripped:
                continue
            definition = json.loads(stripped)
            _validate_group(definition)
            groups[definition['group_name']] = _build_group(definition)
        return groups
    except Exception as exc:
        # Wrap any failure, keeping the original traceback for debugging.
        ska_error = SKABaseError(exc)
        raise GroupDefinitionsError(ska_error).with_traceback(sys.exc_info()[2])
def _get_indexed_role(dep):
    """Return the function (governor/dependent) and role
    based on the dependency relation type."""
    relation = dep['@type'][:5]
    # Conjunctions carry no participant index.
    if relation in ("conj", "conj_"):
        return (-1, 'conj')
    function, role = _relation_map[relation]
    return (_iminus_one(dep[function]['@idx']), role)
def aug_op_mul_col(aug_input: Tensor, mul: float) -> Tensor:
    """
    Scale every pixel of the tensor by a constant factor.
    :param aug_input: the tensor to augment
    :param mul: the multiplication factor
    :return: the augmented tensor, clipped back into [0, 1]
    """
    scaled = mul * aug_input
    return aug_op_clip(scaled, clip=(0, 1))
import math
def example3(path: str):
    """Planetary orbit.

    Fits the conic  c0*x^2 + c1*x*y + c2*y^2 + c3*x + c4*y = -1  to the
    sampled points via QR least squares, then reports the point on the
    fitted ellipse closest to each sample.
    """
    print(f"\n{Col.TITL}{' Example 3 ':-^79}{Col.RES}\n")
    if not path:
        path = "qr_data/ex3.txt"
    # Read input data: the first line holds the point count m, followed by
    # m lines of "x y" pairs. (`with` ensures the handle is closed — the
    # original leaked it.)
    with open(path, "r") as file:
        ls = file.readlines()
    m = int(ls[0].replace("\n", ""))
    data = np.zeros((m, 2))
    for i in range(1, m + 1):
        data[i - 1, :] = list(map(float, ls[i].replace("\n", "").split(" ")))
    xs = data[:, 0]
    ys = data[:, 1]
    mat = np.ndarray((m, 5))
    f = np.full(shape=m, fill_value=-1.0, dtype=float)
    # BUG FIX: the original filled only rows range(10), leaving the rest of
    # the *uninitialized* matrix as garbage whenever m != 10; fill all m rows.
    for i in range(m):
        mat[i, 0] = xs[i] ** 2
        mat[i, 1] = xs[i] * ys[i]
        mat[i, 2] = ys[i] ** 2
        mat[i, 3] = xs[i]
        mat[i, 4] = ys[i]
    sol = Qr.solve(mat, f)
    res = f - np.matmul(mat, sol)
    print(
        f"{Col.INF} Given the system (A b):\n{np.column_stack((mat, f))}\n\n"
        f"{Col.SOL} The obtained solution is:\n{sol}\n\n"
        f"{Col.INF} Residue norm is: {np.linalg.norm(res)}"
    )
    # Find closest points on ellipse
    def solve(x: float, y: float, coeff: np.ndarray) -> float:
        """Given x, returns the nearest y on the ellipse."""
        # Solve the conic for y at fixed x (a quadratic in y) and pick the
        # root closer to the sample's y.
        a = coeff[2]
        b = x * coeff[1] + coeff[4]
        c = coeff[0] * x * x + coeff[3] * x + 1
        sqt = math.sqrt(b * b - 4 * a * c)
        y0 = (-b - sqt) / (2 * a)
        y1 = (-b + sqt) / (2 * a)
        if abs(y - y0) < abs(y - y1):
            return y0
        else:
            return y1
    yy = [solve(x, y, sol) for x, y in zip(xs, ys)]
    close = zip(xs, ys, yy)
    print(f"{Col.INF} The closest points are:\n[(x_i, y_i, yy_i)] = [")
    for p in close:
        print(f"  {p},")
    print("]\n")
def drones_byDroneId_patch(droneId):
    """
    Update the information on a specific drone
    It is handler for PATCH /drones/<droneId>
    """
    # Thin delegation: all validation and persistence happen in `handlers`.
    return handlers.drones_byDroneId_patchHandler(droneId)
def load_drop_columns(df, targets, file):
    """
    Return the feature columns of *df* that are NOT listed in the
    keep-columns file, i.e. the columns that should be dropped.

    TODO: we probably need to delete this because this exists:
    df = pd.read_csv('data.csv', skipinitialspace=True, usecols=fields)
    #https://stackoverflow.com/questions/26063231/read-specific-columns-with-pandas-or-other-python-module

    :param df: source dataframe
    :param targets: target column names, handed to the feature extractor
    :param file: path of the file listing the columns to keep
    :return: list of feature names to drop (categorical first, then numeric)
    """
    # Set for O(1) membership tests instead of scanning the list per feature.
    keep_columns = set(load_keep_columns(file))
    categorical_features, numeric_features = dd.get_features_by_type(df, targets)
    # The original also built a `keep` list that was never returned (dead code).
    return [feature
            for feature in categorical_features + numeric_features
            if feature not in keep_columns]
def get_categories(categories_file):
    """Group category names by image.

    :param categories_file: COCO-style dict with "categories"
        (each ``{'id': ..., 'name': ...}``) and "annotations"
        (each ``{'image_id': ..., 'category_id': ...}``).
    :return: dict mapping image_id -> list of unique category names,
        in first-seen order.
    """
    # Map each category id to its name.
    id_to_category = {c['id']: c['name'] for c in categories_file['categories']}
    image_categories = {}
    for annotation in categories_file['annotations']:
        names = image_categories.setdefault(annotation['image_id'], [])
        name = id_to_category[annotation['category_id']]
        # Record each category at most once per image, preserving order.
        if name not in names:
            names.append(name)
    return image_categories
from typing import Callable
from datetime import datetime
def get_profit_forecast(code: str,
                        getter: Callable[[str], pd.DataFrame] = None):
    """Return the analysts' profit forecast for a stock as a dict.

    :param code: stock code to look up.
    :param getter: callable mapping a 'YYYY-MM-DD' date string to a
        DataFrame indexed by stock code; defaults to
        ``rdb.get_profit_forecast`` (resolved lazily).
    :return: dict of the forecast row for *code*.
    """
    if getter is None:
        # Resolve lazily so importing this module never evaluates `rdb`.
        getter = rdb.get_profit_forecast
    # BUG FIX: with `from datetime import datetime` in scope, the original
    # `datetime.datetime.now()` raised AttributeError; call the class directly.
    today = datetime.now().strftime('%Y-%m-%d')
    return getter(today).loc[code].to_dict()
import functools
def checkpoint_wrapper(module: nn.Module, offload_to_cpu: bool = False) -> nn.Module:
    """
    A friendlier wrapper for performing activation checkpointing.

    Compared to the PyTorch version, this version:
    - wraps an nn.Module, so that all subsequent calls will use checkpointing
    - handles keyword arguments in the forward
    - handles non-Tensor outputs from the forward
    - supports offloading activations to CPU

    Usage::
        checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
        a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))

    Args:
        module (nn.Module): module to wrap
        offload_to_cpu (Optional, bool): whether to offload activations to CPU
    """
    # Rebind forward so every future call routes through the checkpointed path.
    checkpointed = functools.partial(_checkpointed_forward, module.forward, offload_to_cpu)
    module.forward = checkpointed  # type: ignore
    return module
def get_L_star_CS_d_t_i(L_CS_d_t_i, Q_star_trs_prt_d_t_i, region):
    """(9-2)(9-2)(9-3)

    Hourly sensible cooling load of heating/cooling zone i at date d, time t,
    under the load-balance condition including heat loss (MJ/h).

    Args:
        L_CS_d_t_i: hourly sensible cooling load of zone i at date d, time t (MJ/h)
        Q_star_trs_prt_d_t_i: hourly heat transfer to the non-living room under
            the load-balance condition including heat loss, zone i (MJ/h)
        region: region division (used to derive the seasonal masks)

    Returns:
        Hourly sensible cooling load of zone i at date d, time t, under the
        load-balance condition including heat loss (MJ/h); shape (5, 24 * 365).
    """
    H, C, M = get_season_array_d_t(region)
    # Only the first five heating/cooling zones are considered.
    L_CS_d_t_i = L_CS_d_t_i[:5]
    # Correct only cells that are in the cooling season AND have positive load.
    f = L_CS_d_t_i > 0
    Cf = np.logical_and(C, f)
    L_star_CS_d_t_i = np.zeros((5, 24 * 365))
    # Add the partition heat transfer, clamping the result at zero from below.
    L_star_CS_d_t_i[Cf] = np.clip(L_CS_d_t_i[Cf] + Q_star_trs_prt_d_t_i[Cf], 0, None)
    return L_star_CS_d_t_i
def delete(isamAppliance, name, check_mode=False, force=False):
    """
    Delete an Authentication Mechanism by name (no-op if it does not exist).
    """
    ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)
    mech_id = ret_obj['data']
    # Early return when nothing matched the given name.
    if mech_id == {}:
        logger.info("Authentication Mechanism {0} not found, skipping delete.".format(name))
        return isamAppliance.create_return_object()
    # In check mode, only report that a change would happen.
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_delete(
        "Delete an Authentication Mechanism",
        "{0}/{1}".format(module_uri, mech_id),
        requires_modules=requires_modules, requires_version=requires_version)
def point_to_line_cluster_distance(point, line_cluster):
    """
    Distance between a single point and a cluster of lines.
    """
    line_values = np.array([line.value for line in line_cluster])
    return val_point_to_line_cluster_distance(point.value, line_values)
import time
def str_time_prop(start, end, date_format, prop):
    """Get a time at a proportion of a range of two formatted times.

    *start* and *end* are strings in *date_format* (strftime-style)
    bounding the interval [start, end]. *prop* gives the proportion of
    the interval to advance past *start*. The result is returned in the
    same format.
    """
    start_epoch = time.mktime(time.strptime(start, date_format))
    end_epoch = time.mktime(time.strptime(end, date_format))
    target_epoch = start_epoch + prop * (end_epoch - start_epoch)
    return time.strftime(date_format, time.localtime(target_epoch))
def parse_fields(flds):
    """Parse Data Dictionary XML Fields

    Arguments:
    flds -- XML document element for fields
    """
    fields_dict = dict()
    for field_elem in flds:
        tag = str(field_elem.tag).strip()
        if tag.lower() != FIELD_TAG.lower():
            raise ValueError(tag + " element tag is not equal to " + FIELD_TAG)
        info = {
            'type': str(field_elem.attrib['type']).upper(),
            'number': int(field_elem.attrib['number']),
            'description': field_elem.attrib.get('description', ''),
        }
        # Collect the optional enumerated values of this field.
        values = dict()
        for value_elem in field_elem:
            value_tag = str(value_elem.tag).strip()
            if value_tag.lower() != FIELD_VALUE_TAG.lower():
                raise ValueError(value_tag + " element value tag is not equal to " + FIELD_VALUE_TAG)
            enum = str(value_elem.attrib['enum']).strip()
            values[enum] = str(value_elem.attrib['description']).strip()
        if values:
            info['values'] = values
        fields_dict[str(field_elem.attrib['name']).strip()] = info
    return fields_dict
import json
def analyse_json_data(input_file_path):
    """
    Read receive timestamps from a JSON file and aggregate them into packets.

    Timestamps closer than 400 microseconds to their predecessor are counted
    as part of the same "big packet".

    :type input_file_path: str
    :rtype: InputData
    :raise InputFileNotFoundError
    """
    try:
        with open(input_file_path, 'r') as input_file:
            json_data = json.load(input_file)
    except FileNotFoundError as e:
        raise InputFileNotFoundError(e)

    data = InputData()
    aggregated_count = 0
    previous_micros = 0
    for time_nanos in json_data['timesReceivedInNanoseconds']:
        time_micros = time_nanos / 1000
        # A gap above 400 microseconds closes the current aggregate.
        if time_micros - previous_micros > 400 and aggregated_count != 0:
            data.add_big_packet(aggregated_count)
            aggregated_count = 0
        aggregated_count += 1
        previous_micros = time_micros
    # Flush the trailing aggregate, if any.
    if aggregated_count != 0:
        data.add_big_packet(aggregated_count)
    return data
import math
def process_datastore_tweets(project, dataset, pipeline_options):
    """Creates a pipeline that reads tweets from Cloud Datastore from the last
    N days. The pipeline finds the top most-used words, the top most-tweeted
    URLs, ranks word co-occurrences by an 'interestingness' metric (similar to
    on tf* idf).

    NOTE(review): this function uses Python 2-only syntax (tuple-unpacking
    lambdas and the `unicode` builtin); it cannot run under Python 3 as
    written.

    Args:
        project: GCP project id that owns the Datastore entities and the
            BigQuery dataset.
        dataset: name of the BigQuery dataset the three result tables
            (word_counts, urls, word_cooccur) are written to.
        pipeline_options: beam PipelineOptions; must provide the UserOptions
            view (notably `timestamp`, stamped onto every output row).

    Returns:
        The result of `p.run()`.
    """
    user_options = pipeline_options.view_as(UserOptions)
    DAYS = 4

    p = beam.Pipeline(options=pipeline_options)

    # Read entities from Cloud Datastore into a PCollection, then filter to get
    # only the entities from the last DAYS days.
    lines = (p | QueryDatastore(project, DAYS)
             | beam.ParDo(FilterDate(user_options, DAYS))
             )

    # Total tweet count, used as a side input below to turn per-word counts
    # into fraction-of-all-tweets values.
    global_count = AsSingleton(
        lines
        | 'global count' >> beam.combiners.Count.Globally())

    # Count the occurrences of each word.
    percents = (lines
                | 'split' >> (beam.ParDo(WordExtractingDoFn())
                              .with_output_types(unicode))
                | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
                | 'group' >> beam.GroupByKey()
                | 'count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
                | 'in tweets percent' >> beam.Map(
                    lambda (word, wsum), gc: (word, float(wsum) / gc), global_count))
    top_percents = (percents
                    | 'top 500' >> combiners.Top.Of(500, lambda x, y: x[1] < y[1])
                    )
    # Count the occurrences of each expanded url in the tweets
    url_counts = (lines
                  | 'geturls' >> (beam.ParDo(URLExtractingDoFn())
                                  .with_output_types(unicode))
                  | 'urls_pair_with_one' >> beam.Map(lambda x: (x, 1))
                  | 'urls_group' >> beam.GroupByKey()
                  | 'urls_count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
                  | 'urls top 300' >> combiners.Top.Of(300, lambda x, y: x[1] < y[1])
                  )

    # Define some inline helper functions.
    def join_cinfo(cooccur, percents):
        """Calculate a co-occurence ranking.

        Scores a ((w1, w2), count) pair as count * log of the rarer word's
        inverse frequency, so co-occurrences of rare words rank higher.
        """
        word1 = cooccur[0][0]
        word2 = cooccur[0][1]
        try:
            word1_percent = percents[word1]
            weight1 = 1 / word1_percent
            word2_percent = percents[word2]
            weight2 = 1 / word2_percent
            return (cooccur[0], cooccur[1], cooccur[1] *
                    math.log(min(weight1, weight2)))
        except:
            # NOTE(review): bare except silently maps any failure (e.g. a word
            # missing from the side-input dict, or a zero percent) to 0;
            # consider narrowing to KeyError/ZeroDivisionError.
            return 0

    def generate_cooccur_schema():
        """BigQuery schema for the word co-occurrence table."""
        json_str = json.dumps({'fields': [
            {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
            {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},
            {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
        # {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
        return parse_table_schema_from_json(json_str)

    def generate_url_schema():
        """BigQuery schema for the urls count table."""
        json_str = json.dumps({'fields': [
            {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
            {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
        # {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
        return parse_table_schema_from_json(json_str)

    def generate_wc_schema():
        """BigQuery schema for the word count table."""
        json_str = json.dumps({'fields': [
            {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},
            {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
        # {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
        return parse_table_schema_from_json(json_str)

    # Now build the rest of the pipeline.
    # Calculate the word co-occurence scores.
    cooccur_rankings = (lines
                        | 'getcooccur' >> (beam.ParDo(CoOccurExtractingDoFn()))
                        | 'co_pair_with_one' >> beam.Map(lambda x: (x, 1))
                        | 'co_group' >> beam.GroupByKey()
                        | 'co_count' >> beam.Map(lambda (wordts, ones): (wordts, sum(ones)))
                        | 'weights' >> beam.Map(join_cinfo, AsDict(percents))
                        | 'co top 300' >> combiners.Top.Of(300, lambda x, y: x[2] < y[2])
                        )

    # Format the counts into a PCollection of strings.
    # Each Top.Of output is a single list element, hence the FlatMap over `x`.
    wc_records = top_percents | 'format' >> beam.FlatMap(
        lambda x: [{'word': xx[0], 'percent': xx[1],
                    'ts': user_options.timestamp.get()} for xx in x])

    url_records = url_counts | 'urls_format' >> beam.FlatMap(
        lambda x: [{'url': xx[0], 'count': xx[1],
                    'ts': user_options.timestamp.get()} for xx in x])

    co_records = cooccur_rankings | 'co_format' >> beam.FlatMap(
        lambda x: [{'w1': xx[0][0], 'w2': xx[0][1], 'count': xx[1],
                    'log_weight': xx[2],
                    'ts': user_options.timestamp.get()} for xx in x])

    # Write the results to three BigQuery tables.
    wc_records | 'wc_write_bq' >> beam.io.Write(
        beam.io.BigQuerySink(
            '%s:%s.word_counts' % (project, dataset),
            schema=generate_wc_schema(),
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))

    url_records | 'urls_write_bq' >> beam.io.Write(
        beam.io.BigQuerySink(
            '%s:%s.urls' % (project, dataset),
            schema=generate_url_schema(),
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))

    co_records | 'co_write_bq' >> beam.io.Write(
        beam.io.BigQuerySink(
            '%s:%s.word_cooccur' % (project, dataset),
            schema=generate_cooccur_schema(),
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))

    # Actually run the pipeline.
    return p.run()
def unpack_uid(uid):
    """
    Convert packed PFile UID to standard DICOM UID.

    Each packed byte holds two nibbles; nibble values 1-10 encode digits
    '0'-'9', values 11+ encode the '.' separator, and 0 is padding.

    Parameters
    ----------
    uid : str
        packed PFile UID as a string

    Returns
    -------
    uid : str
        unpacked PFile UID as string
    """
    chars = []
    for packed in uid:
        byte = ord(packed)
        for nibble in (byte >> 4, byte & 15):
            if nibble == 0:
                continue  # zero nibble is padding, not part of the UID
            chars.append(str(nibble - 1) if nibble < 11 else '.')
    return ''.join(chars)
def command(name=None):
    """A decorator to register a subcommand with the global `Subcommands` instance.
    """
    def register(func):
        # Record the (name, function) pair in the module-level registry;
        # the function itself is returned unmodified.
        _commands.append((name, func))
        return func
    return register
def agse_convert(invec, swdata, insys='gse'):
    """Convert between GSE and aberrated GSE

    Using common definiton, e.g., Schwartz 1998
    (S.J. Schwartz, "Shock and Discontinuity Normals, Mach Numbers, and Related Parameters",
    In: Analysis Methods for Multi-Spacecraft Data, Eds.: G. Paschmann and P. Daly,
    ISSI Scientific Reports Series, ESA/ISSI, Vol. 1, ISBN 1608-280X, 1998, pp.249-270)

    Neglects transverse components of SW velocity
    """
    assert insys in ('gse', 'agse')
    # Aberration angle from Earth's ~30 km/s orbital motion vs. SW speed.
    alpha = np.arctan(30/swdata['Plasma_bulk_speed'])
    sin_a = np.sin(alpha)
    cos_a = np.cos(alpha)
    # Rotation about the z-axis by the aberration angle.
    rot = np.array([[cos_a, -sin_a, 0.0],
                    [sin_a, cos_a, 0.0],
                    [0.0, 0.0, 1.0]])
    if insys == 'gse':
        return np.dot(rot, invec)
    # Inverse transform: the rotation matrix is orthogonal, so its
    # transpose undoes it.
    return np.dot(rot.T, invec)
import numpy as np
def parse_geom_text_output(out_lines, input_dict=None):
    """
    Parse output of .geom file

    Walks the file line by line, accumulating one frame (cell, positions,
    forces, optional velocities) at a time; a blank line flushes the current
    frame into the trajectory lists.

    :param out_lines: a list of lines from the readline function
    :param input_dict: not in use at the moment
    :return parsed_data: key, value of the trajectories of cell, atoms,
        force etc
    """
    txt = out_lines
    # `units` is a module-level mapping defined elsewhere; 'Eh' and 'a0' are
    # the Hartree and Bohr conversion factors used to scale the raw values.
    Hartree = units['Eh']
    Bohr = units['a0']
    # Yeah, we know that...
    # Per-trajectory accumulators: one entry per frame.
    cell_list = []
    species_list = []
    geom_list = []
    forces_list = []
    energy_list = []
    # NOTE(review): temperature_list is collected but never returned.
    temperature_list = []
    velocity_list = []
    # Per-frame accumulators, flushed whenever a blank line is seen.
    current_pos = []
    current_species = []
    current_forces = []
    current_velocity = []
    current_cell = []
    in_header = False
    for i, line in enumerate(txt):
        if 'begin header' in line.lower():
            in_header = True
            continue
        if 'end header' in line.lower():
            in_header = False
            continue
        if in_header:
            continue  # Skip header lines
        sline = line.split()
        # '<-- X' tags mark what quantity a data line carries.
        if '<-- E' in line:
            # Only the first energy column is kept — presumably the total
            # energy; confirm against the .geom format spec.
            energy_list.append(float(sline[0]) * Hartree)
            continue
        elif '<-- h' in line:
            # One row of the 3x3 cell matrix.
            current_cell.append(list(map(float, sline[:3])))
            continue
        elif '<-- R' in line:
            # Atom line: sline[0] is the species label, columns 2-4 are the
            # Cartesian coordinates.
            current_pos.append(list(map(float, sline[2:5])))
            current_species.append(sline[0])
        elif '<-- F' in line:
            current_forces.append(list(map(float, sline[2:5])))
        elif '<-- V' in line:
            current_velocity.append(list(map(float, sline[2:5])))
        elif '<-- T' in line:
            temperature_list.append(float(sline[0]))
        elif not line.strip() and current_cell:
            # Blank line terminates a frame: flush the per-frame buffers.
            # NOTE(review): a file without a trailing blank line would drop
            # its final frame — confirm the format always ends with one.
            cell_list.append(current_cell)
            species_list.append(current_species)
            geom_list.append(current_pos)
            forces_list.append(current_forces)
            current_cell = []
            current_species = []
            current_pos = []
            current_forces = []
            if current_velocity:
                velocity_list.append(current_velocity)
                current_velocity = []
    if len(species_list) == 0:
        raise RuntimeError('No data found in geom file')
    # Convert accumulated frames to numpy arrays, scaled by the conversion
    # factors above (lengths by Bohr, forces by Hartree/Bohr).
    out = dict(
        cells=np.array(cell_list) * Bohr,
        positions=np.array(geom_list) * Bohr,
        forces=np.array(forces_list) * Hartree / Bohr,
        geom_energy=np.array(energy_list),
        # Species labels of the first frame; assumes they do not change
        # between frames.
        symbols=species_list[0],
    )
    # Velocities are optional in .geom output.
    if velocity_list:
        out['velocities'] = np.array(velocity_list) * Bohr
    return out
def evalKnapsackBalanced(individual):
    """
    Variant of the original weight-value knapsack problem with added third object being minimizing weight difference between items.
    """
    weight, value = evalKnapsack(individual)
    ordered = list(individual)
    # Sum of weight gaps between consecutive items (in iteration order);
    # the 0.0 start keeps the objective a float.
    balance = sum((abs(items[first][0] - items[second][0])
                   for first, second in zip(ordered, ordered[1:])), 0.0)
    if len(individual) > MAX_ITEM or weight > MAX_WEIGHT:
        return weight, value, 1e30  # Ensure overweighted bags are dominated
    return weight, value, balance
import os
def read_links(ls):
    """Returns list of objects with source and target

    Each entry maps a symlink path ("source") to the path it points at
    ("target", via os.readlink).
    """
    return [{"source": path, "target": os.readlink(path)} for path in ls]
def _collect_region(lines_in, start, end_re):
    """Collect lines_in[start:] up to and including the first line that
    matches end_re.

    Returns (region_lines, n_consumed).
    """
    region = []
    offset = 0
    while end_re.search(lines_in[start + offset]) is None:
        region.append(lines_in[start + offset])
        offset += 1
    region.append(lines_in[start + offset])
    return region, offset + 1


def complexify_module(lines_in):
    """
    Complexify a module by separating its derived types, functions, and subroutines
    and passing them through a line complexification function.

    Parameters
    ----------
    lines_in : list of string
        List of strings source code for one module to be complexified

    Returns
    -------
    lines_out : list of string
        List of strings representing the output complexified module
    """
    # Start Module: keep the module header line and pull in the complex
    # arithmetic support module.
    lines_out = [lines_in[0] + "use MOD_COMPLEXIFY\n"]
    # (start marker, end marker, fixer) for each construct we transform.
    # The original code duplicated the scan-and-fix loop three times; the
    # tuple order preserves its type -> function -> subroutine precedence.
    handlers = (
        (re_type_start, re_type_end, complexify_type),
        (re_function_start, re_function_end, complexify_function),
        (re_subroutine_start, re_subroutine_end, complexify_subroutine),
    )
    N_line = len(lines_in)
    iLine = 1
    while iLine < N_line:
        for start_re, end_re, fixer in handlers:
            if start_re.search(lines_in[iLine]) is not None:
                region, consumed = _collect_region(lines_in, iLine, end_re)
                lines_out.extend(fixer(region))
                iLine += consumed
                break
        else:
            # Not inside any construct: copy the line through unchanged.
            lines_out.append(lines_in[iLine])
            iLine += 1
    return lines_out
from datetime import datetime
import pytz
def utc_right_now():
    """
    Returns a naive datetime object reflecting the time in UTC as of when
    this function was called (tzinfo is stripped so the result compares
    cleanly against other naive UTC datetimes).
    """
    # Bug fix: with `from datetime import datetime` in scope, the original
    # `datetime.datetime.now(...)` raised AttributeError (the class has no
    # `datetime` attribute). The stdlib timezone.utc replaces pytz.utc —
    # identical result once tzinfo is stripped, and one fewer dependency.
    from datetime import timezone
    return datetime.now(tz=timezone.utc).replace(tzinfo=None)
def process_img_border(img_array, polygon_pts, border=6):
    """Crop an image to a polygon's bounding box plus a pixel border.

    Args:
        img_array (numpy array): numpy representation of image, shape
            (height, width, channels).
        polygon_pts (array): corners of the building polygon as (x, y) rows.
        border (int): number of pixels of padding to keep around the
            polygon's bounding box; clamped to the image edges.

    Returns:
        numpy array: the cropped image region
            img_array[ymin:ymax, xmin:xmax, :].
    """
    height, width, _ = img_array.shape
    xcoords = polygon_pts[:, 0]
    ycoords = polygon_pts[:, 1]
    xmin, xmax = np.min(xcoords), np.max(xcoords)
    ymin, ymax = np.min(ycoords), np.max(ycoords)
    # Expand the bounding box by `border` pixels, clamped to image bounds.
    # (Removed dead code: unused xdiff/ydiff and a no-op shape unpack.)
    xmin = max(int(xmin - border), 0)
    xmax = min(int(xmax + border), width)
    ymin = max(int(ymin - border), 0)
    ymax = min(int(ymax + border), height)
    return img_array[ymin:ymax, xmin:xmax, :]
from typing import Dict
from typing import Any
def async_flow_poll(destination: Text, batch_id: Text) -> Dict[Text, Any]:
    """Repeatedly checks on the status of the batch, and returns
    the result after the processing has been completed.

    The destination driver is resolved dynamically from the URL scheme of
    `destination`.

    Args:
        destination (Text): This is the destination parsed
        batch_id (Text):

    Returns:
        Dict[Text, Any]: Response with statusCode 200 (done, body attached)
        or 202 (still processing) depending on the write's status.
    """
    LOG.debug('async_flow_poll() called as destination header was not found in a GET.')
    scheme = urlparse(destination).scheme
    destination_driver = import_module(f'geff.drivers.destination_{scheme}')

    # Ignoring style due to dynamic import
    status_body = destination_driver.check_status(destination, batch_id)  # type: ignore

    if not status_body:
        LOG.debug(f'Manifest not found return status code 202.')
        return {'statusCode': 202}

    LOG.debug(f'Manifest found return status code 200.')
    return {'statusCode': 200, 'body': status_body}
from typing import Optional
from typing import Mapping
def get_connection(arn: Optional[str] = None,
                   tags: Optional[Mapping[str, str]] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
    """
    Provides details about CodeStar Connection.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.codestarconnections.get_connection(arn=aws_codestarconnections_connection["example"]["arn"])
    ```

    :param str arn: The CodeStar Connection ARN.
    :param Mapping[str, str] tags: Map of key-value resource tags to associate with the resource.
    """
    invoke_args = {'arn': arn, 'tags': tags}
    # Fill in default invoke options and pin the provider version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws:codestarconnections/getConnection:getConnection', invoke_args, opts=opts, typ=GetConnectionResult).value

    return AwaitableGetConnectionResult(
        arn=result.arn,
        connection_status=result.connection_status,
        id=result.id,
        name=result.name,
        provider_type=result.provider_type,
        tags=result.tags)
from typing import Counter
def generate_samples(n_samples, func, *args, **kwargs):
    """Call a function a bunch of times and count the results.

    Args:
        n_samples: Number of time to call the function.
        func: The function results are counted from.
        *args
        **kwargs: The arguments to pass to func.

    Returns:
        Counter mapping each distinct result to how often it occurred.
    """
    return Counter(func(*args, **kwargs) for _ in range(n_samples))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.