content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def build_stats(loss, eval_result, time_callback):
    """Normalizes and returns dictionary of stats.

    Args:
        loss: The final loss at training time.
        eval_result: Output of the eval step. Assumes first value is eval_loss
            and second value is accuracy_top_1.
        time_callback: Time tracking callback likely used during keras.fit.

    Returns:
        Dictionary of normalized results.
    """
    stats = {}
    if loss:
        stats["loss"] = loss
    if eval_result:
        stats["eval_loss"], stats["eval_hit_rate"] = eval_result[0], eval_result[1]
    if time_callback:
        log = time_callback.timestamp_log
        stats["step_timestamp_log"] = log
        stats["train_finish_time"] = time_callback.train_finish_time
        if len(log) > 1:
            # Throughput: examples processed between the first and last
            # logged timestamps, divided by the elapsed wall time.
            elapsed = log[-1].timestamp - log[0].timestamp
            examples = (time_callback.batch_size * time_callback.log_steps
                        * (len(log) - 1))
            stats["avg_exp_per_second"] = examples / elapsed
    return stats
import random
def random_resource_code2() -> str:
    """One random book name chosen at random. This fixture exists so
    that we can have a separate book chosen in a two language document
    request."""
    return random.choice(list(bible_books.BOOK_NAMES.keys()))
def _to_str(value):
"""Helper function to make sure unicode values are converted to UTF-8.
Args:
value: String or Unicode text to convert to UTF-8.
Returns:
UTF-8 encoded string of `value`; otherwise `value` remains unchanged.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
return value | 46186757a475c2b5fa877e8fb62c32a27770e6b7 | 26,702 |
def get_loss(loss_name):
    """Look up a loss in LOSS_REGISTRY by name.

    Args:
        loss_name: Registry key of the desired loss.

    Returns:
        The loss object registered under `loss_name`.

    Raises:
        Exception: If `loss_name` is not in LOSS_REGISTRY; the message
            lists the available keys.
    """
    # `x not in y` is the idiomatic form (was `not x in y`).
    if loss_name not in LOSS_REGISTRY:
        raise Exception(NO_LOSS_ERR.format(
            loss_name, LOSS_REGISTRY.keys()))
    return LOSS_REGISTRY[loss_name]
def collate_fn(batch):
    """
    Data collater.

    Assumes each instance is a dict; applies a per-field collation rule:
    fields that `default_collate` understands are batched by it, all other
    fields are kept as a plain list of the per-instance values.

    Args:
        batch: List of loaded elements via Dataset.__getitem__
    """
    def collate_field(key):
        values = [elem[key] for elem in batch]
        try:
            return default_collate(values)
        except TypeError:
            # Field type not supported by default_collate: keep raw list.
            return values

    return {key: collate_field(key) for key in batch[0]}
def _rec_eval_in(g, a, v, i, j, K):
    """Recursive helper for :func:`dmp_eval_in`.

    Descends through the nested-list polynomial representation ``g`` until
    level ``i`` reaches the target level ``j``, then evaluates that variable
    at ``a``.  ``v`` tracks the number of remaining variables and ``K`` is
    presumably the coefficient domain (dmp_* convention — confirm against
    the enclosing module).
    """
    if i == j:
        # Reached the target level: evaluate this variable at `a`.
        return dmp_eval(g, a, v, K)
    v, i = v - 1, i + 1
    # Recurse into each coefficient, then strip leading zeros at this level.
    return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
def normalize(array):
    """
    Normalize a 4 (or Nx4) element array/list/numpy.array for use as a quaternion

    :param array: 4 or Nx4 element list/array
    :returns: normalized array
    :rtype: numpy array
    """
    quat = np.array(array)
    # Per-row Euclidean norm, kept as a trailing axis for broadcasting.
    norms = np.sqrt((quat * quat).sum(axis=-1, keepdims=True))
    return np.squeeze(quat / norms)
def dose_class_baseline(dose_num, df_test, df_targets):
    """Calculate the PR- baseline for each dose treatment.

    Args:
        dose_num: Dose value matched against df_test['dose'].
        df_test: DataFrame with a 'dose' column whose index aligns with
            df_targets.
        df_targets: Target DataFrame indexed like df_test.

    Returns:
        The baseline score computed by `calculate_baseline` on the targets
        of the compounds belonging to the selected dose.
    """
    # Indices of compounds treated at this dose.
    dose_cpds_index = df_test[df_test['dose'] == dose_num].index
    df_class_targets = df_targets.loc[dose_cpds_index].reset_index(drop = True)
    class_baseline_score = calculate_baseline(df_class_targets)
    return class_baseline_score
def xrange(mn: int, mx: int = None) -> list:
    """Inclusive version of the built-in range, returned as a list.

    Range:  range(5)     -> [0, 1, 2, 3, 4]
    XRange: xrange(5)    -> [0, 1, 2, 3, 4, 5]
            xrange(2, 5) -> [2, 3, 4, 5]

    Args:
        mn: Upper bound when called with one argument, lower bound otherwise.
        mx: Optional inclusive upper bound.

    Returns:
        List of integers from the lower bound to the upper bound, inclusive.
    """
    # Untangle the two calling conventions instead of repeating the
    # `mx is None` test inside one dense expression.
    if mx is None:
        start, stop = 0, mn
    else:
        start, stop = mn, mx
    return list(range(start, stop + 1))
import os
def stage_per_sample_specific_statics(opts, sample_ids):
    """Stage per-sample static files (items like the Latex templates).

    Parameters
    ----------
    opts : dict
        A dict of relevant opts.
    sample_ids : Iterable of str
        A list of sample IDs of interest

    Returns
    -------
    dict
        A dict containing each sample ID and any errors observed or None if
        no error was observed for the sample. {str: str or None}
    """
    result = {}
    sample_type = opts['sample_type'].lower()
    statics_src = opts['per-sample']['statics-%s' % sample_type]
    for id_ in sample_ids:
        result[id_] = None
        path = _result_path(opts, id_)
        template_path = os.path.join(path, id_ + '.tex')
        statics_path = os.path.join(path, 'statics')
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit.
        try:
            agru.stage_static_latex(opts['sample_type'], template_path)
        except Exception:
            result[id_] = "Cannot stage template."
            continue
        # os.symlink failures (permissions, pre-existing link, ...) all
        # surface as OSError.
        try:
            os.symlink(statics_src, statics_path)
        except OSError:
            result[id_] = "Cannot symlink for statics."
    return result
def sous_tableaux(arr: list, n: int) -> list:
    """
    Split a list into consecutive chunks of ``n`` elements.

    Parameters:
        arr: {list} -- List to split.
        n: {int} -- Number of elements per chunk.

    Returns:
        {list} -- List of chunks; empty when ``n`` is not positive or the
        input is shorter than ``n``.

    Example:
        >>> sous_tableaux([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1], 3)
        [[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1]]
    """
    if n <= 0 or len(arr) < n:
        return []
    chunks = []
    for start in range(0, len(arr), n):
        chunks.append(arr[start:start + n])
    return chunks
def get_previous_tweets(date_entry):
    """Return details about previous Tweets. Namely, retrieve details
    about the date_entry-th Tweets from 7 days ago, 30 days ago, and a
    random number of days ago.

    If a given Tweet does not exist, its corresponding entry in the
    output will be empty.

    Args:
        date_entry: An integer representing the number of Tweets tweeted
            before the desired one on the date it was tweeted.

    Returns:
        A namedtuple with name PreviousTweets and fields with names
        "last_week", "last_month", and "random", where each name points
        to a dictionary containing the Tweet's Twitter ID, the word
        associated with the Tweet, and the URL to the Tweet.

    Raises:
        AWSClientError: If any AWS query fails.
        TypeError: If one or more inputs has an unexpected type.
        ValueError: If the Tweet entry falls outside of the expected
            range.
    """
    today = date.today()
    tweets_by_date = {
        "last_week": get_tweets_on_date(
            today - timedelta(days=7), date_entry=date_entry),
        "last_month": get_tweets_on_date(
            today - timedelta(days=30), date_entry=date_entry),
        "random": get_tweets_on_random_date(date_entry=date_entry),
    }
    twitter_client = TwitterAPIClient()
    table_schema = DynamoDBTable.TWEETS.value.schema
    for date_key in ("last_week", "last_month", "random"):
        tweets = tweets_by_date[date_key]
        # Default to an empty dict; it is only replaced when every
        # validation below passes.
        tweets_by_date[date_key] = dict()
        # (A dead `if len(tweets) > 1: pass` no-op was removed here.)
        if not isinstance(tweets, list) or not tweets:
            continue
        # Only the first matching tweet for the date is considered.
        tweet = tweets[0]
        if not validate_item_against_schema(table_schema, tweet):
            continue
        tweet_id = tweet["TweetId"]
        if not twitter_client.tweet_exists(tweet_id):
            continue
        word = tweet["Word"]
        url = tweet_url(TWITTER_USER_USERNAME, tweet_id)
        tweets_by_date[date_key] = dict(tweet_id=tweet_id, word=word, url=url)
    PreviousTweets = namedtuple(
        "PreviousTweets", "last_week last_month random")
    return PreviousTweets(
        last_week=tweets_by_date["last_week"],
        last_month=tweets_by_date["last_month"],
        random=tweets_by_date["random"])
def changed_cat_keys(dt):
    """Returns keys for categories, changed after specified time.

    NOTE(review): `dt` is currently ignored — this implementation always
    returns just the root category key. Confirm whether real incremental
    change tracking is still pending.
    """
    return [root_category_key()]
def create_client(CLIENT_ID, CLIENT_SECRET):
    """Creates Taboola Client object with the given ID and secret.

    Args:
        CLIENT_ID: Taboola API client id.
        CLIENT_SECRET: Taboola API client secret.

    Returns:
        A configured `TaboolaClient` instance.
    """
    client = TaboolaClient(CLIENT_ID, client_secret=CLIENT_SECRET)
    return client
def set_values_at_of_var_above_X_lat_2_avg(lat_above2set=65, ds=None,
                                           use_avg_at_lat=True, res='0.125x0.125',
                                           var2set=None,
                                           only_consider_water_boxes=True,
                                           fixed_value2use=None,
                                           save2NetCDF=True):
    """
    Set values above a latitude to the monthly lon average

    Parameters
    -------
    lat_above2set (float): latitude to set values above
    fixed_value2use (float): value to set selected latitudes (lat_above2set)
    var2set (str): variable in dataset to set to new value
    res (str): horizontal resolution of dataset (e.g. 4x5)
    only_consider_water_boxes (bool): only update non-water grid boxes
    ds (xr.Dataset): xarray dataset to use for plotting
    save2NetCDF (bool): save outputted dataset as a NetCDF file

    Returns
    -------
    (xr.Dataset)

    NOTE(review): when save2NetCDF=True the function writes the file and
    returns None, despite the documented (xr.Dataset) return — confirm
    callers do not rely on the return value in that case.
    """
    print(var2set)
    # local variables
    folder = utils.get_file_locations('data_root')+'/data/'
    # Get existing file
    if isinstance(ds, type(None)):
        filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
        ds = xr.open_dataset(folder + filename)
    # get the average value at lat
    avg = ds[var2set].sel(lat=lat_above2set, method='nearest')
    # get index of lat to set values from
    idx = AC.find_nearest(avg['lat'].values, ds['lat'].values)
    # Setup a bool for values above or equal to lat
    bool_ = avg['lat'].values <= ds['lat'].values
    # Just use a fixed value?
    if not use_avg_at_lat:
        # NOTE(review): this assert rejects int but permits any non-int
        # (None, str, ...) even though the message demands a float —
        # confirm the intended validation.
        assert type(fixed_value2use) != int, 'Fixed value must be a float!'
        if isinstance(fixed_value2use, float):
            print('Set all values above lat to: {}'.format(fixed_value2use))
            avg[:] = fixed_value2use
    # Make sure there is one value per month
    if len(avg.shape) == 1:
        try:
            avg = np.ma.array([avg.values]*12)
        except AttributeError:
            avg = np.ma.array([avg]*12)
    # Only consider the water boxes
    if only_consider_water_boxes:
        # add LWI (land/water/ice mask) to array
        if res == '0.125x0.125':
            folderLWI = utils.get_file_locations('AC_tools')
            folderLWI += '/data/LM/TEMP_NASA_Nature_run/'
            filenameLWI = 'ctm.nc'
            LWI = xr.open_dataset(folderLWI+filenameLWI)
            # LWI == 0.0 presumably marks water boxes — confirm mask convention.
            bool_water = LWI.to_array().values[0, :, idx, :] == 0.0
        else:
            LWI = AC.get_LWI_map(res=res)[..., 0]
            bool_water = (LWI[:, idx] == 0.0)
            # Use the annual value for every month
            bool_water = np.ma.array([bool_water]*12)
        # Set land/ice values to NaN
        for n_month in range(12):
            avg[n_month, ~bool_water[n_month]] = np.NaN
    # get the average over lon
    avg = np.nanmean(avg, axis=-1)
    pstr = '>={}N = monthly avg. @ lat (avg={:.2f},min={:.2f},max={:.2f})'
    print(pstr.format(lat_above2set, avg.mean(), avg.min(), avg.max()))
    # Get the data
    values = ds[var2set].values
    # Update the values above the specific lat
    # Do this on a monthly basis if data is monthly.
    if len(values.shape) == 3:
        for month in np.arange(values.shape[0]):
            # Updated array of values
            arr = np.zeros(values[month, bool_, :].shape)
            arr[:] = avg[month]
            # now replace values
            values[month, bool_, :] = arr
            del arr
    else:
        # Updated array of values
        arr = np.zeros(values[bool_, :].shape)
        arr[:] = np.nanmean(avg)
        # now replace values
        values[bool_, :] = arr
    ds[var2set].values = values
    # Update the history attribute to record the update.
    attrs = ds.attrs
    # Ensure a 'History' attribute exists before appending to it.
    # NOTE(review): the `History` local read in the try is never used.
    try:
        History = attrs['History']
    except KeyError:
        attrs['History'] = ''
    hist_str = "; '{}' above lat ({}N) set to monthly lon average at that lat.;"
    hist_str = hist_str.format(var2set, lat_above2set)
    attrs['History'] = attrs['History'] + hist_str
    # Save updated file
    if save2NetCDF:
        ext_str = '_INTERP_NEAREST_DERIVED_UPDATED_{}'.format(var2set)
        filename = 'Oi_prj_feature_variables_{}{}.nc'.format(res, ext_str)
        ds.to_netcdf(filename)
    else:
        return ds
def vzc_dict(svy):
    """
    vizier column dictionary
    dictionary for NVSS, TGSS column using vizier
    TODO: add column for errors too
    """
    columns = {'tgss': ['RAJ2000', 'DEJ2000', 'Maj', 'Min', 'PA'],
               'nvss': ['RAJ2000', 'DEJ2000', 'MajAxis', 'MinAxis', 'PA', '+NVSS']
               }
    if svy in columns:
        return columns[svy]
    # Special selectors: whole dict, its values, or its keys.
    selectors = {'*': lambda: columns,
                 'v': columns.values,
                 'k': columns.keys}
    if svy in selectors:
        return selectors[svy]()
    return "'{}' is not a valid survey. please choose one from {} or use any of '*' 'v' 'k' ".format(svy, columns.keys())
def _plot_xkcd(plot_func, *args, **kwargs):
    """ Plot with *plot_func*, *args* and **kwargs*, but in xkcd style.

    The call is wrapped in matplotlib's ``plt.xkcd()`` context manager so
    the sketch style applies only to this figure.

    Returns:
        The figure returned by *plot_func*.
    """
    with plt.xkcd():
        fig = plot_func(*args, **kwargs)
    return fig
import requests
def unfollow_user():
    """UnfollowUser: delete the authenticated user's follow of `userId`.

    Reads the auth token from the request headers, verifies it against the
    IsAuthenticated service, then removes the follow row.

    Returns:
        'Done' on success, 'Not Authenticated' otherwise.
    """
    auth = request.headers
    user = request.args.get('userId')
    req = requests.post(
        '/api/v1/IsAuthenticated',
        {'id': auth['Authorization']}
    )
    # Bug fix: the decoded JSON payload was previously discarded while a
    # nonexistent `req.authenticated` attribute was read on the Response.
    # NOTE(review): assumes the auth service returns 'authenticated' and
    # 'user_id' keys — confirm against the service contract.
    payload = req.json()
    if payload.get('authenticated'):
        cur = MY_SQL.connection.cursor()
        # Bug fix: pass query parameters as a tuple so the driver escapes
        # them (they were previously extra positional args to execute(),
        # and %d is not a valid DB-API placeholder).
        cur.execute(
            '''DELETE from follows WHERE followerid = %s AND
            followingid = %s''',
            (payload.get('user_id'), user)
        )
        output = 'Done'
    else:
        output = 'Not Authenticated'
    return output
def isOverlaysEnabled():
    """Returns whether or not the current client's quality overlay
    system is currently enabled.

    Returns:
        bool: True (1) if overlays are currently enabled.

    NOTE(review): stub — this implementation unconditionally returns False.
    """
    return False
def load_image(path, size=None, grayscale=False):
    """
    Load the image from the given file-path and resize it
    to the given size if not None.

    Args:
        path: File path readable by cv2.imread.
        size: Optional (width, height) tuple passed to cv2.resize.
        grayscale: Load as single-channel grayscale when True.

    Returns:
        Float numpy array in [0.0, 1.0]; grayscale images are expanded
        to 3 identical channels.
    """
    # Load the image using opencv
    if not grayscale:  # BGR format
        image = cv2.imread(path)
    else:  # grayscale format
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # Resize image if desired.  (`x is not None` is the idiomatic form of
    # the original `not x is None`.)
    if size is not None:
        image = cv2.resize(image, size)
    # Convert image to numpy array and scale pixels so they fall between 0.0 and 1.0
    image = np.array(image) / 255.0
    # Convert 2-dim gray-scale array to 3-dim BGR array.
    if (len(image.shape) == 2):
        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
    return image
def brent_min(f, bracket, fnvals=None, tolerance=1e-6, max_iterations=50):
    """\
    Given a univariate function f and a tuple bracket=(x1,x2,x3) bracketing a minimum,
    find a local minimum of f (with fn value) using Brent's method.
    Optionally pass in the tuple fnvals=(f(x1),f(x2),f(x3)) as a parameter.

    Returns:
        (x, f(x)) at the located minimum.

    Raises:
        MinimizationException: If the triple does not bracket a minimum or
            max_iterations is exhausted.
    """
    x1, x2, x3 = bracket
    if fnvals is None:
        # Bug fix: the middle value used to be computed as f(xx) — an
        # undefined name.
        f1, f2, f3 = f(x1), f(x2), f(x3)
    else:
        f1, f2, f3 = fnvals
    if not f1 > f2 < f3:
        raise MinimizationException("initial triple does not bracket a minimum")
    if not x1 < x3:  # ensure x1, x2, x3 in ascending order
        x1, f1, x3, f3 = x3, f3, x1, f1
    a, b = x1, x3
    # Bug fix: `d` (last step size) must be initialized — it was read
    # (`e = d`) before any assignment on the first parabolic-fit attempt.
    d = e = 0.
    x = w = v = x2
    # Reuse the already-known f(x2) instead of re-evaluating f.
    fw = fv = fx = f2
    for j in range(max_iterations):
        xm = (a+b)/2
        accuracy = tolerance*abs(x) + LITTLE
        if abs(x-xm) < (2*accuracy - (b-a)/2):
            return x, fx
        if abs(e) > accuracy:
            # Attempt a parabolic fit through (v, fv), (w, fw), (x, fx).
            r = (x-w)*(fx-fv)
            q = (x-v)*(fx-fw)
            p = (x-v)*q - (x-w)*r
            q = 2*(q-r)
            if q > 0:
                p = -p
            q = abs(q)
            etemp = e
            e = d
            if abs(p) >= abs(q*etemp)/2 or p <= q*(a-x) or p >= q*(b-x):
                # Parabolic step rejected: fall back to golden-section.
                if x >= xm:
                    e = a-x
                else:
                    e = b-x
                d = (2-GOLDEN)*e
            else:  # accept parabolic fit
                d = p/q
                u = x+d
                if u-a < 2*accuracy or b-u < 2*accuracy:
                    d = accuracy*sgn(xm-x)
        else:
            if x >= xm:
                e = a-x
            else:
                e = b-x
            d = (2-GOLDEN)*e
        if abs(d) >= accuracy:
            u = x+d
        else:
            u = x+accuracy*sgn(d)
        fu = f(u)
        if fu <= fx:
            if u >= x:
                a = x
            else:
                b = x
            v, w, x = w, x, u
            fv, fw, fx = fw, fx, fu
        else:
            if u < x:
                a = u
            else:
                b = u
            # Bug fixes per Brent's algorithm: the first condition read
            # `fu < -fw` (unary-minus typo for `fu <= fw`) and the second
            # compared against fw instead of fv.
            if fu <= fw or w == x:
                v, w, fv, fw = w, u, fw, fu
            elif fu <= fv or v == x or v == w:
                v, fv = u, fu
    raise MinimizationException("too many iterations")
def best_validity(source):
    """
    Retrieves best clustering result based on the relative validity metric
    """
    columns = ['min_cluster_size', 'min_samples', 'validity_score', 'n_clusters']
    results = pd.DataFrame(source, columns=columns)
    # Missing scores count as 0 so they never win the argmax.
    results['validity_score'] = results['validity_score'].fillna(0)
    return results.loc[results['validity_score'].idxmax()]
import json
def normalize_cell_value(value):
    """Process value for writing into a cell.

    Args:
        value: any type of variable

    Returns:
        json serialized value if value is list or dict, else value
    """
    return json.dumps(value) if isinstance(value, (dict, list)) else value
import numpy
def _ancestry2paths(A):
    """Convert edge x edge ancestry matrix to tip-to-tip path x edge
    split metric matrix. The paths will be in the same triangular matrix order
    as produced by distanceDictAndNamesTo1D, provided that the tips appear in
    the correct order in A"""
    # Tips are the columns with exactly one ancestor entry (themselves).
    tips = [i for i in range(A.shape[0]) if sum(A[:,i])==1]
    paths = []
    for (tip1, tip2) in triangularOrder(tips):
        # XOR of the two ancestry rows leaves exactly the edges that lie
        # on the path between the two tips.
        path = A[tip1] ^ A[tip2]
        paths.append(path)
    return numpy.array(paths)
def geometric_mean(x, axis=-1, check_for_greater_than_zero=True):
    """
    Return the geometric mean of matrix x along axis, ignore NaNs.

    Args:
        x: numpy array of values; NaNs are treated as missing.
        axis: Axis along which to take the mean.
        check_for_greater_than_zero: When True, raise if any non-NaN
            element is <= 0.

    Returns:
        Geometric mean array (or scalar); positions with no valid
        entries become NaN.

    Raises:
        ValueError: If any element of x is zero or less (while checking
            is enabled).
    """
    # Bug fix: the original used the Python 2 statement
    # `raise ValueError, msg`, a syntax error under Python 3.  The flag is
    # now also checked first so the scan is skipped when disabled.
    if check_for_greater_than_zero and (x <= 0).any():
        msg = 'All elements of x (except NaNs) must be greater than zero.'
        raise ValueError(msg)
    x = x.copy()
    m = np.isnan(x)
    # Replace NaNs with 1.0 so they contribute nothing to the log-sum.
    np.putmask(x, m, 1.0)
    # m becomes the count of valid (non-NaN) entries along the axis.
    m = np.asarray(~m, np.float64)
    m = m.sum(axis)
    x = np.log(x).sum(axis)
    g = 1.0 / m
    x = np.multiply(g, x)
    x = np.exp(x)
    # Positions with zero valid entries are mapped to NaN.
    idx = np.ones(x.shape)
    if idx.ndim == 0:
        if m == 0:
            idx = np.nan
    else:
        np.putmask(idx, m == 0, np.nan)
    x = np.multiply(x, idx)
    return x
def mongodb():
    """
    Simple form to get and set a note in MongoDB

    NOTE(review): placeholder — currently does nothing and returns None.
    """
    return None
def get_single_blog(url):
    """Fetch a single blog by its URL slug and return it as JSON.

    Returns a not-found error response when no blog matches, and a
    no-access error when the requesting user may not view it.
    """
    blog = Blog.get_or_none(Blog.url == url)
    if blog is None:
        return errors.not_found()
    user = get_user_from_request()
    has_access = Blog.has_access(blog, user)
    if not has_access:
        return errors.no_access()
    blog_dict = blog.to_json()
    # Attach vote info; the literal 2 is presumably the content-type id for
    # blogs — confirm against Vote.add_votes_info.
    blog_dict = Vote.add_votes_info(blog_dict, 2, user)
    return jsonify({"success": 1, "blog": blog_dict})
def test_section():
    """Returns a testing scope context to be used in 'with' statement
    and captures testing code.

    Example::
        with autograd.train_section():
            y = model(x)
            compute_gradient([y])
        with autograd.test_section():
            # testing, IO, gradient updates...
    """
    # TrainingStateScope(False) presumably disables the training flag for
    # the duration of the `with` block — confirm against its definition.
    return TrainingStateScope(False)
from typing import Optional
import select
async def get_installation_owner(metadata_account_id: int,
                                 mdb_conn: morcilla.core.Connection,
                                 cache: Optional[aiomcache.Client],
                                 ) -> str:
    """Load the native user ID who installed the app.

    Args:
        metadata_account_id: ID of the metadata account to look up.
        mdb_conn: Open metadata-DB connection.
        cache: Memcached client; not used in this body.

    Returns:
        The owner's login from MetadataAccount.

    Raises:
        ResponseError: If no owner row exists yet (installation not started).
    """
    # NOTE(review): `select` here must be the SQL query builder (e.g.
    # sqlalchemy.select), not the stdlib `select` module imported above —
    # confirm the file's real imports.
    user_login = await mdb_conn.fetch_val(
        select([MetadataAccount.owner_login])
        .where(MetadataAccount.id == metadata_account_id))
    if user_login is None:
        raise ResponseError(NoSourceDataError(detail="The installation has not started yet."))
    return user_login
def get_cflags():
    """Get the cflags for compiling Python source code.

    Combines the Python include directories with the interpreter's CFLAGS,
    then drops flags that cgo cannot handle.

    Returns:
        A single space-joined flag string.
    """
    flags = ['-I' + get_path('include'),
             '-I' + get_path('platinclude')]
    flags.extend(getvar('CFLAGS').split())
    # Note: extra cflags that are not valid for cgo are removed here.
    for not_go in ('-fwrapv', '-Wall'):
        if not_go in flags:
            flags.remove(not_go)
    return ' '.join(flags)
import requests
def request_post_json(url, headers, data):
    """Makes a POST request and returns the JSON response.

    Args:
        url: Full endpoint URL.
        headers: HTTP headers to send.
        data: Request body.

    Returns:
        Decoded JSON response on HTTP 201.

    Raises:
        RadarrRequestError: On any non-201 response or connection failure.
    """
    try:
        response = requests.post(url, headers=headers, data=data, timeout=10)
        if response.status_code == 201:
            return response.json()
        else:
            error_message = None
            try:
                json_response = response.json()
                if len(json_response) > 0 and "errorMessage" in json_response[0]:
                    error_message = json_response[0]["errorMessage"]
            except ValueError:
                # Raised by response.json() if JSON couldn't be decoded
                logger.error('Radarr returned non-JSON error result: {}', response.content)
            raise RadarrRequestError(
                "Invalid response received from Radarr: %s" % response.content,
                logger,
                status_code=response.status_code,
                error_message=error_message,
            )
    # Bug fix: `RequestException` was referenced unqualified although only
    # `requests` is imported; qualify it so the handler actually resolves.
    except requests.exceptions.RequestException as e:
        raise RadarrRequestError("Unable to connect to Radarr at %s. Error: %s" % (url, e))
import random
def get_rand_number(min_value, max_value):
    """
    This function gets a random number from a uniform distribution between
    the two input values [min_value, max_value] inclusively

    Args:
        - min_value (float)
        - max_value (float)

    Return:
        - Random number between this range (float)
    """
    # Renamed the local from `range`, which shadowed the builtin.
    span = max_value - min_value
    choice = random.uniform(0, 1)
    return min_value + span * choice
def detect_callec(tree):
    """Collect names of escape continuations from call_ec invocations in tree.

    Currently supported and unsupported cases::

        # use as decorator, supported
        @call_ec
        def result(ec):  # <-- we grab name "ec" from here
            ...

        # use directly on a literal lambda, supported
        result = call_ec(lambda ec: ...)  # <-- we grab name "ec" from here

        # use as a function, **NOT supported**
        def g(ec):  # <-- should grab from here
            ...
        ...
        result = call_ec(g)  # <-- but this is here; g could be in another module

    Additionally, the literal names `ec`, `brk`, `throw` are always interpreted
    as invoking an escape continuation (whether they actually do or not).
    So if you need the third pattern above, use **exactly** the name `ec`
    for the escape continuation parameter, and it will work.
    (The name `brk` covers the use of `unpythonic.fploop.breakably_looped`,
    and `throw` covers the use of `unpythonic.ec.throw`.)
    """
    # Names always treated as escape continuations, regardless of detection.
    fallbacks = ["ec", "brk", "throw"]
    iscallec = partial(isx, x="call_ec")
    def detect(tree):
        class Detector(ASTVisitor):
            def examine(self, tree):
                # TODO: add support for general use of call_ec as a function (difficult)
                if type(tree) in (FunctionDef, AsyncFunctionDef) and any(iscallec(deco) for deco in tree.decorator_list):
                    # TODO: Python 3.8+: handle the case where the ec is a positional-only arg?
                    fdef = tree
                    # The first positional parameter is the ec.
                    self.collect(fdef.args.args[0].arg)  # FunctionDef.arguments.(list of arg objects).arg
                elif is_decorated_lambda(tree, mode="any"):
                    # TODO: should we recurse selectively in this case?
                    decorator_list, thelambda = destructure_decorated_lambda(tree)
                    if any(iscallec(decocall.func) for decocall in decorator_list):
                        self.collect(thelambda.args.args[0].arg)  # we assume it's the first arg, as that's what call_ec expects.
                self.generic_visit(tree)
        d = Detector()
        d.visit(tree)
        return d.collected
    return fallbacks + detect(tree)
import copy
def _get_good_pport_list(sriov_adaps, pports, capacity, redundancy,
                         check_link_status, redundant_pports=None):
    """Get a list of SRIOV*PPort filtered by capacity and specified pports.

    Builds a list of pypowervm.wrappers.iocard.SRIOV*PPort from sriov_adaps
    such that:
    - Only ports whose location codes are listed in the pports param are
      considered.
    - Only ports with sufficient remaining capacity (per the capacity param, if
      specified; otherwise the port's min_granularity) are considered.

    :param sriov_adaps: A list of SRIOVAdapter wrappers whose mode is Sriov and
                        whose state is Running.
    :param pports: A list of string physical location codes of the physical
                   ports to consider.
    :param capacity: (float) Minimum capacity which must be available on each
                     backing device. Must be between 0.0 and 1.0, and must be
                     a multiple of the min_granularity of *all* of the pports.
                     If None, available port capacity is validated using each
                     port's min_granularity.
    :param redundancy: The desired redundancy level (number of ports to
                       return) required. If the filtered list has fewer than
                       this number of ports, InsufficientSRIOVCapacity is
                       raised.
    :param check_link_status: If True, ports with link-down status will not be
                              returned. If False, link status is not checked.
    :param redundant_pports: A list of string physical location codes of the
                             physical redundant ports to consider.
    :raise InsufficientSRIOVCapacity: If the final list contains fewer than
                                      'redundancy' ports.
    :return: A filtered list of SRIOV*PPort wrappers.
    """
    def port_ok(port):
        # Apply every filter; a port must pass all of them.
        pok = True
        # Is it in the candidate list?
        if port.loc_code not in pports:
            pok = False
        # Is the link state up
        if check_link_status and not port.link_status:
            pok = False
        # Does it have available logical ports?
        if port.cfg_lps >= port.cfg_max_lps:
            pok = False
        # Does it have capacity?
        des_cap = port.min_granularity
        if capacity is not None:
            # Must be at least min_granularity.
            des_cap = max(des_cap, capacity)
        if port.allocated_capacity + des_cap > 1.0:
            pok = False
        return pok
    pport_wraps = []
    for sriov in sriov_adaps:
        for pport in sriov.phys_ports:
            if port_ok(pport):
                # Deep-copy so callers may mutate the wrapper without
                # affecting the adapter's own port list.
                pp2add = copy.deepcopy(pport)
                pport_wraps.append(pp2add)
    # Redundant ports, when supplied, relax the redundancy requirement here.
    if len(pport_wraps) < redundancy and not redundant_pports:
        raise ex.InsufficientSRIOVCapacity(red=redundancy,
                                           found_vfs=len(pport_wraps))
    LOG.debug('Filtered list of physical ports: %s' %
              str([pport.loc_code for pport in pport_wraps]))
    return pport_wraps
def read(string):
    """ Given a single interval from a GFFv2 file, returns an Interval object.
    Will return meta lines if they start with #, track, or browser. """
    # Meta/header lines are wrapped whole into a meta interval.
    if string.startswith(metalines):
        return interval(_is_meta=True, seqname=string)
    values = []
    cols = string.split(delimiter)
    # Required fields must all be present; pop(0) consumes left to right.
    for field in required_fields:
        values.append((field, cols.pop(0)))
    try:
        for field in optional_fields:
            values.append((field, cols.pop(0)))
    except IndexError:
        # Optional fields may be absent; stop at the first missing one.
        pass
    if cols:
        # If there are still fields remaining after consuming all
        # the required and optional fields
        raise IndexError("Too many columns: {}".format(cols))
    fields = dict(values)
    i = interval(**fields)
    # Account for 0-based indexing
    i['start'] += 1
    return i
from typing import Union
from pathlib import Path
from typing import Dict
def average_results(results_path: Union[Path, str], split_on: str = " = ") -> Dict[str, float]:
    """
    Average accuracy values from a file.

    Parameters
    ----------
    results_path : Union[Path, str]
        The file to read results from.
    split_on : str
        The symbol which separates an accuracy's key from its value.

    Returns
    -------
    averages : Dict[str, float]
        A dictionary mapping each accuracy key to its average value.
    """
    collected = defaultdict(list)
    with open(results_path, "r") as handle:
        for raw_line in handle:
            pieces = raw_line.split(split_on)
            # Skip any line that does not split into exactly key/value.
            if len(pieces) != 2:
                continue
            name, number = pieces
            name = name.strip()
            if "accuracy" in name:
                collected[name].append(float(number))
    return {name: np.mean(numbers) for name, numbers in collected.items()}
import sys
import os
def absPath(myPath):
    """Get absolute path to resource, works for dev and for PyInstaller.

    When frozen, PyInstaller unpacks resources into the temp folder named
    by sys._MEIPASS; otherwise the path is resolved relative to this file.
    """
    # Narrowed from `except Exception`: a missing `sys._MEIPASS` raises
    # exactly AttributeError, and the broad clause could mask real errors
    # raised after the attribute lookup succeeded.
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except AttributeError as e:
        lprint("did not find MEIPASS: %s " % e)
        base_path = os.path.abspath(os.path.dirname(__file__))
        return os.path.join(base_path, myPath)
    lprint("found MEIPASS: %s " % os.path.join(base_path, os.path.basename(myPath)))
    return os.path.join(base_path, os.path.basename(myPath))
def dict_collection_only(method):
    """
    Handles the behavior when a group is present on a clumper object.
    """

    @wraps(method)
    def wrapped(clumper, *args, **kwargs):
        if clumper.only_has_dictionaries:
            return method(clumper, *args, **kwargs)
        # Find the first offending (non-dict) item for the error message.
        non_dict = next(d for d in clumper if not isinstance(d, dict))
        raise ValueError(
            f"The `{method}` won't work unless all items are dictionaries. Found: {non_dict}."
        )

    return wrapped
def country_name(country_id):
    """
    Returns a country name

    >>> country_name(198)
    u'Spain'
    """
    if country_id == '999':
        # Added for internal call - ie flag/phone.png
        # NOTE(review): compares against the *string* '999' while the
        # doctest passes an int id — confirm the callers' argument type.
        return _('internal call').title()
    try:
        obj_country = Country.objects.get(id=country_id)
        return obj_country.countryname
    except:
        # NOTE(review): bare except swallows everything (including DB
        # errors); consider narrowing to Country.DoesNotExist.
        return _('unknown').title()
import sys
def parse_mjinfo():
    """List up game history stored in Flash cache directory

    Returns
    -------
    dict
        Key : str
            File name in which data are stored
        Value : list of dict
            Information of logs. See :func:`parse_sol`.

    Raises
    ------
    NotImplementedError
        On platforms other than macOS and Linux.
    """
    # Locate the Flash cache root per platform, then parse every dir found.
    if sys.platform == 'darwin':
        root_dirs = _get_flash_root_mac()
    elif sys.platform.startswith('linux'):
        root_dirs = _get_flash_root_linux()
    else:
        raise NotImplementedError(
            '`list` function is not implemented for %s' % sys.platform)
    return _parse_flash_dirs(root_dirs)
import copy
def multiply(input_op_node: cc_dag.OpNode, output_name: str, target_col_name: str, operands: list):
    """
    Define Multiply operation.

    :param input_op_node: Parent node for the node returned by this method.
    :param output_name: Name of returned Multiply node.
    :param target_col_name: Name of column that stores results of Multiply operation.
        If target_col_name refers to an already existing column in the relation, then that
        column should also be the first argument in the operands list. If target_col_name
        does not refer to an existing column, then the columns in the operands list will
        be multiplied together in order, and stored in a column named <target_col_name> and
        appended to the relation.
    :param operands: List of operand columns & scalars.
    :return: Multiply OpNode.
    """
    # Get input relation from input node
    in_rel = input_op_node.out_rel
    # Get relevant columns and create copies
    out_rel_cols = copy.deepcopy(in_rel.columns)
    # Replace all column names with corresponding columns (scalars pass through).
    operands = [utils.find(in_rel.columns, op) if isinstance(op, str) else op for op in operands]
    # if target_col already exists, it will be at the 0th index of operands
    if target_col_name == operands[0].name:
        target_column = utils.find(in_rel.columns, target_col_name)
    else:
        # TODO: figure out new column's trust_set
        # New column is appended at the end of the relation.
        target_column = rel.Column(output_name, target_col_name, len(in_rel.columns), "INTEGER", set())
        out_rel_cols.append(target_column)
    # Create output relation
    out_rel = rel.Relation(output_name, out_rel_cols, copy.copy(in_rel.stored_with))
    out_rel.update_columns()
    # Create our operator node
    op = cc_dag.Multiply(out_rel, input_op_node, target_column, operands)
    # Add it as a child to input node
    input_op_node.children.add(op)
    return op
from typing import Dict
from pathlib import Path
import os
import shutil
import subprocess
def download_clip_wrapper(
    row, label_to_dir, trim_format, tmp_dir, existing_files: Dict[str, Path] = None
):
    """Wrapper for parallel processing purposes.

    Copies the clip from `existing_files` when cached, validates any
    pre-existing output file with ffmpeg (re-downloading when corrupt),
    and otherwise downloads the clip.

    Returns a status tuple beginning with the clip id.
    """
    # Bug fix: avoid a mutable default argument; None means "no cache".
    if existing_files is None:
        existing_files = {}
    output_filename = construct_video_filename(row, label_to_dir)
    clip_id = os.path.basename(output_filename).split(".mp4")[0]
    if clip_id in existing_files:
        shutil.copyfile(src=existing_files[clip_id], dst=output_filename)
        status = (clip_id, "Copied", output_filename)
        print(status)
    if os.path.exists(output_filename):
        # Decode the file and collect errors on stderr.  Bug fixes: the
        # original shell pipeline referenced an undefined `erf` temp file
        # and interpolated the filename into a shell string (injection
        # risk); an arg list with shell=False avoids both.
        try:
            output = subprocess.check_output(
                ["ffmpeg", "-v", "error", "-i", output_filename, "-f", "null", "-"],
                stderr=subprocess.STDOUT,
            )
            if not output:
                status = (clip_id, "Exists", output_filename)
                print(status)
                return status
        except subprocess.CalledProcessError as err:
            print(err)
        # Reached when ffmpeg reported errors or failed outright: drop the
        # corrupt file and fall through to a fresh download.  (Previously a
        # zero-exit ffmpeg run with error output left the corrupt file in
        # place.)
        print(f"Removing corrupted file: {output_filename}")
        try:
            os.remove(output_filename)
        except OSError:
            pass
    downloaded, log = download_clip(row["video-id"], output_filename, tmp_dir=tmp_dir)
    status = (clip_id, downloaded, log, output_filename)
    print(status)
    return status
import os
def list_subdir(path):
    """List the names of all immediate subdirectories of ``path``.

    ``path`` is expected to support the ``/`` join operator
    (e.g. a ``pathlib.Path``).
    """
    subdirs = []
    for entry in os.listdir(path):
        if os.path.isdir(path / entry):
            subdirs.append(entry)
    return subdirs
def update_jira(vulnerability_dictionary, config):
    """
    Feeds JIRA with issues based on the vulnerability dictionary.

    For every vulnerability that passes ``vulnerability_filter``, either a
    new Jira issue is created (when none exist yet) or the existing Quart
    issues are updated.  Filtered-out vulnerabilities are only logged.

    :param vulnerability_dictionary: dictionary of vulnerabilities
    :param config: confiture object that contains the quart configuration.
    :return: int, number of issues created/updated to Jira.
    """
    passed_vulnerabilities_counter = 0
    # Connect to the Jira server configured under the 'jira' subsection.
    jira = JIRA(
        options={'server': config.subsection('jira').get('url')},
        basic_auth=(
            str(config.subsection('jira').get('user')),
            str(config.subsection('jira').get('password'))
        )
    )
    # Fetch every Quart-created issue once up front; the per-vulnerability
    # lookups below run against this in-memory list.
    issue_list = get_quart_issue_list(jira, config)
    for vulnerability in vulnerability_dictionary:
        # Get all issues created by Quart on this vulnerability
        vulnerability_issues = get_vulnerability_issue_list(
            issue_list,
            vulnerability_dictionary[vulnerability]['qid'],
            config
        )
        # Issue creation filter to avoid creating unnecessary issues
        if vulnerability_filter(
                vulnerability_dictionary[vulnerability],
                vulnerability_issues,
                config
        ):
            verify_hostname(vulnerability_dictionary[vulnerability])
            # If no existing issues: create a new issue
            if not vulnerability_issues:
                new_issue = create_vulnerability_issue(
                    jira,
                    vulnerability_dictionary[vulnerability],
                    config
                )
                LOGGER.info(
                    'Created issue: %s for QID %s -> %s',
                    str(new_issue),
                    vulnerability_dictionary[vulnerability]['qid'],
                    vulnerability_dictionary[vulnerability]['title']
                )
                passed_vulnerabilities_counter += 1
            # Else update existing issues
            else:
                update_jira_issues(jira, vulnerability_issues,
                                   vulnerability_dictionary[vulnerability],
                                   config)
                passed_vulnerabilities_counter += 1
        else:
            LOGGER.debug(
                'Vulnerability ignored: QID %s %s ''- Category: '
                '%s - Severity %d',
                vulnerability_dictionary[vulnerability]['qid'],
                vulnerability_dictionary[vulnerability]['title'],
                vulnerability_dictionary[vulnerability]['category'],
                vulnerability_dictionary[vulnerability]['severity']
            )
    return passed_vulnerabilities_counter
def KLdist(P, Q):
    """
    KLDIST Kullbach-Leibler distance.

    D = KLDIST(P,Q) calculates the Kullbach-Leibler distance (information
    divergence) of the two input distributions.  Entries where either
    distribution is zero are dropped, and both are renormalized over the
    common support before the divergence is computed.
    """
    support = (P * Q) > 0
    p = P[support]
    q = Q[support]
    p = p / p.sum()
    q = q / q.sum()
    return np.sum(p * np.log(p / q))
import argparse
def get_args():
    """Build the CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="""\nRuns Stacks process_radtags on each plate in barcodes.txt file""")
    # The three path-valued options share the same required/FullPaths setup.
    path_options = (
        ("--Read1", """Path to Read1 file"""),
        ("--Read2", """Path to Read2 file"""),
        ("--i7barcodes", """i7barcodes file in form i7barcodetabPlate"""),
    )
    for flag, help_text in path_options:
        parser.add_argument(
            flag,
            required=True,
            action=FullPaths,
            help=help_text
        )
    parser.add_argument(
        "--logname",
        required=True,
        help="""name of log file"""
    )
    return parser.parse_args()
def homepage(request: HttpRequest) -> HttpResponse:
    """Render the application's home page, or a 404 page when the target
    person cannot be found."""
    context = make_context(request)
    person = get_person(TARGET_NICK)
    if not person:
        return render(request, "404.html", status=404)
    context["person"] = person
    # Both lists are ordered shortest-name-first before being laid out
    # column-wise.
    technologies = person.technology_set.order_by(Length("name").asc()).all()
    context["technologies"] = columns_to_rows(technologies)
    protocols = person.protocol_set.order_by(Length("name").asc()).all()
    context["protocols"] = columns_to_rows(protocols)
    return render(request, "index.html", context=context)
def _call_rmat(scale, num_edges, create_using, mg):
    """
    Simplifies calling RMAT by requiring only specific args that are varied by
    these tests and hard-coding all others.
    """
    # Parameters that the tests never vary.
    fixed_params = dict(
        a=0.1,
        b=0.2,
        c=0.3,
        seed=24,
        clip_and_flip=False,
        scramble_vertex_ids=True,
    )
    return rmat(scale=scale,
                num_edges=num_edges,
                create_using=create_using,
                mg=mg,
                **fixed_params)
def value_loss(old_value):
    """Clipped value loss for PPO (PPO2-style value clipping)."""
    def loss(y_true, y_pred):
        # Keep the new value prediction within LOSS_CLIPPING of the old one.
        clipped_pred = old_value + K.clip(y_pred - old_value, -LOSS_CLIPPING, LOSS_CLIPPING)
        # Pessimistic (element-wise maximum) of clipped and unclipped errors.
        unclipped_error = K.square(y_pred - y_true)
        clipped_error = K.square(clipped_pred - y_true)
        return .5 * K.mean(K.maximum(unclipped_error, clipped_error))
    return loss
import torch
import warnings
def split_by_worker(urls):
    """Selects a subset of urls based on Torch get_worker_info.
    Used as a shard selection function in Dataset."""
    urls = list(urls)
    worker_info = torch.utils.data.get_worker_info()
    # Outside a worker process every shard belongs to us.
    if worker_info is None:
        return urls
    wid = worker_info.id
    num_workers = worker_info.num_workers
    # Warn once (from worker 0) when there are fewer shards than workers.
    if wid == 0 and len(urls) < num_workers:
        warnings.warn(f"num_workers {num_workers} > num_shards {len(urls)}")
    return urls[wid::num_workers]
import os
import psutil
import sys
import math
def cpu_count():
    """Get the available CPU count for this system.

    Takes the minimum value from the following locations:

    - Total system cpus available on the host.
    - CPU Affinity (if set)
    - Cgroups limit (if set)

    Returns
    -------
    int
        The effective number of CPUs available to this process.
    """
    count = os.cpu_count()
    # Check CPU affinity if available
    # NOTE(review): the None check suggests psutil is normally an optional
    # (try/except) import at module level -- confirm against the full file.
    if psutil is not None:
        try:
            affinity_count = len(psutil.Process().cpu_affinity())
            if affinity_count > 0:
                count = min(count, affinity_count)
        except Exception:
            # Best effort: cpu_affinity is unsupported on some platforms.
            pass
    # Check cgroups if available
    if sys.platform == "linux":
        # The directory name isn't standardized across linux distros, check both
        for dirname in ["cpuacct,cpu", "cpu,cpuacct"]:
            try:
                with open("/sys/fs/cgroup/%s/cpu.cfs_quota_us" % dirname) as f:
                    quota = int(f.read())
                with open("/sys/fs/cgroup/%s/cpu.cfs_period_us" % dirname) as f:
                    period = int(f.read())
                # We round up on fractional CPUs
                cgroups_count = math.ceil(quota / period)
                # quota == -1 means "no limit" and yields a non-positive
                # count, which this check deliberately ignores.
                if cgroups_count > 0:
                    count = min(count, cgroups_count)
                break
            except Exception:
                # Missing cgroup files (e.g. cgroup v2 layouts) fall through
                # to the next candidate directory.
                pass
    return count
def runge_kutta4(y, x, dx, f):
    """computes 4th order Runge-Kutta for dy/dx.

    Parameters
    ----------
    y : scalar
        Initial/current value for y
    x : scalar
        Initial/current value for x
    dx : scalar
        difference in x (e.g. the time step)
    f : ufunc(y,x)
        Callable function (y, x) that you supply to compute dy/dx for
        the specified values.
    """
    # Four slope samples: start, two midpoint estimates, and the far end.
    k1 = dx * f(y, x)
    k2 = dx * f(y + 0.5 * k1, x + 0.5 * dx)
    k3 = dx * f(y + 0.5 * k2, x + 0.5 * dx)
    k4 = dx * f(y + k3, x + dx)
    # Simpson-style weighted average of the slope estimates.
    increment = (k1 + 2 * k2 + 2 * k3 + k4) / 6.
    return y + increment
def genBinaryFileRDD(sc, path, numPartitions=None):
    """
    Read files from a directory to a RDD.

    :param sc: SparkContext.
    :param path: str, path to files.
    :param numPartition: int, number or partitions to use for reading files.
    :return: RDD with a pair of key and value: (filePath: str, fileData: BinaryType)
    """
    # Fall back to the context's default parallelism when unset.
    numPartitions = numPartitions or sc.defaultParallelism
    raw_files = sc.binaryFiles(path, minPartitions=numPartitions)
    return raw_files.repartition(numPartitions)
import requests
def generate():
    """
    Generate a classic image quote

    :rtype: InspiroBotImageResponse
    :return: The generated response
    :raises InsprioBotError: if the HTTP request fails or the API returns a
        non-200 status code.
    """
    try:
        r = requests.get("{}?generate=true".format(url()))
    # Catch only network-level failures: the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and hid programming errors.
    except requests.exceptions.RequestException as exc:
        raise InsprioBotError("API request failed. Failed to connect") from exc
    if r.status_code != 200:
        raise InsprioBotError("API request failed. Invalid response code ({})".format(r.status_code))
    return InspiroBotImageResponse(r.text)
import pickle
def inference_lstm(im_path, model_path, tok_path, max_cap_len=39):
    """
    Perform inference using a model trained to predict LSTM.

    Greedily decodes a caption for the image at *im_path* and prints it
    (without the start token).

    Args:
        im_path: path to the input image.
        model_path: path to the saved Keras model.
        tok_path: path to the pickled tokenizer.  NOTE: pickle.load executes
            arbitrary code -- only load tokenizers from trusted sources.
        max_cap_len: maximum caption length, used both for padding and as
            the decoding cutoff.
    """
    # Context manager closes the tokenizer file (the bare open() call
    # previously leaked the handle).
    with open(tok_path, 'rb') as tok_file:
        tok = pickle.load(tok_file)
    model = load_model(
        model_path,
        custom_objects={'RepeatVector4D': RepeatVector4D})
    encoder = ImageEncoder(random_transform=False)
    im_encoding = encoder.process(im_path)

    def encode_partial_cap(partial_cap, im):
        # Map known words to indices, then pad to the fixed caption length.
        input_text = [[tok.word_index[w] for w in partial_cap if w in tok.word_index]]
        input_text = pad_sequences(input_text, maxlen=max_cap_len, padding='post')
        im = np.array([im])
        return [im, input_text]

    partial_cap = ['starttoken']
    EOS_TOKEN = 'endtoken'
    while True:
        inputs = encode_partial_cap(partial_cap, im_encoding)
        preds = model.predict(inputs)[0, len(partial_cap), :]
        next_idx = np.argmax(preds, axis=-1)
        next_word = tok.index_word[next_idx]
        # Stop at end-of-sequence or at the maximum caption length (this
        # bound was previously hard-coded to 39, ignoring max_cap_len).
        if next_word == EOS_TOKEN or len(partial_cap) == max_cap_len:
            break
        partial_cap.append(next_word)
    print(' '.join(partial_cap[1:]))
def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node:
    """Takes in a iterative pruned rules_dict and returns iterative proof
    tree, built breadth-first from *root*."""
    root_node = Node(root)
    frontier = deque([root_node])
    while frontier:
        current = frontier.popleft()
        # The lexicographically smallest rule for this label drives expansion.
        rule = sorted(rules_dict[current.label])[0]
        if rule == ():
            continue
        children = [Node(label) for label in rule]
        current.children = children
        # Children labelled with the root are not expanded again.
        frontier.extend(child for child in children if child.label != root)
    return root_node
def get_host_buffer_init(arg_name, num_elem, host_data_type, host_init_val):
    """Get host code snippet: init host buffer"""
    # Placeholder -> replacement pairs, applied in the original order.
    substitutions = (
        ("ARG_NAME", arg_name),
        ("NUM_ELEM", str(num_elem)),
        ("HOST_DATA_TYPE", host_data_type),
        ("HOST_INIT_VALUE", host_init_val),
    )
    src = get_snippet("snippet/clHostBufferInit.txt")
    for placeholder, replacement in substitutions:
        src = src.replace(placeholder, replacement)
    return src
def get_setting(setting, override=None):
    """Get setting.

    Get a setting from `muses` conf module, falling back to
    the default.

    If override is not None, it will be used instead of the setting.

    :param setting: String with setting name
    :param override: Value to use when no setting is available. Defaults to
        None.
    :return: Setting value.
    """
    attr_name = 'MUSES_{0}'.format(setting)
    # Project-level settings take precedence over the bundled defaults.
    if hasattr(settings, attr_name):
        return getattr(settings, attr_name)
    if hasattr(defaults, setting):
        return getattr(defaults, setting)
    return override
def extract_year_month_from_key(key):
    """
    Given an AWS S3 `key` (str) for a file,
    extract and return the year (int) and
    month (int) specified in the key after
    'ano=' and 'mes='.

    Handles both one- and two-digit month values (the previous version
    sliced exactly one character after 'mes=', so 'mes=11' parsed as 1 and
    'mes=07' parsed as 0).

    Raises ValueError when a marker is not followed by digits.
    """
    year = _int_after(key, 'ano=', max_digits=4)
    month = _int_after(key, 'mes=', max_digits=2)
    return year, month


def _int_after(key, marker, max_digits):
    """Parse the run of up to *max_digits* digits following *marker* in *key*."""
    start = key.find(marker) + len(marker)
    digits = ''
    for ch in key[start:start + max_digits]:
        if not ch.isdigit():
            break
        digits += ch
    return int(digits)
def depolarize(p: float) -> DepolarizingChannel:
    r"""Returns a DepolarizingChannel with given probability of error.

    This channel applies one of four disjoint possibilities: nothing (the
    identity channel) or one of the three pauli gates. The disjoint
    probabilities of the three gates are all the same, p / 3, and the
    identity is done with probability 1 - p. The supplied probability
    must be a valid probability or else this constructor will raise a
    ValueError.

    This channel evolves a density matrix via

        \rho -> (1 - p) \rho
            + (p / 3) X \rho X + (p / 3) Y \rho Y + (p / 3) Z \rho Z

    Args:
        p: The probability that one of the Pauli gates is applied. Each of
            the Pauli gates is applied independently with probability p / 3.

    Raises:
        ValueError: if p is not a valid probability.
    """
    # Raw docstring (r""") keeps the LaTeX backslashes intact; previously
    # "\rho" embedded a literal carriage-return escape ("\r") in the text.
    return DepolarizingChannel(p)
import re
def verilog_to_circuit(
    netlist,
    name,
    infer_module_name=False,
    blackboxes=None,
    warnings=False,
    error_on_warning=False,
    fast=False,
):
    """
    Creates a new Circuit from a module inside Verilog code.

    Parameters
    ----------
    netlist: str
            Verilog code.
    name: str
            Module name.
    infer_module_name: bool
            If True and no module named `name` is found, parse the first
            module in the netlist.
    blackboxes: seq of BlackBox
            Blackboxes in module.
    warnings: bool
            If True, warnings about unused nets will be printed.
    error_on_warning: bool
            If True, unused nets will cause raise `VerilogParsingWarning`
            exceptions.
    fast: bool
            If True, uses the `fast_parse_verilog_netlist` function from
            parsing/fast_verilog.py. This function is faster for parsing
            very large netlists, but makes stringent assumptions about
            the netlist and does not provide error checking. Read
            the docstring for `fast_parse_verilog_netlist` in order to
            confirm that `netlist` adheres to these assumptions before
            using this flag.

    Returns
    -------
    Circuit
            Parsed circuit.

    Raises
    ------
    ValueError
            If no module named `name` (or, with `infer_module_name`, no
            module at all) can be found in `netlist`.
    """
    if blackboxes is None:
        blackboxes = []

    if fast:
        return fast_parse_verilog_netlist(netlist, blackboxes)

    # Parse module.  Patterns are raw strings: the previous plain (f-)strings
    # relied on "\s" being an unrecognized escape, which raises warnings on
    # modern Python.
    # NOTE(review): `name` is interpolated verbatim, so regex metacharacters
    # in a module name would change the pattern -- confirm callers pass
    # plain identifiers.
    regex = rf"(module\s+{name}\s*\(.*?\);(.*?)endmodule)"
    m = re.search(regex, netlist, re.DOTALL)
    try:
        module = m.group(1)
    except AttributeError:
        if infer_module_name:
            # Fall back to the first module definition in the netlist.
            regex = r"(module\s+(.*?)\s*\(.*?\);(.*?)endmodule)"
            m = re.search(regex, netlist, re.DOTALL)
            try:
                module = m.group(1)
            except AttributeError:
                raise ValueError("Could not read netlist: no modules found")
        else:
            raise ValueError(f"Could not read netlist: {name} module not found")

    return parse_verilog_netlist(module, blackboxes, warnings, error_on_warning)
def load_all_channels(event_id=0):
    """Returns a 3-D dataset corresponding to all the electrodes for a single subject
    and a single event. The first two columns of X give the spatial dimensions, and
    the third dimension gives the time.

    Args:
        event_id: index of the event (trial) to extract.

    Returns:
        X: (nchan * ntime, 3) array of [x_loc, y_loc, time] rows.
        y: (nchan * ntime,) array of waveform amplitudes.
    """
    info = load_waveform_data(eeg_data_file())
    locs = load_channel_locs(channel_locs_file())
    nchan, ntime, nev = info.waveforms.shape
    # Collect per-channel pieces and concatenate once at the end: the
    # previous np.vstack-in-a-loop copied the accumulated arrays on every
    # iteration (quadratic in the number of channels).
    X_parts = []
    y_parts = []
    for c in range(nchan):
        curr_X = np.empty((ntime, 3))
        curr_X[:, 0] = locs[c, 0]
        curr_X[:, 1] = locs[c, 1]
        curr_X[:, 2] = info.times
        X_parts.append(curr_X)
        y_parts.append(info.waveforms[c, :, event_id].astype(float))
    if not X_parts:
        return np.zeros((0, 3)), np.zeros(0)
    return np.vstack(X_parts), np.concatenate(y_parts)
def parentheses_cleanup(xml):
    """Clean up where parentheses exist between paragraph and emphasis tags

    For each paragraph whose *first* child is an emphasis (<E>) element,
    move a leading "(" or trailing ")" out of the emphasis into the plain
    paragraph text/tail, but only when the matching bracket already lives
    outside the emphasis.  Mutates the tree in place.
    """
    # We want to treat None's as blank strings
    def _str(x):
        return x or ""
    for em in xml.xpath("//P/*[position()=1 and name()='E']"):
        par = em.getparent()
        # left: text before the <E>, middle: text inside it,
        # right: tail text directly after it.
        left, middle, right = _str(par.text), _str(em.text), _str(em.tail)
        has_open = '(' in left[-1:] + middle[:1]
        has_close = ')' in middle[-1:] + right[:1]
        # NOTE: the two branches are order-sensitive -- the second re-reads
        # em.text (possibly shortened by the first) while its condition still
        # uses the originally captured `middle`.
        if not left.endswith('(') and middle.startswith('(') and has_close:
            # Move '(' out
            par.text = _str(par.text) + "("
            em.text = em.text[1:]
        if middle.endswith(')') and not right.startswith(')') and has_open:
            # Move ')' out
            em.text = em.text[:-1]
            em.tail = ")" + _str(em.tail)
import numpy
def filter_atoms(coordinates, num_atoms=None, morphology="sphere"):
    """
    Filter the atoms so that the crystal has a specific morphology with a given number of atoms

    Params:
        coordinates (array): The atom coordinates (structured array with
            "x", "y" and "z" fields)
        num_atoms (int): The number of atoms
        morphology (str): The morphology of the crystal

    Returns:
        array: The filtered coordinates
    """

    def _sphere(coords, count):
        # Stack positions as an (N, 3) array and find the centre of mass.
        positions = numpy.array([coords["x"], coords["y"], coords["z"]]).T
        centre_of_mass = numpy.mean(positions, axis=0)
        # Rank atoms by squared distance from the centre and keep the
        # closest `count` of them.
        sq_distance = numpy.sum((positions - centre_of_mass) ** 2, axis=1)
        order = numpy.argsort(sq_distance)
        return coords[order[0:count]]

    # Nothing to do when no target count or morphology was requested.
    if num_atoms is None or morphology is None:
        return coordinates

    # Check the number of atoms
    assert len(coordinates) >= num_atoms

    # Dispatch on the requested morphology (KeyError for unknown names).
    return {"sphere": _sphere}[morphology](coordinates, num_atoms)
def predict_timeslice_single(vis: Visibility, model: Image, predict=predict_2d, remove=True,
                             gcfcf=None, **kwargs) -> Visibility:
    """ Predict using a single time slices.

    This fits a single plane and corrects the image geometry.

    :param vis: Visibility to be predicted (its 'vis' data is overwritten)
    :param model: model image
    :param predict: base prediction function applied after the w-plane fit
    :param remove: Remove fitted w (so that wprojection will do the right thing)
    :param gcfcf: (Grid correction function, convolution function)
    :return: resulting visibility (in place works)
    """
    log.debug("predict_timeslice: predicting using time slices")
    assert isinstance(vis, Visibility), vis
    # Start from zeroed visibilities; final values come from `predict`.
    vis.data['vis'][...] = 0.0
    # Fit and remove best fitting plane for this slice
    uvw = vis.uvw
    avis, p, q = fit_uvwplane(vis, remove=remove)
    # We want to describe work image as distorted. We describe the distortion by putting
    # the olbiquity parameters in the wcs. The input model should be described as having
    # zero olbiquity parameters.
    # Note that this has to be zero relative in first element, one relative in second!!!
    newwcs = model.wcs.deepcopy()
    newwcs.wcs.set_pv([(0, 1, -p), (0, 2, -q)])
    workimage, footprintimage = reproject_image(model, newwcs, shape=model.shape)
    # Mask out regions the reprojection could not fill.
    workimage.data[footprintimage.data <= 0.0] = 0.0
    workimage.wcs.wcs.set_pv([(0, 1, -p), (0, 2, -q)])
    # Now we can do the predict
    if remove:
        # Restore the original uvw so the downstream predict (e.g.
        # w-projection) sees the unmodified coordinates.
        avis.data['uvw'][...] = uvw
    vis = predict(avis, workimage, gcfcf=gcfcf, **kwargs)
    return vis
def weekly():
    """The weekly status page."""
    db = get_session(current_app)
    #select id, user_id, created, strftime('%Y%W', created), date(created, 'weekday 1'), content from status order by 4, 2, 3;
    weekly_query = (
        db.query(Status)
        .filter_by(reply_to=None)
        .order_by(
            desc(WeekColumnClause("created")),
            Status.user_id,
            desc(Status.created))
    )
    statuses = paginate(
        weekly_query,
        request.args.get('page', 1),
        startdate(request),
        enddate(request),
        per_page=100)
    return render_template(
        'status/weekly.html',
        week=request.args.get('week', None),
        statuses=statuses,)
from typing import List
import pathlib
from typing import Sequence
def parse_source_files(
    src_files: List[pathlib.Path],
    platform_overrides: Sequence[str],
) -> List[LockSpecification]:
    """
    Parse a sequence of dependency specifications from source files

    Dispatches on the file name: `meta.yaml` files are rendered for the
    given target platforms, `pyproject.toml` files use the pyproject
    parser, and everything else is treated as a conda environment file.

    Parameters
    ----------
    src_files :
        Files to parse for dependencies
    platform_overrides :
        Target platforms to render meta.yaml files for
    """
    specs: List[LockSpecification] = []
    for source in src_files:
        if source.name == "meta.yaml":
            spec = parse_meta_yaml_file(source, list(platform_overrides))
        elif source.name == "pyproject.toml":
            spec = parse_pyproject_toml(source)
        else:
            spec = parse_environment_file(source, pip_support)
        specs.append(spec)
    return specs
def delete_registry(
        service_account_json, project_id, cloud_region, registry_id):
    """Deletes the specified registry."""
    # [START iot_delete_registry]
    print('Delete registry')
    client = get_client(service_account_json)
    registry_name = 'projects/{}/locations/{}/registries/{}'.format(
        project_id, cloud_region, registry_id)
    # Build the delete request against the fully-qualified registry name.
    delete_request = client.projects().locations().registries().delete(
        name=registry_name)
    return delete_request.execute()
    # [END iot_delete_registry]
def bundle(cls: type) -> Bundle:
    """ # Bundle Definition Decorator

    Converts a class-body full of Bundle-storable attributes (Signals, other Bundles) to an `hdl21.Bundle`.
    Example Usage:

    ```python
    import hdl21 as h

    @h.bundle
    class Diff:
        p = h.Signal()
        n = h.Signal()

    @h.bundle
    class DisplayPort:
        main_link = Diff()
        aux = h.Signal()
    ```

    Bundles may also define a `Roles` enumeration, inline within this class-body.
    `Roles` are optional pieces of enumerated endpoint-labels which aid in dictating `Signal` directions.
    Each `Signal` accepts optional source (`src`) and destination (`dst`) fields which (if set) must be one of these roles.

    ```python
    import hdl21 as h
    from enum import Enum, auto

    @h.bundle
    class HasRoles:
        class Roles(Enum):
            HOST = auto()
            DEVICE = auto()

        tx = h.Signal(src=Roles.HOST, dest=Roles.DEVICE)
        rx = h.Signal(src=Roles.DEVICE, dest=Roles.HOST)
    ```
    """
    if cls.__bases__ != (object,):
        raise RuntimeError(f"Invalid @hdl21.bundle inheriting from {cls.__bases__}")

    # Create the Bundle object
    bundle = Bundle(name=cls.__name__)
    protected_names = ["signals", "bundles"]
    # Take a lap through the class dictionary, type-check everything and assign relevant attributes to the bundle
    for key, val in cls.__dict__.items():
        if key in protected_names:
            raise RuntimeError(f"Invalid field name {key} in bundle {cls}")
        elif key.startswith("_"):
            # Skip dunder/underscore attributes (__module__, __qualname__,
            # __dict__, ...).  These were previously collected into `dunders`
            # and `unders` dicts that were never used.
            continue
        elif key == "Roles":
            # Special-case the upper-cased `Roles`, as it'll often be a class-def
            setattr(bundle, "roles", val)
        else:
            setattr(bundle, key, val)
    # And return the bundle
    return bundle
import time
import calendar
def previousMidnight(when):
    """Given a time_t 'when', return the greatest time_t <= when that falls
    on midnight, GMT."""
    # Keep only the date part of the GMT breakdown, then rebuild at 00:00:00.
    year, month, day = time.gmtime(when)[:3]
    midnight = (year, month, day, 0, 0, 0, 0, 0, 0)
    return calendar.timegm(midnight)
def compute_net_results(net, archname, test_data, df, num_folds=5):
    """
    For a given network, test on appropriate test data and return dataframes
    with results and predictions (named obviously)

    Args:
        net: the network to evaluate.
        archname: architecture name, forwarded to the result helpers.
        test_data: held-out data to evaluate on.
        df: dataframe forwarded to compute_tuned_results.
        num_folds: number of pretrained/tuned model indices to evaluate
            (previously hard-coded to 5).

    Returns:
        (pretrain_results, pretrain_predictions, tune_results,
        tune_predictions) -- the two results entries are concatenated
        DataFrames, the prediction entries are lists of DataFrames.
    """
    pretrain_results = []
    pretrain_predictions = []
    tune_results = []
    tune_predictions = []
    for idx in range(num_folds):
        results_df, predictions_df = compute_pretrained_results(net, archname, idx, test_data)
        pretrain_results.append(results_df)
        pretrain_predictions.append(predictions_df)
    pretrain_results = pd.concat(pretrain_results, axis=1)
    # Tuned models are evaluated for every (train fold, test fold) pair.
    for idx in range(num_folds):
        for test_idx in range(num_folds):
            results_df, predictions_df = compute_tuned_results(net, archname, idx, test_idx, test_data, df)
            tune_results.append(results_df)
            tune_predictions.append(predictions_df)
    tune_results = pd.concat(tune_results, axis=1, join='inner').stack().unstack()
    return pretrain_results, pretrain_predictions, tune_results, tune_predictions
def _distance_along_line(start, end, distance, dist_func, tol):
    """Point at a distance from start on the segment from start to end.

    It doesn't matter which coordinate system start is given in, as long
    as dist_func takes points in that coordinate system.

    Parameters
    ----------
    start : np.ndarray
        Starting point for the line.  NOTE(review): despite historically
        being documented as a tuple, the midpoint computation below uses
        `+` and `/ 2`, so this must support ndarray-style arithmetic.
    end : np.ndarray
        Outer bound on point's location.
    distance : float
        Positive distance to travel.
    dist_func : callable
        Two-argument function which returns distance.
    tol : float
        Relative error in distance to allow.

    Returns
    -------
    np.ndarray, shape (2, 1)
        Coordinates of a point.
    """
    initial_distance = dist_func(start, end)
    if initial_distance < distance:
        raise ValueError("End is closer to start ({}) than "
                         "given distance ({}).".format(
                             initial_distance, distance
                         ))
    if tol <= 0:
        raise ValueError("Tolerance is not positive: {}".format(tol))
    # Binary search for a point at the given distance.
    # NOTE(review): convergence assumes dist_func grows monotonically along
    # the segment -- confirm callers only pass metric-like functions.
    left = start
    right = end
    while not np.isclose(dist_func(start, right), distance, rtol=tol):
        midpoint = (left + right) / 2
        # If midpoint is too close, search in second half.
        if dist_func(start, midpoint) < distance:
            left = midpoint
        # Otherwise the midpoint is too far, so search in first half.
        else:
            right = midpoint
    return right
import argparse
import sys
import logging
import tempfile
import shutil
def top_level_cli(fragment, *pos_args, **kwargs):
    """ Runs a default CLI that assists in building and running gateware.

    If the user's options resulted in the board being programmed, this returns the fragment
    that was programmed onto the board. Otherwise, it returns None.

    :param fragment: the elaboratable to build, or a callable that produces
        one when invoked with *pos_args / **kwargs.
    :param pos_args: positional arguments forwarded to `fragment` when it
        is callable.
    :param kwargs: keyword arguments forwarded to `fragment` when it is
        callable.
    """
    name = fragment.__name__ if callable(fragment) else fragment.__class__.__name__
    parser = argparse.ArgumentParser(description=f"Gateware generation/upload script for '{name}' gateware.")
    parser.add_argument('--output', '-o', metavar='filename', help="Build and output a bitstream to the given file.")
    parser.add_argument('--erase', '-E', action='store_true',
         help="Clears the relevant FPGA's flash before performing other options.")
    parser.add_argument('--upload', '-U', action='store_true',
         help="Uploads the relevant design to the target hardware. Default if no options are provided.")
    parser.add_argument('--flash', '-F', action='store_true',
         help="Flashes the relevant design to the target hardware's configuration flash.")
    parser.add_argument('--dry-run', '-D', action='store_true',
         help="When provided as the only option; builds the relevant bitstream without uploading or flashing it.")
    parser.add_argument('--keep-files', action='store_true',
         help="Keeps the local files in the default `build` folder.")
    args = parser.parse_args()
    platform = get_appropriate_platform()
    # Set up our logging / output.
    if sys.stdout.isatty():
        log_format = LOG_FORMAT_COLOR
    else:
        log_format = LOG_FORMAT_PLAIN
    logging.basicConfig(level=logging.INFO, format=log_format)
    # If this isn't a fragment directly, interpret it as an object that will build one.
    if callable(fragment):
        fragment = fragment(*pos_args, **kwargs)
    # If we have no other options set, build and upload the relevant file.
    if (args.output is None and not args.flash and not args.erase and not args.dry_run):
        args.upload = True
    # Once the device is flashed, it will self-reconfigure, so we
    # don't need an explicitly upload step; and it implicitly erases
    # the flash, so we don't need an erase step.
    if args.flash:
        args.erase = False
        args.upload = False
    # Build the relevant gateware, uploading if requested.
    # A temporary directory is used unless --keep-files asked for `build`.
    build_dir = "build" if args.keep_files else tempfile.mkdtemp()
    # Build the relevant files.
    try:
        if args.erase:
            logging.info("Erasing flash...")
            platform.toolchain_erase()
            logging.info("Erase complete.")
        join_text = "and uploading gateware to attached" if args.upload else "for"
        logging.info(f"Building {join_text} {platform.name}...")
        products = platform.build(fragment,
            do_program=args.upload,
            build_dir=build_dir
        )
        logging.info(f"{'Upload' if args.upload else 'Build'} complete.")
        # If we're flashing the FPGA's flash, do so.
        if args.flash:
            logging.info("Programming flash...")
            platform.toolchain_flash(products)
            logging.info("Programming complete.")
        # If we're outputting a file, write it.
        if args.output:
            bitstream = products.get("top.bit")
            with open(args.output, "wb") as f:
                f.write(bitstream)
        # Return the fragment we're working with, for convenience.
        if args.upload or args.flash:
            return fragment
    # Clean up any directories we've created.
    finally:
        if not args.keep_files:
            shutil.rmtree(build_dir)
    return None
def exploits_listing(request,option=None):
    """
    Generate the Exploit listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    user = request.user
    # Permission gate first; everything below requires read access.
    if not user.has_access_to(ExploitACL.READ):
        return render_to_response("error.html",
                                  {'error': 'User does not have permission to view Exploit listing.'},
                                  RequestContext(request))
    if option == "csv":
        return generate_exploit_csv(request)
    return generate_exploit_jtable(request, option)
from typing import Sequence
from typing import Tuple
def find_command(tokens: Sequence[str]) -> Tuple[Command, Sequence[str]]:
    """Look up a command from *tokens*.

    Returns a (command, remaining_tokens) pair, or (None, tokens) when no
    command matches.  An assignment (``name = <command...>``) whose
    right-hand side is unknown raises KeyError.
    """
    looks_like_assignment = len(tokens) >= 3 and tokens[1] == '='
    if looks_like_assignment:
        target = tokens[0]
        command_string = tokens[2:]
        rvalue, remainder = find_command(command_string)
        if not rvalue:
            raise KeyError('could not find command: %s' %
                           ' '.join(command_string))
        return AssignCommand(target, rvalue), remainder
    if tokens[0] in aliased_commands:
        return aliased_commands[tokens[0]], tokens[1:]
    return None, tokens
def clean(df: pd.DataFrame, completelyInsideOtherBias: float = 0.7, filterCutoff: float = 0.65,
          algo: str = "jaccard", readFromFile: bool = True, writeToFile: bool = True,
          doBias: bool = True) -> pd.DataFrame:
    """Completely clean (deduplicate) a restaurant dataset.

    Args:
        df: a pandas DataFrame
        completelyInsideOtherBias: float parameter for the bias function
        filterCutoff: float parameter specifying at which distance value to cut off the distance list
        algo: to use for text distance comparison, default = "jaccard"
        readFromFile: if a cached text distance matrix should be read from a file, default = True
        writeToFile: if the calculated text distance matrix should be written to a file, default = True
        doBias: if the bias function should be applied, default = True

    Returns:
        a deduplicated pandas DataFrame
    """
    global eqRing
    df = preProcess(df)
    distances = calcDistances(df.cname.unique(), completelyInsideOtherBias, algo,
                              readFromFile, writeToFile, doBias)
    # Keep only pairs whose similarity reaches the cutoff.
    close_pairs = [entry for entry in distances if entry[2] >= filterCutoff]
    eqRing = convertToEqualityRings(close_pairs)
    return dedupe(df, eqRing)
def create_list(value, sublist_nb, sublist_size):
    """
    Build a list of `sublist_nb` independent sublists, each of length
    `sublist_size` and filled with `value`.
    """
    return [[value] * sublist_size for _ in range(sublist_nb)]
import os
def before_folder_option(location):
    """location: folder full path"""
    if not os.path.exists(location):
        # Create the missing folder via the RPC service.
        rpc.create_folder(location)
    # Make the folder world-writable, recursively, via RPC.
    rpc.chmod(location, '777', recursive=True)
    return True
from datetime import datetime
import re
def parse_time(date_str: str) -> datetime:
    """
    Parses out a string-formatted date into a well-structured datetime in UTC.

    Supports any of the following formats:
    - hh:mm
        In this format, we treat the value of the hh section to be 24hr format.
        If a user types in 1:00, it will be interpreted as 1am, not 1pm.
    - hh:mm(am|pm)
        In this format, we treat the value of the hh section to be 12hr format,
        and we rely on the am/pm flag to determine whether it is in the morning
        or the afternoon.  12:xxam is midnight and 12:xxpm is noon.

    Raises:
        ValueError: if *date_str* does not start with a recognizable time.
    """
    match = re.match(r"(\d?\d):(\d\d)(am|pm)?", date_str)
    if match is None:
        raise ValueError()
    groups = match.groups()
    hour = int(groups[0])
    minute = int(groups[1])
    if groups[2] == "pm" and hour < 12:
        hour += 12
    elif groups[2] == "am" and hour == 12:
        # 12:xxam is midnight; previously this case fell through unchanged
        # and was interpreted as noon.
        hour = 0
    now = get_now()
    time = datetime(
        year=now.year,
        month=now.month,
        day=now.day,
        hour=hour,
        minute=minute,
        second=0,
        microsecond=0,
    )
    return time
def gen_r_cr():
    """
    Generate the R-Cr table.
    """
    # Cr is centred at 128; 1.40199 is the Cr-to-R conversion gain used by
    # this codebase (close to the common 1.402 YCbCr coefficient).
    return [int(1.40199 * (level - 128)) for level in range(256)]
def fallible_to_exec_result_or_raise(
    fallible_result: FallibleProcessResult, description: ProductDescription
) -> ProcessResult:
    """Converts a FallibleProcessResult to a ProcessResult or raises an error."""
    # Non-zero exit codes surface as a ProcessExecutionFailure.
    if fallible_result.exit_code != 0:
        raise ProcessExecutionFailure(
            fallible_result.exit_code,
            fallible_result.stdout,
            fallible_result.stderr,
            description.value,
        )
    return ProcessResult(
        stdout=fallible_result.stdout,
        stdout_digest=fallible_result.stdout_digest,
        stderr=fallible_result.stderr,
        stderr_digest=fallible_result.stderr_digest,
        output_digest=fallible_result.output_digest,
    )
def tf_pose_to_coords(tf_pose):
    """Convert TransformStamped to Coordinates

    Parameters
    ----------
    tf_pose : geometry_msgs.msg.Transform or geometry_msgs.msg.TransformStamped
        transform pose.  The message is no longer mutated: an all-zero
        quaternion is normalized to identity locally instead of being
        written back into the caller's message.

    Returns
    -------
    ret : skrobot.coordinates.Coordinates
        converted coordinates.

    Raises
    ------
    TypeError
        If *tf_pose* is neither a Transform nor a TransformStamped.
    """
    if isinstance(tf_pose, geometry_msgs.msg.Transform):
        transform = tf_pose
    elif isinstance(tf_pose, geometry_msgs.msg.TransformStamped):
        transform = tf_pose.transform
    else:
        raise TypeError('{} not supported'.format(type(tf_pose)))
    rotation = transform.rotation
    qw, qx, qy, qz = rotation.w, rotation.x, rotation.y, rotation.z
    if qw == 0.0 and qx == 0.0 and qy == 0.0 and qz == 0.0:
        # An all-zero quaternion is not a valid rotation; use identity.
        qw = 1.0
    return Coordinates(pos=[transform.translation.x,
                            transform.translation.y,
                            transform.translation.z],
                       rot=[qw, qx, qy, qz])
def make_graph_indep_graphnet_functions(units,
                                        node_or_core_input_size,
                                        node_or_core_output_size = None,
                                        edge_input_size = None,
                                        edge_output_size = None,
                                        global_input_size = None,
                                        global_output_size = None,
                                        aggregation_function = 'mean',
                                        **kwargs):
    """
    A wrapper that creates the functions that are needed for a graph-independent GN block.
    Takes care of some flags that control a more general factory method for avoiding clutter.

    Usage:
      gn_core = GraphNet(**make_graph_indep_graphnet_functions(15, 20))

    * If only "node_or_core_input_size" is defined, the remaining input/output sizes
      default to it.
    * If only "node_or_core_input_size" and "node_or_core_output_size" are defined,
      then all corresponding input and output sizes are the same.

    Parameters:
      units: the width of the created MLPs
      node_or_core_input_size : the input size of the node MLP (also the default
        input size of the global and edge MLPs when those are not given).
      node_or_core_output_size : [None] the output size of the node MLP (also the
        default output size of the global and edge MLPs when those are not given).
      edge_input_size : [None] the edge state input size
      edge_output_size : [None] the edge state output size
      global_input_size : [None] the global state input size
      global_output_size : [None] the global state output size

    Raises:
      ValueError: if node_or_core_input_size is None.
      AssertionError: if kwargs repeats a keyword this wrapper sets itself.
    """
    # Validate the required size *before* deriving any defaults from it.
    if node_or_core_input_size is None:
        raise ValueError("You should provide the GN size of the size of several of the involved states!")
    if node_or_core_output_size is None:
        node_or_core_output_size = node_or_core_input_size
    if edge_input_size is None:
        edge_input_size = node_or_core_input_size
    if edge_output_size is None:
        edge_output_size = node_or_core_output_size
    if global_input_size is None:
        global_input_size = node_or_core_input_size
    if global_output_size is None:
        global_output_size = node_or_core_output_size
    # Just in case it is called from another wrapper that uses kwargs, check that
    # keywords controlled by this wrapper are not repeated by the caller.
    kwargs_forbidden = ['graph_indep', 'create_global_function', 'use_global_input', 'use_global_to_edge','use_global_to_node']
    assert not any(k in kwargs_forbidden for k in kwargs), (
        "kwargs must not contain any of %s" % kwargs_forbidden)
    return make_mlp_graphnet_functions(units,
                                       node_or_core_input_size,
                                       node_or_core_output_size,
                                       edge_input_size = edge_input_size,
                                       edge_output_size = edge_output_size,
                                       global_output_size = global_output_size,
                                       global_input_size = global_input_size,
                                       use_global_input = True,
                                       use_global_to_edge=False,
                                       use_global_to_node=False,
                                       create_global_function=True,
                                       graph_indep=True,
                                       aggregation_function = aggregation_function,
                                       **kwargs)
import typing
def is_generic(t):
    """
    Checks if t is a subclass of typing.Generic. The implementation is done per
    Python version, as the typing module has changed over time.

    Args:
        t (type):

    Returns:
        bool
    """
    generic_meta = getattr(typing, "GenericMeta", None)
    if generic_meta is not None:
        # Python 3.5 / 3.6: generics are instances of typing.GenericMeta.
        return isinstance(t, generic_meta)
    # Python 3.7+: generic classes carry typing.Generic in their MRO.
    try:
        return typing.Generic in t.mro()
    except AttributeError:
        # Not a class at all (no .mro()).
        return False
def getGpsTime(dt):
    """Return GPS time-of-week (seconds since midnight Sat/Sun) for a datetime."""
    # Shift Python's weekday numbering (Mon=0) so that Sunday = 0, Monday = 1, ...
    gps_day = (dt.weekday() + 1) % 7
    # Fold days, hours, minutes and seconds into one second count.
    return ((gps_day * 24 + dt.hour) * 60 + dt.minute) * 60 + dt.second
from re import T
def make_support_transforms():
    """
    Transforms for support images during inference stage.
    For transforms of support images during training, please visit dataset.py and dataset_fewshot.py
    """
    # Multi-scale resize targets, capped at 768 px on the longest side.
    scales = [512, 528, 544, 560, 576, 592, 608, 624, 640, 656, 672, 688, 704]
    # ImageNet mean/std normalization applied after tensor conversion.
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return T.Compose([
        T.RandomHorizontalFlip(),
        T.RandomResize(scales, max_size=768),
        normalize,
    ])
def single_label_normal_score(y_pred, y_gold):
    """
    Compute the exact-match score between predictions and gold labels.

    Example 1 (overall):
        y_pred=[1,2,3], y_gold=[2,2,3] -> total score 2/3.
    Example 2 (per predicted label):
        For each label, accuracy = correct predictions of that label /
        total predictions of that label. With y_pred=[1,2,3,2,3] and
        y_gold=[2,2,3,1,3]: "1" -> 0, "2" -> 1/2, "3" -> 1.

    :param y_pred: a list of labels, must be same length as y_gold
    :param y_gold: a list of labels, must be same length as y_pred
    :return:
        total_score: float, overall exact-match accuracy (Example 1)
        label_wise_accuracy: dict mapping each predicted label to its
            float accuracy (Example 2)
    """
    assert len(y_pred) == len(y_gold), 'y_pred and y_gold need to have same length'
    # Local import keeps this function free of the nltk dependency the original
    # relied on (nltk.defaultdict is just a re-export of collections.defaultdict).
    from collections import defaultdict
    if not y_gold:
        # Empty input: nothing to score (the original raised ZeroDivisionError).
        return 0.0, {}
    correct = defaultdict(int)
    total = defaultdict(int)
    n_exact = 0
    for pred, gold in zip(y_pred, y_gold):
        total[pred] += 1
        if pred == gold:
            correct[pred] += 1
            n_exact += 1
    # total[label] >= 1 for every key, so the division is always safe here
    # (the original wrapped it in a bare except that could mask real errors).
    label_wise_accuracy = {
        label: correct[label] / total[label] for label in total
    }
    total_score = n_exact / len(y_gold)
    return total_score, label_wise_accuracy
def get_tuning_curves(
    spike_times: np.ndarray,
    variable_values: np.ndarray,
    bins: np.ndarray,
    n_frames_sample=10000,
    n_repeats: int = 10,
    sample_frac: float = 0.4,
) -> dict:
    """
    Get tuning curves of firing rate wrt variables.
    Spike times and variable values are both in milliseconds.

    Returns a dictionary mapping each bin (upper edge, assuming uniform bin
    width) to n_repeats firing-rate values (Hz), each computed from a random
    sample of the data. Bins with too few frames are reported as NaN.
    NOTE(review): `sample_frac` is accepted but never used — confirm intent.
    """
    # get max 1 spike per 1ms bin
    spike_times = np.unique(spike_times.astype(int))  # in ms
    # get which frames are in which bin
    in_bin_indices = get_samples_in_bin(variable_values, bins)
    # get tuning curves (keys are the upper edge of each bin)
    bin_width = bins[1] - bins[0]
    tuning_curves = {(v + bin_width): [] for v in bins[:-1]}
    for _ in range(n_repeats):
        # sample n_frames_sample frames from each bin; bins with fewer than
        # n_frames_sample / 3 frames stay empty and are reported as NaN below
        sampled_frames = [
            np.random.choice(v, size=n_frames_sample, replace=True)
            if len(v) > n_frames_sample / 3
            else []
            for v in in_bin_indices.values()
        ]
        # get firing rate for each bin
        for i, b in enumerate(tuning_curves.keys()):
            # BUG FIX: the original tested `if sampled_frames:` (the whole
            # list, which is always truthy), so under-populated bins reported
            # 0 Hz instead of NaN. Check this bin's own sample instead.
            if len(sampled_frames[i]) > 0:
                # get spikes in bin's sampled frames
                spikes_in_bin = spike_times[
                    np.isin(spike_times, sampled_frames[i])
                ]
                tuning_curves[b].append(
                    len(spikes_in_bin) / n_frames_sample * 1000
                )  # in Hz
            else:
                tuning_curves[b].append(np.nan)
    return tuning_curves
import functools
def _accelerate_update_fn(forward_and_backward_fn,
                          optimizer,
                          n_devices,
                          accelerate=True):
  """Accelerate the given forward_and_backward_fn function.

  Builds an update function performing one optimizer step: it calls
  `forward_and_backward_fn(batch, weights, state, rng)` to obtain
  `((loss, state), gradients)`, then applies `optimizer.tree_update`.

  Args:
    forward_and_backward_fn: callable mapping (batch, weights, state, rng)
      to ((loss, state), gradients).
    optimizer: optimizer exposing
      `tree_update(step, gradients, weights, slots, opt_params)`.
    n_devices: number of local accelerator devices; 1 selects the (optionally
      jitted) single-device path, > 1 the pmapped multi-device path.
    accelerate: if True, wrap the single-device function in `fastmath.jit`
      (the multi-device path is always pmapped regardless of this flag).

  Returns:
    A function mapping (weights_and_slots, step, opt_params, batch, state,
    rng) to ((weights, slots), state, stats), where stats contains 'loss'.
  """
  if n_devices == 1:
    def single_device_update_fn(
        weights_and_slots, step, opt_params, batch, state, rng):
      step = jnp.array(step, dtype=jnp.int32)  # Needed in TFNP backend.
      weights, slots = weights_and_slots
      (loss, state), gradients = forward_and_backward_fn(
          batch, weights, state, rng)
      weights, slots, stats = optimizer.tree_update(
          step, gradients, weights, slots, opt_params)
      stats['loss'] = loss
      return (weights, slots), state, stats
    if accelerate:
      # TODO(afrozm): Find out the status of buffer donation on GPUs, then do
      # donate_argnums=(0,).
      single_device_update_fn = fastmath.jit(single_device_update_fn)
    return single_device_update_fn
  # More than one device (core), i.e. all of TPU configurations etc.
  assert n_devices > 1, f'{n_devices} should be greater than 1.'
  @functools.partial(fastmath.pmap, axis_name='batch', donate_argnums=(0,))
  def _multi_device_update_fn(
      weights_and_slots, step, opt_params, batch, state, rng):
    # We assume all tensors have the first dimension = n_devices.
    weights, slots = weights_and_slots
    (loss, state), gradients = forward_and_backward_fn(
        batch, weights, state, rng)
    # gradients now need to be summed over all the devices across different host
    # machines, n_devices is only the number of devices on *this* host machine.
    gradients = fastmath.psum(gradients, 'batch')
    n_devices_total = fastmath.psum(jnp.array(1.0), 'batch')
    # Average across hosts.
    gradients = fastmath.nested_map(lambda g: g / n_devices_total, gradients)
    weights, slots, stats = optimizer.tree_update(
        step, gradients, weights, slots, opt_params)
    stats['loss'] = loss
    return (weights, slots), state, stats
  def multi_device_update_fn(
      weights_and_slots, step, opt_params, batch, state, rng):
    # Need to replicate step to n_devices leading dimension.
    return _multi_device_update_fn(weights_and_slots,
                                   jnp.repeat(step, n_devices), opt_params,
                                   batch, state, rng)
  return multi_device_update_fn
def get_custom_feeds_ip_list(client: PrismaCloudComputeClient) -> CommandResults:
    """
    Get all the BlackListed IP addresses in the system.
    Implement the command 'prisma-cloud-compute-custom-feeds-ip-list'

    Args:
        client (PrismaCloudComputeClient): prisma-cloud-compute client.

    Returns:
        CommandResults: command-results object.
    """
    feeds = client.get_custom_ip_feeds()
    if not feeds:
        table = "No results found."
    else:
        # Normalize the modification timestamp and drop the internal id.
        if "modified" in feeds:
            feeds["modified"] = parse_date_string_format(date_string=feeds.get("modified", ""))
        feeds.pop("_id", None)
        table = tableToMarkdown(
            name="IP Feeds",
            t=feeds,
            headers=["modified", "feed"],
            removeNull=True,
            headerTransform=lambda header: header[0].upper() + header[1:]
        )
    return CommandResults(
        outputs_prefix="PrismaCloudCompute.CustomFeedIP",
        outputs=feeds,
        readable_output=table,
        outputs_key_field="digest",
        raw_response=feeds
    )
from typing import Optional
import sys
import os
def get_dist_egg_link(dist: "Distribution") -> Optional[str]:
    """Return the path of *dist*'s .egg-link file if it is an editable install.

    Scans every sys.path entry for '<project_name>.egg-link'; editable
    (development) installs leave such a file behind.

    :param dist: the installed distribution to check (needs .project_name)
    :return: path to the .egg-link file, or None if not an editable install
    """
    for path_item in sys.path:
        egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
        if os.path.isfile(egg_link):
            # BUG FIX: the original evaluated `egg_link` here without
            # returning it, so the function always fell through to None.
            return egg_link
    return None
import logging
def setup_new_file_handler(logger_name, log_level, log_filename, formatter, filter=None):
    """
    Sets up new file handler for given logger.

    The handler class is chosen from the module-level ``_kind`` setting:
    None -> plain FileHandler; 'time' -> time-based rotation every
    ``_interval`` seconds; anything else -> size-based rotation once the
    file exceeds ``_interval`` bytes. Rotated files are additionally
    compressed when ``_compress_after_rotation`` is set.

    :param logger_name: name of logger to which filelogger is added
    :param log_level: logging level
    :param log_filename: path to log file
    :param formatter: formatter for file logger
    :param filter: filter for file logger (note: shadows the builtin
        ``filter``; name kept for backward compatibility)
    :return: logging.FileHandler object (or a rotating subclass)
    """
    # These module-level settings are only read here; presumably they are
    # configured elsewhere in this module before this function is called.
    global write_mode
    global _kind
    global _interval
    global _backup_count
    global _compress_after_rotation
    global _compress_command
    global _compressed_file_extension
    logger = logging.getLogger(logger_name)
    if _kind is None:
        # No rotation requested: plain file handler.
        cfh = logging.FileHandler(log_filename, write_mode)
    elif _kind == 'time':
        # Time-based rotation: a new file every `_interval` seconds.
        if _compress_after_rotation:
            cfh = CompressedTimedRotatingFileHandler(compress_command=_compress_command,
                                                    compressed_file_extension=_compressed_file_extension, filename=log_filename,
                                                    when='S', interval=_interval, backupCount=_backup_count)
        else:
            cfh = TimedRotatingFileHandler(filename=log_filename, when='S', interval=_interval,
                                           backupCount=_backup_count)
    else:
        # Size-based rotation: here `_interval` is interpreted as max bytes.
        if _compress_after_rotation:
            cfh = CompressedRotatingFileHandler(compress_command=_compress_command,
                                                compressed_file_extension=_compressed_file_extension, filename=log_filename,
                                                mode=write_mode, backupCount=_backup_count,
                                                maxBytes=_interval)
        else:
            cfh = RotatingFileHandler(filename=log_filename, mode=write_mode, backupCount=_backup_count,
                                      maxBytes=_interval)
    cfh.setLevel(log_level)
    cfh.setFormatter(formatter)
    if filter:
        cfh.addFilter(filter)
    logger.addHandler(cfh)
    return cfh
import hashlib
def _gen_version(fields):
  """Derives a digest summarizing BotGroupConfig fields.

  The digest is sent to the bot in /handshake; the bot includes it in its
  state (and thus sends it with each /poll). When the server detects a bot
  running an older config version, it asks the bot to restart.

  Args:
    fields: dict with BotGroupConfig fields (without 'version').

  Returns:
    A string to be used as the 'version' field of the BotGroupConfig tuple.
  """
  # Hash the sorted-keys JSON representation, assumed stable enough. The
  # prefix and truncation make clear this is a dumb config hash, not a git
  # hash or anything like that.
  summary = dict(fields)
  summary['auth'] = [entry._asdict() for entry in summary['auth']]
  digest = hashlib.sha256(utils.encode_to_json(summary)).hexdigest()
  return 'hash:' + digest[:14]
import os
def server_factory(device, count):
    """Generate a virtusb server wrapping `count` instances of `device`."""
    controller = VirtualController()
    controller.devices = [device() for _ in range(count)]
    usbip_server = UsbIpServer(controller)
    if os.getuid() == 0:
        #pylint: disable=line-too-long
        LOGGER.warning('Super user permissions required for usbip. sudo will be used to escalate permissions when needed. You may be prompted for a password depending on your system configuration.')
    return usbip_server
import logging
def get_callee_account(global_state, callee_address, dynamic_loader):
    """
    Gets the callee's account from the global state, attempting a dynamic
    load when the contract is not already present.

    :param global_state: state to look in
    :param callee_address: address of the callee
    :param dynamic_loader: dynamic loader to use (may be None)
    :return: Account belonging to callee
    :raises ValueError: if the account is absent and cannot be loaded
    """
    environment = global_state.environment
    accounts = global_state.accounts
    try:
        return accounts[callee_address]
    except KeyError:
        # We have a valid call address, but contract is not in the modules list
        # (lazy %s args avoid building the message when INFO is disabled).
        logging.info("Module with address %s not loaded.", callee_address)
    if dynamic_loader is None:
        raise ValueError("No dynamic loader available to fetch %s" % callee_address)
    logging.info("Attempting to load dependency")
    try:
        code = dynamic_loader.dynld(environment.active_account.address, callee_address)
    except Exception:
        # Deliberately broad: any loader failure is treated as "not loadable".
        logging.info("Unable to execute dynamic loader.")
        raise ValueError("Dynamic loader failed for %s" % callee_address)
    if code is None:
        logging.info("No code returned, not a contract account?")
        raise ValueError("No code returned for %s" % callee_address)
    logging.info("Dependency loaded: %s", callee_address)
    callee_account = Account(callee_address, code, callee_address, dynamic_loader=dynamic_loader)
    accounts[callee_address] = callee_account
    return callee_account
from typing import Iterable
def get_products_with_summaries() -> Iterable[ProductWithSummary]:
    """
    The list of products that we have generated reports for.
    """
    by_name = {product.name: product for product in STORE.all_dataset_types()}
    summarised = [
        (by_name[name], get_product_summary(name))
        for name in STORE.list_complete_products()
    ]
    if not summarised:
        raise RuntimeError(
            "No product reports. "
            "Run `python -m cubedash.generate --all` to generate some."
        )
    return summarised
import json
import logging
from datetime import datetime
import asyncio
async def events_handler(value: str) -> None:
    """Consume scan complete events and mark the execution status as FINISHED."""
    try:
        event = json.loads(value)
        if event["eventId"] != EventId.SCAN_COMPLETE:
            return None
        network_name = event["topologyName"]
        details = json.loads(event["details"])
        token = details["token"]
    except json.JSONDecodeError:
        logging.exception("Failed to deserialize event data")
        return None
    except KeyError:
        logging.exception("Invalid event received from Kafka")
        return None
    logging.debug(f"Got scan complete event for {network_name}, token {token}")
    record = Scheduler.get_execution(token, network_name)
    if record is None:
        return None
    execution_id, execution = record
    execution.token_range.discard(token)
    if execution.token_range:
        # Other tokens from this execution are still outstanding.
        return None
    await Scheduler.update_execution_status(
        execution_id, ScanTestStatus.FINISHED, datetime.utcnow()
    )
    await Alerts.post(
        execution_id,
        f"Scan test for execution id {execution_id} is now complete.",
        Severity.INFO,
    )
    await asyncio.sleep(Scheduler.CLEAN_UP_DELAY_S)
    del Scheduler.executions[execution_id]
def _round_to_4(v):
"""Rounds up for aligning to the 4-byte word boundary."""
return (v + 3) & ~3 | c79736b4fe9e6e447b59d9ab033181317e0b80de | 26,797 |
import os
import sys
def cli(dry_run, force, find_links, index_url, extra_index_url, no_index, quiet, user_only, src_files):
    """Synchronize virtual environment with requirements.txt.

    Installs missing/outdated packages and uninstalls packages not listed in
    the given requirement files. Exits with status 2 on user error, otherwise
    with the exit status of the underlying sync operation.
    """
    # Fall back to the default requirements file when none was given.
    if not src_files:
        if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
            src_files = (DEFAULT_REQUIREMENTS_FILE,)
        else:
            msg = 'No requirement files given and no {} found in the current directory'
            log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))
            sys.exit(2)
    # Syncing against *.in files is almost certainly a mistake; allow it only
    # when --force is set (warn), otherwise abort.
    if any(src_file.endswith('.in') for src_file in src_files):
        msg = ('Some input files have the .in extension, which is most likely an error and can '
               'cause weird behaviour. You probably meant to use the corresponding *.txt file?')
        if force:
            log.warning('WARNING: ' + msg)
        else:
            log.error('ERROR: ' + msg)
            sys.exit(2)
    (pip_options, repository) = get_pip_options_and_pypi_repository(
        index_url=index_url, extra_index_url=extra_index_url,
        no_index=no_index, find_links=find_links)
    def parse_req_file(filename):
        # Parse one requirements file using the repository's package finder.
        return parse_requirements(
            filename, session=True, finder=repository.finder)
    requirements = flat_map(parse_req_file, src_files)
    try:
        # --force downgrades merge conflicts from errors to ignored.
        requirements = sync.merge(requirements, ignore_conflicts=force)
    except PrequError as e:
        log.error(str(e))
        sys.exit(2)
    installed_dists = get_installed_distributions(skip=[], user_only=user_only)
    to_install, to_uninstall = sync.diff(requirements, installed_dists)
    # Rebuild the pip flags (index/find-links/user) to pass to the installer.
    install_flags = []
    for link in repository.finder.find_links or []:
        install_flags.extend(['-f', link])
    if not repository.finder.index_urls:
        install_flags.append('--no-index')
    for (i, index_url) in enumerate(repository.finder.index_urls):
        if i == 0:
            install_flags.extend(['-i', index_url])
        else:
            install_flags.extend(['--extra-index-url', index_url])
    if user_only:
        install_flags.append('--user')
    sys.exit(sync.sync(to_install, to_uninstall, verbose=(not quiet), dry_run=dry_run,
                       install_flags=install_flags))
def bool_like(value, name, optional=False, strict=False):
    """
    Convert to bool or raise if not bool_like

    Parameters
    ----------
    value : object
        Value to verify
    name : str
        Variable name for exceptions
    optional : bool
        Flag indicating whether None is allowed
    strict : bool
        If True, then only allow bool. If False, allow types that support
        casting to bool.

    Returns
    -------
    converted : bool
        value converted to a bool
    """
    if optional and value is None:
        return value
    extra_text = ' or None' if optional else ''
    if strict:
        if isinstance(value, bool):
            return value
        raise TypeError('{0} must be a bool{1}'.format(name, extra_text))
    # Collapse array-like values (anything exposing a callable .squeeze).
    squeeze = getattr(value, 'squeeze', None)
    if callable(squeeze):
        value = squeeze()
    try:
        return bool(value)
    except Exception:
        raise TypeError('{0} must be a bool (or bool-compatible)'
                        '{1}'.format(name, extra_text))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.