content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def d2tf(ndp, days):
    """
    Wrapper for ERFA function ``eraD2tf``.

    Decompose days to hours, minutes, seconds, fraction.

    Parameters
    ----------
    ndp : int array
        Resolution of the fractional-seconds field (per the ERFA notes:
        0 gives whole seconds, 3 gives milliseconds, negative values
        round to coarser units).
    days : double array
        Interval in days.

    Returns
    -------
    sign : char array
        ``'+'`` or ``'-'``.
    ihmsf : int array
        Hours, minutes, seconds, fraction.

    Notes
    -----
    Per the ERFA documentation: the largest useful ``ndp`` is bounded by
    the integer capacity of ``ihmsf[3]`` (typically 9 for 32-bit ints);
    ``abs(days)`` may exceed 1.0, and the caller must handle values that
    round up to 24 hours.
    Copyright (C) 2013-2017, NumFOCUS Foundation.
    Derived, with permission, from the SOFA library. See notes at end of file.
    """
    raw_sign, ihmsf = ufunc.d2tf(ndp, days)
    # Reinterpret the raw sign output as single-byte characters.
    return raw_sign.view(dt_bytes1), ihmsf
def libxl_init_members(ty, nesting = 0):
    """Returns a list of members of ty which require a separate init"""
    # Only aggregates carry fields; anything else has no init members.
    if not isinstance(ty, idl.Aggregate):
        return []
    return [member for member in ty.fields
            if not member.const and isinstance(member.type, idl.KeyedUnion)]
import os
from sys import path
import imp
def find_commands(cache=True):
    """
    Scan ``command_dir`` recursively for Python scripts and extract every
    QQCommand subclass they define.

    :param cache: when True, return the previously-built command map if
        one exists instead of re-scanning the directory.
    :return: dict mapping command name -> command class (each class gets
        a ``fullpath`` attribute pointing at its source file).
    """
    global _command_cache
    if _command_cache is not None and cache:
        return _command_cache
    commands = {}
    for dirpath, dirnames, filenames in os.walk(command_dir):
        for filename in filenames:
            # BUG FIX: the original did `from sys import path` and then
            # called path.join/isfile/splitext -- sys.path is a list and
            # has none of those; use os.path.
            fullpath = os.path.join(dirpath, filename)
            # BUG FIX: endswith('py') also matched names like "foo.npy"
            # or "happy"; require a real ".py" extension.
            if os.path.isfile(fullpath) and filename.endswith('.py'):
                # Attempt to load the script and extract any QQCommands
                # NOTE: `imp` is deprecated (removed in Python 3.12);
                # consider importlib.machinery.SourceFileLoader.
                mod_name, ext = os.path.splitext(filename)
                try:
                    mod = imp.load_source(mod_name, fullpath)
                    for name in dir(mod):
                        obj = getattr(mod, name)
                        cmd_name = getattr(obj, 'name', None)
                        if cmd_name is not None and QQCommand in getattr(obj, '__mro__', []):
                            obj.fullpath = fullpath
                            commands[cmd_name] = obj
                except Exception as e:
                    print('Warning: failed attemping to load script "{}"'.format(filename))
                    print('Extended failure information:')
                    print(e)
    _command_cache = commands
    return commands
from datetime import datetime
import os
import fnmatch
def snr_and_sounding(radar, soundings_dir=None, refl_field_name='DBZ'):
    """
    Compute the signal-to-noise ratio as well as interpolating the radiosounding
    temperature on to the radar grid. The function looks for the radiosoundings
    that happened at the closest time from the radar. There is no time
    difference limit.

    Parameters:
    ===========
    radar:
        Py-ART radar object (must expose radar.time['data'/'units']).
    soundings_dir: str
        Path to the radiosoundings directory.
    refl_field_name: str
        Name of the reflectivity field.

    Returns:
    ========
    z_dict: dict
        Altitude in m, interpolated at each radar gates.
    temp_info_dict: dict
        Temperature in Celsius, interpolated at each radar gates.
    snr: dict
        Signal to noise ratio.
    """
    if soundings_dir is None:
        soundings_dir = "/g/data2/rr5/vhl548/soudings_netcdf/"
    # Getting radar date.
    radar_start_date = netCDF4.num2date(radar.time['data'][0], radar.time['units'])
    # Listing radiosounding files.
    # BUG FIX: this module does `from datetime import datetime`, so the
    # original `datetime.datetime.strftime/strptime` calls raised
    # AttributeError; call strftime on the date objects and strptime on
    # the imported class directly.
    sonde_pattern = radar_start_date.strftime('YPDN_%Y%m%d*')
    all_sonde_files = sorted(os.listdir(soundings_dir))
    try:
        # The radiosoundings for the exact date exists.
        sonde_name = fnmatch.filter(all_sonde_files, sonde_pattern)[0]
    except IndexError:
        # The radiosoundings for the exact date does not exist, looking for the
        # closest date.
        dtime = [datetime.strptime(dt, 'YPDN_%Y%m%d_%H.nc') for dt in all_sonde_files]
        closest_date = nearest(dtime, radar_start_date)
        sonde_pattern = closest_date.strftime('YPDN_%Y%m%d*')
        radar_start_date = closest_date
        sonde_name = fnmatch.filter(all_sonde_files, sonde_pattern)[0]
    interp_sonde = netCDF4.Dataset(os.path.join(soundings_dir, sonde_name))
    temperatures = interp_sonde.variables['temp'][:]
    times = interp_sonde.variables['time'][:]
    heights = interp_sonde.variables['height'][:]
    my_profile = pyart.retrieve.fetch_radar_time_profile(interp_sonde, radar)
    z_dict, temp_dict = pyart.retrieve.map_profile_to_gates(temperatures, my_profile['height'], radar)
    temp_info_dict = {'data': temp_dict['data'],
                      'long_name': 'Sounding temperature at gate',
                      'standard_name': 'temperature',
                      'valid_min': -100,
                      'valid_max': 100,
                      'units': 'degrees Celsius',
                      'comment': 'Radiosounding date: %s' % (radar_start_date.strftime("%Y/%m/%d"))}
    snr = pyart.retrieve.calculate_snr_from_reflectivity(radar, refl_field=refl_field_name)
    return z_dict, temp_info_dict, snr
from operator import concat
def make_weekly_data(con, ticker_id, begin_date, today):
    """
    Resample daily price data to weekly (Friday-anchored) bars.

    INPUTS:
    con (mysql) - pymysql database connection
    ticker_id (int) - Ticker id number from symbols table
    begin_date (str) - Last price date in series. Iso8601 standard format
    today (str) - Today's date in iso8601 standard format
    OUTPUT:
    Returns a pandas dataframe resampled to weekly
    """
    # BUG FIX: the module imported `concat` from `operator`, whose
    # signature is concat(a, b) and has no `axis` keyword, so the final
    # combine step raised TypeError; the Series must be combined with
    # pandas.concat instead.
    import pandas as pd
    sql = """SELECT * FROM daily_data WHERE symbol_id="{}"
    AND price_date BETWEEN "{}" AND "{}";"""
    df = read_db(sql.format(ticker_id, begin_date, today), con,
                 index_col="price_date", coerce_float=True,
                 parse_dates=["price_date", ])
    df = df.drop("id", axis=1)
    # NOTE(review): `nan` is not defined in this snippet -- presumably
    # numpy.nan imported elsewhere in the module; filling NaN with NaN
    # is a no-op anyway -- confirm intent.
    df.fillna(value=nan, inplace=True)
    o = df.open.resample('1W-FRI').ohlc().open
    h = df.high.resample('1W-FRI').max()
    l = df.low.resample('1W-FRI').min()
    c = df.close.resample('1W-FRI').ohlc().close
    v = df.volume.resample('1W-FRI').sum()
    ed = df.ex_dividend.resample('1W-FRI').sum()
    sr = df.split_ratio.resample('1W-FRI').cumprod()
    ao = df.adj_open.resample('1W-FRI').ohlc().open.rename("adj_open")
    ah = df.adj_high.resample('1W-FRI').max()
    al = df.adj_low.resample('1W-FRI').min()
    ac = df.adj_close.resample('1W-FRI').ohlc().close.rename("adj_close")
    av = df.adj_volume.resample('1W-FRI').sum()
    data = [o, h, l, c, v, ed, sr, ao, ah, al, ac, av]
    weekly = pd.concat(data, axis=1)
    return weekly
import zmq
def kill_service(ctrl_addr):
    """Ask the LLH service listening at ``ctrl_addr`` to shut down.

    Sends the literal "die" command over a REQ socket and returns the
    service's reply string. The receive times out after one second.
    """
    context = zmq.Context.instance()
    with context.socket(zmq.REQ) as sock:
        sock.setsockopt(zmq.LINGER, 0)       # don't block on close
        sock.setsockopt(zmq.RCVTIMEO, 1000)  # 1 s receive timeout
        sock.connect(ctrl_addr)
        sock.send_string("die")
        return sock.recv_string()
def dmp_grounds(c, n, u):
    """
    Return a list of ``n`` multivariate constants.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dmp_grounds
    >>> dmp_grounds(ZZ(4), 3, 2)
    [[[[4]]], [[[4]]], [[[4]]]]
    >>> dmp_grounds(ZZ(4), 3, -1)
    [4, 4, 4]
    """
    if not n:
        return []
    # Negative level means the ground domain itself: plain constants.
    if u < 0:
        return [c] * n
    return [dmp_ground(c, u) for _ in range(n)]
def has_attrs(inst, *args):
    """
    Check that the instance has all attributes named in *args and that
    none of them is falsy.

    :param inst: obj instance
    :param args: attribute names of the object
    :return: True iff every named attribute exists and is truthy
    """
    # BUG FIX: the original `if not getattr(inst, a, None)` was missing
    # its colon (SyntaxError). Also, getattr() with a default never
    # raises AttributeError, so the try/except around it was dead code.
    return all(getattr(inst, a, None) for a in args)
def get_related_offers(order):
    """
    Search related offers to order from parameter.

    :param order: client order
    :return: comma-separated string of related offer pks ("1, 2, 3"),
        or empty string when the order has no offers
    """
    # Original code evaluated the queryset twice (count() + filter())
    # and hand-built the string with a trailing-separator trim; one
    # query plus str.join produces the identical result.
    order_offers = OrdersOffers.objects.filter(order=order)
    return ", ".join(str(item.offer.pk) for item in order_offers)
import torch
def undo_imagenet_preprocess(image):
    """ Undo imagenet preprocessing.

    Input:
    - image (pytorch tensor): image after imagenet preprocessing in CPU,
      shape = (3, 224, 224)
    Output:
    - undo_image (pytorch tensor): pixel values in [0, 1]
    """
    # Per-channel ImageNet statistics, shaped for broadcasting over (3, H, W).
    channel_mean = torch.Tensor([0.485, 0.456, 0.406]).view((3, 1, 1))
    channel_std = torch.Tensor([0.229, 0.224, 0.225]).view((3, 1, 1))
    # Invert normalize: x_norm = (x - mean) / std  =>  x = x_norm * std + mean
    return image * channel_std + channel_mean
def ob_mol_from_file(fname, ftype="xyz", add_hydrogen=True):
    """
    Import a molecule from a file using OpenBabel.

    fname: the path string to the file to be opened
    ftype: the file format
    add_hydrogen: whether or not to insert hydrogens automatically
        (openbabel does not always add hydrogens automatically)
    Returns:
        an openbabel OBMol molecule object
    """
    # Configure the converter: read `ftype`, write mol2.
    converter = openbabel.OBConversion()
    converter.SetInAndOutFormats(ftype, "mol2")
    # Parse the file into a fresh molecule object.
    molecule = openbabel.OBMol()
    converter.ReadFile(molecule, fname)
    if add_hydrogen is True:
        molecule.AddHydrogens()
    return molecule
def collapse(html):
    """Remove any indentation and newlines from the html."""
    stripped_lines = (line.strip() for line in html.split('\n'))
    return ''.join(stripped_lines).strip()
import torch
def multiclass_cross_entropy(phat, y, N_classes, weights, EPS = 1e-30) :
    """ Per instance weighted cross entropy loss
    (negative log-likelihood)

    :param phat: predicted class probabilities, shape (batch, N_classes)
    :param y: integer class labels, shape (batch,)
    :param N_classes: number of classes
    :param weights: per-class (or scalar) loss weights
    :param EPS: small constant guarding log(0)
    :return: scalar loss averaged over the batch
    """
    # BUG FIX: `F` was never imported in this module's visible scope.
    import torch.nn.functional as F
    y = F.one_hot(y, N_classes)
    # Protection against log(0) via EPS.
    loss = - y*torch.log(phat + EPS) * weights
    loss = loss.sum() / y.shape[0]
    return loss
def get_arc_polygon(resolution, size=(1, 1), arc=(0, 1)):
    """Return a list of (x, y) points approximating an elliptical arc.

    resolution is the number of segments (resolution + 1 points are
    returned). size gives the horizontal and vertical radii.
    arc[0] is the starting position and arc[1] the arc span, both as
    fractions of a full turn. (The original docstring called arc[1] an
    "ending angle", but the code uses it as a span.)

    Defaults are tuples rather than the original mutable lists -- safer
    and backward-compatible, since they are only indexed.
    """
    points = []
    for step in range(resolution + 1):
        # Fraction of the full turn reached at this step.
        turn = arc[0] + arc[1] * (step / resolution)
        angle = turn * pi * 2
        points.append((size[0] * sin(angle), size[1] * cos(angle)))
    return points
def load(filename):
    """
    Load data line by line from a .wbp file and build a list of
    welleng.exchange.wbp.WellPlan objects, one per plan in the file.

    Parameters
    ----------
    filename: string
        The location and filename of the .wbp file to load.

    Returns
    -------
    A list of welleng.exchange.wbp.WellPlan objects.
    """
    assert filename[-4:] == '.wbp', 'Wrong format'
    with open(filename) as f:
        wbp_data = [row.rstrip() for row in f]
    total_lines = len(wbp_data)

    line = 0
    depth_unit = None
    surface_unit = None
    targets = []
    well_plans = []
    while True:
        plan = WellPlan(
            wbp_data=wbp_data,
            depth_unit=depth_unit,
            surface_unit=surface_unit,
            targets=[],
            line=line
        )
        well_plans.append(plan)
        line = plan.lines
        if line == total_lines:
            return well_plans
        # Keep passing the units on to each subsequent well.
        depth_unit = plan.depth_unit
        surface_unit = plan.surface_unit
        # NOTE(review): `targets` is collected here but each WellPlan is
        # constructed with targets=[] -- confirm whether this was meant
        # to be forwarded.
        targets = plan.targets
def get_input_fn(data_dir, is_training, num_epochs, batch_size, shuffle, normalize=True):
    """
    Build an ``input_fn`` from which batches of data can be obtained.

    Parameters
    ----------
    data_dir: str
        Path to where the mnist data resides
    is_training: bool
        Whether to read the training or the test portion of the data
    num_epochs: int
        Number of data epochs
    batch_size: int
        Batch size
    shuffle: bool
        Whether to shuffle the data or not

    Returns
    -------
    input_fn: callable
        Returns a batch of image and label tensors, of shape
        (batch size, HEIGHT, WIDTH, CHANNELS) and (batch size),
        respectively.
    """
    data_gen = get_data_generator(data_dir, is_training, normalize)
    dataset = tf.data.Dataset.from_generator(
        generator=data_gen,
        output_types=(tf.float32, tf.int64),
        output_shapes=(tf.TensorShape([HEIGHT, WIDTH, CHANNELS]),
                       tf.TensorShape([]))
    )
    if shuffle:
        dataset = dataset.shuffle(buffer_size=2000, reshuffle_each_iteration=True)
    dataset = dataset.repeat(count=num_epochs).batch(batch_size=batch_size)

    def input_fn():
        # TF1-style iterator; yields one batch per call of get_next().
        images, labels = dataset.make_one_shot_iterator().get_next()
        return images, labels
    return input_fn
from pyngrok import ngrok
from jupyter_dash import JupyterDash
from dash import Dash
def getDashApp(title:str, notebook:bool, usetunneling:bool, host:str, port:int, mode: str, theme, folder):
    """
    Creates a dash or jupyter dash app, returns the app and a function to run it

    :param title: Passed to dash app
    :param notebook: Uses jupyter dash with default port of 1005 (instead of 8050)
    :param usetunneling: Enables ngrok tunneling for kaggle notebooks
    :param host: Passed to dash app.run()
    :param port: Can be used to override default ports
    :param mode: Could be 'inline', 'external', or 'jupyterlab'
    :param theme: Passed to dash app external stylesheets
    :param folder: Passed to assets folder to load theme css
    """
    if port is None:
        port = 1005 if notebook else 8050
    if notebook:
        if usetunneling:
            # BUG FIX: the original `try:` block had an empty body
            # (SyntaxError); the guarded import belongs inside it so a
            # missing pyngrok produces the friendly error below.
            try:
                from pyngrok import ngrok
            except ImportError:
                raise ImportError("Pyngrok is required to run in a kaggle notebook. "
                                  "Use pip install MLVisualizationTools[kaggle-notebook]")
            ngrok.kill() #disconnects active tunnels
            tunnel = ngrok.connect(port)
            print("Running in an ngrok tunnel. This limits you to 40 requests per minute and one active app.",
                  "For full features use google colab instead.")
            url = tunnel.public_url
        else:
            url = None
        # BUG FIX: same empty-try problem as above for JupyterDash.
        try:
            from jupyter_dash import JupyterDash
        except ImportError:
            raise ImportError("JupyterDash is required to run in a notebook. "
                              "Use pip install MLVisualizationTools[dash-notebook]")
        app = JupyterDash(__name__, title=title, server_url=url,
                          external_stylesheets=[theme], assets_folder=folder)
    else:
        app = Dash(__name__, title=title, external_stylesheets=[theme], assets_folder=folder)

    def runApp():
        # JupyterDash accepts a display mode; plain Dash does not.
        if notebook:
            app.run_server(host=host, port=port, mode=mode, debug=True)
        else:
            app.run_server(host=host, port=port, debug=True)
    return app, runApp
def get_pagination_request_params():
    """
    Pagination request params for a @doc decorator in API view.
    """
    params = {}
    params["page"] = "Page"
    params["per_page"] = "Items per page"
    return params
def access_bit(data, num):
    """ Return the bit at position ``num`` of a bytes array
    (bits are numbered MSB-first within each byte).
    """
    byte_index, bit_offset = divmod(int(num), 8)
    shift = 7 - bit_offset
    return (data[byte_index] >> shift) & 1
def _any_isclose(left, right):
    """Short circuit any isclose for ndarray."""
    predicate = np.isclose
    return _any(predicate, left, right)
def push(src, dest):
    """
    Push object from host to target.

    :param src: string path to source object on host
    :param dest: string destination path on target
    :return: result of _exec_command() execution
    """
    # Assemble the full adb command line and hand it off.
    cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PUSH, src, dest]
    return _exec_command(cmd)
def merge_set_if_true(set_1, set_2):
    """
    Merge two sets when both are truthy; otherwise return whichever one
    is truthy, or None when neither is.

    :return: New Set
    """
    if not set_1:
        # set_1 is falsy: return set_2 if it carries anything, else None.
        return set_2 if set_2 else None
    if not set_2:
        return set_1
    return set_1.from_merge(set_1, set_2)
import pytz
def localtime(utc_dt, tz_str):
    """
    Convert utc datetime to local timezone datetime.

    :param utc_dt: datetime, utc
    :param tz_str: str, pytz e.g. 'US/Eastern'
    :return: datetime, in timezone of tz
    """
    target_zone = pytz.timezone(tz_str)
    # normalize() fixes the offset around DST transitions after astimezone.
    return target_zone.normalize(utc_dt.astimezone(target_zone))
from typing import List
import torch
def _flatten_tensor_optim_state(
    state_name: str,
    pos_dim_tensors: List[torch.Tensor],
    unflat_param_names: List[str],
    unflat_param_shapes: List[torch.Size],
    flat_param: FlatParameter,
) -> torch.Tensor:
    """
    Flattens the positive-dimension tensor optimizer state given by the values
    ``tensors`` for the state ``state_name`` for a single flattened parameter
    ``flat_param`` corresponding to the unflattened parameter names
    ``unflat_param_names`` and unflatted parameter shapes
    ``unflat_param_shapes``. This flattens each unflattened parameter's tensor
    state into one tensor.
    NOTE: We use zero tensors for any unflattened parameters without state
    since some value is required to fill those entries. This assumes that the
    zero tensor is mathematically equivalent to having no state, which is true
    for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
    optimizers.
    Args:
        state_name (str): Optimizer state name.
        pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
            optimizer state values for the unflattened parameters corresponding
            to the single flattened parameter.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the single flattened parameter.
        unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
            corresponding to the single flattened parameter.
        flat_param (FlatParameter): The flattened parameter.
    Returns:
        torch.Tensor: A flattened tensor containing the optimizer state
        corresponding to ``state_name`` constructed by concatenating the
        unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
        tensors for any unflattened parameters without the state).
    """
    # `None` entries mean "no state for this parameter"; they are filled
    # with zero tensors further below.
    non_none_tensors = [t for t in pos_dim_tensors if t is not None]
    # Check that all are tensors with the same dtype
    # NOTE(review): this also raises when *every* entry is None
    # (len(dtypes) == 0) -- confirm that case cannot occur upstream.
    dtypes = set(t.dtype for t in non_none_tensors)
    if len(dtypes) != 1:
        raise ValueError(
            "All unflattened parameters comprising a single flattened "
            "parameter must have positive-dimension tensor state with the "
            f"same dtype but got dtypes {dtypes} for state {state_name} and "
            f"unflattened parameter names {unflat_param_names}"
        )
    dtype = next(iter(dtypes))
    # Check that each tensor state matches its parameter's shape
    for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
        if tensor is None and len(shape) == 0:
            # A zero-dim parameter cannot be represented by a zero fill here.
            raise ValueError(
                "Flattening a zero-dimension parameter is not supported"
            )
        elif tensor is not None and tensor.shape != shape:
            raise ValueError(
                "Tensor optimizer state does not have same shape as its "
                f"parameter: {tensor.shape} {shape}"
            )
    # Flatten the tensor states: we do not need to add any padding since the
    # flattened optimizer state tensor sharded via `_get_shard()`, which pads
    # the shard as needed (just like for the flattened parameter)
    cpu_device = torch.device("cpu")
    # Move every state tensor to CPU and flatten it; missing states become
    # flattened zero tensors of the parameter's shape.
    tensors = [
        torch.flatten(state_value.to(cpu_device)) if state_value is not None
        else torch.flatten(torch.zeros(
            size=shape, dtype=dtype, device=cpu_device,
        ))
        for state_value, shape
        in zip(pos_dim_tensors, unflat_param_shapes)
    ]
    flat_tensor = torch.cat(tensors)
    # Sanity check: the concatenated state must match the flattened
    # parameter's original (unsharded, unpadded) size.
    flat_param_shape = flat_param._orig_size  # type: ignore[attr-defined]
    assert flat_tensor.shape == flat_param_shape, \
        f"tensor optim state: {flat_tensor.shape} " \
        f"flattened parameter: {flat_param_shape}"
    return flat_tensor
def variance ( func , xmin = None , xmax = None , err = False ) :
    """Get the variance for the distribution using
    >>> fun = ...
    >>> v = variance( fun , xmin = 10 , xmax = 50 )
    """
    # Build the Variance actor (from ostap.stats.moments) and let
    # sp_action drive it over the requested range.
    def actor(x1, x2):
        return Variance(x1, x2, err)
    return sp_action(func, actor, xmin, xmax)
def get_mysql_entitySets(username, databaseName):
    """ View all the entity sets (tables) in the databaseName.

    Connects as `username` to the schema "<username>_<databaseName>",
    lists its tables, and returns a JSON response with either the table
    names or the MySQL error details.
    """
    password = get_password(username)
    try:
        cnx = connectSQLServerDB(username, password, username + "_" + databaseName)
        mycursor = cnx.cursor()
        # NOTE(review): SQL identifiers are built by string concatenation;
        # MySQL cannot bind identifiers as parameters, so `username` and
        # `databaseName` must be validated/whitelisted upstream to avoid
        # SQL injection. Also, USE is redundant here since the connection
        # already selects this schema -- confirm before removing.
        sql = "USE " + username + "_" + databaseName + ";"
        mycursor.execute(sql)
        sql = "SHOW TABLES;"
        mycursor.execute(sql)
        # Each cursor row is a 1-tuple holding a table name.
        tables = [table[0] for table in mycursor]
        cnx.close()
        return jsonify(success=1, entitySets=tables)
    except mysql.connector.Error as err:
        # Surface the MySQL error code/message to the API caller.
        return jsonify(success=0, error_code=err.errno, message=err.msg)
def check_dbconnect_success(sess, system):
    """
    Test whether the database connection works (a successful probe query
    implies the database exists and is reachable).

    Args:
        sess: database connect session
        system: name of the system in use

    Returns:
        [0]: status (True/False)
        [1]: err_msg (result message; "ok" on success, the exception
             otherwise)
    """
    status = False
    err_msg = "ok"
    try:
        # A falsy result from the probe query counts as a failure too.
        if not sess.execute("select 1 as is_alive"):
            raise Exception
        status = True
    except Exception as e:
        err_msg = e
        status = False
    finally:
        # Always release the session, success or failure.
        sess.close()
    return status, err_msg
def split(filename, size=10.):
    """split the figure into color bands"""
    arr, aspect = _load_array(filename)
    fig = Figure(figsize=(size, size*aspect))
    # One colormap per band: R, G, B, then alpha/gray.
    cmaps = ('Reds_r', 'Greens_r', 'Blues_r', 'gray_r')
    for idx, band in enumerate(arr):
        # Lay the four bands out on a 2x2 grid.
        col = idx % 2
        row = idx // 2
        ax = fig.add_axes([col * .5, (1 - row) * .5, .5, .5])
        ax.imshow(band, cmap=cmaps[idx])
        ax.axis('off')
    return _show(fig)
from sys import argv
def main():
    """ Main body """
    # Require exactly one argument: the grid size.
    if len(argv) != 2:
        print("usage: python gen_rand_sys.py <size of grid>")
        return 1
    size = int(argv[1])
    grid = gen_rand_sys(size)
    print(grid)
def _compute_common_args(mapping):
    """Compute the list of arguments for dialog common options.

    Compute a list of the command-line arguments to pass to dialog
    from a keyword arguments dictionary for options listed as "common
    options" in the manual page for dialog. These are the options
    that are not tied to a particular widget.

    This allows to specify these options in a pythonic way, such as:
       d.checklist(<usual arguments for a checklist>,
                   title="...",
                   backtitle="...")
    instead of having to pass them with strings like "--title foo" or
    "--backtitle bar".

    Notable exceptions: None
    """
    args = []
    for option, value in mapping.items():
        # Each option translates to one or more dialog CLI tokens.
        args += _common_args_syntax[option](value)
    return args
def is_url(url):
    """Check that the string starts with an http(s) URL scheme."""
    return url.startswith(("https://", "http://"))
import re
def _preclean(Q):
    """
    Clean before annotation.

    Normalizes a question string: inserts a space after '#' before a
    digit, pads the euro sign, strips apostrophes, commas, question
    marks, double quotes and '(s)', normalizes the listed whitespace
    variants, and lower-cases the result.
    """
    # Put a space between '#' and a following digit (e.g. "#1" -> "# 1").
    Q = re.sub('#([0-9])', r'# \1', Q)
    # NOTE(review): the space-for-space replace below appears intended to
    # collapse a double space into one, and the final replace turns
    # non-breaking spaces (U+00A0) into regular spaces -- confirm.
    Q = Q.replace('€', ' €').replace('\'', ' ').replace(',', '').replace('?', '').replace('\"', '').replace('(s)', '').replace('  ', ' ').replace(u'\xa0', u' ')
    return Q.lower()
import aiohttp
async def job_info(request):
    """Get job info.

    Looks up the connection and job uids from the route, asks the agent
    service for the job's info, and returns it as a JSON response.
    """
    manager = request.app[common.KEY_CONN_MANAGER]
    connection = manager.connection(request.match_info[ROUTE_VARIABLE_CONNECTION_UID])
    job_uid = request.match_info[ROUTE_VARIABLE_JOB_UID]
    info = await connection.call_simple(
        pagent.agent_service.AgentService.job_info.__name__, job_uid
    )
    return aiohttp.web.json_response(_extend_job_info(connection, info))
from typing import Dict
from typing import Callable
def load_dataset_map() -> Dict[str, Callable]:
    """
    Get a map of datasets.

    Returns:
        Dict[str, Callable]: Key: Dataset name, Value: loader function
        which returns (X, y).
    """
    datasets = {
        "iris-2d": load_iris_2d,
        "wine-2d": load_wine_2d,
        "diabetes": load_diabetes,
        "audit": load_audit,
        "banknotes": load_banknotes,
        "ionosphere": load_ionosphere,
        "sonar": load_sonar,
        "wheat-2d": load_wheat_2d,
    }
    # Synthetic datasets: "easy" (fully informative, well separated) and
    # "hard" (mostly redundant features, minimal class separation).
    for n_feats in (8, 64, 128, 256, 512, 1024, 2048):
        datasets["synth-" + str(n_feats) + "-easy"] = make_load_synth(
            n_features=n_feats,
            n_informative=n_feats,
            n_redundant=0,
            n_clusters_per_class=2,
            class_sep=0.5,
        )
        datasets["synth-" + str(n_feats) + "-hard"] = make_load_synth(
            n_features=n_feats,
            n_informative=int(n_feats / 4),
            n_redundant=int(n_feats / 2),
            n_clusters_per_class=2,
            class_sep=0.01,
        )
    return datasets
def m2fs_pixel_flat(flatfname, fiberconfig, Npixcut):
    """
    Use the flat to find pixel variations.
    DON'T USE THIS. It doesn't work.

    For each object/order fiber trace, extracts a band of +-Npixcut
    pixels around the trace, rectifies it in y, divides by a running
    median to estimate per-pixel response, and writes the result into a
    full-frame array (initialized to 1.0 everywhere else).

    WARNING: dumps intermediate arrays (medarr.npy etc.) into the
    current directory on every iteration, overwriting previous files.
    """
    R, eR, header = read_fits_two(flatfname)
    #shape = R.shape
    #X, Y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
    tracefn = m2fs_load_trace_function(flatfname, fiberconfig)
    ## This is the output array
    pixelflat = np.ones_like(R)
    Npixuse = Npixcut-1
    # NOTE(review): fiberconfig appears to be a sequence where [0] is the
    # object count, [1] the order count, and [4] per-order (Xmin, Xmax)
    # pairs -- confirm against the fiberconfig producer.
    Nobj, Nord = fiberconfig[0], fiberconfig[1]
    for iobj in range(Nobj):
        for iord in range(Nord):
            ## Set up empty array to process and put back
            Xmin, Xmax = fiberconfig[4][iord]
            Xarr = np.arange(Xmin,Xmax+1)
            Npix = len(Xarr)
            medarr = np.zeros((Npix,2*Npixcut+1))
            ## Follow the trace and get relevant pixels in every location
            y0 = tracefn(iobj, iord, Xarr)
            ix_y = np.round(y0).astype(int)
            ix_ymin = ix_y-Npixcut
            ix_ymax = ix_y+Npixcut
            # Copy the band of +-Npixcut rows around the rounded trace.
            for j in range(2*Npixcut+1):
                dj = j - Npixcut
                medarr[:,j] = R[Xarr,ix_y+dj]
            ## Rectify the pixels in just the y direction
            #yloc = y0[:,np.newaxis] + np.arange(-Npixcut,Npixcut+1)[np.newaxis,:]
            # Sub-pixel offset of the true trace from the rounded one.
            yoff = y0-ix_y
            # Forward/backward shift maps for geometric_transform
            # (x[0] is the row index, x[1] the column within the band).
            def mapping1(x):
                return x[0], yoff[x[0]]+x[1]
            def mapping2(x):
                return x[0], -yoff[x[0]]+x[1]
            rect_medarr = ndimage.geometric_transform(medarr, mapping1, mode='nearest')
            # The typical value as a function of X
            # NOTE(review): medX is computed but unused (only the
            # commented-out model used it).
            medX = np.median(rect_medarr,axis=1)
            #medY = np.median(rect_medarr,axis=0)
            #medR = medX[:,np.newaxis] * medY[np.newaxis,:]
            #medR = medR * np.median(rect_medarr)/np.median(medR)
            # Smooth model of the band: 9-pixel running median along X.
            medR = ndimage.median_filter(rect_medarr, (9,1), mode='nearest')
            # Debug dumps -- overwritten every iteration.
            np.save("medarr.npy",medarr)
            np.save("rect_medarr.npy",rect_medarr)
            np.save("rect_smooth.npy",medR)
            rect_ratio = rect_medarr/medR
            np.save("rect_ratio.npy",rect_ratio)
            # Shift back
            medR = ndimage.geometric_transform(medR, mapping2, mode='nearest')
            np.save("smooth.npy",medR)
            # Ratio of data to smooth model = per-pixel response estimate.
            medR = medarr/medR
            #medR = medR/np.median(medR)
            # Write the inner +-Npixuse columns back into the full frame.
            for j in range(2*Npixuse+1):
                dj = j - Npixuse
                pixelflat[Xarr,ix_y+dj] = medR[:,j]
    return pixelflat
def HTTP405(environ, start_response):
    """
    HTTP 405 Response

    WSGI application returning "405 METHOD NOT ALLOWED" with an empty body.
    """
    start_response('405 METHOD NOT ALLOWED', [('Content-Type', 'text/plain')])
    # BUG FIX: PEP 3333 requires the response iterable to yield bytes;
    # the original returned [''] (a str), which WSGI servers reject.
    return [b'']
from operator import concat
import torch
import io
import time
def train_one_batch(batch, generator, optimizer, reward_obj, opt, global_step, tb_writer, lagrangian_params=None, cost_objs=[]):
#src, src_lens, src_mask, src_oov, oov_lists, src_str_list, trg_sent_2d_list, trg, trg_oov, trg_lens, trg_mask, _ = batch
"""
src: a LongTensor containing the word indices of source sentences, [batch, src_seq_len], with oov words replaced by unk idx
src_lens: a list containing the length of src sequences for each batch, with len=batch
src_mask: a FloatTensor, [batch, src_seq_len]
src_oov: a LongTensor containing the word indices of source sentences, [batch, src_seq_len], contains the index of oov words (used by copy)
oov_lists: a list of oov words for each src, 2dlist
"""
src = batch['src_tensor']
src_lens = batch['src_lens']
src_mask = batch['src_mask']
src_oov = batch['src_oov_tensor']
oov_lists = batch['oov_lists']
src_str_list = batch['src_list_tokenized']
trg_sent_2d_list = batch['trg_sent_2d_list']
src_sent_2d_list = batch['src_sent_2d_list']
trg = batch['trg_tensor']
trg_oov = batch['trg_oov_tensor']
trg_lens = batch['trg_lens']
trg_mask = batch['trg_mask']
control_variables = {}
if 1 in opt.control_modes:
control_variables['len_bins'] = batch['len_bins']
if 2 in opt.control_modes:
control_variables['exact_lens'] = batch['exact_lens']
if 3 in opt.control_modes or 4 in opt.control_modes or 5 in opt.control_modes or 6 in opt.control_modes:
control_variables['abs_bins'] = batch['abs_bins']
if 6 in opt.control_modes:
# tokenize the each src sentence list in the batch and put it in the control variable.
src_sent_2d_list_tokenized = [] # each item is a list of src sentences (tokenized) for an input sample
for src_sent_list in src_sent_2d_list:
src_sent_list = [src_sent.split(' ') for src_sent in src_sent_list]
src_sent_2d_list_tokenized.append(src_sent_list)
control_variables['src_word_2d_list_sent_tokenized'] = src_sent_2d_list_tokenized
#control_variables['src_word_2d_list'] = batch['src_list_tokenized']
#if 10 in opt.cost_types or 11 in opt.cost_types or 18 in opt.cost_types or 19 in opt.cost_types:
if 7 in opt.control_modes:
control_variables['reference_entities_list'] = batch['reference_entities_list']
position_ids = batch['position_ids']
position_ids = position_ids.to(opt.device)
control_variables['masked_questions_ids_2dlist'] = batch['masked_questions_ids_2dlist']
control_variables['answer_2dlist'] = batch['answer_2dlist']
#control_variables['answer_id_2dlist'] = batch['answer_id_2dlist']
#control_variables['multiple_choices_ids_2dlist'] = batch['multiple_choices_ids_2dlist']
else:
position_ids = None
control_variables['src_word_2d_list'] = batch['src_list_tokenized']
# move data to GPU if available
src = src.to(opt.device)
src_mask = src_mask.to(opt.device)
src_oov = src_oov.to(opt.device)
optimizer.zero_grad()
# debug
"""
for src_str in src_str_list:
print(src_str[:10])
print(src.detach().cpu().numpy()[:, :10])
print(batch['trg_lens'])
print(batch['len_bins'])
print(batch['abs_bins'])
exit()
"""
batch_size = src.size(0)
reward_type = opt.reward_type
sent_level_reward = opt.sent_level_reward
baseline = opt.baseline
regularization_type = opt.regularization_type
regularization_factor = opt.regularization_factor
if regularization_type == 2:
entropy_regularize = True
else:
entropy_regularize = False
trg_sent_2d_list_tokenized = [] # each item is a list of target sentences (tokenized) for an input sample
trg_str_list = [] # each item is the target output sequence (tokenized) for an input sample
for trg_sent_list in trg_sent_2d_list:
trg_sent_list = [trg_sent.strip().split(' ') for trg_sent in trg_sent_list]
trg_sent_2d_list_tokenized.append(trg_sent_list)
trg_str_list.append(list(concat(trg_sent_list)))
trg_sent_2d_list = trg_sent_2d_list_tokenized # each item is a list of target sentences (tokenized) for an input sample
# if use self critical as baseline, greedily decode a sequence from the model
if baseline == 'self':
# sample greedy prediction
generator.model.eval()
with torch.no_grad():
if isinstance(generator.model, Seq2SeqModelExactLenInput):
greedy_sample_list, _, _, greedy_eos_idx_mask, _, _ = generator.sample_with_exact_len_input(src, src_lens, src_oov, src_mask, oov_lists, batch['exact_lens'], greedy=True,
entropy_regularize=False)
else:
greedy_sample_list, _, _, greedy_eos_idx_mask, _, _ = generator.sample(src, src_lens, src_oov, src_mask,
oov_lists, greedy=True,
entropy_regularize=False,
position_ids=position_ids)
greedy_str_list = sample_list_to_str_list(greedy_sample_list, oov_lists, opt.idx2word, opt.vocab_size,
io.EOS,
io.UNK, opt.replace_unk,
src_str_list)
greedy_sent_2d_list = []
for greedy_str in greedy_str_list:
greedy_sent_list = nltk.tokenize.sent_tokenize(' '.join(greedy_str))
greedy_sent_list = [greedy_sent.strip().split(' ') for greedy_sent in greedy_sent_list]
greedy_sent_2d_list.append(greedy_sent_list)
# compute reward of greedily decoded sequence, tensor with size [batch_size]
baseline = compute_batch_reward(greedy_str_list, greedy_sent_2d_list, trg_str_list,
trg_sent_2d_list, batch_size, reward_obj,
regularization_factor=0.0, regularization_type=0, entropy=None, control_variables=control_variables)
generator.model.train()
if opt.ml_loss_coefficient > 0:
generator.model.train()
total_trg_tokens = sum(trg_lens)
trg = trg.to(opt.device)
trg_mask = trg_mask.to(opt.device)
trg_oov = trg_oov.to(opt.device)
ml_loss = compute_ml_loss(generator.model, src, src_lens, src_mask, src_oov, oov_lists, trg, trg_oov, trg_lens, trg_mask, generator.model.copy_attn, opt.control_modes, control_variables)
ml_loss_normalized = ml_loss.div(total_trg_tokens)
#ml_loss_normalized = ml_loss.div(batch_size)
# sample a sequence from the model
# sample_list is a list of dict, {"prediction": [], "scores": [], "attention": [], "done": True}, prediction is a list of 0 dim tensors
# log_selected_token_dist: size: [batch, output_seq_len]
# sample sequences for multiple times
sample_batch_size = batch_size * opt.n_sample
src = src.repeat(opt.n_sample, 1)
src_lens = src_lens * opt.n_sample
src_mask = src_mask.repeat(opt.n_sample, 1)
src_oov = src_oov.repeat(opt.n_sample, 1)
oov_lists = oov_lists * opt.n_sample
src_str_list = src_str_list * opt.n_sample
trg_sent_2d_list = trg_sent_2d_list * opt.n_sample
trg_str_list = trg_str_list * opt.n_sample
if opt.baseline != 'none': # repeat the greedy rewards
#baseline = np.tile(baseline, opt.n_sample)
baseline = baseline.repeat(opt.n_sample) # [sample_batch_size]
start_time = time.time()
if isinstance(generator.model, Seq2SeqModelExactLenInput):
repeated_exact_lens = batch['exact_lens'] * opt.n_sample
sample_list, log_selected_token_dist, output_mask, pred_eos_idx_mask, entropy, location_of_eos_for_each_batch = \
generator.sample_with_exact_len_input(src, src_lens, src_oov, src_mask, oov_lists, repeated_exact_lens, greedy=False, entropy_regularize=entropy_regularize)
else:
sample_list, log_selected_token_dist, output_mask, pred_eos_idx_mask, entropy, location_of_eos_for_each_batch = generator.sample(
src, src_lens, src_oov, src_mask, oov_lists, greedy=False, entropy_regularize=entropy_regularize, position_ids=position_ids)
pred_str_list = sample_list_to_str_list(sample_list, oov_lists, opt.idx2word, opt.vocab_size, io.EOS,
io.UNK, opt.replace_unk, src_str_list) # a list of word list, len(pred_word_2dlist)=sample_batch_size
sample_time = time_since(start_time)
max_pred_seq_len = log_selected_token_dist.size(1)
pred_sent_2d_list = [] # each item is a list of predicted sentences (tokenized) for an input sample, used to compute summary level Rouge-l
for pred_str in pred_str_list:
pred_sent_list = nltk.tokenize.sent_tokenize(' '.join(pred_str))
pred_sent_list = [pred_sent.strip().split(' ') for pred_sent in pred_sent_list]
pred_sent_2d_list.append(pred_sent_list)
if entropy_regularize:
entropy_array = entropy.data.cpu().numpy()
else:
entropy_array = None
# compute the reward
with torch.no_grad():
if sent_level_reward:
raise ValueError("Not implemented.")
else: # neither using reward shaping
# only receive reward at the end of whole sequence, tensor: [sample_batch_size]
cumulative_reward = compute_batch_reward(pred_str_list, pred_sent_2d_list, trg_str_list,
trg_sent_2d_list, sample_batch_size, reward_obj,
regularization_factor=regularization_factor,
regularization_type=regularization_type, entropy=entropy_array,
control_variables=control_variables)
# store the sum of cumulative reward (before baseline) for the experiment log
cumulative_reward_sum = cumulative_reward.detach().sum(0).item()
if opt.constrained_mdp:
lagrangian_model, optimizer_lagrangian = lagrangian_params
cumulative_cost = compute_batch_cost(pred_str_list, pred_sent_2d_list, trg_str_list, trg_sent_2d_list, sample_batch_size, cost_objs, control_variables) # [sample_batch_size, num_cost_types]
cumulative_cost_mean = cumulative_cost.mean(0) # [num_cost_types]
#cumulative_cost = torch.from_numpy(cumulative_cost_array).type(torch.FloatTensor).to(src.device)
# cumulative_cost: [sample_batch_size, len(cost_types)]
# subtract the regularization term: \lambda \dot C_t
constraint_regularization = lagrangian_model.compute_regularization(cumulative_cost) # [sample_batch_size]
cumulative_reward -= constraint_regularization
# Subtract the cumulative reward by a baseline if needed
if opt.baseline != 'none':
cumulative_reward = cumulative_reward - baseline # [sample_batch_size]
# q value estimation for each time step equals to the (baselined) cumulative reward
q_value_estimate = cumulative_reward.unsqueeze(1).repeat(1, max_pred_seq_len) # [sample_batch_size, max_pred_seq_len]
#q_value_estimate_array = np.tile(cumulative_reward.reshape([-1, 1]), [1, max_pred_seq_len]) # [batch, max_pred_seq_len]
#shapped_baselined_reward = torch.gather(shapped_baselined_phrase_reward, dim=1, index=pred_phrase_idx_mask)
# use the return as the estimation of q_value at each step
#q_value_estimate = torch.from_numpy(q_value_estimate_array).type(torch.FloatTensor).to(src.device)
q_value_estimate.requires_grad_(True)
q_estimate_compute_time = time_since(start_time)
# compute the policy gradient objective
pg_loss = compute_pg_loss(log_selected_token_dist, output_mask, q_value_estimate)
# back propagation to compute the gradient
if opt.loss_normalization == "samples": # use number of target tokens to normalize the loss
normalization = opt.n_sample
elif opt.loss_normalization == 'batches': # use batch_size to normalize the loss
normalization = sample_batch_size
else:
normalization = 1
pg_loss_normalized = pg_loss.div(normalization)
if opt.ml_loss_coefficient > 0:
if opt.pg_loss_coefficient > 0:
total_loss = opt.pg_loss_coefficient * pg_loss_normalized + opt.ml_loss_coefficient * ml_loss_normalized
else:
total_loss = (1 - opt.ml_loss_coefficient) * pg_loss_normalized + opt.ml_loss_coefficient * ml_loss_normalized
else:
ml_loss_normalized = torch.Tensor([0.0])
total_loss = pg_loss_normalized
start_time = time.time()
total_loss.backward()
#pg_loss.div(normalization).backward()
backward_time = time_since(start_time)
if opt.max_grad_norm > 0:
grad_norm_before_clipping = nn.utils.clip_grad_norm_(generator.model.parameters(), opt.max_grad_norm)
# take a step of gradient descent
optimizer.step()
#print(ml_loss_normalized.item())
#print(pg_loss_normalized.item())
# log each loss to tensorboard
if tb_writer is not None:
tb_writer.add_scalar('ml_loss', ml_loss_normalized.item(), global_step)
tb_writer.add_scalar('pg_loss', pg_loss_normalized.item(), global_step)
if opt.constrained_mdp:
lambda_tensor = lagrangian_model.get_lagrangian_multiplier()
for cost_i, cost_type in enumerate(opt.cost_types):
tb_writer.add_scalar('cost_{}'.format(cost_type), cumulative_cost_mean[cost_i].detach().item(), global_step)
tb_writer.add_scalar('lambda_{}'.format(cost_type), lambda_tensor[cost_i].item(), global_step)
stat = RewardStatistics(cumulative_reward_sum, pg_loss.item(), sample_batch_size, sample_time, q_estimate_compute_time, backward_time)
# (final_reward=0.0, pg_loss=0.0, n_batch=0, sample_time=0, q_estimate_compute_time=0, backward_time=0)
# reward=0.0, pg_loss=0.0, n_batch=0, sample_time=0, q_estimate_compute_time=0, backward_time=0
if opt.constrained_mdp:
lagrangian_loss, lagrangian_grad_norm, violate_amount = train_lagrangian_multiplier(lagrangian_model, cumulative_cost, optimizer_lagrangian, normalization, opt.max_grad_norm)
lagrangian_stat = LagrangianStatistics(lagrangian_loss=lagrangian_loss, n_batch=sample_batch_size, lagrangian_grad_norm=lagrangian_grad_norm, violate_amount=violate_amount)
stat = (stat, lagrangian_stat)
return stat, log_selected_token_dist.detach() | d8f1df07641584860022def119f8f49db3e6e5dc | 3,629,036 |
import sys
import os
def getProgramName():
    """Return the name of the currently running program.

    The name is derived from ``sys.argv[0]``: directory components are
    stripped, and a trailing ``.py`` or ``.exe`` extension is dropped so
    that 'tool.py' and 'tool.exe' both report as 'tool'.
    """
    # basename() already discards any leading path (including './'),
    # so no manual prefix stripping is needed.
    prog_name = os.path.basename(sys.argv[0].strip())
    if prog_name.endswith('.py'):
        prog_name = prog_name[:-3]
    if prog_name.endswith('.exe'):
        prog_name = prog_name[:-4]
    return prog_name
from datetime import datetime
from typing import Iterable
def get_all_trips(*, date_from: datetime, date_to: datetime, departure_station: BusStation, arrival_station: BusStation) \
        -> Iterable[Trip]:
    """Return trips departing inside a time window that serve both stations in order.

    [Step1] filter trips with departure time between `date_from` and `date_to`
    [Step2] filter trips with trip route that contain the `departure_station` before the `arrival_station`
    """
    # NOTE(review): the second .filter() re-uses the multi-valued relation
    # trip_route__tripstop. In Django, chained filter() calls over a
    # multi-valued relation create *separate* joins, so the departure and
    # arrival conditions may match different TripStop rows of the same
    # route -- which is what "departure before arrival" requires here.
    # Confirm whether duplicate Trip rows are acceptable or .distinct()
    # is applied by the caller.
    trips = Trip.objects.filter(
        departure_time__gte=date_from,
        departure_time__lte=date_to,
        trip_route__tripstop__station=departure_station,
    ).annotate(
        # save 1st trip stop number as `start_station_number`
        start_station_number=F('trip_route__tripstop__stop_number')
    ).filter(
        trip_route__tripstop__station=arrival_station,
        # compare 2nd trip stop number with `start_station_number`
        trip_route__tripstop__stop_number__gt=F('start_station_number')
    )
    return trips | be58f578740bde35b45435d1f82a4d5e82b4dc6d | 3,629,038
from typing import Callable
from typing import Optional
from datetime import datetime
import httpx
import time
def get_coinbase_api_response(
    get_api_url: Callable,
    base_url: str,
    timestamp_from: Optional[datetime] = None,
    pagination_id: Optional[str] = None,
    retry=30,
):
    """Get Coinbase API response.

    Args:
        get_api_url: builds the request URL from ``base_url`` plus the
            ``timestamp_from`` / ``pagination_id`` cursor.
        base_url: API endpoint base URL.
        timestamp_from: optional lower time bound for the requested data.
        pagination_id: optional pagination cursor.
        retry: number of *additional* attempts after a transport error,
            with a one-second pause between attempts.

    Returns:
        Decoded JSON payload on HTTP 200.

    Raises:
        Re-raises the last ``HTTPX_ERRORS`` exception once retries are
        exhausted; non-200 responses raise via ``raise_for_status``.
    """
    # Iterative retry loop instead of the original recursion: identical
    # behaviour (retry + 1 total attempts), but no stack growth.
    while True:
        try:
            url = get_api_url(
                base_url, timestamp_from=timestamp_from, pagination_id=pagination_id
            )
            response = httpx.get(url)
            if response.status_code == 200:
                return response.json()
            response.raise_for_status()
        except HTTPX_ERRORS:
            if retry <= 0:
                # Out of retries: surface the last transport error.
                raise
            retry -= 1
            time.sleep(1)
def intersect(A, B, C, D):
    """ Finds the intersection of two lines represented by four points.
    @parameter A: point #1, belongs to line #1
    @parameter B: point #2, belongs to line #1
    @parameter C: point #3, belongs to line #2
    @parameter D: point #4, belongs to line #2
    @returns: None if lines are parallel, tuple (x, y) with intersection point"""
    if A is None or B is None or C is None or D is None:
        return None
    # Homogeneous-coordinate (cross product) form of line-line intersection.
    Pxn = (A[0] * B[1] - A[1] * B[0]) * (C[0] - D[0]) - (A[0] - B[0]) * (C[0] * D[1] - C[1] * D[0])
    Pyn = (A[0] * B[1] - A[1] * B[0]) * (C[1] - D[1]) - (A[1] - B[1]) * (C[0] * D[1] - C[1] * D[0])
    Pdenom = float((A[0] - B[0]) * (C[1] - D[1]) - (A[1] - B[1]) * (C[0] - D[0]))
    # Scope the strict floating-point error handling to this division only.
    # The original np.seterr(all='raise') permanently changed numpy's global
    # error state for the whole process; np.errstate restores it on exit.
    try:
        with np.errstate(all='raise'):
            Px = Pxn / Pdenom
            Py = Pyn / Pdenom
    except (FloatingPointError, ZeroDivisionError):
        # Parallel (or coincident) lines: the denominator is zero.
        return None
    if np.isnan(Px) or np.isnan(Py):
        return None
    return (int(Px), int(Py))
def detect(inputs, anchors, n_classes, img_size, scope='detection'):
    """Detect layer

    Decodes raw feature-map activations into bounding-box predictions.

    Args:
        inputs: conv feature map; assumed shape [batch, grid_h, grid_w,
            n_anchors * (5 + n_classes)] -- TODO confirm against caller.
        anchors: list of (width, height) anchor priors in image pixels.
        n_classes: number of object classes.
        img_size: (height, width) of the network input image.
        scope: tf name scope for the created ops.

    Returns:
        Tensor [batch, n_anchors * grid_h * grid_w, 5 + n_classes] holding
        (center_x, center_y, width, height, confidence, per-class scores),
        with centers and sizes expressed in input-image pixels.
    """
    with tf.name_scope(scope, 'detection',[inputs]):
        n_anchors = len(anchors)
        bbox_attrs = 5+n_classes
        predictions = inputs
        grid_size = predictions.get_shape().as_list()[1:3]
        n_dims = grid_size[0] * grid_size[1]
        # Flatten the spatial grid: one row per (anchor, cell) prediction.
        predictions = tf.reshape(predictions, [-1, n_anchors * n_dims, bbox_attrs])
        # Stride = number of image pixels covered by one grid cell.
        stride = (img_size[0] // grid_size[0], img_size[1] // grid_size[1])
        # Rescale anchor priors from image pixels to grid units.
        anchors = [(a[0] / stride[0], a[1] / stride[1]) for a in anchors]
        box_centers, box_sizes, confidence, classes = tf.split(predictions,
                                                               [2, 2, 1, n_classes],
                                                               axis=-1)
        # Centers and confidence are predicted as logits -> squash to (0, 1).
        box_centers = tf.nn.sigmoid(box_centers)
        confidence = tf.nn.sigmoid(confidence)
        # Build per-cell (x, y) offsets so centers become absolute grid coords.
        grid_y = tf.range(grid_size[0], dtype=tf.float32)
        grid_x = tf.range(grid_size[1], dtype=tf.float32)
        a, b = tf.meshgrid(grid_x, grid_y)
        x_offset = tf.reshape(a, (-1, 1))
        y_offset = tf.reshape(b, (-1, 1))
        x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
        # Tile offsets across anchors to match the flattened prediction rows.
        x_y_offset = tf.reshape(tf.tile(x_y_offset, [1, n_anchors]), [1, -1, 2])
        box_centers = box_centers + x_y_offset
        # Convert grid coordinates to image pixels.
        box_centers = box_centers * stride
        # Sizes are predicted as log-scale factors of the anchor priors.
        anchors = tf.tile(anchors, [n_dims, 1])
        box_sizes = tf.exp(box_sizes) * anchors
        box_sizes = box_sizes * stride
        detections = tf.concat([box_centers, box_sizes, confidence], axis=-1)
        # Independent per-class sigmoid (multi-label), not softmax.
        classes = tf.nn.sigmoid(classes)
        predictions = tf.concat([detections, classes], axis=-1)
        return predictions | 4751db9980137cf3f5acaee9a900653d5f31e90e | 3,629,041
def get_scenario_data():
    """Return a fixed list of sample scenario rows.

    Each row describes the oxford county population count for one
    season of timestep 2017.
    """
    season_counts = [
        ('cold_month', 100),
        ('spring_month', 150),
        ('hot_month', 200),
        ('fall_month', 210),
    ]
    return [
        {
            'population_count': count,
            'county': 'oxford',
            'season': season,
            'timestep': 2017,
        }
        for season, count in season_counts
    ]
def user_not_found(error):
    """Flask custom error handler: serialise the error as JSON.

    Builds the response body from ``error.to_dict()`` and mirrors the
    error's status code onto the response.
    More info: http://flask.pocoo.org/docs/1.0/patterns/apierrors/#registering-an-error-handler
    """
    payload = jsonify(error.to_dict())
    payload.status_code = error.status_code
    return payload
from typing import Dict
from typing import Any
def get_do_pass_with_amendments_by_committee(
    biennium: str, agency: str, committee_name: str
) -> Dict[str, Any]:
    """See: http://wslwebservices.leg.wa.gov/committeeactionservice.asmx?op=GetDoPassWithAmendmentsByCommittee"""
    # Service arguments, keyed by the web-service parameter names.
    args: Dict[str, Any] = {
        "biennium": biennium,
        "agency": agency,
        "committeeName": committee_name,
    }
    # Per-field converters applied to the raw response values.
    converters: Dict[str, Any] = {
        "billnumber": int,
        "substituteversion": int,
        "engrossedversion": int,
        "active": lambda flag: (flag.lower() == "true"),
    }
    return waleg.call("CommitteeAction", "GetDoPassWithAmendmentsByCommittee", args, converters)
def petrosian_fd(x):
    """Petrosian fractal dimension of a one-dimensional time series.

    The signal is reduced to the sign pattern of its first derivative;
    with ``N`` the series length and ``N_delta`` the number of sign
    changes in that pattern, the dimension is::

        log10(N) / (log10(N) + log10(N / (N + 0.4 * N_delta)))

    Parameters
    ----------
    x : list or np.array
        One dimensional time series.

    Returns
    -------
    float
        Petrosian fractal dimension.

    References
    ----------
    A. Petrosian, "Kolmogorov complexity of finite sequences and
    recognition of different preictal EEG patterns", Proc. 8th IEEE
    Symposium on Computer-Based Medical Systems, 1995, pp. 212-217.
    Original code from the pyrem package by Quentin Geissmann.

    Examples
    --------
    >>> import numpy as np
    >>> from entropy import petrosian_fd
    >>> np.random.seed(123)
    >>> x = np.random.rand(100)
    >>> print(petrosian_fd(x))
    1.0505
    """
    length = len(x)
    first_diff = np.ediff1d(x)
    # Count sign changes between consecutive derivative samples.
    sign_changes = (first_diff[1:-1] * first_diff[:-2] < 0).sum()
    return np.log10(length) / (
        np.log10(length) + np.log10(length / (length + 0.4 * sign_changes))
    )
def lemma(word):
    """
    Transforms a given word to its lemmatized form, checking a lemma dictionary. If the word is
    not included in the dictionary, the same word is returned.
    :param word: string containing a single word
    :type: string
    :return: the word's lemma
    :type: string
    """
    # Single dict lookup via .get() instead of `in` + index (same semantics,
    # one hash lookup instead of two).
    return lemmas.get(word, word)
def is_style_file(filename):
    """Return True when *filename* matches the style-file pattern."""
    match = STYLE_FILE_PATTERN.match(filename)
    return match is not None
def add_subnet():
    """Add a subnet from the current request's payload.

    Required fields: ['subnet']
    Optional fields: ['name']
    """
    request_data = _get_request_data()
    created = network_api.add_subnet(user=current_user, **request_data)
    return utils.make_json_response(200, created)
def V_beta(gamma, pi):
    """Expected utility for an insured agent with coverage ratio gamma,
    where the loss x is drawn from a beta distribution.

    Args:
        gamma(float): coverage ratio.
        pi(float): insurance premium.

    Returns:
        Utility u(.) of the agent's net income; relies on the
        module-level globals y, x and theta.
    """
    retained_loss = (1 - gamma) * x
    net_income = y - retained_loss - pi
    return u(net_income, theta)
def counted(fn):
    """Decorator that counts how many times *fn* is called.

    The running total is exposed as the ``called`` attribute of the
    returned wrapper.

    :param fn: the function to instrument
    :return: wrapped function with a ``called`` counter attribute
    """
    from functools import wraps

    # wraps() copies __name__, __doc__, __module__ etc. (the original
    # hand-copied only __name__, losing the rest of the metadata).
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Increment before delegating, so calls that raise still count.
        wrapper.called += 1
        return fn(*args, **kwargs)

    wrapper.called = 0
    return wrapper
def removeneg(im, key=0):
    """Return a copy of *im* with all negative values replaced by *key*.

    Note: the original docstring claimed this removed NaN/Inf, but the
    code only clamps negative entries; NaN and +Inf pass through
    unchanged (NaN compares False against 0).

    :param im: input array (not modified).
    :param key: replacement value for negative entries (default 0).
    :return: new array of the same shape/dtype.
    """
    cleaned = np.copy(im)
    cleaned[cleaned < 0] = key
    return cleaned
def mape(df, w):
    """Mean absolute percent error.

    Parameters
    ----------
    df: Cross-validation results dataframe with 'y', 'yhat' and 'horizon'.
    w: Aggregation window size; a negative value disables aggregation.

    Returns
    -------
    Dataframe with columns horizon and mape.
    """
    abs_pct_err = ((df['y'] - df['yhat']) / df['y']).abs()
    if w >= 0:
        # Aggregate per-horizon with a rolling mean of window w.
        return rolling_mean_by_h(
            x=abs_pct_err.values, h=df['horizon'].values, w=w, name='mape'
        )
    return pd.DataFrame({'horizon': df['horizon'], 'mape': abs_pct_err})
def p3p(pts_2d, pts_3d, K, q_ref=None, allow_imag_roots=False):
    """Camera pose from three 2D/3D point correspondences.

    Implements the point case of "A Stable Algebraic Camera Pose
    Estimation for Minimal Configurations of 2D/3D Point and Line
    Correspondences", Zhou et al., ACCV 2018.

    pts_2d - pixels in 2d, one per row.
    pts_3d - the corresponding points in 3D.
    K - the intrinsics matrix of the camera.
    q_ref - [w, x, y, z] optional coarse rotation estimate used to
        improve robustness.
    allow_imag_roots - keep only the real part of imaginary roots
        (recommended under noise). Default: False.
    """
    (C1, c2, c3), (N1, n2, n3) = _re3q3_point_constraints(pts_2d, pts_3d, K)
    # Stack the remaining constraint rows into 3x3 blocks.
    rot_block = np.vstack((c2[:2], c3[2]))
    trans_block = np.vstack((n2[:2], n3[2]))
    trans_block_inv = np.linalg.inv(trans_block)
    # Eliminate the translation unknowns to obtain a rotation-only system.
    reduced = C1 - N1 @ trans_block_inv @ rot_block
    # Solve the modified re3q3 problem for candidate rotations.
    rotations, roots = _re3q3(A=reduced, q_ref=q_ref, allow_imag_roots=allow_imag_roots)
    # Back-substitute each candidate to recover its translation.
    translations = -(trans_block_inv @ rot_block @ roots.T).T
    return list(zip(rotations, translations))
def status() -> tuple:
    """Health check endpoint."""
    body = xmlify('<status>ok</status>')
    return body, HTTP_200_OK
from typing import Dict
from typing import Any
from typing import Sequence
def nested_keys(nested_dict: Dict[Text, Any],
                delimiter: Text = '/',
                prefix: Text = '') -> Sequence[Text]:
    """Returns a flattened list of nested key strings of a nested dict.

    Args:
      nested_dict: Nested dictionary.
      delimiter: String that joins the nested keys.
      prefix: Top-level key used for recursion, usually leave blank.

    Returns:
      List of nested key strings.
    """
    keys = []
    for k, v in nested_dict.items():
        key = k if not prefix else f'{prefix}{delimiter}{k}'
        if isinstance(v, dict):
            # Bug fix: forward the caller's delimiter into the recursion
            # (previously nested levels always used the default '/').
            keys.extend(nested_keys(v, delimiter=delimiter, prefix=key))
        else:
            keys.append(key)
    return keys
import os
def add_it(workbench, file_list, labels):
    """Add the given file_list to workbench as samples, also add them as nodes.

    Args:
        workbench: Instance of Workbench Client.
        file_list: list of file paths.
        labels: labels for the nodes.

    Returns:
        A list of md5s.
    """
    md5s = []
    for filename in file_list:
        base_name = os.path.basename(filename)
        # Skip macOS Finder metadata anywhere in the tree; the original
        # compared the full path, so only a cwd-level '.DS_Store' was skipped.
        if base_name == '.DS_Store':
            continue
        with open(filename, 'rb') as pe_file:
            md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
        workbench.add_node(md5, md5[:6], labels)
        md5s.append(md5)
    return md5s
def has_id(sxpr, id):
    """Return True when the s-expression's 'id' attribute equals *id*."""
    return id == attribute(sxpr, 'id')
def gf_gcdex(f, g, p, K):
    """Extended Euclidean Algorithm in `GF(p)[x]`.
    Given polynomials `f` and `g` in `GF(p)[x]`, computes polynomials
    `s`, `t` and `h`, such that `h = gcd(f, g)` and `s*f + t*g = h`. The
    typical application of EEA is solving polynomial diophantine equations.
    Consider polynomials `f = (x + 7) (x + 1)`, `g = (x + 7) (x**2 + 1)`
    in `GF(11)[x]`. Application of Extended Euclidean Algorithm gives::
    >>> from sympy.polys.galoistools import gf_gcdex, gf_mul, gf_add
    >>> from sympy.polys.algebratools import ZZ
    >>> s, t, g = gf_gcdex([1,8,7], [1,7,1,7], 11, ZZ)
    >>> s, t, g
    ([5, 6], [6], [1, 7])
    As result we obtained polynomials `s = 5*x + 6` and `t = 6`, and
    additionally `gcd(f, g) = x + 7`. This is correct because::
    >>> S = gf_mul(s, [1,8,7], 11, ZZ)
    >>> T = gf_mul(t, [1,7,1,7], 11, ZZ)
    >>> gf_add(S, T, 11, ZZ) == [1, 7]
    True
    References
    ==========
    .. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
       First Edition, Cambridge University Press, 1999, pp. 46
    """
    # Degenerate case: gcd(0, 0) is taken to be 1 with zero cofactors.
    if not (f or g):
        return [K.one], [], []
    # Normalise both inputs to monic form; p0/p1 are the leading coefficients.
    p0, r0 = gf_monic(f, p, K)
    p1, r1 = gf_monic(g, p, K)
    # One-sided cases: gcd with zero is the monic version of the other poly,
    # and the matching cofactor is the inverse of its leading coefficient.
    if not f:
        return [], [K.invert(p1, p)], r1
    if not g:
        return [K.invert(p0, p)], [], r0
    # Bezout cofactor accumulators: (s0, s1) track f's cofactor across
    # the last two remainders; (t0, t1) do the same for g.
    s0, s1 = [K.invert(p0, p)], []
    t0, t1 = [], [K.invert(p1, p)]
    while True:
        Q, R = gf_div(r0, r1, p, K)
        if not R:
            # r1 divides r0 exactly: r1 is the (monic) gcd.
            break
        # Keep remainders monic; `lc` is the leading coefficient divided out,
        # so the cofactor updates below must be rescaled by 1/lc as well.
        (lc, r1), r0 = gf_monic(R, p, K), r1
        inv = K.invert(lc, p)
        # Standard EEA update: new cofactor = previous - quotient * current.
        s = gf_sub_mul(s0, s1, Q, p, K)
        t = gf_sub_mul(t0, t1, Q, p, K)
        s1, s0 = gf_mul_ground(s, inv, p, K), s1
        t1, t0 = gf_mul_ground(t, inv, p, K), t1
    return s1, t1, r1 | e40ceccf34173d4b6349ea2c77147e2a81ff09e4 | 3,629,058
from datetime import datetime
def _filetime_from_timestamp(timestamp):
""" See filetimes.py for details """
# Timezones are hard, sorry
moment = datetime.fromtimestamp(timestamp)
delta_from_utc = moment - datetime.utcfromtimestamp(timestamp)
return dt_to_filetime(moment, delta_from_utc) | 81122e093c78004392e3eee7819c52bc62d8d60b | 3,629,059 |
from typing import Callable
from typing import Optional
from typing import Union
from typing import Type
from typing import Sequence
from typing import Any
from typing import get_type_hints
def optimize(
    func: Callable[[np.ndarray], float],
    x: ArrayLike,
    trials: int = 3,
    iterations: Optional[int] = 1500,
    abs_tol_x: float = 3e-2,
    abs_tol_f: float = 3e-2,
    rel_tol_x: float = 1e-1,
    rel_tol_f: float = 1e-1,
    check_decreasing: bool = True,
    refinement_iterations: int = 300,
    x_avg_rate: float = 1e-1,
    f_avg_rate: float = 1e-1,
    minimize: bool = True,
    dtype: Union[Type[float], Type[int]] = float,
    perturbation: float = 0.5,
    pr_decay_rate: float = 0.01,
    pr_decay_power: float = 0.16,
    learning_rate: Optional[float] = None,
    lr_decay_rate: float = 0.01,
    lr_decay_power: float = 0.606,
    momentum_rate: float = 0.99,
    norm_momentum_rate: float = 0.999,
    norm_order: Optional[float] = 2,
    check_inputs: bool = True,
    return_var: Sequence[str] = 'x',
) -> Any:
    """
    Find a local optima of `func` using only function evaluations.
    Requires detectable differences between func(x+px) and func(x-px) for small px
    in order to determine if it should move towards x+px and x-px.
    ========
    Features
    ========
    The algorithm used is based on the
    Simultaneous Perturbation Stochastic Approximation (SPSA).
    SPSA is a derivative-free, global-local, stochastic algorithm that supports
    integer optimization. It may also be used as a gradient approximation algorithm.
    SPSA Features
    -------------
    Objective-Free
        The objective function doesn't necessarily have to exist.
        What we actually need is for every pair of function calls to
        provide a change in output so that the algorithm knows which
        direction to move in. This means applications can include objectives
        such as chess AI, since we can measure the change based on the winner
        of a game. See Parameters.Required.func for more details.
    Derivative-Free
        No gradient is required. Only function calls are used.
        The function need not be differentiable either for this to be used.
        For example, integer domains are possible (see below).
    Integer Optimization
        The domain of the problem may be integers instead of floats.
        Even mixed combinations of integers and floats may be used,
        see Parameters.ProblemSpecification.dtype below for more information.
    Global-Local Search
        The algorithm does not try to explore the entire search space.
        Instead, it focuses on iteratively improving the current solution.
        How far it looks to estimate the gradient depends on the perturbation
        parameter. Initially, the algorithm looks further away, and over
        time, it approaches a local search algorithm instead.
        See Parameters.HyperParameters.perturbation below for more information.
    Stochastic Optimization
        The function being optimized may be stochastic (noisy).
        If this is the case, then the expected (average) is optimized.
        Additionally, decaying hyper-parameters improves stability,
        and final results are averaged out to improve accuracy.
    Random Walk and Significant Parameters
        Parameters which matter the most experience accurate movement
        towards their optimal values. Parameters which are largely
        insignificant will mostly experience random movement, allowing
        the algorithm to search for potential changes in these areas
        without risking performance in parameters that matter.
    High Dimensionality
        Unlike other algorithms, such as Bayesian optimization, the
        dimensionality of the problem is not an issue. The algorithm
        may be used to optimize functions with hundreds of parameters.
    In addition to SPSA: Nesterov's acceleration method, momentum, and
    gradient normalization are used in combination with gradually
    decaying hyper-parameters to ensure eventual convergence.
    Other Features
    --------------
    Nesterov Acceleration
        The algorithm looks ahead and estimates the gradient at
        the next iteration instead of at the current iteration.
        This improves reactiveness to changing gradients.
    Momentum
        Various parameters are estimated using an averaging approach.
        This helps improve stability by preventing sudden changes.
        In a stochastic setting, this also makes information less noisy.
    Gradient Normalization
        The learning rate does not need to be adjusted based on the
        gradient's magnitude. Instead, the gradient is divided by a
        momentum approximation of its norm. The nature of this
        normalization is similar to that of adaptive methods, where
        we use the square root of the average squared gradient.
    Decaying Hyper Parameters
        Various hyper-parameters will decay each iteration. This
        provides theoretical convergence in the long run by allowing
        more refined iterations towards the end.
    Early Termination and Refinement
        It is usually unknown when you should terminate beforehand,
        even moreso in a stochastic setting where iterations may be noisy.
        See Parameters.Termination for more information.
    ==========
    Parameters
    ==========
    Required
    --------
    func(np.ndarray[dtype]) -> float
        The function being optimized. It must be able to take in numpy arrays and return floats.
        The input is rounded if dtype=int.
        The output is either minimized or maximized depending on the `minimize` arg.
        It is guaranteed exactly two func evaluations occur every iteration.
        In some contexts, such as two-player AIs, it may be required that both func evaluations occur together.
        If this is the case, we suggest returning 0 for the first evaluation and then basing the second evaluation from that.
    x : ArrayLike[dtype]
        The initial estimate used. See `optimizer.random_array` to create random arrays.
    Termination
    -----------
    trials : int = 3
        The amount of times we restart the algorithm with a third of the learning rate and
        perturbation size. Restarting has similar affects to learning rate schedules.
        It may appear that when starting another trial, the objective rapidly decreases.
        This phenomenon occurs due to dropping initial object function evaluations by reseting the average.
        We recommend this as a counter measure to terminating too early without terminating too late.
    iterations
        : int = 1500, default
            The maximum amount of iterations ran before refinement is triggered.
        : None
            The number of iterations ran is not considered.
    abs_tol_x, abs_tol_f : float = 3e-2
    rel_tol_x, rel_tol_f : float = 1e-1
        The minimum default tolerance before refinement is triggered.
        Tolerance is measured by comparing an averaged value against an even slower average.
        In other words, the variable must remain the same a while for both averages to coincide.
        Additionally, f_tol determines how fast the slow average func(x) must decrease if check_decreasing is set.
        The x tolerance is rescaled by the learning rate.
        Set to 0 to disable.
    check_decreasing : bool = True
        If set to True, then func(x) will be compared against the slower average func(x) using tol_f.
    x_avg_rate : float = 1e-1
    f_avg_rate : float = 1e-1
        The rate used for the averages.
        The smaller, the more averaged out it'll be, but the slower it can react to changes.
        Every iteration it is gradually reduced to increase smoothness.
    refinement_iterations : int = 300
        The number of times refinement must be triggered for the trial to terminate.
        Counts iterations, x tolerance, and f tolerance separately.
        Additionally applies a third of the lr/pr decay power, where the refinement_i is used for the iterations.
            learning_rate /= (1 + lr_decay_rate * refinement_i) ** (lr_decay_power / 3)
            px /= (1 + pr_decay_rate * refinement_i) ** (pr_decay_power / 3)
    Problem Specification
    ---------------------
    minimize : bool = True
        If True, func(x) is minimized. Otherwise func(x) is maximized.
    dtype : type
        = float, default
            x is cast to floats and the perturbation may be used.
            Hyper-parameter optimization is not usable.
        = int
            `x` is cast to integers and the perturbation is fixed to 0.5.
            If the perturbation is not set to 0.5, a warning message is prompted.
            **Hyper-parameter optimization is not usable.
        Note: To get mixed integer and float values, round the specific values yourself and set:
            dtype=float
            perturbation=0.5
            pr_decay_offset=0
    Hyper Parameters
    ----------------
    perturbation : float = 0.5
        The amount that x is changed in order to estimate the gradient.
        Changes to 0.5 if dtype=int is used for half-integer perturbations.
        Using larger perturbations reduces the amount of noise in the gradient estimates,
        but smaller perturbations increases the accuracy of the non-noise component of gradient estimates.
    pr_decay_rate : float = 0.01
    pr_decay_power : float = 0.16
        Reduce the perturbation if dtype=float according to the formula:
            perturbation / (1 + pr_decay_rate * i) ** pr_decay_power
        where `i` is the current iteration.
        This gradually improves the accuracy of the gradient estimation
        but not so fast that the noise takes over.
        Set either to 0 to disable.
        Changes to 0 if dtype=int is used.
    learning_rate : Optional[float] = (0.1 if dtype is int else 1e-3)
        The amount that x is changed each iteration.
    lr_decay_rate : float = 0.01
    lr_decay_power : float = 0.606
        Reduce the learning_rate according to the formula:
            learning_rate / (1 + lr_decay_rate * i) ** lr_decay_power
        where `i` is the current iteration.
        This gradually improves the accuracy for stochastic functions
        and ensures eventual convergence even if the learning_rate is initially too large
        but doesn't decay so fast that it converges before the solution is found.
        Set either to 0 to disable.
    momentum_rate : float = 0.99
        The amount of momentum retained each iteration.
        Use 0 to disable.
    norm_momentum_rate : float = 0.999
        The amount of the magnitude of the momentum retained each iteration.
        Use 0 to disable.
    norm_order
        : float = 2, default
            The order used for the L^p norm when normalizing the momentum to compute step sizes.
        : None
            Don't normalize the momentum when computing step sizes.
    Misc.
    -----
    check_inputs : bool = True
        If True, the inputs are casted to their appropriate types and checked for appropriate conditions.
        If False, only necessary setup is done.
    return_var
        : str = 'x', default
            The name of the var returned, including below and any of the parameters.
            i : the current iteration within the trial.
            j : the current total iteration.
            gradient : the gradient estimate.
            x : rounded appropriately.
            x_copy : a copy of x.
            x_float : not rounded.
            x_float_copy : not rounded and copied.
            func(x) : last estimate of func(x). Note that x is perturbed, so this may be inaccurate.
            avg_... : an average of the specified variable, including mainly x and func(x).
        : Sequence[str]
            Returns a list of vars e.g. ['x', 'func(x)'].
    Returns
    -------
    The variable(s) specified by return_var.
    """
    # Forward only the keyword arguments that `iter_optimize` declares (its
    # type hints act as the accepted-parameter set), then return the final
    # value it yields via `last`.
    # NOTE(review): `locals()` here must contain exactly this function's
    # parameters -- do not introduce any local variable before this line,
    # or it could leak into the forwarded kwargs.
    return last(iter_optimize(**{k: v for k, v in locals().items() if k in get_type_hints(iter_optimize)})) | ccef3d67669e0420e88b119d9c180fb35a0a98f8 | 3,629,060
import types
def _create_reconstruction(
    n_cameras: int=0,
    n_shots_cam=None,
    n_pano_shots_cam=None,
    n_points: int=0,
    dist_to_shots: bool=False,
    dist_to_pano_shots: bool=False,
):
    """Build a random test reconstruction.

    Creates ``n_cameras`` random perspective cameras plus regular and pano
    shots. ``n_shots_cam`` / ``n_pano_shots_cam`` map camera_id -> shot
    count, e.g.::

        shot_cams = {"0": 50, "1": 30}
        _create_reconstruction(2, shot_cams)

    creates 80 shots: 50 for cam "0" and 30 for cam "1". ``n_points`` random
    3D points are added. When ``dist_to_shots`` is set, observations are
    distributed among shots by sampling with repetition, so e.g. three shots
    may receive [1, 2, 2] or [0, 1, 2] but never [3, 3, 3]; points whose
    sample collapses to a single shot get no observations.
    """
    if n_shots_cam is None:
        n_shots_cam = {}
    if n_pano_shots_cam is None:
        n_pano_shots_cam = {}
    rec = types.Reconstruction()
    # Random perspective cameras, ids "0", "1", ...
    for cam_idx in range(n_cameras):
        focal, k1, k2 = np.random.rand(3)
        camera = pygeometry.Camera.create_perspective(focal, k1, k2)
        camera.id = str(cam_idx)
        rec.add_camera(camera)
    # Regular shots: ids are sequential across all cameras.
    next_shot_id = 0
    for cam_id, count in n_shots_cam.items():
        for _ in range(count):
            rec.create_shot(str(next_shot_id), cam_id)
            next_shot_id += 1
    # Pano shots use their own id sequence, restarting at 0.
    next_shot_id = 0
    for cam_id, count in n_pano_shots_cam.items():
        for _ in range(count):
            rec.create_pano_shot(str(next_shot_id), cam_id)
            next_shot_id += 1
    for pt_idx in range(n_points):
        rec.create_point(str(pt_idx), np.random.rand(3))
    if dist_to_shots:
        total_shots = len(rec.shots)
        for point in rec.points.values():
            picked = set(np.random.choice(total_shots, total_shots))
            if len(picked) > 1:
                for shot_idx in picked:
                    # Fixed dummy observation payload; only the ids vary.
                    obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, int(point.id))
                    rec.add_observation(rec.shots[str(shot_idx)], point, obs)
    # TODO: If required, we have to do the same for pano shots
    return rec
from typing import Set
from typing import Tuple
from typing import cast
from typing import List
from typing import Dict
from typing import Any
def _validate_dialogue_section(
    protocol_specification: ProtocolSpecification, performatives_set: Set[str]
) -> Tuple[bool, str]:
    """
    Evaluate whether the dialogue section of a protocol specification is valid.

    Runs the sub-validators (field existence, initiation, reply, termination,
    roles, end states, keep_terminal_state_dialogues) in order and stops at
    the first failure. An absent or empty dialogue section is valid.

    :param protocol_specification: a protocol specification.
    :param performatives_set: set of all performatives in the dialogue.

    :return: Boolean result, and associated message.
    """
    valid_msg = "Dialogue section of the protocol specification is valid."
    dialogue_config = protocol_specification.dialogue_config
    if dialogue_config is None or dialogue_config == {}:
        return True, valid_msg

    # Required fields must exist before anything else is checked.
    ok, msg = _validate_field_existence(cast(List[str], dialogue_config))
    if not ok:
        return ok, msg

    ok, msg = _validate_initiation(
        cast(List[str], dialogue_config["initiation"]), performatives_set
    )
    if not ok:
        return ok, msg

    # Reply validation also yields the terminal performatives it inferred,
    # which feed into the termination check below.
    ok, msg, terminal_performatives_from_reply = _validate_reply(
        cast(Dict[str, List[str]], dialogue_config["reply"]), performatives_set
    )
    if not ok:
        return ok, msg

    ok, msg = _validate_termination(
        cast(List[str], dialogue_config["termination"]),
        performatives_set,
        cast(Set[str], terminal_performatives_from_reply),
    )
    if not ok:
        return ok, msg

    ok, msg = _validate_roles(cast(Dict[str, Any], dialogue_config["roles"]))
    if not ok:
        return ok, msg

    ok, msg = _validate_end_states(cast(List[str], dialogue_config["end_states"]))
    if not ok:
        return ok, msg

    ok, msg = _validate_keep_terminal(
        cast(bool, dialogue_config["keep_terminal_state_dialogues"])
    )
    if not ok:
        return ok, msg

    return True, valid_msg
def _hsic_naive(x, y, scale=False, sigma_x=None, sigma_y=None,
                kernel='gaussian', dof=0):
    """
    Naive (slow) implementation of HSIC (Hilbert-Schmidt Independence
    Criterion). This function is only used to assert correct results of the
    faster method ``hsic``.
    Parameters
    ----------
    x : array-like, shape (n_samples,)
        First vector to be compared.
    y : array-like, shape (n_samples,)
        Second vector to be compared.
    scale : bool, optional (default=False)
        If true, the result is scaled (from 0 to 1) as
        .. math::
            result = hsic(x, y) / (hsic(x, x) * hsic(y, y)) ** 0.5
    sigma_x : float or None, optional (default=None)
        Bandwidth for the kernel of the x variable.
        By default, the median euclidean distance between points is used.
    sigma_y : float or None, optional (default=None)
        Bandwidth for the kernel of the y variable.
        By default, the median euclidean distance between points is used.
    kernel : str, optional (default='gaussian')
        The type of kernel to be used. Choices are ``gaussian``, ``laplace``
        and ``rational_quadratic``.
    dof : int, optional (default=0)
        Degree of freedom for scaling the non-normalized score.
    Returns
    -------
    hsic_score : float
        HSIC score
    """
    kernel_fn = KERNEL_MAP[kernel]
    m = x.shape[0]
    # Centering matrix H = I - (1/m) * ones(m, m); the scalar 1/m is
    # broadcast-subtracted from every element of the identity.
    h = np.eye(m) - 1 / m
    k_x = kernel_fn(x, sigma=sigma_x) @ h
    k_y = kernel_fn(y, sigma=sigma_y) @ h
    # tr(Kx H Ky H) computed as an elementwise product sum of the
    # pre-centered kernel matrices.
    trace_xy = np.sum(k_x.T * k_y)
    if scale:
        trace_xx = np.sum(k_x.T * k_x)
        trace_yy = np.sum(k_y.T * k_y)
        hsic_score = trace_xy / (trace_xx * trace_yy) ** 0.5
    else:
        hsic_score = trace_xy / (m - dof) ** 2
    return hsic_score
from datetime import datetime
def translate(raw_path):
    """Read the official Rio de Janeiro BRT realized-trips export and
    convert it to the standardized realized-trips layout.

    TODO:
        - get trip_id, maybe from GTFS?
        - get departure_id and arrival_id, also from GTFS?

    Parameters
    ----------
    raw_path : str
        path of the raw data file
    """
    brt = pd.read_csv(
        raw_path,
        encoding="utf8",
        sep=";",
        na_values=["-"],
        decimal=",",
        parse_dates=["Data"],
        infer_datetime_format=False,
        date_parser=lambda x: datetime.strptime(x, "%d/%m/%Y"),
    )
    # Keep only the mapped columns and give them their standardized names.
    brt = brt[list(cols.keys())].rename(columns=cols)
    # Trips without vehicle/departure/arrival data were not executed.
    brt = brt.dropna(subset=["vehicle_id", "departure_time", "arrival_time"])

    def _stamp(row, time_col):
        # Combine the trip date with a time-of-day column.
        return pd.Timestamp(str(row["date"].date()) + " " + str(row[time_col]))

    brt["departure_datetime"] = brt.apply(lambda row: _stamp(row, "departure_time"), 1)
    brt["arrival_datetime"] = brt.apply(lambda row: _stamp(row, "arrival_time"), 1)
    # Standardize trajectory type labels.
    brt["trajectory_type"] = brt["trajectory_type"].replace(trajectory_type_map)
    # Guarantee every expected output column exists.
    for column in realized_trips_cols:
        if column not in brt.columns:
            brt[column] = None
    return brt[realized_trips_cols]
def get_percentage(numerator, denominator, precision = 2):
    """Express *numerator* as a percentage of *denominator*.

    :param precision: number of decimal places kept (default 2)
    :return: rounded percentage value as a float
    """
    ratio = float(numerator) / float(denominator)
    return round(ratio * 100, precision)
def _calculate_verification_code(hash: bytes) -> int:
"""
Verification code is a 4-digit number used in mobile authentication and mobile signing linked with the hash value to be signed.
See https://github.com/SK-EID/MID#241-verification-code-calculation-algorithm
"""
return ((0xFC & hash[0]) << 5) | (hash[-1] & 0x7F) | 173f9653f9914672160fb263a04fff7130ddf687 | 3,629,066 |
def add_message(user, text, can_dismiss=True):
    """Queue a message for the given user and persist it.

    :param user: the `KlaxerUser` the message belongs to
    :param text: the text of the message
    :param can_dismiss: (optional) whether the message can be dismissed
    :returns: the persisted `KlaxerMessage`
    :rtype: `klaxer.users.KlaxerMessage`
    """
    msg = KlaxerMessage(text=text, user=user, can_dismiss=can_dismiss)
    session.add(msg)
    session.commit()
    return msg
def dataQC(json_data):
    """Perform quality analysis on data.

    Returns a dict mapping each device to the (stringified) first elements
    of its readings whose second element is at or below the QC threshold.

    NOTE(review): relies on module-level ``check_lower``, ``abs_std`` and
    ``omit_lower``; their semantics are assumed from the names -- confirm.
    """
    # Threshold is invariant across devices/items, so compute it once.
    threshold = check_lower * abs_std[0 + omit_lower]
    flagged = {}
    for device, readings in json_data.items():
        for entry in readings:
            if entry[1] <= threshold:
                flagged.setdefault(device, []).append(str(entry[0]))
    return flagged
from typing import Union
from typing import Tuple
from typing import List
from typing import Optional
def get_interatomic_r(atoms: Union[Tuple[str], List[str]],
                      expand: Optional[float] = None) -> float:
    """
    Calculates bond length between two elements

    Args:
        atoms (list or tuple):
            pair of atoms for which the bond length needs to be calculated
        expand (float):
            optional multiplier applied to the covalent bond length when
            constructing a graph. For example, the two C atoms separated
            by 1.7 ang will be connected only if the expansion coefficient
            is 1.2 or larger. If omitted (None), no expansion is applied.

    Returns:
        Interatomic bond length
    """
    atom1, atom2 = element(atoms)
    # Covalent radii are tabulated in pm; dividing by 100 yields angstroms.
    r12 = (atom1.covalent_radius + atom2.covalent_radius) / 100
    if expand:
        r12 = expand * r12
    return r12
def annualized_return_nb(returns, ann_factor):
    """2-dim version of `annualized_return_1d_nb`.

    Applies the 1-dim routine to each column of ``returns`` and collects
    one value per column. (Kept as an explicit loop for numba friendliness.)
    """
    n_cols = returns.shape[1]
    out = np.empty(n_cols, dtype=np.float_)
    for c in range(n_cols):
        out[c] = annualized_return_1d_nb(returns[:, c], ann_factor)
    return out
import resource
def canon_ref(did: str, ref: str, delimiter: str = None, did_type: str = None):
    """
    Given a reference in a DID document, return it in its canonical form of a URI.

    Args:
        did: DID acting as the identifier of the DID document
        ref: reference to canonicalize, either a DID or a fragment pointing to a
            location in the DID doc
        delimiter: delimiter character marking fragment (default '#') or
            introducing identifier (';') against DID resource
        did_type: optional DID type inserted into the prefix, producing
            "did:mydata:<did_type>:..." instead of "did:mydata:..."

    Raises:
        ValueError: if `did` is not a valid DID, or `ref` starts with
            "did:mydata:" but does not resolve to a MyData DID.

    Returns:
        The canonical URI string.
    """
    if not ok_did(did):
        raise ValueError(
            "Bad DID {} cannot act as DID document identifier".format(did))
    if ok_did(ref):  # e.g., LjgpST2rjsoxYegQDRm7EL
        # NOTE(review): formats with the document DID (`did`), not `ref` --
        # confirm a bare-DID `ref` is always the document's own DID here.
        return "did:mydata:{}".format(did) if not did_type else "did:mydata:{}:{}".format(did_type, did)
    if ok_did(resource(ref, delimiter)):  # e.g., LjgpST2rjsoxYegQDRm7EL#keys-1
        # NOTE(review): the did_type branch formats with `did` and drops the
        # fragment carried by `ref`, unlike the non-typed branch -- confirm.
        return "did:mydata:{}".format(ref) if not did_type else "did:mydata:{}:{}".format(did_type, did)
    if ref.startswith(
        "did:mydata:"
    ):  # e.g., did:mydata:LjgpST2rjsoxYegQDRm7EL, did:mydata:LjgpST2rjsoxYegQDRm7EL#3
        mydata_did_pattern_match = MYDATA_DID_PATTERN.match(
            resource(ref, delimiter))
        if mydata_did_pattern_match:
            # Skip "did:mydata:<type>:" (13 chars) or "did:mydata:" (11 chars).
            prefix_end = 13 if mydata_did_pattern_match.group(
                'did_type') else 11
            rv = ref[prefix_end:]
            if ok_did(resource(rv, delimiter)):
                return ref
        raise ValueError(
            "Bad URI {} does not correspond to a MyData DID".format(ref))
    if urlparse(ref).scheme:  # e.g., https://example.com/messages/8377464
        return ref
    # Otherwise treat `ref` as a fragment/identifier relative to `did`.
    return "did:mydata:{}{}{}".format(did, delimiter if delimiter else "#", ref) if not did_type else "did:mydata:{}:{}{}{}".format(did_type, did, delimiter if delimiter else "#", ref)
def cars_produced_this_year() -> dict:
    """Fetch the ``cars_produced_this_year`` metric."""
    label = 'cars_produced_this_year'
    return get_metric_of(label=label)
import argparse
def parse_script_args():
    """Parse command-line arguments for the image-deletion script.

    Exits via argparse's error handling when a required argument is missing
    or when any image is not of the exact form ``repo:tag``.

    :return: parsed namespace with ``reg_ip`` (str) and ``images`` (list)
    """
    arg_parser = argparse.ArgumentParser(description="Delete images from filesystem.")
    arg_parser.add_argument('--reg_ip', type=str, required=True,
                            help='Registry host address e.g. 1.2.3.4')
    arg_parser.add_argument('images', type=str, nargs='+',
                            help='Images to delete in the form repo:tag, e.g. centos:latest')
    parsed = arg_parser.parse_args()
    # Each image must contain exactly one ':' separator.
    if any(img.count(':') != 1 for img in parsed.images):
        arg_parser.error("Malformed image(s)")
    return parsed
def merge_authentication_authorities(managed_auth_authority, user_record):
    """Merge managed and existing ``authentication_authority`` values.

    Managed items take precedence: existing items are appended only when
    their item type is not already covered by a managed item.
    """
    existing = get_attribute_for_user("authentication_authority", user_record)
    if existing:
        managed_types = [auth_authority_item_type(item)
                         for item in managed_auth_authority]
        # Preserve existing items whose type the managed set does not cover.
        managed_auth_authority.extend(
            item for item in existing
            if auth_authority_item_type(item) not in managed_types
        )
    return managed_auth_authority
def preprocess_pil_image(pil_img, color_mode='rgb', target_size=None):
    """Preprocesses the PIL image

    # Arguments
        pil_img: PIL Image
        color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
            The desired image format.
        target_size: Either `None` (default to original size)
            or tuple of ints `(img_height, img_width)`.

    # Returns
        Preprocessed PIL image
    """
    # Map the public color_mode names onto PIL mode strings.
    mode_for = {'grayscale': 'L', 'rgb': 'RGB', 'rgba': 'RGBA'}
    if color_mode not in mode_for:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
    wanted_mode = mode_for[color_mode]
    if pil_img.mode != wanted_mode:
        pil_img = pil_img.convert(wanted_mode)
    if target_size is not None:
        # PIL sizes are (width, height); target_size is (height, width).
        wanted_size = (target_size[1], target_size[0])
        if pil_img.size != wanted_size:
            pil_img = pil_img.resize(wanted_size, Image.NEAREST)
    return pil_img
import torch
def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
"""
Helper function for _handle_input.
Args:
c: Python scalar, torch scalar, or 1D torch tensor
Returns:
c_vec: 1D torch tensor
"""
if not torch.is_tensor(c):
c = torch.tensor(c, dtype=dtype, device=device)
if c.dim() == 0:
c = c.view(1)
if c.device != device or c.dtype != dtype:
c = c.to(device=device, dtype=dtype)
return c | 129a03900e8047a9c37400568d16316601e25671 | 3,629,076 |
import time
def current_time_hhmmss():
    """
    Fetches current time in GMT UTC+0.

    Note: fields are not zero-padded (e.g. "9:5:3"), preserving the
    historical output format of this helper.

    Returns:
        (str): Current time in GMT UTC+0
    """
    # Read the clock once so hour/minute/second come from the same instant;
    # the previous implementation called time.gmtime() three times and
    # could straddle a second (or minute/hour) boundary between calls.
    now = time.gmtime()
    return "{}:{}:{}".format(now.tm_hour, now.tm_min, now.tm_sec)
def equalize_adaptive_clahe(image, ntiles=8, clip_limit=0.01):
    """Return contrast limited adaptive histogram equalized image.
    The return value is normalised to the range 0 to 1.
    :param image: numpy array or :class:`jicimagelib.image.Image` of dtype float
    :param ntiles: number of tile regions
    :param clip_limit: clipping limit in range 0 to 1,
                       higher values give more contrast
    """
    # Convert input to a float image for skimage.
    skimage_float_im = normalise(image)
    # NOTE(review): np.all() is True when every pixel is non-zero; this only
    # detects "no variation" if `normalise` maps the minimum to 0 -- confirm
    # against normalise's contract.
    if np.all(skimage_float_im):
        raise(RuntimeError("Cannot equalise when there is no variation."))
    # NOTE(review): ntiles_x/ntiles_y were removed in newer scikit-image
    # releases (replaced by kernel_size); this call assumes an old version.
    normalised = skimage.exposure.equalize_adapthist(skimage_float_im,
        ntiles_x=ntiles, ntiles_y=ntiles, clip_limit=clip_limit)
    # Sanity-check the documented 0..1 output range.
    assert np.max(normalised) == 1.0
    assert np.min(normalised) == 0.0
    return normalised
def suggest_parameters_DRE_NMNIST(trial,
                                  list_lr, list_bs, list_opt,
                                  list_wd, list_multLam, list_order):
    """ Suggest hyperparameters.
    Args:
        trial: A trial object for optuna optimization.
        list_lr: A list of floats. Candidates of learning rates.
        list_bs: A list of ints. Candidates of batch sizes.
        list_opt: A list of strings. Candidates of optimizers.
        list_wd: A list of floats. weight decay
        list_multLam: A list of floats. Prefactor of the second term of BARR.
        list_order: A list of integers. Order of SPRT-TANDEM.
    Returns:
        learning_rate: A float.
        batch_size: An int.
        name_optimizer: A string.
        weight_decay: A float.
        param_multLam: A float.
        order_sprt: An int.
    """
    # YAML loaders may read values like 1e-2 as strings; coerce to float.
    # Build a new list instead of mutating the caller's list in place
    # (the previous implementation modified `list_lr` as a side effect).
    list_lr = [float(lr) for lr in list_lr]
    learning_rate = trial.suggest_categorical('learning_rate', list_lr)
    batch_size = trial.suggest_categorical('batch_size', list_bs)
    name_optimizer = trial.suggest_categorical('optimizer', list_opt)
    weight_decay = trial.suggest_categorical('weight_decay', list_wd)
    param_multLam = trial.suggest_categorical('param_multLam', list_multLam)
    order_sprt = trial.suggest_categorical('order_sprt', list_order)
    return learning_rate, batch_size, name_optimizer,\
        weight_decay, param_multLam, order_sprt
def format_seconds(seconds, hide_seconds=False):
    """
    Render an amount of seconds as a human-readable string such as
    ``"1d 2h 3m 4s"``. Values of 60 or less are returned as the bare
    number; when *hide_seconds* is true the trailing seconds component
    is omitted.
    """
    if seconds <= 60:
        return str(seconds)
    units = (
        ('y', 31557600),
        ('d', 86400),
        ('h', 3600),
        ('m', 60),
        ('s', 1),
    )
    parts = []
    remaining = seconds
    for suffix, span in units:
        if remaining >= span and not (hide_seconds and suffix == 's'):
            parts.append(str(int(remaining / span)) + suffix)
            remaining = remaining % span
    return " ".join(parts)
def _contains_atom(example, atoms, get_atoms_fn):
"""Returns True if example contains any atom in atoms."""
example_atoms = get_atoms_fn(example)
for example_atom in example_atoms:
if example_atom in atoms:
return True
return False | c9e60d956585c185f9fb62cc0d11f169e6b79f88 | 3,629,081 |
import yaml
import sys
def load_config_file(filename):
    """Load and validate the YAML configuration file.

    Exits the process (status 1) when the file is missing, cannot be
    parsed, or does not have exactly the keys of ``default_config``.

    :param filename: path to the YAML configuration file
    :return: the parsed configuration mapping
    """
    try:
        with open(filename, 'r') as f:
            # NOTE: yaml.load without an explicit Loader can construct
            # arbitrary Python objects; prefer yaml.safe_load if the file
            # may come from an untrusted source.
            config = yaml.load(f)
        print('Using configuration at {0}'.format(filename))
        if not config.keys() == default_config.keys():
            print('Invalid configuration file! (either missing or too many fields!)')
            sys.exit(1)
        return config
    except yaml.YAMLError:
        # YAMLError covers scanner/parser/constructor errors; previously
        # only ConstructorError was caught, so plain syntax errors crashed
        # instead of producing this message.
        print('Failed to parse configuration file! At {0}'.format(filename))
        sys.exit(1)
    except FileNotFoundError:
        print('Found no file at {0}'.format(filename))
        sys.exit(1)
import torch
import math
def kllossGn2(o, l: 'xtrue'):
    """KL loss for Gaussian-mixture output, 2D, precision-matrix parameters.

    `o` packs 6 values per mixture component along dim 1 (strided slices
    below): [mu_x, a, mu_y, b, c, w] with Fxx=a^2, Fyy=b^2, Fxy bounded via
    atan(c); `l` holds the true (x, y) targets per row.
    Returns (mean negative log-likelihood, dx, dy).
    """
    dx = o[:,0::6] - l[:,0,np.newaxis]
    dy = o[:,2::6] - l[:,1,np.newaxis]
    # Diagonal precision entries are squared, hence non-negative, as
    # required for a positive-definite precision matrix.
    Fxx = o[:,1::6]**2
    Fyy = o[:,3::6]**2
    # Off-diagonal bounded so |Fxy| < sqrt(Fxx*Fyy): atan/(pi/2) maps the
    # raw parameter into (-1, 1), keeping the 2x2 matrix positive definite.
    Fxy = torch.atan(o[:,4::6]) / (0.5*math.pi) * o[:,1::6] * o[:,3::6]
    # Mixture weights via softmax over components.
    weight = torch.softmax(o[:,5::6], dim=1)
    # omitting the sqrt(4*math*pi) since it's common to all templates
    return -torch.mean(torch.logsumexp(torch.log(weight) - 0.5*(Fxx*dx*dx + Fyy*dy*dy + 2*Fxy*dx*dy) + 0.5*torch.log(Fxx*Fyy - Fxy*Fxy), dim=1)), dx, dy
def IngestApprovalDelta(cnxn, user_service, approval_delta, setter_id, config):
  """Ingest a protoc ApprovalDelta and create a protorpc ApprovalDelta.

  Args:
    cnxn: database connection, passed through to user/field ingestion.
    user_service: service used to resolve user references to user IDs.
    approval_delta: the protoc ApprovalDelta to convert.
    setter_id: user ID recorded as the setter in the resulting delta.
    config: project config supplying the field definitions.

  Returns:
    A protorpc ApprovalDelta equivalent to the input.
  """
  # Map lowercased field names to field IDs for the fields_clear lookup.
  fids_by_name = {fd.field_name.lower(): fd.field_id for
                  fd in config.field_defs}
  approver_ids_add = IngestUserRefs(
      cnxn, approval_delta.approver_refs_add, user_service, autocreate=True)
  approver_ids_remove = IngestUserRefs(
      cnxn, approval_delta.approver_refs_remove, user_service, autocreate=True)
  labels_add, labels_remove = [], []
  # TODO(jojwang): monorail:4673, validate enum values all belong to approval.
  # Enum-typed field values are represented as labels internally.
  field_vals_add, field_vals_remove = _RedistributeEnumFieldsIntoLabels(
      labels_add, labels_remove,
      approval_delta.field_vals_add, approval_delta.field_vals_remove,
      config)
  sub_fvs_add = IngestFieldValues(cnxn, user_service, field_vals_add, config)
  sub_fvs_remove = IngestFieldValues(
      cnxn, user_service, field_vals_remove, config)
  # Silently drop clears that name an unknown field.
  sub_fields_clear = [fids_by_name.get(clear.field_name.lower()) for
                      clear in approval_delta.fields_clear
                      if clear.field_name.lower() in fids_by_name]
  # protoc ENUMs default to the zero value (in this case: NOT_SET).
  # NOT_SET should only be allowed when an issue is first created.
  # Once a user changes it to something else, no one should be allowed
  # to set it back.
  status = None
  if approval_delta.status != issue_objects_pb2.NOT_SET:
    status = IngestApprovalStatus(approval_delta.status)
  return tracker_bizobj.MakeApprovalDelta(
      status, setter_id, approver_ids_add, approver_ids_remove,
      sub_fvs_add, sub_fvs_remove, sub_fields_clear, labels_add, labels_remove)
def get_diff_level(files):
    """Return the lowest hierarchical file parts level at which there are
    differences among file paths, or None when no compared level differs."""
    for level, segments in enumerate(zip(*(f.parts for f in files))):
        if len(set(segments)) > 1:
            return level
    return None
def get_max(list_tuples):
    """
    Return the tuple with the highest first element.

    Ties are broken in favor of the earliest tuple in the list, matching
    the original first-match scan. An empty list yields the sentinel
    ``(-2, -2, -2, -2)``.
    """
    if not list_tuples:
        return (-2, -2, -2, -2)
    # max() with a key returns the first maximal element, so this is a
    # single pass instead of the previous max-then-scan two-pass version.
    return max(list_tuples, key=lambda tup: tup[0])
def prepare_observation_lst(observation_lst):
    """Prepare the observations to satisfy the input format of torch.

    [B, S, W, H, C] -> [B, S x C, W, H]
    (batch, stack num, width, height, channel)
    """
    stacked = np.array(observation_lst, dtype=np.uint8)
    # Move the channel axis next to the stack axis, then fold them together.
    stacked = np.moveaxis(stacked, -1, 2)
    batch = stacked.shape[0]
    return stacked.reshape((batch, -1, stacked.shape[-2], stacked.shape[-1]))
def test_problem_builder(name: str, n_of_variables: int = None, n_of_objectives: int = None) -> MOProblem:
    """Build test problems. Currently supported: ZDT1-4, ZDT6, and DTLZ1-7.
    Args:
        name (str): Name of the problem in all caps. For example: "ZDT1", "DTLZ4", etc.
        n_of_variables (int, optional): Number of variables. Required for DTLZ problems,
            but can be skipped for ZDT problems as they only support one variable value.
        n_of_objectives (int, optional): Required for DTLZ problems,
            but can be skipped for ZDT problems as they only support one variable value.
    Raises:
        ProblemError: When one of many issues occur while building the MOProblem
            instance.
    Returns:
        MOProblem: The test problem object
    """
    problems = {
        "ZDT1": zdt.ZDT1,
        "ZDT2": zdt.ZDT2,
        "ZDT3": zdt.ZDT3,
        "ZDT4": zdt.ZDT4,
        "ZDT5": zdt.ZDT5,
        "ZDT6": zdt.ZDT6,
        "DTLZ1": dtlz.DTLZ1,
        "DTLZ2": dtlz.DTLZ2,
        "DTLZ3": dtlz.DTLZ3,
        "DTLZ4": dtlz.DTLZ4,
        "DTLZ5": dtlz.DTLZ5,
        "DTLZ6": dtlz.DTLZ6,
        "DTLZ7": dtlz.DTLZ7,
    }
    # NOTE(review): "ZDT5" is registered above but missing from num_var, so
    # name="ZDT5" with n_of_variables=None raises KeyError below -- confirm
    # whether ZDT5 is meant to be supported (the docstring omits it).
    num_var = {"ZDT1": 30, "ZDT2": 30, "ZDT3": 30, "ZDT4": 10, "ZDT6": 10}
    if not (name in problems.keys()):
        msg = "Specified Problem not yet supported.\n The supported problems are:" + str(problems.keys())
        raise ProblemError(msg)
    if "ZDT" in name:
        # ZDT problems have a fixed variable count and exactly 2 objectives.
        if n_of_variables is None:
            n_of_variables = num_var[name]
        if n_of_objectives is None:
            n_of_objectives = 2
        if not (n_of_variables == num_var[name]):
            msg = (
                name
                + " problem has been limited to "
                + str(num_var[name])
                + " variables. Number of variables recieved = "
                + str(n_of_variables)
            )
            raise ProblemError(msg)
        if not (n_of_objectives == 2):
            msg = (
                "ZDT problems can only have 2 objectives. " + "Number of objectives recieved = " + str(n_of_objectives)
            )
            raise ProblemError(msg)
        obj_func = problems[name]()
    elif "DTLZ" in name:
        # DTLZ problems are parameterized by both counts; both are required.
        if (n_of_variables is None) or (n_of_objectives is None):
            msg = "Please provide both number of variables and objectives" + " for the DTLZ problems"
            raise ProblemError(msg)
        obj_func = problems[name](n_of_objectives, n_of_variables)
    else:
        msg = "How did you end up here?"
        raise ProblemError(msg)
    # Variable bounds come from the optproblems test-function object.
    lower_limits = obj_func.min_bounds
    upper_limits = obj_func.max_bounds
    var_names = ["x" + str(i + 1) for i in range(n_of_variables)]
    obj_names = ["f" + str(i + 1) for i in range(n_of_objectives)]
    variables = variable_builder(
        names=var_names,
        initial_values=lower_limits,
        lower_bounds=lower_limits,
        upper_bounds=upper_limits,
    )
    # Because optproblems can only handle one objective at a time
    def modified_obj_func(x):
        # Accept either a single decision vector or a batch of vectors,
        # as a list or an ndarray, and always return a list of results.
        if isinstance(x, list):
            if len(x) == n_of_variables:
                return [obj_func(x)]
            elif len(x[0]) == n_of_variables:
                return list(map(obj_func, x))
        else:
            if x.ndim == 1:
                return [obj_func(x)]
            elif x.ndim == 2:
                return list(map(obj_func, x))
        raise TypeError("Unforseen problem, contact developer")
    objective = VectorObjective(name=obj_names, evaluator=modified_obj_func)
    problem = MOProblem([objective], variables, None)
    return problem
def get_coord_y(x, y):
    """
    Function returns the y value of the coordinate

    :param x: x value of coordinate
    :param y: y value of coordinate
    :return: y value of coordinate
    """
    return Coordinates(x, y).get_y()
import numpy
def calc_som2_flow(som2c_1, cmix, defac):
    """Calculate the C that flows from surface SOM2 to soil SOM2.

    Some C flows from surface SOM2 to soil SOM2 via mixing. This flow is
    controlled by the parameter cmix.

    Parameters:
        som2c_1 (numpy.ndarray): state variable, C in surface SOM2
        cmix (numpy.ndarray): parameter, amount of C flowing via mixing
        defac (numpy.ndarray): derived, decomposition factor

    Returns:
        tcflow, C flowing to soil SOM2 via mixing
    """
    # A pixel is valid only when none of the inputs carry a nodata marker.
    valid_mask = (
        (~numpy.isclose(som2c_1, _SV_NODATA)) &
        (cmix != _IC_NODATA) &
        (defac != _TARGET_NODATA))
    # Invalid pixels keep the nodata value.
    tcflow = numpy.full(som2c_1.shape, _IC_NODATA, dtype=numpy.float32)
    # 0.020833 is the flow-rate constant (~1/48) -- presumably a per-time-step
    # scaling from the Century model; confirm against the parameter docs.
    tcflow[valid_mask] = (
        som2c_1[valid_mask] * cmix[valid_mask] * defac[valid_mask] *
        0.020833)
    return tcflow
def cdlseparatinglines(opn, high, low, close):
    """Separating Lines:

    Bullish Separating Lines Pattern:
    With just two candles - one black (or red) and one white (or green) - the Bullish Separating
    Lines pattern is easy to learn and spot. To confirm its presence, seek out the following
    criteria:
    First, the pattern must begin with a clear and defined uptrend. Second, a long bearish candle
    (black or red) must appear. Third, the next day must be defined by a long bullish candle (white
    or green), which will open at the same place the first day opened. The white candle should not
    have a lower wick, which means that the price can not drop below the opening price throughout
    the course of the session.

    Bearish Separating Lines Pattern:
    The Bearish Separating Lines pattern encompasses just two candles. To spot it, look for the
    following criteria:
    First, the pattern must begin with a clear and defined downtrend. Second, a long bullish candle
    (white or green) must appear. Third, the next day must be defined by a long bearish candle (black
    or red), which will open at the same place as the first day opened. Ideally, the second candle
    will not have an upper wick.

    :param opn: open prices (array-like of floats, per TA-Lib conventions)
    :param high: high prices (array-like of floats)
    :param low: low prices (array-like of floats)
    :param close: close prices (array-like of floats)
    :return: result of TA-Lib's CDLSEPARATINGLINES (per-bar pattern signal)
    """
    return CDLSEPARATINGLINES(opn, high, low, close)
def get_context(file_in):
    """Get genomic context from bed file.

    Reads a tab-separated file whose first line is a header; each
    subsequent line must start with contig, position and context columns.
    Records whose context is '.' are skipped.

    :param file_in: path to the bed-like input file
    :return: dict mapping contig -> {position (str) -> context (str)}
    """
    output_dict = {}
    # Use a context manager so the handle is always closed (the previous
    # implementation leaked the open file handle).
    with open(file_in, 'r') as handle:
        handle.readline()  # skip the header line
        for line in handle:
            contig, pos, context = line.rstrip('\n').split('\t')[:3]
            if context == '.':
                continue
            output_dict.setdefault(contig, {})[pos] = context
    return output_dict
def get_rating_users_total(contest_id: ContestID) -> int:
    """Return the number of unique users that have rated bungalows in
    this contest.
    """
    # Join User -> Rating -> Contestant so rows can be filtered by contest;
    # DISTINCT collapses users who rated more than one bungalow.
    return User.query \
        .join(Rating) \
        .join(Contestant) \
        .filter(Contestant.contest_id == contest_id) \
        .distinct() \
        .count()
def read_from_file(filename):
    """
    Reads the set of known plaintexts from the given file.

    Candidates start at line 5 (index 4) and repeat every 4 lines; files
    with 5 or fewer lines yield no candidates.
    """
    with open(filename) as fh:
        lines = fh.readlines()
    if len(lines) <= 5:
        return []
    # from first candidate, to the end of the file, counting in
    # increments of lines between candidates
    return [Candidate(lines[idx].strip().lower())
            for idx in range(4, len(lines), 4)]
def galactic_offsets_to_celestial(RA, Dec, glongoff=3, glatoff=0):
    """
    Converts offsets in Galactic coordinates to celestial

    The defaults were chosen by Pineda for Galactic plane survey
    @param RA : FK5 right ascension in degrees
    @type  RA : float
    @param Dec : FK5 declination in degrees
    @type  Dec : float
    @param glongoff : Galactic longitude (degrees)
    @type  glongoff : float
    @param glatoff : Galactic latitude offset (degrees)
    @type  glatoff : float
    @return: (RAoff, Decoff) offsets in degrees relative to the input RA/Dec
    """
    # Initialize SkyCoord Object
    gc = APc.SkyCoord(ra=RA*u.degree, dec=Dec*u.degree, frame='fk5')
    # NOTE(review): `glongoff` is added to gc.galactic.b (latitude) and
    # `glatoff` to gc.galactic.l (longitude), and the sums are then passed
    # as b and l respectively -- the parameter names appear swapped
    # relative to their effect; confirm intended behavior.
    glongOFF=gc.galactic.b.value + glongoff
    glat=gc.galactic.l.value + glatoff
    # Convert reference position to RA/DEC
    radecoff=APc.SkyCoord(l=glat*u.degree, b=glongOFF*u.degree, frame='galactic')
    # Offsets of the shifted position relative to the original RA/Dec.
    RAoff = radecoff.fk5.ra.value - RA
    Decoff = radecoff.fk5.dec.value - Dec
    return RAoff, Decoff
import torch
from typing import Tuple
from typing import List
def compute_regularizer_term(model: torch.nn.Module, criterion: torch.nn.modules.loss.CrossEntropyLoss,
                             train_data: Tuple[torch.Tensor, torch.Tensor],
                             valid_data: Tuple[torch.Tensor, torch.Tensor],
                             weight_decays: List[torch.Tensor], zeta: float,
                             lr: float) -> torch.Tensor:
    """Computes the value of the regularizer in Eq. 7; called at each step of
    the inner optimization.

    Note: gradients here can be tracked using the Higher library.
    Note: ``zeta`` and ``lr`` are currently unused; they are kept in the
    signature for compatibility with existing callers.

    Args:
        model: any module mapping images to logits (generalized from the
            original LinearNet-only annotation; only ``parameters()`` and
            ``__call__`` are used).
        criterion: classification loss used for both data splits.
        train_data / valid_data: (images, labels) pairs.
        weight_decays: per-parameter weight-decay tensors, aligned with
            ``model.parameters()``.

    Returns:
        A scalar tensor: the squared 2-norm of the difference between the
        (weight-decayed) training gradient and the validation gradient
        (Y in Algorithm 3.1). The previous return annotation claimed
        Tuple[List[Tensor], Tensor], which did not match the actual value.
    """
    train_images, train_labels = train_data
    valid_images, valid_labels = valid_data
    # Training and validation gradients, kept differentiable
    # (create_graph=True) so the regularizer itself can be backpropagated.
    train_loss = criterion(model(train_images), train_labels)
    valid_loss = criterion(model(valid_images), valid_labels)
    train_grads = torch.autograd.grad(
        train_loss, model.parameters(), retain_graph=True, create_graph=True)
    valid_grads = torch.autograd.grad(
        valid_loss, model.parameters(), retain_graph=True, create_graph=True)
    norm = 0
    for p, wd, t_grad, v_grad in zip(model.parameters(), weight_decays, train_grads, valid_grads):
        # Weight decay contributes wd * p to the training gradient.
        diff = (t_grad + wd * p.data) - v_grad
        norm += (diff ** 2).sum()
    return norm
def get_to_and(num_bits: int) -> np.ndarray:
    """
    Overview:
        Build a (1, num_bits) array of descending powers of two, element i
        holding 2**(num_bits - 1 - i). Used by ``batch_binary_encode`` to
        make a bit-wise ``and`` mask.
    Arguments:
        - num_bits (:obj:`int`): length of the generated array
    Returns:
        - to_and (:obj:`np.ndarray`): shape (1, num_bits) array of powers of two
    """
    exponents = np.arange(num_bits - 1, -1, -1)
    return (2 ** exponents)[np.newaxis, :]
def in_role_list(role_list):
    """Requires user is associated with any role in the list.

    :param role_list: role names; each must exist exactly once in the Role
        table, otherwise ValueError is raised while building the predicate.
    :return: a predicate ``f(intervention, user)`` returning True (and
        logging the check) when the user holds at least one listed role,
        False otherwise.
    """
    roles = []
    for role_name in role_list:
        try:
            role = Role.query.filter_by(
                name=role_name).one()
            roles.append(role)
        except NoResultFound:
            raise ValueError("role '{}' not found".format(role_name))
        except MultipleResultsFound:
            # Fixed: the two adjacent string literals previously produced
            # "...'<name>'found" with no separating space.
            raise ValueError("more than one role named '{}' "
                             "found".format(role_name))
    required = set(roles)
    def user_has_given_role(intervention, user):
        has = set(user.roles)
        if has.intersection(required):
            _log(result=True, func_name='in_role_list', user=user,
                 intervention=intervention.name)
            return True
        # Explicit False (was an implicit None); nothing is logged on a miss.
        return False
    return user_has_given_role
def handle_not_start(fsm_ctx):
    """
    Reject an install attempt because another install operation is running.

    :param fsm_ctx: FSM context (unused)
    :return: False
    """
    # Note: `plugin_ctx` is a module-level object; no `global` declaration
    # is needed for read-only access.
    plugin_ctx.error("Could not start this install operation because an install operation is still in progress")
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.