content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def apply_voigt1d(fid, pks, snorms, a0, b0, a, b, zp, bw, flo=None,
                  up=True, link_gg=True, link_ll=True, dx_snr=None, dx_snr_mode='outside',
                  f_cutoff=0., ftype='voigt', outfile=''):
    """ Apply Voigt-1D window function, fit the spectrum, and return the treated SnR & FWHM
    :arguments
        fid: np1darray          FID signal
        pks: list of float      peak positions
        snorms: float           The normalized intensities of the peaks
        a0: float               FID initial a0
        b0: float               FID initial b0
        a: float                Voigt-1D window parameter a
        b: float                Voigt-1D window parameter b
        zp: int                 zero-padding length (None -> derived from a0, b0)
        bw: float               chirp bandwidth
        flo: float              LO frequency
        up: bool                is chirp going up
        link_gg: bool           link all Gaussian FWHM
        link_ll: bool           link all Lorentzian FWHM
        dx_snr: float           +/- dx_snr from pk to calculate noise & SnR
        dx_snr_mode: str
            'inside'    take pk - dx_snr < x < pk + dx_snr as noise range
            'outside'   take x < pk - dx_snr || x > pk + dx_snr as noise range
        f_cutoff: float         low frequency cutoff
        outfile: str            filename to save fit
        ftype: str              lineshape function type
            'voigt' | 'gaussian' | 'lorentzian' | 'complex-voigt'
    :returns
        snr: float      SnR of the spectral line
        vv_fit: float   FWHM of the spectral line determined by the fit
    """
    t = np.arange(len(fid)) * 1e-3
    # Voigt-1D window; a == b == 0 means "no window"
    if a == 0 and b == 0:
        wf = np.ones_like(t)
    else:
        wf = np.exp(- a * t**2 - b * t) * t
    if zp is None:
        zp = find_fid_tr_len(a0, b0)
    y = np.abs(np.fft.rfft(fid * wf, zp))
    x = np.fft.rfftfreq(zp) * 1e3
    # convert to IF normalized spectrum
    xc, yc = to_mol_freq(x, y / np.max(y), bw, up=up, flo=flo, f_cutoff=f_cutoff)
    # initial guesses: Gaussian / Lorentzian FWHM from window + FID parameters
    gg0 = 2 * np.sqrt(np.log(2) * (a + a0)) / np.pi
    ll0 = max((b + b0) / np.pi, 0)
    vv_ab = fwhm_ab(a0 + a, b0 + b)
    if dx_snr:
        if dx_snr_mode == 'outside':
            # noise = std of the fit residual outside every pk +/- dx_snr window
            idx = np.ones(len(xc), dtype='bool')
            for _pk in pks:
                idx = np.logical_and(idx, np.logical_or(xc < _pk - dx_snr, xc > _pk + dx_snr))
            # fit this xc & yc
            res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                               link_ll=link_ll, ftype=ftype)
            noise = np.std(res.residual[idx])
        elif dx_snr_mode == 'inside':  # only fit the data inside the windows
            # BUGFIX: the mask must start all-False and OR the windows in;
            # starting from all-True (as before) made the cut a no-op and
            # silently kept every data point.
            idx = np.zeros(len(xc), dtype='bool')
            for _pk in pks:
                idx = np.logical_or(idx, np.logical_and(xc > _pk - dx_snr, xc < _pk + dx_snr))
            # cut xc & yc
            xc = xc[idx]
            yc = yc[idx]
            # fit this xc & yc
            res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                               link_ll=link_ll, ftype=ftype)
            noise = np.std(res.residual)
        else:
            raise ValueError('Unknown dx_snr_mode string')
    else:
        res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                           link_ll=link_ll, ftype=ftype)
        noise = np.std(res.residual)
    # SnR: peak height above the fitted baseline p0, over the noise level
    snr = (np.max(yc) - res.params['p0'].value) / noise
    # collect fitted parameters (value + stderr) for printing / file header
    par_list = []
    for name, p in res.params.items():
        if isinstance(p.stderr, float):
            par_list.append('{:s}={:>8.4f}({:>8.4f})'.format(name, p.value, p.stderr))
        else:
            par_list.append('{:s}={:>8.4f}({:^8s})'.format(name, p.value, 'nan'))
    if isinstance(res.redchi, float):
        par_list.append('redchi={:>7.4f}'.format(res.redchi))
    else:
        par_list.append('redchi=nan')
    # FWHM derived from the fitted line-shape parameters
    if ftype == 'voigt':
        vv_fit = fwhm_voigt_fit(res.params['ll0'].value, res.params['gg0'].value)
    elif ftype == 'complex-voigt':
        vv_fit = fwhm_complex_voigt_fit(res.params['ll0'].value, res.params['gg0'].value)
    elif ftype == 'gaussian':
        vv_fit = res.params['gg0'].value
    elif ftype == 'lorentzian':
        vv_fit = res.params['ll0'].value
    else:
        raise ValueError('Unknown function type')
    print('a={:>6.4f}'.format(a), 'b={:>7.4f}'.format(b), 'vv_ab={:>6.4f}'.format(vv_ab),
          'vv_fit={:>6.4f}'.format(vv_fit), 'snr={:>6.2f}'.format(snr), ' '.join(par_list))
    if outfile:     # save fit
        # per-peak model curves evaluated on the (possibly cut) frequency axis
        yfits = []
        for i in range(len(pks)):
            if ftype == 'voigt':
                _x0 = res.params['x' + str(i)].value
                _gg = res.params['gg' + str(i)].value
                _ll = res.params['ll' + str(i)].value
                _s = res.params['s' + str(i)].value
                yfits.append(voigt(xc - _x0, _gg, _ll) * _s)
            elif ftype == 'complex-voigt':
                _x0 = res.params['x' + str(i)].value
                _gg = res.params['gg' + str(i)].value
                _ll = res.params['ll' + str(i)].value
                _s = res.params['s' + str(i)].value
                yfits.append(complex_voigt(xc - _x0, _gg, _ll) * _s)
            elif ftype == 'gaussian':
                _x0 = res.params['x' + str(i)].value
                _gg = res.params['gg' + str(i)].value
                _s = res.params['s' + str(i)].value
                _y = np.exp(-((xc - _x0) / _gg)**2 * 4 * np.log(2)) * _s
                yfits.append(_y)
            elif ftype == 'lorentzian':
                _x0 = res.params['x' + str(i)].value
                _ll = res.params['ll' + str(i)].value
                _s = res.params['s' + str(i)].value
                _y = _ll / (2 * np.pi * ((xc - _x0)**2 + _ll**2 / 4))
                yfits.append(_y)
            else:
                raise ValueError('Unknown function type')
        if len(pks) == 1:
            outdata = np.column_stack((xc, yc, res.residual))
            outfmt = ['%6.2f', '%9.6f', '%9.6f']
        else:
            outdata = np.column_stack((xc, yc, res.residual, *yfits))
            outfmt = ['%6.2f', '%9.6f', '%9.6f'] + ['%9.6f'] * len(pks)
        hd_list = ['a0={:>6.4f} b0={:>6.4f} FLO={:g}MHz {:s}'.format(a0, b0, flo, 'UP' if up else 'DOWN'),
                   'Voigt-1D: a={:>6.4f} b={:>6.4f}'.format(a, b),
                   'SNR={:>6.2f} FWHM_AB={:>6.4f} FWHM_FIT={:>6.4f}'.format(snr, vv_ab, vv_fit),
                   ' | '.join(par_list),
                   '{:>5s} {:^10s} {:^10s}'.format('freq', 'inten', 'residual')
                   ]
        np.savetxt(outfile, outdata, fmt=outfmt, header='\n'.join(hd_list))
    return snr, vv_fit
from typing import Optional
def parse_options(dict_in: Optional[dict], defaults: Optional[dict] = None):
    """
    Normalize an options mapping (e.g. for kwargs handling).

    Makes a shallow copy of ``dict_in`` so the caller's dict is never
    mutated, maps ``None`` to a fresh empty dict (useful because mutable
    dicts cannot be argument defaults), and fills in any keys missing
    from ``dict_in`` with the values from ``defaults``.

    Parameters
    ----------
    dict_in : dict or None
    defaults : dict or None

    Returns
    -------
    dict
        A new dict safe for the caller to modify.
    """
    options = {} if dict_in is None else dict(dict_in)
    if defaults:
        for key, value in defaults.items():
            options.setdefault(key, value)
    return options
import copy
def build_optimizer(model, optimizer_cfg):
    """Build an optimizer for ``model`` from a config dict.

    Args:
        model (:obj:`nn.Module`): The model whose parameters are optimized.
        optimizer_cfg (dict): Optimizer config. Positional fields:
            - type: class name of the optimizer.
            - lr: base learning rate.
            Optional fields:
            - any arguments of the corresponding optimizer type
              (weight_decay, momentum, ...).
            - constructor: name of the optimizer-constructor class
              (default 'DefaultOptimizerConstructor').
            - paramwise_options: dict mapping parameter-name regexes to
              per-parameter option dicts (lr, lr_mult, momentum,
              momentum_mult, weight_decay, weight_decay_mult).

    Returns:
        torch.optim.Optimizer: The initialized optimizer.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> optimizer = build_optimizer(model, optimizer_cfg)
    """
    # deep-copy so pop() below does not mutate the caller's config
    cfg = copy.deepcopy(optimizer_cfg)
    constructor_type = cfg.pop('constructor', 'DefaultOptimizerConstructor')
    paramwise_cfg = cfg.pop('paramwise_options', None)
    constructor = build_optimizer_constructor(
        dict(
            type=constructor_type,
            optimizer_cfg=cfg,
            paramwise_cfg=paramwise_cfg))
    return constructor(model)
def test_normal_sw(data):
    """Shapiro-Wilk normality test on the standardized sample.

    The data are centred on their mean and scaled by std/sqrt(n) before
    being passed to scipy.stats.shapiro.
    """
    scale = np.std(data) / np.sqrt(len(data))
    centered = data - np.mean(data)
    return st.shapiro(centered / scale)
from typing import Dict
from typing import Any
def dict_to_namespace(cfg_dict: Dict[str, Any]) -> 'Namespace':
    """Recursively convert a nested dictionary into a nested Namespace.

    Only dicts whose keys are all strings are converted (including dicts
    found inside lists); the input is deep-copied first so the caller's
    dictionary is never mutated.
    """
    def _convert(node: dict) -> 'Namespace':
        for key, value in node.items():
            if isinstance(value, dict) and all(isinstance(k, str) for k in value):
                node[key] = _convert(value)
            elif isinstance(value, list):
                for pos, item in enumerate(value):
                    if isinstance(item, dict) and all(isinstance(k, str) for k in item):
                        value[pos] = _convert(item)
        return Namespace(**node)
    return _convert(deepcopy(cfg_dict))
import click
def country_currency(code, country_name):
    """Print information about a country's currency.

    Looks the country up in ``currency_data``; prints the currency name and
    symbol in green, and (when ``code`` is truthy) its short code as well.
    Prints an error in red when the country is unknown.
    """
    match = return_country(currency_data, country_name)
    if not match:
        # unknown country: report and bail out early
        return click.secho('Country does not exist. Perhaps, write the full name?', fg='red')
    _, currency_name, the_code, symbol = match
    click.secho("The currency is: {}({})".format(currency_name, symbol), fg='green')
    if code:
        click.secho("The currency short code is: {}".format(the_code), fg='green')
import numpy
import math
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # work on a transposed copy so rows index homogeneous coordinates
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    # normalize so the homogeneous coordinate is exactly 1
    M /= M[3, 3]
    # P: the matrix with the perspective column zeroed out
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("Matrix is singular")
    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]
    # perspective partition: solve from the last column when non-trivial
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
    # translation is the last row (matrix was transposed above)
    translate = M[3, :3].copy()
    M[3, :3] = 0
    # Gram-Schmidt-style factorization of the upper 3x3 into
    # scale, shear, and an orthonormal rotation basis (in `row`)
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    shear[1:] /= scale[2]
    # negative determinant means a left-handed basis: flip scale and rotation
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        scale *= -1
        row *= -1
    # Euler angles (static x, y, z convention) from the rotation rows;
    # the cos(angles[1]) == 0 branch handles gimbal lock
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def GetMaxHarmonic( efit ):
    """Determine the highest order of harmonic amplitudes in an ellipse-fit object.

    Columns named "aiM_err" (M = harmonic order) are assumed to exist in
    efit.colNames, e.g. "ai3_err", "ai4_err", ..., up to "aiM_err" where M
    is the maximum harmonic number.
    """
    # Slice the order out directly: cname[2:-4] drops the "ai" prefix and
    # "_err" suffix.  The previous cname.rstrip("_err") stripped a trailing
    # *character set* ('_', 'e', 'r'), which only worked by accident.
    momentNums = [int(cname[2:-4]) for cname in efit.colNames
                  if cname.startswith("ai") and cname.endswith("_err")]
    return max(momentNums)
import math
def quaternary_tournament(population, scores, next_gen_number, random_seed=42):
"""Selects next generation using quaternary tournament selection for
single objective.
This function implements quaternary tournament selection to select a
specific number of members for the next generation.
Args:
population: A list containing members of current population with
parents and children combined.
scores: A list containing scores of each member of the
population. The format is:
[member 1 score, member 2 score, ....]
The order of members has to be consistent with population
argument.
next_gen_number: An int indicating the number of members to be
selected for the next generation.
random_seed: An int indicating the seed of the random number
generator.
Returns:
A list of members for the next generation of population.
"""
np.random.seed(random_seed)
indices = list(range(len(population)))
indices_array = np.array(indices)
selected = []
for i in range(next_gen_number):
best_score = math.inf
picked = None
selected_indices = np.random.choice(indices_array, size=4)
for indx in selected_indices:
if scores[indx] < best_score:
best_score = scores[indx]
picked = population[indx]
selected.append(picked)
return selected | e59311977358be3a28baeedc8a2e91e8f979eeeb | 3,636,108 |
def empty_list():
    """Return a new, empty list (a fresh instance on every call)."""
    return list()
def snapshot():
    """Create a default-initialized startup snapshot.

    deno_core requires a snapshot, although this is only documented in a
    few scattered issue comments.
    """
    runtime = Runtime(will_snapshot=True)
    return runtime.snapshot()
def sample_user(email='riti2874@gmail.com', password='Riti#2807'):
    """Create and return a sample user (for tests)."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
def is_chinese_prior_leap_month(m_prime, m):
    """Return True if there is a Chinese leap month on or after the lunar
    month starting on fixed day m_prime and at or before the lunar month
    starting at fixed date m."""
    if m < m_prime:
        return False
    if is_chinese_no_major_solar_term(m):
        return True
    # recurse on the previous lunar month
    return is_chinese_prior_leap_month(m_prime, chinese_new_moon_before(m))
import yaml
import json
def read_config_file(fname):
    """Read a JSON or YAML configuration file and return its parsed contents.

    Args:
        fname: path to a ``.json``, ``.yaml`` or ``.yml`` file.

    Returns:
        The parsed file contents (typically a dict).

    Raises:
        TypeError: if the file extension is not recognized.
        FileNotFoundError: if the file does not exist.
    """
    # BUGFIX: `partial` was used without ever being imported, so the YAML
    # path raised NameError; import it (and yaml) locally.
    from functools import partial
    if fname.endswith((".yaml", ".yml")):
        import yaml  # lazy import: PyYAML only required when YAML is read
        try:
            # PyYAML >= 5.1 warns without an explicit Loader
            rfunc = partial(yaml.load, Loader=yaml.FullLoader)
        except AttributeError:
            rfunc = yaml.load
    elif fname.endswith(".json"):
        rfunc = json.load
    else:
        raise TypeError("Did not understand file type {}.".format(fname))
    try:
        with open(fname, "r") as handle:
            ret = rfunc(handle)
    except FileNotFoundError:
        raise FileNotFoundError("No config file found at {}.".format(fname))
    return ret
def plot_wo(wo, legend=True, **plot_kwargs):
    """Plot a water observation bit flag image.
    Parameters
    ----------
    wo : xr.DataArray
        A DataArray containing water observation bit flags.
    legend : bool
        Whether to plot a legend. Default True.
    plot_kwargs : dict
        Keyword arguments passed on to DataArray.plot.
    Returns
    -------
    plot
    """
    # one RGB colour per bit-flag category, in the same order as `bounds`
    cmap = mcolours.ListedColormap([
        np.array([150, 150, 110]) / 255,    # dry - 0
        np.array([0, 0, 0]) / 255,          # nodata, - 1
        np.array([119, 104, 87]) / 255,     # terrain - 16
        np.array([89, 88, 86]) / 255,       # cloud_shadow - 32
        np.array([216, 215, 214]) / 255,    # cloud - 64
        np.array([242, 220, 180]) / 255,    # cloudy terrain - 80
        np.array([79, 129, 189]) / 255,     # water - 128
        np.array([51, 82, 119]) / 255,      # shady water - 160
        np.array([186, 211, 242]) / 255,    # cloudy water - 192
    ])
    # bit-flag category boundaries (255 caps the last band)
    bounds=[
        0,
        1,
        16,
        32,
        64,
        80,
        128,
        160,
        192,
        255,
    ]
    # shift boundaries by -0.1 so each exact flag value falls inside its band
    norm = mcolours.BoundaryNorm(np.array(bounds) - 0.1, cmap.N)
    cblabels = ['dry', 'nodata', 'terrain', 'cloud shadow', 'cloud', 'cloudy terrain', 'water', 'shady water', 'cloudy water']
    # prefer the image plot when the accessor provides it; fall back to the
    # generic plot call otherwise
    try:
        im = wo.plot.imshow(cmap=cmap, norm=norm, add_colorbar=legend, **plot_kwargs)
    except AttributeError:
        im = wo.plot(cmap=cmap, norm=norm, add_colorbar=legend, **plot_kwargs)
    if legend:
        # the colorbar attribute name varies between plot return types
        try:
            cb = im.colorbar
        except AttributeError:
            cb = im.cbar
        # centre each tick label inside its colour band
        ticks = cb.get_ticks()
        cb.set_ticks(ticks + np.diff(ticks, append=256) / 2)
        cb.set_ticklabels(cblabels)
return im | 5c756248d0bcc4dfb42e69a0a4e50c6321ed7023 | 3,636,114 |
from thedonald import tweets
import os
import json
def _get_tweets(db_path="tweets.json"):
    """Return Trump's tweets, caching in the module-level TWEETS global.

    On a cache miss a saved JSON file is tried first; when that does not
    exist the tweets are fetched from the API (which also writes the file
    for next time).
    """
    global TWEETS
    if TWEETS is None:
        if os.path.exists(db_path):
            # a saved file exists: load it into the cache
            with open(db_path) as handle:
                TWEETS = json.load(handle)
        else:
            # no file either: hit the API, persisting the result to db_path
            TWEETS = tweets.write_tweets_to_file(db_path)
    return TWEETS
def int_list_data():
    """Item data wrapping the int list [1, 2, 3, 4, 5, 6]."""
    values = [1, 2, 3, 4, 5, 6]
    return easymodel.ListItemData(values)
from typing import Dict
from typing import Any
from typing import Optional
def check_transaction_threw(receipt: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Check whether a transaction reverted, based on its receipt.

    Returns the receipt itself when its status equals the failure code,
    and None when the transaction executed successfully.

    Raises:
        ValueError: if the receipt carries no ``status`` field (the client
            is too old to report transaction status).
    """
    if "status" not in receipt:
        raise ValueError(
            "Transaction receipt does not contain a status field. Upgrade your client"
        )
    return receipt if receipt["status"] == RECEIPT_FAILURE_CODE else None
from typing import Callable
from typing import Coroutine
import functools
def button(style: ButtonStyle, label: str, **kwargs) -> Callable[..., Button]:
    """A decorator used to create buttons from their callback coroutine.

    Parameters
    ----------
    style: :class:`.ButtonStyle`
        The styling to use for the button
    label: :class:`str`
        The label of the button
    custom_id: Optional[:class:`str`]
        The custom id to set for the button
    disabled: Optional[:class:`bool`]
        Whether or not the button should be marked as disabled
    emoji: Optional[:class:`.Emoji`]
        The emoji to set for the button
    url: Optional[:class:`str`]
        The url to use for url styled buttons

    Returns
    -------
    :class:`.Button`
        The created button instance
    """
    def decorator(func: Coroutine) -> Button:
        created = Button(style, label, **kwargs)
        # bind the button itself as the callback's first argument
        created.callback = functools.partial(func, created)  # type: ignore
        return created
    return decorator
def AuxSource_Cast(*args):
    """
    Cast(BaseObject o) -> AuxSource
    AuxSource_Cast(Seiscomp::Core::BaseObjectPtr o) -> AuxSource
    """
    # SWIG-generated wrapper: downcasts a generic BaseObject(Ptr) to an
    # AuxSource by delegating to the native _DataModel extension module.
    return _DataModel.AuxSource_Cast(*args)
def to_byte_array(int_value):
    """Split ``int_value`` into a list of CODESIZE bytes.

    Bytes are extracted least-significant first; the list is reversed on
    big-endian builds so the result follows the machine's byte order.
    """
    result = []
    for _ in range(CODESIZE):
        result.append(int_value & 0xff)
        int_value >>= 8
    if BIG_ENDIAN:
        result.reverse()
    return result
def no_op(loss_tensors):
    """Identity pass-through: return the input unchanged."""
    return loss_tensors
def get_example_two_cube(session, varcodes, selected_year=None):
    """Create a 2D cube of flight routes per selector variable, per date selector.

    When ``selected_year`` is given, flight counts are restricted to that
    reporting year; otherwise no underlying base query is applied.
    """
    cubes_api = aa.CubesApi(session.api_client)
    if selected_year is None:
        # no selected year: no underlying base query
        base_query = create_query()
    else:
        # restrict the counts to the selected reporting year
        year_rule = aa.Rule(
            create_clause(session, REPORTING_PERIOD_YEARS_CODE, [selected_year])
        )
        base_query = create_query(rule=year_rule)
    cube = create_cube(
        base_query,
        [create_dimension(code) for code in varcodes],
        [create_measure()],
    )
    return cubes_api.cubes_calculate_cube_synchronously(
        session.data_view, session.system, cube=cube
    )
def svd_reduce(imgs, n_jobs):
    """Reduce data using svd.
    Work done in parallel across subjects.
    Parameters
    ----------
    imgs : array of str, shape=[n_subjects, n_sessions]
        Element i, j of the array is a path to the data of subject i
        collected during session j.
        Data are loaded with numpy.load and expected shape is
        [n_voxels, n_timeframes]
        n_timeframes and n_voxels are assumed to be the same across subjects
        n_timeframes can vary across sessions
        Each voxel's timecourse is assumed to have mean 0 and variance 1
        imgs can also be a list of list of arrays where element i, j of
        the array is a numpy array of shape [n_voxels, n_timeframes] that
        contains the data of subject i collected during session j.
        imgs can also be a list of arrays where element i of the array is
        a numpy array of shape [n_voxels, n_timeframes] that contains the
        data of subject i (number of sessions is implicitly 1)
    n_jobs : integer, optional, default=1
        The number of CPUs to use to do the computation.
        -1 means all CPUs, -2 all CPUs but one, and so on.
    Returns
    -------
    reduced_data : np array shape=(n_subjects, n_timeframes, n_timeframes)
    """
    def svd_i(img):
        # Per-subject reduction; img is this subject's list of sessions.
        n_voxels = get_safe_shape(img[0])[0]
        # time-axis slice of each session inside the concatenated timecourse
        slices = []
        t_i = 0
        for i in range(len(img)):
            n_voxels, n_timeframes = get_safe_shape(img[i])
            slices.append(slice(t_i, t_i + n_timeframes))
            t_i = t_i + n_timeframes
        total_timeframes = t_i
        # First compute X^TX
        # Sessions are loaded one (pair) at a time to limit peak memory;
        # only the upper-triangular blocks are filled here.
        C = np.zeros((total_timeframes, total_timeframes))
        for i in range(len(img)):
            Xi = safe_load(img[i])
            slice_i = slices[i]
            # diagonal block is halved so C + C.T below restores it exactly
            C[slice_i, slice_i] = Xi.T.dot(Xi) / 2
            for j in range(i + 1, len(img)):
                Xj = safe_load(img[j])
                slice_j = slices[j]
                C[slice_i, slice_j] = Xi.T.dot(Xj)
                del Xj
            del Xi
        # symmetrize: lower triangle mirrors the upper, diagonal doubles back
        C = C + C.T
        # Then compute SVD
        V, S, Vt = np.linalg.svd(C)
        X_reduced = (
            np.sqrt(S.reshape(-1, 1)) * Vt
        )  # X_reduced = np.diag(np.sqrt(S)).dot(V)
        return X_reduced
    # fan the per-subject reductions out across n_jobs workers
    X = Parallel(n_jobs=n_jobs)(delayed(svd_i)(img) for img in imgs)
return X | b8aaf939487a183bc0cb8498c6fa41ebc5c3b131 | 3,636,123 |
from typing import Tuple
from typing import List
def parse_project(record: Record, add_info: dict) -> Tuple[Pair, List[Pair], List[Pair]]:
    """Parse an airtable project record into Billinge group format.

    The record is denormalized via ``tools.get_data`` first; each entry in
    its 'Collaborators' list is parsed into a (person, institution) pair of
    key-value documents.

    Parameters
    ----------
    record : Record
        The record from the airtable.
    add_info : dict
        A dictionary of the additional information ('log_url', 'name',
        'pi_id', ...).

    Returns
    -------
    project : tuple
        The key-value pair of the project document.
    people : list
        The key-value pairs of the people in the collaborators list.
    institutions : list
        The key-value pairs of the institutions of those collaborators.
    """
    data = tools.get_data(record)
    parsed = [parse_person(person) for person in data.get('Collaborators', [])]
    people = [person for person, _ in parsed if person is not None]
    institutions = [inst for _, inst in parsed if inst is not None]
    start_date = data.get('Start Date')
    lead_id = tools.gen_person_id(data.get('Project Lead'))
    doc = {
        'begin_date': start_date,
        'collaborators': tools.get_keys(people),
        'description': data.get('Notes'),
        'grants': data.get('Grant'),
        'group_members': [lead_id],
        'lead': lead_id,
        'log_url': _retrieve(add_info, 'log_url'),
        'ana_repo_url': data.get('Link to Analysis'),
        'man_repo_url': data.get('Link to Paper'),
        'deliverable': autogen.auto_gen_deliverable(start_date),
        'milestones': autogen.auto_gen_milestons(start_date),
        'name': _retrieve(add_info, 'name'),
        'pi_id': _retrieve(add_info, 'pi_id'),
        'status': data.get('Status')
    }
    return (data.get('Name'), doc), people, institutions
def forbidden_handler_exc(message):
    """Render the 403 error page with a custom message.

    Args:
        message (str): Custom message to display.

    Returns:
        tuple: (rendered 403 template, 403 status code)
    """
    body = render_template('errors/403.html', message=message)
    return body, 403
import unittest
def run_test():
    """Run the unit tests under ./tests without test coverage.

    Returns 0 when every test passes and 1 otherwise, suitable for use
    as a process exit code.
    """
    suite = unittest.TestLoader().discover('./tests', pattern='test*.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if outcome.wasSuccessful() else 1
def resnet34(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet-34 implementation;
    expected input shape is (B, 3, 224, 224).

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample) for each section
    section_specs = [
        (3, 64, 64, False),
        (4, 64, 128, True),
        (6, 128, 256, True),
        (3, 256, 512, True),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks, in_channels=cin, out_channels=cout, downsample=down
        )
        for blocks, cin, cout, down in section_specs
    ]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
def linspace(start, stop, num=50):
    """
    np.linspace grid augmented with four extra points near the endpoints.

    The extras sit at 0.1 and 0.5 grid steps inside each end of the
    interval, which refines the grid where boundary behavior matters.

    :param start: interval start
    :param stop: interval end
    :param num: number of regular linspace points (default 50)
    :return: sorted list of num + 4 grid points
    """
    grid = list(np.linspace(start, stop, num))
    step = (stop - start) / (num - 1)
    # BUGFIX: offsets are measured from `start` (previously from 0, which
    # placed the two leading extra points outside [start, stop] whenever
    # start != 0).
    grid.extend([start + 0.1 * step, start + 0.5 * step,
                 stop - 0.1 * step, stop - 0.5 * step])
    return sorted(grid)
def strip_html(markdown):
    """
    Strip HTML tags from a markdown string.

    Entities present in the markdown are escaped, and text content is
    HTML-escaped on the way out.

    Parameters:
        markdown: A :term:`native string` to be stripped and escaped.

    Returns:
        An escaped, stripped :term:`native string`.
    """
    # imported locally: not imported at this file's top level
    from html.parser import HTMLParser

    class Parser(HTMLParser):
        def __init__(self):
            # convert_charrefs=False so entity/char references reach the
            # handlers below instead of being pre-decoded into text.
            super().__init__(convert_charrefs=False)
            # instance attribute: the previous class-level list was shared
            # between Parser instances and would leak text across uses
            self.text_parts = []

        def handle_data(self, data):
            self.text_parts.append(
                data
                .replace("&", "&amp;")
                .replace("<", "&lt;")
                .replace(">", "&gt;")
                .replace('"', "&quot;")
            )

        def handle_entityref(self, name):
            self.text_parts.append("&" + name + ";")

        def handle_charref(self, name):
            self.text_parts.append("&#" + name + ";")

    parser = Parser()
    parser.feed(markdown)
    parser.close()  # flush any buffered trailing data
    return "".join(parser.text_parts)
def find_character_occurences(processed_txt):
    """
    Return a Counter mapping PERSON entity lemmas in ``processed_txt`` to
    their occurrence counts.

    ``processed_txt`` is expected to expose ``len()`` and an ``.ents``
    iterable of entities with ``label_`` and ``lemma_`` attributes
    (a spaCy-style Doc — TODO confirm against callers).
    """
    # imported locally: not imported at this file's top level
    from collections import Counter
    # BUGFIX: the original read the undefined name `processed_text`
    total_len = len(processed_txt)
    # print a progress star roughly every 10% of the document;
    # max(..., 1) guards against modulo-by-zero on very short documents
    progress_step = max(total_len // 10, 1)
    characters = Counter()
    for index, ent in enumerate(processed_txt.ents):
        if ent.label_ == 'PERSON':
            characters[ent.lemma_] += 1
        if index % progress_step == 0:
            print('*', end=' ')  # Python 3 form of the original `print '*',`
    return characters
import random
def parents_similarity(subject1, subject2, values):
    """
    Crossover operator driven by parent bit agreement.

    Both parents are binary-encoded; at every bit position the child keeps
    the first parent's bit with probability 0.9 when the parents agree
    there, and 0.5 when they differ — otherwise it takes the second
    parent's bit.  For example, with s1 = '11000' and s2 = '11101' the
    offspring will, with high probability, be s1_new = '11100' and
    s2_new = '11001' (middle and last digits chosen at random).  Two
    children are produced, one per parent ordering, and decoded back.

    :param subject1: one subject of the population
    :param subject2: another subject of the population
    :param values: list or dict. a sequence of all values a subject in the
        population can have
    :return: a tuple of two new subjects
    """
    def mix(primary, secondary):
        # keep primary's bit with p=0.9 on agreement, p=0.5 on disagreement
        offspring = []
        for pos in range(len(primary)):
            keep_prob = 0.9 if primary[pos] == secondary[pos] else 0.5
            chosen = primary[pos] if random.random() < keep_prob else secondary[pos]
            offspring.append(chosen)
        return ''.join(offspring)

    encoded1 = seq_to_binary_string(subject1, values)
    encoded2 = seq_to_binary_string(subject2, values)
    child1_bits = mix(encoded1, encoded2)
    child2_bits = mix(encoded2, encoded1)
    return (binary_string_to_seq(child1_bits, values),
            binary_string_to_seq(child2_bits, values))
import torch
def generic_fftshift(x, axis=(-2, -1), inverse=False):
    """
    Fourier shift to center the low frequency components.

    Parameters
    ----------
    x : torch Tensor
        Input array
    axis : sequence of int, optional
        Dimensions to shift over (default: the last two).
        NOTE: changed from a mutable list default to a tuple; any sequence
        of ints is accepted, so existing callers are unaffected.
    inverse : bool
        whether the shift is for fft (False) or ifft (True); the two differ
        by one element on odd-sized dimensions.

    Returns
    -------
    torch Tensor
        shifted array (torch.roll copies, so the input is untouched)
    """
    if len(axis) > len(x.shape):
        raise ValueError('Not enough axis to shift around!')
    y = x
    for axe in axis:
        dim_size = x.shape[axe]
        shift = dim_size // 2
        if inverse and dim_size % 2 != 0:
            # ifftshift on an odd-length axis rolls one element further
            shift += 1
        y = torch.roll(y, shift, axe)
    return y
def eval_ner(gold, sen, labels):
    """
    Evaluate NER predictions against gold labels.

    Entities of each type (LOC, MISC, ORG, PER) are extracted from both
    sequences and matched; a predicted entity present in the gold set is a
    true positive, a gold entity missing from the prediction is a false
    negative, and an extra predicted entity is a false positive.

    :param gold: gold label sequence
    :param sen: predicted label sequence (same length as ``gold``)
    :param labels: mapping from label strings ("B-LOC", "I-LOC", ...) to ids
    :return: (tp, fp, fn) counts
    """
    if len(gold) != len(sen):
        # BUGFIX: the original raised a plain string, which is a TypeError
        # in Python 3; raise a real exception carrying both lengths.
        raise ValueError(
            "label sequences have different lengths: {} != {}".format(len(gold), len(sen)))
    tp = 0
    fp = 0
    fn = 0
    for en in ("LOC", "MISC", "ORG", "PER"):
        g = list_entities(gold, labels["B-" + en], labels["I-" + en])
        s = list_entities(sen, labels["B-" + en], labels["I-" + en])
        for loc in g:
            if loc in s:
                tp += 1
            else:
                fn += 1
        for loc in s:
            if loc not in g:
                fp += 1
    return (tp, fp, fn)
def generate_data(m: int = 1000, n: int = 30, sigma: int = 40):
    """Generate a synthetic, ill-conditioned regression problem.

    Draws a ground-truth parameter, builds a feature matrix whose singular
    values are linearly spaced from 30 down to 1 (making it
    ill-conditioned), and observes labels corrupted by Gaussian noise.
    To experiment with your own data, replace this function's body with
    code that loads your dataset.

    Args
    ----
    m : int
        The number of examples.
    n : int
        The number of features per example.
    sigma : positive float
        The standard deviation of the additive noise.

    Returns
    -------
    X : np.array
        Featurized examples, shape (m, n).
    Y : np.array
        Observed labels, shape (m,).
    beta_star : np.array
        The true parameter being estimated.
    """
    beta_star = np.random.randn(n)
    X = np.random.randn(m, n)
    # keep the singular vectors of X but replace its singular values with
    # a linspace from 30 to 1, producing a controlled condition number
    U, _, V = np.linalg.svd(X)
    rank = min(m, n)
    S = np.zeros((m, n))
    S[:rank, :rank] = np.diag(np.linspace(30, 1, rank))
    X = np.dot(U, np.dot(S, V))
    # corrupt the observations with additive Gaussian noise
    Y = X.dot(beta_star) + np.random.normal(0, sigma, size=m)
    return X, Y, beta_star
import logging
def read_datafile(path: str):
    """Read a flight data file, unpack it, and verify its checksum.

    Returns an AltAccDump built from the unpacked fields; logs a warning
    when the file size is unexpected and raises ValueError when the stored
    checksum disagrees with the computed one.
    """
    with open(path, 'rb') as handle:
        raw = handle.read()
    if len(raw) != altacc_format.size:
        logging.warning(f"invalid data file length, {len(raw)} bytes!")
    flight = AltAccDump._make(altacc_format.unpack(raw))
    # checksum covers everything except the trailing 4 bytes, modulo 2**16
    expected = sum(raw[:-4]) % 0x10000
    if flight.CkSum != expected:
        raise ValueError(f"checksum mismatch datafile={flight.CkSum} computed:{expected}")
    return flight
def get_compiler_option():
    """Determine the compiler used to build extension modules.

    Returns
    -------
    compiler : str
        The compiler option specified for the build, build_ext, or
        build_clib command, falling back to the platform's default
        compiler when none was specified.
    """
    chosen = get_distutils_build_option('compiler')
    if chosen is not None:
        return chosen
    return ccompiler.get_default_compiler()
def create_output_from_files(data_file_path: str, sheet_name: str, yaml_file_path: str, wikifier_filepath: str, output_filepath: str = None, output_format: str = "json"):
    """Generate knowledge-graph output from input files, optionally saving it.

    Convenience wrapper equivalent to KnowledgeGraph.generate_from_files
    followed by one of the KnowledgeGraph save functions; the generated
    string is also returned so the caller can examine or process it
    (same as KnowledgeGraph.get_output).

    Args:
        data_file_path (str): location of the spreadsheet file
        sheet_name (str): name of the sheet being used; for csv files, the file name
        yaml_file_path (str): yaml file describing the region and template
        wikifier_filepath (str): wikifier file used to create the item table
        output_filepath (str, optional): location to save output. Defaults to None.
        output_format (str, optional): accepts "json", "tsv" (or "kgtk"). Defaults to "json".

    Returns:
        str: the output data in the requested format
    """
    graph = KnowledgeGraph.generate_from_files(
        data_file_path, sheet_name, yaml_file_path, wikifier_filepath)
    rendered = graph.get_output(output_format)
    if output_filepath:
        with open(output_filepath, 'w', encoding="utf-8") as handle:
            handle.write(rendered)
    return rendered
def make_new_images(dataset, imgs_train, imgs_val):
    """
    Split the annotations in dataset into two dicts, train and val,
    according to the img ids in imgs_train, imgs_val.

    Args:
        dataset: COCO-style dict with keys 'info', 'licenses', 'images',
            'annotations', 'categories'.
        imgs_train: iterable of image ids for the training split.
        imgs_val: iterable of image ids for the validation split.

    Returns:
        (dataset_train, dataset_val): two dicts with the same keys as
        ``dataset``; metadata is shared, images/annotations are selected by id.

    Raises:
        KeyError: if an id in imgs_train/imgs_val has no matching image
            or annotation in ``dataset``.
    """
    table_imgs = {x['id']: x for x in dataset['images']}
    # NOTE(review): this keeps only ONE annotation per image_id — if an image
    # can carry multiple annotations, all but the last are dropped. Confirm
    # that one-annotation-per-image is the intended invariant here.
    table_anns = {x['image_id']: x for x in dataset['annotations']}

    def _subset(img_ids):
        # Build one split: shared metadata plus the images/annotations whose
        # ids appear in img_ids (key order matches the original fromkeys order).
        return {
            'info': dataset['info'],
            'licenses': dataset['licenses'],
            'images': [table_imgs[i] for i in img_ids],
            'annotations': [table_anns[i] for i in img_ids],
            'categories': dataset['categories'],
        }

    return _subset(imgs_train), _subset(imgs_val)
def test_similarity_sample_multiprocess(pk_target):
    """Test similarity cutoff filter"""
    def weight(T):
        return T ** 4
    sampling_filter = SimilaritySamplingFilter(sample_size=10, weight=weight)
    pk_target.filters.append(sampling_filter)
    pk_target.react_targets = True
    pk_target.transform_all(processes=2, generations=2)
    # A filtered run must produce fewer compounds than the unfiltered count
    # (1452); sampling is non-deterministic, so no exact value is asserted.
    assert len(pk_target.compounds) < 1452
def compute_maximum_ts_map(ts_map_results):
    """
    Compute maximum TS map across a list of given ts maps.
    Parameters
    ----------
    ts_map_results : list
        List of `~gammapy.image.SkyImageCollection` objects.
    Returns
    -------
    images : `~gammapy.image.SkyImageCollection`
        Images (ts, niter, amplitude)
    """
    # Get data: stack per-scale maps along a new third axis -> (ny, nx, n_scales)
    ts = np.dstack([result.ts for result in ts_map_results])
    niter = np.dstack([result.niter for result in ts_map_results])
    amplitude = np.dstack([result.amplitude for result in ts_map_results])
    scales = [result.scale for result in ts_map_results]
    # Set up max arrays
    ts_max = np.max(ts, axis=2)
    scale_max = np.zeros(ts.shape[:-1])
    niter_max = np.zeros(ts.shape[:-1])
    amplitude_max = np.zeros(ts.shape[:-1])
    for i, scale in enumerate(scales):
        # Pixels where this scale attains the maximum TS. On exact ties,
        # scales later in the input list overwrite earlier ones.
        index = np.where(ts[:, :, i] == ts_max)
        scale_max[index] = scale
        niter_max[index] = niter[:, :, i][index]
        amplitude_max[index] = amplitude[:, :, i][index]
    # Morphology metadata is taken from the first input map only.
    meta = {'MORPH': (ts_map_results[0].morphology, 'Source morphology assumption')}
    return SkyImageCollection(ts=ts_max, niter=niter_max, amplitude=amplitude_max,
                              meta=meta)
import gc
def compute_dist(array1, array2, type='euclidean'):
    """Compute the euclidean or cosine distance of all pairs.
    Args:
        array1: numpy array with shape [m1, n]
        array2: numpy array with shape [m2, n]
        type: one of ['cosine', 'euclidean']
    Returns:
        numpy array with shape [m1, m2]
    """
    assert type in ['cosine', 'euclidean']
    if type == 'cosine':
        # BUG FIX: the original called `self.normalize` from a module-level
        # function, which raises NameError at runtime. L2-normalize each row
        # inline instead (guarding against zero-norm rows).
        norms1 = np.linalg.norm(array1, axis=1, keepdims=True)
        norms2 = np.linalg.norm(array2, axis=1, keepdims=True)
        array1 = array1 / np.maximum(norms1, 1e-12)
        array2 = array2 / np.maximum(norms2, 1e-12)
        dist = np.matmul(array1, array2.T)
        return dist
    else:
        # shape [m1, 1]
        square1 = np.sum(np.square(array1), axis=1)[..., np.newaxis]
        # shape [1, m2]
        square2 = np.sum(np.square(array2), axis=1)[np.newaxis, ...]
        # ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b; clamp tiny negatives caused
        # by floating-point cancellation before the square root.
        squared_dist = - 2 * np.matmul(array1, array2.T) + square1 + square2
        squared_dist[squared_dist < 0] = 0
        dist = np.sqrt(squared_dist)
        # Free the intermediates eagerly; the inputs may be large.
        del square1, square2, squared_dist
        gc.collect()
        return dist
def timer(step, callback, *args):
    """Start a recurring timer.

    Creates a ``TimerService`` that invokes ``callback(*args)`` every
    ``step`` seconds, starts it immediately, and returns the service.
    """
    service = internet.TimerService(step, callback, *args)
    service.startService()
    return service
import tkinter as tk
from tkinter import filedialog
def select_file(title: str) -> str:
    """Open a file-selection dialog and return the path of the chosen file."""
    root = tk.Tk()
    root.withdraw()  # hide the blank root window; only the dialog is shown
    chosen_path = filedialog.askopenfilename(title=title)
    return chosen_path
import ctypes
def drdpgr(body, lon, lat, alt, re, f):
    """
    This routine computes the Jacobian matrix of the transformation
    from planetographic to rectangular coordinates.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdpgr_c.html
    :param body: Body with which coordinate system is associated.
    :type body: str
    :param lon: Planetographic longitude of a point (radians).
    :type lon: float
    :param lat: Planetographic latitude of a point (radians).
    :type lat: float
    :param alt: Altitude of a point above reference spheroid.
    :type alt: float
    :param re: Equatorial radius of the reference spheroid.
    :type re: float
    :param f: Flattening coefficient.
    :type f: float
    :return: Matrix of partial derivatives.
    :rtype: 3x3-Element Array of floats
    """
    # Output buffer for the 3x3 Jacobian, filled in place by CSPICE.
    jacobi = stypes.emptyDoubleMatrix()
    # Convert the Python arguments to ctypes at the call site instead of
    # rebinding the parameter names.
    libspice.drdpgr_c(stypes.stringToCharP(body),
                      ctypes.c_double(lon), ctypes.c_double(lat),
                      ctypes.c_double(alt), ctypes.c_double(re),
                      ctypes.c_double(f), jacobi)
    return stypes.cMatrixToNumpy(jacobi)
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_inter_rule_groupinter_rule_group_uuid_cost_characteristiccost_name_get(uuid, node_uuid, node_rule_group_uuid, inter_rule_group_uuid, cost_name):  # noqa: E501
    """data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_inter_rule_groupinter_rule_group_uuid_cost_characteristiccost_name_get
    returns tapi.topology.CostCharacteristic # noqa: E501
    :param uuid: Id of topology
    :type uuid: str
    :param node_uuid: Id of node
    :type node_uuid: str
    :param node_rule_group_uuid: Id of node-rule-group
    :type node_rule_group_uuid: str
    :param inter_rule_group_uuid: Id of inter-rule-group
    :type inter_rule_group_uuid: str
    :param cost_name: Id of cost-characteristic
    :type cost_name: str
    :rtype: TapiTopologyCostCharacteristic
    """
    # Auto-generated server stub (swagger/connexion placeholder body) —
    # replace with a real implementation returning a TapiTopologyCostCharacteristic.
    return 'do some magic!'
from typing import List
async def joinUserChannel(cls:"PhaazebotTwitch", Message:twitch_irc.Message, Context:TwitchCommandContext) -> None:
    """
    Allow a user (or an admin acting for another channel) to add Phaaze to a channel.
    """
    alternative_target:str = ""
    UserPerm:TwitchPermission = TwitchPermission(Message, None)
    if UserPerm.rank >= TwitchConst.REQUIRE_ADMIN:
        # admin or higher have the permission to remove phaaze from any channel without the owner consent
        if len(Context.parts) >= 2:
            alternative_target = Context.part(1)
    # First check whether the target channel is already managed.
    if alternative_target:
        alternative_sql:str = """
            SELECT COUNT(*) AS `I`
            FROM `twitch_user_name`
            LEFT JOIN `twitch_channel`
                ON `twitch_channel`.`channel_id` = `twitch_user_name`.`user_id`
            WHERE `twitch_channel`.`managed` = 1
                AND `twitch_user_name`.`user_name` = %s"""
        res:List[dict] = cls.BASE.PhaazeDB.selectQuery(alternative_sql, (alternative_target,))
    else:
        check_sql:str = """
            SELECT COUNT(*) AS `I`
            FROM `twitch_channel`
            WHERE `twitch_channel`.`managed` = 1
            AND `twitch_channel`.`channel_id` = %s"""
        res:List[dict] = cls.BASE.PhaazeDB.selectQuery(check_sql, (Message.user_id,))
    if res[0]['I']:
        return_content:str = f"@{Message.display_name} > Phaaze already is in your channel"
        if alternative_target: return_content = f"@{Message.display_name} > Phaaze already is in {alternative_target}'s channel"
        return await Message.Channel.sendMessage(cls, return_content)
    # after this point, we have a user or an admin input who wants to add phaaze
    if alternative_target:
        # Resolve the target login via the Twitch API before touching the DB.
        user_search:List[TwitchUser] = await getTwitchUsers(cls.BASE, alternative_target, item_type="login", limit=1)
        if not user_search:
            return_content:str = f"@{Message.display_name} > Phaaze could not find a user named {alternative_target} in the Twitch-API"
            return await Message.Channel.sendMessage(cls, return_content)
        else:
            NewEntry:TwitchUser = user_search.pop(0)
            # insert or update managed status
            cls.BASE.PhaazeDB.insertQuery(
                update_on_duplicate=True,
                table="twitch_channel",
                content={
                    "channel_id": NewEntry.user_id,
                    "managed": 1
                },
            )
            # insert or update to name table
            cls.BASE.PhaazeDB.insertQuery(
                update_on_duplicate=True,
                table="twitch_user_name",
                content={
                    "user_id": NewEntry.user_id,
                    "user_name": NewEntry.login,
                    "user_display_name": NewEntry.display_name
                },
            )
    else:
        # insert or update managed status
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_channel",
            content={
                "channel_id": Message.user_id,
                "managed": 1
            },
        )
        # insert or update to name table
        cls.BASE.PhaazeDB.insertQuery(
            update_on_duplicate=True,
            table="twitch_user_name",
            content={
                "user_id": Message.user_id,
                "user_name": Message.user_name,
                "user_display_name": Message.display_name
            },
        )
    # Finally make the IRC client actually join the channel and confirm.
    if alternative_target:
        await cls.joinChannel(alternative_target)
        return_content:str = f"@{Message.display_name} > Phaaze successful joined {alternative_target}'s channel"
    else:
        await cls.joinChannel(Message.user_name)
        return_content:str = f"@{Message.display_name} > Phaaze successful joined your channel"
    return await Message.Channel.sendMessage(cls, return_content)
def budget_balanced_ascending_auction(
        market:Market, ps_recipes: list)->TradeWithMultipleRecipes:
    """
    Calculate the trade and prices using generalized-ascending-auction.
    Allows multiple recipes, but only of the following kind:
    [ [1,0,0,x], [0,1,0,y], [0,0,1,z] ]
    (i.e., there are n-1 buyer categories and 1 seller category.
    One agent of category 1 buys x units; of category 2 buys y units; of category 3 buys z units; etc.)
    :param market: contains a list of k categories, each containing several agents.
    :param ps_recipes: a list of lists of integers, one integer per category.
        Each integer i represents the number of agents of category i
        that should be in each procurement-set.
    :return: Trade object, representing the trade and prices.
    >>> # ONE BUYER, ONE SELLER
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0], seller: [-4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9.,8.]), AgentCategory("seller", [-4.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0, 8.0], seller: [-4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0], seller: [-3.0, -4.0]]
    seller: [-3.0]: all 1 agents trade and pay -4.0
    buyer: [9.0]: all 1 agents trade and pay 4.0
    >>> market = Market([AgentCategory("buyer", [9.,8.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,1]]))
    Traders: [buyer: [9.0, 8.0], seller: [-3.0, -4.0]]
    seller: [-3.0, -4.0]: random 1 out of 2 agents trade and pay -8.0
    buyer: [9.0]: all 1 agents trade and pay 8.0
    >>> # ONE BUYER, TWO SELLERS
    >>> market = Market([AgentCategory("buyer", [9.]), AgentCategory("seller", [-4.,-3.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,2]]))
    Traders: [buyer: [9.0], seller: [-3.0, -4.0]]
    No trade
    >>> market = Market([AgentCategory("buyer", [9., 8., 7., 6.]), AgentCategory("seller", [-6., -5., -4.,-3.,-2.,-1.])])
    >>> print(market); print(budget_balanced_ascending_auction(market, [[1,2]]))
    Traders: [buyer: [9.0, 8.0, 7.0, 6.0], seller: [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0]]
    seller: [-1.0, -2.0, -3.0, -4.0]: random 2 out of 4 agents trade and pay -4.0
    buyer: [9.0]: all 1 agents trade and pay 8.0
    """
    logger.info("\n#### Budget-Balanced Ascending Auction with Multiple Recipes - n-1 buyer categories\n")
    logger.info(market)
    logger.info("Procurement-set recipes: %s", ps_recipes)
    # Each buyer category maps to how many seller units one such buyer consumes.
    map_buyer_category_to_seller_count = _convert_recipes_to_seller_counts(ps_recipes, market.num_categories)
    logger.info("Map buyer category index to seller count: %s", map_buyer_category_to_seller_count)
    # NOTE: Calculating the optimal trade cannot be done greedily -
    #       it requires solving a restricted instance of Knapsack.
    # optimal_trade = market.optimal_trade(ps_recipe, max_iterations=max_iterations)[0]
    # logger.info("For comparison, the optimal trade is: %s\n", optimal_trade)
    remaining_market = market.clone()
    buyer_categories = remaining_market.categories[:-1]
    num_buyer_categories = market.num_categories-1
    seller_category = remaining_market.categories[-1]
    prices = AscendingPriceVector([1, 1], -MAX_VALUE)
    buyer_price_index = 0
    seller_price_index = 1
    # prices[0] represents the price for all buyer-categories per single unit.
    # prices[1] represents the price for all sellers.
    try:
        num_units_offered = len(seller_category)
        num_units_demanded = sum([len(buyer_categories[i])*map_buyer_category_to_seller_count[i] for i in range(num_buyer_categories)])
        target_unit_count = min(num_units_demanded, num_units_offered)
        logger.info("%d units demanded by buyers, %d units offered by sellers, minimum is %d",
            num_units_demanded, num_units_offered, target_unit_count)
        while True:
            logger.info("Prices: %s, Target unit count: %d", prices, target_unit_count)
            # Phase 1: raise the buyer price, evicting the lowest-value-per-unit
            # buyer, until demand fits within the target unit count.
            price_index = buyer_price_index
            while True:
                num_units_demanded = sum([len(buyer_categories[i]) * map_buyer_category_to_seller_count[i] for i in range(num_buyer_categories)])
                logger.info("  Buyers demand %d units", num_units_demanded)
                if num_units_demanded == 0: raise EmptyCategoryException()
                if num_units_demanded <= target_unit_count: break
                map_buyer_category_to_lowest_value = [category.lowest_agent_value() for category in buyer_categories]
                logger.debug("  map_buyer_category_to_lowest_value=%s", map_buyer_category_to_lowest_value)
                map_buyer_category_to_lowest_value_per_unit = [value / count for value,count in zip(map_buyer_category_to_lowest_value,map_buyer_category_to_seller_count)]
                logger.debug("  map_buyer_category_to_lowest_value_per_unit=%s", map_buyer_category_to_lowest_value_per_unit)
                category_index_with_lowest_value_per_unit = min(range(num_buyer_categories), key=lambda i:map_buyer_category_to_lowest_value_per_unit[i])
                category_with_lowest_value_per_unit = buyer_categories[category_index_with_lowest_value_per_unit]
                lowest_value_per_unit = map_buyer_category_to_lowest_value_per_unit[category_index_with_lowest_value_per_unit]
                logger.info("  lowest value per unit is %f, of category %d (%s)", lowest_value_per_unit, category_index_with_lowest_value_per_unit, category_with_lowest_value_per_unit.name)
                prices.increase_price_up_to_balance(price_index, category_with_lowest_value_per_unit.lowest_agent_value()/map_buyer_category_to_seller_count[category_index_with_lowest_value_per_unit], category_with_lowest_value_per_unit.name)
                category_with_lowest_value_per_unit.remove_lowest_agent()
            category = seller_category
            # logger.info("\n### Step 1a: balancing the sellers (%s)", category.name)
            # Phase 2: raise the seller price, evicting the lowest-value seller,
            # until supply fits within the target unit count.
            price_index = seller_price_index
            while True:
                num_units_offered = len(category)
                logger.info("  Sellers offer %d units", num_units_offered)
                if num_units_offered == 0: raise EmptyCategoryException()
                if num_units_offered <= target_unit_count: break
                prices.increase_price_up_to_balance(price_index, category.lowest_agent_value(), category.name)
                category.remove_lowest_agent()
            # No balance at this size; shrink the target and iterate again.
            target_unit_count -= 1
    except EmptyCategoryException:
        logger.info("\nOne of the categories became empty. No trade!")
        logger.info("  Final price-per-unit vector: %s", prices)
    # Construct the final price-vector:
    buyer_price_per_unit = prices[buyer_price_index]
    seller_price_per_unit = prices[seller_price_index]
    final_prices = \
        [buyer_price_per_unit * unit_count for unit_count in map_buyer_category_to_seller_count] + \
        [seller_price_per_unit]
    logger.info("  %s", remaining_market)
    return TradeWithMultipleRecipes(remaining_market.categories, map_buyer_category_to_seller_count, final_prices)
import traceback
def copy_inputs(paths,
                file_list):
    """.. Create copies to inputs from list of files containing copying instructions.
    Create copies using instructions contained in files of list ``file_list``. Instructions are `string formatted <https://docs.python.org/3.4/library/string.html#format-string-syntax>`__ using paths dictionary ``paths``. Copies are written in directory ``input_dir``. Status messages are appended to file ``make log``.
    Instruction files on how to create copies (destinations) from targets (sources) should be formatted in the following way.
    .. code-block:: md
        # Each line of instruction should contain a destination and source delimited by a `|`
        # Lines beginning with # are ignored
        destination | source
    .. Note::
        Instruction files can be specified with the * shell pattern (see `here <https://www.gnu.org/software/findutils/manual/html_node/find_html/Shell-Pattern-Matching.html>`__). Destinations and their sources can also be specified with the * shell pattern. The number of wildcards must be the same for both destinations and sources.
    Parameters
    ----------
    paths : dict
        Dictionary of paths. Dictionary should contain values for all keys listed below. Dictionary additionally used to string format copying instructions.
    file_list : str, list
        File or list of files containing copying instructions.
    Path Keys
    ---------
    input_dir : str
        Directory to write copies.
    makelog : str
        Path of makelog.
    Returns
    -------
    source_map : list
        List of (source, destination) for each copy created.
    Example
    -------
    Suppose you call the following function.
    .. code-block:: python
        copy_inputs(paths, ['file1'], formatting_dict)
    Suppose ``paths`` contained the following values.
    .. code-block:: md
        paths = {'root': '/User/root/',
                 'makelog': 'make.log',
                 'input_dir': 'input'}
    Now suppose instruction file ``file1`` contained the following text.
    .. code-block:: md
        destination1 | {root}/source1
    The ``{root}`` in the instruction file would be string formatted using ``paths``. Therefore, the function would parse the instruction as:
    .. code-block:: md
        destination1 | /User/root/source1
    Example
    -------
    The following code would use instruction files ``file1`` and ``file2`` to create copies.
    .. code-block:: python
        copy_inputs(paths, ['file1', 'file2'])
    Suppose instruction file ``file1`` contained the following text.
    .. code-block:: md
        destination1 | source1
        destination2 | source2
    Copies ``destination1`` and ``destination1`` would be created in directory ``paths['input_dir']``. Their targets would be ``source1`` and ``source2``, respectively.
    Example
    -------
    Suppose you have the following targets.
    .. code-block:: md
        source1
        source2
        source3
    Specifying ``destination* | source*`` in one of your instruction files would create the following copies in ``paths['input_dir']``.
    .. code-block:: md
        destination1
        destination2
        destination3
    """
    try:
        paths['move_dir'] = get_path(paths, 'input_dir')
        source_map = _create_copies(paths, file_list)
        message = 'Input copies successfully created!'
        write_to_makelog(paths, message)
        print(colored(message, metadata.color_success))
        return(source_map)
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit and KeyboardInterrupt. Errors are still logged to the
    # makelog and re-raised as a ColoredError with the full traceback.
    except Exception:
        error_message = 'An error was encountered with `copy_inputs`. Traceback can be found below.'
        error_message = format_message(error_message)
        write_to_makelog(paths, error_message + '\n\n' + traceback.format_exc())
        raise_from(ColoredError(error_message, traceback.format_exc()), None)
from typing import Tuple
from typing import List
def agaricus_lepiota() -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[ALFeature]]:
    """
    Source:
    https://archive.ics.uci.edu/ml/datasets/Mushroom
    The function requires the file 'agaricus-lepiota.data' to be in the current
    folder.
    Returns the loaded dataset as a tuple of NumPy arrays, where the first contains
    (for each sample) the concatenation of all the features (which are one-hot encoded),
    the second contains (for each sample) the raw features as integer numbers and the
    the third contains (for each sample) the labels (0 <=> edible, 1 <=> poisonous).
    Also returns a list which contains a small description of each feature as a
    `ALFeature` object.
    NOTE: uses str.removesuffix, so this requires Python 3.9+.
    """
    FEATURES_NAMES = [
        'cap-shape',
        'cap-surface',
        'cap-color',
        'bruises?',
        'odor',
        'gill-attachment',
        'gill-spacing',
        'gill-size',
        'gill-color',
        'stalk-shape',
        'stalk-root',
        'stalk-surface-above-ring',
        'stalk-surface-below-ring',
        'stalk-color-above-ring',
        'stalk-color-below-ring',
        'veil-type',
        'veil-color',
        'ring-number',
        'ring-type',
        'spore-print-color',
        'population',
        'habitat'
    ]
    MISSING_FEATURE_LETTER = '?'
    # One list of valid letters per feature, in dataset column order.
    FEATURES_LETTERS = [
        ['b', 'c', 'x', 'f', 'k', 's'],
        ['f', 'g', 'y', 's'],
        ['n', 'b', 'c', 'g', 'r', 'p', 'u', 'e', 'w', 'y'],
        ['t', 'f'],
        ['a', 'l', 'c', 'y', 'f', 'm', 'n', 'p', 's'],
        ['a', 'd', 'f', 'n'],
        ['c', 'w', 'd'],
        ['b', 'n'],
        ['k', 'n', 'b', 'h', 'g', 'r', 'o', 'p', 'u', 'e', 'w', 'y'],
        ['e', 't'],
        ['b', 'c', 'u', 'e', 'z', 'r'],
        ['f', 'y', 'k', 's'],
        ['f', 'y', 'k', 's'],
        ['n', 'b', 'c', 'g', 'o', 'p', 'e', 'w', 'y'],
        ['n', 'b', 'c', 'g', 'o', 'p', 'e', 'w', 'y'],
        ['p', 'u'],
        ['n', 'o', 'w', 'y'],
        ['n', 'o', 't'],
        ['c', 'e', 'f', 'l', 'n', 'p', 's', 'z'],
        ['k', 'n', 'b', 'h', 'r', 'o', 'u', 'w', 'y'],
        ['a', 'c', 'n', 's', 'v', 'y'],
        ['g', 'l', 'm', 'p', 'u', 'w', 'd'],
    ]
    LABEL_LETTERS = ['e', 'p']
    assert len(FEATURES_LETTERS) == 22
    assert len(LABEL_LETTERS) == 2
    # compute number of features after one-hot encoding
    Xn = sum([len(c) for c in FEATURES_LETTERS])
    X = []   # one-hot encoded feature vectors
    Xr = []  # raw feature vectors
    Y = []   # labels
    with open("agaricus-lepiota.data") as f:
        for line in f:
            # First column is the label; the remaining 22 are features.
            label_letter, *features_letters = line.removesuffix('\n').split(',')
            assert type(label_letter) == str
            assert type(features_letters) == list
            assert label_letter in LABEL_LETTERS
            y = LABEL_LETTERS.index(label_letter)
            x = np.zeros(Xn, dtype=float)
            xr = np.zeros(22, dtype=int)
            idx = 0  # track the starting index of the current feature in the one-hot encoded vector
            for i, feature_letter in enumerate(features_letters):
                assert i < 22
                assert feature_letter in FEATURES_LETTERS[i] or feature_letter == MISSING_FEATURE_LETTER
                if feature_letter != MISSING_FEATURE_LETTER:
                    feature_value = FEATURES_LETTERS[i].index(feature_letter)
                    # one-hot encode the feature value into the feature vector X only if it is not
                    # missing (if it is missing, the one-hot vector associated to this feature is left
                    # at zero in all of its components)
                    x[idx + feature_value] = 1.0
                    # store the raw feature
                    xr[i] = feature_value
                else:
                    # if the feature is missing, the one-hot encoding is correct (i.e. all components
                    # are left as zero), but the raw encoding must be handled separately: here we set
                    # the value to -1 to represent the fact that the feature is missing with another
                    # category
                    xr[i] = -1
                idx += len(FEATURES_LETTERS[i])
            X.append(x)
            Xr.append(xr)
            Y.append(y)
    assert len(X) == len(Y)
    assert len(Xr) == len(Y)
    fds = [ALFeature(name, len(fl)) for name, fl in zip(FEATURES_NAMES, FEATURES_LETTERS)]
    return np.array(X, dtype=float), np.array(Xr, dtype=int), np.array(Y), fds
import random
import requests
import logging
def receiveVoteFromLowLevel():
    """
    input -> params which is received from lower level in the hierarchy
    return -> 200, 400
    params = {
        string: int
        "candidate_id_1": num_votes,
        "candidate_id_2": num_votes,
        "candidate_id_3": num_votes,
        "batch_id": unique_int,
        "cluster_id": cluster_id_int,
        "level_number": level_number_int,
        ...
    }
    """
    params = request.get_json()
    # Forward the vote data received from the lower level to a randomly
    # chosen orderer from orderer_ip_list.
    orderer_ip = random.choice(orderer_ip_list)
    url = "http://" + orderer_ip + ":" + str(orderer_port) + "/api/orderer/receiveFromBCNode"
    res = requests.post(url, json=params)
    if res.status_code == 200:
        return make_response("vote successfully forwarded to orderer", 200)
    logging.error("Vote data forwarding to random orderer failed!")
    return make_response("vote error occurred", 400)
def patch286() -> PatchDiscriminator:
    """
    Patch Discriminator from pix2pix.
    """
    layer_widths = [64, 128, 256, 512, 512, 512]
    return PatchDiscriminator(layer_widths)
def prefix(m):
    """Given a NFA `m`, construct a new NFA that accepts all prefixes of
    strings accepted by `m`.
    """
    if not m.is_finite():
        raise ValueError('m must be a finite automaton')
    # Fixpoint: collect every state from which an accept state is reachable.
    live = set(m.get_accept_states())
    changed = True
    while changed:
        changed = False
        for t in m.get_transitions():
            [[q], [_sym]], [[r]] = t.lhs, t.rhs
            if r in live and q not in live:
                live.add(q)
                changed = True
    # Copy the automaton, making every live state accepting.
    result = machines.FiniteAutomaton()
    result.set_start_state(m.get_start_state())
    for t in m.get_transitions():
        result.add_transition(t)
    result.add_accept_states(live)
    return result
def _predict_k_neighbors(estimator, X):
    """Predict using a k-nearest neighbors estimator."""
    X = estimator._validate_data(X, reset=False)
    # Look up the k nearest training samples and their stored targets.
    distances, indices = estimator.kneighbors(X)
    neighbor_targets = estimator._y[indices]
    # Weight neighbors per the estimator's weighting scheme, then aggregate.
    weights = _get_weights(neighbor_targets, distances, estimator.weights)
    return _aggregate_neighbors(estimator, neighbor_targets, weights)
def _loop_over(var):
""" Checks if a variable is in the form of an iterable (list/tuple)
and if not, returns it as a list. Useful for allowing argument
inputs to be either lists (e.g. [1, 3, 4]) or single-valued (e.g. 3).
Parameters
----------
var : int or float or list
Variable to check for iterability.
Returns
-------
var : list
Variable converted to list if single-valued input.
"""
if hasattr(var,"__iter__"):
return var
else:
return [var] | 254143646416af441d3858140b951b7854a0241c | 3,636,154 |
def _get_servings_rest():
    """
    Makes a REST request to Hopsworks to get a list of all servings in the current project
    Returns:
        JSON response parsed as a python dict
    Raises:
        :RestAPIError: if there was an error with the REST call to Hopsworks
    """
    resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
                    hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
                    constants.REST_CONFIG.HOPSWORKS_SERVING_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER)
    response = util.send_request(constants.HTTP_CONFIG.HTTP_GET, resource_url)
    response_object = response.json()
    if response.status_code == 200:
        return response_object
    # Non-200: surface the parsed Hopsworks error details to the caller.
    error_code, error_msg, user_msg = util._parse_rest_error(response_object)
    raise exceptions.RestAPIError("Could not fetch list of servings from Hopsworks REST API (url: {}), "
                                  "server response: \n "
                                  "HTTP code: {}, HTTP reason: {}, error code: {}, "
                                  "error msg: {}, user msg: {}".format(resource_url, response.status_code, response.reason,
                                                                       error_code, error_msg, user_msg))
def transform_database_account_resources(
    account_id: Any, name: Any, resource_group: Any, resources: List[Dict],
) -> List[Dict]:
    """
    Transform the SQL Database/Cassandra Keyspace/MongoDB Database/Table Resource response for neo4j ingestion.
    Annotates each resource dict in place with its parent account id/name and
    resource group, then returns the same list.
    """
    for item in resources:
        item.update(
            database_account_name=name,
            database_account_id=account_id,
            resource_group_name=resource_group,
        )
    return resources
def jacsim(doc1, doc2, docsAsShingleSets, sign_matrix):
    """ Jaccard Similarity.
    :param doc1: First doc (column index) to be compared
    :param doc2: Second doc (column index) to be compared
    :param docsAsShingleSets: Document wise shingles (not used here; kept for interface compatibility)
    :param sign_matrix: The Signature matrix
    :return: The jaccard similarity value
    """
    sig_a = sign_matrix[:, doc1]
    sig_b = sign_matrix[:, doc2]
    # Count rows where both signature columns are truthy.
    common = int(np.count_nonzero(np.logical_and(sig_a, sig_b)))
    return common / len(sig_a)
def train(model, loss_function, optimizer, train_loader, val_loader=None, max_epochs=100, verbose=False, debug=False):
    """
    Helper method for training a model.
    Args:
        model: PyTorch model.
        loss_function: Loss function.
        optimizer: Optimizer used to update the model parameters.
        train_loader: pytorch DataLoader for the training set.
        val_loader: pytorch DataLoader for the validation set.
        max_epochs: maximum number of iterations over the training set.
        verbose: print training information
        debug: print debugging information, if verbose is True
    Returns:
        list: List of losses per epoch (validation losses if val_loader is
        given, otherwise an EWMA of the batch training losses).
    """
    # Stores the loss at the end of each epoch
    losses = []
    # If we have validation data, use it
    if val_loader is not None:
        losses.append(validate(model=model, loss_function=loss_function, validation_loader=val_loader))
    # Else use an EWMA of the batch losses
    loss_ewma = 0
    ewma_lambda = 0.2
    for epoch in range(max_epochs):
        for batch_num, sample_batch in enumerate(train_loader):
            batch_x, batch_target = sample_batch
            output = model(batch_x)
            loss = loss_function(output, batch_target.unsqueeze(1))
            # BUG FIX: detach the loss before folding it into the EWMA. The
            # original accumulated `loss` with its autograd graph attached, so
            # every batch's graph stayed alive for the whole run (memory leak).
            loss_ewma = loss_ewma + ewma_lambda * loss.detach() - ewma_lambda * loss_ewma
            # zero the gradient buffers
            optimizer.zero_grad()
            # Store the gradient of loss function on the leaf nodes
            loss.backward()
            # Does the update
            optimizer.step()
        if val_loader is not None:
            epoch_loss = validate(model=model, loss_function=loss_function, validation_loader=val_loader)
        else:
            # .clone() avoids aliasing the running EWMA tensor; fall back to
            # the raw value if no batches were seen (loss_ewma is still int 0).
            epoch_loss = loss_ewma.clone() if hasattr(loss_ewma, "clone") else loss_ewma
        losses.append(epoch_loss)
        if verbose:
            print('\n')
            print('Epoch {}'.format(epoch))
            print('Loss: {}'.format(epoch_loss))
        if verbose and debug:
            # NOTE(review): assumes the model exposes an `fc3` layer — confirm
            # before using debug mode with other architectures.
            layer = model.fc3
            print('Weight:')
            print(layer.weight)
            print(layer.bias.data)
            print('Gradient:')
            print(layer.weight.grad)
            print(layer.bias.grad)
    if verbose:
        print('\n')
        print('Initial loss: {:.3f}'.format(losses[0]))
        print('Final loss: {:.3f}'.format(losses[-1]))
    return losses
import os
def recent_stream(model_name=None, filter=None):
    """ return a dict, key the model name of the most recent stream, with stage and date
    model_name : str | None
        the full name of the model, e.g. P302_8years/uw8000. If None, use the current folder path
    filter : callable | None
        optional predicate applied to the short model name; models failing it are skipped
    """
    if model_name is None: model_name = '/'.join(os.getcwd().split('/')[-2:])
    sinfo = StreamInfo(model_name)
    sdf = pd.DataFrame(sinfo).T
    # select last one for each model
    recent = dict()
    for model, s in zip(sdf.model, sdf.index):
        m = model.split('/')[-1]
        if filter is not None and not filter(m): continue
        # BUG FIX: DataFrame.ix was deprecated and removed in pandas 1.0;
        # use label-based .loc instead.
        row = sdf.loc[s]
        # Later streams overwrite earlier ones, so the most recent entry wins.
        # (The original's if/else branches assigned identical contents; merged.)
        recent[m] = dict(stream=s, date=row.date, stage=row.stage, job_list=row.job_list)
    return recent
import os
import subprocess
def batch_tile_redshifts(tileid, exptable, group, spectrographs=None,
                         submit=False, queue='realtime', reservation=None,
                         dependency=None, system_name=None, run_zmtl=False,
                         noafterburners=False):
    """
    Generate batch script for spectra+coadd+redshifts for a tile
    Args:
        tileid (int): Tile ID
        exptable (Table): has columns NIGHT EXPID to use; ignores other columns.
            Doesn't need to be full pipeline exposures table (but could be)
        group (str): cumulative, pernight, perexp, or a custom name
    Options:
        spectrographs (list of int): spectrographs to include
        submit (bool): also submit batch script to queue
        queue (str): batch queue name
        reservation (str): batch reservation name
        dependency (str): passed to sbatch --dependency upon submit
        system_name (str): batch system name, e.g. cori-haswell, perlmutter-gpu
        run_zmtl (bool): if True, also run make_zmtl_files
        noafterburners (bool): if True, do not run QSO afterburners
    Returns tuple (scriptpath, error):
        scriptpath (str): full path to generated script
        err (int): return code from submitting job (0 if submit=False)
    By default this generates the script but don't submit it
    """
    log = get_logger()
    if spectrographs is None:
        spectrographs = (0,1,2,3,4,5,6,7,8,9)
    # --- Input validation: the grouping mode constrains the exptable shape ---
    # perexp jobs process exactly one exposure each.
    if (group == 'perexp') and len(exptable)>1:
        msg = f'group=perexp requires 1 exptable row, not {len(exptable)}'
        log.error(msg)
        raise ValueError(msg)
    # pernight jobs must not mix nights.
    nights = np.unique(np.asarray(exptable['NIGHT']))
    if (group in ['pernight', 'pernight-v0']) and len(nights)>1:
        msg = f'group=pernight requires all exptable rows to be same night, not {nights}'
        log.error(msg)
        raise ValueError(msg)
    # All rows must belong to the single requested tile.
    tileids = np.unique(np.asarray(exptable['TILEID']))
    if len(tileids)>1:
        msg = f'batch_tile_redshifts requires all exptable rows to be same tileid, not {tileids}'
        log.error(msg)
        raise ValueError(msg)
    elif len(tileids) == 1 and tileids[0] != tileid:
        msg = f'Specified tileid={tileid} didnt match tileid given in exptable, {tileids}'
        log.error(msg)
        raise ValueError(msg)
    spectro_string = ' '.join([str(sp) for sp in spectrographs])
    # One batch node per spectrograph.
    num_nodes = len(spectrographs)
    # Build shell glob patterns for the input cframe files of every exposure;
    # $SPECTRO is expanded later inside the generated script.
    frame_glob = list()
    for night, expid in zip(exptable['NIGHT'], exptable['EXPID']):
        frame_glob.append(f'exposures/{night}/{expid:08d}/cframe-[brz]$SPECTRO-{expid:08d}.fits')
    #- Be explicit about naming. Night should be the most recent Night.
    #- Expid only used for labeling perexp, for which there is only one row here anyway
    night = np.max(exptable['NIGHT'])
    expid = np.min(exptable['EXPID'])
    frame_glob = ' '.join(frame_glob)
    batchscript = get_tile_redshift_script_pathname(
        tileid, group, night=night, expid=expid)
    # %j is expanded by Slurm to the job id, giving one log per submission.
    batchlog = batchscript.replace('.slurm', r'-%j.log')
    scriptdir = os.path.split(batchscript)[0]
    os.makedirs(scriptdir, exist_ok=True)
    outdir = get_tile_redshift_relpath(tileid, group, night=night, expid=expid)
    suffix = get_tile_redshift_script_suffix(
        tileid, group, night=night, expid=expid)
    jobname = f'redrock-{suffix}'
    write_redshift_script(batchscript, outdir,
                          jobname=jobname,
                          num_nodes=num_nodes,
                          group=group,
                          spectro_string=spectro_string, suffix=suffix,
                          frame_glob=frame_glob,
                          queue=queue, system_name=system_name,
                          onetile=True, tileid=tileid, night=night,
                          run_zmtl=run_zmtl, noafterburners=noafterburners)
    err = 0
    if submit:
        cmd = ['sbatch' ,]
        if reservation:
            cmd.extend(['--reservation', reservation])
        if dependency:
            cmd.extend(['--dependency', dependency])
        # - sbatch requires the script to be last, after all options
        cmd.append(batchscript)
        err = subprocess.call(cmd)
        basename = os.path.basename(batchscript)
        if err == 0:
            log.info(f'submitted {basename}')
        else:
            log.error(f'Error {err} submitting {basename}')
    return batchscript, err | b6367abf02f81c534db9df670261d033e53f46e5 | 3,636,160
def symplot(b,
            max_m = 20,
            max_n = 20,
            ymin = None,
            sqrts = False,
            log = True,
            B0 = True,
            helical_detail = False,
            # NOTE(review): mutable default; it is only read here, so this is
            # safe as long as callers never mutate the shared dict.
            legend_args = {"loc":"best"},
            **kwargs):
    """
    Plot the radial variation of all the Fourier modes of :math:`|B|`
    in Boozer coordinates. Color is used to group modes with
    :math:`m=0` and/or :math:`n=0`.
    Args:
        b (Booz_xform, str): The Booz_xform instance to plot,
          or a filename of a boozmn_*.nc file.
        max_m (int): Maximum poloidal mode number to include in the plot.
        max_n (int): Maximum toroidal mode number (divided by nfp) to include in the plot.
        ymin (float): Lower limit for the y-axis. Only used if ``log==True``.
        sqrts (bool): If true, the x axis will be sqrt(toroidal flux) instead of toroidal flux.
        log (bool): Whether to use a logarithmic y axis.
        B0 (bool): Whether to include the m=n=0 mode in the figure.
        helical_detail (bool): Whether to show modes with ``n = nfp * m`` and
          ``n = -nfp * m`` in a separate color.
        legend_args (dict): Any arguments to pass to ``plt.legend()``.
          Useful for setting the legend font size and location.
        kwargs: Any additional key-value pairs to pass to matplotlib's ``plot`` command.
    This function can generate figures like this:
    .. image:: symplot1.png
       :width: 400
    .. image:: symplot2.png
       :width: 400
    """
    # Accept either a Booz_xform object or a filename.
    b = handle_b_input(b)
    # One color per symmetry class of (m, n).
    background_color = 'b'
    QA_color = [0, 0.7, 0]
    mirror_color = [0.7, 0.5, 0]
    helical_color = [1, 0, 1]
    helical_plus_color = 'gray'
    helical_minus_color = 'c'
    # If ymin is not specified, pick a default value such that the
    # plot mostly shows the largest modes, not all the modes down to
    # machine precision.
    if ymin is None:
        ymin = np.max(b.bmnc_b) * 1e-4
    mnmax = len(b.xm_b)
    if sqrts:
        rad = np.sqrt(b.s_b)
    else:
        rad = b.s_b
    # On a log axis only magnitudes are meaningful; on a linear axis the
    # signed amplitudes are plotted directly.
    def my_abs(x):
        if log:
            return np.abs(x)
        else:
            return x
    # Draw a reference line at 0.
    if not log:
        plt.plot([0, 1], [0, 0], ':k')
    # First, plot just the 1st mode of each type, so the legend looks nice.
    if B0:
        for imode in range(mnmax):
            if b.xn_b[imode] == 0 and b.xm_b[imode] == 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=background_color,
                         label='m = 0, n = 0 (Background)', **kwargs)
                break
    for imode in range(mnmax):
        if b.xn_b[imode] == 0 and b.xm_b[imode] != 0:
            plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=QA_color,
                     label=r'm $\ne$ 0, n = 0 (Quasiaxisymmetric)', **kwargs)
            break
    for imode in range(mnmax):
        if b.xn_b[imode] != 0 and b.xm_b[imode] == 0:
            plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=mirror_color,
                     label=r'm = 0, n $\ne$ 0 (Mirror)', **kwargs)
            break
    if helical_detail:
        # Split helical modes into n = +nfp*m, n = -nfp*m, and the rest.
        for imode in range(mnmax):
            if b.xn_b[imode] == b.xm_b[imode] * b.nfp and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_plus_color,
                         label=r'n = n$_{fp}$ m (Helical)', **kwargs)
                break
        for imode in range(mnmax):
            if b.xn_b[imode] == -b.xm_b[imode] * b.nfp and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_minus_color,
                         label=r'n = -n$_{fp}$ m (Helical)', **kwargs)
                break
        for imode in range(mnmax):
            if b.xn_b[imode] != 0 and b.xm_b[imode] != 0:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_color,
                         label=r'Other helical', **kwargs)
                break
    else:
        for imode in range(mnmax):
            if b.xn_b[imode] != 0 and b.xm_b[imode] != 0 \
               and b.xn_b[imode] != b.xm_b[imode] * b.nfp and b.xn_b[imode] != -b.xm_b[imode] * b.nfp:
                plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=helical_color,
                         label=r'm $\ne$ 0, n $\ne$ 0 (Helical)', **kwargs)
                break
    plt.legend(**legend_args)
    # Now that the legend is made, plot all modes
    for imode in range(mnmax):
        if np.abs(b.xm_b[imode]) > max_m:
            continue
        if np.abs(b.xn_b[imode]) > max_n * b.nfp:
            continue
        if b.xn_b[imode] == 0:
            if b.xm_b[imode] == 0:
                mycolor = background_color
                if not B0:
                    continue
            else:
                mycolor = QA_color
        else:
            if b.xm_b[imode] == 0:
                mycolor = mirror_color
            else:
                # The mode is helical
                if helical_detail:
                    if b.xn_b[imode] == b.xm_b[imode] * b.nfp:
                        mycolor = helical_plus_color
                    elif b.xn_b[imode] == -b.xm_b[imode] * b.nfp:
                        mycolor = helical_minus_color
                    else:
                        mycolor = helical_color
                else:
                    mycolor = helical_color
        plt.plot(rad, my_abs(b.bmnc_b[imode, :]), color=mycolor, **kwargs)
    if sqrts:
        plt.xlabel('$r/a$ = sqrt(Normalized toroidal flux)')
    else:
        plt.xlabel('$s$ = Normalized toroidal flux')
    plt.title('Fourier harmonics of |B| in Boozer coordinates [Tesla]')
    plt.xlim([0, 1])
    if log:
        plt.yscale("log")
        plt.gca().set_ylim(bottom=ymin) | 7d6b65f9cb3a95ea3b660b4181c76affc0b35a3b | 3,636,161
import gc
def get_holistic_keypoints(frames, holistic=None):
    """Run MediaPipe Holistic over ``frames`` and collect keypoints.

    For videos, it's optimal to create with `static_image_mode=False` for each video.
    https://google.github.io/mediapipe/solutions/holistic.html#static_image_mode

    Parameters
    ----------
    frames : iterable of ndarray
        RGB frames of a single video.
    holistic : mp_holistic.Holistic | None
        Optional pre-built Holistic instance.  If None a fresh instance is
        created per call.  (The previous default -- a single instance built
        at definition time -- was closed at the end of the first call, so
        every later call using the default operated on a closed object.)

    Returns
    -------
    (keypoints, confs) : tuple of ndarray
        Stacked per-frame keypoint coordinates and confidences.
    """
    if holistic is None:
        holistic = mp_holistic.Holistic(
            static_image_mode=False, model_complexity=2
        )
    keypoints = []
    confs = []
    for frame in frames:
        results = holistic.process(frame)
        body_data, body_conf = process_body_landmarks(
            results.pose_landmarks, N_BODY_LANDMARKS
        )
        face_data, face_conf = process_other_landmarks(
            results.face_landmarks, N_FACE_LANDMARKS
        )
        lh_data, lh_conf = process_other_landmarks(
            results.left_hand_landmarks, N_HAND_LANDMARKS
        )
        rh_data, rh_conf = process_other_landmarks(
            results.right_hand_landmarks, N_HAND_LANDMARKS
        )
        data = np.concatenate([body_data, face_data, lh_data, rh_data])
        conf = np.concatenate([body_conf, face_conf, lh_conf, rh_conf])
        keypoints.append(data)
        confs.append(conf)
    # TODO: Reuse the same object when this issue is fixed:
    # https://github.com/google/mediapipe/issues/2152
    holistic.close()
    del holistic
    gc.collect()
    keypoints = np.stack(keypoints)
    confs = np.stack(confs)
    return keypoints, confs
import struct
def get_arp_info(pkt):
    """
    Break the ARP packet into its components.

    Returns the tuple ``(ar_hrd, ar_pro, ar_hln, ar_pln, ar_op,
    ar_sha, ar_spa, ar_tha, ar_tpa)``.  Raises ARPError when the
    buffer is shorter than the fixed 8-byte header or than the total
    length implied by the advertised address sizes.
    """
    if len(pkt) < 8:
        raise ARPError("ARP header too short")
    hrd, pro, hln, pln, op = struct.unpack("!HHBBH", pkt[0:8])
    # Fixed header plus two hardware and two protocol addresses.
    if len(pkt) < 8 + 2 * hln + 2 * pln:
        raise ARPError("ARP packet too short")
    # Slice the four variable-length address fields in wire order:
    # sender hw, sender proto, target hw, target proto.
    fields = []
    offset = 8
    for length in (hln, pln, hln, pln):
        fields.append(pkt[offset:offset + length])
        offset += length
    sha, spa, tha, tpa = fields
    return (hrd, pro, hln, pln, op, sha, spa, tha, tpa)
def update_ref_point(ref_point, fy):
    """
    Update the reference point by an offspring.

    parameter
    ----------
    ref_point: 1D-Array
        the position of original reference point
    fy: 1D-Array
        the fitness values of the offspring
    return
    ----------
    1D-Array
        the component-wise minimum of the two inputs, i.e. the
        position of the updated reference point
    """
    stacked = np.vstack((ref_point, fy))
    return stacked.min(axis=0)
from typing import Any
def parse(grm: util.PathLike,
          datum: str,
          **kwargs: Any) -> interface.Response:
    """
    Parse sentence *datum* with ACE using grammar *grm*.

    This is a convenience wrapper around :func:`parse_from_iterable`
    for a single input sentence.

    Args:
        grm (str): path to a compiled grammar image
        datum (str): the sentence to parse
        **kwargs: additional keyword arguments to pass to the ACEParser
    Returns:
        :class:`~delphin.interface.Response`
    Example:
        >>> response = ace.parse('erg.dat', 'Dogs bark.')
        NOTE: parsed 1 / 1 sentences, avg 797k, time 0.00707s
    """
    responses = parse_from_iterable(grm, [datum], **kwargs)
    return next(responses)
def EVLAPolCal(uv, InsCals, err, InsCalPoln=None, \
               doCalib=2, gainUse=0, doBand=1, BPVer=0, flagVer=-1, \
               solType=" ", fixPoln=False, avgIF=False, \
               solInt=0.0, refAnt=0, ChInc=1, ChWid=1, \
               doFitRL=False, doFitOri=True,
               check=False, debug = False, \
               nThreads=1, noScrat=[], logfile = ""):
    """
    Instrumental Polarization
    Do Instrumental
    Instrumental cal uses PCal
    Returns task error code, 0=OK, else failed

    * uv       = UV data object to calibrate
    * InsCals  = Instrumental poln calibrators, name or list of names
      If None no instrumental cal
    * err      = Obit error/message stack
    * InsCalPoln if non None then the list of source parameters as
      tuples in the order of calibrators in InsCals,
      (PPol, RLPhase, RM)
      PPol    = fractional poln, <0 => fit
      RLPhase = R-L phase difference in deg
      RM      = Rotation measure
    * doCalib  = Apply prior calibration table, positive=>calibrate
    * gainUse  = CL/SN table to apply
    * doBand   = >0 => apply bandpass calibration
    * BPVer    = AIPS BP table to apply
    * flagVer  = Input Flagging table version
    * solType  = solution type, " ", "LM "
    * fixPoln  = if True, don't solve for source polarization in ins. cal
      assumed 0, ignored if InsCalPoln given
    * avgIF    = NYI if True, average IFs in ins. cal.
    * solInt   = instrumental solution interval (min)
    * refAnt   = Reference antenna
    * ChInc    = channel increment for solutions
    * ChWid    = number of channels to average for solution.
    * doFitRL  = Fit R-L (or X-Y) gain phase
    * doFitOri = Fit (linear feed) orientations?
    * nThreads = Number of threads to use in imaging
    * check    = Only check script, don't execute tasks
    * debug    = Run tasks debug, show input
    * noScrat  = list of disks to avoid for scratch files
    * logfile  = Log file for task
    """
    ################################################################
    # Don't bother if not full polarization
    d = uv.Desc.Dict
    # Number of Stokes axes in the UV descriptor; PCal needs all four.
    nstoke = int(d["inaxes"][d["jlocs"]])
    if nstoke<4:
        mess = "Skip Instrumental polarization corrections - not full stokes"
        printMess(mess, logfile)
        return 0
    mess = "Instrumental polarization calibration "
    printMess(mess, logfile)
    # Instrumental calibration
    if InsCals!=None:
        pcal = ObitTask.ObitTask("PCal")
        try:
            pcal.userno = OSystem.PGetAIPSuser()   # This sometimes gets lost
        except Exception as exception:
            pass
        pcal.logFile = logfile
        if not check:
            # Copy data file naming (disk, name, class...) from uv to the task.
            setname(uv,pcal)
        if type(InsCals)==list:
            pcal.Sources = InsCals
            pcal.doFitI[0] = True
        else:
            pcal.Sources = [InsCals]
            i = 0
            for s in InsCals:
                pcal.doFitI[i] = True
                i += 1
        # Polarization fixed?
        if InsCalPoln:
            # Per-calibrator known polarization: fix (PPol, RLPhase, RM) when
            # PPol >= 0, otherwise fit the source polarization.
            if type(InsCals)==list:
                n = len(InsCals)
            else:
                n = 1
            for i in range(0,n):
                if InsCalPoln[i][0]>=0.0:
                    pcal.doFitPol[i] = False
                    pcal.PPol[i]     = InsCalPoln[i][0]
                    pcal.RLPhase[i]  = InsCalPoln[i][1]
                    pcal.RM[i]       = InsCalPoln[i][2]
                else:
                    pcal.doFitPol[i] = True
        elif fixPoln:
            # Assume zero source polarization for all calibrators.
            if type(InsCals)==list:
                i = 0
                for s in InsCals:
                    pcal.doFitPol[i] = False
                    i += 1
            else:
                pcal.doFitPol[0] = False
        pcal.doCalib  = doCalib
        pcal.gainUse  = gainUse
        pcal.doBand   = doBand
        pcal.BPVer    = BPVer
        pcal.flagVer  = flagVer
        pcal.solnType = solType
        pcal.solInt   = solInt
        pcal.ChInc    = ChInc
        pcal.ChWid    = ChWid
        pcal.refAnt   = refAnt
        pcal.doFitRL  = doFitRL
        pcal.doFitOri = doFitOri
        pcal.prtLv    = 2
        pcal.PDSoln   = 1
        pcal.CPSoln   = 1
        pcal.nThreads = nThreads
        # NOTE(review): this re-enables doFitI for every slot, overriding any
        # per-source settings made above -- confirm that is intentional.
        for i in range(0,len(pcal.doFitI)):
            pcal.doFitI[i] = True
        pcal.taskLog = logfile
        # NOTE(review): filling noScrat from index 1 skips slot 0 -- presumably
        # matching Obit's 1-based scratch-disk convention; verify.
        i = 1;
        for d in noScrat:
            pcal.noScrat[i] = d
            i += 1
        if debug:
            # Obit idiom: .i prints the task inputs.
            pcal.i
            pcal.debug = debug
        # Trap failure
        try:
            if not check:
                # Obit idiom: .g runs the task.
                pcal.g
        except Exception as exception:
            print(exception)
            mess = "PCal Failed retCode="+str(pcal.retCode)
            printMess(mess, logfile)
            return 1
        else:
            pass
    # end instrumental poln cal
    return 0
    # End EVLAPolCal | 6d42b151ed8fdff466b73798638f245b16570080 | 3,636,166
import os
import subprocess
import shlex
def check_and_store(codechecker_cfg, test_project_name, test_project_path,
                    clean_project=True):
    """
    Check a test project and store the results into the database.

    :param codechecker_cfg: dict of CodeChecker settings; must contain
        'workspace', 'checkers' and 'check_env', and may contain
        'reportdir', 'suppress_file', 'skip_file', 'clean',
        'analyzer_config', 'tag', 'force' and 'description'.
    :param test_project_name: run name used when storing the results.
    :param test_project_path: root directory of the project to analyze.
    :param clean_project: if True, clean the project before building.
    :returns: 0 on success, otherwise a non-zero return code.

    :checkers parameter should be a list of enabled or disabled checkers
    Example: ['-d', 'deadcode.DeadStores']
    """
    output_dir = codechecker_cfg['reportdir'] \
        if 'reportdir' in codechecker_cfg \
        else os.path.join(codechecker_cfg['workspace'], 'reports')
    build_cmd = project.get_build_cmd(test_project_path)
    if clean_project:
        ret = project.clean(test_project_path)
        if ret:
            return ret
    check_cmd = ['CodeChecker', 'check',
                 '-o', output_dir,
                 '-b', build_cmd,
                 '--quiet']
    suppress_file = codechecker_cfg.get('suppress_file')
    if suppress_file:
        check_cmd.extend(['--suppress', suppress_file])
    skip_file = codechecker_cfg.get('skip_file')
    if skip_file:
        check_cmd.extend(['--skip', skip_file])
    clean = codechecker_cfg.get('clean')
    if clean:
        check_cmd.extend(['--clean'])
    analyzer_config = codechecker_cfg.get('analyzer_config')
    if analyzer_config:
        check_cmd.append('--analyzer-config')
        check_cmd.extend(analyzer_config)
    check_cmd.extend(codechecker_cfg['checkers'])
    try:
        print("RUNNING CHECK")
        print(check_cmd)
        subprocess.call(
            check_cmd,
            cwd=test_project_path,
            env=codechecker_cfg['check_env'],
            encoding="utf-8",
            errors="ignore")
    # Fix: the bare name CalledProcessError was never imported here, so the
    # original handler raised NameError if it ever fired.  NOTE(review):
    # subprocess.call() does not raise CalledProcessError (only check_call /
    # run(check=True) do), so this handler is effectively dead -- kept
    # defensively in case the call is ever switched to check_call.
    except subprocess.CalledProcessError as cerr:
        print("Failed to call:\n" + ' '.join(cerr.cmd))
        return cerr.returncode
    store_cmd = ['CodeChecker', 'store', '-n', test_project_name,
                 output_dir,
                 '--url', env.parts_to_url(codechecker_cfg)]
    tag = codechecker_cfg.get('tag')
    if tag:
        store_cmd.extend(['--tag', tag])
    force = codechecker_cfg.get('force')
    if force:
        store_cmd.extend(['--force'])
    description = codechecker_cfg.get('description')
    if description:
        store_cmd.extend(['--description', "'" + description + "'"])
    try:
        print('STORE' + ' '.join(store_cmd))
        subprocess.call(
            shlex.split(
                ' '.join(store_cmd)),
            cwd=test_project_path,
            env=codechecker_cfg['check_env'],
            encoding="utf-8",
            errors="ignore")
        return 0
    except subprocess.CalledProcessError as cerr:
        print("Failed to call:\n" + ' '.join(cerr.cmd))
        return cerr.returncode
import re
def ansyArticle(data):
    """Analyse an error log and collect the article ids it references.

    The log is assumed to consist of 5-line records whose first line
    (lines 1, 6, 11, ...) contains a ``view_<id>.aspx`` URL; the numeric
    id of every record is returned in file order.

    :param data: path of the log file to analyse
    :return: list of id strings
    """
    ids = []
    with open(data, 'r') as fp:
        for lineno, line in enumerate(fp, start=1):
            # Only the first line of each 5-line record carries the URL.
            if lineno % 5 != 1:
                continue
            # Raw string: the original 'view_(\d+)\.aspx' relied on Python
            # passing unknown escapes through, which now emits a
            # SyntaxWarning; the matching behaviour is unchanged.
            m = re.search(r'view_(\d+)\.aspx', line)
            ids.append(m.group(1))
    return ids
def climb_stairs(n):
    """ Number of paths to climb n stairs if each move comprises of climbing 1
    or 2 steps.

    Delegates to ``fib``: the count of such paths obeys the Fibonacci
    recurrence C(n) = C(n-1) + C(n-2).  NOTE(review): whether the answer
    is fib(n) or fib(n+1) depends on how ``fib`` indexes its sequence --
    confirm against the local ``fib`` definition.

    Args:
        n integer
    Returns:
        integer
    Preconditions:
        n >= 0
    """
    return fib(n) | f54645e53d5ed001e023ba368f9e0c0c72d090d3 | 3,636,169
def randomInt(bit_length, seed):
    """Returns a random integer with at most ``bit_length`` bits.

    Generates ``(bit_length + 3) // 4`` hex digits (enough to cover the
    requested bits) and reduces the value modulo ``2**bit_length``.

    :param bit_length: maximum number of bits in the result
    :param seed: seed forwarded to ``randomHexString``
    """
    # // instead of /: on Python 3 true division produced a float digit
    # count, which randomHexString cannot use as a length.
    s = randomHexString((bit_length + 3) // 4, seed)
    return int(s, 16) % (1 << bit_length)
import tqdm
import warnings
def best_fit_distribution(data, bins=200, ax=None):
    """Find the best fitting distribution to the data.

    Every candidate scipy.stats distribution is fitted to ``data`` and
    scored by the sum of squared errors between its PDF and the data's
    normalized histogram; the lowest-SSE candidate wins.

    :param data: 1-D array-like of samples.
    :param bins: number of histogram bins used for scoring.
    :param ax: optional matplotlib axes; if given, each candidate PDF is
        drawn on it as a labelled line.
    :return: tuple ``(name, params)`` -- the winning distribution's name
        and its fitted shape/loc/scale parameters.
    """
    # Get histogram of original data
    y, x = np.histogram(data, bins=bins, density=True)
    # Bin edges -> bin centers.
    x = (x + np.roll(x, -1))[:-1] / 2.0
    # Distributions to check
    # NOTE(review): this list targets scipy ~1.6; several entries (e.g.
    # gilbrat, trapezoid naming) have been renamed/removed in later scipy
    # releases -- verify against the installed version.
    DISTRIBUTIONS = [
        st.alpha,
        st.anglit,
        st.arcsine,
        st.argus,
        st.beta,
        st.betaprime,
        st.bradford,
        st.burr,
        st.burr12,
        st.cauchy,
        st.chi,
        st.chi2,
        st.cosine,
        st.crystalball,
        st.dgamma,
        st.dweibull,
        st.erlang,
        st.expon,
        st.exponnorm,
        st.exponweib,
        st.exponpow,
        st.f,
        st.fatiguelife,
        st.fisk,
        st.foldcauchy,
        st.foldnorm,
        st.genlogistic,
        st.gennorm,
        st.genpareto,
        st.genexpon,
        st.genextreme,
        st.gausshyper,
        st.gamma,
        st.gengamma,
        st.genhalflogistic,
        st.geninvgauss,
        st.gilbrat,
        st.gompertz,
        st.gumbel_r,
        st.gumbel_l,
        st.halfcauchy,
        st.halflogistic,
        st.halfnorm,
        st.halfgennorm,
        st.hypsecant,
        st.invgamma,
        st.invgauss,
        st.invweibull,
        st.johnsonsb,
        st.johnsonsu,
        st.kappa4,
        st.kappa3,
        st.ksone,
        st.kstwo,
        st.kstwobign,
        st.laplace,
        st.laplace_asymmetric,
        st.levy,
        st.levy_l,
        # st.levy_stable,  # unstable in v1.6.0
        st.logistic,
        st.loggamma,
        st.loglaplace,
        st.lognorm,
        st.loguniform,
        st.lomax,
        st.maxwell,
        st.mielke,
        st.moyal,
        st.nakagami,
        st.ncx2,
        st.ncf,
        st.nct,
        st.norm,
        st.norminvgauss,
        st.pareto,
        st.pearson3,
        st.powerlaw,
        st.powerlognorm,
        st.powernorm,
        st.rdist,
        st.rayleigh,
        st.rice,
        st.recipinvgauss,
        st.semicircular,
        st.skewnorm,
        st.t,
        st.trapezoid,
        st.triang,
        st.truncexpon,
        st.truncnorm,
        st.tukeylambda,
        st.uniform,
        # st.vonmises,  # does not work in v1.6.0
        st.vonmises_line,
        st.wald,
        st.weibull_min,
        st.weibull_max,
        st.wrapcauchy,
    ]
    # Best holders
    best_distribution = st.norm
    best_params = (0.0, 1.0)
    best_sse = np.inf
    # Estimate distribution parameters from data
    for distribution in tqdm(DISTRIBUTIONS):
        # Try to fit the distribution
        try:
            # Ignore warnings from data that can't be fit
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                # fit dist to data
                params = distribution.fit(data)
                # Separate parts of parameters
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                # Calculate fitted PDF and error with fit in distribution
                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                sse = np.sum(np.power(y - pdf, 2.0))
                # if ax is passed, add to plot
                try:
                    if ax:
                        pd.Series(pdf, x).plot(
                            label=distribution.name, legend=True, ax=ax
                        )
                except Exception:
                    pass
                # identify if this distribution is better
                if best_sse > sse > 0:
                    best_distribution = distribution
                    best_params = params
                    best_sse = sse
        except Exception:
            # Distributions that cannot be fitted to this data are skipped.
            pass
    return best_distribution.name, best_params | c13525e36b2ccd2e74fb4d3e3c5393ae3a623e72 | 3,636,171
def cumulative_completion_rate(completion_times, inc, top):
    """
    Gets the cumulative completion rate data from an array of completion times.
    Starting from zero, time is incremented by `inc` until `top` is reached
    (inclusive) and the number of timestamps in `completion_times` under the
    current time is added to `counts`. The timestamps can be obtained from
    `completions()`.

    Parameters
    ----------
    completion_times : array
        List of completion times (one per trial).
    inc : float
        Amount of time increment per check.
    top : float
        Largest time at which to check the completion rate.

    Returns
    -------
    cutoffs : ndarray
        Array of cutoff times at which the completion rate is checked.
    counts : ndarray
        The number of trials completed faster than the corresponding cutoff.
    """
    ts = np.sort(np.asarray(completion_times))
    cutoffs = np.arange(0, top + inc, inc)
    # searchsorted(..., side='left') gives, for each cutoff t, the number of
    # completion times strictly below t -- identical to the original
    # per-cutoff np.sum(ts < t) loop, but O((n + m) log n) instead of O(n*m).
    # astype(float) preserves the float dtype of the original np.zeros output.
    counts = np.searchsorted(ts, cutoffs, side='left').astype(float)
    return cutoffs, counts
def fitPeak(stack, slices, width, startingfit, **kwargs):
    """
    Method to fit a peak through the stack.

    The method will track the peak through the stack, assuming that moves
    are relatively small from one slice to the next

    Parameters
    ----------
    stack : ndarray
        3D data stack (slice, y, x) the peak is tracked through.
    slices : iterator
        an iterator which dictates which slices to fit, should yeild
        integers only
    width : integer
        width of fitting window
    startingfit : dict
        fit coefficients
    kwargs : dict
        forwarded to ``Gauss2D.optimize_params``.

    Returns
    -------
    list : list of dicts
        A list of dictionaries containing the best fits. Easy to turn into
        a DataFrame
    """
    # set up our variable to return
    toreturn = []
    # grab the starting fit parameters
    popt_d = startingfit.copy()
    y0 = int(round(popt_d["y0"]))
    x0 = int(round(popt_d["x0"]))
    # The number of parameters (stored alongside their errors, hence * 2)
    # identifies which Gauss2D model variant was used for the initial fit.
    if len(popt_d) == 6 * 2:
        modeltype = "norot"
    elif len(popt_d) == 5 * 2:
        modeltype = "sym"
    elif len(popt_d) == 7 * 2:
        modeltype = "full"
    else:
        raise ValueError("Dictionary is too big {}".format(popt_d))
    for s in slices:
        # make the slice
        try:
            myslice = slice_maker((y0, x0), width)
        except RuntimeError as e:
            logger.warning("Fit window moved to edge of ROI")
            break
        else:
            # pull the starting values from it
            ystart = myslice[0].start
            xstart = myslice[1].start
            # insert the z-slice number
            myslice = (s,) + myslice
            # set up the fit and perform it using last best params
            sub_stack = stack[myslice]
            if sub_stack.size == 0:
                # the fit window has moved to the edge, break
                logger.warning("Fit window moved to edge of ROI")
                break
            fit = Gauss2D(sub_stack)
            # move our guess coefs back into the window
            popt_d["x0"] -= xstart
            popt_d["y0"] -= ystart
            # leave this in for now for easier debugging in future.
            try:
                fit.optimize_params(popt_d, **kwargs)
            except TypeError as e:
                print(repr(myslice))
                raise e
            # if there was an error performing the fit, try again without
            # a guess
            if fit.error:
                fit.optimize_params(modeltype=modeltype, **kwargs)
            # if there's not an error update center of fitting window and
            # move on to the next fit
            if not fit.error:
                popt_d = fit.all_params_dict()
                # Convert window-local coordinates back to stack coordinates.
                popt_d["x0"] += xstart
                popt_d["y0"] += ystart
                popt_d["slice"] = s
                # calculate the apparent noise as the standard deviation
                # of what's the residuals of the fit
                popt_d["noise"] = (sub_stack - fit.fit_model).std()
                toreturn.append(popt_d.copy())
                # Track the peak: next window is centred on this fit.
                y0 = int(round(popt_d["y0"]))
                x0 = int(round(popt_d["x0"]))
            else:
                # if the fit fails, make sure to _not_ update positions.
                bad_fit = fit.all_params_dict()
                bad_fit["slice"] = s
                # noise of a failed fit is not really useful
                popt_d["noise"] = np.nan
                toreturn.append(bad_fit.copy())
    return toreturn | e3b769e6004b263514a937ade8ff114b8c2e8453 | 3,636,173
def sbol_empty_space (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ Built-in empty space renderer.

    Draws nothing: simply advances the layout position by ``x_extent``
    (12 by default, overridable via ``opts``) and optionally writes a
    centred label.  Returns the ``(prev_end, final_end)`` extent tuple.
    """
    # Defaults, overridable through the opts dict.
    zorder_add = 0.0
    x_extent = 12.0
    if opts is not None:
        zorder_add = opts.get('zorder_add', zorder_add)
        x_extent = opts.get('x_extent', x_extent)
    final_start = prev_end
    final_end = final_start + x_extent
    if opts is not None and 'label' in opts:
        # Centre the label within the gap, whichever way it runs.
        if final_start > final_end:
            midpoint = final_end + ((final_start - final_end) / 2.0)
        else:
            midpoint = final_start + ((final_end - final_start) / 2.0)
        write_label(ax, opts['label'], midpoint, opts=opts)
    if final_start > final_end:
        return prev_end, final_start
    return prev_end, final_end
def find_max_1(array: list) -> int:
    """
    O(n^2)

    Deliberately naive baseline: each candidate is compared against every
    element, and the last candidate that nothing exceeds is returned.

    :param array: non-empty list of integers
    :return: integer, the maximum element
    """
    best = array[0]
    for candidate in array:
        if all(other <= candidate for other in array):
            best = candidate
    return best
def radec2xy(hdr, ra, dec):
    """Transforms sky coordinates (RA and Dec) to pixel coordinates (x and y).

    Input:
    - hdr: FITS image header
    - ra <float> : Right ascension value in degrees
    - dec <float>: Declination value in degrees
    Output:
    - (x,y) <tuple>: pixel coordinates
    """
    # Use a distinct local name: the original assigned to ``wcs``, which
    # shadowed the ``wcs`` module and raised UnboundLocalError on this
    # very line, before the module could ever be used.
    w = wcs.WCS(hdr)
    skycrd = np.array([[ra, dec]])
    # NOTE(review): wcs_sky2pix is the legacy pywcs API name; modern
    # astropy spells it wcs_world2pix -- confirm which package supplies
    # ``wcs`` at the top of this file.
    pixcrd = w.wcs_sky2pix(skycrd, 1)
    x = pixcrd[0][0]
    y = pixcrd[0][1]
    return (x, y)
def getStepsBySerialNoAndProcessID(SerialNo, ProcessID):
    """
    Fetch all workflow steps of a form, given its serial number and form ID.

    :param SerialNo: workflow serial number
    :param ProcessID: form (process) ID
    :return: list of dicts {"name": step name, "value": step ID, "state": step status}
    """
    raw = Raw_sql()
    # SECURITY(review): SerialNo and ProcessID are interpolated directly
    # into the SQL text; if either can originate from user input this is
    # SQL-injectable.  Prefer Raw_sql's parameterized query form if one
    # exists -- confirm against its API.
    raw.sql = "SELECT a.StepID as value, b.StepName as name, Finished as state FROM RMI_TASK_PROCESS_STEP a WITH(NOLOCK) JOIN RMI_STEP b WITH(NOLOCK) "\
              " ON a.StepID = b.StepID WHERE SerialNo = '%s' "\
              " AND ProcessID = '%s'" % (SerialNo, ProcessID)
    res, columns = raw.query_all(needColumnName=True)
    return translateQueryResIntoDict(columns, res) | 5b6c31a9ff1225db20e8423add9804941bf15f73 | 3,636,177
def _split_train_dataset(y, tx, jet_num_idx=22):
"""Split the given training dataset into three distinct datasets.
Datasets are split depending on the value of the 'PRI_jet_num' column,
since this column dictates the -999 values for all the columns containing
the latter (except for the 'DER_mass_MMC' column at index 0).
"""
jet0_tx = tx[tx[:, jet_num_idx] == 0]
jet0_tx = np.delete(jet0_tx, [jet_num_idx, 4, 5, 6, 12, 23, 24, 25, 26, 27, 28, 29], axis=1)
jet0_y = y[tx[:, jet_num_idx] == 0]
jet1_tx = tx[tx[:, jet_num_idx] == 1]
jet1_tx = np.delete(jet1_tx, [jet_num_idx, 4, 5, 6, 12, 26, 27, 28], axis=1)
jet1_y = y[tx[:, jet_num_idx] == 1]
jetR_tx = tx[tx[:, jet_num_idx] >= 2]
jetR_y = y[tx[:, jet_num_idx] >= 2]
return jet0_y, jet1_y, jetR_y, jet0_tx, jet1_tx, jetR_tx | b5ab9efff0655f03290c345d14a6f9bd5263a116 | 3,636,178 |
def bear(
    transitions=None,
    # Common settings
    discount_factor=0.99,
    # Adam optimizer settings
    lr_q=1e-3,
    lr_pi=1e-3,
    lr_enc=1e-3,
    lr_dec=1e-3,
    # Training settings
    minibatch_size=100,
    polyak_rate=0.005,
    # BEAR settings
    num_qs=2,
    kernel_type="laplacian",
):
    """
    Bootstrapping error accumulation reduction (BEAR) control preset

    Returns a factory ``_bear(env) -> BEAR`` that builds the agent's
    networks and replay buffer for the given environment.

    Args:
        transitions:
            dictionary of transitions generated by cpprb.ReplayBuffer.get_all_transitions()
        discount_factor (float): Discount factor for future rewards.
        lr_q (float): Learning rate for the Q network.
        lr_pi (float): Learning rate for the policy network.
        lr_enc (float): Learning rate for the encoder.
        lr_dec (float): Learning rate for the decoder.
        minibatch_size (int): Number of experiences to sample in each training update.
        polyak_rate (float): Speed with which to update the target network towards the online network.
        num_qs (int): Number of q functions for ensemble.
        kernel_type (str): MMD kernel used by BEAR (e.g. "laplacian").
    """
    def _bear(env):
        # BEAR is an offline algorithm: force off-policy mode.
        disable_on_policy_mode()
        device = get_device()
        # Ensemble of Q networks with a Polyak-averaged target.
        q_models = nn.ModuleList([fc_q(env) for _ in range(num_qs)]).to(device)
        qs_optimizer = Adam(q_models.parameters(), lr=lr_q)
        qs = EnsembleQContinuous(
            q_models,
            qs_optimizer,
            target=PolyakTarget(polyak_rate),
            name='qs'
        )
        policy_model = fc_soft_policy(env).to(device)
        policy_optimizer = Adam(policy_model.parameters(), lr=lr_pi)
        policy = SoftDeterministicPolicy(
            policy_model,
            policy_optimizer,
            env.action_space,
            target=PolyakTarget(polyak_rate),
        )
        # VAE (encoder/decoder) that models the behavior policy's actions;
        # latent dimension follows the BCQ convention of 2 * action_dim.
        latent_dim = env.action_space.shape[0] * 2
        encoder_model = fc_bcq_encoder(env, latent_dim=latent_dim).to(device)
        encoder_optimizer = Adam(encoder_model.parameters(), lr=lr_enc)
        encoder = BcqEncoder(
            model=encoder_model,
            latent_dim=latent_dim,
            optimizer=encoder_optimizer,
            name="encoder",
        )
        decoder_model = fc_bcq_decoder(env, latent_dim=latent_dim).to(device)
        decoder_optimizer = Adam(decoder_model.parameters(), lr=lr_dec)
        decoder = BcqDecoder(
            model=decoder_model,
            latent_dim=latent_dim,
            space=env.action_space,
            optimizer=decoder_optimizer,
            name="decoder",
        )
        # Pre-load the offline dataset into the replay buffer, if provided.
        replay_buffer = ExperienceReplayBuffer(1e7, env)
        if transitions is not None:
            samples = replay_buffer.samples_from_cpprb(
                transitions, device="cpu")
            replay_buffer.store(samples)
        set_replay_buffer(replay_buffer)
        return BEAR(
            qs=qs,
            encoder=encoder,
            decoder=decoder,
            policy=policy,
            kernel_type=kernel_type,
            discount_factor=discount_factor,
            minibatch_size=minibatch_size,
        )
    return _bear | 24a0ec3e0d8a2ce7074c019d93dcd424b5d967a8 | 3,636,179
def benchmark(setup=None, number=10, repeat=3, warmup=5):
    """A parametrized decorator to benchmark the test.

    Setting up the bench can happen in the normal setUp, which is applied
    to all benches identically, and additionally the setup parameter,
    which is bench-specific.  After ``warmup`` untimed calls, the wrapped
    function is timed ``number * repeat`` times; the per-loop time is the
    best of its ``repeat`` runs, and the min/avg of those loop times is
    printed.

    Parameters
    ----------
    setup : function
        A function to call once to set up the test.
    number : int
        The number of loops of repeat repeats to run.
    repeat : int
        The number of repeats in each loop.
    warmup : int
        The number of warmup runs of the function.
    """
    def real_decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if setup is not None:
                setup(self)
            # Untimed warmup calls.
            for _ in range(warmup):
                func(self, *args, **kwargs)
            starts = np.zeros((number, repeat))
            ends = np.zeros((number, repeat))
            for loop in range(number):
                for rep in range(repeat):
                    starts[loop, rep] = perf_counter()
                    func(self, *args, **kwargs)
                    ends[loop, rep] = perf_counter()
            # Best-of-repeat wall-clock time for each loop.
            clock_times = (ends - starts).min(axis=1)
            print(
                "[{}] with {} loops, best of {}:".format(
                    _get_bench_name(func), number, repeat
                )
            )
            print(
                "\tmin {:4s} per loop".format(
                    _timeitlike_time_format(clock_times.min())
                )
            )
            print(
                "\tavg {:4s} per loop".format(
                    _timeitlike_time_format(clock_times.mean())
                )
            )
        return wrapper
    return real_decorator
def make_dummy_protein_sequence(
        n_supporting_variant_reads,
        n_supporting_variant_sequences,
        n_supporting_reference_transcripts,
        n_total_variant_sequences=None,
        n_total_variant_reads=None,
        n_total_reference_transcripts=None,
        gene=["TP53"],
        amino_acids="MKHW",  # ATG=M|AAA=K|CAC=H|TGG=W
        cdna_sequence="CCCATGAAACACTGGTAG",
        variant_cdna_interval_start=8,  # assuming variant was AAC>AAA
        variant_cdna_interval_end=9,
        variant_aa_interval_start=1,
        variant_aa_interval_end=2,
        number_mismatches=1):
    """
    Creates ProteinSequence object with None filled in for most fields.

    Each ``n_total_*`` argument defaults to its ``n_supporting_*``
    counterpart when omitted, and each supporting count must not exceed
    the corresponding total.
    """
    if n_total_variant_reads is None:
        n_total_variant_reads = n_supporting_variant_reads
    if n_total_variant_sequences is None:
        n_total_variant_sequences = n_supporting_variant_sequences
    if n_total_reference_transcripts is None:
        # Fix: the original assigned the variable to itself here (leaving it
        # None), which then broke the assertion below and the product used
        # for n_translations.
        n_total_reference_transcripts = n_supporting_reference_transcripts
    assert n_supporting_variant_sequences <= n_supporting_variant_reads
    assert n_supporting_variant_sequences <= n_total_variant_sequences
    assert n_supporting_reference_transcripts <= n_total_reference_transcripts
    # One translation per (reference transcript, variant sequence) pair.
    n_translations = n_total_reference_transcripts * n_total_variant_sequences
    translation = make_dummy_translation()
    return ProteinSequence(
        translations=[translation] * n_translations,
        overlapping_reads=[None] * n_total_variant_reads,
        ref_reads=[],
        alt_reads=[None] * n_total_variant_reads,
        alt_reads_supporting_protein_sequence=[None] * n_supporting_variant_reads,
        transcripts_supporting_protein_sequence=[None] * n_supporting_reference_transcripts,
        transcripts_overlapping_variant=[None] * n_supporting_reference_transcripts,
        gene=gene,
        amino_acids=amino_acids,
        variant_aa_interval_start=variant_aa_interval_start,
        variant_aa_interval_end=variant_aa_interval_end,
        ends_with_stop_codon=translation.ends_with_stop_codon,
        frameshift=translation.frameshift)
import yaml
from yaml import YAMLError
import sys
def parse_file(file):
    """Parse a YAML file containing the bingo board configuration.

    Exits the process with an error message if the file is not valid YAML.

    :param file: path to the YAML configuration file
    :return: the parsed configuration (whatever yaml.safe_load produced)
    """
    with open(file=file, mode="r", encoding='UTF-8') as stream:
        try:
            config = yaml.safe_load(stream)
        except YAMLError as err:
            # BUG FIX: "str" + YAMLError raised TypeError, masking the real
            # parse error; convert the exception to text first.
            sys.exit("Failed to parse YAML: " + str(err))
    return config
import collections
import os
def process_dataset(path):
    """Maps entities and relations to ids and saves corresponding pickle arrays.
    Args:
      path: Path to dataset directory.
    Returns:
      examples: Dictionary mapping splits to Numpy arrays containing
        corresponding KG triples.
      filters: Dictionary containing filters for lhs and rhs predictions.
    """
    lhs_skip = collections.defaultdict(set)
    rhs_skip = collections.defaultdict(set)
    # BUG FIX: previously called get_idx(dataset_path), but `dataset_path`
    # is undefined in this scope -- the parameter is named `path`.
    ent2idx, rel2idx = get_idx(path)
    examples = {}
    for split in ['train', 'valid', 'test']:
        dataset_file = os.path.join(path, split)
        examples[split] = to_np_array(dataset_file, ent2idx, rel2idx)
        # Accumulate the per-split filters into the global skip dicts.
        lhs_filters, rhs_filters = get_filters(examples[split], len(rel2idx))
        lhs_skip.update(lhs_filters)
        rhs_skip.update(rhs_filters)
    filters = {'lhs': lhs_skip, 'rhs': rhs_skip}
    return examples, filters
from pathlib import Path
import random
import os
def prepare_data():
    """Read all annotated cases, split them 80/10/10 into train/valid/test
    NER files, and return per-entity sample statistics."""
    preparer = PrepareNer()
    # Per-entity sample counts, filled in while reading the annotations.
    samples_statistics = defaultdict(int)
    cases = []
    for ann_path in Path(Config.annotation_data_dir).rglob("*.ann"):
        txt_path = str(ann_path.with_suffix('.txt'))
        sents, tags = preparer.get_annoteted_data(txt_path, ann_path, samples_statistics)
        cases.append((sents, tags))
    all_case_num = len(cases)
    train_count = int(all_case_num * 0.8)
    valid_count = int(all_case_num * 0.1)
    test_count = all_case_num - train_count - valid_count
    # Shuffle case indices so the split is random per run.
    indices = list(range(all_case_num))
    random.shuffle(indices)
    valid_end = train_count + valid_count
    train_dataset = [cases[i] for i in indices[:train_count]]
    valid_dataset = [cases[i] for i in indices[train_count:valid_end]]
    test_dataset = [cases[i] for i in indices[valid_end:]]
    train_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'train'), train_dataset)
    valid_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'valid'), valid_dataset)
    test_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'test'), test_dataset)
    print('\nall cases num: {}'.format(all_case_num))
    print("train cases: {}, samples: {}".format(train_count, train_samples_count))
    print("valid cases: {}, samples: {}".format(valid_count, valid_samples_count))
    print("test cases: {}, samples: {}".format(test_count, test_samples_count))
    return dict(samples_statistics)
from typing import Optional
from typing import Tuple
def InlineEditor(item: Item, view, pos: Optional[Tuple[int, int]] = None) -> bool:
    """Show a small editor popup in the diagram, so simple edits do not
    require opening the Element editor.

    When triggered by a mouse press event, ``pos`` holds the mouse position
    relative to the element.

    This default implementation does nothing and reports that no editor
    was shown.
    """
    return False
def getMetrics(trueLabels, predictedLabels):
    """Compute regression metrics for the given predictions.

    :param trueLabels: ground-truth values
    :param predictedLabels: model predictions
    :return: tuple (MSE, MAE, MAPE, RMSE, Pearson correlation)
    """
    mse = sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = True)
    mae = sklearn.metrics.mean_absolute_error(trueLabels, predictedLabels)
    mape = sklearn.metrics.mean_absolute_percentage_error(trueLabels, predictedLabels)
    # squared=False turns the squared error into its root (RMSE).
    rmse = sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = False)
    pearson_r = correlation(true = trueLabels, pred = predictedLabels)
    return mse, mae, mape, rmse, pearson_r
from datetime import datetime
def create_data(feed_slug):
    """Post Data
    Post a data point to a feed
    ---
    tags:
      - "Data Points"
    parameters:
      - name: feed_slug
        in: path
        type: string
        required: true
      - name: value
        in: body
        schema:
          type: object
          required:
            - value
          properties:
            value:
              type: stringboolnumber
              description: value of data to post. must be the same data type as feed kind
    responses:
      200:
        description: Success
        schema:
          type: object
          properties:
            message:
              type: string
            data:
              $ref: '#/definitions/Data'
      400:
        $ref: '#/responses/Error'
    """
    feed = Feed.query.filter_by(slug=feed_slug, owner=current_user).first()
    if not feed:
        return jsonify(error="Feed doesn't exist!"), 400
    value = request.json.get("value", None)
    if value is None:
        return jsonify(error="Value is required."), 400
    # Reject values whose Python type does not match the feed kind.
    # NOTE(review): bool values also pass the "number" check since bool is a
    # subclass of int -- confirm whether that is intended.
    if (
        (
            feed.kind == "number"
            and not (isinstance(value, int) or isinstance(value, float))
        )
        or (feed.kind == "boolean" and not isinstance(value, bool))
        or (feed.kind == "image" and not validators.url(value))
    ):
        return (
            jsonify(
                error=f"Invalid value. Type '{feed.kind}' was expected but got '{value}'."
            ),
            400,
        )
    # BUG FIX: the module imports the class directly (`from datetime import
    # datetime`), so `datetime.datetime.utcnow()` raised AttributeError.
    data = Data(value=str(value), created=datetime.utcnow(), feed=feed)
    db.session.add(data)
    db.session.commit()
    return jsonify(message="Data posted!", data=data.to_dict()), 200
from typing import Optional
async def get_hitokoto(*, c: Optional[str] = None) -> Result.TextResult:
    """Fetch a "hitokoto" (one-line quote) from v1.hitokoto.cn.

    :param c: optional category code forwarded to the API
    :return: TextResult with the formatted quote, or an error result
    """
    url = 'https://v1.hitokoto.cn'
    params = {
        'encode': 'json',
        'charset': 'utf-8'
    }
    if c is not None:
        params.update({'c': c})
    # BUG FIX: dict.update() returns None, so the previous code
    # (headers = HttpFetcher.DEFAULT_HEADERS.update({...})) always passed
    # headers=None. Copy the defaults and update the copy instead.
    headers = dict(HttpFetcher.DEFAULT_HEADERS)
    headers.update({'accept': 'application/json'})
    hitokoto_result = await HttpFetcher(flag='sign_hitokoto', headers=headers).get_json(url=url, params=params)
    if hitokoto_result.error:
        return Result.TextResult(error=True, info=hitokoto_result.info, result='')
    text = f'{hitokoto_result.result.get("hitokoto")}\n——《{hitokoto_result.result.get("from")}》'
    if hitokoto_result.result.get("from_who"):
        text += f' {hitokoto_result.result.get("from_who")}'
    return Result.TextResult(error=False, info='Success', result=text)
def img_unnormalize(src):
    """Undo normalization of an RGB image.

    Multiplies by the normalization variance and adds back the mean,
    then casts to uint8.

    :param src: normalized image array in RGB order (left untouched)
    :return: unnormalized image as a uint8 array
    """
    out = src.copy()
    out *= NORMALIZE_VARIANCE
    out += NORMALIZE_MEAN
    return out.astype(np.uint8)
def upilab5_9_6():
    """5.9.6. Exercise UpyLaB 5.26 - Blue/red traversal

    A matrix M = {m_ij} of size n x n is antisymmetric when, for every pair
    of indices i, j, we have m_ij = -m_ji.

    Write a boolean function antisymetrique(M) that tests whether the
    matrix M it receives is antisymmetric.
    Example 1: antisymetrique([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]) must return True
    Example 2: antisymetrique([[0, 1], [1, 0]]) must return False
    Example 3: antisymetrique([[1, -2], [2, 1]]) must return False
    Example 4: antisymetrique([]) must return True
    """
    def antisymetrique(M):
        """Test whether matrix M is antisymmetric."""
        # Early return instead of scanning the whole matrix after the
        # first mismatch; an empty matrix is trivially antisymmetric.
        for li, ligne in enumerate(M):
            for co, val in enumerate(ligne):
                if val != -M[co][li]:
                    return False
        return True
    test = [([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]], True),
            ([[0, 1], [1, 0]], False),
            ([[1, -2], [2, 1]], False),
            ([], True),
            ([[0, -1, -9, -1, -3], [1, 0, -9, -1, -4], [9, 9, 0, -5, -7], [1, 1, 5, 0, -9], [3, 4, 7, 9, 0]], True)
            ]
    rep = ["non ", ""]
    for M, r in test:
        print("La matrice : ")
        printMatrice(M)
        # BUG FIX: the original had a stray "+ +" which applied unary plus
        # to a str and raised TypeError; also restored the missing space
        # after "trouve".
        print(" devrait être : " + rep[int(r)] + " antisymétrique et la fonction la trouve "
              + rep[int(antisymetrique(M))] + "antisymétrique")
def info(request):
    """provide readable information for *request*."""
    # Path, optional query string and optional adapter-provided extra info
    # are concatenated into one readable line.
    query = request.get('QUERY_STRING')
    adapter = IAdditionalInfo(request, None)
    extra = adapter and str(adapter)
    path = request.get('PATH_INFO', '')
    suffix = ('?' + query) if query else ''
    note = (' [%s] ' % extra) if extra else ''
    return path + suffix + note
def put(entity_pb, **options):
    """Store an entity in datastore.

    Works both for a new entity saved for the first time and for an
    existing entity that has been updated.

    Args:
        entity_pb (datastore_v1.types.Entity): The entity to be stored.
        options (Dict[str, Any]): Options for this request.

    Returns:
        tasklets.Future: Result will be the completed datastore key
            (entity_pb2.Key) for the entity.
    """
    _check_unsupported_options(options)
    # Group this write with other pending writes into one commit batch.
    commit_batch = _get_batch(_CommitBatch, options)
    return commit_batch.put(entity_pb)
def get_country_flag(country):
    """Returns the corresponding flag of a provided country.

    The lookup is tolerant to spacing/underscore and capitalization
    differences (e.g. "united states", "United States", "United_States").

    :param country: name of the country whose flag to load
    :return: the flag image as an RGB array
    :raises ValueError: if the country is not known
    """
    with nation_flag_info as flag_path_info:
        # BUG FIX: previously a name could pass validation via one of its
        # title-cased variants while the dictionary lookup used only the raw
        # name (spaces replaced by underscores), raising KeyError. Resolve
        # the actual key once and use it for both validation and lookup.
        candidates = (
            country,
            country.replace(' ', '_'),
            country.title().replace(' ', '_'),
            country.title().replace('_', ' '),
        )
        key = next((c for c in candidates if c in flag_path_info), None)
        if key is None:
            raise ValueError(f"Received invalid nation {country}, try another one.")
        # Read the flag image from its path and convert BGR (cv2 default)
        # to RGB before returning.
        return cv2.cvtColor(cv2.imread(flag_path_info[key]), cv2.COLOR_BGR2RGB)
def create_graph_to_decode_and_normalize_image():
    """See file docstring.

    Returns:
        input: The placeholder to feed the raw bytes of an encoded image.
        y: A Tensor (the decoded, normalized image) to be fed to the graph.
    """
    encoded = tf.placeholder(tf.string, shape=(), name='encoded_image_bytes')
    with tf.name_scope("preprocess"):
        # Decode to HWC uint8, promote to float, add a batch dimension,
        # resize to the model's expected size, then standardize.
        decoded = tf.cast(tf.image.decode_image(encoded, channels=3), tf.float32)
        batched = tf.expand_dims(decoded, axis=0)
        resized = tf.image.resize_bilinear(batched, (IMAGE_HEIGHT, IMAGE_WIDTH))
        normalized = (resized - MEAN) / SCALE
    return (encoded, normalized)
def bilinear_upsample(x, scale=2):
    """Bilinear upsample.
    Caffe bilinear upsample forked from
    https://github.com/ppwwyyxx/tensorpack
    Deterministic bilinearly-upsample the input images.
    Args:
        x (tf.Tensor): a NHWC tensor
        scale (int): the upsample factor
    Returns:
        tf.Tensor: a NHWC tensor.
    """
    def bilinear_conv_filler(s):
        # Build an s-by-s bilinear interpolation kernel (the classic Caffe
        # "bilinear filler"): weights decay linearly with distance from the
        # kernel center c. Note: the loop variable x here shadows the
        # function argument x, but only inside this helper.
        f = np.ceil(float(s) / 2)
        c = float(2 * f - 1 - f % 2) / (2 * f)
        ret = np.zeros((s, s), dtype='float32')
        for x in range(s):
            for y in range(s):
                ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        return ret
    inp_shape = x.get_shape().as_list()
    ch = inp_shape[3]
    # Channel count must be statically known to build the fixed filter.
    assert ch is not None
    filter_shape = 2 * scale
    w = bilinear_conv_filler(filter_shape)
    # Tile the 2-D kernel over every (in, out) channel pair.
    # NOTE(review): this fills all ch*ch combinations rather than only the
    # diagonal; presumably intended since it mirrors the tensorpack source —
    # confirm against the upstream implementation.
    w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))
    weight_var = tf.constant(w, tf.float32,
                             shape=(filter_shape, filter_shape, ch, ch),
                             name='bilinear_upsample_filter')
    # Symmetric edge padding so border pixels are interpolated correctly;
    # the padded margin is cropped again after the transposed convolution.
    pad = min(scale - 1, inp_shape[1])
    x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='SYMMETRIC')
    if inp_shape[1] < scale:
        # Inputs smaller than the scale need extra zero padding.
        # NOTE(review): original comment said "may cause problem?" — this
        # branch is untrusted; verify with very small inputs.
        pad = scale - 1 - inp_shape[1]
        x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]],
                   mode='CONSTANT')
    out_shape = tf.shape(x) * tf.constant([1, scale, scale, 1], tf.int32)
    deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape,
                                    [1, scale, scale, 1], 'SAME')
    # Crop the upsampled padding margin back off.
    edge = scale * (scale - 1)
    deconv = deconv[:, edge:-edge, edge:-edge, :]
    # Restore static shape information (scaled H/W) for downstream ops.
    if inp_shape[1]:
        inp_shape[1] *= scale
    if inp_shape[2]:
        inp_shape[2] *= scale
    deconv.set_shape(inp_shape)
    return deconv
from typing import OrderedDict
def _generate_simplifiers_and_detailers():
    """Generate simplifiers, forced full simplifiers and detailers."""
    simplifiers = OrderedDict()
    forced_full_simplifiers = OrderedDict()
    detailers = []

    def _register(obj_type, simplify_fn, detail_fn, forced=False):
        # Each detail function appears once in `detailers`; reuse its index
        # if it was already registered.
        if detail_fn in detailers:
            index = detailers.index(detail_fn)
        else:
            index = len(detailers)
            detailers.append(detail_fn)
        target = forced_full_simplifiers if forced else simplifiers
        target[obj_type] = (index, simplify_fn)

    # Native and torch types.
    for obj_type, (simplify_fn, detail_fn) in MAP_TO_SIMPLIFIERS_AND_DETAILERS.items():
        _register(obj_type, simplify_fn, detail_fn)
    # Syft objects (including exceptions) with custom simplify/detail methods.
    for syft_type in OBJ_SIMPLIFIER_AND_DETAILERS + EXCEPTION_SIMPLIFIER_AND_DETAILERS:
        _register(syft_type, syft_type.simplify, syft_type.detail)
    # Syft objects with custom force_simplify/force_detail methods.
    for syft_type in OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS:
        _register(syft_type, syft_type.force_simplify, syft_type.force_detail, forced=True)
    return simplifiers, forced_full_simplifiers, detailers
def read_machine_def():
    """Read the machine definition file and return its parsed contents."""
    machine_config = read_yaml_file(machine_def_file)
    return machine_config
import array

import numpy as np
import scipy
from scipy import sparse
def read_libsvm_format(file_path: str) -> 'tuple[list[list[int]], sparse.csr_matrix]':
    """Read multi-label LIBSVM-format data.

    Each line is "l1,l2,... i1:v1 i2:v2 ..." with 1-based feature indices;
    zero-valued features are dropped.

    Args:
        file_path (str): Path to file.
    Returns:
        tuple[list[list[int]], sparse.csr_matrix]: A tuple of labels and features.
    """
    def as_ints(s):
        # Parse a comma-separated label field into a list of ints.
        return [int(part) for part in s.split(',')]
    prob_y = []
    # BUG FIX: with `import array` the module object is not callable; use
    # array.array(...) explicitly.
    prob_x = array.array('d')
    row_ptr = array.array('l', [0])
    col_idx = array.array('l')
    # BUG FIX: the original iterated enumerate(open(file_path)) and never
    # closed the file handle; use a context manager instead.
    with open(file_path) as f:
        for line in f:
            line = line.split(None, 1)
            # In case an instance with all zero features
            if len(line) == 1:
                line += ['']
            label, features = line
            prob_y.append(as_ints(label))
            nz = 0
            for e in features.split():
                ind, val = e.split(':')
                val = float(val)
                if val != 0:
                    col_idx.append(int(ind) - 1)
                    prob_x.append(val)
                    nz += 1
            row_ptr.append(row_ptr[-1] + nz)
    # BUG FIX: scipy.frombuffer was a deprecated numpy alias (removed in
    # modern SciPy); use numpy directly, and scipy.sparse for the matrix.
    prob_x = np.frombuffer(prob_x, dtype='d')
    col_idx = np.frombuffer(col_idx, dtype='l')
    row_ptr = np.frombuffer(row_ptr, dtype='l')
    prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))
    return (prob_y, prob_x)
def cristal_load_motor(datafile, root, actuator_name, field_name):
    """
    Try to load the CRISTAL dataset at the defined entry and returns it.
    Patterns keep changing at CRISTAL.

    :param datafile: h5py File object of CRISTAL .nxs scan file
    :param root: string, path of the data up to the last subfolder
     (not included). This part is expected to not change over time
    :param actuator_name: string, name of the actuator
     (e.g. 'I06-C-C07-EX-DIF-KPHI'). Lowercase and uppercase will be tested
     when trying to load the data.
    :param field_name: name of the field under the actuator name
     (e.g. 'position')
    :return: the dataset if found or 0
    """
    # check input arguments
    valid.valid_container(
        root, container_types=str, min_length=1, name="cristal_load_motor"
    )
    if not root.startswith("/"):
        root = "/" + root
    valid.valid_container(
        actuator_name, container_types=str, min_length=1, name="cristal_load_motor"
    )
    # Resolve the actuator entry, also trying lower- and upper-case variants.
    for actuator_name in (actuator_name, actuator_name.lower(), actuator_name.upper()):
        if actuator_name in datafile[root].keys():
            break
    else:
        print(f"\nCould not find the entry for the actuator'{actuator_name}'")
        print(f"list of available actuators: {list(datafile[root].keys())}\n")
        return 0
    # Resolve the field: try the given spelling, then lower-, then upper-case.
    for candidate in (field_name, field_name.lower(), field_name.upper()):
        try:
            return datafile[root + "/" + actuator_name + "/" + candidate][:]
        except KeyError:
            continue
    # Nothing else that we can do.
    print(
        f"\nCould not find the field '{field_name}' "
        f"in the actuator'{actuator_name}'"
    )
    print(
        "list of available fields: "
        f"{list(datafile[root + '/' + actuator_name].keys())}\n"
    )
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.