| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def parse_tracks(filename="tracks.csv"):
"""
Builds the tracks matrix #tracks x #attributes (20635 x 4)
where attributes are track_id,album_id,artist_id,duration_sec
"""
with open(os.path.join(data_path, filename), "r") as f:
# Discard first line
lines = f.readlines()[1:]
num_lines = len(lines)
# Sanity check
assert num_lines == NUM_TRACKS
# Build matrices
album_set = sp.dok_matrix((NUM_ALBUMS, NUM_TRACKS), dtype=np.uint8)
artist_set = sp.dok_matrix((NUM_ARTISTS, NUM_TRACKS), dtype=np.uint8)
for i, line in enumerate(lines):
# Parse album and artist
track, album, artist, _ = [np.int32(i) for i in line.split(",")]
album_set[album, track] = 1
artist_set[artist, track] = 1
print("\rParsing tracks: {:.4}%".format((i / num_lines) * 100), end="")
print("\n")
return album_set, artist_set
| 15,600
|
def match_seq_len(*arrays: np.ndarray):
"""
Left-pad arrays along the last axis so they all share the same trailing length.
Args:
*arrays: 3-D numpy arrays whose last dimensions may differ
Returns:
A list of arrays, each zero-padded on the left of the last axis to the maximum length
"""
max_len = np.stack([x.shape[-1] for x in arrays]).max()
return [np.pad(x, pad_width=((0, 0), (0, 0), (max_len - x.shape[-1], 0)), mode='constant', constant_values=0) for x
in arrays]
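# Usage sketch: two 3-D arrays whose last dimensions differ get left-padded with
# zeros to a common trailing length (numpy imported as np in this module).
a = np.ones((2, 1, 3))
b = np.ones((2, 1, 5))
a_pad, b_pad = match_seq_len(a, b)
assert a_pad.shape == b_pad.shape == (2, 1, 5)
assert (a_pad[..., :2] == 0).all()  # padding went on the left of the last axis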
| 15,601
|
def get_cmws_5_loss(
generative_model, guide, memory, obs, obs_id, num_particles, num_proposals, insomnia=1.0
):
"""Normalize over particles-and-memory for generative model gradient
Args:
generative_model
guide
memory
obs: tensor of shape [batch_size, *obs_dims]
obs_id: long tensor of shape [batch_size]
num_particles (int): number of particles used to marginalize continuous latents
num_proposals (int): number of proposed elements to be considered as new memory
insomnia (float): interpolation weight in [0, 1] between the sleep guide loss (0.0) and the CMWS wake guide loss (1.0)
Returns: [batch_size]
"""
# Extract
batch_size = obs.shape[0]
# SAMPLE d'_{1:R} ~ q(d | x)
# [num_proposals, batch_size, ...]
proposed_discrete_latent = guide.sample_discrete(obs, (num_proposals,))
# ASSIGN d_{1:(R + M)} = CONCAT(d'_{1:R}, d_{1:M})
# [memory_size + num_proposals, batch_size, ...]
discrete_latent_concat = cmws.memory.concat(memory.select(obs_id), proposed_discrete_latent)
# COMPUTE SCORES s_i = log p(d_i, x) for i {1, ..., (R + M)}
# -- c ~ q(c | d, x)
# [num_particles, memory_size + num_proposals, batch_size, ...]
_continuous_latent = guide.sample_continuous(obs, discrete_latent_concat, [num_particles])
# -- log q(c | d)
# [num_particles, memory_size + num_proposals, batch_size]
_log_q_continuous = guide.log_prob_continuous(obs, discrete_latent_concat, _continuous_latent)
# -- log p(d, c, x)
# [num_particles, memory_size + num_proposals, batch_size]
_log_p = generative_model.log_prob_discrete_continuous(
discrete_latent_concat, _continuous_latent, obs
)
# [memory_size + num_proposals, batch_size]
log_marginal_joint = torch.logsumexp(_log_p - _log_q_continuous, dim=0) - math.log(
num_particles
)
# ASSIGN d_{1:M} = TOP_K_UNIQUE(d_{1:(R + M)}, s_{1:(R + M)})
# [memory_size, batch_size, ...], [memory_size, batch_size]
discrete_latent_selected, _, indices = cmws.memory.get_unique_and_top_k(
discrete_latent_concat, log_marginal_joint, memory.size, return_indices=True
)
# SELECT log q(c | d, x) and log p(d, c, x)
# [num_particles, memory_size, batch_size]
_log_q_continuous = torch.gather(
_log_q_continuous, 1, indices[None].expand(num_particles, memory.size, batch_size)
)
# [num_particles, memory_size, batch_size]
_log_p = torch.gather(_log_p, 1, indices[None].expand(num_particles, memory.size, batch_size))
# COMPUTE WEIGHT
# [num_particles, memory_size, batch_size]
_log_weight = _log_p - _log_q_continuous
# COMPUTE log q(d_i | x) for i in {1, ..., M}
# [memory_size, batch_size]
_log_q_discrete = guide.log_prob_discrete(obs, discrete_latent_selected)
# UPDATE MEMORY with d_{1:M}
memory.update(obs_id, discrete_latent_selected)
# CHECK UNIQUE
# if not memory.is_unique(obs_id).all():
# raise RuntimeError("memory not unique")
# COMPUTE losses
# --Compute generative model loss
# [num_particles, memory_size, batch_size]
_log_weight_v = torch.softmax(_log_weight.view(-1, batch_size), dim=0).view(
num_particles, memory.size, batch_size
)
# [batch_size]
generative_model_loss = -(_log_weight_v.detach() * _log_p).sum(dim=[0, 1])
# --Compute guide loss
# ----Compute guide wake loss
if insomnia < 1.0:
# [batch_size]
guide_loss_sleep = (
get_sleep_loss(generative_model, guide, num_particles * batch_size)
.view(batch_size, num_particles)
.mean(-1)
)
# ----Compute guide CMWS loss
if insomnia > 0.0:
# [memory_size, batch_size]
_log_weight_omega = torch.logsumexp(_log_weight_v, dim=0)
# [batch_size]
discrete_guide_loss_cmws = -(_log_weight_omega.detach() * _log_q_discrete).sum(dim=0)
# [batch_size]
continuous_guide_loss_cmws = -(
(torch.softmax(_log_weight, dim=0).detach() * _log_q_continuous).sum(dim=0).mean(dim=0)
)
# [batch_size]
guide_loss_cmws = discrete_guide_loss_cmws + continuous_guide_loss_cmws
# ----Combine guide sleep and CMWS losses
if insomnia == 0.0:
guide_loss = guide_loss_sleep
elif insomnia == 1.0:
guide_loss = guide_loss_cmws
else:
guide_loss = insomnia * guide_loss_cmws + (1 - insomnia) * guide_loss_sleep
return generative_model_loss + guide_loss
| 15,602
|
def getTerm(Y):
"""Prints index and basis function based on Y index"""
for y in Y:
print(y)
d, t, c, m = coeffs[y - 1]
print("Delta^%f Tau^%f np.exp(-Delta^%f) np.exp(-Tau^%f)" % (d, t, c, m))
| 15,603
|
def chemin_absolu(relative_path):
"""
Gives the absolute path of a file.
PRE : -
POST : Returns 'C:\\Users\\sacre\\PycharmProjects\\ProjetProgra\\' + 'relative_path'.
"""
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
correct = base_path.index("ProjetProgra")
return os.path.join(base_path[:correct + 13], relative_path)
| 15,604
|
def check_entity_value(step,entity_property,value):
"""
Function checks to see if entity_property has the expected value
:param step: lettuce step object (unused)
:param entity_property: Entity property being tested
:param value: Entity property value being tested
"""
assert entity_property == world.redis_property_key
assert world.value == value
| 15,605
|
def to_float_tensor(np_array):
"""
Convert a numpy array to a float torch tensor.
:param np_array: numpy array to convert
:return: torch.Tensor with dtype torch.float
"""
return torch.from_numpy(np_array).type(torch.float)
| 15,606
|
def display_heatmap(salience_scores,
salience_scores_2=None,
title=None,
title_2=None,
cell_labels=None,
cell_labels_2=None,
normalized=True,
ui=False):
"""
A utility function that displays a Seaborn heatmap.
Input:
- ('salience_scores') A list of floats.
If the task is something like NLI, these are the salience scores for the premise, or first
sequence.
- ('salience_scores_2') A list of floats.
Optional. Only necessary when the task is a relation-labeling task between 2 sequences,
like NLI. These are then the salience scores for the hypothesis, or second sequence.
- ('title') Any object (string, integer, float, etc.) that can be printed.
Optional. Usually a descriptive blurb for the heatmap of ('salience_scores').
- ('title_2') Any object (string, integer, float, etc.) that can be printed.
Optional. Usually a descriptive blurb for the heatmap of ('salience_scores_2').
- ('cell_labels') Optional. A list of the same size as ('salience_scores') that is printed
on the corresponding cells. Usually something like the salience score values.
- ('cell_labels_2') Optional. A list of the same size as ('salience_scores_2') that is printed
on the corresponding cells. Usually something like the salience score values.
- ('normalized') A boolean denoting whether the data is normalized. If normalized,
the range is from -1 to 1.
- ('ui') A boolean for optionally saving the plot to a file and returning the filename.
Output:
- Returns the matplotlib object.
"""
if cell_labels is not None:
assert len(cell_labels) == len(salience_scores)
if cell_labels_2 is not None:
assert len(cell_labels_2) == len(salience_scores_2)
cmap = sns.diverging_palette(10, 240, as_cmap=True)
if salience_scores_2 is not None:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
ax1.set_title(title if title is not None else "")
ax2.set_title(title_2 if title_2 is not None else "")
sns.heatmap([salience_scores],
ax=ax1,
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
sns.heatmap([salience_scores_2],
ax=ax2,
annot=[cell_labels_2] if cell_labels_2 is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
else:
m = sns.heatmap([salience_scores],
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
linewidths=0.5,
square=True,
cmap=cmap,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
plt.title(title if title is not None else "")
#plt.show()
return plt
| 15,607
|
def _cleanse_line(line, main_character):
"""
Cleanse the extracted lines to remove formatting.
"""
# Strip the line, just in case.
line = line.strip()
# Clean up formatting characters.
line = line.replace('\\' , '') # Remove escape characters.
line = line.replace('[mc]', main_character) # Standardize MC name.
line = re.sub(r'{/?i}' , '*', line) # Convert italics to Markdown.
line = re.sub(r'{cps=\d+}', '' , line) # Remove scroll speed formatting.
return line
| 15,608
|
def require_pandapower(f):
"""
Decorator for functions that require pandapower.
"""
@wraps(f)
def wrapper(*args, **kwds):
try:
getattr(pp, '__version__')
except AttributeError:
raise ModuleNotFoundError("pandapower needs to be manually installed.")
return f(*args, **kwds)
return wrapper
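# Usage sketch (an assumption, not from the original repo): guard any helper that
# touches pandapower. `pp` is the pandapower module imported at the top of the file.
@require_pandapower
def make_empty_net():
    return pp.create_empty_network()  # only runs if pandapower is importable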
| 15,609
|
def optimal_path_fixture():
"""An optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 1, 2, 3], 10 + 2 + 5
| 15,610
|
def evaluate_DynamicHashtablePlusRemove(output=True):
"""
Compare performance using ability in open addressing to mark deleted values.
Nifty trick to produce just the squares as keys in the hashtable.
"""
# If you want to compare, then add following to end of executable statements:
# print([e[0] for e in ht])
tbl = DataTable([8,20,20], ['M', 'Separate Chaining', 'Open Addressing w/ Remove'], output=output)
for size in [512, 1024, 2048]:
linked_list = min(timeit.repeat(stmt='''
ht = Hashtable({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_linked import Hashtable
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
hashtable_plus = min(timeit.repeat(stmt='''
ht = DynamicHashtablePlusRemove({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_open import DynamicHashtablePlusRemove
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
tbl.row([size, linked_list, hashtable_plus])
return tbl
| 15,611
|
def closest(lat1, lon1):
"""Return distance (km) and city closest to given coords."""
lat1, lon1 = float(lat1), float(lon1)
min_dist, min_city = None, None
for city, lat2, lon2 in CITIES:
dist = _dist(lat1, lon1, lat2, lon2)
if min_dist is None or dist < min_dist:
min_dist, min_city = dist, city
return min_dist, min_city
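# Hedged, self-contained sketch: CITIES and _dist are module globals in the original;
# here they are stubbed with a flat Euclidean distance just to show the call shape.
import math
CITIES = [("Oslo", 59.91, 10.75), ("Bergen", 60.39, 5.32)]
def _dist(lat1, lon1, lat2, lon2):
    return math.hypot(lat1 - lat2, lon1 - lon2)  # stand-in, not a great-circle distance
print(closest(59.95, 10.60))  # -> (~0.155, 'Oslo')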
| 15,612
|
def get_previous_term():
"""
Returns a uw_sws.models.Term object,
for the previous term.
"""
url = "{}/previous.json".format(term_res_url_prefix)
return Term(data=get_resource(url))
| 15,613
|
def used(obj: T) -> T:
"""Decorator indicating that an object is being used.
This stops the UnusedObjectFinder from marking it as unused.
"""
_used_objects.add(obj)
return obj
| 15,614
|
def fit_double_gaussian(x_data, y_data, maxiter=None, maxfun=5000, verbose=1, initial_params=None):
""" Fitting of double gaussian
Fitting the Gaussians and finding the split between the up and the down state,
separation between the max of the two gaussians measured in the sum of the std.
Args:
x_data (array): x values of the data
y_data (array): y values of the data
maxiter (int): maximum number of iterations to perform
maxfun (int): maximum number of function evaluations to make
verbose (int): set to >0 to print convergence messages
initial_params (None or array): optional, initial guess for the fit parameters:
[A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
Returns:
par_fit (array): fit parameters of the double gaussian: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
result_dict (dict): contains the initial guess for the fit parameters (either the ones given to the function or generated by it), the separation between the maxima of the two gaussians measured in the sum of their standard deviations, and the split value that separates the up and the down level
"""
def func(params): return _cost_double_gaussian(x_data, y_data, params)
maxsignal = np.percentile(x_data, 98)
minsignal = np.percentile(x_data, 2)
if initial_params is None:
A_dn = np.max(y_data[:int((len(y_data) / 2))])
A_up = np.max(y_data[int((len(y_data) / 2)):])
sigma_dn = (maxsignal - minsignal) * 1 / 20
sigma_up = (maxsignal - minsignal) * 1 / 20
mean_dn = minsignal + 1 / 4 * (maxsignal - minsignal)
mean_up = minsignal + 3 / 4 * (maxsignal - minsignal)
initial_params = np.array([A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up])
par_fit = scipy.optimize.fmin(func, initial_params, maxiter=maxiter, maxfun=maxfun, disp=verbose >= 2)
# separation is the difference between the maxima of the gaussians divided by the sum of the std of both gaussians
separation = (par_fit[5] - par_fit[4]) / (abs(par_fit[2]) + abs(par_fit[3]))
# split is equidistant to both peaks, measured in std from the peak
split = par_fit[4] + separation * abs(par_fit[2])
result_dict = {'parameters initial guess': initial_params, 'separation': separation, 'split': split}
return par_fit, result_dict
| 15,615
|
def test_basics():
"""
Test mean/sd/var/mode/entropy functionality of Cauchy.
"""
net = CauchyBasics()
ans = net()
assert isinstance(ans, Tensor)
with pytest.raises(ValueError):
net = CauchyMean()
ans = net()
with pytest.raises(ValueError):
net = CauchyVar()
ans = net()
with pytest.raises(ValueError):
net = CauchySd()
ans = net()
| 15,616
|
def test_logging_warning(capsys):
"""
Should output only warning-level logs and above
"""
logger = setup_logging(module_name, 'WARNING')
logger.debug('DEBUG')
logger.info('INFO')
logger.warning('WARN')
captured = capsys.readouterr()
assert "WARN" in captured.err
assert "INFO" not in captured.err
assert "DEBUG" not in captured.err
| 15,617
|
def _set_advanced_network_attributes_of_profile(config, profile):
"""
Modify advanced network attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = _set_attribute_of_profile(
config, profile, 'vpc_offering_id', 'VPC offering id', ''
)
return config
| 15,618
|
def Environ(envstring):
"""Return the String associated with an operating system environment variable
envstring Optional. String expression containing the name of an environment variable.
number Optional. Numeric expression corresponding to the numeric order of the
environment string in the environment-string table. The number argument can be any
numeric expression, but is rounded to a whole number before it is evaluated.
Remarks
If envstring can't be found in the environment-string table, a zero-length string ("")
is returned. Otherwise, Environ returns the text assigned to the specified envstring;
that is, the text following the equal sign (=) in the environment-string table for that environment variable.
"""
try:
envint = int(envstring)
except ValueError:
return os.environ.get(envstring, "")
# Is an integer - need to get the envint'th value
try:
return "%s=%s" % (list(os.environ.keys())[envint], list(os.environ.values())[envint])
except IndexError:
return ""
| 15,619
|
def ring_bond_equal(b1, b2, reverse=False):
"""Check if two bonds are equal.
Two bonds are equal if their beginning and end atoms have the same symbol and
formal charge. Bond type is not considered because all compared bonds are aromatic (so SINGLE matches DOUBLE).
Parameters
----------
b1 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
b2 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
reverse : bool
Whether to interchange the role of beginning and end atoms of the second
bond in comparison.
Returns
-------
bool
Whether the two bonds are equal.
"""
b1 = (b1.GetBeginAtom(), b1.GetEndAtom())
if reverse:
b2 = (b2.GetEndAtom(), b2.GetBeginAtom())
else:
b2 = (b2.GetBeginAtom(), b2.GetEndAtom())
return atom_equal(b1[0], b2[0]) and atom_equal(b1[1], b2[1])
| 15,620
|
def test_tensor_symmetrize():
""" test advanced tensor calculations """
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [2, 2])
t1 = Tensor2Field(grid)
t1.data[0, 0, :] = 1
t1.data[0, 1, :] = 2
t1.data[1, 0, :] = 3
t1.data[1, 1, :] = 4
# traceless = False
t2 = t1.copy()
t1.symmetrize(make_traceless=False, inplace=True)
tr = t1.trace()
assert np.all(tr.data == 5)
t1_trans = np.swapaxes(t1.data, 0, 1)
np.testing.assert_allclose(t1.data, t1_trans)
ts = t1.copy()
ts.symmetrize(make_traceless=False, inplace=True)
np.testing.assert_allclose(t1.data, ts.data)
# traceless = True
t2.symmetrize(make_traceless=True, inplace=True)
tr = t2.trace()
assert np.all(tr.data == 0)
t2_trans = np.swapaxes(t2.data, 0, 1)
np.testing.assert_allclose(t2.data, t2_trans)
ts = t2.copy()
ts.symmetrize(make_traceless=True, inplace=True)
np.testing.assert_allclose(t2.data, ts.data)
| 15,621
|
def survey_aligned_velocities(od):
"""
Compute horizontal velocities orthogonal and tangential to a survey.
.. math::
(v_{tan}, v_{ort}) = (u\\cos{\\phi} + v\\sin{\\phi},
v\\cos{\\phi} - u\\sin{\\phi})
Parameters
----------
od: OceanDataset
oceandataset used to compute
Returns
-------
ds: xarray.Dataset
| rot_ang_Vel: Angle to rotate geographical
to survey aligned velocities
| tan_Vel: Velocity component tangential to survey
| ort_Vel: Velocity component orthogonal to survey
See Also
--------
subsample.survey_stations
"""
# Check parameters
_check_instance({'od': od}, 'oceanspy.OceanDataset')
if 'station' not in od._ds.dims:
raise ValueError('oceandatasets must be subsampled using'
' `subsample.survey_stations`')
# Get zonal and meridional velocities
var_list = ['lat', 'lon']
try:
# Add missing variables
varList = ['U_zonal', 'V_merid'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U_zonal']
V = od._ds['V_merid']
except Exception as e:
# Assume U=U_zonal and V=V_zonal
_warnings.warn(("\n{}"
"\nAssuming U=U_zonal and V=V_merid."
"\nIf you are using curvilinear coordinates,"
" run `compute.geographical_aligned_velocities`"
" before `subsample.survey_stations`").format(e),
stacklevel=2)
# Add missing variables
varList = ['U', 'V'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U']
V = od._ds['V']
# Extract coordinates
lat = _np.deg2rad(od._ds['lat'])
lon = _np.deg2rad(od._ds['lon'])
# Extract grid
grid = od._grid
# Message
print('Computing survey aligned velocities.')
# Compute azimuth
# Translated from matlab:
# https://www.mathworks.com/help/map/ref/azimuth.html
az = _np.arctan2(_np.cos(lat[1:]).values
* _np.sin(grid.diff(lon, 'station')),
_np.cos(lat[:-1]).values * _np.sin(lat[1:]).values
- _np.sin(lat[:-1]).values
* _np.cos(lat[1:]).values
* _np.cos(grid.diff(lon, 'station')))
az = grid.interp(az, 'station', boundary='extend')
az = _xr.where(_np.rad2deg(az) < 0, _np.pi*2 + az, az)
# Compute rotation angle
rot_ang_rad = _np.pi/2 - az
rot_ang_rad = _xr.where(rot_ang_rad < 0,
_np.pi*2 + rot_ang_rad, rot_ang_rad)
rot_ang_deg = _np.rad2deg(rot_ang_rad)
rot_ang_Vel = rot_ang_deg
long_name = 'Angle to rotate geographical to survey aligned velocities'
rot_ang_Vel.attrs['long_name'] = long_name
rot_ang_Vel.attrs['units'] = 'deg (+: counterclockwise)'
# Rotate velocities
tan_Vel = U*_np.cos(rot_ang_rad) + V*_np.sin(rot_ang_rad)
tan_Vel.attrs['long_name'] = 'Velocity component tangential to survey'
if 'units' in U.attrs:
units = U.attrs['units']
else:
units = ' '
tan_Vel.attrs['units'] = ('{} '
'(+: flow towards station indexed'
' with higher number)'
''.format(units))
ort_Vel = V*_np.cos(rot_ang_rad) - U*_np.sin(rot_ang_rad)
ort_Vel.attrs['long_name'] = 'Velocity component orthogonal to survey'
if 'units' in V.attrs:
units = V.attrs['units']
else:
units = ' '
ort_Vel.attrs['units'] = ('{} '
'(+: flow keeps station indexed'
' with higher number to the right)'
''.format(units))
# Create ds
ds = _xr.Dataset({'rot_ang_Vel': rot_ang_Vel,
'ort_Vel': ort_Vel,
'tan_Vel': tan_Vel}, attrs=od.dataset.attrs)
return _ospy.OceanDataset(ds).dataset
| 15,622
|
def insertion_sort(numbers):
"""
At worst this is an O(n^2) algorithm
At best this is an O(n) algorithm
"""
for index in range(1, len(numbers)):
current_num = numbers[index]
current_pos = index
while current_pos > 0 and numbers[current_pos - 1] > current_num:
numbers[current_pos] = numbers[current_pos - 1]
current_pos = current_pos - 1
numbers[current_pos] = current_num
return numbers
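# Usage sketch: the sort is in place and the list is also returned.
nums = [5, 2, 4, 6, 1, 3]
assert insertion_sort(nums) == [1, 2, 3, 4, 5, 6]
assert nums == [1, 2, 3, 4, 5, 6]  # the input list itself was mutated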
| 15,623
|
def p_ppjoin_block_2(p):
"""
pjoin_block : empty
"""
p[0] = []
| 15,624
|
def applyEffectiveDelayedNeutronFractionToCore(core, cs):
"""Process the settings for the delayed neutron fraction and precursor decay constants."""
# Verify and set the core beta parameters based on the user-supplied settings
beta = cs["beta"]
decayConstants = cs["decayConstants"]
# If beta is interpreted as a float, then assign it to
# the total delayed neutron fraction parameter. Otherwise, setup the
# group-wise delayed neutron fractions and precursor decay constants.
reportTableData = []
if isinstance(beta, float):
core.p.beta = beta
reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
elif isinstance(beta, list) and isinstance(decayConstants, list):
if len(beta) != len(decayConstants):
raise ValueError(
f"The values for `beta` ({beta}) and `decayConstants` "
f"({decayConstants}) are not consistent lengths."
)
core.p.beta = sum(beta)
core.p.betaComponents = numpy.array(beta)
core.p.betaDecayConstants = numpy.array(decayConstants)
reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
reportTableData.append(
("Group-wise Delayed Neutron Fractions", core.p.betaComponents)
)
reportTableData.append(
("Group-wise Precursor Decay Constants", core.p.betaDecayConstants)
)
# Report to the user the values were not applied.
if not reportTableData and (beta is not None or decayConstants is not None):
runLog.warning(
f"Delayed neutron fraction(s) - {beta} and decay constants"
" - {decayConstants} have not been applied."
)
else:
runLog.extra(
tabulate.tabulate(
tabular_data=reportTableData,
headers=["Component", "Value"],
tablefmt="armi",
)
)
| 15,625
|
def ratio_shimenreservoir_to_houchiweir():
"""
Real Name: Ratio ShiMenReservoir To HouChiWeir
Original Eqn: Sum Allocation ShiMenReservoir To HouChiWeir/Sum Allcation From ShiMenReservoir
Units: m3/m3
Limits: (None, None)
Type: component
"""
return sum_allocation_shimenreservoir_to_houchiweir() / sum_allcation_from_shimenreservoir()
| 15,626
|
def torch_profiler_full(func):
"""
A decorator which will run the torch profiler for the decorated function,
printing the results in full.
Note: Enforces a gpu sync point which could slow down pipelines.
"""
@wraps(func)
def wrapper(*args, **kwargs):
with torch.autograd.profiler.profile(use_cuda=True) as prof:
result = func(*args, **kwargs)
print(prof, flush=True)
return result
return wrapper
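# Usage sketch (hedged): the decorator profiles with use_cuda=True, so a CUDA-enabled
# PyTorch build is assumed; torch must already be imported in this module.
@torch_profiler_full
def matmul_demo():
    a = torch.randn(64, 64)
    return a @ a

_ = matmul_demo()  # runs the matmul under the profiler and prints the full table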
| 15,627
|
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
# Arguments
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
Output tensor.
# Raises
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
a tensor with ndim(x) - 1 dimension
"""
data_format = normalize_data_format(data_format)
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError('Unexpected bias dimensions %d, '
'expect to be 1 or %d dimensions'
% (len(bias_shape), ndim(x)))
if ndim(x) == 5:
if len(bias_shape) == 1:
new_shape = (1, 1, 1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1, 2, 3))
x += reshape(bias, new_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = tf.nn.bias_add(x, bias,
data_format='NCHW')
else:
x += reshape(bias, (1, bias_shape[0], 1, 1))
else:
x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = tf.nn.bias_add(x, bias,
data_format='NHWC')
else:
x += reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if len(bias_shape) == 1:
new_shape = (1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1,))
x += reshape(bias, new_shape)
else:
x = tf.nn.bias_add(x, bias)
return x
| 15,628
|
def get_news_with_follow(request, user_id):
"""
Get the latest 30 news articles of the types the user follows; returns code 300 if not logged in.
:param request: request object
:param user_id: primary key of the user
:return: JSON data
"""
data = {}
try:
user = User.objects.get(pk=user_id)
follow_set = user.follow_type.values_list('id').all()
follow_list = [x[0] for x in follow_set]
news_set = NewsArticle.objects.filter(type_id__in=follow_list).order_by('-publish_time')[:30]
except db.Error:
data['code'] = 400
data['msg'] = 'Server busy, please try again later'
return JsonResponse(data)
except ObjectDoesNotExist:
data['code'] = 505
data['msg'] = 'User does not exist'
return JsonResponse(data)
news_list = []
for news in news_set:
item = {
'id': news.id,
'title': news.title,
'type': news.type.name,
'publish_time': news.publish_time
}
news_list.append(item)
data['code'] = 200
data['msg'] = 'Request successful'
data['news_list'] = news_list
return JsonResponse(data)
| 15,629
|
def get_ngd_dir(config, absolute = False):
"""Returns the ngd output directory location
Args:
config (dictionary): configuration dictionary
absolute (boolean):
False (default): Relative to project base
True: Absolute
Returns:
(string): string representation of the path to the output
Raises:
Nothing
"""
build_dir = utils.get_build_directory(config, absolute)
ngd_dir = os.path.join(build_dir, NGD_DIR)
return ngd_dir
| 15,630
|
def __setup_row_data_validation(
sheet,
status_data_validation,
priority_data_validation
):
"""
Apply data validation to the 'Status' and 'Priority' cells of the current row
Parameters
----------
sheet: Sheet
sheet to setup data validation for
status_data_validation: DataValidation
data validation model for `Status` column
priority_data_validation: DataValidation
data validation model for `Priority` column
"""
status_data_validation.add(sheet[f'{ascii_uppercase[1]}{__CURRENT_ROW}'])
priority_data_validation.add(sheet[f'{ascii_uppercase[7]}{__CURRENT_ROW}'])
| 15,631
|
def trigger():
"""Trigger salt-api call."""
data = {'foo': 'bar'}
return request('/hook/trigger', data=data)
| 15,632
|
def fitness_function(cams: List[Coord], pop: List[Coord]) -> int:
"""
Function to calculate number of surveilled citizens.
Check if all the cameras can see them, if any can score increases
"""
score = []
for cit in pop:
test = False
for cam in cams:
if (
math.sqrt(((cam[0] - cit[0]) ** 2) + ((cam[1] - cit[1]) ** 2))
<= view_radius
):
test = True
score.append(test)
return score.count(True)
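# Hedged sketch: view_radius is a module-level global in the original, and Coord is
# assumed to be an (x, y) tuple. One camera covers the first citizen only.
view_radius = 5
assert fitness_function(cams=[(0, 0)], pop=[(3, 4), (30, 40)]) == 1  # dist 5 vs. 50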
| 15,633
|
def main():
"""Get Message and Process."""
# Get the queue
try:
queue = sqs.get_queue_by_name(QueueName=os.environ["SQS_NAME"])
except ClientError:
print("SQS Queue {SQS_NAME} not found".format(SQS_NAME=os.environ["SQS_NAME"]))
sys.exit(1)
while True:
message = False
for message in queue.receive_messages():
m = _parse_message(json.loads(message.body))
print(json.dumps(m))
process(m)
# Let the queue know that the message is processed
message.delete()
if not message:
time.sleep(30) # if no message, let's wait 30secs
time.sleep(1)
| 15,634
|
def set_cross_correction_positive_length(element: int, value: float) -> None:
"""set cross correction positive length
Args:
element (int): element ID
value (float): a value
"""
| 15,635
|
def scrape_opening_hours():
""""scrape opening hours from https://www.designmuseumgent.be/bezoek"""
r = requests.get("https://www.designmuseumgent.be/bezoek")
data = r.text
return data
| 15,636
|
def _demo_mm_inputs(input_shape=(1, 3, 256, 256)):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
target = np.zeros([N, 17, H // 32, W // 32], dtype=np.float32)
mask = np.ones([N, H // 32, W // 32], dtype=np.float32)
joints = np.zeros([N, 30, 17, 2], dtype=np.float32)
img_metas = [{
'image_file': 'test.jpg',
'aug_data': [torch.zeros(1, 3, 256, 256)],
'test_scale_factor': [1],
'base_size': (256, 256),
'center': np.array([128, 128]),
'scale': np.array([1.28, 1.28]),
'flip_index': [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'target': [torch.FloatTensor(target)],
'mask': [torch.FloatTensor(mask)],
'joints': [torch.FloatTensor(joints)],
'img_metas': img_metas
}
return mm_inputs
| 15,637
|
def compute_one_epoch_baseline():
"""
Function to compute the performance of a simple one epoch baseline.
:return: a line to display (string reporting the experiment results)
"""
best_val_obj_list = []
total_time_list = []
for nb201_random_seed in nb201_random_seeds:
for random_seed in random_seeds:
# randomly sample 256 configurations for the given dataset and NASBench201 seed
# use the same seeds as for our other experiments
random.seed(random_seed)
cfg_list = random.sample(
range(len(df_dict[nb201_random_seed][dataset_name])), 256
)
selected_subset = df_dict[nb201_random_seed][dataset_name].iloc[cfg_list]
# find configuration with the best performance after doing one epoch
max_idx = selected_subset["val_acc_epoch_0"].argmax()
best_configuration = selected_subset.iloc[max_idx]
# find the best validation accuracy of the selected configuration
# as that is the metric that we compare
best_val_obj = best_configuration[epoch_names].max()
# we also need to calculate the time it took for this
# taking into account the number of workers
total_time = selected_subset["eval_time_epoch"].sum() / n_workers
best_val_obj_list.append(best_val_obj)
total_time_list.append(total_time)
line = " & {:.2f} $\pm$ {:.2f}".format(
np.mean(best_val_obj_list), np.std(best_val_obj_list)
)
line += " & {:.1f}h $\pm$ {:.1f}h".format(
np.mean(total_time_list) / 3600, np.std(total_time_list) / 3600
)
line += " & {:.1f}x".format(reference_time / np.mean(total_time_list))
line += " & 1.0 $\pm$ 0.0"
return line
| 15,638
|
def generate_random_string( length ):
"""Generate a random string of a given length containing uppercase and lowercase letters, digits and ASCII punctuation."""
source = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
return ''.join( random.choice( source ) for i in range( length ) )
| 15,639
|
def create(params):
"""Handles the 'change' operation for modifying a dataset.
Expected flags in 'params' are translated to Json Field names for creation content
"""
expectedArgs = {'--name': 'name'}
kwargs = translate_flags(expectedArgs, params)
rsp = server.datasets.create(**kwargs)
if rsp is None:
reportApiError(server, "Failure while creating a dataset")
else:
try:
dsid = server.json()["dataset_id"]
except Exception:
dsid = "???"
reportSuccess(server, f"Successfully created dataset with id {dsid}")
| 15,640
|
def start_session(Target=None, DocumentName=None, Parameters=None):
"""
Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.
See also: AWS API Documentation
Exceptions
:example: response = client.start_session(
Target='string',
DocumentName='string',
Parameters={
'string': [
'string',
]
}
)
:type Target: string
:param Target: [REQUIRED]
The instance to connect to for the session.
:type DocumentName: string
:param DocumentName: The name of the SSM document to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell . If no document name is provided, a shell to the instance is launched by default.
:type Parameters: dict
:param Parameters: Reserved for future use.
(string) --
(list) --
(string) --
:rtype: dict
ReturnsResponse Syntax
{
'SessionId': 'string',
'TokenValue': 'string',
'StreamUrl': 'string'
}
Response Structure
(dict) --
SessionId (string) --
The ID of the session.
TokenValue (string) --
An encrypted token value containing session and caller information. Used to authenticate the connection to the instance.
StreamUrl (string) --
A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.**region** .amazonaws.com/v1/data-channel/**session-id** ?stream=(input|output)
region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the AWS General Reference .
session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE .
Exceptions
SSM.Client.exceptions.InvalidDocument
SSM.Client.exceptions.TargetNotConnected
SSM.Client.exceptions.InternalServerError
:return: {
'SessionId': 'string',
'TokenValue': 'string',
'StreamUrl': 'string'
}
:returns:
SSM.Client.exceptions.InvalidDocument
SSM.Client.exceptions.TargetNotConnected
SSM.Client.exceptions.InternalServerError
"""
pass
| 15,641
|
def start_cmd(cmd, use_file=False):
"""Start command and returns proc instance from Popen."""
orig_cmd = ""
# Multi-commands need to be written to a temporary file to execute on Windows.
# This is due to complications with invoking Bash in Windows.
if use_file:
orig_cmd = cmd
temp_file = create_temp_executable_file(cmd)
# The temporary file name will have '\' on Windows and needs to be converted to '/'.
cmd = "bash -c {}".format(temp_file.replace("\\", "/"))
# If 'cmd' is specified as a string, convert it to a list of strings.
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if use_file:
LOGGER.debug("Executing '%s', tempfile contains: %s", cmd, orig_cmd)
else:
LOGGER.debug("Executing '%s'", cmd)
# We use psutil.Popen() rather than subprocess.Popen() in order to cache the creation time of
# the process. This enables us to reliably detect pid reuse in kill_process().
proc = psutil.Popen(cmd, close_fds=True)
LOGGER.debug("Spawned process %s pid %d", proc.name(), proc.pid)
return proc
| 15,642
|
def pretty_table(rows, header=None):
"""
Returns a string with a simple pretty table representing the given rows.
Rows can be:
- Sequences such as lists or tuples
- Mappings such as dicts
- Any object with a __dict__ attribute (most plain python objects) which is
equivalent to passing the __dict__ directly.
If no header is given then either all or none of the rows must be sequences
to ensure the correct order. If there are no sequences then the header will be
derived from the keys of the mappings.
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]]))
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]], header='col1 col2 col3 col4'))
col1 | col2 | col3 | col4
---------------------------
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))
a | b
-----
1 | 2
3 | 4
>>> class C(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]], header=['b', 'a']))
b | a
-----
2 | 1
4 | 3
5 | 6
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]]))
Traceback (most recent call last):
...
ValueError: Cannot mix sequences and other types of rows without specifying a header
>>> print(pretty_table([[1, 2], [3, 4, 5]]))
Traceback (most recent call last):
...
ValueError: Mismatched lengths.
First row (len = 2):
[1, 2]
Current row (len = 3):
[3, 4, 5]
>>> print(pretty_table([{'a': 1, 'b': 2}], header='c d'))
Traceback (most recent call last):
....
KeyError: "Tried to access 'c', only keys are: ['a', 'b']"
"""
rows2 = []
if header:
header = ensure_list_if_string(header)
rows2.insert(0, header)
row_type = ['any']
else:
header = []
row_type = [None]
def require_type(t):
if row_type[0] not in (None, t, 'any'):
raise ValueError('Cannot mix sequences and other types of rows without specifying a header')
if row_type[0] is None:
row_type[0] = t
def handle_dict(d):
require_type('mapping')
if not header:
header[:] = sorted(d.keys())
rows2.insert(0, header)
return [helpful_error_dict_get(d, key) for key in header]
for row in rows:
if isinstance(row, Mapping):
row = handle_dict(row)
elif isinstance(row, Sequence):
require_type('sequence')
if rows2 and len(row) != len(rows2[0]):
raise ValueError('Mismatched lengths.\n'
'First row (len = %s):\n%s\n'
'Current row (len = %s):\n%s' %
(len(rows2[0]), rows2[0], len(row), row))
else:
row = handle_dict(row.__dict__)
rows2.append(row)
rows = [[str(cell) for cell in row] for row in rows2]
widths = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
lines = [' | '.join(cell.ljust(width) for cell, width in zip(row, widths)).strip()
for row in rows]
if header:
lines.insert(1, '-' * len(lines[0]))
return '\n'.join(lines)
| 15,643
|
def initGlobals():
"""
function: init global variables
input: NA
output: NA
"""
global g_oldVersionModules
global g_clusterInfo
global g_oldClusterInfo
global g_logger
global g_dbNode
# make sure which env file we use
g_opts.userProfile = g_opts.mpprcFile
# init g_logger
g_logger = GaussLog(g_opts.logFile, g_opts.action)
if g_opts.action in [const.ACTION_RESTORE_CONFIG,
const.ACTION_SWITCH_BIN,
const.ACTION_CLEAN_INSTALL_PATH]:
g_logger.debug(
"No need to init cluster information under action %s."
% g_opts.action)
return
# init g_clusterInfo
# not all action need init g_clusterInfo
try:
g_clusterInfo = dbClusterInfo()
if g_opts.xmlFile == "" or not os.path.exists(g_opts.xmlFile):
g_clusterInfo.initFromStaticConfig(g_opts.user)
else:
g_clusterInfo.initFromXml(g_opts.xmlFile)
except Exception as e:
g_logger.debug(traceback.format_exc())
g_logger.error(str(e))
# init cluster info from install path failed
# try to do it from backup path again
g_opts.bakPath = DefaultValue.getTmpDirFromEnv() + "/"
staticConfigFile = "%s/cluster_static_config" % g_opts.bakPath
if os.path.isfile(staticConfigFile):
try:
# import old module
g_oldVersionModules = OldVersionModules()
sys.path.append(os.path.dirname(g_opts.bakPath))
g_oldVersionModules.oldDbClusterInfoModule = __import__(
'OldDbClusterInfo')
# init old cluster config
g_clusterInfo = \
g_oldVersionModules.oldDbClusterInfoModule.dbClusterInfo()
g_clusterInfo.initFromStaticConfig(g_opts.user,
staticConfigFile)
except Exception as e:
g_logger.error(str(e))
# maybe the old cluster is V1R5C00 TR5 version,
# not support specify static config file
# path for initFromStaticConfig function,
# so use new cluster format try again
try:
g_clusterInfo = dbClusterInfo()
g_clusterInfo.initFromStaticConfig(g_opts.user,
staticConfigFile)
except Exception as e:
g_logger.error(str(e))
try:
# import old module
importOldVersionModules()
# init old cluster config
g_clusterInfo = \
g_oldVersionModules \
.oldDbClusterInfoModule.dbClusterInfo()
g_clusterInfo.initFromStaticConfig(g_opts.user)
except Exception as e:
raise Exception(str(e))
elif g_opts.xmlFile and os.path.exists(g_opts.xmlFile):
try:
sys.path.append(sys.path[0] + "/../../gspylib/common")
curDbClusterInfoModule = __import__('DbClusterInfo')
g_clusterInfo = curDbClusterInfoModule.dbClusterInfo()
g_clusterInfo.initFromXml(g_opts.xmlFile)
except Exception as e:
raise Exception(str(e))
else:
try:
# import old module
importOldVersionModules()
# init old cluster config
g_clusterInfo = \
g_oldVersionModules.oldDbClusterInfoModule.dbClusterInfo()
g_clusterInfo.initFromStaticConfig(g_opts.user)
except Exception as e:
raise Exception(str(e))
# init g_dbNode
localHost = DefaultValue.GetHostIpOrName()
g_dbNode = g_clusterInfo.getDbNodeByName(localHost)
if g_dbNode is None:
raise Exception(
ErrorCode.GAUSS_512["GAUSS_51209"] % ("NODE", localHost))
| 15,644
|
def interpolate_trajectory(world_map, waypoints_trajectory, hop_resolution=1.0):
"""
Given some raw keypoints, interpolate a full dense trajectory to be used by the user.
Args:
world_map: a reference to the CARLA world map so we can use the planner
waypoints_trajectory: the current coarse trajectory
hop_resolution: the resolution; how dense the provided trajectory is going to be made
Return:
route: full interpolated route, both in GPS coordinates and in its original form.
"""
dao = GlobalRoutePlannerDAO(world_map, hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
# Obtain route plan
route = []
for i in range(len(waypoints_trajectory) - 1): # Goes until the one before the last.
waypoint = waypoints_trajectory[i]
waypoint_next = waypoints_trajectory[i + 1]
interpolated_trace = grp.trace_route(waypoint, waypoint_next)
for wp_tuple in interpolated_trace:
route.append((wp_tuple[0].transform, wp_tuple[1]))
return route
| 15,645
|
def import_data(filepath="/home/vagrant/countries/NO.txt", mongodb_url="mongodb://localhost:27017"):
"""
Import the adress data into mongodb
CLI Example:
salt '*' mongo.import_data /usr/data/EN.txt
"""
client = MongoClient(mongodb_url)
db = client.demo
address_col = db.address
# Delete any existing documents in the collection
print("Dropping collection of addresses")
address_col.delete_many({})
#Create compound indices for full text search
address_col.create_index([
("country_code", TEXT),
("postal_code", TEXT),
("place_name", TEXT),
("admin_name1", TEXT),
("admin_name2", TEXT),
("admin_name3", TEXT),
])
# Split line on the tab character since this is the delimiter.
for line in _read_file(filepath):
parts = line.split("\t")
if parts and len(parts) >= 12:
address = {
"country_code": parts[0],
"postal_code": parts[1],
"place_name": parts[2],
"admin_name1": parts[3],
"admin_code1": parts[4],
"admin_name2": parts[5],
"admin_code2": parts[6],
"admin_name3": parts[7],
"admin_code3": parts[8],
"latitude": parts[9],
"longitude": parts[10],
"accuracy": parts[11].strip()
}
address_col.insert_one(address)
else:
log.error("Element has to few parts to parse")
return "Done importing all data"
| 15,646
|
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Enphase Envoy sensor."""
ip_address = config[CONF_IP_ADDRESS]
monitored_conditions = config[CONF_MONITORED_CONDITIONS]
name = config[CONF_NAME]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
_LOGGER.info("Envoy async_setup_platform called")
f = EnvoyReaderFactory(host=ip_address, username=username, password=password)
# The factory will return a reader based on the SW/FW version found in info.xml
envoy_reader = await f.get_reader()
entities = []
async def async_update_data():
try:
async with async_timeout.timeout(10):
return await envoy_reader.get_data()
except requests.exceptions.HTTPError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="EnphaseEnvoy",
update_method=async_update_data,
update_interval= timedelta(seconds=30),
)
# Do an initial data collection so the list with inverters is filled
await coordinator.async_refresh()
# Iterate through the list of sensors configured
for condition in monitored_conditions:
if condition == "inverters":
# The initial data collection made sure we know all inverters that are available at this point
for inverter in coordinator.data['inverters']:
entities.append(
EnvoyInverter(
coordinator,
inverter['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]} {inverter['serial_number']}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
else:
entities.append(
Envoy(
coordinator,
coordinator.data['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
async_add_entities(entities)
| 15,647
|
def _get_piping_verb_node(calling_node: ast.Call) -> ast.Call:
"""Get the ast node that is ensured the piping verb call
Args:
calling_node: Current Call node
Returns:
The verb call node if found, otherwise None
"""
from .register import PIPING_SIGNS
from .verb import Verb
# check if we have the piping node (i.e. >>)
child = calling_node
parent = getattr(child, "parent", None)
token = PIPING_SIGNS[Verb.CURRENT_SIGN].token
while parent:
if (
# data >> verb(...)
(isinstance(parent, ast.BinOp) and parent.right is child)
or
# data >>= verb(...)
(isinstance(parent, ast.AugAssign) and parent.value is child)
) and isinstance(parent.op, token):
return child
child = parent
parent = getattr(parent, "parent", None)
return None
| 15,648
|
def files_to_yaml(*files: str):
""" Reads sample manifests from files."""
for name in files:
with open(name, 'r') as stream:
manifest = yaml.safe_load(stream)
yield (name, manifest)
| 15,649
|
def slot(**kwargs):
"""Creates a SlotConfig instance based on the arguments.
Args:
**kwargs: Expects the following keyed arguments.
in_dist: Distribution for inbound in msec. Optional
in_max_bytes: Optional. Ignored when in_dist is missing.
in_max_pkts: Optional. Ignored when in_dist is missing.
out_dist: Distribution for outbound in msec. Optional
At least one of in_dist and out_dist must be available.
out_max_bytes: Optional. Ignored when out_dist is missing.
out_max_pkts: Optional. Ignored when out_dist is missing.
Returns:
The SlotConfig instance.
Raises:
ValueError: When both in_dist and out_dist are missing.
When an unexpected key is passed.
"""
expected_keys = {'in_dist', 'in_max_bytes', 'in_max_pkts', 'out_dist',
'out_max_bytes', 'out_max_pkts'}
if set(kwargs) - expected_keys:
raise ValueError('unexpected args: %s' %
','.join(set(kwargs) - expected_keys))
in_slot = None
out_slot = None
if 'in_dist' in kwargs:
in_slot = Slot(
kwargs['in_dist'],
kwargs['in_max_bytes'] if 'in_max_bytes' in kwargs else 0,
kwargs['in_max_pkts'] if 'in_max_pkts' in kwargs else 0)
if 'out_dist' in kwargs:
out_slot = Slot(
kwargs['out_dist'],
kwargs['out_max_bytes'] if 'out_max_bytes' in kwargs else 0,
kwargs['out_max_pkts'] if 'out_max_pkts' in kwargs else 0)
if not (in_slot or out_slot):
raise ValueError('in_dist or out_dist must be defined')
return SlotConfig(in_slot, out_slot)
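# Usage sketch (Slot/SlotConfig are defined elsewhere in this module; the distribution
# string below is illustrative, not the verified syntax):
cfg = slot(in_dist='pareto:1,10', in_max_bytes=1500)  # -> SlotConfig(Slot(...), None)
# slot() with neither in_dist nor out_dist raises ValueError.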
| 15,650
|
def get_agent_type(player):
""" Prompts user for info as to the type of agent to be created """
print('There are two kinds of Agents you can initialise.')
print(' 1 - <Human> - This would be a totally manually operated agent.')
print(' You are playing the game yourself.')
print(' 2 - <Random> - This is an agent who simply makes totally random moves.')
print(' They select from the set of all legal moves.')
# print(' 3 - <Engine> - This is an agent which selects moves on the basis of some')
# print(' pre-programmed algorithm.')
print(f'\nWhich type of agent should {player} be?')
while True:
result = input(' : ')
if result.isalpha(): # check response is all letters
result = result.lower() # make them all lowercase
if result.lower() == 'human':
agent_type = result.capitalize()
break
elif result.lower() == 'random':
agent_type = result.capitalize()
break
# elif result.lower() == 'engine':
# not_implemented('Engine')
# continue
elif result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
elif result.isnumeric():
if result == '1':
agent_type = 'Human'
break
elif result == '2':
agent_type = 'Random'
break
# elif result == '3':
# not_implemented('Engine')
# continue
agent_name = player
print(f'And their name? Typing nothing will use the default name: {player}')
while True:
result = input(' : ')
if result == '':
break
elif result.isalnum():
if result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
agent_name = result
break
else:
print('\n Can only include letters or numbers.\n')
return agent_type, agent_name
| 15,651
|
def xml_generation():
"""generate all xml needed for the sizes mode
"""
for max_size in sizes:
byte_file = open(xml_filepath, "rb")
filename = working_dir + "ssxtd_test_" + str(max_size) +".xml"
with open(filename, 'wb') as new_file:
#new_file.write(b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
new_file.write(start_tag.encode())
max_size_reached = False
while not max_size_reached:
byte_file.seek(0)
for _, elem in ET.iterparse(byte_file, events=('end', )):
if elem.tag == tag:
new_file.write(ET.tostring(elem))
#size = os.path.getsize(new_file)
size = new_file.tell()
mb_size = size/(1024.0*1024.0)
if mb_size > max_size:
new_file.write(end_tag.encode())
max_size_reached = True
break
print("finished writing "+ str(max_size) + "MB test file")
| 15,652
|
def _try_match_and_transform_pattern_1(reduce_op, block) -> bool:
"""
Identify the pattern:
y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
x --> reduce_mean --> sub --> square --> reduce_mean --> add(epsilon) --> rsqrt
| | ^ |
| | | V
|----------------------- mul (gamma)
| | |
| | --------|---------
| | | |
| | | V
| |----------------------------------------------------------------> mul
| | |
| V |
|--------------------------------------------------------------> mul |
| V
| sub (beta) --> add --> [...]
| ^
|-------------------------------
This pattern corresponds to either layer_norm or instance_norm.
It is instance_norm if all of the following are true:
- input is rank 4
- axes of reduce_mean is [-2, -1] or [-3, -2]
(when [-3, -2], a channel first to channel last transpose would be inserted)
- gamma and beta are rank 1, after squeeze
It is layer_norm if all of the following are true:
- axes is either [-1] or [-1, -2] or [-1, -2, -3] and so on
- rank of gamma and beta is equal to the length of the axes
"""
ops_to_remove = []
root_var = reduce_op.x
if root_var.shape is None:
return False
# check that root_var feeds into exactly 3 ops
if len(list(root_var.child_ops)) != 3:
return False
if root_var.op is not None and not _check_child_op_types(
root_var.op, child_op_types=["reduce_mean", "sub", "mul"]
):
return False
# check 1st reduce_mean op
if not _check_reduce_op(reduce_op):
return False
ops_to_remove.append(reduce_op)
# check 1st sub op
if not _check_child_op_types(reduce_op, ["sub", "mul"], check_order=False):
return False
child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops)
op_a = child_ops_reduce_mean[0]
op_b = child_ops_reduce_mean[1]
sub_op1 = op_a if op_a.op_type == "sub" else op_b
if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]):
return False
ops_to_remove.append(sub_op1)
# check square op
square_op = _try_get_child_op_type(sub_op1, "square")
if square_op is None:
return False
ops_to_remove.append(square_op)
# check second reduce mean
reduce_op2 = _try_get_child_op_type(square_op, "reduce_mean")
if not _check_reduce_op(reduce_op2):
return False
ops_to_remove.append(reduce_op2)
# check add op (with epsilon)
add_op1 = _try_get_child_op_type(reduce_op2, "add")
if add_op1 is None:
return False
epsilon_var = add_op1.y if add_op1.x == reduce_op2.outputs[0] else add_op1.x
if epsilon_var.val is None or len(epsilon_var.val.shape) != 0:
return False # must be scalar
ops_to_remove.append(add_op1)
# check rsqrt
rsqrt_op = _try_get_child_op_type(add_op1, "rsqrt")
if rsqrt_op is None:
return False
ops_to_remove.append(rsqrt_op)
# check mul (gamma)
mul_op1 = _try_get_child_op_type(rsqrt_op, "mul")
if mul_op1 is None:
return False
gamma_var = mul_op1.y if mul_op1.x == rsqrt_op.outputs[0] else mul_op1.x
if gamma_var.val is None:
return False
ops_to_remove.append(mul_op1)
# check 2 muls after the gamma mul
if not _check_child_op_types(mul_op1, ["mul", "mul"]):
return False
child_ops = list(mul_op1.outputs[0].child_ops)
mul_op2 = child_ops[0]
mul_op3 = child_ops[1]
mul_op2_other_var = mul_op2.x if mul_op2.y == mul_op1.outputs[0] else mul_op2.y
mul_op3_other_var = mul_op3.x if mul_op3.y == mul_op1.outputs[0] else mul_op3.y
if not (
(mul_op2_other_var == root_var and mul_op3_other_var == reduce_op.outputs[0])
or (mul_op2_other_var == reduce_op.outputs[0] and mul_op3_other_var == root_var)
):
return False
if mul_op2_other_var == root_var:
mul_root_op = mul_op2
mul_mean_op = mul_op3
else:
mul_root_op = mul_op3
mul_mean_op = mul_op2
ops_to_remove.append(mul_mean_op)
ops_to_remove.append(mul_root_op)
# check sub with beta
sub_op2 = _try_get_child_op_type(mul_mean_op, "sub")
if sub_op2 is None:
return False
if sub_op2.y != mul_mean_op.outputs[0]:
return False
beta_var = sub_op2.x
if beta_var.val is None:
return False
ops_to_remove.append(sub_op2)
# check last add op
add_op2 = _try_get_child_op_type(sub_op2, "add")
if add_op2 is None:
return False
if not (add_op2.x == mul_root_op.outputs[0] or add_op2.y == mul_root_op.outputs[0]):
return False
ops_to_remove.append(add_op2)
return _try_apply_transform(
reduce_op, block, gamma_var, beta_var, epsilon_var, add_op2, ops_to_remove
)
| 15,653
|
def cli_resize(maxsize):
"""Resize images to a maximum side length preserving aspect ratio."""
click.echo("Initializing resize with parameters {}".format(locals()))
def _resize(images):
for info, image in images:
yield info, resize(image, maxsize)
return _resize
| 15,654
|
def _fetch_object_array(cursor):
"""
_fetch_object_array() fetches arrays with a basetype that is not considered
scalar.
"""
arrayShape = cursor_get_array_dim(cursor)
# handle a rank-0 array by converting it to
# a 1-dimensional array of size 1.
if len(arrayShape) == 0:
arrayShape.append(1)
# now create the (empty) array of the correct type and shape
array = numpy.empty(dtype=object,shape=arrayShape)
# goto the first element
cursor_goto_first_array_element(cursor)
# loop over all elements excluding the last one
arraySizeMinOne = array.size - 1
for i in range(arraySizeMinOne):
array.flat[i] = _fetch_subtree(cursor)
cursor_goto_next_array_element(cursor)
# final element, then back to parent scope
array.flat[arraySizeMinOne] = _fetch_subtree(cursor)
cursor_goto_parent(cursor)
return array
| 15,655
|
def sldParse(sld_str):
"""
Builds a dictionary from an SldStyle string.
"""
sld_str = sld_str.replace("'", '"').replace('\\"', '"')  # normalize quotes and unescape \" sequences
keys = ['color', 'label', 'quantity', 'opacity']
items = [el.strip() for el in sld_str.split('ColorMapEntry') if '<RasterSymbolizer>' not in el]
sld_items = []
for i in items:
tmp = {}
for k in keys:
v = find_between(i, f'{k}="', '"')
if v: tmp[k] = v
sld_items.append(tmp)
return {
'type': find_between(sld_str, 'type="', '"'),
'extended': find_between(sld_str, 'extended="', '"'),
'items': sld_items
}
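# Hedged example (find_between is a helper from this module, assumed to return the
# substring between two delimiters). A one-entry ramp style might parse like this:
sld = ('<RasterSymbolizer><ColorMap type="ramp" extended="false">'
       '<ColorMapEntry color="#ff0000" quantity="1" opacity="1"/></ColorMap></RasterSymbolizer>')
# sldParse(sld) -> {'type': 'ramp', 'extended': 'false',
#                   'items': [{'color': '#ff0000', 'quantity': '1', 'opacity': '1'}]}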
| 15,656
|
def x_ideal(omega, phase):
"""
Generates a complex-exponential signal with given frequency
and phase. Does not contain noise
"""
x = np.empty(cfg.N, dtype=np.complex_)
for n in range(cfg.N):
z = 1j*(omega * (cfg.n0+n) * cfg.Ts + phase)
x[n] = cfg.A * np.exp(z)
return x
| 15,657
|
def _wrap_stdout(outfp):
"""
Wrap a filehandle into a C function to be used as `stdout` or
`stderr` callback for ``set_stdio``. The filehandle has to support the
write() and flush() methods.
"""
def _wrap(instance, data, count):
outfp.write(data[:count])
outfp.flush()
return count
return c_stdstream_call_t(_wrap)
| 15,658
|
def svn_fs_apply_textdelta(*args):
"""
svn_fs_apply_textdelta(svn_fs_root_t root, char path, char base_checksum,
char result_checksum, apr_pool_t pool) -> svn_error_t
"""
return _fs.svn_fs_apply_textdelta(*args)
| 15,659
|
def find(x):
"""
Find the representative of a node
"""
if x.instance is None:
return x
else:
# collapse the path and return the root
x.instance = find(x.instance)
return x.instance
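# Minimal sketch of the convention assumed by find(): each node has an `instance`
# pointer that is None at the representative (root) of its set.
class _Node:
    def __init__(self):
        self.instance = None

a, b, c = _Node(), _Node(), _Node()
b.instance = a
c.instance = b
assert find(c) is a
assert c.instance is a  # the path c -> b -> a was compressed to c -> a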
| 15,660
|
def convert_shape(node, **kwargs):
"""Map MXNet's shape_array operator attributes to onnx's Shape operator
and return the created node.
"""
return create_basic_op_node('Shape', node, kwargs)
| 15,661
|
def _UploadScreenShotToCloudStorage(fh):
""" Upload the given screenshot image to cloud storage and return the
cloud storage url if successful.
"""
try:
return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT,
_GenerateRemotePath(fh), fh.GetAbsPath())
except cloud_storage.CloudStorageError as err:
logging.error('Cloud storage error while trying to upload screenshot: %s',
repr(err))
return '<Missing link>'
finally: # Must clean up screenshot file if exists.
os.remove(fh.GetAbsPath())
| 15,662
|
def get_timestamp(prev_ts=None):
"""Internal helper to return a unique TimeStamp instance.
If the optional argument is not None, it must be a TimeStamp; the
return value is then guaranteed to be at least 1 microsecond later
than the argument.
"""
t = time.time()
t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
if prev_ts is not None:
t = t.laterThan(prev_ts)
return t
| 15,663
|
def mintos(input_file, input_directory, calculation):
"""Calculates income and tax from Mintos transaction log, using D-1 NBP PLN exchange rate."""
account = MintosAccount()
if input_file:
account.load_transaction_log(input_file)
else:
account.load_transaction_logs(input_directory)
account.init_cash_flow()
table = None
for c in calculation:
ls(f"{c}")
if c == "INCOME":
table = account.get_foreign()
elif c == "INCOME_PLN":
table = account.get_pln()
"""
Kwotę należnego podatku wpisuje do pola o enigmatycznej nazwie „Zryczałtowany podatek obliczony od przychodów (dochodów), o których mowa w art. 30a ust. 1 pkt 1–5 ustawy, uzyskanych poza granicami Rzeczypospolitej Polskiej”.
Kwotę podatku pobranego za granicą wpisujemy do pola „Podatek zapłacony za granicą, o którym mowa w art. 30a ust. 9 ustawy”.
2019
W PIT-36 – pola 355, 356, 357 i 358 w sekcji N.
W PIT-36L – pola 115 i 116 w sekcji K.
W PIT-38 – pola 45 i 46 w sekcji G.
"""
if table:
print(tabulate(table, headers="firstrow", floatfmt=".2f", tablefmt="presto"))
| 15,664
|
def get_element_as_string(element):
"""
turn xml element from etree to string
:param element:
:return:
"""
return lxml.etree.tostring(element, pretty_print=True).decode()
| 15,665
|
def get(dirPath):
"""指定したパスのファイル一覧を取得する"""
if sys.version_info.major != 3:
print("Error!!\nPython 3.x is required.")
exit()
if sys.version_info.minor >= 5:
# python 3.5以降
fileList = []
fileList = glob.glob(dirPath, recursive=True)
return fileList
else:
# python3.4以前
fileList = []
for root, dirs, files in os.walk(dirPath):
for filename in files:
fileList.append(os.path.join(root, filename)) # ファイルのみ再帰でいい場合はここまででOK
for dirname in dirs:
fileList.append(os.path.join(root, dirname)) # サブディレクトリまでリストに含めたい場合はこれも書く
print(fileList)
return fileList
| 15,666
|
def _select_ports(count, lower_port, upper_port):
"""Select and return n random ports that are available and adhere to the given port range, if applicable."""
ports = []
sockets = []
for i in range(count):
sock = _select_socket(lower_port, upper_port)
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports
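# `_select_socket` is defined elsewhere; a minimal sketch of the assumed
# behavior -- bind a socket to a free port, randomly chosen from the given
# range when one is set, else OS-assigned (port 0). A real implementation
# would retry on EADDRINUSE:
#
#   import random, socket
#
#   def _select_socket(lower_port, upper_port):
#       sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       if lower_port == 0 and upper_port == 0:
#           sock.bind(('', 0))  # let the OS pick any free port
#       else:
#           sock.bind(('', random.randint(lower_port, upper_port)))
#       return sock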
| 15,667
|
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
reps = dict(zip(var, (u, v)))
eq = Add(*[j*i.xreplace(reps) for i, j in coeff.items()])
return _mexpand(eq) == 0
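# Example, assuming the usual sympy imports (symbols, S):
#
#   x, y = symbols('x y')
#   coeff = {x**2: 1, y**2: -1, S.One: -3}   # encodes x**2 - y**2 - 3 = 0
#   is_solution_quad((x, y), coeff, 2, 1)    # True: 4 - 1 - 3 == 0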
| 15,668
|
def is_callable(x):
"""Tests if something is callable"""
return callable(x)
| 15,669
|
def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version
| 15,670
|
def test_default_logger():
"""Create a logger with default options.
Only stdout logger must be used."""
capture = CaptureStdOut()
with capture:
test_logger = pyansys_logging.Logger()
test_logger.info("Test stdout")
assert "INFO - - test_pyansys_logging - test_default_logger - Test stdout" in capture.content
    # File handlers are not activated, so no log file should have been created.
    assert not os.path.exists(os.path.join(os.getcwd(), "PyProject.log"))
| 15,671
|
def test_created_nostartup(mock_clean_state, mock_vm_one):
"""
Test with one image and startup_import_event unset/false
"""
    # NOTE: this should yield 0 created events + one state event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__, {"vmadm.list": MagicMock(return_value=mock_vm_one)}
):
config = []
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = vmadm.beacon(config)
res = [
{
"alias": "vm1",
"tag": "running/00000000-0000-0000-0000-000000000001",
"hostname": "vm1",
"dns_domain": "example.org",
}
]
assert ret == res
| 15,672
|
def roc_auc(probs, labels):
"""
Computes the area under the receiving operator characteristic between output probs
and labels for k classes.
Source: https://github.com/HazyResearch/metal/blob/master/metal/utils.py
args:
probs (tensor) (size, k)
labels (tensor) (size, 1)
"""
probs = torch.nn.functional.softmax(probs, dim=1)
probs = probs.numpy()
# Convert labels to one-hot indicator format, using the k inferred from probs
labels = hard_to_soft(labels, k=probs.shape[1]).numpy()
return skl.roc_auc_score(labels, probs)
| 15,673
|
def test_FUNC_default_WITH_map_attribute_EXPECT_successful_serialization() -> None:
"""
Checks whether serialization works.
:return: No return.
"""
d = MapAttribute(attributes={
'key': Decimal(1),
'key2': {1, 2, 3, 3, 3, 3}
})
try:
json.dumps(d)
except TypeError:
pass
else:
raise AssertionError('Expected to fail.')
json.dumps(d, cls=PynamoDBEncoder)
| 15,674
|
def show_collection(request, collection_id):
"""Shows a collection"""
collection = get_object_or_404(Collection, pk=collection_id)
    # New attribute storing the list of problems, including each problem's number of submissions
collection.problem_list = collection.problems()
for problem in collection.problem_list:
problem.num_submissions = problem.num_submissions_by_user(request.user)
problem.solved = problem.solved_by_user(request.user)
return render(request, 'collection.html', {'collection': collection})
| 15,675
|
def alpha(
data: np.ndarray,
delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal",
):
"""Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for
inter-rater agreement.
[1] K. Krippendorff, Content analysis: An introduction to its
methodology. Sage publications, 2004.
Args:
-----
data: numpy.ndarray
The data matrix, shape (n_raters, n_units). Each cell (i, j)
represents the value assigned to unit j by rater i, or 0
representing no response.
delta: callable, 2-D array-like or str
The delta metric. Default is the nominal metric, which takes the
value 1 in case c != k and 0 otherwise.
"""
# The following implementation was based off the Wikipedia article:
# https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
# Response categories go from 1 to R, 0 represents no response
R = np.max(data)
counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T
count_sum = np.sum(counts, 0)
assert len(count_sum) == R + 1
def ordinal(c: int, k: int):
if k < c:
c, k = k, c
s = (
sum(count_sum[g] for g in range(c, k + 1))
- (count_sum[c] + count_sum[k]) / 2
)
return s ** 2
if isinstance(delta, str):
delta = {
"nominal": Deltas.nominal,
"ordinal": ordinal,
"interval": Deltas.interval,
}[delta]
    if not callable(delta):
        try:
            delta[0][0]
        except (IndexError, TypeError):
            raise TypeError("delta must be either str, callable or 2D array.")
        # Capture the matrix before rebinding `delta`; closing over `delta`
        # itself would make _delta index a function object at call time.
        delta_matrix = delta
        def _delta(c, k):
            return delta_matrix[c][k]
        delta = _delta
m_u = np.sum(counts[:, 1:], 1)
valid = m_u >= 2
counts = counts[valid]
m_u = m_u[valid]
data = data[:, valid]
n = np.sum(m_u)
n_cku = np.matmul(counts[:, :, None], counts[:, None, :])
for i in range(R + 1):
n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)
D_o = 0
for c in range(1, R + 1):
for k in range(1, R + 1):
D_o += delta(c, k) * n_cku[:, c, k]
D_o = np.sum(D_o / (n * (m_u - 1)))
D_e = 0
    # minlength keeps P_ck at length R + 1 even if the top category was
    # filtered out along with the dropped units
    P_ck = np.bincount(data.flat, minlength=R + 1)
for c in range(1, R + 1):
for k in range(1, R + 1):
D_e += delta(c, k) * P_ck[c] * P_ck[k]
D_e /= n * (n - 1)
return 1 - D_o / D_e
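# A small worked example: two raters, four units, categories 1-2, one missing
# response (0). Units with fewer than two responses are dropped internally.
#
#   data = np.array([[1, 2, 1, 0],
#                    [1, 2, 2, 1]])
#   alpha(data)             # nominal agreement (default)
#   alpha(data, "ordinal")  # ordinal weighting instead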
| 15,676
|
def enforce(action, target, creds, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param creds: user credentials
:param action: string representing the action to be checked, which
should be colon separated for clarity.
Or it can be a Check instance.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:raises: `exception.Forbidden` if verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
    # Add the exception arguments if asked to do a raise
    extra = {}
    if do_raise:
        # `action` is a colon-separated string (see docstring); indexing it
        # directly would only pick out single characters, so split it into
        # its service and permission parts.
        service, permission = action.split(':', 1)
        extra.update(exc=exception.ForbiddenAction, service=service,
                     permission=permission, do_raise=do_raise)
return _ENFORCER.enforce(action, target, creds, **extra)
| 15,677
|
def query_data(session, agency_code, period, year):
""" Request A file data
Args:
session: DB session
agency_code: FREC or CGAC code for generation
period: The period for which to get GTAS data
year: The year for which to get GTAS data
Returns:
The rows using the provided dates for the given agency.
"""
# set a boolean to determine if the original agency code is frec or cgac
frec_provided = len(agency_code) == 4
tas_gtas = tas_gtas_combo(session, period, year)
# Make a list of FRECs to compare to for 011 AID entries
frec_list = []
if not frec_provided:
frec_list = session.query(FREC.frec_code).select_from(outerjoin(CGAC, FREC, CGAC.cgac_id == FREC.cgac_id)).\
filter(CGAC.cgac_code == agency_code).all()
# Group agencies together that need to be grouped
agency_array = []
if agency_code == '097':
agency_array = ['017', '021', '057', '097']
elif agency_code == '1601':
agency_array = ['1601', '016']
elif agency_code == '1125':
agency_array = ['1125', '011']
# Save the ATA filter
agency_filters = []
if not agency_array:
agency_filters.append(tas_gtas.c.allocation_transfer_agency == agency_code)
else:
agency_filters.append(tas_gtas.c.allocation_transfer_agency.in_(agency_array))
# Save the AID filter
if agency_code == '097' and not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier.in_(agency_array)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == agency_code))
else:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.fr_entity_type == agency_code))
# If we're checking a CGAC, we want to filter on all of the related FRECs for AID 011, otherwise just filter on
# that FREC
if frec_list:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type.in_(frec_list)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type == agency_code))
rows = initial_query(session, tas_gtas.c).\
filter(func.coalesce(tas_gtas.c.financial_indicator2, '') != 'F').\
filter(or_(*agency_filters)).\
group_by(tas_gtas.c.allocation_transfer_agency,
tas_gtas.c.agency_identifier,
tas_gtas.c.beginning_period_of_availa,
tas_gtas.c.ending_period_of_availabil,
tas_gtas.c.availability_type_code,
tas_gtas.c.main_account_code,
tas_gtas.c.sub_account_code)
return rows
| 15,678
|
def vocabulary(vec_docs):
    """ vocabulary(vec_docs) -> tuple: (float avg_doc_len, corpus vocabulary dict {"word": num_docs_containing_term, ...})
    vec_docs = dict of documents {ID: [list, of, words], ...}; its values are
    converted in place to per-document term counts {word: c(w,d), ...}.
    """
    vocabulary = {}
    count_vec = []  # aggregate doc lengths to determine avg_doc_len
    # Extract doc lengths, convert vec_docs values to c(w,d), and build the corpus vocabulary as c(d,w)
    for key, value in vec_docs.items():  # recall: {key = "doc_ID": value = [list, of, words, in, each, document]}
        doc_words = {}
        count_vec.append(len(value))
        for word in value:
            # convert the doc word list into a dict storing c(w,d) for d in D
            if word in doc_words:
                doc_words[word] = doc_words[word] + 1
            else:
                doc_words[word] = 1
        # next, build the vocabulary c(d,w) over the corpus
        for word, count in doc_words.items():
            if word in vocabulary:
                vocabulary[word] = vocabulary[word] + 1
            else:
                vocabulary[word] = 1
        # last, convert {ID: [list, of, words]} -> {ID: {dict: 1, of: 1, word: 1, counts: 2}}
        vec_docs[key] = doc_words
    avg_dl = sum(count_vec) / len(count_vec)
    return (avg_dl, vocabulary)
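# Usage sketch:
#
#   docs = {'d1': ['the', 'cat', 'the'], 'd2': ['the', 'dog']}
#   avg_dl, vocab = vocabulary(docs)
#   # avg_dl == 2.5
#   # vocab == {'the': 2, 'cat': 1, 'dog': 1}    (document frequencies)
#   # docs['d1'] == {'the': 2, 'cat': 1}         (term counts, updated in place)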
| 15,679
|
def plot_bursts(odf,
bdf,
lowest_level=0,
title=True,
daterange=None,
xrangeoffsets=3,
s=None,
gamma=None):
"""Plots burst and offset data.
odf = an offsets dataframe
bdf = an edgeburst dataframe
lowest_level = subset the burst dataframe with bursts greater than or equal to the specified level
daterange = a tuple with two elements: a start date and end date as *strings*. format is 'year-month-day'
xrangeoffsets = the number of days to add before and after the min and max x dates
"""
svo_title = str(set(bdf['svo']).pop())
fig, (axa, axb) = plt.subplots(2, sharey=False, sharex=True)
fig.set_figwidth(10)
fig.set_figheight(6)
formatter = mdates.DateFormatter("%b %d\n%Y")
axb.xaxis.set_major_formatter(formatter)
# offsets plot
day_freq = odf.resample('D').size()
axa.plot(day_freq, color='#32363A')
axa.xaxis.set_major_formatter(formatter)
axa.xaxis_date()
axa.tick_params(axis='both', which='both', length=0)
axa.set_ylabel('Daily offsets')
if daterange:
axa.set_xlim(pd.Timestamp(daterange[0]), pd.Timestamp(daterange[1]))
# bursts plot
days = [day_freq.index[0]]
levels = [0]
for i in range(1, len(day_freq.index)):
period_start = odf.resample('D').size().index[i - 1]
period_end = odf.resample('D').size().index[i]
max_burst = set()
days.append(period_end)
for j in range(len(bdf)):
burst_start = bdf['start'][j]
burst_end = bdf['end'][j]
level = bdf['level'][j]
if burst_end < period_start or period_end < burst_start :
pass
else:
max_burst.add(level)
levels.append(max(max_burst))
finaldf = pd.DataFrame({"start": days, "level": levels})
    if lowest_level > 0:
        bdf = bdf[bdf['level'] >= lowest_level]
        xmin = min(bdf['start'])
        xmax = max(bdf['start'])
        if xmin == xmax:
            raise Exception("There must be at least two bursts at or above the specified level. Try reducing the `lowest_level` parameter.")
        # pad the x-range by `xrangeoffsets` days on each side
        daterange = ((xmin - pd.DateOffset(days=xrangeoffsets)).date(),
                     (xmax + pd.DateOffset(days=xrangeoffsets)).date())
# bursts plot
axb.bar(finaldf['start'], finaldf['level'], color='#32363A', width=1)
    if s is not None and gamma is not None:
axb.set_ylabel(r'Burst levels (s = {}, $\gamma$ = {})'.format(s, gamma))
else:
axb.set_ylabel('Burst level')
axb.tick_params(axis='both', which='both', length=0)
if daterange:
axb.set_xlim(pd.Timestamp(daterange[0]), pd.Timestamp(daterange[1]))
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
if title is True:
fig.suptitle(f'{svo_title}', fontsize=12, ha='center')
| 15,680
|
def main(unused_argv):
"""Main entry.
Args:
* unused_argv: unused arguments (after FLAGS is parsed)
"""
try:
# setup the TF logging routine
tf.logging.set_verbosity(tf.logging.INFO)
# set the learning phase to 'inference'; data format may be changed if needed
set_learning_phase()
# inspect the model
meta_path = os.path.join(FLAGS.model_dir_dst, 'model.ckpt.meta')
inspect_model(meta_path)
# exit normally
return 0
except ValueError:
traceback.print_exc()
return 1 # exit with errors
| 15,681
|
def load_staging_tables(cur, conn, schema):
"""
The Function load data from S3 to staging tables which user made.
Args:
cur: Database cursor.
conn: Connection for database
schema: Schema for selected talbe
Returns:
None
"""
for query in copy_table_queries:
cur.execute(query.format(schema))
conn.commit()
| 15,682
|
def _build_type(type_, value, property_path=None):
""" Builds the schema definition based on the given type for the given value.
:param type_: The type of the value
:param value: The value to build the schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
for (type_check, builder) in (
(is_enum_type, _build_enum_type),
(is_null_type, _build_null_type),
(is_bool_type, _build_bool_type),
(is_string_type, _build_string_type),
(is_integer_type, _build_integer_type),
(is_number_type, _build_number_type),
(is_array_type, _build_array_type),
(is_object_type, _build_object_type),
):
if type_check(type_):
return builder(value, property_path=property_path)
# NOTE: warning ignores type None (as that is the config var default)
if type_:
warnings.warn(f"unhandled translation for type {type_!r} with value {value!r}")
return {}
| 15,683
|
def lidar_2darray_to_rgb(array: np.ndarray) -> np.ndarray:
"""Returns a `NumPy` array (image) from a 4 channel LIDAR point cloud.
Args:
array: The original LIDAR point cloud array.
Returns:
The `PyGame`-friendly image to be visualized.
"""
# Get array shapes.
W, H, C = array.shape
assert C == 2
    # Pad an empty third channel so the array renders as an RGB image.
    img = np.c_[array, np.zeros(shape=(W, H, 1))]
    # Scale to the 8-bit range [0, 255].
    img = 255 * (img / img.max())
return img
| 15,684
|
def changed_files(include_added_files=True):
"""
Return a generator of filenames changed in this commit. Excludes files that were just deleted.
"""
diff_filter = "CMRTUXB"
if include_added_files:
diff_filter += "A"
git_diff_command = "git diff-index --cached --name-only --diff-filter=%s HEAD" % diff_filter
git_out, git_err, git_rc = run_command(git_diff_command)
    if git_err or git_rc:
        print("# Internal hook error:\n%s\n%s\n" % (git_out, git_err))
        sys.exit(1)
cleaned_filenames = [filename.strip() for filename in git_out.splitlines()]
for filename in cleaned_filenames:
if len(filename) > 0:
yield filename
| 15,685
|
def args():
"""
--all (some subset that is useful for someone)
--packages (maybe positional?)
"""
parser = argparse.ArgumentParser("serviced-tests")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose logging")
types = parser.add_argument_group("Test Type")
types.add_argument("--unit", action="store_true", help="pass the 'unit' build tag")
types.add_argument("--integration", action="store_true", help="pass the 'integration' build tag")
options = parser.add_argument_group("Test Options")
options.add_argument("--quick", action="store_true", help="don't run tests with the '!quick' build constraint")
options.add_argument("--root", action="store_true", help="run the tests as the root user")
options.add_argument("--race", action="store_true", help="run tests with race detection")
options.add_argument("--cover", action="store_true", help="run tests with coverage")
options.add_argument("--tag", action="append", help="optional extra build tag (may be specified multiple times)")
options.add_argument("--include_vendor", action="store_true", dest="include_vendor", help="run tests against the vendor directory")
coverage = parser.add_argument_group("Coverage Options")
coverage.add_argument("--cover-html", required=False, help="output file for HTML coverage report")
coverage.add_argument("--cover-xml", required=False, help="output file for Cobertura coverage report")
fixtures = parser.add_argument_group("Fixture Options")
fixtures.add_argument("--elastic", action="store_true", help="start an elastic server before the test run")
fixtures.add_argument("--elastic-port", type=int, help="elastic server port", default=9202)
parser.add_argument("--packages", nargs="*", help="serviced packages to test, relative to the serviced root (defaults to ./...)")
parser.add_argument("arguments", nargs=argparse.REMAINDER, help="optional arguments to be passed through to the test runner")
return parser.parse_args()
| 15,686
|
def rollout(dataset: RPDataset,
env: RPEnv,
policy: Policy,
batch_size: int,
num_workers: int = 4,
disable_progress_bar: bool = False,
**kwargs) -> Tuple[Tensor, Union[List, Dict]]:
"""Policy evaluation rollout
Args:
dataset: dataset to evaluate on
env: the routing simulation environment
policy: policy model
batch_size: size of mini-batches
num_workers: num cores to distribute data loading
disable_progress_bar: flag to disable tqdm progress bar
Returns:
        tuple of (tensor of final costs per instance, list of per-batch eval infos)
"""
costs, infos = [], []
for batch in tqdm(
DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False # do not random shuffle data in rollout!
),
disable=disable_progress_bar,
):
with torch.no_grad():
cost, info = eval_episode(batch, env, policy, **kwargs)
costs.append(cost.cpu())
infos.append(info)
env.clear_cache()
return torch.cat(costs, dim=0), infos
| 15,687
|
def test_coinco_bert(bert_subst_generator, coinco_dataset_reader):
"""
Reproduction command:
python lexsubgen/evaluations/lexsub.py solve
--substgen-config-path configs/subst_generators/lexsub/bert.jsonnet
--dataset-config-path configs/dataset_readers/lexsub/coinco.jsonnet
--run-dir='debug/lexsub-all-models/coinco_bert'
--force
--experiment-name='lexsub-all-models'
--run-name='coinco_bert'
"""
scores = LexSubEvaluation(
substitute_generator=bert_subst_generator,
dataset_reader=coinco_dataset_reader,
).evaluate()["mean_metrics"]
assert scores["gap_normalized"] == pytest.approx(50.5, 0.02), str(scores)
assert scores["prec@1"] == pytest.approx(42.56), str(scores)
assert scores["prec@3"] == pytest.approx(32.64), str(scores)
assert scores["rec@10"] == pytest.approx(28.73), str(scores)
| 15,688
|
def merge(fname1, fname2, how="inner", on=None):
"""
Merging two csv files.
Usage: ph merge a.csv b.csv --on=ijk
"""
hows = ("left", "right", "outer", "inner")
if how not in hows:
sys.exit("Unknown merge --how={}, must be one of {}".format(how, hows))
df1 = pd.read_csv(fname1)
df2 = pd.read_csv(fname2)
    # pd.merge treats on=None as "join on shared columns", so no branching is needed
    pipeout(pd.merge(df1, df2, how=how, on=on))
| 15,689
|
def regex_ignore_case(term_values):
"""
turn items in list "term_values" to regexes with ignore case
"""
output=[]
for item in term_values:
output.append(r'(?i)'+item)
return output
| 15,690
|
def check_aws_installed():
"""Checks that AWS CLI is installed."""
if which('aws') is None:
raise ValueError('AWS CLI is not installed.')
| 15,691
|
def importance_sampling_integrator(function: Callable[..., np.ndarray],
                                   pdf: Callable[..., np.ndarray],
                                   sampler: Callable[..., np.ndarray],
                                   n: int = 10000,
                                   seed: int = 1
                                   ) -> np.ndarray:
    """
    Estimates an integral via importance sampling.
    Parameters
    ----------
    function : callable
        The integrand, evaluated elementwise on the samples.
    pdf : callable
        Density of the proposal distribution, evaluated on the samples.
    sampler : callable
        Draws n samples from the proposal distribution; expected to return
        an array of shape (batch, n).
    n : int, optional
        Number of samples to draw. The default is 10000.
    seed : int, optional
        Random seed. The default is 1.
    Returns
    -------
    numpy.ndarray
        One integral estimate per batch row.
    """
    # Set a random seed.
    np.random.seed(seed)
    # Generate n samples from the proposal distribution.
    samples = sampler(n)
    # Evaluate the integrand at the samples and divide by the proposal
    # density at those samples (the importance weights).
    sampled_values = function(samples) / pdf(samples)
    # Average along the sample axis (axis=1, which supports batching).
    estimates = np.mean(sampled_values, axis=1)
    # Return the mean of the weighted samples as the estimate of the integral.
    return np.array(estimates)
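# Usage sketch: estimate E[x**2] under a standard normal by sampling from the
# same normal, so the weights function(x)/pdf(x) reduce to x**2. The sampler
# returns shape (1, n) because the estimator averages along axis=1. Expected
# result: approximately array([1.0]).
#
#   from scipy import stats
#   est = importance_sampling_integrator(
#       function=lambda x: x**2 * stats.norm.pdf(x),
#       pdf=stats.norm.pdf,
#       sampler=lambda n: np.random.randn(1, n),
#   )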
| 15,692
|
def r1_gradient_penalty_loss(discriminator,
real_data,
mask=None,
norm_mode='pixel',
loss_scaler=None,
use_apex_amp=False):
"""Calculate R1 gradient penalty for WGAN-GP.
R1 regularizer comes from:
"Which Training Methods for GANs do actually Converge?" ICML'2018
Diffrent from original gradient penalty, this regularizer only penalized
gradient w.r.t. real data.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
mask (Tensor): Masks for inpainting. Default: None.
norm_mode (str): This argument decides along which dimension the norm
of the gradients will be calculated. Currently, we support ["pixel"
, "HWC"]. Defaults to "pixel".
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.shape[0]
real_data = real_data.clone().requires_grad_()
disc_pred = discriminator(real_data)
if loss_scaler:
disc_pred = loss_scaler.scale(disc_pred)
elif use_apex_amp:
from apex.amp._amp_state import _amp_state
_loss_scaler = _amp_state.loss_scalers[0]
disc_pred = _loss_scaler.loss_scale() * disc_pred.float()
gradients = autograd.grad(
outputs=disc_pred,
inputs=real_data,
grad_outputs=torch.ones_like(disc_pred),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if loss_scaler:
# unscale the gradient
inv_scale = 1. / loss_scaler.get_scale()
gradients = gradients * inv_scale
elif use_apex_amp:
inv_scale = 1. / _loss_scaler.loss_scale()
gradients = gradients * inv_scale
if mask is not None:
gradients = gradients * mask
if norm_mode == 'pixel':
gradients_penalty = ((gradients.norm(2, dim=1))**2).mean()
elif norm_mode == 'HWC':
gradients_penalty = gradients.pow(2).reshape(batch_size,
-1).sum(1).mean()
else:
raise NotImplementedError(
'Currently, we only support ["pixel", "HWC"] '
f'norm mode but got {norm_mode}.')
if mask is not None:
gradients_penalty /= torch.mean(mask)
return gradients_penalty
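# Usage sketch with a toy discriminator (shapes are illustrative only):
#
#   disc = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 1))
#   real = torch.randn(4, 3, 8, 8)
#   loss_r1 = r1_gradient_penalty_loss(disc, real, norm_mode='HWC')
#   loss_r1.backward()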
| 15,693
|
def stop_all():
"""stop all running animations. see pygame_animation.Animation.stop for more information."""
while _running:
_running[0].stop(noerror=True)
| 15,694
|
def assert_synced_channel_state(
token_network_identifier: TokenNetworkID,
app0: App,
balance0: Balance,
pending_locks0: List[HashTimeLockState],
app1: App,
balance1: Balance,
pending_locks1: List[HashTimeLockState],
) -> None:
""" Assert the values of two synced channels.
Note:
This assert does not work for an intermediate state, where one message
hasn't been delivered yet or has been completely lost."""
# pylint: disable=too-many-arguments
channel0 = get_channelstate(app0, app1, token_network_identifier)
channel1 = get_channelstate(app1, app0, token_network_identifier)
assert channel0.our_state.contract_balance == channel1.partner_state.contract_balance
assert channel0.partner_state.contract_balance == channel1.our_state.contract_balance
total_token = channel0.our_state.contract_balance + channel1.our_state.contract_balance
our_balance0 = channel.get_balance(channel0.our_state, channel0.partner_state)
partner_balance0 = channel.get_balance(channel0.partner_state, channel0.our_state)
assert our_balance0 + partner_balance0 == total_token
our_balance1 = channel.get_balance(channel1.our_state, channel1.partner_state)
partner_balance1 = channel.get_balance(channel1.partner_state, channel1.our_state)
assert our_balance1 + partner_balance1 == total_token
locked_amount0 = sum(lock.amount for lock in pending_locks0)
locked_amount1 = sum(lock.amount for lock in pending_locks1)
assert_balance(channel0, balance0, locked_amount0)
assert_balance(channel1, balance1, locked_amount1)
# a participant's outstanding is the other's pending locks.
assert_locked(channel0, pending_locks0)
assert_locked(channel1, pending_locks1)
assert_mirror(channel0, channel1)
assert_mirror(channel1, channel0)
| 15,695
|
def visualize(args):
"""Return the visualized output"""
ret = ""
cmd_list = json.load(args.results)['cmd_list']
cmd_list = util.filter_cmd_list(cmd_list, args.labels_to_include, args.labels_to_exclude)
(cmd_list, label_map) = util.translate_dict(cmd_list, args.label_map)
for cmd in cmd_list:
values = []
if 'jobs' in cmd:
for job in cmd['jobs']:
if 'results' in job:
for res in job['results']:
match_list = re.findall(args.parse_regex, res['stdout'])
if res['success'] and len(match_list) > 0:
for match in match_list:
values.append(args.py_type(match))
else:
values.append("N/A")
succeed_values = util.extract_succeed_results(cmd, args.parse_regex, args.py_type)
mean = util.mean(succeed_values)
std = util.standard_deviation(succeed_values)
if args.csv:
sep = args.csv_separator
ret += "%s%s %.4f%s %.4f" % (label_map[cmd['label']], sep, mean, sep, std)
else:
ret += "%s: %s" % (label_map[cmd['label']], values)
if len(succeed_values) > 0:
ret += " %.4f" % mean
ret += " (%.4f)" % std
ret += "\n"
return ret
| 15,696
|
def send_request(apikey, key_root, data, endpoint):
"""Send a request to the akismet server and return the response."""
url = 'http://%s%s/%s/%s' % (
key_root and apikey + '.' or '',
AKISMET_URL_BASE,
AKISMET_VERSION,
endpoint
)
    try:
        response = open_url(url, data=url_encode(data))
    except Exception:
        # request failed; treat as "no response"
        return
try:
return response.data.strip()
finally:
response.close()
| 15,697
|
def request_set_bblk_trace_options(*args):
"""
request_set_bblk_trace_options(options)
Post a 'set_bblk_trace_options()' request.
@param options (C++: int)
"""
return _ida_dbg.request_set_bblk_trace_options(*args)
| 15,698
|
def range_str(values: iter) -> str:
"""
Given a list of integers, returns a terse string expressing the unique values.
Example:
indices = [0, 1, 2, 3, 4, 7, 8, 11, 15, 20]
range_str(indices)
>> '0-4, 7-8, 11, 15 & 20'
:param values: An iterable of ints
:return: A string of unique value ranges
"""
trial_str = ''
    values = sorted(set(values))  # sets are unordered; range detection requires sorted values
for i in range(len(values)):
if i == 0:
trial_str += str(values[i])
elif values[i] - (values[i - 1]) == 1:
if i == len(values) - 1 or values[i + 1] - values[i] > 1:
trial_str += f'-{values[i]}'
else:
trial_str += f', {values[i]}'
# Replace final comma with an ampersand
k = trial_str.rfind(',')
if k > -1:
trial_str = f'{trial_str[:k]} &{trial_str[k + 1:]}'
return trial_str
| 15,699
|