| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_association_end_updates(create, diagram):
"""Test association end navigability connected to a class."""
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem)
connect(a, a.head, c1)
c = get_connected(a, a.head)
assert c is c1
connect(a, a.tail, c2)
c = get_connected(a, a.tail)
assert c is c2
assert a.subject.memberEnd, a.subject.memberEnd
assert a.subject.memberEnd[0] is a.head_subject
assert a.subject.memberEnd[1] is a.tail_subject
assert a.subject.memberEnd[0].name is None
a.subject.memberEnd[0].name = "blah"
diagram.update_now((a,))
assert a.head_end._name == "+ blah", a.head_end.get_name()
| 5,336,800
|
def get_voca(base_persons,
vertices,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create VOCA model with specific parameters.
Parameters:
----------
base_persons : int
Number of base persons (subjects).
vertices : int
Number of 3D geometry vertices.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = VOCA(
base_persons=base_persons,
vertices=vertices,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
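# Usage sketch added for illustration; the argument values below are assumptions for the
# example only, not canonical defaults from this file.
demo_voca_net = get_voca(base_persons=8, vertices=5023, pretrained=False)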
| 5,336,801
|
def _l2_project_reference(z_p, p, z_q):
"""Projects distribution (z_p, p) onto support z_q under L2-metric over CDFs.
The supports z_p and z_q are specified as tensors of distinct atoms (given
in ascending order).
Let Kq be len(z_q) and Kp be len(z_p). This projection works for any
support z_q, in particular Kq need not be equal to Kp.
Args:
z_p: Tensor holding support of distribution p, shape `[batch_size, Kp]`.
p: Tensor holding probability values p(z_p[i]), shape `[batch_size, Kp]`.
z_q: Tensor holding support to project onto, shape `[Kq]`.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
# Broadcasting of tensors is used extensively in the code below. To avoid
# accidental broadcasting along unintended dimensions, tensors are defensively
# reshaped to have equal number of dimensions (3) throughout and intended
# shapes are indicated alongside tensor definitions. To reduce verbosity,
# extra dimensions of size 1 are inserted by indexing with `None` instead of
# `tf.expand_dims()` (e.g., `x[:, None, :]` reshapes a tensor of shape
# `[k, l]' to one of shape `[k, 1, l]`).
# Extract vmin and vmax and construct helper tensors from z_q
vmin, vmax = z_q[0], z_q[-1]
d_pos = tf.concat([z_q, vmin[None]], 0)[1:] # 1 x Kq x 1
d_neg = tf.concat([vmax[None], z_q], 0)[:-1] # 1 x Kq x 1
# Clip z_p to be in new support range (vmin, vmax).
z_p = tf.clip_by_value(z_p, vmin, vmax)[:, None, :] # B x 1 x Kp
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[None, :, None] # z_q[i+1] - z_q[i]. 1 x Kq x 1
d_neg = (z_q - d_neg)[None, :, None] # z_q[i] - z_q[i-1]. 1 x Kq x 1
z_q = z_q[None, :, None] # 1 x Kq x 1
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = tf.where(d_neg > 0, 1./d_neg, tf.zeros_like(d_neg)) # 1 x Kq x 1
d_pos = tf.where(d_pos > 0, 1./d_pos, tf.zeros_like(d_pos)) # 1 x Kq x 1
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]. B x Kq x Kp
d_sign = tf.cast(delta_qp >= 0., dtype=p.dtype) # B x Kq x Kp
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
# Shape B x Kq x Kp.
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
p = p[:, None, :] # B x 1 x Kp.
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * p, 2)
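# Usage sketch added for illustration (assumes TensorFlow 2 is available): project a
# two-atom distribution onto a three-atom support; each output row still sums to one.
import tensorflow as tf

z_p_demo = tf.constant([[0.0, 1.0]])     # support of p, shape [batch=1, Kp=2]
p_demo = tf.constant([[0.25, 0.75]])     # probabilities, shape [1, 2]
z_q_demo = tf.constant([0.0, 0.5, 1.0])  # target support, shape [Kq=3]
print(_l2_project_reference(z_p_demo, p_demo, z_q_demo))  # shape [1, 3], rows sum to 1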
| 5,336,802
|
def _make_blocksizes(bricksize, surveysize, nlods, dtype, factor=(1,1,1), verbose=None):
"""
CURRENTLY NOT USED.
Calculate the minimum blocksize to read at each lod level. Clip to
the survey size. Also compute the memory needed to hold one buffer
for each lod. Note that the genlod algorithm currently assumes
that the block size is the same for all levels except for the
clipping. And it currently handles clipping itself and might not
like us to do so. Currently this function is not very useful.
"""
blocksizes = np.zeros((nlods, 3), dtype=np.int64)
ss = np.array(surveysize, dtype=np.int64)
bs = np.array([2*factor[0]*bricksize[0],
2*factor[1]*bricksize[1],
ss[2]], dtype=np.int64)
iterations = 0
for lod in range(nlods):
bs = np.minimum(bs, ss)
blocksizes[lod] = bs
iterations += np.prod((ss+bs-1) // bs)
ss = (ss + 1) // 2
bytesused = np.sum(np.prod(blocksizes, axis=1)) * int(np.dtype(dtype).itemsize)
returntype = namedtuple("BlockSizeInfo", "blocksizes bytesused iterations")
result = returntype(blocksizes, bytesused, iterations)
print(result)
return result
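# Usage sketch added for illustration with made-up sizes: 64^3 bricks, a 512x640x1000
# survey, 3 LOD levels, float32 samples.
demo_info = _make_blocksizes((64, 64, 64), (512, 640, 1000), nlods=3, dtype=np.float32)
print(demo_info.bytesused, demo_info.iterations)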
| 5,336,803
|
def kit2fiff():
"""Convert KIT files to the fiff format.
The recommended way to use the GUI is through bash with::
$ mne kit2fiff
"""
_check_mayavi_version()
from ._backend import _check_backend
_check_backend()
from ._kit2fiff_gui import Kit2FiffFrame
gui = Kit2FiffFrame()
gui.configure_traits()
return gui
| 5,336,804
|
def sweep_dec_given_x(full_model, z_dec_model, sample1, sample2, sample_layer_name,
sweep_z_samples=False,
nb_samples=10,
nargout=1,
tqdm=tqdm):
"""
sweep the latent space given two samples in the original space
specifically, get z_mu = enc(x) for both samples, and sweep between those z_mus
"sweep_z_samples" does a sweep between two samples, rather than between two z_mus.
Example:
sample_layer_name='img-img-dense-vae_ae_dense_sample'
"""
# get a model that also outputs the samples z
full_output = [*full_model.outputs,
full_model.get_layer(sample_layer_name).get_output_at(1)]
full_model_plus = keras.models.Model(full_model.inputs, full_output)
# get full predictions for these samples
pred1 = full_model_plus.predict(sample1[0])
pred2 = full_model_plus.predict(sample2[0])
img1 = sample1[0]
img2 = sample2[0]
# sweep range
x_range = np.linspace(0, 1, nb_samples)
# prepare outputs
outs = [None] * nb_samples
for xi, x in enumerate(tqdm(x_range)):
if sweep_z_samples:
z = x * pred1[3] + (1-x) * pred2[3]
else:
z = x * pred1[1] + (1-x) * pred2[1]
if isinstance(sample1[0], (list, tuple)): # assuming prior or something like that
outs[xi] = z_dec_model.predict([z, *sample1[0][1:]])
else:
outs[xi] = z_dec_model.predict(z)
if nargout == 1:
return outs
else:
return (outs, [pred1, pred2])
| 5,336,805
|
def process(observation, current_game_state):
"""
Args:
observation: An observation, which agents get as an input from kaggle environment.
current_game_state: An object provided by kaggle to simplify game info extraction.
Returns:
processed_observations: A prepared observation to save to the buffer.
"""
global units_actions_dict
player = current_game_state.players[observation.player]
opponent = current_game_state.players[(observation.player + 1) % 2]
width, height = current_game_state.map.width, current_game_state.map.height
shift = int((MAX_MAP_SIDE - width) / 2) # to make all feature maps 32x32
turn = current_game_state.turn
player_units_coords = {}
player_city_tiles_coords = {}
player_research_points = player.research_points
player_city_tiles_count = player.city_tile_count
player_cities_count = len(player.cities)
player_units_count = len(player.units)
player_workers_count = 0
player_carts_count = 0
for unit in player.units:
if unit.is_worker():
player_workers_count += 1
elif unit.is_cart():
player_carts_count += 1
else:
raise ValueError
opponent_research_points = opponent.research_points
opponent_city_tiles_count = opponent.city_tile_count
opponent_cities_count = len(opponent.cities)
opponent_units_count = len(opponent.units)
opponent_workers_count = 0
opponent_carts_count = 0
for unit in opponent.units:
if unit.is_worker():
opponent_workers_count += 1
elif unit.is_cart():
opponent_carts_count += 1
else:
raise ValueError
current_cycle, to_next_day, to_next_night, is_night = get_timing(turn)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is wood
# 2 - wood amount
# 3 - is coal
# 4 - coal amount
# 5 - is uranium
# 6 - uranium amount
# 7 - fuel equivalent
# 8 - if a resource is available for the player, 1 when ready
# 9 - a road lvl
# 10 - 19 for coordinates
# number_of_resources_layers = 20
# A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# for yy in range(height):
# for xx in range(width):
# cell = current_game_state.map.get_cell(xx, yy)
# x, y = yy + shift, xx + shift
# if cell.has_resource():
# A1[0, x, y] = 1 # a resource at the point
# resource = cell.resource
# if resource.type == "wood":
# A1[1, x, y] = 1
# wood_amount = resource.amount
# A1[2, x, y] = wood_amount / WOOD_BOUND
# fuel = wood_amount * WOOD_FUEL_VALUE
# A1[8, x, y] = 1 # wood is always available
# elif resource.type == "coal":
# A1[3, x, y] = 1
# coal_amount = resource.amount
# A1[4, x, y] = coal_amount / COAL_BOUND
# fuel = coal_amount * COAL_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / COAL_RESEARCH_POINTS, 1)
# elif resource.type == "uranium":
# A1[5, x, y] = 1
# uran_amount = resource.amount
# A1[6, x, y] = uran_amount / URAN_BOUND
# fuel = uran_amount * URAN_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / URAN_RESEARCH_POINTS, 1)
# else:
# raise ValueError
# A1[7, x, y] = fuel / FUEL_BOUND
# A1[9, x, y] = cell.road / MAX_ROAD
# A1[10:15, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
# A1[15:20, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is available
# 2 - amount
# 3 - fuel equivalent
# 4 - a road lvl
# 5 - 14 for coordinates
# 15 - next available resource
number_of_resources_layers = 16
A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
for yy in range(height):
for xx in range(width):
cell = current_game_state.map.get_cell(xx, yy)
x, y = yy + shift, xx + shift
if cell.has_resource():
A1[0, x, y] = 1 # a resource at the point
resource = cell.resource
fuel = 0
if resource.type == "wood":
A1[1, x, y] = 1
wood_amount = resource.amount
A1[2, x, y] = wood_amount / WOOD_BOUND
fuel = wood_amount * WOOD_FUEL_VALUE
elif resource.type == "coal":
if player_research_points >= COAL_RESEARCH_POINTS:
A1[1, x, y] = 1
coal_amount = resource.amount
A1[2, x, y] = coal_amount / COAL_BOUND
fuel = coal_amount * COAL_FUEL_VALUE
else:
A1[15, x, y] = 1
elif resource.type == "uranium":
if player_research_points >= URAN_RESEARCH_POINTS:
A1[1, x, y] = 1
uran_amount = resource.amount
A1[2, x, y] = uran_amount / URAN_BOUND
fuel = uran_amount * URAN_FUEL_VALUE
elif player_research_points >= URAN_RESEARCH_POINTS - 50:
A1[15, x, y] = 1
else:
raise ValueError
A1[3, x, y] = fuel / FUEL_BOUND
A1[4, x, y] = cell.road / MAX_ROAD
A1[5:10, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
A1[10:15, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# define city tiles, 0 or 1 for bool, 0 to around 1 for float;
# layers:
number_of_main_layers = 39
A2 = np.zeros((number_of_main_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# 0 - a unit
# 1 - is player
# 2 - is opponent
# 3 - at the city tile
# 4 - first place in the city tile is occupied by the unit - fill later (in get_separate_outputs)
# 5 - second place is occupied by the unit, and the first was occupied before - fill later
# 6 - third place is occupied - fill later
# 7 - forth place is occupied - fill later
# 8 - the place number is more than 4th - fill later
# 9 - is worker - X0
# 10 - is cart - X1
# 11 - can act - X2
# 12 - can build - X3
# 13 - cargo wood - X4
# 14 - cargo coal - X5
# 15 - cargo uranium - X6
# 16 - cargo space left - X7
# 17 - fuel equivalent - X8
# 18 - is city tile
# 19 - is player
# 20 - is opponent
# 21 - can act
# 22 - amount of city tiles in the city, which the city tile belongs to
# 23 - current city upkeep
# 24 - fuel amount
# 25 - ratio if city can survive, 1 and more means it can
# 26 - amount of all friendly city tiles
# 27 - amount of cities
# 28 - units build limit reached (workers + carts == city tiles)
# 29 - number of workers
# 30 - number of carts
# 31 - number of friendly units
# 32 - research progress for coal
# 33 - research progress for uranium
# 34 - progress (from 0 to 1) until next day
# 35 - progress until next night
# 36 - progress until finish
# 37 - is night
# 38 - current cycle
# start with city tiles to know their positions to fill units cells
for k, city in list(player.cities.items()) + list(opponent.cities.items()):
if city.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif city.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
current_light_upkeep = city.get_light_upkeep()
current_fuel = city.fuel
current_city_tiles_count = 0
for _ in city.citytiles:
current_city_tiles_count += 1
for city_tile in city.citytiles:
# city tile group
y, x = city_tile.pos.x + shift, city_tile.pos.y + shift
A2[18, x, y] = 1
if city_tile.team == player.team:
A2[19, x, y] = 1
elif city_tile.team == opponent.team:
A2[20, x, y] = 1
else:
raise ValueError
if city_tile.can_act():
A2[21, x, y] = 1
if city_tile.team == player.team:
player_city_tiles_coords[f"ct_{x}_{y}"] = (x, y) # to save only the operable units
A2[22, x, y] = current_city_tiles_count / CITY_TILES_IN_CITY_BOUND
A2[23, x, y] = UPKEEP_BOUND_PER_TILE / current_light_upkeep
A2[24, x, y] = current_fuel / FUEL_BOUND
A2[25, x, y] = min(1, current_fuel / (min(10, to_next_day) * current_light_upkeep)) # ratio to survive
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
for unit in player.units + opponent.units:
# unit group
if unit.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif unit.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
y, x = unit.pos.x + shift, unit.pos.y + shift
A2[0, x, y] = 1
if unit.team == player.team:
A2[1, x, y] = 1
elif unit.team == opponent.team:
A2[2, x, y] = 1
else:
raise ValueError
is_unit_at_home = 1 if A2[18, x, y] == 1 else 0
A2[3, x, y] = is_unit_at_home
X = np.zeros(9, dtype=np.half)
if unit.is_worker():
X[0] = 1
elif unit.is_cart():
X[1] = 1
else:
raise ValueError
if unit.can_act():
X[2] = 1
if unit.can_build(current_game_state.map):
X[3] = 1
X[4] = unit.cargo.wood / WORKERS_CARGO
X[5] = unit.cargo.coal / WORKERS_CARGO
X[6] = unit.cargo.uranium / WORKERS_CARGO
X[7] = unit.get_cargo_space_left() / WORKERS_CARGO
X[8] = (unit.cargo.wood * WOOD_FUEL_VALUE +
unit.cargo.coal * COAL_FUEL_VALUE +
unit.cargo.uranium * URAN_FUEL_VALUE) / FUEL_BOUND
# many units can share the same position when at home (on a city tile),
# so save the unique unit parameters in the X array and store them in the dictionary if the unit is at home;
# if the unit is not at home it has a unique position, so put X directly inside the A2 array
if is_unit_at_home:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (X, unit.is_worker()))
else:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (None, unit.is_worker()))
A2[9:18, x, y] = X
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
A = np.concatenate((A2, A1), axis=0)
# define headers
# layers:
# 0 - an operable one
# 1 - is worker
# 2 - is cart
# 3 - is city tile
# 4 - prev pos for units
# 5 - prev prev pos for units
number_of_header_layers = 6
units_headers = {}
if player_units_coords:
for k, ((x, y), (X, is_worker)) in player_units_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
worker = np.array([1, 1, 0, 0], dtype=np.half)
cart = np.array([1, 0, 1, 0], dtype=np.half)
head[:4, x, y] = worker if is_worker else cart
if k in units_actions_dict.keys():
units_actions_dict[k].append((x, y))
unit_prev_pos = units_actions_dict[k][-2]
if len(units_actions_dict[k]) > 2:
unit_prev_prev_pos = units_actions_dict[k][-3]
else:
unit_prev_prev_pos = units_actions_dict[k][-2]
else:
units_actions_dict[k] = []
units_actions_dict[k].append((x, y))
unit_prev_pos = (x, y)
unit_prev_prev_pos = (x, y)
head[4, unit_prev_pos[0], unit_prev_pos[1]] = 1
head[5, unit_prev_prev_pos[0], unit_prev_prev_pos[1]] = 1
head = np.moveaxis(head, 0, -1)
units_headers[k] = (head, (x, y), X, is_worker)
city_tiles_headers = {}
if player_city_tiles_coords:
for k, (x, y) in player_city_tiles_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
head[:4, x, y] = np.array([1, 0, 0, 1], dtype=np.half)
head = np.moveaxis(head, 0, -1)
city_tiles_headers[k] = head
B = np.moveaxis(A, 0, -1)
outputs = {"stem": B,
"units_headers": units_headers,
"city_tiles_headers": city_tiles_headers}
return outputs
| 5,336,806
|
def test_cli_config_debug(
app: Cli, valiant_app_title: str, valiant_version: str, valiant_license: str
) -> None:
"""Test the default output from the `config` command with -vvv."""
command = app.find("config")
command_tester = CommandTester(command)
result = command_tester.execute("-vvv")
assert result == 0
output = command_tester.io.fetch_output()
assert output.find("All config data:\n")
assert output.find("{'tool': {'valiant': {")
| 5,336,807
|
def source_files(goto, wkdir, srcdir=None):
"""Source files appearing in symbol table.
Source file path names in symbol table are absolute or relative to
wkdir. If srcdir is given, return only files under srcdir.
"""
wkdir = srcloct.abspath(wkdir)
srcs = [dfn['file']
for dfn in parse_symbol_table(symbol_table(goto), wkdir)]
srcs = [src for src in srcs if src and not srcloct.is_builtin(src)]
if srcdir:
srcdir = srcloct.abspath(srcdir)
srcs = [src for src in srcs if src.startswith(srcdir)]
return sorted(set(srcs))
| 5,336,808
|
def textBlurBackground(img, text, font, fontScale, textPos, textThickness=1, textColor=(0, 255, 0), kneral=(33, 33),
pad_x=3, pad_y=3):
"""
Draw text with background blured, control the blur value, with kernal(odd, odd)
@param img:(mat) which you want to draw text
@param text: (string) text you want draw
@param font: fonts face, like FONT_HERSHEY_COMPLEX, FONT_HERSHEY_PLAIN etc.
@param fontScale: (double) the size of text, how big it should be.
@param textPos: tuple(x,y) position where you want to draw text
@param textThickness:(int) fonts weight, how bold it should be.
@param textColor: tuple(BGR), values -->0 to 255 each
@param kneral: tuple(3,3) int as odd number: higher the value, more blurry background would be
@param pad_x: int(pixels) padding of in x direction
@param pad_y: int(pixels) padding of in y direction
@return: img mat, with text drawn, with background blured
call the function:
img =textBlurBackground(img, 'Blured Background Text', cv2.FONT_HERSHEY_COMPLEX, 0.9, (20, 60),2, (0,255, 0), (49,49), 13, 13 )
"""
(t_w, t_h), _ = cv.getTextSize(text, font, fontScale, textThickness) # getting the text size
x, y = textPos
blur_roi = img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] # cropping the text background
img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] = cv.blur(blur_roi,
kneral) # merging the blurred background into img
cv.putText(img, text, textPos, font, fontScale, textColor, textThickness)
# cv.imshow('blur roi', blur_roi)
# cv.imshow('blured', img)
return img
| 5,336,809
|
def local_response_normalization_2d_v2(in_vw, alpha, k, beta, n):
"""
cross-channel local response normalization for 2D feature maps
- input is bc01
output[i]
= value of the i-th channel
= input[i] / (k + alpha * sum(input[j]^2 for j)) ** beta
- where j is over neighboring channels (from i - n // 2 to i + n // 2)
This code is adapted from pylearn2.
https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt
"""
assert n % 2 == 1, "n must be odd"
in_var = in_vw.variable
b, ch, r, c = in_vw.symbolic_shape()
half_n = n // 2
input_sqr = T.sqr(in_var)
extra_channels = T.zeros((b, ch + 2 * half_n, r, c))
input_sqr = T.set_subtensor(extra_channels[:, half_n:half_n + ch, :, :],
input_sqr)
scale = k + alpha * treeano.utils.smart_sum([input_sqr[:, i:i + ch, :, :]
for i in range(n)])
scale = scale ** beta
return in_var / scale
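# Plain NumPy sketch of the same normalization, added for illustration only (the Theano
# code above is the actual implementation); zero-padding the channel axis there is
# equivalent to clipping the window at the channel boundaries here.
import numpy as np

def _lrn_2d_reference(x, alpha, k, beta, n):
    # x has shape (batch, channels, height, width); the sum runs over the n channels
    # centred on channel i, i.e. i - n // 2 .. i + n // 2, clipped to valid indices.
    half_n = n // 2
    out = np.empty(x.shape)
    for i in range(x.shape[1]):
        lo, hi = max(0, i - half_n), min(x.shape[1], i + half_n + 1)
        out[:, i] = x[:, i] / (k + alpha * np.sum(x[:, lo:hi] ** 2, axis=1)) ** beta
    return out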
| 5,336,810
|
def mhc_datasets(table='mhc_data', path='./iedb/', remove_c=False,
remove_u=False, remove_modes=False):
"""
Parameters: 'table' is the table the data is retrieved from
- must be 'mhc_data', 'mhc_test1', 'mhc_test2', or 'mhc_train'
'path' is where the database is stored
'remove_c' removes every sequence containing a 'c'
'remove_u' removes every sequence containing a 'u'
'remove_modes' removes the unusual modes of the dataset
If the table name is 'mhc_data' the entire remaining dataset is returned; otherwise,
returns (in order): the amino acid sequences, the -log10 of the binding affinities, and the alleles
"""
if table != 'mhc_data' and table != 'mhc_train' and table != 'mhc_test1' and table != 'mhc_test2':
raise Exception('table name ' + table + ' does not exist')
selection = '*'
if table != 'mhc_data':
selection = 'sequence, meas, mhc'
conn = sql.connect(os.path.join(path, 'mhc.db'))
c = conn.cursor()
c.execute(_create_query(selection, table, remove_c, remove_u, remove_modes))
dataset = np.array(c.fetchall())
conn.close()
if table == 'mhc_data':
return dataset
if table == 'mhc_train':
# Temporary solution to remove benchmark overlaps from train set:
off_limits = np.loadtxt(os.path.join(path, 'benchmark_ic50_sequences.csv'),
delimiter=',', dtype=str)
idx = ~np.array([(seq in off_limits) for seq in dataset[:, 0]]).astype(bool)
dataset = dataset[idx, :]
return dataset.T[0], -np.log10(dataset.T[1].astype(float)), dataset.T[2]
| 5,336,811
|
def add_new_user():
"""
This function adds a new user
:return: Response Code
"""
newuser = {}
if request.method == "POST":
try:
newuser['username'] = str(request.data.get('username').strip())
newuser['first_name'] = str(request.data.get('first_name').strip())
newuser['last_name'] = str(request.data.get('last_name').strip())
newuser['email'] = str(request.data.get('email').strip())
newuser['password'] = str(request.data.get('password').strip())
newuser['verification_code'] = str(request.data.get(
'verification_code').strip())
except Exception as e:
print(e)
abort(500)
user = User(**newuser)
user.save()
return make_response(jsonify(status=201, msg="User {} successfully added".format(user.username) +
" to database"), 201)
| 5,336,812
|
def has_wildcard(url) -> bool:
"""
Check if the url contains a wildcard in last subdomain.
:param url: The url to check
:type url: str
:return: True if the url contains a wildcard in the last subdomain, False otherwise
:rtype: bool
"""
subdomain = extract(url).subdomain
return subdomain.split(".")[0] == "*"
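# Usage sketch added for illustration (assumes `extract` is tldextract.extract, as the
# code above implies):
assert has_wildcard("*.example.com")
assert not has_wildcard("https://api.example.com")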
| 5,336,813
|
def tmle_calculator(y, ystar1, ystar0, ystara, h1w, h0w, haw, splits,
measure='ate', lower_bound=None, upper_bound=None):
"""Function to calculate TMLE estimates for SingleCrossfitTMLE, and DoubleCrossfitTMLE
"""
if measure in ["ate", "risk_difference"]:
# Unbounding if continuous outcome (ate)
if measure == "ate":
# Unbounding continuous outcomes
y = tmle_unit_unbound(y, mini=lower_bound, maxi=upper_bound)
ystar1 = tmle_unit_unbound(ystar1, mini=lower_bound, maxi=upper_bound)
ystar0 = tmle_unit_unbound(ystar0, mini=lower_bound, maxi=upper_bound)
ystara = tmle_unit_unbound(ystara, mini=lower_bound, maxi=upper_bound)
# Point Estimate
estimate = np.mean(ystar1 - ystar0)
# Variance estimate
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
haws = haw[splits == s]
ic = haws * (ys - ystaras) + (ystar1s - ystar0s) - estimate
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'risk_ratio':
# Point Estimate
estimate = np.mean(ystar1) / np.mean(ystar0)
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = (1/np.mean(ystar1s) * (h1ws * (ys - ystaras)) + ystar1s - np.mean(ystar1s) -
(1/np.mean(ystar0s) * (-1 * h0ws * (ys - ystaras)) + ystar0s - np.mean(ystar0s)))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'odds_ratio':
# Point Estimate
estimate = (np.mean(ystar1) / (1-np.mean(ystar1))) / (np.mean(ystar0) / (1-np.mean(ystar0)))
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = ((1-np.mean(ystar1s))/np.mean(ystar1s)*(h1ws*(ys - ystaras) + ystar1s) -
(1-np.mean(ystar0s))/np.mean(ystar0s)*(-1*h0ws*(ys - ystaras) + ystar0s))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
else:
raise ValueError("Invalid measure requested within function: tmle_calculator. Input measure is " +
str(measure) + " but only 'ate', 'risk_difference', 'risk_ratio', and "
"'odds_ratio' are accepted.")
| 5,336,814
|
def gen_gap(Pn, T, Q):
"""Runs the generalization gap test. This test
simply checks the difference between the likelihood
assigned to the training set versus that assigned to
a held out test set.
Inputs:
Pn: (n X d) np array containing the held out test sample
of dimension d
T: (l X d) np array containing the training sample of
dimension d
Q: trained model of type sklearn.neighbors.KernelDensity
Outputs:
log_lik_gap: scalar representing the difference of the log
likelihoods of Pn and T
"""
return Q.score(T) - Q.score(Pn)
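# Usage sketch added for illustration; KernelDensity comes from scikit-learn
# (sklearn.neighbors), and the data here is synthetic.
import numpy as np
from sklearn.neighbors import KernelDensity

rng_demo = np.random.RandomState(0)
T_demo = rng_demo.normal(size=(200, 2))   # training sample
Pn_demo = rng_demo.normal(size=(200, 2))  # held-out sample of the same size
Q_demo = KernelDensity(bandwidth=0.5).fit(T_demo)
print(gen_gap(Pn_demo, T_demo, Q_demo))   # small when the model does not overfit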
| 5,336,815
|
def saveWithGDAL(path, image, writeHeader=True, interleave='BSQ'):
"""
Write this image to a file.
*Arguments*:
- path = the path to save to.
- image = the image to write.
- writeHeader = true if a .hdr file will be written. Default is true.
- interleave = data interleaving for ENVI files. Default is 'BSQ', other options are 'BIL' and 'BIP'.
"""
# find GDAL
try:
import osgeo.gdal as gdal
gdal.PushErrorHandler('CPLQuietErrorHandler') # ignore GDAL warnings
except:
assert False, "Error - please install GDAL before using saveWithGDAL(...)"
# make directories if need be
makeDirs( path )
path, ext = os.path.splitext(path)
if "hdr" in str.lower(ext):
ext = ".dat"
#get image driver
driver = 'ENVI'
if '.tif' in str.lower(ext):
driver = 'GTiff'
#todo - add support for png and jpg??
#set byte order
if 'little' in sys.byteorder:
image.header['byte order'] = 0
else:
image.header['byte order'] = 1
#parse data type from image array
data = image.data
dtype = gdal.GDT_Float32
image.header["data type"] = 4
image.header["interleave"] = str.lower(interleave)
if image.data.dtype == np.int_ or image.data.dtype == np.int32: # np.int (removed in newer NumPy) replaced with np.int_
dtype = gdal.GDT_Int32
image.header["data type"] = 3
if image.data.dtype == np.int16:
dtype = gdal.GDT_Int16
image.header["data type"] = 2
if image.data.dtype == np.uint8:
data = np.array(image.data, np.dtype('b'))
dtype = gdal.GDT_Byte
image.header["data type"] = 1
if image.data.dtype == np.uint or image.data.dtype == np.uint32:
dtype = gdal.GDT_UInt32
image.header["data type"] = 13
if image.data.dtype == np.uint16:
dtype = gdal.GDT_UInt16
image.header["data type"] = 12
#write
if driver == 'GTiff':
output = gdal.GetDriverByName(driver).Create( path + ext, image.xdim(), image.ydim(), image.band_count(), dtype)
else:
output = gdal.GetDriverByName(driver).Create( path + ext, image.xdim(), image.ydim(), image.band_count(), dtype, ['INTERLEAVE=%s'%interleave] )
#write bands
for i in range(image.band_count()):
rb = output.GetRasterBand(i+1)
rb.WriteArray(data[:, :, i].T)
rb = None #close band
output = None #close file
if writeHeader and not image.header is None: #write .hdr file
image.push_to_header()
saveHeader(path + ".hdr", image.header)
# save geotransform/project information
output = gdal.Open(path + ext, gdal.GA_Update)
output.SetGeoTransform(image.affine)
if not image.projection is None:
output.SetProjection(image.projection.ExportToPrettyWkt())
output = None
| 5,336,816
|
def get_shapes(node, intermediate=False, exclusive=False):
"""Get the shapes of given node.
Args:
node (str): Node to query its shapes
intermediate (bool): Get intermediate shapes when True.
exclusive (bool): Only return the intermediate shapes if True.
Please note that the intermediate flag must be True as well.
Returns:
list: The shapes found below given node.
"""
# if given node is a list, assume first element
if isinstance(node, list):
node = node[0]
LOG.info("Given node is a list. Using first element.")
# return as list if given node is already a shape
if cmds.objectType(node, isAType="shape"):
return [node]
# query shapes
shapes = cmds.listRelatives(
node, shapes=True, type="deformableShape", path=True
)
shapes = shapes or []
# separate shapes orig
orig = []
for each in list(shapes): # duplicated `shapes` object to remove safely
if cmds.ls(each, intermediateObjects=True):
orig.append(each)
shapes.remove(each)
if not intermediate:
return shapes
if exclusive:
return orig
return shapes + orig
| 5,336,817
|
def solve_mip_mlp_elided(verif_instance):
"""Compute optimal attack loss for MLPs, via exactly solving MIP."""
assert MIP_SOLVERS, 'No MIP solvers installed with cvxpy.'
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, bounds, obj, obj_const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
on_state = []
post_activations = [cp.Variable((1, layer_sizes[0]))]
pre_activations = []
constraints = []
for (i, param) in enumerate(params):
W, b = param
b = jnp.reshape(b, (1, b.size))
on_state.append(cp.Variable((1, b.size), boolean=True))
pre_activations.append(cp.Variable((1, b.size)))
post_activations.append(cp.Variable((1, b.size)))
# Linear relaxation of ReLU constraints
constraints += [pre_activations[-1] == post_activations[-2]@W + b]
constraints += [post_activations[-1] >= pre_activations[-1]]
constraints += [post_activations[-1] >= 0]
# If ReLU is off, post activation is non-positive. Otherwise <= ub
constraints += [post_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub)]
# If ReLU is off, pre-activation is non-positive. Otherwise <= ub_pre
constraints += [pre_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub_pre)]
# If ReLU is on, post-activation == pre-activation
# Define <= here, >= constraint added above.
constraints += [post_activations[-1]-pre_activations[-1] <=
cp.multiply(1-on_state[-1],
bounds[i+1].ub-bounds[i+1].lb_pre)]
# Optionally, include IBP bounds to speed up MIP solving
# Post activations are within bounds
# i=0 case encodes input constraint
for (i, post) in enumerate(post_activations):
constraints += [post <= bounds[i].ub]
constraints += [post >= bounds[i].lb]
# Pre-activations are within bounds
for (i, pre) in enumerate(pre_activations):
constraints += [pre <= bounds[i+1].ub_pre]
constraints += [pre >= bounds[i+1].lb_pre]
# Set objective over final post-activations
obj_cp = cp.sum(cp.multiply(obj, post_activations[-1]))
# Define and solve problem
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
# NB: Originally, we used cp.ECOS_BB here, but cvxpy 1.1 drops support,
# so we just use the first available MIP solver (which is dependent on user
# installation).
problem.solve(solver=MIP_SOLVERS[0])
# Report results
info = {
'problem': problem,
'post': post_activations,
'pre': pre_activations,
}
return obj_cp.value + obj_const, info
| 5,336,818
|
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
"""Compute the lowest of the survival function and the CDF of the exact KL
divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
over a sample x~N(mu1,s1). If we are computing the KL divergence
accurately, the exact value should fall squarely in the sample, and the
tail probabilities should be relatively large.
"""
if prngstate is None:
raise TypeError('Must explicitly specify numpy.random.RandomState')
mu1 = mu2 = 0
s1 = 1
s2 = 2
exact = gaussian_kl_divergence(mu1, s1, mu2, s2)
sample = prngstate.normal(mu1, s1, n)
lpdf1 = gaussian_log_pdf(mu1, s1)
lpdf2 = gaussian_log_pdf(mu2, s2)
estimate, std = kl.kullback_leibler(sample, lpdf1, lpdf2)
# This computes the minimum of the left and right tail probabilities of the
# exact KL divergence vs a gaussian fit to the sample estimate. There is a
# distinct negative skew to the samples used to compute `estimate`, so this
# statistic is not uniform. Nonetheless, we do not expect it to get too
# small.
return erfc(abs(exact - estimate) / std) / 2
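# Reference note added for illustration: assuming gaussian_kl_divergence implements the
# standard closed form for univariate Gaussians (with s1, s2 as standard deviations),
#   KL(N(mu1, s1^2) || N(mu2, s2^2)) = log(s2/s1) + (s1^2 + (mu1 - mu2)^2) / (2*s2^2) - 1/2,
# the exact value checked above for mu1 = mu2 = 0, s1 = 1, s2 = 2 is log(2) - 3/8 ≈ 0.318.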
| 5,336,819
|
def build_signature(inputs, outputs):
"""Build the signature for use when exporting the graph.
Args:
inputs: a dictionary from tensor name to tensor
outputs: a dictionary from tensor name to tensor
Returns:
The signature, a SignatureDef proto, specifies the input/output tensors
to bind when running prediction.
"""
signature_inputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in inputs.items()
}
signature_outputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in outputs.items()
}
signature_def = signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
| 5,336,820
|
def fuse_depthwise_conv2d(input_graph_def):
"""Modifies the provided graph by fusing a set of ops into a single
_FusedDepthwiseConv2d op.
DepthwiseConv2dNative + BiasAdd + Activation => _FusedDepthwiseConv2dNative
Args:
input_graph_def: A GraphDef containing a model.
Returns:
Modified graph with FusedDepthwiseConv2dNative ops generated, and modified
weights.
Raises:
ValueError: If the graph is badly formed with duplicate node names.
"""
# Two passes approach, first find pattern of
# DepthwiseConv2dNative + BiasAdd + Activation
# Then find pattern of
# DepthwiseConv2dNative + BiasAdd
graph_def = _fuse_depthwise_conv2d_with_match_function(
input_graph_def, _find_contraction_with_bias_and_activation)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_bias)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_activation)
return graph_def
| 5,336,821
|
async def test_item_search_properties_jsonb(app_client):
"""Test POST search with JSONB query (query extension)"""
items_resp = await app_client.get("/collections/naip/items")
assert items_resp.status_code == 200
first_item = items_resp.json()["features"][0]
# EPSG is a JSONB key
params = {"query": {"proj:epsg": {"eq": first_item["properties"]["proj:epsg"]}}}
print(params)
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 12
| 5,336,822
|
def _validate_device_classes(ext_auxiliary_devices: Collection[Type[Any]],
ext_primary_devices: Collection[Type[Any]],
ext_virtual_devices: Collection[Type[Any]],
package_name: str) -> None:
"""Validates the extension device classes.
Args:
ext_auxiliary_devices: Auxiliary device classes to validate.
ext_primary_devices: Primary device classes to validate.
ext_virtual_devices: Virtual device classes to validate.
package_name: Name of the package providing the extension classes.
Raises:
PackageRegistrationError: Device classes are invalid.
"""
_assert_subclasses(ext_auxiliary_devices, _AuxiliaryDeviceBase, package_name,
"auxiliary device")
_assert_subclasses(ext_primary_devices, _PrimaryDeviceBase, package_name,
"primary device")
_assert_subclasses(ext_virtual_devices, _VirtualDeviceBase, package_name,
"virtual device")
new_device_classes = tuple(itertools.chain(
ext_auxiliary_devices, ext_primary_devices, ext_virtual_devices))
known_device_classes = (extensions.auxiliary_devices
+ extensions.primary_devices
+ extensions.virtual_devices)
new_device_types = tuple(device_class.DEVICE_TYPE
for device_class in new_device_classes)
_assert_unique(new_device_types,
names_description="Device types",
classes_description="device classes",
package_name=package_name)
redefined_device_types = set.intersection(
{device_class.DEVICE_TYPE for device_class in new_device_classes},
{device_class.DEVICE_TYPE for device_class in known_device_classes})
if redefined_device_types:
raise errors.PackageRegistrationError(
f"Device types {redefined_device_types} are already defined in GDM.",
package_name=package_name)
conformance_issues = _get_device_class_conformance_issues(new_device_classes)
if conformance_issues:
issue_messages = []
for cls, issues in conformance_issues:
issue_message = "".join(f"\n\t{issue}" for issue in issues)
issue_messages.append(f"{cls}{issue_message}")
raise errors.PackageRegistrationError(
"The following device class(es) are incompliant with GDM "
"architecture:\n{}".format("\n".join(issue_messages)),
package_name=package_name)
| 5,336,823
|
def pandas_to_example_str(obj, *, local_data_model=None) -> str:
"""
Convert data frame to a Python source code string.
:param obj: data frame to convert.
:param local_data_model: data model to use.
:return: Python source code representation of obj.
"""
if local_data_model is None:
local_data_model = data_algebra.default_data_model
pd_module_name = local_data_model.presentation_model_name
if not local_data_model.is_appropriate_data_instance(obj):
raise TypeError("Expect obj to be local_data_model.pd.DataFrame")
obj = obj.reset_index(drop=True, inplace=False)
nrow = obj.shape[0]
pandas_string = pd_module_name + ".DataFrame({"
for k in obj.columns:
col = obj[k]
nulls = local_data_model.bad_column_positions(col)
cells = ["None" if nulls[i] else col[i].__repr__() for i in range(nrow)]
pandas_string = (
pandas_string + "\n " + k.__repr__() + ": [" + ", ".join(cells) + "],"
)
pandas_string = pandas_string + "\n })"
return pandas_string
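# Usage sketch added for illustration (assumes the data_algebra and pandas packages):
import pandas

d_demo = pandas.DataFrame({"x": [1, 2], "y": ["a", None]})
print(pandas_to_example_str(d_demo))  # prints a DataFrame({...}) source-code literal
                                      # with None in the null position of column 'y'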
| 5,336,824
|
def get_results_object_model(target_node, paths_dict, name_to_description, q1_doid_to_disease, probs=False):
"""
Returns pathway results as an object model
:param target_node: target_node DOID:1234
:param paths_dict: a dictionary (keys OMIM id's) with values (path_name,path_type)
:param name_to_description: a dictionary to translate between source_node and genetic condition name
:param q1_doid_to_disease: a dictionary to translate between target_node and disease name
:param probs: optional probability of the OMIM being the right one
:return: ``dict``
"""
ret_obj = dict()
source_node_list = paths_dict.keys()
if len(source_node_list) > 0:
if target_node in q1_doid_to_disease:
doid_name = q1_doid_to_disease[target_node]
else:
doid_name = target_node
ret_obj['target_disease'] = doid_name
ret_source_nodes_dict = dict()
ret_obj['source_genetic_conditions'] = ret_source_nodes_dict
source_node_names = []
for source_node in source_node_list:
if source_node in name_to_description:
source_node_names.append(name_to_description[source_node])
else:
source_node_names.append(source_node)
for source_node in source_node_list:
source_node_dict = {}
path_names, path_types = paths_dict[source_node]
if len(path_names) == 1:
path_list = []
path_list.append({'type': 'node',
'name': source_node,
'desc': name_to_description.get(source_node, '')})
path_names = path_names[0]
path_types = path_types[0]
for index in range(1, len(path_names) - 1):
if index % 2 == 1:
path_list.append({'type': 'rel',
'name': path_types[index]})
else:
path_list.append({'type': 'node',
'name': path_names[index],
'desc': get_node_property(path_names[index], 'name')})
path_list.append({'type': 'node',
'name': target_node,
'desc': q1_doid_to_disease.get(target_node, '')})
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
source_node_dict['path'] = path_list
else:
# print(to_print)
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
relationships_and_counts_dict = Counter(map(tuple, path_types))
relationships = list(relationships_and_counts_dict.keys())
counts = []
for rel in relationships:
counts.append(relationships_and_counts_dict[rel])
relationships_and_counts = []
for i in range(len(counts)):
relationships_and_counts.append((relationships[i], counts[i]))
relationships_and_counts_sorted = sorted(relationships_and_counts, key=lambda tup: tup[1])
count_list = []
for index in range(len(relationships_and_counts_sorted)):
relationship = relationships_and_counts_sorted[index][0]
count = relationships_and_counts_sorted[index][1]
count_list.append({'count': count,
'reltype': str(relationship)})
source_node_dict['counts'] = count_list
ret_source_nodes_dict[source_node] = source_node_dict
return ret_obj
| 5,336,825
|
def main():
""" Main program that deals with user input """
clear()
print_script_info()
update_league_path()
# choose between importing or deleting item sets
answer = None
# only accept answers "", "b" (delete) or source name
while answer not in ["", "b"] + [source.name for source in SOURCES]:
print()
print("-"*36, "USO", "-"*36)
print("Importar:")
print("* Para importar las builds de todas las fuentes, presiona ENTER")
for source in SOURCES:
print(
f"* Para importar solo de {source.name.capitalize()}, escribe '{source.name}' y presiona ENTER")
print()
print("Borrar:")
print("* Para eliminar conjuntos de elementos de una o más fuentes, escribe 'b' y luego ENTER")
print("-"*79)
print()
answer = input().lower()
print()
# delete items sets
if answer.lower() == "b":
# delete one or more item sets and exit
clear()
answer = None
while answer not in ["b", "d"] + [source.name for source in SOURCES]:
print("* Para borrar las buils de todas las fuentes, escribe 'd' y luego ENTER")
for source in SOURCES:
print(
f"* Para borrar las builds solo de {source.name.capitalize()}, escribe '{source.name}' y luego ENTER")
print()
answer = input().lower()
# delete item sets from one specified source
if answer.lower() in [source.name for source in SOURCES]:
for source in SOURCES:
if answer.lower() == source.name:
source.delete_item_sets()
break
# delete item sets from all sources
elif answer == "d":
for source in SOURCES:
source.delete_item_sets()
# delete old item sets and import new ones from specified source
elif answer.lower() in [source.name for source in SOURCES]:
for source in SOURCES:
if answer.lower() == source.name:
source.import_item_sets()
break
# delete old item sets and import new ones from all sources
# uses multiprocessing to import from multiple sources at once
else:
# for macos support
if sys.platform == "darwin":
multiprocessing.set_start_method("fork")
p = multiprocessing.Pool(processes=min(len(SOURCES), os.cpu_count()))
for source in SOURCES:
p.apply_async(source.import_item_sets)
# sleep to make sure config doesn't get accessed by more than one process at once
time.sleep(3)
# waits for all source imports to be done before continuing
p.close()
p.join()
# last prompt before exiting the app
print("\nDone!")
answer = None
while answer == None:
answer = input("Presiona ENTER para volver atrás, o cierra esta ventana si quieres salir.")
| 5,336,826
|
def MaxLonSep( maxarc, baselat ):
"""Calculates the maximum separation in longitude that a point can have
from a reference point at latitude baselat and still be within a given
great circle arc length, maxarc, of the reference point. All quantities
in radians."""
if abs(baselat) + maxarc <= 0.5 * pi:
#result = asin( abs( sin(maxarc) ) / cos( baselat ) )
#result = acos(sqrt(cos(baselat)**2 - sin(maxarc)**2)/cos(baselat))
c = cos( baselat )
s = abs( sin( maxarc ) )
y = s
x = sqrt( ( c + s ) * ( c - s ) )
result = atan2( y, x )
else:
result = pi
return result
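# Worked example added for illustration: at latitude 60 degrees, a great-circle radius
# of 1 degree of arc spans about 2 degrees of longitude, since asin(sin 1° / cos 60°) ≈ 2°.
from math import radians, degrees
print(degrees(MaxLonSep(radians(1.0), radians(60.0))))  # ≈ 2.0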
| 5,336,827
|
def auto_read(filename):
"""Automatically determine the format of filename and open accordingly"""
#XXX: this won't work correctly on pipes
#would be better to use file magic
f = open(filename, 'r')
firstchar = f.read(1)
f.close()
if firstchar == '#':
return gnucap_read(filename)
else:
return spice_read(filename)
| 5,336,828
|
def WaitForOperation(client, messages, operation_name,
operation_description=None,
project=None, timeout=180):
"""Wait for an operation to complete.
Polls the operation requested approximately every second, showing a
progress indicator. Returns when the operation has completed.
Args:
client: The API client to use.
messages: The API message to use.
operation_name: The name of the operation to wait on, as returned by
operations.list.
operation_description: A short description of the operation to wait on,
such as 'create' or 'delete'. Will be displayed to the user.
project: The name of the project that this operation belongs to.
timeout: Number of seconds to wait for. Defaults to 3 minutes.
Returns:
The operation when it is done.
Raises:
HttpException: A http error response was received while executing api
request. Will be raised if the operation cannot be found.
OperationError: The operation finished with error(s).
Error: The operation exceeded the timeout without completing.
"""
tick_increment = 1 # every second(s)
ticks = 0
message = ('Waiting for {0}[{1}]'.format(
operation_description + ' ' if operation_description else '',
operation_name))
request = messages.DeploymentmanagerOperationsGetRequest(
project=project, operation=operation_name)
with progress_tracker.ProgressTracker(message, autotick=False) as ticker:
while ticks < timeout:
operation = client.operations.Get(request)
# Operation status is one of PENDING, RUNNING, DONE
if operation.status == 'DONE':
if operation.error:
raise exceptions.OperationError(
'Error in Operation [{0}]: {1}'.format(
operation_name, dm_util.RenderMessageAsYaml(operation.error)))
else: # Operation succeeded
return operation
ticks += tick_increment
ticker.Tick()
time.sleep(tick_increment)
# Timeout exceeded
raise exceptions.Error(
'Wait for Operation [{0}] exceeded timeout [{1}].'.format(
operation_name, str(timeout)))
| 5,336,829
|
def get_unstaged_files(gitobj):
"""
ref:
http://gitpython.readthedocs.io/en/stable/tutorial.html#obtaining-diff-information
"""
diff = []
diff.extend(gitobj.index.diff(gitobj.head.commit))
diff.extend(gitobj.index.diff(None))
return {"changed": diff, "untracked": gitobj.untracked_files}
| 5,336,830
|
def conv2d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="CROSS_CORRELATION",
compute_mode="DEFAULT",
) -> Tensor:
"""
2D convolution operation.
Refer to :class:`~.Conv2d` for more information.
:param inp: feature map of the convolution operation.
:param weight: convolution kernel.
:param bias: bias added to the result of convolution (if given).
:param stride: stride of the 2D convolution operation. Default: 1
:param padding: size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
:param dilation: dilation of the 2D convolution operation. Default: 1
:param groups: number of groups into which the input and output channels are divided, so as to perform a ``grouped convolution``. When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be `(groups, out_channel // groups,
in_channels // groups, height, width)`.
:type conv_mode: string or :class:`Convolution.Mode`
:param conv_mode: supports "CROSS_CORRELATION". Default:
"CROSS_CORRELATION"
:type compute_mode: string or
:class:`Convolution.ComputeMode`
:param compute_mode: when set to "DEFAULT", no special requirements will be
placed on the precision of intermediate results. When set to "FLOAT32",
"Float32" would be used for accumulator and intermediate result, but only
effective when input and output are of Float16 dtype.
:return: output tensor.
"""
assert conv_mode == "CROSS_CORRELATION" or conv_mode.name == "CROSS_CORRELATION"
assert compute_mode == "DEFAULT" or compute_mode.name == "DEFAULT"
stride_h, stride_w = expand_hw(stride)
pad_h, pad_w = expand_hw(padding)
dilate_h, dilate_w = expand_hw(dilation)
Sparse = builtin.Convolution.Sparse
sparse_type = "DENSE" if groups == 1 else "GROUP"
op = builtin.Convolution(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
strategy=get_conv_execution_strategy(),
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
inp, weight = utils.convert_inputs(inp, weight)
(output,) = apply(op, inp, weight)
if bias is not None:
output += bias
return output
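# Usage sketch added for illustration (assumes MegEngine is installed; the shapes are
# made up): a 3->8 channel 3x3 convolution with padding 1 keeps the spatial size.
import numpy as np
from megengine import tensor

x_demo = tensor(np.random.randn(1, 3, 32, 32).astype("float32"))
w_demo = tensor(np.random.randn(8, 3, 3, 3).astype("float32"))
y_demo = conv2d(x_demo, w_demo, stride=1, padding=1)  # output shape (1, 8, 32, 32)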
| 5,336,831
|
def find_tex_directives(texfile, ignore_root_loops=False):
"""Build a dictionary of %!TEX directives.
The main ones we are concerned with are:
root
Specifies a root file to run tex on for this subsidiary
TS-program
Tells us which latex program to run
TS-options
Options to pass to TS-program
encoding
The text encoding of the tex file
Arguments:
texfile
The initial tex file which should be searched for tex directives.
If this file contains a “root” directive, then the file specified
in this directive will be searched next.
ignore_root_loops
Specifies if this function exits with an error status if the tex
root directives contain a loop.
Returns: ``{str: str}``
Examples:
>>> chdir('Tests/TeX')
>>> directives = find_tex_directives('input/packages_input1.tex')
>>> print(directives['root']) # doctest:+ELLIPSIS
/.../Tests/TeX/packages.tex
>>> print(directives['TS-program'])
xelatex
>>> find_tex_directives('makeindex.tex')
{}
>>> chdir('../..')
"""
if not texfile:
return {}
root_chain = [texfile]
directive_regex = compile(r'%\s*!T[Ee]X\s+([\w-]+)\s*=\s*(.+)')
directives = {}
while True:
for encoding in encodings:
try:
lines = [line for (line_number, line)
in enumerate(open(texfile, encoding=encoding))
if line_number < 20]
break
except UnicodeDecodeError:
continue
new_directives = {directive.group(1): directive.group(2).rstrip()
for directive
in [directive_regex.match(line) for line in lines]
if directive}
directives.update(new_directives)
if 'root' in new_directives:
root = directives['root']
new_tex_file = (root if root.startswith('/') else
realpath(join(dirname(texfile), root)))
directives['root'] = new_tex_file
else:
break
if new_tex_file in root_chain:
if ignore_root_loops:
break
print('''<div id="commandOutput"><div id="preText">
<p class="error">There is a loop in your %!TEX root
directives.</p>
</div></div>''')
exit(EXIT_LOOP_IN_TEX_ROOT)
else:
texfile = new_tex_file
root_chain.append(texfile)
return directives
| 5,336,832
|
def projects():
"""
Handles the GET & POST request to '/projects'.
GET: requests to render page
POST: request to edit project with sent data
:return: render projects page / Json containing authorisation error / manage(data) function call
"""
if request.method == "GET":
return render_template('projects.html')
else:
if not current_user.is_authenticated or (current_user.role != "admin" and current_user.role != "employee"):
return jsonify(
{'success': False, "message": "You are not authorized to edit the selected projects"}), 400, {
'ContentType': 'application/json'}
data = request.json
for project in data["projects"]:
if current_user.role != "admin" and not employee_authorized_for_project(current_user.name, project):
return jsonify(
{'success': False, "message": "You are not authorized to edit the selected projects"}), 400, {
'ContentType': 'application/json'}
return manage(data)
| 5,336,833
|
def lock():
"""Lock new dependencies without upgrading"""
OPTIONS['upgrade'] = False
run_configurations(recompile, read_config)
| 5,336,834
|
def evolve_fqe_givens_sector(wfn: Wavefunction, u: np.ndarray,
sector='alpha') -> Wavefunction:
"""Evolve a wavefunction by u generated from a 1-body Hamiltonian.
Args:
wfn: FQE Wavefunction on n-orbitals
u: (n x n) unitary matrix.
sector: Optional either 'alpha' or 'beta' indicating which sector
to rotate
Returns:
New evolved wfn object.
"""
if sector == 'alpha':
sigma = 0
elif sector == 'beta':
sigma = 1
else:
raise ValueError("Bad section variable. Either (alpha) or (beta)")
if not np.isclose(u.shape[0], wfn.norb()):
raise ValueError(
"unitary is not specified for the correct number of orbitals")
rotations, diagonal = givens_decomposition_square(u.copy())
# Iterate through each layer and time evolve by the appropriate
# fermion operators
for layer in rotations:
for givens in layer:
i, j, theta, phi = givens
if not np.isclose(phi, 0):
op = of.FermionOperator(
((2 * j + sigma, 1), (2 * j + sigma, 0)), coefficient=-phi)
wfn = wfn.time_evolve(1.0, op)
if not np.isclose(theta, 0):
op = of.FermionOperator(((2 * i + sigma, 1),
(2 * j + sigma, 0)),
coefficient=-1j * theta) + \
of.FermionOperator(((2 * j + sigma, 1),
(2 * i + sigma, 0)),
coefficient=1j * theta)
wfn = wfn.time_evolve(1.0, op)
# evolve the last diagonal phases
for idx, final_phase in enumerate(diagonal):
if not np.isclose(final_phase, 1.0):
op = of.FermionOperator(
((2 * idx + sigma, 1), (2 * idx + sigma, 0)),
-np.angle(final_phase))
wfn = wfn.time_evolve(1.0, op)
return wfn
| 5,336,835
|
def eval_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
"""TODO
Args:
action_selection: ActionSelection
reg_and_traj_transferer: RegistrationAndTrajectoryTransferer
lfd_env: LfdEnvironment
sim: DynamicSimulation
"""
holdoutfile = h5py.File(args.eval.holdoutfile, 'r')
holdout_items = eval_util.get_indexed_items(holdoutfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
rope_params = sim_util.RopeParams()
if args.eval.rope_param_radius is not None:
rope_params.radius = args.eval.rope_param_radius
if args.eval.rope_param_angStiffness is not None:
rope_params.angStiffness = args.eval.rope_param_angStiffness
num_successes = 0
num_total = 0
for i_task, demo_id_rope_nodes in holdout_items:
redprint("task %s" % i_task)
init_rope_nodes = demo_id_rope_nodes["rope_nodes"][:]
rope = RopeSimulationObject("rope", init_rope_nodes, rope_params)
sim.add_objects([rope])
sim.settle(step_viewer=args.animation)
for i_step in range(args.eval.num_steps):
redprint("task %s step %i" % (i_task, i_step))
sim_util.reset_arms_to_side(sim)
if args.animation:
sim.viewer.Step()
sim_state = sim.get_state()
sim.set_state(sim_state)
scene_state = lfd_env.observe_scene()
# plot cloud of the test scene
handles = []
if args.plotting:
handles.append(sim.env.plot3(scene_state.cloud[:,:3], 2, scene_state.color if scene_state.color is not None else (0,0,1)))
sim.viewer.Step()
eval_stats = eval_util.EvalStats()
start_time = time.time()
if len(scene_state.cloud) == 0:
redprint("Detected 0 points in scene")
break
try:
(agenda, q_values_root), goal_found = action_selection.plan_agenda(scene_state, i_step)
except ValueError: #e.g. if cloud is empty - any action is hopeless
redprint("**Raised Value Error during action selection")
break
eval_stats.action_elapsed_time += time.time() - start_time
eval_stats.generalized = True
num_actions_to_try = MAX_ACTIONS_TO_TRY if args.eval.search_until_feasible else 1
for i_choice in range(num_actions_to_try):
if q_values_root[i_choice] == -np.inf: # none of the demonstrations generalize
eval_stats.generalized = False
break
redprint("TRYING %s"%agenda[i_choice])
best_root_action = str(agenda[i_choice])
start_time = time.time()
try:
test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state, plotting=args.plotting)
                except ValueError:  # e.g. if the transferred cloud or trajectory is empty
redprint("**Raised value error during traj transfer")
break
eval_stats.feasible, eval_stats.misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation, interactive=args.interactive, check_feasible=args.eval.check_feasible)
eval_stats.exec_elapsed_time += time.time() - start_time
if not args.eval.check_feasible or eval_stats.feasible: # try next action if TrajOpt cannot find feasible action and we care about feasibility
break
else:
sim.set_state(sim_state)
knot = is_knot(rope.rope.GetControlPoints())
results = {'scene_state':scene_state, 'best_action':best_root_action, 'values':q_values_root, 'aug_traj':test_aug_traj, 'eval_stats':eval_stats, 'sim_state':sim_state, 'knot':knot, 'goal_found': goal_found}
eval_util.save_task_results_step(args.resultfile, i_task, i_step, results)
if not eval_stats.generalized:
assert not knot
break
if args.eval.check_feasible and not eval_stats.feasible:
# Skip to next knot tie if the action is infeasible -- since
# that means all future steps (up to 5) will have infeasible trajectories
assert not knot
break
if knot:
num_successes += 1
                break
sim.remove_objects([rope])
num_total += 1
redprint('Eval Successes / Total: ' + str(num_successes) + '/' + str(num_total))
redprint('Success Rate: ' + str(float(num_successes)/num_total))
| 5,336,836
|
def clear():
"""Clear cli screen
:return: None
"""
os.system('clear' if 'posix' == os.name else 'cls')
| 5,336,837
|
def latexify(fig_width=None, fig_height=None, columns=1, tick_labelsize=8):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
assert(columns in [1,2])
if fig_width is None:
fig_width = (3.39/6.9)*TEXT_WIDTH_IN if columns==1 else TEXT_WIDTH_IN # width in inches
if fig_height is None:
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + str(fig_height) +
"so will reduce to" + str(MAX_HEIGHT_INCHES) + "inches.")
fig_height = MAX_HEIGHT_INCHES
pgf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "xelatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 10, # LaTeX default is 10pt font.
"font.size": 10,
"legend.fontsize": 8, # Make the legend/label fonts a little smaller
"xtick.labelsize": tick_labelsize,
"ytick.labelsize": tick_labelsize,
'figure.figsize': [fig_width,fig_height],
'text.latex.preamble': [r'\usepackage{gensymb}'],
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
matplotlib.rcParams.update(pgf_with_latex)
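# --- Hypothetical usage sketch (not part of the original source). It assumes the
# module-level TEXT_WIDTH_IN constant used above is defined and that a working
# LaTeX toolchain is available, since text.usetex is enabled.
import numpy as np
import matplotlib.pyplot as plt

latexify(columns=1, tick_labelsize=8)        # configure rcParams before creating the figure
x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x), label=r"$\sin(x)$")
plt.xlabel(r"$x$")
plt.legend()
plt.savefig("latexified_example.pdf", bbox_inches="tight")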
| 5,336,838
|
def vidread(filename):
"""
generates images instead of storing as a 3-tensor because the videos
can take up a lot of memory
based on:
http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
"""
cap = cv2.VideoCapture(filename)
assert cap.isOpened() # TODO also could call cap.open()
while cap.isOpened():
ret, frame = cap.read()
if ret:
yield img_uint8_to_01(frame)
else:
break
cap.release()
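# --- Hypothetical usage sketch (not part of the original source). It assumes OpenCV
# can open a local file named "clip.mp4" and that the module-level img_uint8_to_01
# helper used above is available.
for i, frame in enumerate(vidread("clip.mp4")):
    if i == 0:
        print("first frame:", frame.shape, frame.dtype)
    if i >= 9:                # inspect only the first ten frames
        break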
| 5,336,839
|
def iter_ols_getters() -> Iterable[Type[Getter]]:
"""Iterate over OLS getters."""
for bioregistry_id in bioregistry.read_registry():
yv = make_ols_getter(bioregistry_id)
if yv is not None:
yield yv
| 5,336,840
|
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):
"""
    Write image files to disk. Create specified folder(s) if they
    don't exist. Return a tuple of :class:`Tile` instances.
Args:
tiles (list): List, tuple or set of :class:`Tile` objects to save.
prefix (str): Filename prefix of saved tiles.
Kwargs:
        directory (str): Directory to save tiles. Created if non-existent.
        format (str): Image file format passed to ``Tile.save`` (default 'png').
Returns:
Tuple of :class:`Tile` instances.
"""
# Causes problems in CLI script.
# if not os.path.exists(directory):
# os.makedirs(directory)
for tile in tiles:
tile.save(filename=tile.generate_filename(prefix=prefix,
directory=directory,
format=format),
format=format)
return tuple(tiles)
| 5,336,841
|
async def save_audit(user_id: int, approved_by: int, oldpoi: POI, poi: POI):
"""Warning: does not do db.commit()."""
db = await get_db()
if oldpoi is None:
query = ("insert into poi_audit (user_id, approved_by, poi_id, field, new_value) "
"values (?, ?, ?, 'poi', ?)")
data = json.dumps(poi.get_db_fields())
await db.execute(query, (user_id, approved_by, poi.id, data))
elif poi is None:
query = ("insert into poi_audit (user_id, approved_by, poi_id, field, old_value) "
"values (?, ?, ?, 'poi', ?)")
data = json.dumps(oldpoi.get_db_fields())
await db.execute(query, (user_id, approved_by, oldpoi.id, data))
else:
query = ("insert into poi_audit (user_id, approved_by, poi_id, field, "
"old_value, new_value) values (?, ?, ?, ?, ?, ?)")
old_fields = oldpoi.get_db_fields()
fields = poi.get_db_fields(oldpoi)
for field in fields:
await db.execute(query, (user_id, approved_by, poi.id, field,
old_fields[field], fields[field]))
| 5,336,842
|
def epoch_folding_search(times, frequencies, nbin=128, segment_size=5000,
expocorr=False, gti=None, weights=1, fdots=0):
"""Performs epoch folding at trial frequencies in photon data.
If no exposure correction is needed and numba is installed, it uses a fast
algorithm to perform the folding. Otherwise, it runs a *much* slower
algorithm, which however yields a more precise result.
The search can be done in segments and the results averaged. Use
segment_size to control this
Parameters
----------
times : array-like
the event arrival times
frequencies : array-like
the trial values for the frequencies
Other Parameters
----------------
nbin : int
the number of bins of the folded profiles
segment_size : float
the length of the segments to be averaged in the periodogram
fdots : array-like
trial values of the first frequency derivative (optional)
expocorr : bool
correct for the exposure (Use it if the period is comparable to the
length of the good time intervals). If True, GTIs have to be specified
via the ``gti`` keyword
gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
Good time intervals
weights : array-like
weight for each time. This might be, for example, the number of counts
if the times array contains the time bins of a light curve
Returns
-------
(fgrid, stats) or (fgrid, fdgrid, stats), as follows:
fgrid : array-like
frequency grid of the epoch folding periodogram
fdgrid : array-like
frequency derivative grid. Only returned if fdots is an array.
stats : array-like
the epoch folding statistics corresponding to each frequency bin.
"""
if expocorr or not HAS_NUMBA or isinstance(weights, Iterable):
if expocorr and gti is None:
raise ValueError('To calculate exposure correction, you need to'
' specify the GTIs')
def stat_fun(t, f, fd=0, **kwargs):
return profile_stat(fold_events(t, f, fd, **kwargs)[1])
return \
_folding_search(stat_fun, times, frequencies,
segment_size=segment_size,
use_times=True, expocorr=expocorr, weights=weights,
gti=gti, nbin=nbin, fdots=fdots)
return _folding_search(lambda x: profile_stat(_profile_fast(x, nbin=nbin)),
times, frequencies, segment_size=segment_size,
fdots=fdots)
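# --- Hypothetical usage sketch (not part of the original source). It simulates photons
# pulsed at 1.2 Hz and searches a small frequency grid; the helpers used above
# (fold_events, profile_stat, _profile_fast) are assumed to be importable here.
import numpy as np

rng = np.random.default_rng(0)
true_f = 1.2
times = np.sort(rng.uniform(0, 500, 20000))
phases = (times * true_f) % 1
times = times[rng.uniform(size=times.size) < 0.5 + 0.4 * np.cos(2 * np.pi * phases)]
frequencies = np.linspace(1.1, 1.3, 201)
fgrid, stats = epoch_folding_search(times, frequencies, nbin=16, segment_size=250)
print("best trial frequency:", fgrid[np.argmax(stats)])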
| 5,336,843
|
def explain(include: InclusionChoice = "explain") -> None:
"""Shows the previously recorded traceback info again,
with the option to specify different items to include.
For example, ``explain("why")`` is equivalent to ``why()``.
"""
old_include = friendly_traceback.get_include()
friendly_traceback.set_include(include)
session.show_traceback_info_again()
friendly_traceback.set_include(old_include)
| 5,336,844
|
def get_hs300_stocks():
"""
    Fetch the CSI 300 (HS300) constituent stocks.
"""
    # Log in
lg = bs.login()
    # Print the login response
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
    # Query the CSI 300 constituents
rs = bs.query_hs300_stocks()
print('query_hs300 error_code:' + rs.error_code)
print('query_hs300 error_msg:' + rs.error_msg)
    # Collect the result set
hs300_stocks = []
while (rs.error_code == '0') & rs.next():
        # Fetch one record and append it to the result list
hs300_stocks.append(rs.get_row_data())
result = pd.DataFrame(hs300_stocks, columns=rs.fields)
    # # Optionally write the result set to a CSV file
# result.to_csv("hs300_stocks.csv", encoding="utf-8", index=False)
dtype = {'updateDate': String(10), 'code': String(9), 'code_name': String(10)}
result.to_sql('odl_bs_hs300_stocks', engine, schema=CQ_Config.DB_SCHEMA, if_exists='replace', index=False, dtype=dtype)
    # Log out
bs.logout()
| 5,336,845
|
def dark_blue_filter(image):
"""Filter reduces the green making the pixels more of red
and blue causing a dark bluish colour
"""
data = []
image_data = get_image_data(image)
# setting every 'g' pixel to 0
for i in range(len(image_data)):
current_tuple = list(image_data[i])
current_tuple[1] = 0
data.append(tuple(current_tuple))
# saving the image
footer(image, data, "pink_filter")
| 5,336,846
|
def apply_matcher(words,
offsets,
dictionary,
max_ngrams=5,
longest_match_only=True,
case_sensitive = False,
split_on=None):
"""
    Match dictionary terms against token n-grams (up to ``max_ngrams`` words)
    and return their character-offset spans in the source text.
    """
    # convert to source char offsets
text = get_text(words, offsets)
matches = []
for i in range(0, len(words)):
match = None
start = offsets[i]
for j in range(i + 1, min(i + max_ngrams + 1, len(words) + 1)):
end = offsets[j - 1] + len(words[j - 1])
# term types: normalize whitespace & tokenized + whitespace
for term in [
re.sub(r'''\s{2,}''', ' ', text[start:end]).strip(),
' '.join([w for w in words[i:j] if w.strip()])
]:
if match_term(term, dictionary, case_sensitive):
match = end
break
if match:
term = re.sub(r'''\s{2,}''', ' ', text[start:match]).strip()
matches.append(([start, match], term))
if longest_match_only:
# sort on length then end char
matches = sorted(matches, key=lambda x: x[0][-1], reverse=1)
f_matches = []
curr = None
for m in matches:
if curr is None:
curr = m
continue
(i, j), _ = m
if (i >= curr[0][0] and i <= curr[0][1]) and (j >= curr[0][0] and j <= curr[0][1]):
pass
else:
f_matches.append(curr)
curr = m
if curr:
f_matches.append(curr)
return f_matches
return matches
| 5,336,847
|
def create_and_put_metrics_and_widgets() -> dict:
"""For each repository, aggregates all text and metric data and creates widgets for each
:returns: a dictionary mapping the dashboard name to the list of the text and metric widgets for each repository to
put in the dashboard
:rtype: dict
"""
widgets = {}
for repo_name in os.environ['repo_names'].split(','):
owner = os.environ['owner']
if '/' in repo_name:
[owner, repo_name] = repo_name.split('/')
sorted_widgets = github_docker.aggregate_metrics(owner, repo_name)
# Create a Cloudwatch metric/text widget out of each sorted widget
for widget_title, widget in sorted_widgets.items():
if widget['type'] == 'metric':
title = repo_name
if widget_title != os.environ['default_metric_widget_name']:
title += ' ' + widget_title
formatted_widget = cw_interactions.create_metric_widget(repo_name, widget['data'], title)
elif widget['type'] == 'text':
title = repo_name
if widget_title == os.environ['default_text_widget_name']:
title += ' Properties'
else:
title += ' ' + widget_title
formatted_widget = cw_interactions.create_text_widget(widget['data'], title=title)
else:
print("Invalid widget type specified for widget:", widget_title)
continue
dashboard_name = os.environ['dashboard_name_prefix']
if widget['dashboard_level'] != 'main':
dashboard_name += '-' + repo_name
# Add widgets to dashboard
widgets_for_specified_dashboard = widgets.get(dashboard_name, [])
widgets_for_specified_dashboard.append(formatted_widget)
widgets[dashboard_name] = widgets_for_specified_dashboard
# Add activity widget
main_widgets = widgets.get(os.environ['dashboard_name_prefix'], [])
main_widgets.append(cw_interactions.create_activity_widget(repo_name))
widgets[os.environ['dashboard_name_prefix']] = main_widgets
return widgets
| 5,336,848
|
def test_truncate_default(all_terms):
"""Ensure that terminal.truncate functions with the default argument."""
@as_subprocess
def child(kind):
from blessed import Terminal
term = Terminal(kind)
test = "Testing " + term.red("attention ") + term.blue("please.")
trunc = term.truncate(test)
assert term.length(trunc) <= term.width
assert term.truncate(term.red('x' * 1000)) == term.red('x' * term.width)
child(all_terms)
| 5,336,849
|
def boxbin(x,y,xedge,yedge,c=None,figsize=(5,5),cmap='viridis',mincnt=10,vmin=None,vmax=None,edgecolor=None,powernorm=False,
ax=None,normed=False,method='mean',quantile=None,alpha=1.0,cbar=True,unconditional=False,master_count=np.array([])):
""" This function will grid data for you and provide the counts if no variable c is given, or the median if
a variable c is given. In the future I will add functionallity to do the median, and possibly quantiles.
x: 1-D array
y: 1-D array
xedge: 1-D array for xbins
yedge: 1-D array for ybins
c: 1-D array, same len as x and y
returns
axis handle
cbar handle
C matrix (counts or median values in bin)
"""
midpoints = np.empty(xedge.shape[0]-1)
for i in np.arange(1,xedge.shape[0]):
midpoints[i-1] = xedge[i-1] + (np.abs(xedge[i] - xedge[i-1]))/2.
#note on digitize. bin 0 is outside to the left of the bins, bin -1 is outside to the right
ind1 = np.digitize(x,bins = xedge) #inds of x in each bin
ind2 = np.digitize(y,bins = yedge) #inds of y in each bin
#drop points outside range
outsideleft = np.where(ind1 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind1 != len(xedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
outsideleft = np.where(ind2 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind2 != len(yedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
if c is None:
c = np.zeros(len(ind1))
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
df2 = df.groupby(["x","y"]).count()
df = df2.where(df2.values >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if normed:
n_samples = np.ma.sum(C)
C = C/n_samples
C = C*100
print('n_samples= {}'.format(n_samples))
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,edgecolor=edgecolor,norm=colors.PowerNorm(gamma=0.5),vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=edgecolor,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
elif unconditional:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].sum()
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = C/master_count.values
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].mean()
elif method=='std':
df2 = df.groupby(["x","y"])['c'].std()
elif method=='median':
df2 = df.groupby(["x","y"])['c'].median()
        elif method=='quantile':
if quantile is None:
print('No quantile given, defaulting to median')
quantile = 0.5
else:
pass
df2 = df.groupby(["x","y"])['c'].apply(percentile(quantile*100))
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
| 5,336,850
|
def testDataRate(data_rate):
"""
This method tests data rate for transmission
"""
    print('Data rate = {}'.format(data_rate))
# toRedit(data_rate, 'DATA_RATE',pipe)
if __name__ == '__main__':
for i in range(60):
get_color_and_depth_frames()
r.set('im-shape', '720 1280')
p.subscribe(**{'cam-data-server': send_data})
thread = p.run_in_thread(sleep_time=0.00001)
thread.join()
| 5,336,851
|
def _get_elastic_document(
tasks: list[dict],
symprec: float,
fitting_method: str,
) -> ElasticDocument:
"""
Turn a list of deformation tasks into an elastic document.
Parameters
----------
tasks : list of dict
A list of deformation tasks.
symprec : float
Symmetry precision for deriving symmetry equivalent deformations. If
``symprec=None``, then no symmetry operations will be applied.
fitting_method : str
The method used to fit the elastic tensor. See pymatgen for more details on the
methods themselves. The options are:
- "finite_difference" (note this is required if fitting a 3rd order tensor)
- "independent"
- "pseudoinverse"
Returns
-------
ElasticDocument
An elastic document.
"""
structure = get(tasks[0], "output.transformations.history.0.input_structure")
stresses = []
deformations = []
uuids = []
job_dirs = []
for doc in tasks:
deformation = get(doc, "output.transformations.history.0.deformation")
stress = get(doc, "output.output.stress")
deformations.append(Deformation(deformation))
stresses.append(Stress(stress))
uuids.append(doc["uuid"])
job_dirs.append(doc["output"]["dir_name"])
return ElasticDocument.from_stresses(
structure,
stresses,
deformations,
uuids,
job_dirs,
fitting_method=fitting_method,
symprec=symprec,
)
| 5,336,852
|
def first(filename: Union[str, Path]) -> int:
"""
Sort the input, prepend with 0 and append with 3 + the max.
Return:
(# of successive differences == 1) * (# of successive differences == 3)
"""
with open(filename, "rt") as infile:
jolts = sorted(int(line.strip()) for line in infile)
jolts = [0] + jolts + [jolts[-1] + 3]
diffs = Counter(right - left for left, right in zip(jolts[:-1], jolts[1:]))
return diffs[3] * diffs[1]
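# --- Hypothetical usage sketch (not part of the original source). It writes the small
# Advent of Code 2020 day 10 sample to a local file; Counter is assumed to be imported
# at module level, as the function above requires.
from pathlib import Path

sample = Path("sample_jolts.txt")
sample.write_text("\n".join(str(j) for j in [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]))
print(first(sample))   # 7 one-jolt gaps * 5 three-jolt gaps = 35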
| 5,336,853
|
def charge_is_valid(charge_profile, capacity=6, max_charge_rate=2.5, time_unit=0.5):
"""
Function determining if a charge profile is valid (and fully charges the battery)
"""
    if not np.all(np.isclose(capacity/time_unit, charge_profile.groupby(charge_profile.index.date).sum())):
        return False
    elif not np.all(charge_profile.groupby(charge_profile.index.date).max() <= max_charge_rate):
        return False
    else:
        return True
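# --- Hypothetical usage sketch (not part of the original source). A half-hourly profile
# that charges at 2 kW for six slots delivers 6 kWh, matching the default capacity.
import numpy as np
import pandas as pd

index = pd.date_range("2021-01-01", periods=48, freq="30min")
profile = pd.Series(0.0, index=index)
profile.iloc[:6] = 2.0                      # 6 half-hour slots at 2 kW -> 6 kWh
print(charge_is_valid(profile))             # expected: True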
| 5,336,854
|
def create_slice_obj(start, end, step):
"""Create slice object"""
return slice(start, end, step)
| 5,336,855
|
def send_wxnotification(message):
"""发送公众号提醒""" #文档字符串用三引号括起,Python使用它们来生成有关程序中函数的文档。
miao_code="tLmPyT4"
text = message
page = request.urlopen("http://miaotixing.com/trigger?" + parse.urlencode({"id":miao_code, "text":text, "type":"json"}))
result = page.read()
jsonObj = json.loads(result)
if(jsonObj["code"] == 0):
logger.debug("send " + message + " to WeChat success!")
else:
logger.error("failed, err code:" + str(jsonObj["code"]) + ", desc:" + jsonObj["msg"])
| 5,336,856
|
def binary_cross_entropy_error(y, t):
"""バイナリー交差エントロピー誤差"""
#y.shape (N,C,H,W)
delta = 1e-7
return -np.mean(t*np.log(y + delta) + (1-t)*np.log(1-y + delta))
| 5,336,857
|
def get_abc():
"""
    :return: all the abcs as a list
"""
# ok
return list(abcs.find({}, {'_id': False}))
| 5,336,858
|
def matching_system_code(concept: CodeableConcept, system: str) -> Optional[str]:
"""
Returns a code from a specified *system* contained within a given *concept*.
If no code is found for the given *system*, returns None.
Raises an :class:`AssertionError` if more than one encoding for a *system*
is found within the given FHIR *concept*.
"""
system_codes: List[CodeableConcept] = []
if not concept:
return None
system_codes += list(filter(lambda c: matching_system(c, system), concept.coding))
assert len(system_codes) <= 1, "Multiple encodings found in FHIR concept " + \
f"«{concept.concept_type}» for system «{system}»."
if not system_codes:
return None
return system_codes[0].code
| 5,336,859
|
def get_pybullet(env_name):
""" Returns pybullet dataset and envrironment.
The dataset is provided through d4rl-pybullet. See more details including
available dataset from its GitHub page.
.. code-block:: python
from d3rlpy.datasets import get_pybullet
dataset, env = get_pybullet('hopper-bullet-mixed-v0')
References:
* https://github.com/takuseno/d4rl-pybullet
Args:
env_name (str): environment id of d4rl-pybullet dataset.
Returns:
tuple: tuple of :class:`d3rlpy.dataset.MDPDataset` and gym environment.
"""
try:
import d4rl_pybullet
env = gym.make(env_name)
dataset = MDPDataset(**env.get_dataset())
return dataset, env
except ImportError:
raise ImportError(
'd4rl-pybullet is not installed.\n' \
'pip install git+https://github.com/takuseno/d4rl-pybullet')
| 5,336,860
|
def reverse(d: list) -> list:
    """Reverses the provided list in place, and also RETURNS it."""
d.reverse()
return d
| 5,336,861
|
def justify(words, width):
"""
Justify input words.
:param words: list of words
:type words : list
:param width: width of each line
:type width : int
    :return: generator yielding justified lines as strings
"""
line = []
col = 0
for word in words:
if line and col + len(word) > width:
if len(line) == 1:
yield left_justify(line, width)
else:
# After n + 1 spaces are placed between each pair of
# words, there are r spaces left over; these result in
# wider spaces at the left.
n, r = divmod(width - col + 1, len(line) - 1)
narrow = ' ' * (n + 1)
if r == 0:
yield narrow.join(line)
else:
wide = ' ' * (n + 2)
yield wide.join(line[:r] + [narrow.join(line[r:])])
line, col = [], 0
line.append(word)
col += len(word) + 1
if line:
yield left_justify(line, width)
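# --- Hypothetical usage sketch (not part of the original source). It assumes the
# module-level left_justify helper used above for the final line is available.
words = "the quick brown fox jumps over the lazy dog".split()
for line in justify(words, 16):
    print(repr(line))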
| 5,336,862
|
def scalar(typename):
"""
Returns scalar type from ROS message data type, like "uint8" from "uint8[100]".
Returns type unchanged if already a scalar.
"""
return typename[:typename.index("[")] if "[" in typename else typename
| 5,336,863
|
def subject(mock_messenger: AsyncMock) -> initiator.FirmwareUpdateInitiator:
"""The test subject."""
return initiator.FirmwareUpdateInitiator(mock_messenger)
| 5,336,864
|
def onetangent(ri, rf, ta_transb, k=0, use_alts=True, center='earth'):
"""Orbit transfer with one tangential burn and one nontangential
burn. Must be circular or coaxially elliptic. Currently only for
circular orbits.
:param ri: altitude (or radius) of initial circular orbit (km)
    :param rf: altitude (or radius) of final circular orbit (km)
:param ta_transb: true anomaly of transfer orbit at point b (rad)
:param k: number of revolutions through perigee
:param use_alts: Boolean for switching between ri,rf=altitude
(True) and ri,rf=radius to center
:param center: planetary center of focus; default=earth
:return vtransa: transfer velocity required at point a (km/s)
:return vtransb: transfer velocity required at point b (km/s)
:return fpa_transb: flight path angle for the nontangential
transfer (rad)
:return TOF: time of flight (s)
    Note: work in progress.
"""
# update constants and parameters
mu = get_mu(center=center)
if use_alts and center.lower() == 'earth':
ri, rf = [r+r_earth for r in [ri, rf]]
# check location of tangent burn
Rinv = ri/rf
if Rinv > 1:
# tangent burn is at apogee
e_trans = (Rinv-1)/(np.cos(ta_transb)+Rinv)
a_trans = ri/(1+e_trans)
E0 = np.pi
else:
# tangent burn is at perigee
e_trans = (Rinv-1)/(np.cos(ta_transb)-Rinv)
a_trans = ri/(1-e_trans)
E0 = 0.
# compute initial, final, and transfer velocities at a, b
vi = sqrt(mu/ri)
vf = sqrt(mu/rf)
vtransa = sqrt(2*mu/ri - mu/a_trans)
vtransb = sqrt(2*mu/rf - mu/a_trans)
# flight path angle of nontangential transfer
fpa_transb = np.arctan(e_trans*np.sin(ta_transb)
/ (1+e_trans*np.cos(ta_transb)))
# get delta-v's at each point and its total
dva = vtransa - vi
dvb = sqrt( vtransb**2 + vf**2 - 2*vtransb*vf*np.cos(fpa_transb) )
dv_otb = np.abs(dva) + np.abs(dvb)
# computing eccentric anomaly
E = np.arccos((e_trans+np.cos(ta_transb))/(1+e_trans*np.cos(ta_transb)))
# computing time of flight
TOF = sqrt(a_trans**3/mu) * \
(2*k*np.pi+(E-e_trans*np.sin(E))-(E0 - e_trans*np.sin(E0)))
return vtransa, vtransb, fpa_transb, TOF
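# --- Hypothetical usage sketch (not part of the original source). The numbers loosely
# follow a textbook LEO-to-GEO one-tangent-burn case; get_mu and r_earth are assumed
# to be defined at module level, as the function above requires.
import numpy as np

vta, vtb, fpa_b, tof = onetangent(191.34411, 35781.34857, np.deg2rad(160.0), k=0)
print("transfer speed at a [km/s]:", vta)
print("transfer speed at b [km/s]:", vtb)
print("flight path angle at b [deg]:", np.rad2deg(fpa_b))
print("time of flight [h]:", tof / 3600.0)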
| 5,336,865
|
def test_user_moira_lists_anonymous():
"""
Test that empty list is returned for anonymous user
"""
assert user_moira_lists(AnonymousUser()) == []
| 5,336,866
|
def save_keys_to_single_site_bed(keys, outfn, callBaseFormat=1, outBaseFormat=1, nonstr='.'):
"""
    Save all keys (tuples of (chrom, position, strand)) to outfn as single-site BED records.
    The unused 4th and 5th BED columns (name, score) are filled with `nonstr` (default '.').
    :param keys: iterable of (chrom, position, strand) tuples, positions in `callBaseFormat` base
    :param outfn: output BED file name; written gzip-compressed if it ends with '.gz'
    :return:
"""
if outfn.endswith('.gz'):
outfile = gzip.open(outfn, 'wt')
else:
outfile = open(outfn, 'w')
for key in keys:
if outBaseFormat == 0:
outfile.write(
f'{key[0]}\t{key[1] - callBaseFormat + outBaseFormat}\t{key[1] - callBaseFormat + outBaseFormat + 1}\t{nonstr}\t{nonstr}\t{key[2]}\n')
else:
outfile.write(
f'{key[0]}\t{key[1] - callBaseFormat + outBaseFormat}\t{key[1] - callBaseFormat + outBaseFormat}\t{nonstr}\t{nonstr}\t{key[2]}\n')
outfile.close()
| 5,336,867
|
def display_matplot(images, title = None, gray=None):
"""[Standard display fuction used throughout testing to see the output of thhe various transforms.
Displays multilpe plots at once for comparison, always in a square format.]
Arguments:
images {[Array]} -- [the array that contains all of the images you wish to display]
Keyword Arguments:
        title {[String]} -- [A title to display on the plot to keep track of which image is being shown.] (default: {None})
gray {[Opencv const]} -- [The colour space you wish to display the image in.] (default: {None})
Returns:
[matplotlib plot] -- [The created plot]
"""
    n = int(np.ceil(np.sqrt(len(images))))  # subplot grid dimensions must be integers
index = 1
plt.set_cmap('gray')
plt.title(title)
for image in images:
plt.subplot(n, n, index)
plt.imshow(image)
plt.xticks([]), plt.yticks([])
index += 1
plt.waitforbuttonpress(0)
plt.close()
return plt
| 5,336,868
|
def test_step_should_set_state_to_running_before_running_step_impl(mocker):
"""A Step should set its State to RUNNING before it runs the Step Implementation function"""
# given
class WrapperForMockerSpy:
def step_func(self, step):
assert step.state is State.RUNNING
w = WrapperForMockerSpy()
mocker.spy(w, "step_func")
step = Step(1, "keyword", "used_keyword", "text", None, None, None, None)
step_impl_mock = mocker.MagicMock(name="Step Impl")
step_impl_mock.func = w.step_func
step_impl_match_mock = mocker.MagicMock(name="Step Impl Match")
step_impl_match_mock.evaluate.return_value = ([], {})
step.assign_implementation(step_impl_mock, step_impl_match_mock)
# when
step.run(None)
# then
w.step_func.assert_called_once_with(step)
| 5,336,869
|
def test_if_two_tables(small_table, large_table):
"""Test two filled tables."""
assert left_join(small_table, large_table) == [['yellow', 'blue', 'green'], ['gray', 'brown', 'pink'], ['black', 'red', 'orange'], ['cyan', 'puce', 'white']]
| 5,336,870
|
def get_version():
"""
    Return the PMML version.
    Returns
    -------
    version : String
        The version of the PMML schema.
"""
version = '4.4'
return version
| 5,336,871
|
def customsoftmax(inp, multihotmask):
"""
Custom Softmax
"""
soft = F.softmax(inp, dim=1)
    # Sum the masked softmax over the class dimension (pooling the border classes),
    # then take the element-wise max of that pooled version and the plain softmax
    # before applying the log.
return torch.log(
torch.max(soft, (multihotmask * (soft * multihotmask).sum(1, keepdim=True)))
)
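# --- Hypothetical usage sketch (not part of the original source). It assumes torch and
# torch.nn.functional (as F) are imported at module level, as the function above requires.
import torch

logits = torch.randn(2, 3, 4, 4)                           # (N, C, H, W)
border_mask = torch.randint(0, 2, (2, 3, 4, 4)).float()    # multi-hot class mask
out = customsoftmax(logits, border_mask)
print(out.shape)                                            # torch.Size([2, 3, 4, 4])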
| 5,336,872
|
def mandoline(
D_src: np.ndarray,
D_tgt: np.ndarray,
edge_list: np.ndarray,
sigma: float=None,
):
"""
Mandoline solver.
Args:
D_src: (n_src x d) matrix of (example, slices) for the source distribution.
        D_tgt: (n_tgt x d) matrix of (example, slices) for the target distribution.
edge_list: list of edge correlations between slices that should be modeled.
sigma: optional parameter that activates RBF kernel-based KLIEP with scale
`sigma`.
Returns: SimpleNamespace that contains
opt: result of scipy.optimize
Phi_D_src: source potential matrix used in Mandoline
Phi_D_tgt: target potential matrix used in Mandoline
n_src: number of source samples
n_tgt: number of target samples
edge_list: the `edge_list` parameter passed as input
"""
# Copy and binarize the input matrices to -1/1
D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
if np.min(D_src) == 0:
D_src[D_src == 0] = -1
D_tgt[D_tgt == 0] = -1
# Edge list encoding dependencies between gs
if edge_list is not None:
edge_list = np.array(edge_list)
# Create the potential matrices
Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)
# Number of examples
n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]
def f(x):
obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy.special.logsumexp(Phi_D_src.dot(x))
return -obj
# Set the kernel
kernel = partial(skmetrics.rbf_kernel, gamma=sigma)
def llkliep_f(x):
obj = kernel(
Phi_D_tgt, x[:, np.newaxis]
).sum() - n_tgt * scipy.special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
return -obj
# Solve
if not sigma:
opt = scipy.optimize.minimize(
f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
else:
opt = scipy.optimize.minimize(
llkliep_f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
return SimpleNamespace(
opt=opt,
Phi_D_src=Phi_D_src,
Phi_D_tgt=Phi_D_tgt,
n_src=n_src,
n_tgt=n_tgt,
edge_list=edge_list,
)
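# --- Hypothetical usage sketch (not part of the original source). It assumes the
# module-level Phi helper used above accepts this edge_list, and that scipy,
# sklearn.metrics (as skmetrics), and functools.partial are imported as in the module.
import numpy as np

rng = np.random.default_rng(0)
D_src = rng.integers(0, 2, size=(500, 2))    # two binary slices on the source set
D_tgt = rng.integers(0, 2, size=(300, 2))    # the same two slices on the target set
res = mandoline(D_src, D_tgt, edge_list=[[0, 1]])
print(res.opt.x)                             # fitted potential weights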
| 5,336,873
|
def service_transformer_info_get(service): # noqa: E501
"""Retrieve transformer info
Provides information about the transformer. # noqa: E501
:param service: Inxight_Drugs service
:rtype: TransformerInfo
"""
return transformer[service].info
| 5,336,874
|
def create_app(config=None, app_name=None):
"""Create a Flask app."""
if app_name is None:
app_name = DefaultConfig.PROJECT
app = Flask(app_name, instance_path=INSTANCE_FOLDER_PATH, instance_relative_config=True)
configure_app(app, config)
configure_hook(app)
configure_blueprints(app)
configure_extensions(app)
configure_logging(app)
configure_template_filters(app)
configure_error_handlers(app)
configure_cli(app)
return app
| 5,336,875
|
def read_translocations_tumors(gene_A, gene_B,\
tumor_barcodes,\
data_location=default_location):
"""
    For a given pair of genes and a set of tumor barcodes, looks up which tumors carry the
    corresponding translocation in the TCGA dataset.
INPUT:
- gene_A (str): first gene of translocation
- gene_B (str): second gene of translocation
- tumor_barcodes (list): list of tumor barcodes
- data_location (str, optional): where data is located
OUTPUT:
- indicator list with 1 on tumor barcodes with a translocation
"""
translocated_genes = [gene_A, gene_B]
# Read data and filter
df = pd.read_csv(data_location, sep='\t')
df = df[np.isin(df.Gene_A, translocated_genes)]
df = df[np.isin(df.Gene_B, translocated_genes)]
# Common barcode length
barcode_length = np.unique([len(e) for e in df['sampleId'].values])
if barcode_length.shape[0] > 1:
        raise ValueError('File does not use a consistent barcode length')
barcode_length = barcode_length[0]
print(barcode_length)
# Map translocated tumors
translocated_barcodes = df['sampleId'].values.astype(str)
translocated_barcodes = [e.replace('.', '-') for e in translocated_barcodes]
print(translocated_barcodes)
translocated_tumors = np.where(np.isin([e[5:5+barcode_length] for e in tumor_barcodes], translocated_barcodes))
print(translocated_barcodes)
is_translocated = np.zeros(len(tumor_barcodes))
is_translocated[translocated_tumors] = 1
return is_translocated
| 5,336,876
|
def simplify_if_constant(symbol, keep_domains=False):
"""
    Utility function to simplify an expression tree if it evaluates to a constant
scalar, vector or matrix
"""
if keep_domains is True:
domain = symbol.domain
auxiliary_domains = symbol.auxiliary_domains
else:
domain = None
auxiliary_domains = None
if symbol.is_constant():
result = symbol.evaluate_ignoring_errors()
if result is not None:
if (
isinstance(result, numbers.Number)
or (isinstance(result, np.ndarray) and result.ndim == 0)
or isinstance(result, np.bool_)
):
return pybamm.Scalar(result)
elif isinstance(result, np.ndarray) or issparse(result):
if result.ndim == 1 or result.shape[1] == 1:
return pybamm.Vector(
result, domain=domain, auxiliary_domains=auxiliary_domains
)
else:
# Turn matrix of zeros into sparse matrix
if isinstance(result, np.ndarray) and np.all(result == 0):
result = csr_matrix(result)
return pybamm.Matrix(
result, domain=domain, auxiliary_domains=auxiliary_domains
)
return symbol
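# --- Hypothetical usage sketch (not part of the original source). It assumes a pybamm
# version compatible with the auxiliary_domains API used above; the printed form of the
# result may differ between versions.
import pybamm

expr = pybamm.Scalar(2) * pybamm.Scalar(3) + pybamm.Scalar(1)
print(simplify_if_constant(expr))   # expected to collapse to a single Scalar(7.0)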
| 5,336,877
|
def arith_ln_sub_div(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
alpha : DevicePointer, # (n) fp16
beta : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = (x - beta[None, :, None]) / alpha[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_sub_div(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(alpha),
ctypes.c_void_p(beta),
ctypes.c_void_p(out)
]
)
| 5,336,878
|
def overall_dist(df_train, df_test, target, path):
"""This function takes in the train and test dataframes and plots both
target distributions stacked in a histogram, It is saved to the path
----------
df_train : pandas dataframe
dataframe of train set
df_test : pandas dataframe
dataframe of test set
target : string
name of target column in dataframes
path : string
path where png file will be stored
"""
    target_train = df_train[str(target)].values.astype(float)
    target_test = df_test[str(target)].values.astype(float)
labels = ['train', 'test']
plt.hist([target_train, target_test],
label=labels, stacked=True) # add auto bin number
plt.ylabel('frequency')
plt.xlabel(f"{target}")
plt.legend(loc='upper right')
plt.grid(axis = 'y')
plt.savefig(f"{path}/overall_dist.png", bbox_inches='tight', dpi=1200)
plt.clf()
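# --- Hypothetical usage sketch (not part of the original source). It writes
# "figures/overall_dist.png" for two small synthetic frames; matplotlib, numpy and
# pandas are assumed to be imported at module level, as the function above requires.
import os
import numpy as np
import pandas as pd

os.makedirs("figures", exist_ok=True)
rng = np.random.default_rng(0)
df_train = pd.DataFrame({"price": rng.normal(100, 10, 500)})
df_test = pd.DataFrame({"price": rng.normal(102, 10, 200)})
overall_dist(df_train, df_test, "price", "figures")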
| 5,336,879
|
def is_visible(window):
"""
Check whether the window is visible or not.
"""
return lib.is_visible(window)
| 5,336,880
|
def activateMiracle(session, event, stdin_fd):
"""
Parameters
----------
session : ikabot.web.session.Session
event : multiprocessing.Event
stdin_fd: int
"""
sys.stdin = os.fdopen(stdin_fd)
try:
banner()
islands = obtainMiraclesAvailable(session)
if islands == []:
print(_('There are no miracles available.'))
enter()
event.set()
return
island = chooseIsland(islands)
if island is None:
event.set()
return
if island['available']:
print(_('\nThe miracle {} will be activated').format(island['wonderName']))
print(_('Proceed? [Y/n]'))
activate_miracle_input = read(values=['y', 'Y', 'n', 'N', ''])
if activate_miracle_input.lower() == 'n':
event.set()
return
miracle_activation_result = activateMiracleHttpCall(session, island)
if miracle_activation_result[1][1][0] == 'error':
print(_('The miracle {} could not be activated.').format(island['wonderName']))
enter()
event.set()
return
data = miracle_activation_result[2][1]
for elem in data:
if 'countdown' in data[elem]:
enddate = data[elem]['countdown']['enddate']
currentdate = data[elem]['countdown']['currentdate']
break
wait_time = enddate - currentdate
print(_('The miracle {} was activated.').format(island['wonderName']))
enter()
banner()
while True:
print(_('Do you wish to activate it again when it is finished? [y/N]'))
reactivate_again_input = read(values=['y', 'Y', 'n', 'N', ''])
if reactivate_again_input.lower() != 'y':
event.set()
return
iterations = read(msg=_('How many times?: '), digit=True, min=0)
if iterations == 0:
event.set()
return
duration = wait_time * iterations
print(_('It will finish in:{}').format(daysHoursMinutes(duration)))
print(_('Proceed? [Y/n]'))
reactivate_again_input = read(values=['y', 'Y', 'n', 'N', ''])
if reactivate_again_input.lower() == 'n':
banner()
continue
break
else:
print(_('\nThe miracle {} will be activated in {}').format(island['wonderName'], daysHoursMinutes(island['available_in'])))
print(_('Proceed? [Y/n]'))
user_confirm = read(values=['y', 'Y', 'n', 'N', ''])
if user_confirm.lower() == 'n':
event.set()
return
wait_time = island['available_in']
iterations = 1
            print(_('\nThe miracle will be activated.'))
enter()
banner()
while True:
print(_('Do you wish to activate it again when it is finished? [y/N]'))
reactivate_again_input = read(values=['y', 'Y', 'n', 'N', ''])
again = reactivate_again_input.lower() == 'y'
if again is True:
try:
iterations = read(msg=_('How many times?: '), digit=True, min=0)
except KeyboardInterrupt:
iterations = 1
break
if iterations == 0:
iterations = 1
break
iterations += 1
duration = wait_time * iterations
                print(_('It is not possible to calculate the exact finish time (it will take at least: {})').format(daysHoursMinutes(duration)))
print(_('Proceed? [Y/n]'))
try:
activate_input = read(values=['y', 'Y', 'n', 'N', ''])
except KeyboardInterrupt:
iterations = 1
break
if activate_input.lower() == 'n':
iterations = 1
banner()
continue
break
except KeyboardInterrupt:
event.set()
return
set_child_mode(session)
event.set()
info = _('\nI activate the miracle {} {:d} times\n').format(island['wonderName'], iterations)
setInfoSignal(session, info)
try:
do_it(session, island, iterations)
except:
msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
sendToBot(session, msg)
finally:
session.logout()
| 5,336,881
|
def fix_mol(
mol: Chem.rdchem.Mol,
n_iter: int = 1,
remove_singleton: bool = False,
largest_only: bool = False,
inplace: bool = False,
) -> Optional[Chem.rdchem.Mol]:
"""Fix error in molecule using a greedy approach.
Args:
mol: input molecule to fix
n_iter: Number of valence fix iteration to apply
remove_singleton: Whether `adjust_singleton` should be applied
largest_only: Whether only the largest fragment should be kept
inplace: Whether to return a copy of the mol or perform in place operation
Returns:
Fixed molecule.
"""
if not inplace:
mol = copy.copy(mol)
    m = sanitize_mol(mol) or mol  # fall back to the input mol when the fixer fails
if m is not None:
m = remove_dummies(m)
for _ in range(n_iter):
m = fix_valence(m)
if remove_singleton:
m = adjust_singleton(m)
if largest_only:
# m = max(Chem.rdmolops.GetMolFrags(m, asMols=True, sanitizeFrags=False), key=lambda m: m.GetNumAtoms())
m = rdMolStandardize.FragmentParent(m, skipStandardize=True)
return m
| 5,336,882
|
def permutation_circuit(swaps: Iterable[List[Swap[_V]]]) -> PermutationCircuit:
"""Produce a circuit description of a list of swaps.
With a given permutation and permuter you can compute the swaps using the permuter function
then feed it into this circuit function to obtain a circuit description.
Args:
swaps: An iterable of swaps to perform.
Returns:
A MappingCircuit with the circuit and a mapping of node to qubit in the circuit.
"""
# Construct a circuit with each unique node id becoming a quantum register of size 1.
dag = DAGCircuit()
swap_list = list(swaps)
# Set of unique nodes used in the swaps.
nodes = {
swap_node
for swap_step in swap_list
for swap_nodes in swap_step
for swap_node in swap_nodes
}
node_qargs = {node: QuantumRegister(1) for node in nodes}
for qubit in node_qargs.values():
dag.add_qreg(qubit)
inputmap = {node: q[0] for node, q in node_qargs.items()}
# Apply swaps to the circuit.
for swap_step in swap_list:
for swap0, swap1 in swap_step:
dag.apply_operation_back(SwapGate(), [inputmap[swap0], inputmap[swap1]])
return PermutationCircuit(dag, inputmap)
| 5,336,883
|
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
    service = build('calendar', 'v3', credentials=creds)
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print('Getting the upcoming 10 events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=10, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
print(start, event['summary'])
| 5,336,884
|
def create_release_branch(version, base_branch):
"""
checkout a new Git branch to make changes on and later tag as a release.
:param bytes version: The version of Flocker to create a release branch
for.
:param base_branch: See :func:`git.Head`. The branch to create the release
branch from.
"""
try:
base_branch.checkout(b='release/flocker-' + version)
except GitCommandError:
raise BranchExists()
| 5,336,885
|
def Add(a, b):
"""
Adds two numbers, throws on overflow.
"""
c = a + b
Require(c >= a)
return c
| 5,336,886
|
def remap_shared_output_descriptions(output_descriptions: Dict[str, str], outputs: Dict[str, Type]) -> Dict[str, str]:
"""
Deals with mixed styles of return value descriptions used in docstrings. If the docstring contains a single entry of return value description, that output description is shared by each output variable.
:param output_descriptions: Dict of output variable names mapping to output description
:param outputs: Interface outputs
:return: Dict of output variable names mapping to shared output description
"""
# no need to remap
if len(output_descriptions) != 1:
return output_descriptions
_, shared_description = next(iter(output_descriptions.items()))
return {k: shared_description for k, _ in outputs.items()}
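# --- Hypothetical usage sketch (not part of the original source); the variable names
# and output types are illustrative only.
descriptions = {"o0": "model predictions"}
outputs = {"labels": int, "scores": float}
print(remap_shared_output_descriptions(descriptions, outputs))
# {'labels': 'model predictions', 'scores': 'model predictions'}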
| 5,336,887
|
def dist_matrix():
"""Fix dist_matrix for the next two tests."""
dist_matrix = np.array([[0, 4, 5, 6], [4, 0, 7, 8], [5, 7, 0, 9], [6, 8, 9, 0]])
return dist_matrix
| 5,336,888
|
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
    fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
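# --- Hypothetical usage sketch (not part of the original source), exercising the
# string-prefix branch described in the docstring.
renamed = fitarg_rename({'x': 1, 'limit_x': (0, 2), 'fix_x': False, 'error_x': 0.1}, 'prefix')
print(renamed)
# {'prefix_x': 1, 'limit_prefix_x': (0, 2), 'fix_prefix_x': False, 'error_prefix_x': 0.1}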
| 5,336,889
|
def generate_file(filename, lines, columns):
"""Creates and propogates a file containing random dna strings.
This file can in turn be used for gss benchmarking.
Args:
- filename: The name of the file to the written.
- lines: The number of lines to be written to the file.
- columns: The width of each line to be written to the file.
"""
with open(filename, 'w+') as outfile:
for _ in range(lines):
line = random_string(columns)
outfile.write(line + '\n')
outfile.close()
| 5,336,890
|
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1)
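# --- Hypothetical usage sketch (not part of the original source); torch is assumed to
# be imported at module level, as the function above requires.
import torch

t = torch.arange(48, dtype=torch.float32).reshape(2, 3, 2, 2, 2)   # (N, C, D, H, W)
print(flatten(t).shape)                                            # torch.Size([3, 16])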
| 5,336,891
|
def file_to_jsobj(src,
chart_type=DFLT_CHART_TYPE,
enable_playback=DFLT_ENABLE_PLAYBACK,
height=DFLT_HEIGHT,
params=DFLT_PARAMS,
title=None,
subtitle='',
**kwargs
):
"""Renders a time visualization of a WAV file from its file.
:param src: The filepath str (or posix path) or file-like object (e.g. open file, or BytesIO object)
:param chart_type: The chart type to render, either 'peaks' (default) or 'spectrogram'
:param enable_playback: Whether to enable playback on double click (default True)
:param height: The height of the chart in pixels (default 50)
:param params: Extra rendering parameters, currently unused
:param title: The title to display, defaults to the filename
:param subtitle: An optional subtitle to display under the title
:param kwargs: extra kwargs to be passed on to Javascript object constructor
"""
import soundfile
wfsr = soundfile.read(src, dtype='int16')
if title is None and isinstance(src, str):
title = os.path.basename(src)
return wfsr_to_jsobj(wfsr,
chart_type=chart_type,
enable_playback=enable_playback,
height=height,
params=params,
title=title,
subtitle=subtitle,
**kwargs
)
| 5,336,892
|
def node_to_edge(edges, directed=True):
"""
From list of edges, record per node, incoming and outgoing edges
"""
outgoing = defaultdict(set)
incoming = defaultdict(set) if directed else outgoing
nodes = set()
for i, edge in enumerate(edges):
a, b, = edge[:2]
outgoing[a].add(i)
incoming[b].add(i)
nodes.add(a)
nodes.add(b)
nodes = sorted(nodes)
if directed:
return outgoing, incoming, nodes
return outgoing, nodes
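# --- Hypothetical usage sketch (not part of the original source); collections.defaultdict
# is assumed to be imported at module level, as the function above requires.
edges = [("a", "b"), ("b", "c"), ("a", "c")]
outgoing, incoming, nodes = node_to_edge(edges)
print(sorted(outgoing["a"]))   # edge indices leaving "a": [0, 2]
print(sorted(incoming["c"]))   # edge indices entering "c": [1, 2]
print(nodes)                   # ['a', 'b', 'c']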
| 5,336,893
|
def grouping_cumulative(df, col_index, col_column):
""" compute histogram statistic over selected column and in addition group this histograms
:param DataFrame df: rich table
:param str col_index: column which will be used s index in resulting table
:param str col_column: column used for computing a histogram
:return DF:
>>> np.random.seed(0)
>>> df = pd.DataFrame()
>>> df['result'] = np.random.randint(0, 2, 50)
>>> df['user'] = np.array(list('abc'))[np.random.randint(0, 3, 50)]
>>> grouping_cumulative(df, 'user', 'result').astype(int) # doctest: +NORMALIZE_WHITESPACE
0 1
user
a 10 12
b 4 9
c 6 9
"""
df_counts = pd.DataFrame()
for idx, dfg in df[[col_index, col_column]].groupby(col_index):
counts = dict(Counter(dfg[col_column]))
counts[col_index] = idx
df_counts = df_counts.append(counts, ignore_index=True)
df_counts.set_index(col_index, inplace=True)
return df_counts
| 5,336,894
|
def _get_matching_stream(smap, itag):
""" Return the url and signature for a stream matching itag in smap. """
for x in smap:
if x['itag'] == itag and x.get("s"):
return x['url'], x['s']
raise IOError("Sorry this video is not currently supported by pafy")
| 5,336,895
|
def Debounce(threshold=100):
"""
Simple debouncing decorator for apigpio callbacks.
Example:
`@Debouncer()
def my_cb(gpio, level, tick)
print('gpio cb: {} {} {}'.format(gpio, level, tick))
`
The threshold can be given to the decorator as an argument (in millisec).
This decorator can be used both on function and object's methods.
Warning: as the debouncer uses the tick from pigpio, which wraps around
after approximately 1 hour 12 minutes, you could theoretically miss one
call if your callback is called twice with that interval.
"""
threshold *= 1000
max_tick = 0xFFFFFFFF
class _decorated(object):
def __init__(self, pigpio_cb):
self._fn = pigpio_cb
self.last = 0
self.is_method = False
def __call__(self, *args, **kwargs):
if self.is_method:
tick = args[3]
else:
tick = args[2]
if self.last > tick:
delay = max_tick-self.last + tick
else:
delay = tick - self.last
if delay > threshold:
self._fn(*args, **kwargs)
print('call passed by debouncer {} {} {}'
.format(tick, self.last, threshold))
self.last = tick
else:
print('call filtered out by debouncer {} {} {}'
.format(tick, self.last, threshold))
def __get__(self, instance, type=None):
            # this is called when an instance of `_decorated` is used as a class
# attribute, which is the case when decorating a method in a class
self.is_method = True
return functools.partial(self, instance)
return _decorated
| 5,336,896
|
def doctest_LazyDate():
"""LazyDate fulfills its interface.
>>> from zope.interface.verify import verifyObject
>>> from cipher.lazydate import lazydate, interfaces
>>> lazy = lazydate.LazyDate("now")
>>> verifyObject(interfaces.ILazyDate, lazy)
True
    The string representation returns the stored string:
>>> print(lazy)
now
>>> lazy
LazyDate('now')
"""
| 5,336,897
|
def test_bad_config_section(mock_config):
"""Test that getting or setting a bad section gives an error."""
with pytest.raises(spack.config.ConfigSectionError):
spack.config.set('foobar', 'foobar')
with pytest.raises(spack.config.ConfigSectionError):
spack.config.get('foobar')
| 5,336,898
|
def get_folder(default_location, title_string=None):
"""Dialog box to browse to a folder. Returns folder path.
Usage: full_folder_name = get_folder(default_location, [title]),
where "default_location" is a starting folder location,
"title" is an optional message to list in the dialog box,
and "full_folder_name" is the complete selected folder name.
Written by Phil Wilmarth, 2008, 2016
"""
# set up GUI elements
root = tkinter.Tk()
root.withdraw()
try:
root.tk.call('console', 'hide')
except:
pass
# set default title string and location if not passed
if title_string is None:
title_string = 'Select a folder with desired files/dirs'
if not default_location:
default_location = os.getcwd()
# create dialog box for folder selection
root.update() # helps make sure dialog box goes away after selection
full_folder_name = filedialog.askdirectory(parent=root, initialdir=default_location,
title=title_string, mustexist=True)
# return full folder name
return full_folder_name
| 5,336,899
|