| content | id |
|---|---|
| stringlengths 22–815k | int64 0–4.91M |
def gen_csrv_msome(shape, n_parts, mic_rad, min_ip_dst):
"""
Generates a list of 3D coordinates and rotations following a CSRV pattern
:param shape: tomogram shape
:param n_parts: number of particles to try to generate
:param mic_rad: microsome radius
:param min_ip_dst: minimum interparticle distance
:param c_jump_prob: probability to create a new cluster, evaluated each time a particle is added [0, 1]
:return: two output lists; coordinates and rotations
"""
# Initialization
count = 0
min_ip_dst_2 = float(min_ip_dst) ** 2
locs, rots = list(), list()
mic_cent = .5 * np.asarray(shape, dtype=float)
mic_rad_f = float(mic_rad)
max_n_tries = np.prod(np.asarray(shape, dtype=int))
# Loop for particles
mic_end, n_try = False, 0
p_end = False
while not p_end:
p_cent = np.random.randn(1, 3)[0]
norm = mic_rad_f / np.linalg.norm(p_cent)
p_cent *= norm
p_cent += mic_cent
# Check that the particle is within the tomogram
if (p_cent[0] >= 0) and (p_cent[0] < shape[0]) \
and (p_cent[1] >= 0) and (p_cent[1] < shape[1]) \
and (p_cent[2] >= 0) and (p_cent[2] < shape[2]):
if len(locs) > 0:
# Check that the new particle does not overlap with other already inserted
hold_dst = p_cent - np.asarray(locs, dtype=float)
if np.all(np.sum(hold_dst * hold_dst, axis=1) >= min_ip_dst_2):
locs.append(p_cent)
tilt, psi = vect_to_zrelion(p_cent - mic_cent, mode='active')[1:]
rots.append((360. * np.random.rand() - 180., tilt, psi))
count += 1
else:
locs.append(p_cent)
tilt, psi = vect_to_zrelion(p_cent - mic_cent, mode='active')[1:]
rots.append((360. * np.random.rand() - 180., tilt, psi))
count += 1
# Ensure termination
n_try += 1
if (n_try > max_n_tries) or (count >= n_parts):
p_end = True
return locs, rots
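# Hedged, self-contained sketch of the sampling step above (numpy only; the toy numbers are assumptions):
# draw a Gaussian direction, rescale it to the microsome radius, then shift it to the tomogram centre.
demo_shape, demo_rad = (100, 100, 100), 25.0
demo_cent = .5 * np.asarray(demo_shape, dtype=float)
demo_dir = np.random.randn(3)
demo_point = demo_cent + demo_rad * demo_dir / np.linalg.norm(demo_dir)  # lies on the sphere surface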
| 5,341,000
|
def electra_model(request):
"""Exposes the command-line option to a test case."""
electra_model_path = request.config.getoption("--electra_model")
if not electra_model_path:
pytest.skip("No --electra_model given")
else:
return electra_model_path
| 5,341,001
|
def By_2d_approximation(x, w, d, j):
"""Approximation of By_surface valid except near edges of slab."""
mu0_over_4pi = 1e-7
return 2 * mu0_over_4pi * j * d * np.log((w/2 + x) / (w/2 - x))
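# Hedged worked example (the numbers are illustrative assumptions): x = 1 cm off-centre of a
# w = 10 cm wide, d = 1 mm thick slab carrying j = 1e6 A/m^2.
By_demo = By_2d_approximation(0.01, 0.10, 0.001, 1e6)
# ~= 2e-7 * 1e6 * 1e-3 * ln(0.06 / 0.04) ~= 8.1e-5 T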
| 5,341,002
|
def read_lists(paths: Dict[str, Path]) -> Dict[str, List[str]]:
"""Return a dictionary of song lists read from file.
Arguments:
paths {Dict[str, Path]} -- A dictionary of type returned by find_paths.
Returns:
Dict[str, List[str]] -- The keys are a string song list id ('1' to '6' or 'F'),
and the value lists contain the song keys to be written to that list.
"""
sl_dict: Dict[str, List[str]] = dict()
for song_list_id, file_path in paths.items():
logger.log_this(
f"Reading file '{file_path.name}'' for song list '{song_list_id}'."
)
with open(file_path, "rt", encoding="locale") as file_handle:
song_list = simplejson.load(file_handle)
# structure checks - could have used a schema for this.
# because I'm a bit lazy here, might also fail if a song key
# is pure digits and has been converted to a number on the way in
# We can tidy this up if it ever happens.
if not isinstance(song_list, list):
raise TypeError(
f"Invalid format in file '{file_path.name}'."
f"\n This should be a JSON list of strings, but I found "
f"a {type(song_list)}."
)
for val in song_list:
if not isinstance(val, str):
raise TypeError(
f"Invalid song list member in file '{file_path.name}'."
f"\n This should be a JSON list of strings, but I found "
f"a member with {type(val)}."
)
# just to be sure, clean out white space and empty strings silently.
song_list = [x for x in song_list if x.strip() != ""]
sl_dict[song_list_id] = song_list
logger.log_this("All song list files passed structure tests.")
return sl_dict
| 5,341,003
|
def learningCurve(theta, X_train, y_train, X_cv, y_cv, lambda_param):
"""
:param X_train:
:param y_train:
:param X_cv:
:param y_cv:
:param lambda_param:
:return:
"""
number_examples = y_train.shape[0]
J_train, J_cv = [], []
for i in range(1, number_examples + 1):
theta, _ = gradientDescent(theta, X_train[:i, :], y_train[:i, :], 0.001, 3000, lambda_param)
cost_train = linearRegressionCostFunction(theta, X_train[0:i, :], y_train[:i, :], lambda_param)
J_train.append(cost_train)
cost_cv = linearRegressionCostFunction(theta, X_cv, y_cv, lambda_param)
J_cv.append(cost_cv)
return J_train, J_cv
| 5,341,004
|
def reduce_labels(y):
"""Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe"""
labels = [] # new y
themes = []
disciplines = []
for i, elements in enumerate(y):
tmp_all_labels = []
tmp_themes = []
tmp_disciplines = []
#print("\nlabels in y an der Stelle %s: %s" % (i, elements))
for element in elements:
#print("\nLabel:", element)
# themes
for key, value in themes_dic.items():
if element == key:
tmp_all_labels.append(element)
tmp_themes.append(element)
#print("\nTheme key:", element)
elif element in value:
tmp_all_labels.append(key)
tmp_themes.append(key)
#print("\nTheme:", key)
else:
("Element nicht gefunden:", element)
# discipilnes
for key, value in disciplines_dic.items():
if element == key:
tmp_all_labels.append(element)
tmp_disciplines.append(element)
#print("\nDiscipline key:", element)
elif element in value:
tmp_all_labels.append(key)
tmp_disciplines.append(key)
#print("\nDiscipline:", key)
else:
("Element nicht gefunden:", element)
#print("\ntmp_list:", tmp_all_labels)
labels.append(list(set(tmp_all_labels)))
themes.append(list(set(tmp_themes)))
disciplines.append(list(set(tmp_disciplines)))
#print("\nnew labelset:", labels)
return labels, themes, disciplines
| 5,341,005
|
def api_get_categories(self):
"""
Gets a list of all the categories.
"""
response = TestCategory.objects.all()
s = ""
for cat in response:
s += b64(cat.name) + "," + b64(cat.description) + ","
return HttpResponse(s.rstrip(','))
| 5,341,006
|
def histtab(items, headers=None, item="item", count="count", percent="percent",
cols=None):
"""Make a histogram table."""
if cols is not None:
# items is a Table.
items = items.as_tuples(cols=cols)
if headers is None:
headers = cols + [count, percent]
if headers is None:
headers = [item, count, percent]
h = util.hist_dict(items)
tab = Table(headers=headers)
tot = float(sum(h.values()))
hist_items = list(h.items())
if cols is not None:
for key, val in hist_items:
row = dict(zip(cols, key))
row[count] = val
tab.append(row)
else:
for key, val in hist_items:
tab.append({item: key,
count: val})
if percent is not None:
for i, (key, val) in enumerate(hist_items):
tab[i][percent] = val / tot
tab.sort(col=count, reverse=True)
return tab
| 5,341,007
|
def read_sdf_to_mol(sdf_file, sanitize=False, add_hs=False, remove_hs=False):
"""Reads a list of molecules from an SDF file.
:param add_hs: Specifies whether to add hydrogens. Defaults to False
:type add_hs: bool
:param remove_hs: Specifies whether to remove hydrogens. Defaults to False
:type remove_hs: bool
:param sanitize: Specifies whether to sanitize the molecule. Defaults to False
:type sanitize: bool
:return: list of molecules in RDKit format.
:rtype: list[rdkit.Chem.rdchem.Mol]
"""
from rdkit import Chem
suppl = Chem.SDMolSupplier(sdf_file, sanitize=sanitize, removeHs=remove_hs)
molecules = [mol for mol in suppl]
if add_hs:
molecules = [Chem.AddHs(mol, addCoords=True) if mol is not None else None for mol in molecules]
return molecules
| 5,341,008
|
def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
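# Hedged usage sketch (synthetic data; modifies `elevation` in place):
demo_elevation = numpy.random.uniform(0.0, 1.0, (50, 50))
demo_ocean = demo_elevation < 0.5  # boolean ocean mask
harmonize_ocean(demo_ocean, demo_elevation, ocean_level=0.5)
# points below the midpoint are pulled up toward it, points between the midpoint and
# shallow_sea are pulled down toward it, flattening the floor around shallow_sea / 2.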
| 5,341,009
|
def gen_fulltest_buildfile_windows() -> None:
"""Generate fulltest command list for jenkins.
(so we see nice pretty split-up build trees)
"""
import batools.build
batools.build.gen_fulltest_buildfile_windows()
| 5,341,010
|
def search_and_score(milvus_collection_name, mongo_name, field_name, vectors,
topk, nprobe, inner_score_mode: str):
"""
search vectors from milvus and score by inner field score mode
:param milvus_collection_name: collection name will be search
:param mongo_name: mongo collection name will be selected from
:param field_name: field name for searching from mongodb
:param vectors: vectors which will be searched in milvus
:param topk: milvus topk number
:param nprobe: milvus nprobe number
:param inner_score_mode:
:return: image id of entity
"""
result_dbs = []
MAX_TOPK = 2048
magic_number = 60
increase_rate = 0.1
query_topk = topk + magic_number
end_flag = False
try:
inner_score_mode = InnerFieldScoreMode(inner_score_mode)
except Exception as e:
raise WrongInnerFieldModeError("Unsupported inner field mode", e)
while (len(result_dbs) < topk) and (not end_flag):
# check query topk max value
query_topk = min(query_topk, MAX_TOPK)
vids = MilvusIns.search_vectors(milvus_collection_name, vectors, topk=query_topk, nprobe=nprobe)
if len(vids) == 0:
raise NoneVectorError("milvus search result is None", "")
# filter -1 and if exist -1 or len(vids) < topk
if (-1 in vids.id_array[0]) or len(vids[0]) < query_topk:
end_flag = True
# inner field score function here
res_vids = get_inner_field_score_result(vids, query_topk, inner_score_mode)
if len(res_vids) < topk:
if query_topk < MAX_TOPK:
# compute a new query_topk; no need to query the database yet
query_topk += math.ceil(query_topk * increase_rate)
increase_rate *= 2
if not end_flag:
continue
end_flag = True
result_dbs = MongoIns.search_by_vector_id(mongo_name, field_name, res_vids)
# calc a new query_topk if len(result_dbs) < topk
query_topk += math.ceil(query_topk * increase_rate)
return result_dbs[:topk]
| 5,341,011
|
def fix_simulation():
""" Create instance of Simulation class."""
return Simulation()
| 5,341,012
|
def build_tree(vectors, algorithm='kd_tree', metric='minkowski', **kwargs):
"""Build NearestNeighbors tree."""
kwargs.pop('algorithm', None)
kwargs.pop('metric', None)
return NearestNeighbors(algorithm=algorithm, metric=metric,
**kwargs).fit(vectors)
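# Hedged usage sketch (assumes numpy as np and sklearn.neighbors.NearestNeighbors are imported in this module):
demo_pts = np.random.rand(100, 3)
demo_tree = build_tree(demo_pts, algorithm='kd_tree', metric='euclidean')
demo_dist, demo_idx = demo_tree.kneighbors(demo_pts[:5], n_neighbors=3)  # 3 nearest neighbours of the first 5 points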
| 5,341,013
|
def _expectedValues():
"""
These values are expected for well exposed spot data. The dictionary has a tuple for each wavelength.
Note that for example focus is data set dependent and should be used only as an indicator of a possible value.
keys: l600, l700, l800, l800l, l800m, l800h, l890
tuple = [radius, focus, widthx, widthy]
"""
out = dict(l600=(0.45, 0.40, 0.34, 0.32),
l700=(0.47, 0.40, 0.32, 0.31),
l800=(0.49, 0.41, 0.30, 0.30),
l800l=(0.49, 0.41, 0.27, 0.27),
l800m=(0.49, 0.41, 0.30, 0.30),
l800h=(0.49, 0.41, 0.31, 0.31),
l890=(0.54, 0.38, 0.29, 0.29))
return out
| 5,341,014
|
def storyOne(player):
"""First Story Event"""
player.story += 1
clear()
print("The dust gathers around, swirling, shaking, taking some sort of shape.")
time.sleep(2)
print("Its the bloody hermit again!")
time.sleep(2)
clear()
print("Hermit: Greetings, " + str(player.name) + ". It is good to see you.")
print(str(player.name) + ": Really? You still alive?")
time.sleep(5)
clear()
print("Hermit: Shut up.\n\nAlso, incidentally, I'm here to warn you. The world has noticed you... Your progress will become... Difficult.")
time.sleep(4)
clear()
print("Hermit: Now, a choice awaits you. I have the power to offer you a gift!")
time.sleep(2)
clear()
print("0: A better weapon.")
print("1: Better armor.")
print("2: A better enchantment.")
print("3: A rank increase.")
choice = input("Enter a number between 0 and 3: ")
if choice == "0":
player.weapon += 1
elif choice == "1":
player.armor += 1
elif choice == "2":
player.enchantment += 1
elif choice == "3":
player.level += 1
else:
pass
clear()
print("Hermit: Excellent!")
print(kill_hermit())
time.sleep(4)
clear()
return True
| 5,341,015
|
def test_compute_c_max_D():
"""Runs compute_c_max with isotope T and checks that the correct value is
produced
"""
# build
T = np.array([600, 500])
E_ion = np.array([20, 10])
E_atom = np.array([30, 40])
angles_ion = np.array([60, 60])
angles_atom = np.array([60, 60])
ion_flux = np.array([1e21, 1e20])
atom_flux = np.array([2e21, 2e20])
# run
c_max = divHretention.compute_c_max(
T, E_ion, E_atom, angles_ion, angles_atom,
ion_flux, atom_flux, full_export=False, isotope="T")
# test
D_0_W = 1.9e-7
E_D_W = 0.2
k_B = 8.617e-5
D = D_0_W*np.exp(-E_D_W/k_B/T)
D *= 1/3**0.5
# implantation ranges
implantation_range_ions = [
float(divHretention.implantation_range(energy, angle))
for energy, angle in zip(E_ion, angles_ion)]
implantation_range_atoms = [
float(divHretention.implantation_range(energy, angle))
for energy, angle in zip(E_atom, angles_atom)]
# reflection coefficients
reflection_coeff_ions = [
float(divHretention.reflection_coeff(energy, angle))
for energy, angle in zip(E_ion, angles_ion)]
reflection_coeff_atoms = [
float(divHretention.reflection_coeff(energy, angle))
for energy, angle in zip(E_atom, angles_atom)]
reflection_coeff_ions = np.array(reflection_coeff_ions)
reflection_coeff_atoms = np.array(reflection_coeff_atoms)
c_max_ions = (1 - reflection_coeff_ions) * \
ion_flux*implantation_range_ions/D
c_max_atoms = (1 - reflection_coeff_atoms) * \
atom_flux*implantation_range_atoms/D
c_max_expected = c_max_ions + c_max_atoms
assert np.allclose(c_max, c_max_expected)
| 5,341,016
|
def restart():
"""Restart beobench. This will stop any remaining running beobench containers."""
beobench.utils.restart()
| 5,341,017
|
def mcat(i):
"""Concatenate a list of matrices into a single matrix using separators
',' and ';'. The ',' means horizontal concatenation and the ';' means
vertical concatenation.
"""
if i is None:
return marray()
# calculate the shape
rows = [[]]
final_rows = 0
final_cols = 0
crows = ccols = 0
pos = []
pos2 = []
for x in i:
#if x == ';':
if x is Ellipsis:
rows.append([])
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
ccols = 0
pos.append(Ellipsis)
elif isinstance(x, mvar):
shp = x.msize
if len(shp) < 1: shp = [0]
if len(shp) < 2: shp += [0]
rows[-1].append(shp[0])
pos.append( (slice(final_rows, final_rows+shp[0]),
slice(ccols, ccols+shp[1])) )
crows = shp[0] # FIXME
ccols += shp[1]
elif _isscalar(x):
rows[-1].append(1)
pos.append( (final_rows, ccols) )
crows = 1
ccols += 1
else:
raise OMPCException("Unsupported type: %s!"%type(x))
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
out = empty((final_rows, final_cols), 'double')
for sl, x in _izip(pos, i):
if x is not Ellipsis:
if isinstance(x, mvar): x = x._a
out._a.__setitem__(sl[::-1], x)
#out._a.reshape(final_cols, final_rows).T.__setitem__(sl, x)
return out
| 5,341,018
|
def get_optional_info() -> Dict[str, Union[str, bool]]:
"""Get optional package info (tensorflow, pytorch, hdf5_bloscfilter, etc.)
Returns
-------
Dict[str, Union[str, bool]]
package name, package version (if installed, otherwise False)
"""
res = {}
try:
import h5py
bloscFilterAvail = h5py.h5z.filter_avail(32001)
except ImportError: # pragma: no cover
bloscFilterAvail = False
res['blosc-hdf5-plugin'] = bloscFilterAvail
try:
import torch
torchVersion = torch.__version__
except ImportError: # pragma: no cover
torchVersion = False
res['pytorch'] = torchVersion
try:
import tensorflow
tensorflowVersion = tensorflow.__version__
except ImportError: # pragma: no cover
tensorflowVersion = False
res['tensorflow'] = tensorflowVersion
return res
| 5,341,019
|
def epsilon_experiment(dataset, n: int, eps_values: list):
"""
Function for the experiment explained in part (g).
eps_values is a list, such as: [0.0001, 0.001, 0.005, 0.01, 0.05, 0.1, 1.0]
Returns the errors as a list: [9786.5, 1234.5, ...] such that 9786.5 is the error when eps = 0.0001,
1234.5 is the error when eps = 0.001, and so forth.
"""
timer_list = []
total_errors = []
non_private_histogram = get_histogram(dataset)
for epsilon in eps_values:
start = timeit.default_timer()
error_list = []
for _ in range(30):
dp_histogram = get_dp_histogram(dataset, n, epsilon)
av_error = calculate_average_error(non_private_histogram, dp_histogram)
error_list.append(av_error)
total_average_error = sum(error_list) / len(error_list)
total_errors.append(total_average_error)
stop = timeit.default_timer()
timer_list.append(stop-start)
return total_errors, timer_list
| 5,341,020
|
def infer_scaletype(scales):
"""Infer whether `scales` is linearly or exponentially distributed (if latter,
also infers `nv`). Used internally on `scales` and `ssq_freqs`.
Returns one of: 'linear', 'log', 'log-piecewise'
"""
scales = asnumpy(scales).reshape(-1, 1)
if not isinstance(scales, np.ndarray):
raise TypeError("`scales` must be a numpy array (got %s)" % type(scales))
elif scales.dtype not in (np.float32, np.float64):
raise TypeError("`scales.dtype` must be np.float32 or np.float64 "
"(got %s)" % scales.dtype)
th_log = 1e-15 if scales.dtype == np.float64 else 4e-7
th_lin = th_log * 1e3 # less accurate for some reason
if np.mean(np.abs(np.diff(scales, 2, axis=0))) < th_lin:
scaletype = 'linear'
nv = None
elif np.mean(np.abs(np.diff(np.log(scales), 2, axis=0))) < th_log:
scaletype = 'log'
# round to avoid faulty float-int roundoffs
nv = int(np.round(1 / np.diff(np.log2(scales), axis=0)[0]))
elif logscale_transition_idx(scales) is None:
raise ValueError("could not infer `scaletype` from `scales`; "
"`scales` array must be linear or exponential. "
"(got diff(scales)=%s..." % np.diff(scales, axis=0)[:4])
else:
scaletype = 'log-piecewise'
nv = nv_from_scales(scales)
return scaletype, nv
| 5,341,021
|
def authorized_http(credentials):
"""Returns an http client that is authorized with the given credentials.
Args:
credentials (Union[
google.auth.credentials.Credentials,
oauth2client.client.Credentials]): The credentials to use.
Returns:
Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
authorized http client.
"""
from googleapiclient.http import build_http
if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
if google_auth_httplib2 is None:
raise ValueError(
"Credentials from google.auth specified, but "
"google-api-python-client is unable to use these credentials "
"unless google-auth-httplib2 is installed. Please install "
"google-auth-httplib2."
)
return google_auth_httplib2.AuthorizedHttp(credentials, http=build_http())
else:
return credentials.authorize(build_http())
| 5,341,022
|
def add_grating_couplers_with_loopback_fiber_array(
component: Component,
grating_coupler: ComponentSpec = grating_coupler_te,
excluded_ports: Optional[List[str]] = None,
grating_separation: float = 127.0,
bend_radius_loopback: Optional[float] = None,
gc_port_name: str = "o1",
gc_rotation: int = -90,
straight_separation: float = 5.0,
bend: ComponentSpec = bend_euler,
straight: ComponentSpec = straight_function,
layer_label: Tuple[int, int] = (200, 0),
layer_label_loopback: Optional[Tuple[int, int]] = None,
component_name: Optional[str] = None,
with_loopback: bool = True,
nlabels_loopback: int = 2,
get_input_labels_function: Callable = get_input_labels,
cross_section: CrossSectionSpec = strip,
select_ports: Callable = select_ports_optical,
**kwargs,
) -> Component:
"""Returns a component with grating_couplers and loopback.
Args:
component: to add grating_couplers.
grating_coupler: grating_coupler.
excluded_ports: list of ports to exclude.
grating_separation: in um.
bend_radius_loopback: um.
gc_port_name: optional grating coupler name.
gc_rotation: grating coupler rotation in degrees.
straight_separation: in um.
bend: bend spec.
straight: straight spec.
layer_label: optional layer_label.
layer_label_loopback: optional layer_label for the loopback; defaults to layer_label.
component_name: optional component name.
with_loopback: If True, add compact loopback alignment ports.
nlabels_loopback: number of ports to label
(0: no labels, 1: first port, 2: both ports).
cross_section: CrossSectionSpec.
select_ports: function to select ports.
kwargs: cross_section settings
"""
x = gf.get_cross_section(cross_section, **kwargs)
bend_radius_loopback = bend_radius_loopback or x.radius
excluded_ports = excluded_ports or []
gc = gf.get_component(grating_coupler)
direction = "S"
component_name = component_name or component.metadata_child.get("name")
c = Component()
c.component = component
c.info["polarization"] = gc.info["polarization"]
c.info["wavelength"] = gc.info["wavelength"]
c.add_ref(component)
# Find grating port name if not specified
if gc_port_name is None:
gc_port_name = list(gc.ports.values())[0].name
# List the optical ports to connect
optical_ports = select_ports(component.ports)
optical_ports = list(optical_ports.values())
optical_ports = [p for p in optical_ports if p.name not in excluded_ports]
optical_ports = direction_ports_from_list_ports(optical_ports)[direction]
# Check if the ports are equally spaced
grating_separation_extracted = check_ports_have_equal_spacing(optical_ports)
if grating_separation_extracted != grating_separation:
raise ValueError(
f"Grating separation must be {grating_separation}. Got {grating_separation_extracted}"
)
# Add grating references
references = []
for port in optical_ports:
gc_ref = c.add_ref(gc)
gc_ref.connect(gc.ports[gc_port_name].name, port)
references += [gc_ref]
labels = get_input_labels_function(
io_gratings=references,
ordered_ports=optical_ports,
component_name=component_name,
layer_label=layer_label,
gc_port_name=gc_port_name,
)
c.add(labels)
if with_loopback:
y0 = references[0].ports[gc_port_name].y
xs = [p.x for p in optical_ports]
x0 = min(xs) - grating_separation
x1 = max(xs) + grating_separation
gca1, gca2 = [
gc.ref(position=(x, y0), rotation=gc_rotation, port_id=gc_port_name)
for x in [x0, x1]
]
gsi = gc.size_info
port0 = gca1.ports[gc_port_name]
port1 = gca2.ports[gc_port_name]
p0 = port0.position
p1 = port1.position
a = bend_radius_loopback + 0.5
b = max(2 * a, grating_separation / 2)
y_bot_align_route = -gsi.width - straight_separation
points = np.array(
[
p0,
p0 + (0, a),
p0 + (b, a),
p0 + (b, y_bot_align_route),
p1 + (-b, y_bot_align_route),
p1 + (-b, a),
p1 + (0, a),
p1,
]
)
bend90 = gf.get_component(
bend, radius=bend_radius_loopback, cross_section=cross_section, **kwargs
)
loopback_route = round_corners(
points=points,
bend=bend90,
straight=straight,
cross_section=cross_section,
**kwargs,
)
c.add([gca1, gca2])
c.add(loopback_route.references)
component_name_loopback = f"loopback_{component_name}"
if nlabels_loopback == 1:
io_gratings_loopback = [gca1]
ordered_ports_loopback = [port0]
if nlabels_loopback == 2:
io_gratings_loopback = [gca1, gca2]
ordered_ports_loopback = [port0, port1]
if nlabels_loopback == 0:
pass
elif 0 < nlabels_loopback <= 2:
c.add(
get_input_labels_function(
io_gratings=io_gratings_loopback,
ordered_ports=ordered_ports_loopback,
component_name=component_name_loopback,
layer_label=layer_label_loopback or layer_label,
gc_port_name=gc_port_name,
)
)
else:
raise ValueError(
f"Invalid nlabels_loopback = {nlabels_loopback}, "
"valid (0: no labels, 1: first port, 2: both ports2)"
)
c.copy_child_info(component)
return c
| 5,341,023
|
def enable_x64():
"""Use double (x64) precision for jax arrays"""
jax.config.update("jax_enable_x64", True)
| 5,341,024
|
def FEBA_Save_Tables(gene_fit_d, genes_df, organism_name_str,
op_dir, exps_df,
cfg=None,
writeImage=False, debug=False):
"""
Args:
gene_fit_d (python dict): Documentation above function
genes_df (pandas DataFrame): table genes.GC
organism_name_str (str): Name of organism
op_dir (str): Directory to write all saved tables and JSON to.
exps_df (pandas DataFrame): from FEBA.BarSeq
Must contain cols:
name
short
writeImage (bool): Should we save all the data in one image to
be easily imported into python/R?
Note:
We merge many dataframes on the locusId columns
"""
if cfg is None:
cfg = {
"strong_lr": 2,
"strong_t": 5
}
# Setting print options for debugging:
pd.set_option('display.max_columns', None)
if not os.path.isdir(op_dir):
os.mkdir(op_dir)
for expected_key in ["q","lr","lrn","lrn1","lrn2","t", "genesUsed","g", "lrNaive"]:
if expected_key not in gene_fit_d:
raise Exception(f"Missing expected key in gene_fit_d: {expected_key}")
for name in gene_fit_d['q']['name']:
if name not in gene_fit_d['lr'].columns:
raise Exception(f"Name {name} missing from 'lr' object.")
if name not in gene_fit_d['lrn'].columns:
raise Exception(f"Name {name} missing from 'lrn' object.")
for val in ["locusId", "sysName", "desc"]:
if val not in genes_df.columns:
raise Exception(f"Column name {val} not in genes_df")
# Preparing variables that make it simpler to create_tables
first3_cols = ["locusId", "sysName", "desc"]
genes_first3 = genes_df[first3_cols]
final_colnames = list(gene_fit_d['q']['name'] + ' ' + gene_fit_d['q']['short'])
# WRITING TABLES:
write_DataFrame_and_log(os.path.join(op_dir, "fit_quality.tsv"), gene_fit_d['q'], df_name="quality")
#2 Fit genes - All genes, with some having the used column = True
# used is a boolean list
used = [(genes_df['locusId'].iat[i] in gene_fit_d['genesUsed']) \
for i in range(len(genes_df['locusId']))]
new_genes_df = genes_df.copy(deep=True)
new_genes_df['used'] = used
write_DataFrame_and_log(os.path.join(op_dir, "fit_genes.tab"), new_genes_df, df_name = "Fit genes")
del new_genes_df, used
#3 Fit Log Ratios unnormalized
pre_merge = gene_fit_d['lr'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
# below how is 'inner' by default, which is the fitting merge type
tmp_df = genes_first3.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_unnormalized.tab"),
tmp_df, df_name = "log ratios unnormalized")
#4 Log Ratios Unnormalized Naive (Can put into 'extract...' function)
pre_merge = gene_fit_d['lrNaive'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
tmp_df = genes_first3.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_unnormalized_naive.tab"),
tmp_df, df_name = "log ratios unnormalized naive")
#5 Fit Logratios
pre_merge = gene_fit_d['lrn'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
tmp_df = genes_first3.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios.tab"),
tmp_df, df_name = "fit logratios")
#6 Fit Log Ratios 1st half (Can put into 'extract...' function)
pre_merge = gene_fit_d['lrn1'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
tmp_df = genes_first3.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_half1.tab"),
tmp_df, df_name = "fit logratios 1st half")
#7 Fit Log Ratios 2nd half (Can put into 'extract...' function)
pre_merge = gene_fit_d['lrn2'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
tmp_df = genes_first3.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_half2.tab"),
tmp_df, df_name = "fit logratios 2nd half")
print(genes_df)
#8 Fit Log Ratios Good (?)
genes_in_g_bool = [bool(genes_df['locusId'].iat[i] in gene_fit_d['g'].values) for i \
in range(genes_df.shape[0])]
f3col_genes_df = genes_df[first3_cols][genes_in_g_bool]
f3col_genes_df['comb'] = f3col_genes_df['sysName'] + ' ' + f3col_genes_df['desc']
tmp_df = f3col_genes_df.copy(deep=True)
# q is quality, u is used
if list(gene_fit_d['q']['u']).count(True) == 0:
logging.warning("***Warning: 0 'OK' experiments.")
tmp_new = tmp_df.sort_values(by='locusId')
else:
used_q_rows = gene_fit_d['q'][gene_fit_d['q']['u']]
used_names = used_q_rows['name']
lrn_copy = gene_fit_d['lrn'].copy(deep=True)
lrn_copy = lrn_copy[used_names]
lrn_copy['locusId'] = gene_fit_d['g']
tmp_new = tmp_df.merge(lrn_copy, on="locusId")
rename_columns = list(used_q_rows['name'] + ' ' + used_q_rows['short'])
rename_d = {val: rename_columns[ix] for ix, val in enumerate(list(tmp_new.columns[4:]))}
tmp_new = tmp_new.rename(columns=rename_d)
tmp_new = tmp_new.sort_values(by='locusId')
del lrn_copy
write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_good.tab"),
tmp_new, df_name = "fit logratios good")
del tmp_new
#9 Gene Counts
pre_merge = gene_fit_d['tot'].copy(deep=True)
pre_merge['locusId'] = gene_fit_d['g']
tmp_df = f3col_genes_df.merge(pre_merge, on="locusId")
write_DataFrame_and_log(os.path.join(op_dir, "gene_counts.tab"),
tmp_df, df_name = "gene counts")
#10 Fit T Scores
extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['t'],
gene_fit_d['g'],
genes_first3,
final_colnames,
os.path.join(op_dir, "fit_t.tab"),
"fit t")
#11 Fit standard error
extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['se'],
gene_fit_d['g'],
genes_first3,
final_colnames,
os.path.join(op_dir, "fit_standard_error_obs.tab"),
"fit standard error")
#12 Fit Standard Error Naive
extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['sdNaive'],
gene_fit_d['g'],
genes_first3,
final_colnames,
os.path.join(op_dir, "fit_standard_error_naive.tab"),
"fit standard error naive")
#13 Strain Fit
logging.info("Getting order of scaffolds to print Strain Fit.")
tmp_df = gene_fit_d['strains'].join(gene_fit_d['strain_lrn'])
tmp_df = tmp_df.sort_values(by=['scaffold', 'pos'])
write_DataFrame_and_log(os.path.join(op_dir,"strain_fit.tab"),
tmp_df,
df_name="Strain Fit")
#14 expsUsed (subset of original exps file with used experiments
write_DataFrame_and_log(os.path.join(op_dir,"expsUsed.tab"),
exps_df,
df_name="expsUsed")
#15 Cofit
if 'cofit' in gene_fit_d and gene_fit_d['cofit'] is not None:
# Why do we repeat the three columns sysName, locusId and desc
# with hitSysName, hitId, and hitDesc etc?
tmp_df = f3col_genes_df.merge(gene_fit_d['cofit'], on="locusId")
pre_merge_df = pd.DataFrame.from_dict({
"hitId" : genes_df["locusId"],
"hitSysName" : genes_df["sysName"],
"hitDesc" : genes_df["desc"]
})
tmp_df = tmp_df.merge(pre_merge_df)
tmp_df.sort_values(by=["locusId", "rank"], inplace=True, axis=0)
else:
logging.warning("Cofit not found in gene_fit_d")
tmp_df = pd.DataFrame.from_dict({
"locusId": [""],
"sysName": [""],
"desc": [""],
"cofit": [""],
"rank":[""],
"hitId": [""],
"hitSysName": [""],
"hitDesc": [""]
})
write_DataFrame_and_log(os.path.join(op_dir, "cofit.tab"),
tmp_df,
df_name="cofit")
#16 specphe - specific phenotypes
if "specphe" in gene_fit_d and gene_fit_d["specphe"] is not None:
#print(f3col_genes_df)
#print(f3col_genes_df.dtypes)
#print(gene_fit_d['specphe'])
#print(gene_fit_d['specphe'].dtypes)
tmp_df = f3col_genes_df.merge(gene_fit_d['specphe'], on="locusId")
else:
tmp_df = pd.DataFrame.from_dict({
"locusId": [""],
"sysName": [""],
"desc": [""],
"short": [""],
"Group": [""],
"Condition_1": [""],
"Concentraion_1": [""],
"Units_1": [""],
"Condition_2": [""],
"Concentration_2": [""],
"Units_2": [""],
})
print(tmp_df.head(6))
write_DataFrame_and_log(os.path.join(op_dir, "specific_phenotypes.tab"),
tmp_df,
df_name="specific phenotypes")
# 17 Strong -
# We create the dataframe 'strong.tab'
# we find which normalized log ratios are greater than 2 e.g. and
# 't' scores are greater than 5 e.g.
create_strong_tab(gene_fit_d, genes_df, exps_df, op_dir,
strong_lr=cfg["strong_lr"], strong_t=cfg["strong_t"],
debug=debug)
#18 High
# High Fitness
if "high" in gene_fit_d:
write_DataFrame_and_log(os.path.join(op_dir, "high_fitness.tab"),
gene_fit_d['high'],
df_name="high fitness")
#19 HTML Info
html_info_d = {
"organism_name": organism_name_str,
"number_of_experiments": len(gene_fit_d['q']['short']) - \
list(gene_fit_d['q']['short']).count("Time0"),
"number_of_successes": list(gene_fit_d['q']['u']).count(True),
"version": gene_fit_d['version'],
"date": str(datetime.now())
}
with open(os.path.join(op_dir, "html_info.json"), 'w') as g:
g.write(json.dumps(html_info_d, indent=2))
logging.info("Finished exporting all tables and files to " + op_dir)
return 0
| 5,341,025
|
def auto_update_function(cities):
"""Auto-update weather function
The function takes a list of the cities to update.
If there is an error connecting to the sources, it returns an error with
status 500 and JSON giving the cause of the error and the URL.
If the connection is successful, it enters the
data into the database and returns an empty response with code 200.
"""
try:
connect = psycopg2.connect(database = 'django_test', user = 'roman',
host = 'localhost', password = 'admin')
cursor = connect.cursor()
cursor.execute(
'SELECT city_name FROM frontend_city;'
)
utc_timezone = pytz.timezone('UTC')
#read current city list from database
cities_list = []
cities_cursor = cursor.fetchall()
#list of tuple to just list
for i in range(len(cities_cursor)):
cities_list.append(cities_cursor[i][0])
for i in range(len(cities)):
yandex_value = yandex(cities[i])
open_weather_value = open_weather_map(cities[i])
# error in yandex source
if type(yandex_value[0]) == error.HTTPError:
data = {
'Error': 'Error in auto update function.',
'Time': str(datetime.datetime.now(utc_timezone)),
'Reason': '{}. Please, check url: {}'.format(yandex_value[0], yandex_value[1])
}
json_data_error = json.dumps(data)
response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8')
return response
# error in open weather source
elif (type(open_weather_value[0]) == error.HTTPError):
data = {
'Error': 'Error in auto update function.',
'Time': str(datetime.datetime.now(utc_timezone)),
'Reason': '{}. Please, check url: {}'.format(open_weather_value[0], open_weather_value[1])
}
json_data_error = json.dumps(data)
response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8')
return response
#If the city has not been checked before
elif (cities[i] not in cities_list):
cursor.execute("INSERT INTO frontend_city (city_name) values ('{}');".format(cities[i]))
connect.commit()
data = {
'Yandex': str(yandex_value[0]),
'Open weather': str(open_weather_value[0])
}
cursor.execute("SELECT id FROM frontend_city WHERE city_name = '{}';".format(cities[i]))
city_id = cursor.fetchall()
city_id = city_id[0][0]
json_data = json.dumps(data)
cursor.execute(
"INSERT INTO frontend_history (city_id, temp_values, created) \
VALUES ({},'{}', '{}');".format(city_id, json_data,
datetime.datetime.now(utc_timezone)))
connect.commit()
connect.close()
response = HttpResponse(status=200, content_type='text/html', charset='utf-8')
return response
except Exception as connection_db_error:
data = {
'Error': 'Error in auto update function.',
'Time': str(datetime.datetime.now(utc_timezone)),
'Reason': '{}'.format(connection_db_error)
}
json_data_error = json.dumps(data)
response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8')
return response
connect.close()
| 5,341,026
|
def absolute_sum_of_changes(x):
"""
Returns the sum over the absolute value of consecutive changes in the series x
.. math::
\\sum_{i=1, \\ldots, n-1} \\mid x_{i+1} - x_i \\mid
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: float
"""
return np.sum(abs(np.diff(x)))
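# Hedged usage example (assumes pandas is imported as pd alongside numpy):
demo_series = pd.Series([1, 4, 2, 2, 7])
absolute_sum_of_changes(demo_series)  # |4-1| + |2-4| + |2-2| + |7-2| = 10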
| 5,341,027
|
def handle_program_options():
"""
Uses the built-in argparse module to handle command-line options for the
program.
:return: The gathered command-line options specified by the user
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \
derived data files for use with the \
metagenomics analysis program QIIME, by \
extracting Sample ID information, adding\
barcodes and primers to the sequence \
data, and outputting a mapping file and\
single FASTA-formatted sequence file \
formed by concatenating all input data.")
parser.add_argument('-i', '--input_dir', required=True,
help="The directory containing sequence data files. \
Assumes all data files are placed in this \
directory. For files organized within folders by\
sample, use -s in addition.")
parser.add_argument('-m', '--map_file', default='map.txt',
help="QIIME-formatted mapping file linking Sample IDs \
with barcodes and primers.")
parser.add_argument('-o', '--output', default='output.fasta',
metavar='OUTPUT_FILE',
help="Single file containing all sequence data found \
in input_dir, FASTA-formatted with barcode and \
primer preprended to sequence. If the -q option \
is passed, any quality data will also be output \
to a single file of the same name with a .qual \
extension.")
parser.add_argument('-b', '--barcode_length', type=int, default=12,
help="Length of the generated barcode sequences. \
Default is 12 (QIIME default), minimum is 8.")
parser.add_argument('-q', '--qual', action='store_true', default=False,
help="Instruct the program to look for quality \
input files")
parser.add_argument('-u', '--utf16', action='store_true', default=False,
help="UTF-16 encoded input files")
parser.add_argument('-t', '--treatment',
help="Inserts an additional column into the mapping \
file specifying some treatment or other variable\
that separates the current set of sequences \
from any other set of sequences. For example:\
-t DiseaseState=healthy")
# data input options
sidGroup = parser.add_mutually_exclusive_group(required=True)
sidGroup.add_argument('-d', '--identifier_pattern',
action=ValidateIDPattern,
nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'),
help="Indicates how to extract the Sample ID from \
the description line. Specify two things: \
1. Field separator, 2. Field number of Sample \
ID (1 or greater). If the separator is a space \
or tab, use \s or \\t respectively. \
Example: >ka-SampleID-2091, use -d - 2, \
indicating - is the separator and the Sample ID\
is field #2.")
sidGroup.add_argument('-f', '--filename_sample_id', action='store_true',
default=False, help='Specify that the program should use \
the name of each fasta file as the Sample ID for use\
in the mapping file. This is meant to be used when \
all sequence data for a sample is stored in a single\
file.')
return parser.parse_args()
| 5,341,028
|
def start_v_imp(model, lval: str, rval: str):
"""
Calculate starting value for parameter in data given data in model.
For Imputer -- just copies values from original data.
Parameters
----------
model : Model
Model instance.
lval : str
L-value name.
rval : str
R-value name.
Returns
-------
float
Starting value.
"""
mx = model.mod.mx_v
rows, cols = model.mod.names_v
i, j = rows.index(lval), cols.index(rval)
v = mx[i, j]
return v
| 5,341,029
|
async def websocket_network_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get the status of the Z-Wave JS network."""
data = {
"client": {
"ws_server_url": client.ws_server_url,
"state": "connected" if client.connected else "disconnected",
"driver_version": client.version.driver_version,
"server_version": client.version.server_version,
},
"controller": {
"home_id": client.driver.controller.data["homeId"],
"nodes": list(client.driver.controller.nodes),
},
}
connection.send_result(
msg[ID],
data,
)
| 5,341,030
|
def get_image_features(X, y, appearance_dim=32):
"""Return features for every object in the array.
Args:
X (np.array): a 3D numpy array of raw data of shape (x, y, c).
y (np.array): a 3D numpy array of integer labels of shape (x, y, 1).
appearance_dim (int): The resized shape of the appearance feature.
Returns:
dict: A dictionary of feature names to np.arrays of shape
(n, c) or (n, x, y, c) where n is the number of objects.
"""
appearance_dim = int(appearance_dim)
# each feature will be ordered based on the label.
# labels are also stored and can be fetched by index.
num_labels = len(np.unique(y)) - 1
labels = np.zeros((num_labels,), dtype='int32')
centroids = np.zeros((num_labels, 2), dtype='float32')
morphologies = np.zeros((num_labels, 3), dtype='float32')
appearances = np.zeros((num_labels, appearance_dim,
appearance_dim, X.shape[-1]), dtype='float32')
# iterate over all objects in y
props = regionprops(y[..., 0], cache=False)
for i, prop in enumerate(props):
# Get label
labels[i] = prop.label
# Get centroid
centroid = np.array(prop.centroid)
centroids[i] = centroid
# Get morphology
morphology = np.array([
prop.area,
prop.perimeter,
prop.eccentricity
])
morphologies[i] = morphology
# Get appearance
minr, minc, maxr, maxc = prop.bbox
appearance = np.copy(X[minr:maxr, minc:maxc, :])
resize_shape = (appearance_dim, appearance_dim)
appearance = resize(appearance, resize_shape)
appearances[i] = appearance
# Get adjacency matrix
# distance = cdist(centroids, centroids, metric='euclidean') < distance_threshold
# adj_matrix = distance.astype('float32')
return {
'appearances': appearances,
'centroids': centroids,
'labels': labels,
'morphologies': morphologies,
# 'adj_matrix': adj_matrix,
}
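# Hedged usage sketch (assumes skimage's regionprops/resize are imported here, as the code above implies):
demo_X = np.random.rand(64, 64, 1).astype('float32')
demo_y = np.zeros((64, 64, 1), dtype='int32')
demo_y[5:15, 5:15, 0] = 1
demo_y[30:45, 30:45, 0] = 2
demo_feats = get_image_features(demo_X, demo_y, appearance_dim=16)
# demo_feats['appearances'].shape == (2, 16, 16, 1); demo_feats['labels'] -> array([1, 2])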
| 5,341,031
|
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
CTB -- swiped from Python 2.5, module 'dis', so that earlier versions
of Python could use the function, too.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
| 5,341,032
|
def test_broken_yaml_header(testdata_dir: pathlib.Path) -> None:
"""Test for a bad markdown header."""
bad_file = testdata_dir / 'author' / 'bad_md_header.md'
with pytest.raises(TrestleError):
ControlIOReader._load_control_lines_and_header(bad_file)
| 5,341,033
|
def deactivate_text(shell: dict, env_vars: dict) -> str:
"""Returns the formatted text to write to the deactivation script
based on the passed dictionaries."""
lines = [shell["shebang"]]
for k in env_vars.keys():
lines.append(shell["deactivate"].format(k))
return "\n".join(lines)
| 5,341,034
|
def is_dict(etype) -> bool:
""" Determine whether etype is a Dict """
return get_origin(etype) is dict or etype is dict
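# Hedged usage example (assumes get_origin comes from typing, as the code implies):
from typing import Dict, List
is_dict(Dict[str, int])  # True
is_dict(dict)            # True
is_dict(List[int])       # False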
| 5,341,035
|
def t_plot_parameters(thickness_curve, section, loading, molar_mass, liquid_density):
"""Calculates the parameters from a linear section of the t-plot."""
slope, intercept, corr_coef, p, stderr = scipy.stats.linregress(
thickness_curve[section],
loading[section])
# Check if slope is good
if slope * (max(thickness_curve) / max(loading)) < 3:
adsorbed_volume = intercept * molar_mass / liquid_density
area = slope * molar_mass / liquid_density * 1000
result_dict = {
'section': section,
'slope': slope,
'intercept': intercept,
'corr_coef': corr_coef,
'adsorbed_volume': adsorbed_volume,
'area': area,
}
return result_dict
return None
| 5,341,036
|
def main():
"""主函数
"""
temp_str = input("1. 测试\n2. raw data\n3. json文件绝对路径\n")
if temp_str == '1':
test_file = open('./resources/test.json', mode='r')
raw_data = test_file.read()
print_json(raw_data)
test_file.close()
elif temp_str == '2':
raw_data = input("给定json数据:\n")
print_json(raw_data)
elif temp_str == '3':
file_absolute_path = input("Enter the absolute path:\n")
rd_file = open(file_absolute_path, mode='r')
raw_data = rd_file.read()
print_json(raw_data)
rd_file.close()
else:
print("再来一遍")
main()
| 5,341,037
|
def get_data(stock, start_date):
"""Fetch a maximum of the 100 most recent records for a given
stock starting at the start_date.
Args:
stock (string): Stock Ticker
start_date (int): UNIX date time
"""
# Build the query string
request_url = f"https://api.pushshift.io/reddit/search/comment/?q={stock}&sort=asc&size=100&after={start_date}"
# get the query and convert to json
result_json = requests.get(request_url).json()
return result_json
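# Hedged usage sketch (makes a live HTTP call to the Pushshift API; the ticker and date are illustrative):
demo_start = int(datetime.datetime(2021, 1, 1).timestamp())  # assumes `datetime` is imported
demo_result = get_data("GME", demo_start)
# the response is expected to carry the comments under a "data" key, e.g. demo_result["data"]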
| 5,341,038
|
def depart_people(state, goal):
"""Departs all passengers that can depart on this floor"""
departures = []
for departure in state.destin.items():
passenger = departure[0]
if passenger in goal.served and goal.served[passenger]:
floor = departure[1]
if state.lift_at == floor and state.boarded[passenger] and not state.served[passenger]:
departures.append(('depart', passenger, state.lift_at))
return departures
| 5,341,039
|
def get_topic_for_subscribe():
"""
return the topic string used to subscribe for receiving future responses from DPS
"""
return _get_topic_base() + "res/#"
| 5,341,040
|
def generate_AES_key(bytes = 32):
"""Generates a new AES key
Parameters
----------
bytes : int
number of bytes in key
Returns
-------
key : bytes
"""
try:
from Crypto import Random
return Random.get_random_bytes(bytes)
except ImportError:
print('PyCrypto not installed. Reading from /dev/random instead')
with open('/dev/random', 'rb') as rand:
return rand.read(bytes)
| 5,341,041
|
def remove_deploy_networkIPv6_configuration(user, networkipv6, equipment_list):
"""Loads template for removing Network IPv6 equipment configuration, creates file and
apply config.
Args: NetworkIPv6 object
Equipamento objects list
Returns: List with status of equipments output
"""
data = dict()
# lock network id to prevent multiple requests to same id
with distributedlock(LOCK_NETWORK_IPV6 % networkipv6.id):
with distributedlock(LOCK_VLAN % networkipv6.vlan.id):
if networkipv6.active == 0:
data['output'] = 'Network already not active. Nothing to do.'
return data
# load dict with all equipment attributes
dict_ips = get_dict_v6_to_use_in_configuration_deploy(
user, networkipv6, equipment_list)
status_deploy = dict()
# TODO implement threads
for equipment in equipment_list:
# generate config file
file_to_deploy = _generate_config_file(
dict_ips, equipment, TEMPLATE_NETWORKv6_DEACTIVATE)
# deploy config file in equipments
lockvar = LOCK_EQUIPMENT_DEPLOY_CONFIG_NETWORK_SCRIPT % (
equipment.id)
status_deploy[equipment.id] = deploy_config_in_equipment_synchronous(
file_to_deploy, equipment, lockvar)
networkipv6.deactivate(user)
transaction.commit()
if networkipv6.vlan.ativada == 1:
# if there are no other networks active in vlan, remove int
# vlan
if not _has_active_network_in_vlan(networkipv6.vlan):
# remove int vlan
for equipment in equipment_list:
if equipment.maintenance is not True:
status_deploy[
equipment.id] += _remove_svi(equipment, networkipv6.vlan.num_vlan)
networkipv6.vlan.remove(user)
return status_deploy
| 5,341,042
|
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
Parameters
----------
x : str
Candidate file path
Returns
-------
str
Validated path
"""
if not os.path.isfile(x):
# ArgumentTypeError gives a rejection message of the form:
# error: argument input: <passed error message>
if os.path.exists(x):
raise argparse.ArgumentTypeError("{0} is not a file".format(x))
else:
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return str(x)
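# Hedged usage sketch: plug extant_file in as an argparse type so bad paths fail at parse time.
demo_parser = argparse.ArgumentParser()
demo_parser.add_argument("--config", type=extant_file, help="path to an existing file")
# demo_parser.parse_args(["--config", "settings.ini"])  # errors unless settings.ini exists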
| 5,341,043
|
def can_pay_with_two_coins(denoms, amount):
""" (list of int, int) -> bool
Return True if and only if it is possible to form amount, which is a
number of cents, using exactly two coins, which can be of any of the
denominations in denoms.
>>> can_pay_with_two_coins([1, 5, 10, 25], 35)
True
>>> can_pay_with_two_coins([1, 5, 10, 25], 20)
True
>>> can_pay_with_two_coins([1, 5, 10, 25], 12)
False
"""
for i in range(len(denoms)):
for j in range(len(denoms)):
if denoms[i] + denoms[j] == amount:
return True
return False
| 5,341,044
|
def make_column_kernelizer(*transformers, **kwargs):
"""Construct a ColumnKernelizer from the given transformers.
This is a shorthand for the ColumnKernelizer constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting with ``transformer_weights``.
Parameters
----------
*transformers : tuples
Tuples of the form (transformer, columns) specifying the
transformer objects to be applied to subsets of the data.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support ``fit`` and ``transform``.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively. If the transformer does not return a
kernel (as informed by the attribute kernelizer=True), a linear
kernelizer is applied after the transformer.
columns : str, array-like of str, int, array-like of int, slice, \
array-like of bool or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support ``fit`` and ``transform``.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
n_jobs does not work with GPU backends.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
column_kernelizer : ColumnKernelizer
See also
--------
himalaya.kernel_ridge.ColumnKernelizer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> import numpy as np
>>> from himalaya.kernel_ridge import make_column_kernelizer
>>> from himalaya.kernel_ridge import Kernelizer
>>> ck = make_column_kernelizer(
... (Kernelizer(kernel="linear"), [0, 1, 2]),
... (Kernelizer(kernel="polynomial"), slice(3, 5)))
>>> X = np.array([[0., 1., 2., 2., 3.],
...               [0., 2., 0., 0., 3.],
...               [0., 0., 1., 0., 3.],
...               [1., 1., 0., 1., 2.]])
>>> # Kernelize separately the first three columns and the last two
>>> # columns, creating two kernels of shape (n_samples, n_samples).
>>> ck.fit_transform(X).shape
(2, 4, 4)
"""
# transformer_weights keyword is not passed through because the user
# would need to know the automatically generated names of the transformers
n_jobs = kwargs.pop('n_jobs', None)
remainder = kwargs.pop('remainder', 'drop')
verbose = kwargs.pop('verbose', False)
if kwargs:
raise TypeError('Unknown keyword arguments: "{}"'.format(
list(kwargs.keys())[0]))
transformer_list = _get_transformer_list(transformers)
return ColumnKernelizer(transformer_list, n_jobs=n_jobs,
remainder=remainder, verbose=verbose)
| 5,341,045
|
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {
'name': 'PT100 Poll Plugin',
'version': '1.9.2',
'mode': 'poll',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
| 5,341,046
|
def test_no_celery_task():
"""
If the 'CELERY_TASK_NAME' env var is not set, no tasks are sent to the
Celery queue.
"""
(
flexmock(os)
.should_receive("getenv")
.with_args("CELERY_TASK_NAME")
.and_return(None)
.once()
)
flexmock(celery_app).should_receive("send_task").never()
updater = Updater(configuration=flexmock())
updater._create_task(flexmock(), "branch", "some_commit")
| 5,341,047
|
def get_engine(isolation_level=None):
"""
Creates an engine with the given isolation level.
"""
# creates a shallow copy with the given isolation level
if not isolation_level:
return _get_base_engine()
else:
return _get_base_engine().execution_options(isolation_level=isolation_level)
| 5,341,048
|
def phantomjs_driver(capabilities, driver_path, port):
"""
Overrides default `phantomjs_driver` driver from pytest-selenium.
Default implementation uses ephemeral ports just as our tests but
it doesn't provide any way to configure them, for this reason we basically
recreate the driver fixture using port fixture.
"""
kwargs = {}
if capabilities:
kwargs['desired_capabilities'] = capabilities
if driver_path is not None:
kwargs['executable_path'] = driver_path
kwargs['port'] = port.get()
from selenium.webdriver import PhantomJS
return PhantomJS(**kwargs)
| 5,341,049
|
def to_TH1x(
fName,
fTitle,
data,
fEntries,
fTsumw,
fTsumw2,
fTsumwx,
fTsumwx2,
fSumw2,
fXaxis,
fYaxis=None,
fZaxis=None,
fNcells=None,
fBarOffset=0,
fBarWidth=1000,
fMaximum=-1111.0,
fMinimum=-1111.0,
fNormFactor=0.0,
fContour=None,
fOption="",
fFunctions=None,
fBufferSize=0,
fBuffer=None,
fBinStatErrOpt=0,
fStatOverflows=2,
fLineColor=602,
fLineStyle=1,
fLineWidth=1,
fFillColor=0,
fFillStyle=1001,
fMarkerColor=1,
fMarkerStyle=1,
fMarkerSize=1.0,
):
"""
Args:
fName (None or str): Temporary name, will be overwritten by the writing
process because Uproot's write syntax is ``file[name] = histogram``.
fTitle (str): Real title of the histogram.
data (numpy.ndarray or :doc:`uproot.models.TArray.Model_TArray`): Bin contents
with first bin as underflow, last bin as overflow. The dtype of this array
determines the return type of this function (TH1C, TH1D, TH1F, TH1I, or TH1S).
fEntries (float): Number of entries. (https://root.cern.ch/doc/master/classTH1.html)
fTsumw (float): Total Sum of weights.
fTsumw2 (float): Total Sum of squares of weights.
fTsumwx (float): Total Sum of weight*X.
fTsumwx2 (float): Total Sum of weight*X*X.
fSumw2 (numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array
of sum of squares of weights.
fXaxis (:doc:`uproot.models.TH.Model_TAxis_v10`): Use :doc:`uproot.writing.identify.to_TAxis`
with ``fName="xaxis"`` and ``fTitle=""``.
fYaxis (None or :doc:`uproot.models.TH.Model_TAxis_v10`): None generates a
default for 1D histograms.
fZaxis (None or :doc:`uproot.models.TH.Model_TAxis_v10`): None generates a
default for 1D and 2D histograms.
fNcells (None or int): Number of bins(1D), cells (2D) +U/Overflows. Computed
from ``data`` if None.
fBarOffset (int): (1000*offset) for bar charts or legos
fBarWidth (int): (1000*width) for bar charts or legos
fMaximum (float): Maximum value for plotting.
fMinimum (float): Minimum value for plotting.
fNormFactor (float): Normalization factor.
fContour (None or numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array
to display contour levels. None generates an empty array.
fOption (str or :doc:`uproot.models.TString.Model_TString`): Histogram options.
fFunctions (None, list, or :doc:`uproot.models.TList.Model_TList`): ->Pointer to
list of functions (fits and user). None generates an empty list.
fBufferSize (None or int): fBuffer size. Computed from ``fBuffer`` if None.
fBuffer (None or numpy.ndarray of numpy.float64): Buffer of entries accumulated
before automatically choosing the binning. (Irrelevant for serialization?)
None generates an empty array.
fBinStatErrOpt (int): Option for bin statistical errors.
fStatOverflows (int): Per object flag to use under/overflows in statistics.
fLineColor (int): Line color. (https://root.cern.ch/doc/master/classTAttLine.html)
fLineStyle (int): Line style.
fLineWidth (int): Line width.
fFillColor (int): Fill area color. (https://root.cern.ch/doc/master/classTAttFill.html)
fFillStyle (int): Fill area style.
fMarkerColor (int): Marker color. (https://root.cern.ch/doc/master/classTAttMarker.html)
fMarkerStyle (int): Marker style.
fMarkerSize (float): Marker size.
This function is for developers to create TH1* objects that can be
written to ROOT files, to implement conversion routines. The choice of
TH1C, TH1D, TH1F, TH1I, or TH1S depends on the dtype of the ``data`` array.
"""
tobject = uproot.models.TObject.Model_TObject.empty()
tobject._members["@fUniqueID"] = 0
tobject._members["@fBits"] = 0
tnamed = uproot.models.TNamed.Model_TNamed.empty()
tnamed._deeply_writable = True
tnamed._bases.append(tobject)
tnamed._members["fName"] = fName
tnamed._members["fTitle"] = fTitle
tattline = uproot.models.TAtt.Model_TAttLine_v2.empty()
tattline._deeply_writable = True
tattline._members["fLineColor"] = fLineColor
tattline._members["fLineStyle"] = fLineStyle
tattline._members["fLineWidth"] = fLineWidth
tattfill = uproot.models.TAtt.Model_TAttFill_v2.empty()
tattfill._deeply_writable = True
tattfill._members["fFillColor"] = fFillColor
tattfill._members["fFillStyle"] = fFillStyle
tattmarker = uproot.models.TAtt.Model_TAttMarker_v2.empty()
tattmarker._deeply_writable = True
tattmarker._members["fMarkerColor"] = fMarkerColor
tattmarker._members["fMarkerStyle"] = fMarkerStyle
tattmarker._members["fMarkerSize"] = fMarkerSize
th1 = uproot.models.TH.Model_TH1_v8.empty()
th1._bases.append(tnamed)
th1._bases.append(tattline)
th1._bases.append(tattfill)
th1._bases.append(tattmarker)
if fYaxis is None:
fYaxis = to_TAxis(fName="yaxis", fTitle="", fNbins=1, fXmin=0.0, fXmax=1.0)
if fZaxis is None:
fZaxis = to_TAxis(fName="zaxis", fTitle="", fNbins=1, fXmin=0.0, fXmax=1.0)
if fContour is None:
fContour = numpy.array([], dtype=numpy.float64)
if fFunctions is None:
fFunctions = []
if fBuffer is None:
fBuffer = numpy.array([], dtype=numpy.float64)
if isinstance(data, uproot.models.TArray.Model_TArray):
tarray_data = data
else:
tarray_data = to_TArray(data)
if isinstance(fSumw2, uproot.models.TArray.Model_TArray):
tarray_fSumw2 = fSumw2
else:
tarray_fSumw2 = to_TArray(fSumw2)
if not isinstance(tarray_fSumw2, uproot.models.TArray.Model_TArrayD):
raise TypeError("fSumw2 must be an array of float64 (TArrayD)")
if isinstance(fContour, uproot.models.TArray.Model_TArray):
tarray_fContour = fContour
else:
tarray_fContour = to_TArray(fContour)
if not isinstance(tarray_fContour, uproot.models.TArray.Model_TArrayD):
raise TypeError("fContour must be an array of float64 (TArrayD)")
if isinstance(fOption, uproot.models.TString.Model_TString):
tstring_fOption = fOption
else:
tstring_fOption = to_TString(fOption)
if isinstance(fFunctions, uproot.models.TList.Model_TList):
tlist_fFunctions = fFunctions
else:
tlist_fFunctions = to_TList(fFunctions, name="")
# FIXME: require all list items to be the appropriate class (TFunction?)
th1._members["fNcells"] = len(tarray_data) if fNcells is None else fNcells
th1._members["fXaxis"] = fXaxis
th1._members["fYaxis"] = fYaxis
th1._members["fZaxis"] = fZaxis
th1._members["fBarOffset"] = fBarOffset
th1._members["fBarWidth"] = fBarWidth
th1._members["fEntries"] = fEntries
th1._members["fTsumw"] = fTsumw
th1._members["fTsumw2"] = fTsumw2
th1._members["fTsumwx"] = fTsumwx
th1._members["fTsumwx2"] = fTsumwx2
th1._members["fMaximum"] = fMaximum
th1._members["fMinimum"] = fMinimum
th1._members["fNormFactor"] = fNormFactor
th1._members["fContour"] = tarray_fContour
th1._members["fSumw2"] = tarray_fSumw2
th1._members["fOption"] = tstring_fOption
th1._members["fFunctions"] = tlist_fFunctions
th1._members["fBufferSize"] = len(fBuffer) if fBufferSize is None else fBufferSize
th1._members["fBuffer"] = fBuffer
th1._members["fBinStatErrOpt"] = fBinStatErrOpt
th1._members["fStatOverflows"] = fStatOverflows
th1._speedbump1 = b"\x00"
th1._deeply_writable = tlist_fFunctions._deeply_writable
if isinstance(tarray_data, uproot.models.TArray.Model_TArrayC):
cls = uproot.models.TH.Model_TH1C_v3
elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayS):
cls = uproot.models.TH.Model_TH1S_v3
elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayI):
cls = uproot.models.TH.Model_TH1I_v3
elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayF):
cls = uproot.models.TH.Model_TH1F_v3
elif isinstance(tarray_data, uproot.models.TArray.Model_TArrayD):
cls = uproot.models.TH.Model_TH1D_v3
else:
raise TypeError(
"no TH1* subclasses correspond to {0}".format(tarray_data.classname)
)
th1x = cls.empty()
th1x._bases.append(th1)
th1x._bases.append(tarray_data)
th1x._deeply_writable = th1._deeply_writable
return th1x
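# A minimal usage sketch, not a definitive recipe: in practice this constructor is
# usually reached indirectly, when uproot converts a histogram during assignment.
# The output file name "example.root" is a made-up placeholder.
import numpy
import uproot

with uproot.recreate("example.root") as _demo_file:
    _demo_file["h"] = numpy.histogram(numpy.random.normal(size=1000), bins=50)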
| 5,341,050
|
def parse_args():
"""
Parse and validate the command line arguments, and set the defaults.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description='Utility commands for handle.net EPIC persistent identifiers')
parser.add_argument('command',
metavar='COMMAND',
choices=['handle', 'handles', 'count', 'download', 'rewrite-aliases'],
help=textwrap.dedent('''\
command to run:
- `handle`: retrieve details for the given POSTFIX; this may be the same output as the
public endpoint `https://hdl.handle.net/api/handles/<prefix>/<postfix>?pretty`
- `handles`: retrieve details for the given postfixes taken from a file
- `count`: count existing handles on the server, including special postfixes such as `ADMIN`, `CONTACT`,
`EPIC_HEALTHCHECK` and `USER01`
- `download`: create file with existing handles, each line holding `1-based-counter; prefix/postfix`
- `rewrite-aliases`: rewrite handles based on a file, each line holding `postfix; postfix` where both
should already exist as a handle, and where the first will become an alias for the latter
'''))
parser.add_argument('postfix', metavar='POSTFIX', nargs='?',
help='optional postfix, for a single full handle `<prefix>/<postfix>`')
parser.add_argument('-p', '--prefix', required=True, help='prefix, like `21.12102`, required')
parser.add_argument('-i', '--index', required=True, help='user index, like `312`, required')
parser.add_argument('--server', default='https://epic-pid.storage.surfsara.nl:8001',
help='base PID server URL, default `https://epic-pid.storage.surfsara.nl:8001`, to which, '
'e.g., `/api/sessions` and `/api/handles` are appended')
parser.add_argument('--certfile', help='certificate file, default `<prefix>_USER01_<index>_certificate_only.pem`')
parser.add_argument('--keyfile', help='private key file, default `<prefix>_USER01_<index>_privkey.pem`')
parser.add_argument('-f', '--file', metavar='INPUT', help='semicolon-separated input file, default `<command>.csv`')
parser.add_argument('-o', '--output', help='semicolon-separated output file, default `<command>-<yyyymmdd>.csv`')
parser.add_argument('--start', type=int,
help='zero-based start row from input file (default 1, hence ignoring the header), or start '
'page when downloading handles (default 0)')
parser.add_argument('--count', default=3, type=int,
help='number of rows to process or pages to download, default 3')
parser.add_argument('--size', metavar='PAGESIZE', default=10000, type=int,
help='page size when downloading paginated data, default 10,000')
parser.add_argument('--throttle', metavar='SECONDS', default=10, type=float,
help='number of seconds between requests, default 10')
parser.add_argument('-l', '--log', help='log file, default `<command>-<yyyymmdd>.log`')
parser.add_argument('-q', '--quiet', help='reduce output on terminal to be the same as the log',
action='store_true')
args = parser.parse_args()
args.certfile = args.certfile or f'{args.prefix}_USER01_{args.index}_certificate_only.pem'
args.keyfile = args.keyfile or f'{args.prefix}_USER01_{args.index}_privkey.pem'
args.file = args.file or f'{args.command}.csv'
args.output = args.output or f'{args.command}-{date.today().strftime("%Y%m%d")}.csv'
args.log = args.log or f'{args.command}-{date.today().strftime("%Y%m%d")}.log'
# For `rewrite-aliases` default to 1, skipping the CSV header
args.start = args.start if args.start is not None else 1 if args.command == 'rewrite-aliases' else 0
return args
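# A minimal sketch exercising the parser with a fabricated argv, to show how the
# defaults are filled in; the script name, prefix and index are made-up values.
import sys

sys.argv = ['epic.py', 'count', '-p', '21.12102', '-i', '312']
_demo_args = parse_args()
print(_demo_args.certfile)  # 21.12102_USER01_312_certificate_only.pem
print(_demo_args.file)      # count.csv
print(_demo_args.start)     # 0 (only `rewrite-aliases` defaults to 1)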
| 5,341,051
|
def parse(address, addr_spec_only=False, strict=False, metrics=False):
"""
Given a string, returns a scalar object representing a single full
mailbox (display name and addr-spec), addr-spec, or a url.
If parsing the entire string fails and strict is not set to True, fall back
to trying to parse the last word only and assume everything else is the
display name.
Returns an Address object and optionally metrics on processing
time if requested.
Examples:
>>> address.parse('John Smith <john@smith.com')
John Smith <john@smith.com>
>>> print address.parse('John <john@smith.com>', addr_spec_only=True)
None
>>> print address.parse('john@smith.com', addr_spec_only=True)
'john@smith.com'
>>> address.parse('http://host.com/post?q')
http://host.com/post?q
>>> print address.parse('foo')
None
"""
mtimes = {'parsing': 0}
if addr_spec_only:
parser = addr_spec_parser
else:
parser = mailbox_or_url_parser
# normalize inputs to bytestrings
if isinstance(address, unicode):
address = address.encode('utf-8')
# sanity checks
if not address:
return None, mtimes
if len(address) > MAX_ADDRESS_LENGTH:
_log.warning('address exceeds maximum length of %s', MAX_ADDRESS_LENGTH)
return None, mtimes
bstart = time()
try:
parse_rs = parser.parse(address.strip(), lexer=lexer.clone())
addr_obj = _lift_parse_result(parse_rs)
except (LexError, YaccError, SyntaxError):
addr_obj = None
if addr_obj is None and not strict:
addr_parts = address.split(' ')
addr_spec = addr_parts[-1]
if len(addr_spec) < len(address):
try:
parse_rs = parser.parse(addr_spec, lexer=lexer.clone())
addr_obj = _lift_parse_result(parse_rs)
if addr_obj:
addr_obj._display_name = ' '.join(addr_parts[:-1])
if isinstance(addr_obj._display_name, str):
addr_obj._display_name = addr_obj._display_name.decode('utf-8')
except (LexError, YaccError, SyntaxError):
addr_obj = None
mtimes['parsing'] = time() - bstart
return addr_obj, mtimes
| 5,341,052
|
def infer(
args: Namespace,
model: BaseModel
) -> None:
"""Perform the inference.
Parameters
----------
model : BaseModel
The model to be used for inference.
args : Namespace
Arguments to configure the model and the inference.
See Also
--------
ptlflow.models.base_model.base_model.BaseModel : The parent class of the available models.
"""
model.eval()
if torch.cuda.is_available():
model = model.cuda()
cap, img_paths, num_imgs, prev_img = init_input(args.input_path)
io_adapter = IOAdapter(model, prev_img.shape[:2], args.input_size, cuda=torch.cuda.is_available())
for i in tqdm(range(1, num_imgs)):
img, img_name, is_img_valid = _read_image(cap, img_paths, i)
if not is_img_valid:
break
inputs = io_adapter.prepare_inputs([prev_img, img])
preds = model(inputs)
preds = io_adapter.unpad_and_unscale(preds)
preds_npy = tensor_dict_to_numpy(preds)
preds_npy['flows_viz'] = flow_to_rgb(preds_npy['flows'])[:, :, ::-1]
if preds_npy.get('flows_b') is not None:
preds_npy['flows_b_viz'] = flow_to_rgb(preds_npy['flows_b'])[:, :, ::-1]
if args.write_outputs:
write_outputs(preds_npy, args.output_path, img_name, args.flow_format)
if args.show:
img1 = prev_img
img2 = img
if min(args.input_size) > 0:
img1 = cv.resize(prev_img, args.input_size[::-1])
img2 = cv.resize(img, args.input_size[::-1])
key = show_outputs(
img1, img2, preds_npy, args.auto_forward, args.max_show_side)
if key == 27:
break
prev_img = img
| 5,341,053
|
def test_wb_has_agm():
"""test_wb_has_agm"""
query = """
MATCH (a:AffectedGenomicModel)
WHERE a.primaryKey = 'WB:WBStrain00023353'
RETURN count(a) as counter """
result = execute_transaction(query)
for record in result:
assert record["counter"] == 1
| 5,341,054
|
def unfreeze_file(user, data):
""" unfreeze a file.
:return: status code, response data
"""
r = requests.post('%s/unfreeze' % URL, json=data, auth=(user, PASS), verify=False)
return r.status_code, r.json()
| 5,341,055
|
def p_ir_metadata_debug_attribute_bool(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : TRUE
| FALSE
"""
| 5,341,056
|
def test_mg_l004_mg_l004_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : choice: with 1 elements, 1 element is in
the instant XML doc
"""
assert_bindings(
schema="msData/modelGroups/mgL004.xsd",
instance="msData/modelGroups/mgL004.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,341,057
|
def prompt_url(q):
"""
:param q: The prompt to display to the user
    :return: The user's normalized input. We ensure there is a URL scheme, a domain, a "/" path,
and no trailing elements.
:rtype: str
"""
return prompt(q, _url_coerce_fn)
| 5,341,058
|
def eval_mnl_logsums(choosers, spec, locals_d, trace_label=None):
"""
like eval_nl except return logsums instead of making choices
Returns
-------
logsums : pandas.Series
Index will be that of `choosers`, values will be
logsum across spec column values
"""
trace_label = tracing.extend_trace_label(trace_label, 'mnl')
check_for_variability = tracing.check_for_variability()
print("running eval_mnl_logsums")
expression_values = eval_variables(spec.index, choosers, locals_d)
if check_for_variability:
_check_for_variability(expression_values, trace_label)
# utility values
utilities = compute_utilities(expression_values, spec)
# logsum is log of exponentiated utilities summed across
# columns of each chooser row
utils_arr = utilities.as_matrix().astype('float')
logsums = np.log(np.exp(utils_arr).sum(axis=1))
logsums = pd.Series(logsums, index=choosers.index)
if trace_label:
# add logsum to utilities for tracing
utilities['logsum'] = logsums
tracing.trace_df(choosers, '%s.choosers' % trace_label)
tracing.trace_df(utilities, '%s.utilities' % trace_label,
column_labels=['alternative', 'utility'])
tracing.trace_df(logsums, '%s.logsums' % trace_label,
column_labels=['alternative', 'logsum'])
tracing.trace_df(
expression_values, '%s.expression_values' % trace_label,
column_labels=['expression', None])
return logsums
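# A minimal numeric sketch of the core logsum step above: the log of exponentiated
# utilities summed across alternatives, one value per chooser row.
import numpy as np

demo_utils = np.array([[1.0, 2.0],
                       [0.0, 0.0]])
print(np.log(np.exp(demo_utils).sum(axis=1)))  # [2.31326169 0.69314718]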
| 5,341,059
|
def find_zeroed_indices(adjusted, original):
"""Find the indices of the values present in ``original`` but missing in ``adjusted``.
Parameters
----------
adjusted: np.array
original: array_like
Returns
-------
Tuple[np.ndarray]
Indices of the values present in ``original`` but missing in ``adjusted``.
"""
if sp.issparse(original):
i, j, v = sp.find(original)
# Use hash maps to figure out which indices have been lost in the original
original_indices = set(zip(i, j))
adjusted_indices = set(zip(*np.where(~adjusted.mask)))
zeroed_indices = original_indices - adjusted_indices
# Convert our hash map of coords into the standard numpy indices format
indices = list(zip(*zeroed_indices))
indices = tuple(map(np.array, indices))
return indices
else:
original = np.ma.masked_array(original, mask=original <= 0)
return np.where(adjusted.mask & ~original.mask)
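# A minimal usage sketch for the dense branch, assuming the module-level imports used
# above (numpy as np, scipy.sparse as sp) are available.
import numpy as np

demo_original = np.array([[1, 0],
                          [2, 3]])
demo_adjusted = np.ma.masked_array(np.array([[1, 0], [0, 3]]),
                                   mask=[[False, True], [True, False]])
rows, cols = find_zeroed_indices(demo_adjusted, demo_original)
print(rows, cols)  # [1] [0] -- the positive value at (1, 0) is masked out in `adjusted`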
| 5,341,060
|
def inventory_update(arr1, arr2):
"""Add the inventory from arr2 to arr1.
If an item exists in both arr1 and arr2, then
the quantity of the item is updated in arr1.
If an item exists in only arr2, then the item
is added to arr1. If an item only exists in
arr1, then that item remains unaffected.
Arguments:
arr1: the destination inventory
arr2: the inventory to add to the
destination inventory
Returns: a combined inventory
"""
    # Ensure arr1 refers to the longer of the two arrays; after this block,
    # `longer` (despite its name) holds the other, shorter-or-equal array.
longer = arr2
if len(longer) > len(arr1):
temp = arr1
arr1 = longer
longer = temp
# Since longer is potentially modified, set it
# to a copy of itself.
longer = longer.copy()
    # Iterate over arr1, appending items that don't
    # yet exist in `longer`, or updating the quantity
    # of existing items.
for tup in arr1:
qty = tup[0]
name = tup[1]
# Funny way to get the index of an array
# object based on the object's own indexed
# elements.
try:
i = [x[1] for x in longer].index(name)
except ValueError:
i = -1
if i < 0:
longer.append(tup)
else:
longer[i][0] += qty
# Man, why doesn't the index function accept a
# key argument? Sort on the string description
# of each inventory item.
longer.sort(key=lambda x: x[1])
return longer
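# A minimal usage sketch with made-up data: items are mutable [quantity, name] pairs
# and the merged inventory comes back sorted by name.
current = [[21, "Bowling Ball"], [2, "Dirty Sock"], [1, "Hair Pin"]]
delivery = [[2, "Hair Pin"], [3, "Half-Eaten Apple"]]
print(inventory_update(current, delivery))
# [[21, 'Bowling Ball'], [2, 'Dirty Sock'], [3, 'Hair Pin'], [3, 'Half-Eaten Apple']]
# Note: the shallow copy means the "Hair Pin" quantity update mutates delivery[0] in place.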
| 5,341,061
|
def racaty(url: str) -> str:
""" Racaty direct link generator
based on https://github.com/SlamDevs/slam-mirrorbot"""
dl_url = ''
try:
link = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No Racaty links found\n")
scraper = create_scraper()
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
op = soup.find("input", {"name": "op"})["value"]
ids = soup.find("input", {"name": "id"})["value"]
rpost = scraper.post(url, data = {"op": op, "id": ids})
rsoup = BeautifulSoup(rpost.text, "lxml")
dl_url = rsoup.find("a", {"id": "uniqueExpirylink"})["href"].replace(" ", "%20")
return dl_url
| 5,341,062
|
def test_copyto(func_interface):
"""Tests for numpoly.copyto."""
poly = numpoly.polynomial([1, X, Y])
poly_ref = numpoly.polynomial([1, X, Y])
with raises(ValueError):
func_interface.copyto(poly.values, poly_ref, casting="safe")
with raises(ValueError):
numpoly.copyto(poly.values, [1, 2, 3], casting="safe")
with raises(ValueError):
numpoly.copyto(X, Y, casting="unsafe")
func_interface.copyto(poly, X)
assert_equal(poly, [X, X, X])
func_interface.copyto(poly.values, poly_ref, casting="unsafe")
assert_equal(poly, poly_ref)
func_interface.copyto(poly, 4)
assert_equal(poly, [4, 4, 4])
func_interface.copyto(poly.values, poly_ref.values, casting="unsafe")
assert_equal(poly, poly_ref)
poly = numpoly.polynomial([1, 2, 3])
func_interface.copyto(poly, [3, 2, 1], casting="unsafe")
assert_equal(poly, [3, 2, 1])
func_interface.copyto(
poly.values, numpoly.polynomial([1, 2, 3]), casting="unsafe")
assert_equal(poly, [1, 2, 3])
out = numpy.zeros(3, dtype=float)
numpoly.copyto(out, poly, casting="unsafe")
assert_equal(out, [1., 2., 3.])
| 5,341,063
|
def addDBToConf(conf_name, db_name, db_username, db_password, db_location = 'local', host_ip = None, host_port = None):
"""
    Method for adding a database configuration to your own database configuration.
    :param str conf_name: name under which the configuration is stored
    :param str db_name: name of the database
    :param str db_username: database user name
    :param str db_password: database password
    :param str db_location: 'local' or 'remote'
    :param str host_ip: host IP address, required for remote databases
    :param int host_port: host port, required for remote databases
"""
db_settings = settings.database_settings
if conf_name in db_settings.keys():
raise Exception('database with conf_name {0} already exists! Please use another name or alter the existing configuration'.format(conf_name))
if db_location not in ['local', 'remote']:
raise Exception('db_location is not remote or local. ({0})'.format(db_location))
db_settings[conf_name] = { 'dbname':db_name,
'user':db_username,
'password':db_password,
'location':db_location,
'host':host_ip,
'port':host_port}
writeDBConf(db_settings, settings.active_database)
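# A minimal usage sketch with made-up values; `settings` and `writeDBConf` come from
# the surrounding module and are assumed to be configured.
addDBToConf('analytics', db_name='warehouse', db_username='analyst',
            db_password='secret', db_location='remote',
            host_ip='10.0.0.5', host_port=5432)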
| 5,341,064
|
def get_logger(
name='mltk',
level='INFO',
console=False,
log_file=None,
log_file_mode='w',
parent:logging.Logger=None
):
"""Get or create a logger, optionally adding a console and/or file handler"""
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
if parent is None:
logger.propagate = False
else:
logger.parent = parent
logger.propagate = True
logger.setLevel('DEBUG')
if console:
add_console_logger(logger, level=level)
if log_file:
log_dir = os.path.dirname(log_file)
if log_dir:
os.makedirs(log_dir, exist_ok=True)
fh = logging.FileHandler(log_file, mode=log_file_mode)
fh.setLevel('DEBUG')
logger.addHandler(fh)
if not hasattr(logger, 'close'):
def _close(cls):
for handler in cls.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
logger.close = types.MethodType(_close, logger)
return logger
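# A minimal usage sketch (the log path is a made-up placeholder): a file-only logger
# that records at DEBUG level and exposes the dynamically attached close() helper.
demo_log = get_logger(name='demo', log_file='logs/demo.log')
demo_log.debug('written to the file handler')
demo_log.close()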
| 5,341,065
|
def _remove_old_snapshots(connection, volume, max_snapshots):
""" Remove old snapshots
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type volume: boto.ec2.volume.Volume
:param volume: Volume to check
:returns: None
"""
logging.info(kayvee.formatLog("ebs-snapshots", "info", "removing old snapshots", data={"volume":volume.id}))
retention = max_snapshots
    if not (isinstance(retention, int) and retention >= 0):
logging.warning(kayvee.formatLog("ebs-snapshots", "warning", "invalid max_snapshots value", {
"volume": volume.id,
"max_snapshots": retention
}))
return
snapshots = connection.get_all_snapshots(filters={'volume-id': volume.id})
# Sort the list based on the start time
snapshots.sort(key=lambda x: x.start_time)
# Remove snapshots we want to keep
snapshots = snapshots[:-int(retention)]
if not snapshots:
logging.info(kayvee.formatLog("ebs-snapshots", "info", "no old snapshots to remove", data={"volume":volume.id}))
return
for snapshot in snapshots:
logging.info(kayvee.formatLog("ebs-snapshots", "info", "deleting snapshot", {"snapshot": snapshot.id}))
try:
snapshot.delete()
except EC2ResponseError as error:
logging.warning(kayvee.formatLog("ebs-snapshots", "warning", "could not remove snapshot", {
"snapshot": snapshot.id,
"msg": error.message
}))
logging.info(kayvee.formatLog("ebs-snapshots", "info", "done deleting snapshots", data={"volume":volume.id}))
| 5,341,066
|
def main():
"""pull the region sets as bed files to visualize results
"""
# input files
mpra_file = sys.argv[1]
rule_dir = sys.argv[2]
dir_url = "http://mitra.stanford.edu/kundaje/dskim89/ggr/paper"
# for rule in rule dir, pull the BED file from it
rules = pd.read_csv(mpra_file, sep="\t")
rules = rules[~rules["interaction"].str.contains("FAILED")]
json_entries = []
for rule_idx in range(rules.shape[0]):
rule_name = rules["grammar"].iloc[rule_idx]
pwm1_name = rules["pwm1_clean"].iloc[rule_idx]
pwm2_name = rules["pwm2_clean"].iloc[rule_idx]
# get gml file
gml_file = "{}/{}.gml".format(rule_dir, rule_name)
graph = nx.read_gml(gml_file)
# make bed file
bed_file = "{}.{}_x_{}.bed.gz".format(rule_name, pwm1_name, pwm2_name)
if not os.path.isfile(bed_file):
            print(bed_file)
get_bed_from_nx_graph(
graph,
bed_file,
interval_key="active",
merge=True,
return_key="region")
os.system("chmod a+x {}".format(bed_file))
display_name = bed_file.split(".")[-3]
json_entry = _make_json_bed_entry(bed_file, display_name, dir_url)
json_entries.append(json_entry)
# also make a tbi file
make_tbi = "tabix -p bed {}".format(bed_file)
tbi_file = "{}.tbi".format(bed_file)
if not os.path.isfile(tbi_file):
os.system(make_tbi)
# set up json file with appropriate names
json_file = "combinatorial_rules.json"
json_data = json.dumps(json_entries, indent=1)
json_data = re.sub(r'"(.*?)"(?=:)', r'\1', json_data)
with open(json_file, "w") as fp:
fp.write(json_data)
return
| 5,341,067
|
def _parse_cli_args() -> Tuple[str, List[str]]:
"""Parses CLI args to return device name and args for unittest runner."""
parser = argparse.ArgumentParser(
description="Runs a GDM + unittest reboot test on a device. All "
"arguments other than the device name are passed through to "
"the unittest runner.")
parser.add_argument(
"-d", "--device", required=True,
help="GDM device name to run the test on. For example, 'device-1234'. "
"The device must be shown as 'available' or 'connected' in the "
"output of 'gdm devices'.")
args, remaining_argv = parser.parse_known_args()
return args.device, [sys.argv[0]] + remaining_argv
| 5,341,068
|
def build_tree(tree, parent, counts, ordered_ids):
"""
Recursively splits the data, which contained in the tree object itself
and is indexed by ordered_ids.
Parameters
----------
tree: Tree object
parent: TreeNode object
The last node added to the tree, which will be the parent of the
two nodes resulting from the split (if any) of this function call.
counts: numpy array (int)
The class counts of the samples reaching the parent node.
ordered_ids: numpy array (int)
The ids of the samples reaching the parent node.
"""
root = TreeNode(0, counts, parent, ordered_ids, False)
queue = List()
queue.append(root)
n_nodes = 1
np.random.seed(tree.random_state)
while len(queue) > 0:
node = queue.pop(0)
split = find_best_split(node, tree, np.random.randint(1e6))
if split is not None:
node.split = split
left_child = TreeNode(n_nodes, split.left_counts, node,
split.left_ids, False)
node.left_child = left_child
queue.append(left_child)
n_nodes += 1
right_child = TreeNode(n_nodes, split.right_counts, node,
split.right_ids, False)
node.right_child = right_child
queue.append(right_child)
n_nodes += 1
else:
node.isleaf = True
tree.depth = max(tree.depth, node.depth)
return root, n_nodes
| 5,341,069
|
def get_perf_measure_by_group(aif_metric, metric_name):
"""Get performance measures by group."""
perf_measures = ['TPR', 'TNR', 'FPR', 'FNR', 'PPV', 'NPV', 'FDR', 'FOR', 'ACC']
func_dict = {
'selection_rate': lambda x: aif_metric.selection_rate(privileged=x),
'precision': lambda x: aif_metric.precision(privileged=x),
'recall': lambda x: aif_metric.recall(privileged=x),
'sensitivity': lambda x: aif_metric.sensitivity(privileged=x),
'specificity': lambda x: aif_metric.specificity(privileged=x),
'power': lambda x: aif_metric.power(privileged=x),
'error_rate': lambda x: aif_metric.error_rate(privileged=x),
}
if metric_name in perf_measures:
metric_func = lambda x: aif_metric.performance_measures(privileged=x)[metric_name]
elif metric_name in func_dict.keys():
metric_func = func_dict[metric_name]
else:
raise NotImplementedError
df = pd.DataFrame({
'Group': ['all', 'privileged', 'unprivileged'],
metric_name: [metric_func(group) for group in [None, True, False]],
})
return df
| 5,341,070
|
def test_skew_single_return_type():
"""This function tests the return type for the skew method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.skew(dask_array).compute()
assert isinstance(result, np.float64)
| 5,341,071
|
def _strxor_direct(term1, term2, result):
"""Very fast XOR - check conditions!"""
_raw_strxor.strxor(term1, term2, result, c_size_t(len(term1)))
| 5,341,072
|
def hello_world(request):
"""Return a greeting."""
return HttpResponse('Hello, world!{now}'.format(
now=datetime.now().strftime('%b %dth, %Y : %M HttpResponses')
))
| 5,341,073
|
def getfile(id, name):
"""
    Returns an attached file.
"""
mime = mimetypes.guess_type(name)[0]
if mime is None:
mime = "application/octet-stream"
c = get_cursor()
c.execute(
"""
select files.ticket_id as ticket_id,
files.size as size,
files.contents as contents,
tickets.admin_only as admin_only
from files
join tickets
on tickets.id = files.ticket_id
where files.id = :id
""",
{"id": id},
)
row = c.fetchone()
blob = zlib.decompress(row["contents"])
if not user_admin(current_user()) and row["admin_only"] == 1:
return "você não tem permissão para acessar este recurso!"
else:
response.content_type = mime
return blob
| 5,341,074
|
def get_default_wavelet():
"""Sets the default wavelet to be used for scaleograms"""
global DEFAULT_WAVELET
return DEFAULT_WAVELET
| 5,341,075
|
def check_output(file_path: str) -> bool:
"""
This function checks an output file, either from geomeTRIC or
from Psi4, for a successful completion keyword. Returns
True if the calculation finished successfully, otherwise
False.
"""
with open(file_path, "r") as read_file:
text = read_file.read()
checks = ["Converged! =D", "Psi4 exiting successfully"]
return any([check in text for check in checks])
| 5,341,076
|
def _check_func_signature_supported(func: Callable) -> None:
"""
    Checks whether a given function has a supported signature.
    If the function signature includes parameters of kind
    POSITIONAL_ONLY, VAR_KEYWORD or VAR_POSITIONAL,
    a FunctionSignatureNotSupportedException is raised.
"""
sig = signature(func)
for _, param in sig.parameters.items():
if param.kind in (
param.POSITIONAL_ONLY,
param.VAR_KEYWORD,
param.VAR_POSITIONAL,
):
raise FunctionSignatureNotSupportedException(
(
"Currently only functions with arguments that have types "
"of POSITIONAL_OR_KEYWORD and KEYWORD_ONLY are supported."
)
)
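# A minimal usage sketch: keyword-style signatures pass silently, while
# *args-style signatures are rejected.
def _ok(a, b=1):
    return a + b

def _bad(*args):
    return args

_check_func_signature_supported(_ok)  # no exception
try:
    _check_func_signature_supported(_bad)
except FunctionSignatureNotSupportedException:
    print('*args parameters are not supported')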
| 5,341,077
|
def dumps(value):
"""
Dumps a data structure to TOML source code.
    The given value must be a TOMLFile instance constructed by this
    module's load() or loads().
"""
if not isinstance(value, TOMLFile):
raise RuntimeError(
'Can only dump a TOMLFile instance loaded by load() or loads()'
)
return value.dumps()
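# A minimal usage sketch, assuming the `loads()` function from the same module
# (referenced in the error message above) is available to build a TOMLFile.
demo_doc = loads('title = "example"\n')
print(dumps(demo_doc))  # round-trips the TOML source
try:
    dumps({'title': 'example'})
except RuntimeError as exc:
    print(exc)  # plain dicts are rejected; only TOMLFile instances can be dumped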
| 5,341,078
|
def calc_graph(dict_graph):
"""
creates scatter of comfort and curves of constant relative humidity
:param dict_graph: contains comfort conditions to plot, output of comfort_chart.calc_data()
:type dict_graph: dict
:return: traces of scatter plot of 4 comfort conditions
:rtype: list of plotly.graph_objs.Scatter
"""
traces = []
# draw scatter of comfort conditions in building
trace = go.Scatter(x=dict_graph['t_op_occupied_winter'], y=dict_graph['x_int_occupied_winter'],
name='occupied hours winter', mode='markers', marker=dict(color=COLORS_TO_RGB['red']))
traces.append(trace)
trace = go.Scatter(x=dict_graph['t_op_unoccupied_winter'], y=dict_graph['x_int_unoccupied_winter'],
name='unoccupied hours winter', mode='markers', marker=dict(color=COLORS_TO_RGB['blue']))
traces.append(trace)
trace = go.Scatter(x=dict_graph['t_op_occupied_summer'], y=dict_graph['x_int_occupied_summer'],
name='occupied hours summer', mode='markers', marker=dict(color=COLORS_TO_RGB['purple']))
traces.append(trace)
trace = go.Scatter(x=dict_graph['t_op_unoccupied_summer'], y=dict_graph['x_int_unoccupied_summer'],
name='unoccupied hours summer', mode='markers', marker=dict(color=COLORS_TO_RGB['orange']))
traces.append(trace)
return traces
| 5,341,079
|
def index():
"""Display a user's account information."""
if not current_user.is_authenticated:
return redirect(url_for("account.login"))
cancel_reservation_form = CancelReservationForm()
if cancel_reservation_form.validate_on_submit():
cancel_id = int(cancel_reservation_form.id.data)
if cancel_reservation_form.type.data == "space":
print("SR ID: " + str(cancel_id), file=sys.stderr)
sr = Space_Reservation.query.filter_by(id=cancel_id).first()
db.session.delete(sr)
db.session.commit()
flash('Your Space Reservation has been cancelled')
elif cancel_reservation_form.type.data == "equipment":
print("ER ID: " + str(cancel_id), file=sys.stderr)
er = Equipment_Reservation.query.filter_by(id=cancel_id).first()
db.session.delete(er)
db.session.commit()
flash('Your Equipment Reservation has been cancelled')
space_sql = '''SELECT sr.*, s.name AS space_name, l.name AS location_name, c.name AS campus_name
FROM space_reservations sr
JOIN spaces s ON s.id=sr.space_id
JOIN locations l ON s.location_id = l.id
JOIN campuses c ON l.campus_id = c.id
WHERE sr.reserver_id=''' + str(current_user.id) + ";"
sr_response = db.engine.execute(text(space_sql))
space_reservations = []
for sr in sr_response:
space_reservations.append(dict(zip(sr.keys(), sr)))
equipment_sql = '''SELECT er.*, e.name AS equipment_name, et.name AS equipment_type_name, l.name AS location_name, c.name AS campus_name
FROM equipment_reservations er
JOIN equipment e ON e.id=er.equipment_id
JOIN equipment_types et ON e.equipment_type_id = et.id
JOIN locations l ON e.location_id = l.id
JOIN campuses c ON l.campus_id = c.id
WHERE er.reserver_id=''' + str(current_user.id) + ";"
er_response = db.engine.execute(text(equipment_sql))
equipment_reservations = []
for er in er_response:
equipment_reservations.append(dict(zip(er.keys(), er)))
return render_template('main/index.html', user=current_user, space_reservations=space_reservations, equipment_reservations=equipment_reservations, cancel_reservation_form=cancel_reservation_form)
| 5,341,080
|
def report_raw_stats(
sect,
stats: LinterStats,
old_stats: Optional[LinterStats],
) -> None:
"""calculate percentage of code / doc / comment / empty"""
total_lines = stats.code_type_count["total"]
sect.description = f"{total_lines} lines have been analyzed"
lines = ["type", "number", "%", "previous", "difference"]
for node_type in ("code", "docstring", "comment", "empty"):
node_type = cast(Literal["code", "docstring", "comment", "empty"], node_type)
total = stats.code_type_count[node_type]
percent = float(total * 100) / total_lines if total_lines else None
old = old_stats.code_type_count[node_type] if old_stats else None
diff_str = diff_string(old, total) if old else None
lines += [
node_type,
str(total),
f"{percent:.2f}" if percent is not None else "NC",
str(old) if old else "NC",
diff_str if diff_str else "NC",
]
sect.append(Table(children=lines, cols=5, rheaders=1))
| 5,341,081
|
def add_property(inst, name_to_method: Dict[str, Callable]):
"""Dynamically add new properties to an instance by creating a new class
for the instance that has the additional properties"""
cls = type(inst)
# Avoid creating a new class for the inst if it was already done before
if not hasattr(cls, "__perinstance"):
cls = type(cls.__name__, (cls,), {})
cls.__perinstance = True
inst.__class__ = cls
for name, method in name_to_method.items():
setattr(cls, name, property(method))
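# A minimal usage sketch with made-up names (`Point`, `magnitude`): the property is
# attached to a per-instance subclass, so other Point instances stay untouched.
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Point(3, 4)
add_property(p, {"magnitude": lambda self: (self.x ** 2 + self.y ** 2) ** 0.5})
print(p.magnitude)  # 5.0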
| 5,341,082
|
def centrality(raw_centrality: List[Hist], centralities: Dict[params.EventActivity, Hist],
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Plot the centrality distribution for a list of centralities
Args:
raw_centrality: Raw centrality. Each entry should have the same content.
centralities: Centrality selected distributions.
output_info: Output information.
Returns:
None. The centralities are plotted.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# First plot the first raw distribution (we don't need the others).
h = histogram.Histogram1D.from_existing_hist(raw_centrality[0])
ax.plot(h.x, h.y, label = "Raw centrality")
# Then plot the selected ones
for event_activity, hist in centralities.items():
h = histogram.Histogram1D.from_existing_hist(hist)
ax.plot(h.x, h.y, label = event_activity.display_str())
# Final presentation settings
ax.legend(frameon = False, loc = "upper right")
fig.tight_layout()
# Finally, save and cleanup
plot_base.save_plot(output_info, fig, "centrality")
plt.close(fig)
| 5,341,083
|
def generate_arrays(df, resize=True, img_height=50, img_width=200):
""" Generates image array and labels array from a dataframe """
num_items = len(df)
images = np.zeros((num_items, img_height, img_width), dtype=np.float32)
labels = [0] * num_items
for i in range(num_items):
input_img = keras.preprocessing.image.load_img(df["img_path"][i], color_mode='grayscale')
img_array = keras.preprocessing.image.img_to_array(input_img)
if resize:
img_array = np.resize(img_array, (img_height, img_width))
img_array = (img_array/255.).astype(np.float32)
label = df["label"][i]
if is_valid_captcha(label):
images[i, :, :] = img_array
labels[i] = label
return images, np.array(labels)
| 5,341,084
|
def printWifiNames():
"""print all cells from interface wlan0"""
res= wifi.Cell.all('wlan0')
for r in res:
        print(r)
| 5,341,085
|
def compute_cosine_distance(Q, feats, names):
"""
feats and Q: L2-normalize, n*d
"""
dists = np.dot(Q, feats.T)
# print("dists:",dists)
# exit(1)
idxs = np.argsort(dists)[::-1]
rank_dists = dists[idxs]
rank_names = [names[k] for k in idxs]
return (idxs, rank_dists, rank_names)
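# A minimal usage sketch: a single L2-normalised query against three unit-norm
# feature vectors; results come back ranked most-similar first.
import numpy as np

demo_feats = np.array([[1.0, 0.0],
                       [0.0, 1.0],
                       [0.7071, 0.7071]])
demo_query = np.array([1.0, 0.0])
_, demo_dists, demo_names = compute_cosine_distance(demo_query, demo_feats, ["a", "b", "c"])
print(demo_names)  # ['a', 'c', 'b']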
| 5,341,086
|
def test_fuss_pretty(fixed):
"""Test Fuss' pretty formatting."""
examples = [
(Fuss(fixed, "abc.txt", 2, "message"), "abc.txt:1: NIP002 message"),
(Fuss(fixed, "abc.txt", 2, "message", "", 15), "abc.txt:15: NIP002 message"),
(
Fuss(fixed, "abc.txt", 1, "message", "\tsuggestion\n\t "),
f"abc.txt:1: NIP001 message{SUGGESTION_BEGIN}\n\tsuggestion{SUGGESTION_END}",
),
(Fuss(fixed, " ", 3, "no filename"), "NIP003 no filename"),
]
for fuss, expected in examples:
compare(actual=fuss.pretty, expected=dedent(expected))
| 5,341,087
|
def phase(ifc, inc_pt, d_in, normal, z_dir, wvl, n_in, n_out):
""" apply phase shift to incoming direction, d_in, about normal """
try:
d_out, dW = ifc.phase(inc_pt, d_in, normal, z_dir, wvl, n_in, n_out)
return d_out, dW
except ValueError:
raise TraceEvanescentRayError(ifc, inc_pt, d_in, normal, n_in, n_out)
| 5,341,088
|
def generate_udf(spec: "rikai.spark.sql.codegen.base.ModelSpec"):
"""Construct a UDF to run sklearn model.
Parameters
----------
spec : ModelSpec
the model specifications object
Returns
-------
A Spark Pandas UDF.
"""
def predict(model, X):
if hasattr(model, "predict"):
return model.predict(X)
elif hasattr(model, "transform"):
return model.transform(X)
else:
raise RuntimeError("predict or transform is not available")
def sklearn_inference_udf(
iter: Iterator[pd.Series],
) -> Iterator[pd.Series]:
model = spec.load_model()
for series in list(iter):
X = np.vstack(series.apply(_pickler.loads).to_numpy())
y = [_pickler.dumps(pred.tolist()) for pred in predict(model, X)]
yield pd.Series(y)
return pandas_udf(sklearn_inference_udf, returnType=BinaryType())
| 5,341,089
|
def test_list_hex_binary_max_length_nistxml_sv_iv_list_hex_binary_max_length_1_3(mode, save_output, output_format):
"""
Type list/hexBinary is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/hexBinary/Schema+Instance/NISTSchema-SV-IV-list-hexBinary-maxLength-1.xsd",
instance="nistData/list/hexBinary/Schema+Instance/NISTXML-SV-IV-list-hexBinary-maxLength-1-3.xml",
class_name="NistschemaSvIvListHexBinaryMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,341,090
|
def ssl_loss_mean_teacher(labels_x, logits_x, logits_teacher, logits_student):
"""
    Computes the labeled cross-entropy loss and the mean-teacher consistency loss.
    x_loss is the cross-entropy loss on the labeled batch and loss_mt is the mean
    squared error between the softmax outputs of the teacher and student models.
    Args:
        labels_x: tensor, contains labels corresponding to logits_x of shape [batch, num_classes]
        logits_x: tensor, contains the logits of a batch of images of shape [batch, num_classes]
        logits_teacher: tensor, logits of teacher model of shape [batch, num_classes]
        logits_student: tensor, logits of student model of shape [batch, num_classes]
Returns:
Two floating point numbers, the first representing the labeled CE loss
and the second holding the MSE loss values.
"""
x_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_x, logits=logits_x)
x_loss = tf.reduce_mean(x_loss)
loss_mt = tf.reduce_mean((tf.nn.softmax(logits_teacher) - tf.nn.softmax(logits_student)) ** 2, -1)
loss_mt = tf.reduce_mean(loss_mt)
return x_loss, loss_mt
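# A minimal numeric sketch, assuming TensorFlow 2.x eager mode: identical teacher and
# student logits give a zero consistency term, while the cross-entropy term reflects
# the labelled batch.
import tensorflow as tf

demo_labels = tf.constant([[1.0, 0.0]])
demo_logits = tf.constant([[2.0, 0.0]])
demo_x_loss, demo_loss_mt = ssl_loss_mean_teacher(demo_labels, demo_logits, demo_logits, demo_logits)
print(float(demo_x_loss), float(demo_loss_mt))  # ~0.1269 and 0.0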
| 5,341,091
|
def _sch_el(self, *wert, **kwargs):
"""Element einer Schar; für einen Parameter"""
if kwargs.get('h'):
print("\nElement einer Schar von Matrizen\n")
print("Aufruf matrix . sch_el( wert )\n")
print(" matrix Matrix")
print(" wert Wert des Scharparameters")
print("\nEs ist nur ein Scharparameter zugelassen\n")
return
schar = any([ve.is_schar for ve in self.vekt])
if not schar or len(self.sch_par) > 1:
print('agla: keine Schar mit einem Parameter')
return
if not wert or len(wert) != 1:
print('agla: einen Wert für den Scharparameter angeben')
return
p = Tuple(*self.sch_par)[0]
wert = sympify(*wert)
if not is_zahl(wert):
print('agla: für den Scharparameter Zahl oder freien Parameter angeben')
return
try:
wert = nsimplify(wert)
except RecursionError:
pass
vekt = []
for ve in self.vekt:
if p in ve.sch_par:
vekt.append(ve.sch_el(wert))
else:
vekt.append(ve)
return Matrix(*vekt)
| 5,341,092
|
def check_directory(path, read=False, write=False, execute=False):
"""Does that path exist and can the current user rwx."""
if os.path.isdir(path) and check_mode(path, read=read, write=write, execute=execute):
return True
else:
return False
| 5,341,093
|
def test_parse_from_yaml(petstore_expanded):
"""
Tests that we can parse a valid yaml file
"""
spec = OpenAPI(petstore_expanded)
| 5,341,094
|
def scsilun_to_int(lun):
"""
    There are two styles of LUN numbers: one whose decimal value is < 256, and
    one given as a full 16-hex-digit value. According to T10 SAM, the full hex
    form should be byte-swapped and then converted to decimal.
    For example, SC gets the zLinux LUN number '40294018' from the DS8K API; it
    should be swapped to '40184029' and converted to decimal, 1075331113.
    When the LUN number is '0c', its decimal value is < 256, so it is converted
    directly to decimal, 12.
https://github.com/kubernetes/kubernetes/issues/45024
"""
pretreated_scsilun = int(lun, 16)
if pretreated_scsilun < 256:
return pretreated_scsilun
return (pretreated_scsilun >> 16 & 0xFFFF) | \
(pretreated_scsilun & 0xFFFF) << 16
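# A minimal worked example reproducing the two cases from the docstring above.
assert scsilun_to_int('40294018') == 1075331113  # swapped to 0x40184029, then decimal
assert scsilun_to_int('0c') == 12                # decimal value < 256, used directly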
| 5,341,095
|
def consolidate_control_snp(data_containers, filename_or_fileobj):
"""saves a pickled dataframe with non-CpG probe values.
    NOTE: no longer called in the pipeline. Kept for legacy purposes; because it needs all SampleDataContainer objects at once, this step now happens inline with each sample to save memory.
Returns:
Control: red and green intensity
SNP: beta values, based on uncorrected meth and unmeth intensity.
Where
            0-0.25 ~~ homozygous-recessive
0.25--0.75 ~~ heterozygous
0.75--1.00 ~~ homozygous-dominant
for each of 50-60 SNP locii on the genome.
methylcheck can plot these to genotype samples.
Notes:
Each data container has a ctrl_red and ctrl_green dataframe.
This shuffles them into a single dictionary of dataframes with keys as Sample_ID matching the meta_data.
Where Sample_ID is:
{sample.sentrix_id_sample.sentrix_position}
and each dataframe contains both the ctrl_red and ctrl_green data. Column names will specify which channel
(red or green) the data applies to.
snp values are stored in container.snp_methylated.data_frame
and container.snp_unmethylated.data_frame
"""
out = {}
for idx,sample in enumerate(data_containers):
sample_id = f"{sample.sample.sentrix_id}_{sample.sample.sentrix_position}"
RED = sample.ctrl_red.rename(columns={'mean_value': 'Mean_Value_Red'})
GREEN = sample.ctrl_green.rename(columns={'mean_value': 'Mean_Value_Green'})
GREEN = GREEN.drop(['Control_Type', 'Color', 'Extended_Type'], axis='columns')
SNP = sample.snp_methylated.data_frame.rename(columns={'mean_value': 'snp_meth'})
SNP_UNMETH = sample.snp_unmethylated.data_frame.rename(columns={'mean_value': 'snp_unmeth'})
SNP_UNMETH = SNP_UNMETH.loc[:, ['snp_unmeth']]
SNP = pd.merge(SNP, SNP_UNMETH, left_index=True, right_index=True, how='outer')
# below (snp-->beta) is analogous to:
# SampleDataContainer._postprocess(input_dataframe, calculate_beta_value, 'beta_value')
# except that it doesn't use the predefined noob columns.
SNP['snp_beta'] = calculate_beta_value(
SNP['snp_meth'].values,
SNP['snp_unmeth'].values,
)
SNP = SNP[['snp_beta','snp_meth','snp_unmeth']]
SNP = SNP.astype({
'snp_meth': 'int32',
'snp_unmeth': 'int32'})
merged = pd.merge(RED, GREEN, left_index=True, right_index=True, how='outer')
merged = merged.astype({
'Mean_Value_Green': 'int32',
'Mean_Value_Red': 'int32'})
merged = pd.merge(merged, SNP, left_index=True, right_index=True, how='outer')
merged = merged.round({'snp_beta':3})
out[sample_id] = merged
if is_file_like(filename_or_fileobj):
pickle.dump(out, filename_or_fileobj)
    else:  # a file path was given; open it and pickle
with open(filename_or_fileobj, 'wb') as f:
pickle.dump(out, f)
return
| 5,341,096
|
def ordered_indices(src_sizes,tgt_sizes,common_seed,shuffle=True,buckets=None):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if shuffle:
indices = np.random.RandomState(common_seed).permutation(len(src_sizes)).astype(np.int64)
else:
indices = np.arange(len(src_sizes), dtype=np.int64)
if buckets is None:
        # sort by target length, then source length
        if tgt_sizes is not None:  # first order the indices by the number of tgt tokens
            indices = indices[
                np.argsort(tgt_sizes[indices], kind="mergesort")]  # stable mergesort applied to the shuffled indices
        return indices[np.argsort(src_sizes[indices], kind="mergesort")]  # then order by the number of src tokens
    else:
        # sort by the larger of the two lengths, i.e.
        # sort by bucketed_num_tokens, which is:
        # max(padded_src_len, padded_tgt_len)
bucketed_num_tokens=np.array([max(src_size,tgt_size) for src_size,tgt_size in zip(src_sizes,tgt_sizes)])
return indices[
np.argsort(bucketed_num_tokens[indices], kind="mergesort")
]
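# A minimal worked example (shuffle disabled, no buckets): the result is ordered
# primarily by source length, with ties broken by target length thanks to the
# stable mergesort passes above.
import numpy as np

demo_src = np.array([5, 3, 7, 3])
demo_tgt = np.array([2, 4, 2, 1])
print(ordered_indices(demo_src, demo_tgt, common_seed=0, shuffle=False))  # [3 1 0 2]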
| 5,341,097
|
def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None):
""" Running pPXF. """
velscale = context.velscale if velscale is None else velscale
V0 = context.V if V0 is None else V0
# Reading templates
ssp_templates = fits.getdata(templates_file, extname="SSPS").T
params = Table.read(templates_file, hdu=1)
nssps = ssp_templates.shape[1]
logwave_temp = Table.read(templates_file, hdu=2)["loglam"].data
wave_temp = np.exp(logwave_temp)
# Use first spectrum to set emission lines
start0 = [V0, 100., 0., 0.]
bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]]
for spec in specs:
print("Processing spectrum {}".format(spec))
name = spec.replace(".fits", "")
outyaml = os.path.join(outdir, "{}.yaml".format(name))
if os.path.exists(outyaml) and not redo:
continue
table = Table.read(spec)
wave_lin = table["wave"]
flux = table["flux"]
fluxerr = table["fluxerr"]
# Removing red part of the spectrum
idx = np.where(wave_lin < 7000)[0]
wave_lin = wave_lin[idx]
flux = flux[idx]
fluxerr = fluxerr[idx]
der_sn = misc.snr(flux)[2]
data_sn = np.nanmedian(flux / fluxerr)
###################################################################
# Rebinning the data to a logarithmic scale for ppxf
wave_range = [wave_lin[0], wave_lin[-1]]
logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1]
wave = np.exp(logwave)
wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1]
flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr)
####################################################################
# Setting up the gas templates
gas_templates, line_names, line_wave = \
ppxf_util.emission_lines(logwave_temp,
[wave_lin[0], wave_lin[-1]], 2.95)
ngas = gas_templates.shape[1]
####################################################################
# Masking bad pixels
skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863])
goodpixels = np.arange(len(wave))
for line in skylines:
sky = np.argwhere((wave < line - 10) | (wave > line + 10)).ravel()
goodpixels = np.intersect1d(goodpixels, sky)
# Making goodpixels mask
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0])
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(
fluxerr))[0])
# Cleaning input spectrum
fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr)
flux[~np.isfinite(flux)] = 0.
########################################################################
# Preparing the fit
dv = (logwave_temp[0] - logwave[0]) * \
constants.c.to("km/s").value
templates = np.column_stack((ssp_templates, gas_templates))
components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype(
np.int)
gas_component = components > 0
start = [start0[:2]] * (ngas + 1)
bounds = [bounds0] * (ngas + 1)
moments = [2] * (ngas + 1)
########################################################################
# Fitting with two components
pp = ppxf(templates, flux, fluxerr, velscale=velscale,
plot=True, moments=moments, start=start, vsyst=dv,
lam=wave, component=components, mdegree=-1,
gas_component=gas_component, gas_names=line_names,
quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels)
plt.savefig(os.path.join(outdir, "{}.png".format(name)), dpi=250)
plt.close()
pp.name = name
# Saving results and plot
save(pp, outdir)
| 5,341,098
|
def test_mix_up_single():
"""
Test single batch mix up op
"""
logger.info("Test single batch mix up op")
resize_height = 224
resize_width = 224
# Create dataset and define map operations
ds1 = ds.ImageFolderDatasetV2(DATA_DIR_2)
num_classes = 10
decode_op = c_vision.Decode()
resize_op = c_vision.Resize((resize_height, resize_width), c_vision.Inter.LINEAR)
one_hot_encode = c.OneHot(num_classes) # num_classes is input argument
ds1 = ds1.map(input_columns=["image"], operations=decode_op)
ds1 = ds1.map(input_columns=["image"], operations=resize_op)
ds1 = ds1.map(input_columns=["label"], operations=one_hot_encode)
# apply batch operations
batch_size = 3
ds1 = ds1.batch(batch_size, drop_remainder=True)
ds2 = ds1
alpha = 0.2
transforms = [py_vision.MixUp(batch_size=batch_size, alpha=alpha, is_single=True)
]
ds1 = ds1.map(input_columns=["image", "label"], operations=transforms)
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)):
image1 = data1["image"]
label = data1["label"]
logger.info("label is {}".format(label))
image2 = data2["image"]
label2 = data2["label"]
logger.info("label2 is {}".format(label2))
lam = np.abs(label - label2)
for index in range(batch_size - 1):
if np.square(lam[index]).mean() != 0:
lam_value = 1 - np.sum(lam[index]) / 2
img_golden = lam_value * image2[index] + (1 - lam_value) * image2[index + 1]
assert image1[index].all() == img_golden.all()
logger.info("====test single batch mixup ok====")
| 5,341,099
|