| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from pathlib import Path
from datetime import timedelta
import numpy as np
import pandas as pd
import scipy.optimize
def fit_sir(times, T_real, gamma, population, store, pathtoloc, tfmt='%Y-%m-%d', method_solver='DOP853', verbose=True, \
b_scale=1):
"""
Fit the dynamics of the SIR model starting from the real data in `T_real`.
The initial condition is taken from the real data.
The method assumes that in the `store`, under the indicated `pathtoloc`, there are entries
named in the format `tfmt` (default %Y-%m-%d) that describe the infectivity matrices
for the times `times[:-1]`.
`population` is the vector with the population per community.
OUTPUT:
* df_S: susceptible fraction per community, one row per dumped day
* df_I: infected fraction per community, one row per dumped day
* df_fit: fitted infectivity scale and total infected fraction per input time
For the output the dumping interval is one day.
"""
# initializations
nt = len(times)
t = times[0]
B = read_df(t, tfmt, store, pathtoloc).to_numpy()
N = B.shape[0]
Y_real = np.einsum('ta,a->t', T_real, population) / np.sum(population)
X = np.zeros((2, N), dtype=np.float64)
I = T_real[0]
S = 1 - I
X = sir_SI_to_X(S, I)
y = get_sir_omega_X(X, population)
ts = [t]
Xs = [X.reshape(2,N)]
Ys = [y]
b_scales = []
blo = 0.
# print("nt = ", nt)
for i in range(1, nt):
if verbose:
print(f'Integrating day {t}')
mykey = Path(pathtoloc) / t.strftime(tfmt)
mykey = str(mykey)
if mykey in store.keys():
B = read_df(t, tfmt, store, pathtoloc).to_numpy()
elif verbose:
print("Infectivity matrix not updated!")
tnew = times[i]
dt = int((tnew - t).days)
ypred = Y_real[i]
# root finding method
func_root = lambda b: get_sir_omega_X(compute_sir_X(X, dt, b*B, gamma, method_solver), \
population) - ypred
# initial bracketing
bhi = b_scale
fscale = 3.
for k in range(1,10):
f = func_root(bhi)
if f > 0:
break
else:
bhi *= fscale
if f < 0:
raise ValueError("Problem in bracketing!")
# find the root
sol = scipy.optimize.root_scalar(func_root, bracket=(blo, bhi), method='brentq', \
options={'maxiter': 100})
if not (sol.converged):
raise ValueError("root finding failed!")
b_scale = sol.root
# compute next state with optimal scale
t_eval = np.arange(dt+1)
Xnews = compute_sir_X(X, dt, b_scale*B, gamma, method_solver, t_eval=t_eval)
Xnew = Xnews[-1]
y = get_sir_omega_X(Xnew,population)
print(f"b = {b_scale}, y = {y}, ypred = {ypred}, y-ypred = {y-ypred}")
# dump
# data.append(Xnew.reshape(2,N))
Xs += [x.reshape(2, N) for x in Xnews]
ts += [t + timedelta(days=int(step)) for step in t_eval[1:]]
Ys.append(y)
b_scales.append(b_scale)
# update
t = tnew
X = Xnew
b_scales.append(None) # B has ndays-1 entries
print("Fitting complete")
# prepare export of results
S = np.array([X[0] for X in Xs])
I = np.array([X[1] for X in Xs])
clusters = np.arange(N, dtype=np.uint)
df_S = pd.DataFrame(data=S, index=ts, columns=clusters)
df_I = pd.DataFrame(data=I, index=ts, columns=clusters)
df_fit = pd.DataFrame(data=np.array([b_scales, Ys]).T, index=times, columns=["scale", "frac_infected_tot"])
return df_S, df_I, df_fit
|
7a7da41fc178c805cc334e5a0060a2f9cc5f29d3
| 3,641,600
|
from typing import Dict
from collections import OrderedDict
import mysql.connector
def panelist_debuts_by_year(database_connection: mysql.connector.connect
) -> Dict:
"""Returns an OrderedDict of show years with a list of panelists'
debut information"""
show_years = retrieve_show_years(database_connection)
panelists = retrieve_panelists_first_shows(database_connection)
years_debut = OrderedDict()
for year in show_years:
years_debut[year] = []
for panelist in panelists:
panelist_info = panelists[panelist]
years_debut[panelist_info["year"]].append(panelist_info)
return years_debut
|
40ba0cd67991b7c83b33e77522065b8bb75232c1
| 3,641,601
|
def _stirring_conditions_html(stirring: reaction_pb2.StirringConditions) -> str:
"""Generates an HTML-ready description of stirring conditions.
Args:
stirring: StirringConditions message.
Returns:
String description of the stirring conditions.
"""
if stirring.type == stirring.NONE:
return ""
txt = ""
if stirring.type != stirring.UNSPECIFIED:
txt += {
stirring.CUSTOM: stirring.details,
stirring.STIR_BAR: "stir bar",
stirring.OVERHEAD_MIXER: "overhead mixer",
stirring.AGITATION: "agitation",
}[stirring.type]
if stirring.rate.rpm:
txt += f" ({stirring.rate.rpm} rpm)"
return txt
|
0f03c67602163da3b732dfdcb0d367c6a0806c0d
| 3,641,602
|
def load_action_plugins():
"""
Return a list of all registered action plugins
"""
logger.debug("Loading action plugins")
plugins = get_plugins(action, ActionPlugin)
if len(plugins) > 0:
logger.info("Discovered {n} action plugins:".format(n=len(plugins)))
for ap in plugins:
logger.debug(" - {ap}".format(ap=ap.PLUGIN_NAME))
return plugins
|
55588021e933392136cb0d7f9dff7224716cce34
| 3,641,603
|
import io
import os
def read_file_in_root_directory(*names, **kwargs):
"""Read a file."""
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf-8')
) as fh:
return fh.read().strip()
|
0fdf6d445a0cd152e66988d59d29a04e5f0a1651
| 3,641,604
|
def set_effective_property_value_for_node(
nodeId: dom.NodeId, propertyName: str, value: str
) -> dict:
"""Find a rule with the given active property for the given node and set the new value for this
property
Parameters
----------
nodeId: dom.NodeId
The element id for which to set property.
propertyName: str
value: str
"""
return {
"method": "CSS.setEffectivePropertyValueForNode",
"params": {"nodeId": int(nodeId), "propertyName": propertyName, "value": value},
}
|
36cf035bd878ac4c4936cebbacc115273807b892
| 3,641,605
|
def classroom_page(request,unique_id):
"""
Classroom Setting Page.
"""
classroom = get_object_or_404(Classroom,unique_id=unique_id)
pending_members = classroom.pending_members.all()
admins = classroom.special_permissions.all()
members = admins | classroom.members.all()
is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
#classroom_update
if request.method=="POST":
form = CreateclassForm(request.POST,request.FILES,instance=classroom)
if form.is_valid():
form.save()
return redirect(reverse('subjects',kwargs={'unique_id':classroom.unique_id}))
else:
form = CreateclassForm(instance=classroom)
params={
'members':members.distinct(),
'admins':admins,
'pending_members':pending_members,
'classroom':classroom,
'is_admin':is_admin,
'form':form,
}
return render(request,'classroom_settings.html',params)
|
fc37979a44da63fb0dc174799523f3a77fefb1e4
| 3,641,606
|
def concat_hists(hist_array: np.array):
"""Concatenate multiple histograms in an array by adding them up with error prop."""
hist_final = hist_array[0]
for hist in hist_array[1:]:
hist_final.addhist(hist)
return hist_final
|
e659ceb97f38620f561920ddab6339ecb901ee55
| 3,641,607
|
def renorm_flux_lightcurve(flux, fluxerr, mu):
""" Normalise flux light curves with distance modulus."""
d = 10 ** (mu/5 + 1)
dsquared = d**2
norm = 1e18
# print('d**2', dsquared/norm)
fluxout = flux * dsquared / norm
fluxerrout = fluxerr * dsquared / norm
return fluxout, fluxerrout
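# Hedged usage sketch (illustrative numbers, not from the original source): at a
# distance modulus of 40, d = 10**(40/5 + 1) = 1e9, so d**2/1e18 = 1 and the
# flux comes back unchanged.
example_flux, example_fluxerr = renorm_flux_lightcurve(flux=2.5, fluxerr=0.1, mu=40.0)
print(example_flux, example_fluxerr)  # 2.5 0.1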
|
97f2606d54b106d2051983dfc29d942112e7a1e3
| 3,641,608
|
import argparse
from pathlib import Path
import sys
from io import StringIO
from urllib3 import PoolManager
def retrieve(args: argparse.Namespace, file_handler: DataFilesHandler, homepath: Path) -> str:
"""Find an expression by name."""
name = NAME_MULTIPLEXOR.join(args.REGULAR_EXPRESSION_NAME)
try:
return file_handler.get_pattern(name)
except KeyError:
pass
if args.local or (not file_handler.config.should_lookup and not args.online):
print(f"{name} not found locally, lookups disabled.", file=sys.stderr)
sys.exit(1)
external_patterns = (
PoolManager().request("GET", file_handler.config.lookup_location).data.decode("utf-8")
)
try:
pattern = DataFilesHandler(StringIO(), StringIO(external_patterns)).get_pattern(name)
except KeyError:
print(
f"{name} not found at {file_handler.config.lookup_location} or locally.",
file=sys.stderr,
)
sys.exit(1)
if args.save or (file_handler.config.should_save and not args.no_save):
file_handler.set_pattern(name, pattern)
file_handler.flush(str(homepath), patterns=True)
return pattern
|
acb2fd8c624eff05fd1181974afa46585dec275e
| 3,641,609
|
import numpy as np
def find_focus(stack):
"""
Parameters
----------
stack: (nd-array) Image stack of dimension (Z, ...) to find focus
Returns
-------
(focus_idx_max, focus_idx_min): (tuple of int) Indices of the slices with the
highest and lowest Brenner focus score, respectively
"""
def brenner_gradient(im):
assert len(im.shape) == 2, 'Input image must be 2D'
return np.mean((im[:-2, :] - im[2:, :]) ** 2)
focus_scores = []
for img in stack:
focus_score = brenner_gradient(img)
focus_scores.append(focus_score)
focus_idx_min = np.where(focus_scores == np.min(focus_scores))[0][0]
focus_idx_max = np.where(focus_scores == np.max(focus_scores))[0][0]
return focus_idx_max, focus_idx_min
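# Hedged usage sketch: a synthetic 3-slice stack in which the middle slice holds
# high-frequency noise and the outer slices are flat, so the Brenner score peaks
# at index 1 and is minimal at index 0.
example_stack = np.stack([np.ones((32, 32)), np.random.default_rng(0).normal(size=(32, 32)), np.ones((32, 32))])
print(find_focus(example_stack))  # max-focus index 1, min-focus index 0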
|
234cecb9c43f9427cd8c5d1e9b2ae24c14239835
| 3,641,610
|
def get_amr_line(input_f):
"""Read the amr file. AMRs are separated by a blank line."""
cur_amr=[]
has_content=False
for line in input_f:
if line[0]=="(" and len(cur_amr)!=0:
cur_amr=[]
if line.strip()=="":
if not has_content:
continue
else:
break
elif line.strip().startswith("#"):
# omit the comment in the AMR file
continue
else:
has_content=True
cur_amr.append(delete_pattern(line.strip(), '~e\.[0-9]+(,[0-9]+)*'))
#cur_amr.append(line.strip())
return "".join(cur_amr)
|
5b0c980a8c68143d8fdeb413185ee445b11cd30b
| 3,641,611
|
import netifaces
def getHwAddrForIp(ip):
"""
Returns the MAC address for the first interface that matches the given IP
Returns None if not found
"""
for i in netifaces.interfaces():
addrs = netifaces.ifaddresses(i)
try:
if_mac = addrs[netifaces.AF_LINK][0]['addr']
if_ip = addrs[netifaces.AF_INET][0]['addr']
except (IndexError, KeyError): # Ignore ifaces that don't have MAC or IP
if_mac = if_ip = None
if if_ip == ip:
return if_mac
return None
|
efbeb494ed0a3fb135e87a66a170a94f4ca78231
| 3,641,612
|
import numpy as np
def rbf_multiquadric(r, epsilon=1.0, beta=2.5):
"""
Multiquadric radial basis function (note: `beta` is accepted but unused here).
"""
return np.sqrt((epsilon*r)**2 + 1.0)
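# Hedged usage sketch: evaluate the multiquadric RBF on a few sample radii
# (values are illustrative only).
example_r = np.linspace(0.0, 2.0, 5)
print(rbf_multiquadric(example_r, epsilon=1.0))  # sqrt(r**2 + 1) for each r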
|
068ab09a609a47e631d91f90634fe4a5810e0fd1
| 3,641,613
|
def is_valid_sudoku(board):
"""
Checks if an input sudoku board is valid
Algorithm:
For every non-empty square on the board, check whether its value already
exists in that square's row, column, or 3x3 sub-square.
If it does, return False.
"""
cols = [set() for _ in range(9)]
squares = [[set() for _ in range(3)] for x in range(3)]
for row in range(9):
rows = set()
for col in range(9):
if board[row][col] == ".":
continue
# Check row
if board[row][col] in rows:
return False
else:
rows.add(board[row][col])
# Check col
if board[row][col] in cols[col]:
return False
else:
cols[col].add(board[row][col])
# Check square
if board[row][col] in squares[row // 3][col // 3]:
return False
else:
squares[row // 3][col // 3].add(board[row][col])
return True
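# Hedged usage sketch: a nearly empty board with a duplicated "5" in the first
# row, which the check should reject.
example_board = [["5", ".", ".", ".", ".", ".", ".", ".", "5"]] + [["."] * 9 for _ in range(8)]
print(is_valid_sudoku(example_board))  # False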
|
001a02a47acbaa192215d985f3d743c42a9fb42b
| 3,641,614
|
def lab_to_nwb_dict(lab_key):
"""
Generate a dictionary containing all relevant lab and institution info
:param lab_key: Key specifying one entry in element_lab.lab.Lab
:return: dictionary with NWB parameters
"""
lab_info = (lab.Lab & lab_key).fetch1()
return dict(
institution=lab_info.get("institution"),
lab=lab_info.get("lab_name"),
)
|
dcde08b3421d56003d23ca19747430c6d95bf431
| 3,641,615
|
from typing import Set, TypeVar
A = TypeVar("A")
def length(self: Set[A]) -> int:
"""
Returns the length (number of elements) of the set. `size` is an alias for length.
Returns:
The length of the set
"""
return len(self)
|
cab214f7b06fc8ae604286cd40d6d558d05b7175
| 3,641,616
|
import time
def timestamp(tdigits=8):
"""Return a unique timestamp string for the session. useful for ensuring
unique function identifiers, etc.
"""
return str(time.clock()).replace(".", "").replace("-", "")[: tdigits + 1]
|
b209795f67735ada82238e5fa47f5132efa61384
| 3,641,617
|
from typing import List
from typing import Optional
def hierarchical_mutation(original_individual: Individual, strength: float, **kwargs) -> List[Optional[Individual]]:
# TODO: Double Check
"""Choose a node in the graph_manager, choose a parameter inside the node, mutate it.
Each parameter has probability: `1/len(nodes) * 1/len(parameters in that node)`.
Args:
original_individual (Individual): source individual to mutate
strength (float): mutation strength
Returns:
A list with the new mutated individual or None if it is not valid
"""
check_muation_parameters(original_individual, strength)
new_individual = clone_individual(original_individual)
new_individual.parents = {original_individual}
new_individual.operator = hierarchical_mutation
# Do while not (rnd.random() < strength)
while True:
# Use "while loop" to try choosing a node that doesn't contain only the Information parameter or the mutation
# had no effect
while True:
# Choose a node that contains the parameter to mutate
chosen_node = random_generator.choice(new_individual.nodes())
# Create a list of parameters contained into the macro
candidate_parameters = list()
for parameter_name, parameter in new_individual.nodes[chosen_node]['parameters'].items():
if not isinstance(parameter, Information):
candidate_parameters.append(parameter)
# If I tried to mutate a macro that contains only an Information parameter -> pick another node to mutate
# else -> mutate a random parameter
if candidate_parameters:
# Choose only one parameter to mutate in the list of all parameters of the chosen macro
chosen_parameter = random_generator.choice(candidate_parameters)
assert strength
chosen_parameter.mutate(strength)
break
# Stop condition
if strength == 1.0 or not (random_generator.random() < strength):
break
new_individual.finalize()
if not new_individual.valid:
return [None]
else:
# print_individual(original_individual, 'ORIGINAL', True)
# print_individual(individual, 'MUTATED', True)
return [new_individual]
|
a726478230b1cd37a0065f08c42b3dd125db9357
| 3,641,618
|
def is_wrapped_exposed_object(obj):
"""
Return True if ``obj`` is a Lua (lupa) wrapper for a BaseExposedObject
instance
"""
if not hasattr(obj, 'is_object') or not callable(obj.is_object):
return False
return bool(obj.is_object())
|
117a43f9dcc886dc88a77c2ace016b89e43b3c4c
| 3,641,619
|
def no_transform(image):
"""Pass through the original image without transformation.
Returns a tuple with None to maintain compatibility with processes that
evaluate the transform.
"""
return (image, None)
|
25b45a5c77d3c2864ebc7a046e0f47b2fafb067b
| 3,641,620
|
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
"""Builds a menu with the given style using the provided buttons
:return:
list of button rows, each row holding up to n_cols buttons
"""
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, [header_buttons])
if footer_buttons:
menu.append([footer_buttons])
return menu
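# Hedged usage sketch: five buttons (plain strings here for illustration) laid
# out in two columns with a header row on top.
print(build_menu(["a", "b", "c", "d", "e"], n_cols=2, header_buttons="back"))
# [['back'], ['a', 'b'], ['c', 'd'], ['e']]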
|
f068ef9222b7e16cf19d901961f0315b2d6aebe3
| 3,641,621
|
def dbbox2result(dbboxes, labels, num_classes):
"""
Convert detection results to a list of numpy arrays.
:param dbboxes: (Tensor): shape (n, 9)
:param labels: (Tensor): shape (n, )
:param num_classes: (int), class number, including background class
:return: list (ndarray): dbbox results of each class
"""
# TODO: merge it with bbox2result
if dbboxes.shape[0] == 0:
return [
np.zeros((1, 9), dtype=np.float32) for i in range(num_classes - 1)
]
else:
dbboxes = dbboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [dbboxes[labels == i, :] for i in range(num_classes - 1)]
# result = [dbboxes[labels == i, :] for i in range(num_classes - 1)]
# for i, each_class in enumerate(result):
# if each_class.shape[0] == 0:
# result[i] = np.zeros((1, 9))
# return result
|
945ba8c82837d51446eb8d3123497facafb0d503
| 3,641,622
|
import numpy as np
def SSderivative(ds):
"""
Given a time-step ds and a single input time history u, this SS model
returns the output y=[u, du/ds], where du/ds is computed with second order
accuracy.
"""
A = np.array([[0]])
Bm1 = np.array([0.5 / ds])
B0 = np.array([[-2 / ds]])
B1 = np.array([[1.5 / ds]])
C = np.array([[0], [1]])
D = np.array([[1], [0]])
# change state
Aout, Bout, Cout, Dout = SSconv(A, B0, B1, C, D, Bm1)
return Aout, Bout, Cout, Dout
|
c255937fd1f727932d5b09fc70c586e7bdb10bf1
| 3,641,623
|
import lxml.html
def clean_post(value):
"""Remove unwanted elements in post content"""
doc = lxml.html.fragment_fromstring(value)
doc.tag = 'div' # replaces <li>
doc.attrib.clear()
# remove comment owner info
for e in doc.xpath('//div[@class="weblog_keywords"]'):
e.drop_tree()
return lxml.html.tostring(doc)
|
c7670d5632760b577aa7ac9dae24de15bf164c67
| 3,641,624
|
def get_houdini_version(as_string=True):
"""
Returns version of the executed Houdini
:param as_string: bool, Whether to return the stiring version or not
:return: variant, int or str
"""
if as_string:
return hou.applicationVersionString()
else:
return hou.applicationVersion()
|
efcc18a89552f8dd1c4807be2042b51db2c2fb61
| 3,641,625
|
import socket
def check_port_open(port: int) -> bool:
"""
Check whether the given port is free.
This is part of the port_validation logic.
"""
try:
sock = socket.socket()
sock.bind(("", port))
sock.close()
print(f"Port {port} is free")
return True
except OSError:
print(f"Port {port} is in use")
return False
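# Hedged usage sketch: probe an arbitrary high port; the result depends on the
# local machine, so this only illustrates the call.
print(check_port_open(50505))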
|
76ba3ddd03bf1672b8b4ce5fd048561c3a9e78e8
| 3,641,626
|
from datetime import datetime, timezone
def convert_date_to_tick_tick_format(datetime_obj, tz: str):
"""
Parses ISO 8601 Format to Tick Tick Date Format
It first converts the datetime object to UTC time based off the passed time zone, and then
returns a string with the TickTick required date format.
!!! info Required Format
ISO 8601 Format Example: 2020-12-23T01:56:07+00:00
TickTick Required Format: 2020-12-23T01:56:07+0000 -> Where the last colon is removed for timezone
Arguments:
datetime_obj (datetime): Datetime object to be parsed.
tz: Time zone string.
Returns:
str: The TickTick accepted date string.
??? info "Import Help"
```python
from ticktick.helpers.time_methods import convert_iso_to_tick_tick_format
```
??? example
```python
date = datetime(2022, 12, 31, 14, 30, 45)
converted_date = convert_iso_to_tick_tick_format(date, 'US/Pacific')
```
??? success "Result"
The proper format for a date string to be used with TickTick dates.
```python
'2022-12-31T22:30:45+0000'
```
"""
date = convert_local_time_to_utc(datetime_obj, tz)
date = date.replace(tzinfo=timezone.utc).isoformat()
date = date[::-1].replace(":", "", 1)[::-1]
return date
|
9f8efc2136b75310649d31328d4359d2030aff97
| 3,641,627
|
import tensorflow as tf
def measurement(resp, p):
"""model measurement effects in the filters by translating the response at
each location and stimulus (first 3 axes of resp) toward the filterwise mean
(4th axis) according to proportion p. p=1 means that all filters reduce
to their respective means; p=0 does nothing; p<0 is possible but probably
not something you want."""
resp = tf.convert_to_tensor(resp)
# average the filter dim
meanresp = tf.reduce_mean(resp, axis=3, keepdims=False)
# make resp the origin of meanresp and scale by p
transresp = (meanresp[:, :, :, None] - resp) * p
return resp + transresp
|
99d24b3b790c0aa1d2873ca5521144a1e326b661
| 3,641,628
|
def irpf(salario,base=12.5,prorrateo=0):
"""Entra el salario y la base, opcionalmente un parametro para prorratear
Si no se da el valor de la bas3e por defecto es 12.5"""
if type(salario)==float and type(base)==float:
if prorrateo==True:
return (salario*(1+2/12))*(base/100)
elif prorrateo==False:
return salario*(base/100)
else:
return None
|
b549e78f2cbd3227cc99d4ce7277a90058696895
| 3,641,629
|
def get2p3dSlaterCondonUop(Fdd=(9, 0, 8, 0, 6), Fpp=(20, 0, 8), Fpd=(10, 0, 8), Gpd=(0, 3, 0, 2)):
"""
Return a 2p-3d U operator containing a sum of
different Slater-Condon proccesses.
Parameters
----------
Fdd : tuple
Fpp : tuple
Fpd : tuple
Gpd : tuple
"""
# Calculate F_dd^{0,2,4}
FddOp = getUop(l1=2,l2=2,l3=2,l4=2,R=Fdd)
# Calculate F_pp^{0,2}
FppOp = getUop(l1=1,l2=1,l3=1,l4=1,R=Fpp)
# Calculate F_pd^{0,2}
FpdOp1 = getUop(l1=1,l2=2,l3=2,l4=1,R=Fpd)
FpdOp2 = getUop(l1=2,l2=1,l3=1,l4=2,R=Fpd)
FpdOp = addOps([FpdOp1,FpdOp2])
# Calculate G_pd^{1,3}
GpdOp1 = getUop(l1=1,l2=2,l3=1,l4=2,R=Gpd)
GpdOp2 = getUop(l1=2,l2=1,l3=2,l4=1,R=Gpd)
GpdOp = addOps([GpdOp1,GpdOp2])
# Add operators
uOp = addOps([FddOp,FppOp,FpdOp,GpdOp])
return uOp
|
6ae077b1913bf40f93adcdbbbbc882baa9d56eea
| 3,641,630
|
from typing import AnyStr
import pickle
def read_meta_fs(filename: AnyStr):
"""
Read meta data from disk.
"""
settings.Path(filename).mkdir(parents=True, exist_ok=True)
filepath = settings.pj(filename, "meta.pkl")
with open(filepath, "rb") as fh:
return pickle.load(fh)
|
8fdf4c74d34c623cd1ac7d15f32f891685f1d863
| 3,641,631
|
def compile(model, ptr, vtr, num_y_per_branch=1):
"""Create a list with ground truth, loss functions and loss weights.
"""
yholder_tr = []
losses = []
loss_weights = []
num_blocks = int(len(model.output) / (num_y_per_branch + 1))
printcn(OKBLUE,
'Compiling model with %d outputs per branch and %d branches.' %
(num_y_per_branch, num_blocks))
for i in range(num_blocks):
for j in range(num_y_per_branch):
yholder_tr.append(ptr)
losses.append(elasticnet_loss_on_valid_joints)
loss_weights.append(1.)
yholder_tr.append(vtr)
losses.append('binary_crossentropy')
loss_weights.append(0.01)
printcn(OKBLUE, 'loss_weights: ' + str(loss_weights))
model.compile(loss=losses, optimizer=RMSprop(), loss_weights=loss_weights)
return yholder_tr
|
24af75f3b5bc6ba06d88f81023c2c7011f1d6922
| 3,641,632
|
import html
import bleach
def strip_clean(input_text):
"""Strip out undesired tags.
This removes tags like <script>, but leaves characters like & unescaped.
The goal is to store the raw text in the database with the XSS nastiness.
By doing this, the content in the database is raw
and Django can continue to assume that it's unsafe by default.
"""
return html.unescape(bleach.clean(input_text, strip=True))
|
83e2bd3cb5c2645dd4ea611fd0e0577d118b8326
| 3,641,633
|
def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
draw_probability=DRAW_PROBABILITY, backend=None, env=None):
"""Setups the global environment.
:param env: the specific :class:`TrueSkill` object to be the global
environment. It is optional.
>>> Rating()
trueskill.Rating(mu=25.000, sigma=8.333)
>>> setup(mu=50) #doctest: +ELLIPSIS
trueskill.TrueSkill(mu=50.000, ...)
>>> Rating()
trueskill.Rating(mu=50.000, sigma=8.333)
"""
if env is None:
env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
global_env.__trueskill__ = env
return env
|
ce797c9994e477bc618f8f52cc63babcc61b78fd
| 3,641,634
|
def _bytepad(x, length):
"""Zero pad byte string as defined in NIST SP 800-185"""
to_pad = _left_encode(length) + x
# Note: this implementation works with byte aligned strings,
# hence no additional bit padding is needed at this point.
npad = (length - len(to_pad) % length) % length
return to_pad + b'\x00' * npad
|
b02304fbb0e4bc42a80bc3fdc246c4fc9d55c816
| 3,641,635
|
import argparse
def str2bool(val):
"""enable default constant true arguments"""
# https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(val, bool):
return val
elif val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
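# Hedged usage sketch: the usual argparse pattern this helper supports, allowing
# "--flag", "--flag true" and "--flag false" on the command line.
example_parser = argparse.ArgumentParser()
example_parser.add_argument("--flag", type=str2bool, nargs="?", const=True, default=False)
print(example_parser.parse_args(["--flag", "true"]).flag)  # True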
|
ca229cd53674c6e9a8f37c60909826bf50c6accb
| 3,641,636
|
def get_scalefactor(metadata):
"""Add scaling factors to the metadata dictionary
:param metadata: dictionary with CZI or OME-TIFF metadata
:type metadata: dict
:return: dictionary with additional keys for scaling factors
:rtype: dict
"""
# set default scale factors to 1
scalefactors = {'xy': 1.0,
'zx': 1.0
}
try:
# get the factor between XY scaling
scalefactors['xy'] = metadata['XScale'] / metadata['YScale']
# get the scalefactor between XZ scaling
scalefactors['zx'] = metadata['ZScale'] / metadata['YScale']
except KeyError as e:
print('Key not found: ', e)
return scalefactors
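# Hedged usage sketch with a hand-made metadata dictionary following the
# 'XScale'/'YScale'/'ZScale' key convention used above.
print(get_scalefactor({'XScale': 0.5, 'YScale': 0.5, 'ZScale': 1.0}))  # {'xy': 1.0, 'zx': 2.0}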
|
0619d5fa8f24008ddf4364a965268755c07d09c3
| 3,641,637
|
def alignmentEntropy(align, statistic='absolute', removeGaps=False, k=1, logFunc=np.log):
"""Calculates the entropy in bits of each site (or kmer) in a sequence alignment.
Also can compute:
- "uniqueness" which I define to be the fraction of unique sequences
- "uniquenum" which is the number of unique sequences
Parameters
----------
align : pd.Series() or list
Alignment of sequences.
statistic : str
Statistic to be computed: absolute, uniqueness
Uniqueness is the fraction of unique sequences.
Uniquenum is the number of unique AA at each position.
removeGaps : bool
Remove from the alignment at each position, kmers that start with a gap character.
Also use "non-gapped kmers" (ie skipping gaps)
k : int
Length of the kmer to consider at each start position in the alignment.
(default 1 specifies site-wise entropy)
logFunc : function
Default is natural log, returning nats. Can also use log2 for bits.
Return
------
out : float
Output statistic."""
if removeGaps:
grabKmerFlag = 1
else:
grabKmerFlag = 0
align = padAlignment(align)
L = len(align[align.index[0]])
nKmers = L - k + 1
entropy = np.zeros(nKmers, dtype=float)
for aai in np.arange(nKmers):
kmers = [grabKmer(seq, aai, k)[grabKmerFlag] for seq in align]
"""kmers that start with a gap or that are at the end and are of insufficent length, will be None"""
kmers = [mer for mer in kmers if not mer is None]
oh = objhist(kmers)
if statistic == 'absolute':
entropy[aai] = oh.entropy()
elif statistic == 'uniqueness':
entropy[aai] = oh.uniqueness()
elif statistic == 'uniquenum':
entropy[aai] = len(list(oh.keys()))
return entropy
|
ea06ae01cd1aa69cfc7dd19c72caafc5478fda38
| 3,641,638
|
def NodeToString(xml_node):
"""Returns an XML string.
Args:
xml_node: xml.dom.Node object
Returns:
String containing XML
"""
return xml_node.toxml()
|
043072bbb40f33947febedf967679e3e39931834
| 3,641,639
|
def difference(data, interval):
""" difference dataset
parameters:
data: dataset to be differenced
interval: the interval between the two elements to be differenced.
return:
dataset: with the length = len(data) - interval
"""
return [data[i] - data[i - interval] for i in range(interval, len(data))]
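# Hedged usage sketch: first differences (interval=1) of a short series.
print(difference([1, 4, 9, 16, 25], interval=1))  # [3, 5, 7, 9]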
|
611f4ad36935000ae7dc16f76aef7cbb494b36ac
| 3,641,640
|
def merge_dictionaries(dict1, dict2):
""" Merges two dictionaries handling embedded lists and
dictionaries.
In a case of simple type, values from dict1 are preserved.
Args:
dict1, dict2: dictionaries to merge
Return:
the merged dictionary (dict1, updated in place)
"""
for k2, v2 in dict2.items():
if k2 not in dict1:
dict1[k2] = v2
else:
if isinstance(v2, list):
dict1[k2] = merge_lists(dict1[k2], v2)
elif isinstance(v2, dict):
dict1[k2] = merge_dictionaries(dict1[k2], v2)
else:
# if the type is int or strings we do nothing
# its already in dict1
pass
return dict1
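# Hedged usage sketch: merging nested dictionaries. List values would go through
# the separately defined `merge_lists` helper, so this example sticks to scalars
# and nested dicts.
print(merge_dictionaries({"name": "cfg", "opts": {"x": 1}}, {"opts": {"y": 2}, "extra": True}))
# {'name': 'cfg', 'opts': {'x': 1, 'y': 2}, 'extra': True}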
|
8d46ce04496be2b5ba0e66788aed1a4e5ec1c85c
| 3,641,641
|
def build(model_def, model_name, optimizer, loss_name, custom_objects=None):
"""build keras model instance in FastEstimator
Args:
model_def (function): function definition of tf.keras model or path of model file(h5)
model_name (str, list, tuple): model name(s)
optimizer (str, optimizer, list, tuple): optimizer(s)
loss_name (str, list, tuple): loss name(s)
custom_objects (dict): dictionary mapping names to custom classes or functions used when loading the model
Returns:
model: model(s) compiled by FastEstimator
"""
with fe.distribute_strategy.scope() if fe.distribute_strategy else NonContext():
if isinstance(model_def, str):
model = tf.keras.models.load_model(model_def, custom_objects=custom_objects)
else:
model = model_def()
model = to_list(model)
model_name = to_list(model_name)
optimizer = to_list(optimizer)
loss_name = to_list(loss_name)
assert len(model) == len(model_name) == len(optimizer) == len(loss_name)
for idx, (m, m_n, o, l_n) in enumerate(zip(model, model_name, optimizer, loss_name)):
model[idx] = _fe_compile(m, m_n, o, l_n)
if len(model) == 1:
model = model[0]
return model
|
28cf56036b00790cf3e6350cc2741d93dd047e3a
| 3,641,642
|
import wave
def check_audio_file(audio_file):
"""
Check if the audio file contents and format match the needs of the speech service. Currently we only accept
16-bit PCM audio, mono or stereo, with a sample rate of 8 kHz or above. All others will be rejected.
:param audio_file: file to check
:return: audio duration, if file matches the format expected, otherwise None
"""
# Verify that all wave files are in the right format
try:
with wave.open(audio_file) as my_wave:
frame_rate = my_wave.getframerate()
if frame_rate >= 8000 and my_wave.getnchannels() in [1, 2] \
and my_wave.getsampwidth() == 2 and my_wave.getcomptype() == 'NONE':
audio_duration = my_wave.getnframes() / frame_rate
return audio_duration
else:
raise InvalidAudioFormatError(
"File {0} is not in the right format, it must be: Mono/Stereo, 16bit, PCM, 8KHz or above. "
"Found: ChannelCount={1}, SampleWidth={2}, CompType={3}, FrameRate={4}. Ignoring input!".format(
audio_file,
my_wave.getnchannels(),
my_wave.getsampwidth(),
my_wave.getcomptype(),
frame_rate
)
)
except Exception as e:
raise InvalidAudioFormatError("Invalid wave file {0}, reason: {1} :{2}".format(audio_file, type(e).__name__, e))
|
a6807cddefa7440b2f1cb11b2b3b309579f372e0
| 3,641,643
|
def uniform(name):
"""
Calls the findUniform function from util.py to return the uniform bounds for the given molecule.
Input: name of molecule
Output: array of length [2] with the upper and lower bounds for the uniform prior
"""
prior = findUniform(name, 'd_h')
return prior
|
e01b8c5056d199a8e0048e148170d5fc4c5c28a1
| 3,641,644
|
def merge_two_dicts(x, y):
"""Merges two dicts, returning a new copy."""
z = x.copy()
z.update(y)
return z
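# Hedged usage sketch: values from the second dict win on key collisions and
# neither input is modified.
print(merge_two_dicts({"a": 1, "b": 2}, {"b": 3}))  # {'a': 1, 'b': 3}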
|
9126ada395d9d7f3da5a45b7d46c5b440b5cf23d
| 3,641,645
|
def num_utterances(dataset: ds.DatasetSplit):
"""Returns the total number of utterances in the dataset."""
return sum([len(interaction) for interaction in dataset.examples])
|
0927b96666f2f409c9fb0ec3c63576632810b6dc
| 3,641,646
|
def __virtual__():
"""
Only return if requests and boto are installed.
"""
if HAS_LIBS:
return __virtualname__
else:
return False
|
633ec9294e7585a6d5fc8a1dba2b436a20a4ab7a
| 3,641,647
|
def register():
"""Register user"""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
username = request.form.get("username")
email = request.form.get("email")
password = request.form.get("password")
# Look up existing username and email in the database
rows = db.execute("SELECT * FROM users WHERE username = ?",username)
email_check = db.execute("SELECT * FROM users WHERE email = ?",email)
# Check if Username is taken or not
if len(rows) != 0:
flash("Username Already Taken!", "danger")
return redirect("/register")
# Check if Email is taken or not
if len(email_check) != 0:
flash("Email Already Taken!", "danger")
return redirect("/register")
# Create a hashed password based on sha256 hashing function and store it into database
hashed_password = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)
db.execute("INSERT INTO users(email, username, hash) VALUES(?, ?, ?)",
email, username, hashed_password)
# Redirect user back to login page after registering
flash("Register Successfully!", "success")
return redirect("/login")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("register.html")
|
1c37ad0eac8f6a2230106cfd9e3754d6053956ff
| 3,641,648
|
def _csd_multitaper(X, sfreq, n_times, window_fun, eigvals, freq_mask, n_fft,
adaptive):
"""Compute cross spectral density (CSD) using multitaper module.
Computes the CSD for a single epoch of data.
Parameters
----------
X : ndarray, shape (n_channels, n_times)
The time series data consisting of n_channels time-series of length
n_times.
sfreq : float
The sampling frequency of the data in Hertz.
n_times : int
Number of time samples
window_fun : ndarray
Window function(s) of length n_times. This corresponds to first output
of `dpss_windows`.
eigvals : ndarray | float
Eigenvalues associated with window functions.
freq_mask : ndarray
Which frequencies to use.
n_fft : int
Length of the FFT.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
"""
x_mt, _ = _mt_spectra(X, window_fun, sfreq, n_fft)
if adaptive:
# Compute adaptive weights
_, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
return_weights=True)
# Tiling weights so that we can easily use _csd_from_mt()
weights = weights[:, np.newaxis, :, :]
weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
else:
# Do not use adaptive weights
weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :, np.newaxis]
x_mt = x_mt[:, :, freq_mask]
# Calculating CSD
# Tiling x_mt so that we can easily use _csd_from_mt()
x_mt = x_mt[:, np.newaxis, :, :]
x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
csds = _csd_from_mt(x_mt, y_mt, weights, weights_y)
# FIXME: don't compute full matrix in the first place
csds = np.array([_sym_mat_to_vector(csds[:, :, i])
for i in range(csds.shape[-1])]).T
# Scaling by sampling frequency for compatibility with Matlab
csds /= sfreq
return csds
|
e936cb89ed18df8da25737be6fdfb31822c8db6f
| 3,641,649
|
def _build_tmp_access_args(method, ip, ttl, port, direction, comment):
"""
Builds the cmd args for temporary access/deny opts.
"""
opt = _get_opt(method)
args = "{0} {1} {2}".format(opt, ip, ttl)
if port:
args += " -p {0}".format(port)
if direction:
args += " -d {0}".format(direction)
if comment:
args += " #{0}".format(comment)
return args
|
17a00e10af84519edb1a5dd8d89be614cb548ea1
| 3,641,650
|
def add_two_values(value1, value2):
""" Adds two integers
Arguments:
value1: first integer value e.g. 10
value2: second integer value e.g. 2
"""
return value1 + value2
|
10f71fcbde9d859f094724c94568eee55a7b989a
| 3,641,651
|
import pandas
def combine_nearby_breakends(events, distance=5000):
"""
1d clustering, prioritizing assembled breakpoint coords
"""
breakends = []
positions = get_positions(events)
for (chrom, orientation), cur_events in positions.groupby(["chrom", "orientation"]):
cur_events = cur_events.sort_values("pos")
groups = ((cur_events["pos"]-cur_events["pos"].shift()) > distance).cumsum()
for i, cur_group in cur_events.groupby(groups):
if cur_group["assembled"].any():
cur_combined = cur_group.loc[cur_group["assembled"]].copy()
cur_combined["assembled"] = True
else:
cur_orientations = cur_group["orientation"].unique()
cur_combined = pandas.DataFrame({"orientation":cur_orientations})
cur_combined["chrom"] = chrom
cur_combined["pos"] = int(cur_group["pos"].mean())
cur_combined["assembled"] = False
breakends.append(cur_combined)
return pandas.concat(breakends, ignore_index=True)
|
dad6867e7dfa406f8785b131fb2c93694fe60f0d
| 3,641,652
|
def get_mongo_database(connection, database_name):
""" Access the database
Args:
connection (MongoClient): Mongo connection to the database
database_name (str): database to be accessed
Returns:
Database: the Database object
"""
try:
return connection.get_database(database_name)
except Exception:
return None
|
9299cbe0b697dec2e548fb5e26e2013214007575
| 3,641,653
|
from typing import Dict
from typing import Callable
def make_mappings() -> Dict[str, Callable[[], None]]:
"""サンプル名と実行する関数のマッピングを生成します"""
# noinspection PyDictCreation
m = {}
extlib.regist_modules(m)
return m
|
598decb0b3197b1c64c982354de1fea9fdb3ce3d
| 3,641,654
|
def S(state):
"""Stringify state
"""
if state == State.IDLE: return "IDLE"
if state == State.TAKING_OFF: return "TAKING_OFF"
if state == State.HOVERING: return "HOVERING"
if state == State.WAITING_ON_ASSIGNMENT: return "WAITING_ON_ASSIGNMENT"
if state == State.FLYING: return "FLYING"
if state == State.IN_FORMATION: return "IN_FORMATION"
if state == State.GRIDLOCK: return "GRIDLOCK"
if state == State.COMPLETE: return "\033[32;1mCOMPLETE\033[0m"
if state == State.TERMINATE: return "\033[31;1mTERMINATE\033[0m"
|
58c6005dcf8549225c233cc1af486fca9578111d
| 3,641,655
|
import os
import logging
def eval_classif_cross_val_roc(clf_name, classif, features, labels,
cross_val, path_out=None, nb_steps=100):
""" compute mean ROC curve on cross-validation schema
http://scikit-learn.org/0.15/auto_examples/plot_roc_crossval.html
:param str clf_name: name of selected classifier
:param obj classif: sklearn classifier
:param ndarray features: features in dimension nb_samples x nb_features
:param list(int) labels: annotation for samples
:param object cross_val:
:param str path_out: path for exporting statistic
:param int nb_steps: number of thresholds
:return:
>>> np.random.seed(0)
>>> labels = np.array([0] * 150 + [1] * 100 + [3] * 50)
>>> data = np.tile(labels, (6, 1)).T.astype(float)
>>> data += np.random.random(data.shape)
>>> data.shape
(300, 6)
>>> from sklearn.model_selection import StratifiedKFold
>>> cv = StratifiedKFold(n_splits=5, random_state=0)
>>> classif = create_classifiers()[DEFAULT_CLASSIF_NAME]
>>> fp_tp, auc = eval_classif_cross_val_roc(DEFAULT_CLASSIF_NAME, classif,
... data, labels, cv, nb_steps=10)
>>> fp_tp
FP TP
0 0.000000 0.0
1 0.111111 1.0
2 0.222222 1.0
3 0.333333 1.0
4 0.444444 1.0
5 0.555556 1.0
6 0.666667 1.0
7 0.777778 1.0
8 0.888889 1.0
9 1.000000 1.0
>>> auc
0.94444444444444442
>>> labels[-50:] -= 1
>>> data[-50:, :] -= 1
>>> path_out = 'temp_eval-cv-roc'
>>> os.mkdir(path_out)
>>> fp_tp, auc = eval_classif_cross_val_roc(DEFAULT_CLASSIF_NAME, classif,
... data, labels, cv, nb_steps=5, path_out=path_out)
>>> fp_tp
FP TP
0 0.00 0.0
1 0.25 1.0
2 0.50 1.0
3 0.75 1.0
4 1.00 1.0
>>> auc
0.875
>>> import shutil
>>> shutil.rmtree(path_out, ignore_errors=True)
"""
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, nb_steps)
labels_bin = np.zeros((len(labels), np.max(labels) + 1))
unique_labels = np.unique(labels)
assert all(unique_labels >= 0), \
'some labels are negative: %r' % unique_labels
for lb in unique_labels:
labels_bin[:, lb] = (labels == lb)
# since version change the CV is not iterable by default
if not hasattr(cross_val, '__iter__'):
cross_val = cross_val.split(features, labels)
count = 0.
for train, test in cross_val:
classif_cv = clone(classif)
classif_cv.fit(np.copy(features[train], order='C'),
np.copy(labels[train], order='C'))
proba = classif_cv.predict_proba(np.copy(features[test], order='C'))
# Compute ROC curve and area the curve
for i, lb in enumerate(unique_labels):
fpr, tpr, _ = metrics.roc_curve(labels_bin[test, lb], proba[:, i])
fpr = [0.] + fpr.tolist() + [1.]
tpr = [0.] + tpr.tolist() + [1.]
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
count += 1
# roc_auc = metrics.auc(fpr, tpr)
mean_tpr /= count
mean_tpr[-1] = 1.0
# mean_auc = metrics.auc(mean_fpr, mean_tpr)
df_roc = pd.DataFrame(np.array([mean_fpr, mean_tpr]).T, columns=['FP', 'TP'])
auc = metrics.auc(mean_fpr, mean_tpr)
if path_out is not None:
assert os.path.exists(path_out), 'missing: "%s"' % path_out
name_csv = NAME_CSV_CLASSIF_CV_ROC.format(clf_name, 'mean')
df_roc.to_csv(os.path.join(path_out, name_csv))
name_txt = NAME_TXT_CLASSIF_CV_AUC.format(clf_name, 'mean')
with open(os.path.join(path_out, name_txt), 'w') as fp:
fp.write(str(auc))
logging.debug('cross_val ROC: \n %r', df_roc)
return df_roc, auc
|
b7196e024082015601f71cb9faaeb0c84444e419
| 3,641,656
|
def trace_get_watched_net(trace, i):
"""
trace_get_watched_net(Int_trace trace, unsigned int i) -> Int_net
Parameters
----------
trace: Int_trace
i: unsigned int
"""
return _api.trace_get_watched_net(trace, i)
|
f7140cbfcc27d511b3212ba7adf97f0b6c91582b
| 3,641,657
|
from typing import Optional
from typing import OrderedDict
def dist_batch_tasks_for_all_layer_mdl_vs_adapted_mdl(
mdl: nn.Module,
spt_x: Tensor, spt_y: Tensor, qry_x: Tensor, qry_y: Tensor,
layer_names: list[str],
inner_opt: DifferentiableOptimizer,
fo: bool,
nb_inner_train_steps: int,
criterion: nn.Module,
metric_comparison_type: str = 'pwcca',
iters: int = 1,
effective_neuron_type: str = 'filter',
downsample_method: Optional[str] = None,
downsample_size: Optional[int] = None,
subsample_effective_num_data_method: Optional[str] = None,
subsample_effective_num_data_param: Optional[int] = None,
metric_as_sim_or_dist: str = 'dist',
force_cpu: bool = False,
training: bool = True,
copy_initial_weights: bool = False,
track_higher_grads: bool = False
) -> list[OrderedDict[LayerIdentifier, float]]:
"""
:param mdl:
:param spt_x: not as a tuple due to having to move them to gpu potentially.
:param spt_y:
:param qry_x:
:param qry_y:
:param layer_names:
:param inner_opt:
:param fo:
:param nb_inner_train_steps:
:param criterion:
:param metric_comparison_type:
:param iters:
:param effective_neuron_type:
:param downsample_method:
:param downsample_size:
:param subsample_effective_num_data_method:
:param subsample_effective_num_data_param:
:param metric_as_sim_or_dist:
:param force_cpu:
:param training:
:param copy_initial_weights:
:param track_higher_grads:
:return:
"""
# - [B, M, C, H, W] -> [B, L]
L: int = len(layer_names)
B: int = spt_x.size(0)
dists_per_batch_per_layer: list[OrderedDict[LayerIdentifier, float]] = []
for t in range(B):
spt_x_t, spt_y_t, qry_x_t, qry_y_t = spt_x[t], spt_y[t], qry_x[t], qry_y[t]
#
adapted_mdl: FuncModel = get_maml_adapted_model_with_higher_one_task(mdl,
inner_opt,
spt_x_t, spt_y_t,
training,
copy_initial_weights,
track_higher_grads,
fo,
nb_inner_train_steps,
criterion)
# - [M, C, H, W], [L] -> [L]
X: Tensor = qry_x_t
dists_per_layer: OrderedDict[LayerIdentifier, float] = dist_data_set_per_layer(mdl1=mdl,
mdl2=adapted_mdl,
X1=X,
X2=X,
layer_names1=layer_names,
layer_names2=layer_names,
metric_comparison_type=metric_comparison_type,
iters=iters,
effective_neuron_type=effective_neuron_type,
downsample_method=downsample_method,
downsample_size=downsample_size,
subsample_effective_num_data_method=subsample_effective_num_data_method,
subsample_effective_num_data_param=subsample_effective_num_data_param,
metric_as_sim_or_dist=metric_as_sim_or_dist,
force_cpu=force_cpu
)
assert len(dists_per_layer) == L
# - appending to [B, L]
dists_per_batch_per_layer.append(dists_per_layer)
#
# del adapted_mdl
# gc.collect()
assert len(dists_per_batch_per_layer) == B
# Invariant due to asserts: [B, L] list
# - [B, L] distances ready!
return dists_per_batch_per_layer
|
72830d75e195b8363936d78a8c249b9f6bbd7125
| 3,641,658
|
from typing import Callable
from typing import List
import numbers
def adjust_payload(tree: FilterableIntervalTree,
a_node: FilterableIntervalTreeNode,
adjustment_interval: Interval,
adjustments: dict,
filter_vector_generator: Callable[[dict], int]=None)\
-> List[FilterableIntervalTreeNode]:
"""
Adjusts the payload of a node in its tree
:param tree: tree to be adjusted
:param a_node: node to adjust
:param adjustment_interval: the interval for which we would like to see the adjustments made
:param adjustments: the changes that we want to see made to the node's payload (only works for dictionaries)
:param filter_vector_generator: a function that returns a filter vector for each payload
:return: the newly created node covering the adjustment interval
"""
if filter_vector_generator is None:
filter_vector_generator = lambda x: a_node.filter_vector
old_interval = a_node.key
remaining_intervals = old_interval.remove(adjustment_interval)
new_payload = a_node.payload.copy()
relevant_keys = adjustments.keys()
for key in relevant_keys:
old_property_value = new_payload.get(key)
if isinstance(old_property_value, numbers.Number):
new_payload[key] += adjustments[key]
else:
new_payload[key] = adjustments[key]
filter_vector = filter_vector_generator(new_payload)
remaining_nodes = \
[FilterableIntervalTreeNode(_, a_node.payload.copy(), a_node.filter_vector) for _ in remaining_intervals]
new_node = FilterableIntervalTreeNode(adjustment_interval, new_payload, filter_vector)
result_list = [new_node] + remaining_nodes
result_list = sorted(result_list, key=lambda node: node.key)
added_nodes = set()
first_item = result_list[0]
last_item = result_list[-1]
first_payload = first_item.payload
last_payload = last_item.payload
pre_node = get_predecessor_for_node(tree, a_node, qualifier=lambda x: x == first_payload)
post_node = get_successor_for_node(tree, a_node, qualifier=lambda x: x == last_payload)
delete_node(tree, a_node)
if pre_node and Interval.touches(pre_node.key, first_item.key) and pre_node.payload == first_item.payload:
consolidate_nodes(pre_node, first_item, tree)
added_nodes.add(first_item)
if post_node and Interval.touches(post_node.key, last_item.key) and post_node.payload == last_item.payload:
consolidate_nodes(last_item, post_node, tree)
added_nodes.add(last_item)
for node in result_list:
if node not in added_nodes:
add_node(tree, node)
return new_node
|
fa93deede3e7fee950834e5e02bc79bb98e68f03
| 3,641,659
|
from typing import Tuple
import os
def _read_cropped() -> Tuple[np.ndarray, np.ndarray]:
"""Reads the cropped data and labels.
"""
print('\nReading cropped images.')
path_cropped = os.path.join(DATA_FOLDER, FOLDER_CROPPED)
result = _recursive_read_cropped(path_cropped)
print('Done reading cropped images.')
return result
|
798a60b13c49903672c65c9fd631141061a0873f
| 3,641,660
|
def get_max(data, **kwargs):
"""
Assuming the dataset is loaded as type `np.array`, and has shape
(num_samples, num_features).
:param data: Provided dataset, assume each row is a data sample and \
each column is one feature.
:type data: `np.ndarray`
:param kwargs: Dictionary of differential privacy arguments \
for computing the maximum value of each feature across all samples, \
e.g., epsilon and delta, etc.
:type kwargs: `dict`
:return: A vector of shape (1, num_features) stores the maximum value \
of each feature across all samples.
:rtype: `np.array` of `float`
"""
try:
max_vec = np.max(data, axis=0)
except Exception as ex:
raise FLException('Error occurred when calculating '
'the maximum value. ' + str(ex))
return max_vec
|
03697d2a2bc6afe3c1d576bd9f8766c97e86626d
| 3,641,661
|
def find_u_from_v(matrix, v, singular_value):
"""
Finds the u column vector of the U matrix in the SVD UΣV^T.
Parameters
----------
matrix : numpy.ndarray
Matrix for which the SVD is calculated
v : numpy.ndarray
A column vector of V matrix, it is the eigenvector of the Gramian of `matrix`.
singular_value : float
A singular value of `matrix` corresponding to the `v` vector.
Returns
-------
numpy.ndarray
u column vector of the U matrix in the SVD.
"""
return matrix @ v / singular_value
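# Hedged usage sketch: recover the first left-singular vector of a small matrix
# from numpy's SVD output and check it against numpy's own U column.
import numpy as np
example_m = np.array([[3.0, 0.0], [4.0, 5.0]])
example_u, example_s, example_vt = np.linalg.svd(example_m)
print(np.allclose(find_u_from_v(example_m, example_vt[0], example_s[0]), example_u[:, 0]))  # True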
|
ef2871c86bf7ddc4c42446a54230068282ad85df
| 3,641,662
|
import torch
def transform(dataset, perm_idx, model, view):
"""
for view1 utterance, simply encode using view1 encoder
for view 2 utterances:
- encode each utterance, using view 1 encoder, to get utterance embeddings
- take average of utterance embeddings to form view 2 embedding
"""
model.eval()
latent_zs, golds = [], []
n_batch = (len(perm_idx) + BATCH_SIZE - 1) // BATCH_SIZE
for i in range(n_batch):
indices = perm_idx[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
v1_batch, v2_batch = list(zip(*[dataset[idx][0] for idx in indices]))
golds += [dataset[idx][1] for idx in indices]
if view == 'v1':
latent_z = model(v1_batch, encoder='v1')
elif view == 'v2':
latent_z_l = [model(conv, encoder='v1').mean(dim=0) for conv in v2_batch]
latent_z = torch.stack(latent_z_l)
latent_zs.append(latent_z.cpu().data.numpy())
latent_zs = np.concatenate(latent_zs)
return latent_zs, golds
|
484adb7d53f80366b591ef45551b245dce00acca
| 3,641,663
|
from typing import List
def double(items: List[str]) -> List[str]:
"""
Returns a new list that is the input list, repeated twice.
"""
return items + items
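# Hedged usage sketch:
print(double(["a", "b"]))  # ['a', 'b', 'a', 'b']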
|
9e4b6b9e84a80a9f5cbd512ca820274bb8cad924
| 3,641,664
|
def system_from_problem(problem: Problem) -> System:
"""Extracts the "system" part of a problem.
Args:
problem: Problem description
Returns:
A :class:`System` object containing a copy of the relevant parts of the problem.
"""
return System(
id=problem.id,
name=problem.name,
apps=tuple(w.app for w in problem.workloads),
instance_classes=problem.instance_classes,
performances=problem.performances,
)
|
42c0db09d00043ba61ae164bb58a0ecb48599027
| 3,641,665
|
def get_service_endpoints(ksc, service_type, region_name):
"""Get endpoints for a given service type from the Keystone catalog.
:param ksc: An instance of a Keystone client.
:type ksc: :class: `keystoneclient.v3.client.Client`
:param str service_type: An endpoint service type to use.
:param str region_name: A name of the region to retrieve endpoints for.
:raises :class: `keystone_exceptions.EndpointNotFound`
"""
try:
catalog = {
endpoint_type: ksc.service_catalog.url_for(
service_type=service_type, endpoint_type=endpoint_type,
region_name=region_name)
for endpoint_type in ['publicURL', 'internalURL', 'adminURL']}
except keystone_exceptions.EndpointNotFound:
# EndpointNotFound is raised for the case where a service does not
# exist as well as for the case where the service exists but not
# endpoints.
log.error('could not retrieve any {} endpoints'.format(service_type))
raise
return catalog
|
c962ad44e4d73a102f9c09803f94c68cee2aeb51
| 3,641,666
|
def get_task_for_node(node_id):
""" Get a new task or previously assigned task for node """
# get ACTIVE task that was previously assigned to this node
query = Task.query.filter_by(node_id=node_id).filter_by(status=TaskStatus.ACTIVE)
task = query.first()
if task:
return task
node = Node.query.filter_by(id=node_id).one()
return _assign_task(node)
|
5a01869f40f5c0840dfdc2ed1e3417c694f51aca
| 3,641,667
|
from xpedite.profiler.profileInfo import loadProfileInfo as _loadProfileInfo
import os
def loadProfileInfo(profileInfoPath, remote=None):
"""
Load profile information from a profileInfo.py file, set a default application information
file, and set the profile information's host if the application is running remotely
@param remote: Remote environment information if a remote host is passed to the pytest parser
@type remote: C{xpedite.transport.remote.Remote}
"""
profileInfo = _loadProfileInfo(os.path.join(dataDir, profileInfoPath))
profileInfo.appInfo = os.path.join(dataDir, 'xpedite-appinfo.txt')
if remote:
profileInfo.appHost = remote.host
return profileInfo
|
95f25683174e157fa8031276d96f9bbef41706df
| 3,641,668
|
import subprocess
def open_process(verbose, args, outputs):
""" Run the given arguments as a subprocess. Time out after TIMEOUT
seconds and report failures or stdout. """
report_output(outputs["stdout"],
verbose, "Writing", args)
proc = None
if outputs["stderr"] is not None:
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True,
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
except OSError as e:
report_err(outputs["stderr"], "Failed executing: ", e)
if proc is None:
# Never even started
report_err(outputs["stderr"], "Process failed to start")
return proc
|
47b32c34d422d7809ee3d885632467f4c7aacb89
| 3,641,669
|
def cik_list():
"""Get CIK list and use it as a fixture."""
return UsStockList()
|
ec845471860dcf4ce9dcf0e82e2effda21bcbf0b
| 3,641,670
|
def get_eval_config(hidden_dim,
max_input_length=None,
num_input_timesteps=None,
model_temporal_relations=True,
node_position_dim=1,
num_input_propagation_steps=None,
token_vocab_size=None,
node_text_pad_token_id=None,
num_transformer_attention_heads=None,
num_edge_types=None,
num_time_edge_types=None,
use_relational_bias=False,
max_output_length=None,
type_vocab_size=None,
output_vocab_size=None,
num_output_propagation_steps=None,
use_pointer_candidate_masking=False,
jax2tf_compatible=None,
dropout_rate: float = 0.1):
"""Returns a model config for evaluating, which disables drop-out."""
return create_model_config(
is_training=False,
hidden_dim=hidden_dim,
max_input_length=max_input_length,
num_input_timesteps=num_input_timesteps,
model_temporal_relations=model_temporal_relations,
node_position_dim=node_position_dim,
num_input_propagation_steps=num_input_propagation_steps,
token_vocab_size=token_vocab_size,
node_text_pad_token_id=node_text_pad_token_id,
dropout_rate=dropout_rate,
num_transformer_attention_heads=num_transformer_attention_heads,
num_edge_types=num_edge_types,
num_time_edge_types=num_time_edge_types,
use_relational_bias=use_relational_bias,
max_output_length=max_output_length,
type_vocab_size=type_vocab_size,
output_vocab_size=output_vocab_size,
num_output_propagation_steps=num_output_propagation_steps,
use_pointer_candidate_masking=use_pointer_candidate_masking,
jax2tf_compatible=jax2tf_compatible)
|
90ff743a372a2db3eb52927bf8c6d996a11137cb
| 3,641,671
|
def classNew(u_id):
"""
Allow an ADMIN to create a new class (ADMIN ONLY)
Returns: none
"""
myDb, myCursor = dbConnect()
data = request.get_json()
createNewClass(myCursor, myDb, data)
dbDisconnect(myCursor, myDb)
return dumps({})
|
29532ea5c979b725b46c1dd775c1f093006b1a43
| 3,641,672
|
import types
import functools
def copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)."""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
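# Hedged usage sketch: the copy keeps the code, defaults and wrapper metadata of
# the original and stays callable even if the original name is rebound.
def _example_greet(name="world"): return "hello " + name
_example_copy = copy_func(_example_greet)
_example_greet = None
print(_example_copy())  # hello world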
|
d661876d8568c5f33ae07682c874edd8d71dd7c9
| 3,641,673
|
from typing import List
def augment(img_list: list, hflip: bool = True, rot: bool = True) -> List[np.ndarray]:
"""
Augments the images in order to add robustness to the model
@param img_list: The List of images
@param hflip: If True, add horizontal flip
@param rot: If True, add 90 degrees rotation
@return: A list of the augmented images
"""
# horizontal flip OR rotate
hflip = hflip and np.random.random() < 0.5
vflip = rot and np.random.random() < 0.5
rot90 = rot and np.random.random() < 0.5
return [perform_augment(hflip, vflip, rot90, img) for img in img_list]
|
3d953ba2c9ce869ec612644d9a5370690c930e22
| 3,641,674
|
def _neurovault_collections(parts, query):
"""Mocks the Neurovault API behind the `/api/collections/` path.
parts: the parts of the URL path after "collections"
ie [], ["<somecollectionid>"], or ["<somecollectionid>", "images"]
query: the parsed query string, e.g. {"offset": "15", "limit": "5"}
returns a dictionary of API results
See the neurovault API docs for details: https://neurovault.org/api-docs
"""
if parts:
return _neurovault_one_collection(parts)
collections, _ = _get_neurovault_data()
offset, limit = int(query.get("offset", 0)), int(query.get("limit", 2))
batch = collections.iloc[
offset: offset + limit].to_dict(orient="records")
return {"count": len(collections), "results": batch}
|
5ee1e6b9b59fb12e76c38c20cde65c18c3fd201a
| 3,641,675
|
def display_states():
""" Display the states"""
storage_states = storage.all(State)
return render_template('7-states_list.html', states=storage_states)
|
b9dc5c739546fee0abce077df1bba38587062f1a
| 3,641,676
|
def recompress_folder(folders, path, extension):
"""Recompress folder"""
dest = runez.SYS_INFO.platform_id.composed_basename("cpython", path.name, extension=extension)
dest = folders.dist / dest
runez.compress(path, dest, logger=print)
return dest
|
5cadc1a0b32509630cd3fa5af9fd758899e4bf94
| 3,641,677
|
import pathlib
def guessMimetype(filename):
"""Return the mime-type for `filename`."""
path = pathlib.Path(filename) if not isinstance(filename, pathlib.Path) else filename
with path.open("rb") as signature:
# Since filetype only reads 262 of file many mp3s starting with null bytes will not find
# a header, so ignoring null bytes and using the bytes interface...
buf = b""
while not buf:
data = signature.read(_NUM_SIGNATURE_BYTES)
if not data:
break
data = data.lstrip(b"\x00")
if data:
data_len = len(data)
if data_len >= _NUM_SIGNATURE_BYTES:
buf = data[:_NUM_SIGNATURE_BYTES]
else:
buf = data + signature.read(_NUM_SIGNATURE_BYTES - data_len)
# Special casing .id3/.tag because extended filetype with add_type() prepends, meaning
# all mp3 would be labeled mimetype id3, while appending would mean each .id3 would be
# mime mpeg.
if path.suffix in ID3_MIME_TYPE_EXTENSIONS:
if Id3Tag().match(buf) or Id3TagExt().match(buf):
return Id3TagExt.MIME
return filetype.guess_mime(buf)
|
84f6b2f80b341f330e3f6b9e65b4863d055f8796
| 3,641,678
|
import json
import sys
def collectMessages():
""" A generic stimulus invocation """
global rmlEngine
try:
stimuli = []
rawRequest = request.POST.dict
for rawKey in rawRequest.keys():
keyVal = rawKey
jsonPayload = json.loads(keyVal)
try:
actionID = jsonPayload["actionID"]
except KeyError:
errorMsg = "Missing required JSON parameter 'actionID'"
raise Exceptions.MissingActionError(errorMsg)
try:
ownerID = jsonPayload["ownerID"]
except KeyError:
errorMsg = "Missing required JSON parameter 'ownerID'"
raise Exceptions.InvalidControllerError()
try:
subjectID = jsonPayload["subjectID"]
except KeyError:
subjectID = ownerID
try:
objectID = jsonPayload["objectID"]
except KeyError:
objectID = ownerID
try:
insertionModeText = jsonPayload["insertionMode"]
if insertionModeText == 'head_clear':
insertionMode = ationInsertionTypes.HEAD_CLEAR
elif insertionModeText == 'head':
insertionMode = ationInsertionTypes.HEAD
elif insertionModeText == 'append':
insertionMode = ationInsertionTypes.APPEND
else:
errorMsg = "Invalid insertionMode parameter. Valid values are 'head', 'head_clear' and 'append'" %insertionModeText
raise Exceptions.InsertionModeError()
except KeyError:
insertionMode = ationInsertionTypes.HEAD_CLEAR
try:
rtparams = jsonPayload["actionParams"]
except KeyError:
rtparams = {}
actionInvocation = Engine.ActionRequest(actionID, insertionMode, rtparams, subjectID, objectID, ownerID)
rmlEngine.aQ.put(actionInvocation)
response.body = json.dumps({"status": stimuli})
response.status = 200
return response
except Exceptions.InvalidControllerError:
fullerror = sys.exc_info()
errorID = str(fullerror[0])
errorMsg = str(fullerror[1])
response.body = "Failed to post action. %s, %s" %(errorID, errorMsg)
response.status = 400
return response
except Exceptions.MissingActionError:
fullerror = sys.exc_info()
errorID = str(fullerror[0])
errorMsg = str(fullerror[1])
response.body = "Failed to post action. %s, %s" %(errorID, errorMsg)
response.status = 400
return response
except Exception as unusedE:
        # When this exception happens, the actionID variable won't be in scope,
        # but we can expect that actionID is available, or a MissingActionError would have been thrown.
rawRequest = request.POST.dict
for rawKey in rawRequest.keys():
keyVal = rawKey
jsonPayload = json.loads(keyVal)
actionID = jsonPayload["actionID"]
fullerror = sys.exc_info()
errorID = str(fullerror[0])
errorMsg = str(fullerror[1])
response.body = "Failed to post action %s. %s, %s" %(actionID, errorID, errorMsg)
response.status = 500
return response
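# Illustrative only: a minimal sketch of the JSON payload collectMessages() expects as a
# POST form key (the field values below are hypothetical). Only actionID and ownerID are
# required; subjectID/objectID default to ownerID and insertionMode defaults to 'head_clear'.
example_payload = json.dumps({
    "actionID": "action-1234",
    "ownerID": "owner-5678",
    "subjectID": "subject-0001",
    "objectID": "object-0002",
    "insertionMode": "append",
    "actionParams": {"speed": 1.0},
})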
|
55c3cb0444f4cfe4a1bfa4631f3e2351f63e9394
| 3,641,679
|
def filter_ptr_checks(props):
"""This function will filter out extra pointer checks.
Our support to primitives and overflow pointer checks is unstable and
can result in lots of spurious failures. By default, we filter them out.
"""
def not_extra_check(prop):
return extract_property_class(prop) not in ["pointer_arithmetic", "pointer_primitives"]
return list(filter(not_extra_check, props))
|
e5964637c3f1a27521f5305673c9e5af3189e15d
| 3,641,680
|
import time
def makeKeylistObj(keylist_fname, includePrivate=False):
"""Return a new unsigned keylist object for the keys described in
       'keylist_fname'.
"""
keys = []
def Key(obj): keys.append(obj)
preload = {'Key': Key}
r = readConfigFile(keylist_fname, (), (), preload)
klist = []
for k in keys:
k = thandy.keys.RSAKey.fromJSon(k)
if includePrivate and not k.isPrivateKey():
raise thandy.FormatException("Private key information not found.")
klist.append({'key': k.format(private=includePrivate), 'roles' : k.getRoles() })
result = { '_type' : "Keylist",
'ts' : formatTime(time.time()),
'keys' : klist }
KEYLIST_SCHEMA.checkMatch(result)
return result
|
13e79fbb9ac8ad207cc2533532c6be6bb0372beb
| 3,641,681
|
def getwpinfo(id,wps):
"""Help function to create description of WP inputs."""
try:
wpmin = max([w for w in wps if 'loose' in w.lower()],key=lambda x: len(x)) # get loose WP with most 'V's
wpmax = max([w for w in wps if 'tight' in w.lower()],key=lambda x: len(x)) # get tight WP with most 'V's
info = f"{id} working point: {wpmin}-{wpmax}"
    except Exception:  # e.g. max() over an empty sequence when no loose/tight WP exists
info = f"{id} working point: {', '.join(wps)}"
return info
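# A minimal usage example with hypothetical working-point names, showing both branches:
# the loose-to-tight range when such names exist, and the comma-joined fallback otherwise.
print(getwpinfo("TauIDvsJet", ["VVVLoose", "VLoose", "Loose", "Medium", "Tight", "VVTight"]))
# -> "TauIDvsJet working point: VVVLoose-VVTight"
print(getwpinfo("TauIDvsE", ["wp80", "wp90"]))
# -> "TauIDvsE working point: wp80, wp90"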
|
0dcf6c205a1988227e23a77e169a9114f1fdf2cc
| 3,641,682
|
def build_word_dg(target_word, model, depth, model_vocab=None, boost_counter=None, topn=5):
""" Accept a target_word and builds a directed graph based on
the results returned by model.similar_by_word. Weights are initialized
to 1. Starts from the target_word and gets similarity results for it's children
and so forth, up to the specified depth.
Args
----
target_word (string): Root node.
model (gensim.models): Gensim word embedding model.
depth (int): Depth to restrict the search to.
topn (int): Number of words to check against in the embedding model, default=5.
"""
_DG = init_digraph()
seen_set = set()
do_hs_boosting = (
boost_counter and model_vocab and target_word in model_vocab)
if do_hs_boosting:
weight_boost = log10(float(model.vocab[target_word].count)) * boost_counter[
target_word] if target_word in boost_counter else 0
_DG.add_weighted_edges_from([(target_word, word[0], weight_boost + word[1])
for word in model.similar_by_word(target_word, topn=topn)])
else:
_DG.add_weighted_edges_from([(target_word, word[0], word[1])
for word in model.similar_by_word(target_word, topn=topn)])
seen_set.add(target_word)
for _idx in range(1, depth):
current_nodes = _DG.nodes()
for node in current_nodes:
if node not in seen_set:
_DG.add_weighted_edges_from(
[(node, word[0], word[1]) for word in model.similar_by_word(node, topn=topn)])
seen_set.add(node)
return _DG
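# A minimal usage sketch, not from the original project: it assumes a gensim KeyedVectors
# model (downloaded via gensim.downloader) and that this module's remaining helpers
# (init_digraph returning a networkx DiGraph, log10) are in scope.
import gensim.downloader as api

glove = api.load("glove-wiki-gigaword-50")
graph = build_word_dg("coffee", glove, depth=2, topn=5)
print(graph.number_of_nodes(), graph.number_of_edges())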
|
ffd32cef2b44fd9e9cd554cd618091dfe8e5377f
| 3,641,683
|
import sys
def train(network, num_epochs, train_fn, train_batches, test_fn=None,
validation_batches=None, threads=None, early_stop=np.inf,
early_stop_acc=False, save_epoch_params=False, callbacks=None,
acc_func=onehot_acc, train_acc=False):
"""
Train a neural network by updating its parameters.
Parameters
----------
network : lasagne neural network handle
Network to be trained.
num_epochs: int
Maximum number of epochs to train
train_fn : theano function
Function that computes the loss and updates the network parameters.
Takes parameters from the batch iterators
train_batches : batch iterator
Iterator that yields mini batches from the training set. Must be able
to re-iterate multiple times.
test_fn : theano function
Function that computes loss and predictions of the network.
Takes parameters from the batch iterators.
validation_batches : batch iterator
Iterator that yields mini batches from the validation set. Must be able
to re-iterate multiple times.
threads : int
Number of threads to use to prepare mini batches. If None, use
a single thread.
early_stop : int
Number of iterations without loss improvement on validation set that
stops training.
early_stop_acc : boolean
Use validation accuracy instead of loss for early stopping.
save_epoch_params : str or False
Save neural network parameters after each epoch. If False, do not save.
If you want to save the parameters, provide a filename with an
int formatter so the epoch number can be inserted.
    callbacks : list of callables
        List of callables to call after each training epoch. Can be used to
        update learn rates or plot data. Functions have to accept the
        following parameters: current epoch number, lists of per-epoch train
        losses, validation losses, train accuracies, validation accuracies.
        The last three lists may be empty, depending on other parameters.
acc_func : callable
Function to use to compute accuracies.
train_acc : boolean
Also compute accuracy for training set. In this case, the training
loss will be also re-computed after an epoch, which leads to lower
train losses than when not using this parameter.
Returns
-------
    tuple of four lists
        Train losses, validation losses, train accuracies,
        validation accuracies for each epoch
"""
if (test_fn is not None) != (validation_batches is not None):
raise ValueError('If test function is given, validation set is '
'necessary (and vice-versa)!')
best_val = np.inf if not early_stop_acc else 0.0
epochs_since_best_val_loss = 0
if callbacks is None:
callbacks = []
best_params = get_params(network)
train_losses = []
val_losses = []
val_accs = []
train_accs = []
if threads is not None:
def threaded(it):
return dmgr.iterators.threaded(it, threads)
else:
def threaded(it):
return it
for epoch in range(num_epochs):
timer = Timer()
timer.start('epoch')
timer.start('train')
try:
train_losses.append(
avg_batch_loss(threaded(train_batches), train_fn, timer))
except RuntimeError as e:
print(Colors.red('Error during training:'), file=sys.stderr)
print(Colors.red(str(e)), file=sys.stderr)
return best_params
timer.stop('train')
if save_epoch_params:
save_params(network, save_epoch_params.format(epoch))
if validation_batches:
val_loss, val_acc = avg_batch_loss_acc(
threaded(validation_batches), test_fn, acc_func)
val_losses.append(val_loss)
val_accs.append(val_acc)
if train_acc:
train_loss, tr_acc = avg_batch_loss_acc(
threaded(train_batches), test_fn, acc_func)
train_losses[-1] = train_loss
train_accs.append(tr_acc)
print('Ep. {}/{} {:.1f}s (tr: {:.1f}s th: {:.1f}s)'.format(
epoch + 1, num_epochs,
timer['epoch'], timer['train'], timer['theano']),
end='')
print(' tl: {:.6f}'.format(train_losses[-1]), end='')
if train_acc:
print(' tacc: {:.6f}'.format(tr_acc), end='')
if validation_batches:
# early stopping
cmp_val = val_losses[-1] if not early_stop_acc else -val_accs[-1]
if cmp_val < best_val:
epochs_since_best_val_loss = 0
best_val = cmp_val
best_params = get_params(network)
# green output
c = Colors.green
else:
epochs_since_best_val_loss += 1
# neutral output
c = lambda x: x
print(c(' vl: {:.6f}'.format(val_losses[-1])), end='')
print(c(' vacc: {:.6f}'.format(val_accs[-1])), end='')
if epochs_since_best_val_loss >= early_stop:
print(Colors.yellow('\nEARLY STOPPING!'))
break
else:
best_params = get_params(network)
print('')
for cb in callbacks:
cb(epoch, train_losses, val_losses, train_accs, val_accs)
# set the best parameters found
set_params(network, best_params)
return train_losses, val_losses, train_accs, val_accs
|
8b5860883b04b9856b8794813aae3493a0389588
| 3,641,684
|
import numpy as np
def sample_normal_gamma(mu, lmbd, alpha, beta):
    """ Draw (mu, tau) from a Normal-gamma distribution with location mu,
    precision scaling lmbd, shape alpha and rate beta.
    https://en.wikipedia.org/wiki/Normal-gamma_distribution
    """
    # numpy's gamma is parameterised by shape and *scale*, so the rate beta is inverted here.
    tau = np.random.gamma(alpha, 1.0 / beta)
    mu = np.random.normal(mu, 1.0 / np.sqrt(lmbd * tau))
    return mu, tau
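# A quick sanity check with hypothetical parameter values: under the rate parameterisation
# the sampled precisions should average roughly alpha / beta, and mu stays near its prior mean.
np.random.seed(0)
draws = [sample_normal_gamma(mu=0.0, lmbd=2.0, alpha=3.0, beta=0.5) for _ in range(10000)]
mus, taus = zip(*draws)
print(np.mean(mus), np.mean(taus))  # roughly 0.0 and alpha / beta = 6.0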
|
0f11ce95cfb772aeb023b61300bdb03d827cab37
| 3,641,685
|
from typing import Iterable
from typing import Callable
from typing import Type
from typing import Optional
from typing import List
from typing import Set
async def _common_discover_entities(
current_entity_platform: EntityPlatform,
config_entry: ConfigEntry,
source_objects: Iterable[TObject],
object_code_getter: Callable[[TObject], TIdentifier],
entity_cls: Type[TSensor],
final_config: Optional[ConfigType] = None,
existing_entities: Optional[List[TSensor]] = None,
sensor_type_name: Optional[str] = None,
entity_code_getter: Callable[[TSensor], TIdentifier] = None,
log_prefix: Optional[str] = None,
) -> DiscoveryReturnType:
"""
Common entity discovery helper.
:param current_entity_platform: Entity platform used
:param config_entry: Configuration entry
:param final_config: Final configuration data
:param source_objects: Objects to use when creating entities
:param object_code_getter: Getter for identifier for objects
:param entity_cls: Entity class (subclass of `MESEntity`)
:param existing_entities: (optional) Existing entities list
(default: retrieved at runtime)
:param sensor_type_name: (optional) Sensor type name for log prefixing
        (default: derived from configuration key)
:param entity_code_getter: (optional) Getter for identifier for entities
(default: `code` property of provided entity class)
    :param log_prefix: (optional) Log prefix to prepend to internal logging
(default: empty string)
:return: Tuple[new entities list, async tasks]
"""
hass = current_entity_platform.hass
config_key = entity_cls.config_key
if final_config is None:
final_config = hass.data.get(DATA_FINAL_CONFIG, {}).get(config_entry.entry_id)
if final_config is None:
raise ValueError('Final configuration not available for entry "%s"' % (config_entry.entry_id,))
if sensor_type_name is None:
sensor_type_name = config_key
if sensor_type_name.endswith('s'):
sensor_type_name = sensor_type_name[:-1]
if log_prefix is None:
log_prefix = _make_log_prefix(
config_entry,
current_entity_platform,
'discvr',
sensor_type_name
)
if entity_code_getter is None:
entity_code_getter = entity_cls.code
if current_entity_platform is None:
current_entity_platform = entity_platform.current_platform.get()
if existing_entities is None:
existing_entities = hass.data\
.get(DATA_ENTITIES, {})\
.get(config_entry.entry_id, {})\
.get(config_key, [])
entities = []
tasks = []
added_entities: Set[TSensor] = set(existing_entities or [])
entity_filter = final_config[CONF_ENTITIES][config_key]
name_formats = final_config[CONF_NAME_FORMAT][config_key]
scan_intervals = final_config[CONF_SCAN_INTERVAL][config_key]
for iter_object in source_objects:
identifier = object_code_getter(iter_object)
if not identifier:
_LOGGER.warning('No identifier on: %s: %s', iter_object, iter_object.data)
continue
        log_sensor_type_name = sensor_type_name.ljust(7)
log_identifier = '*' + identifier[-5:]
granular_log_prefix = _make_log_prefix(
config_entry,
current_entity_platform,
'discvr',
log_sensor_type_name,
log_identifier
)
if not entity_filter[identifier]:
_LOGGER.info(granular_log_prefix + 'Skipping setup/update due to filter')
continue
obj_entity = None
for entity in added_entities:
if entity_code_getter(entity) == identifier:
obj_entity = entity
break
entity_log_prefix = _make_log_prefix(
config_entry,
current_entity_platform,
'entity',
log_sensor_type_name,
log_identifier
)
if obj_entity is None:
_LOGGER.debug(granular_log_prefix + 'Setting up entity')
entities.append(
entity_cls.async_discover_create(
iter_object,
name_formats[identifier],
scan_intervals[identifier],
entity_log_prefix
)
)
else:
added_entities.remove(obj_entity)
if obj_entity.enabled:
_LOGGER.debug(granular_log_prefix + 'Updating entity')
update_task = obj_entity.async_discover_update(
iter_object,
name_formats[identifier],
scan_intervals[identifier],
entity_log_prefix
)
if update_task is not None:
tasks.append(update_task)
if entities:
register_update_services(entity_cls, current_entity_platform, log_prefix)
if added_entities:
_LOGGER.info(log_prefix + f'Removing {len(added_entities)} {sensor_type_name} entities')
tasks.extend(get_remove_tasks(hass, added_entities))
return entities, tasks
|
6ead5fa56712bf4186969f47a40c70cfea51b5da
| 3,641,686
|
def _dice(terms):
"""
Returns the elements of iterable *terms* in tuples of every possible length
and range, without changing the order. This is useful when parsing a list of
undelimited terms, which may span multiple tokens. For example:
>>> _dice(["a", "b", "c"])
[('a', 'b', 'c'), ('a', 'b'), ('b', 'c'), ('a',), ('b',), ('c',)]
"""
    # remove all of the terms that look like delimiters (materialise as a list so the
    # len() and slicing below also work on Python 3)
    terms = [term for term in terms if not _is_delimiter(term)]
y = []
for n in range(len(terms), 0, -1):
for m in range(0, len(terms)-(n-1)):
y.append(tuple(terms[m:m+n]))
return y
|
bb8f567d82405864c0bf81b2ee9f3cb89b875d11
| 3,641,687
|
from datetime import datetime
def parse_date(val, format):
"""
Attempts to parse the given string date according to the
provided format, raising InvalidDateError in case of problems.
@param str val (e.g. 2014-08-12)
@param str format (e.g. %Y-%m-%d)
@return datetime.date
"""
try:
return datetime.strptime(val, format).date()
except ValueError:
raise InvalidDateError("unable to parse %s" % val)
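# A minimal usage sketch; InvalidDateError is assumed to be defined elsewhere in this module.
print(parse_date("2014-08-12", "%Y-%m-%d"))  # -> 2014-08-12
try:
    parse_date("12/08/2014", "%Y-%m-%d")
except InvalidDateError as err:
    print(err)  # -> unable to parse 12/08/2014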
|
4686bf46d12310ee7ac4aa1986df55b598909a06
| 3,641,688
|
import cv2
def get_capture_dimensions(capture):
"""Get the dimensions of a capture"""
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
return width, height
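# A minimal usage sketch; the video path is hypothetical and cv2 is imported above.
capture = cv2.VideoCapture("/tmp/example_video.mp4")
if capture.isOpened():
    width, height = get_capture_dimensions(capture)
    print(f"{width}x{height}")
capture.release()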
|
9a13253c1ca5c44b7a1ef4440989b1af9abcb776
| 3,641,689
|
import os
def run_system_optimization(des_vars, subsystems, scalers, loop_number):
"""Method to run the top-level system optimization based on the disciplinary surrogate models.
:param des_vars: definition of design variables
:type des_vars: dict
:param subsystems: definition of the disciplinary surrogate models
:type subsystems: dict
:param scalers: scalers of all the system values
:type scalers: dict
:param loop_number: number of the BLISS iteration
:type loop_number: int
:return: tuple with Problem object and driver status
:rtype: tuple
"""
# Set up problem and model
prob = Problem()
prob.model = model = SsbjBLISS2000(
des_vars=des_vars,
subsystems=subsystems,
scalers=scalers,
loop_number=loop_number,
)
# Set driver
prob.driver = pyOptSparseDriver()
prob.driver.options["optimizer"] = "SLSQP"
prob.driver.opt_settings["MAXIT"] = 50
prob.driver.opt_settings["ACC"] = 1e-6
# Add design variables
for des_var, details in des_vars.items():
prob.model.add_design_var(
des_var, lower=details["lower"], upper=details["upper"]
)
# Add objective
model.add_objective("performance.R", scaler=-1.0)
# Add constraints
model.add_constraint("consistency_constraints.gc_D", equals=0.0)
model.add_constraint("consistency_constraints.gc_WE", equals=0.0)
model.add_constraint("consistency_constraints.gc_WT", equals=0.0)
model.add_constraint("consistency_constraints.gc_L", equals=0.0)
model.add_constraint("consistency_constraints.gc_Theta", equals=0.0)
model.add_constraint("consistency_constraints.gc_ESF", equals=0.0)
model.add_constraint("consistency_constraints.gc_WT_L", equals=0.0)
model.add_constraint("constraints.con_dpdx", upper=0.0)
# Add recorder
recorder = SqliteRecorder(
os.path.join(
cr_files_folder,
"ssbj_cr_{}_system_loop{:02d}.sql".format(cr_files_keyword, loop_number),
)
)
prob.driver.add_recorder(recorder)
prob.driver.recording_options["includes"] = []
prob.driver.recording_options["record_objectives"] = True
prob.driver.recording_options["record_constraints"] = True
prob.driver.recording_options["record_desvars"] = True
# prob.driver.recording_options['record_metadata'] = True
# Set up
prob.setup(mode="rev")
# View model
n2(
prob,
outfile=os.path.join(cr_files_folder, "bliss2000_sys_ssbj.html"),
show_browser=False,
)
# Run problem (either once (run_model) or full optimization (run_driver))
prob.run_driver()
# Report result in the log
print("- - - - - - - - - - - - - - - - - - - - - - - - - -")
print("\nOutcome of system optimization (BLISS loop: {})".format(loop_number))
print("\n\nDesign variables")
print("z_sh_low= ", des_vars["z_sh"]["lower"])
print("z_sh_val= ", prob["z_sh"])
print("z_sh_upp= ", des_vars["z_sh"]["upper"])
print("")
print("z_c_low= ", des_vars["z_c"]["lower"])
print("z_c_val= ", prob["z_c"])
print("z_c_upp= ", des_vars["z_c"]["upper"])
print("")
print("z_w_low= ", des_vars["z_w"]["lower"])
print("z_w_val= ", prob["z_w"])
print("z_w_upp= ", des_vars["z_w"]["upper"])
print("")
print("\nObjectives")
print("R_opt=", prob["performance.R"] * scalers["R"])
print("\nConstraints")
print("gc_D=", prob["consistency_constraints.gc_D"])
print("gc_WE=", prob["consistency_constraints.gc_WE"])
print("gc_WT=", prob["consistency_constraints.gc_WT"])
print("gc_L=", prob["consistency_constraints.gc_L"])
print("gc_Theta=", prob["consistency_constraints.gc_Theta"])
print("gc_ESF=", prob["consistency_constraints.gc_ESF"])
print("gc_WT_L=", prob["consistency_constraints.gc_WT_L"])
print("c_dpdx=", prob["constraints.con_dpdx"])
print("- - - - - - - - - - - - - - - - - - - - - - - - - -")
return prob, prob.driver.fail
|
faa46f94f6c866c5f1ec20827ce78ef7e13dfe95
| 3,641,690
|
def analyze_lines(msarc, trcdict, slit, pixcen, order=2, function='legendre', maskval=-999999.9):
"""
    Fit the spatial tilt of each traced arc line on a slit.

    Parameter descriptions are inferred from usage: ``msarc`` is the 2D arc image,
    ``trcdict`` the dictionary of traced lines (keys 'aduse', 'arcdet', 'xtfit',
    'ytfit', 'wmask', 'badlines'), ``slit`` the slit index, ``pixcen`` the pixel
    centers of the traces, ``order``/``function`` the polynomial fit settings and
    ``maskval`` the sentinel value for masked pixels. Returns the number of bad
    lines and the tuple (xtilt, ytilt, mtilt, wtilt).
"""
# Analyze each spectral line
aduse = trcdict["aduse"]
arcdet = trcdict["arcdet"]
xtfits = trcdict["xtfit"]
ytfits = trcdict["ytfit"]
wmasks = trcdict["wmask"]
badlines = trcdict["badlines"]
xtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
ytilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
mtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
wtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
# For displaying later
xmodel = []
ymodel = []
for j in range(arcdet.size):
if not aduse[j]:
continue
xtfit = xtfits[j]
ytfit = ytfits[j]
wmask = wmasks[j]
xint = int(xtfit[0])
sz = (xtfit.size-1)//2
# Trim if we are off the detector
lastx = min(xint + 2 * sz + 1, msarc.shape[1])
if (lastx-xint) < xtfit.size: # Cut down
dx = (lastx-xint)-xtfit.size
xtfit = xtfit[:dx]
ytfit = ytfit[:dx]
wmask = wmask[np.where(wmask < (xtfit.size+dx))]
# Perform a scanning polynomial fit to the tilts
wmfit = np.where(ytfit != maskval)
if wmfit[0].size > order + 1:
cmfit = utils.func_fit(xtfit[wmfit], ytfit[wmfit], function, order, minx=0.0,
maxx=msarc.shape[1] - 1.0)
model = utils.func_val(cmfit, xtfit, function, minx=0.0, maxx=msarc.shape[1] - 1.0)
else:
aduse[j] = False
badlines += 1
continue
# Can this actually happen??
if maskval in model:
# Model contains masked values
aduse[j] = False
badlines += 1
continue
# Perform a robust polynomial fit to the traces
wmsk, mcoeff = utils.robust_polyfit(xtfit[wmask], ytfit[wmask], order, function=function,
sigma=2.0, minx=0.0, maxx=msarc.shape[1] - 1.0)
# Save model
model = utils.func_val(mcoeff, xtfit, function, minx=0.0, maxx=msarc.shape[1] - 1.0)
xmodel.append(xtfit)
ymodel.append(model)
# Save
xtilt[xint:lastx, j] = xtfit / (msarc.shape[1] - 1.0)
# These should be un-normalized for now
pcen = pixcen[arcdet[j], slit]
ytilt[xint:lastx, j] = model[pcen-int(xtfit[wmask[0]])]
mtilt[xint:lastx, j] = model
# Save
trcdict['xmodel'] = xmodel
trcdict['ymodel'] = ymodel
trcdict["aduse"] = aduse
# Return
all_tilts = (xtilt, ytilt, mtilt, wtilt)
return badlines, all_tilts
|
6e14c21545736d37ed6b231e5fc0e62293317b38
| 3,641,691
|
def ad_modify_user_pwd_by_mail(user_mail_addr, old_password, new_password):
"""
    Modify a user's password, looking the user up by mail address.
    :param user_mail_addr: mail address used to resolve the user DN
    :param old_password: current password
    :param new_password: new password to set
    :return: result of the modify_password extended operation
"""
conn = __ad_connect()
user_dn = ad_get_user_dn_by_mail(user_mail_addr)
result = conn.extend.microsoft.modify_password(user="%s" % user_dn, new_password="%s" % new_password,
old_password="%s" % old_password)
conn.unbind()
return result
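# A hypothetical call: the address and passwords are placeholders, and __ad_connect /
# ad_get_user_dn_by_mail are assumed to be defined elsewhere in this (ldap3-based) module.
result = ad_modify_user_pwd_by_mail("user@example.com", "OldPassw0rd!", "NewPassw0rd!")
print("password changed" if result else "password change failed")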
|
7cc5c654517ad3f175e06500310f0bbfec516ad1
| 3,641,692
|
def markup_record(record_text, record_nr, modifiers, targets, output_dict):
""" Takes current Patient record, applies context algorithm,
and appends result to output_dict
"""
# Is used to collect multiple sentence markups. So records can be complete
context = pyConText.ConTextDocument()
# Split record into sentences making use of TextBlob
blob = TextBlob(record_text.lower())
# print(blob)
count = 0
markup_result = []
# Add markup per sentence
for sentence in blob.sentences:
m = markup_sentence(sentence.raw, modifiers=modifiers, targets=targets)
markup_result.append(m)
count = count + 1
print("\nFor record number:", record_nr)
print("Number of sentences that have been marked up:", count)
# print("\nMarkup result:")
# print(markup_result)
# Add sentence markup to contextDocument
for sentence_markup in markup_result:
context.addMarkup(sentence_markup)
# Append context object and xml to output dictionary,
# with as key the record number
context_xml = context.getXML()
output_dict.update({record_nr: {"object": context, "xml": context_xml}})
    return output_dict
|
2eee4560a411bcd7ef364b6ed9b37cc2870cd3b5
| 3,641,693
|
import inspect
def get_file_name(file_name):
"""
    Return the test suite's file name (without extension) by locating the first
    call-stack frame whose filename contains `file_name`; return "" if none is found.
"""
testsuite_stack = next(iter(list(filter(lambda x: file_name in x.filename.lower(), inspect.stack()))), None)
if testsuite_stack:
if '/' in testsuite_stack.filename:
split_character = '/'
else:
split_character = '\\'
return testsuite_stack.filename.split(split_character)[-1].split(".")[0]
else:
return ""
|
97172600d785339501f5e58e8aca6581a0a690e0
| 3,641,694
|
import torch
def track_edge_matrix_by_spt(batch_track_bbox, batch_track_frames, history_window_size=50):
"""
    :param batch_track_bbox: B, M, T, 4 (x, y, w, h)
    :param batch_track_frames: B, M, T frame indices of each track step
    :param history_window_size: normalisation constant for frame-index differences
    :return: B, M, T, T, 5 pairwise edge features (time, x, y, w, h similarities)
"""
B, M, T, _ = batch_track_bbox.size()
batch_track_xy = batch_track_bbox[:, :, :, :2]
batch_track_wh = batch_track_bbox[:, :, :, 2:]
batch_track_t = batch_track_frames[:, :, :, None]
batch_track_diff_t = 1 - torch.abs(batch_track_t[:, :, :, None, :].expand(-1, -1, -1, T, -1) - batch_track_t[:, :, None, :, :].expand(-1, -1, T, -1, -1)) / history_window_size
batch_track_diff_xy = 1 - torch.abs(batch_track_xy[:, :, :, None, :].expand(-1, -1, -1, T, -1) - batch_track_xy[:, :, None, :, :].expand(-1, -1, T, -1, -1))
batch_track_diff_wh = 1 - torch.abs(batch_track_wh[:, :, :, None, :].expand(-1, -1, -1, T, -1) - (batch_track_wh[:, :, None, :, :].expand(-1, -1, T, -1, -1)))
# B, M, T, T, 5
track_edge_matrix = torch.cat([batch_track_diff_t, batch_track_diff_xy, batch_track_diff_wh], dim=-1)
return track_edge_matrix
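# A shape-only sanity check with random inputs; box coordinates are assumed to be
# normalised to [0, 1] and frame indices to lie within the history window.
B, M, T = 2, 3, 50
example_bbox = torch.rand(B, M, T, 4)                        # (x, y, w, h) per track step
example_frames = torch.arange(T, dtype=torch.float32).expand(B, M, T)
edges = track_edge_matrix_by_spt(example_bbox, example_frames)
print(edges.shape)  # torch.Size([2, 3, 50, 50, 5])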
|
5303f401d925c26a1c18546ba371a2119a41ec3d
| 3,641,695
|
def _file(space, fname, flags=0, w_ctx=None):
""" file - Reads entire file into an array
'FILE_USE_INCLUDE_PATH': 1,
'FILE_IGNORE_NEW_LINES': 2,
'FILE_SKIP_EMPTY_LINES': 4,
'FILE_NO_DEFAULT_CONTEXT': 16,
"""
if not is_in_basedir(space, 'file', fname):
space.ec.warn("file(%s): failed to open stream: %s " %
(fname, 'Operation not permitted'))
return space.w_False
if flags > 23 or flags < 0:
space.ec.warn("file(): '%d' flag is not supported" % flags)
return space.w_False
if fname == "":
space.ec.warn("file(): Filename cannot be empty")
return space.w_False
ignore_new_lines = flags & 2 != 0
skip_empty_lines = flags & 4 != 0
try:
_fname = rpath.normpath(fname)
arr_list = []
fstream = open(_fname)
line = fstream.readline()
while line != '':
if ignore_new_lines:
line = line.rstrip('\n')
if skip_empty_lines and line == "":
line = fstream.readline()
continue
arr_list.append(space.newstr(line))
line = fstream.readline()
return space.new_array_from_list(arr_list)
except OSError:
space.ec.warn("file(%s): failed to open stream: "
"No such file or directory" % fname)
return space.w_False
except IOError:
space.ec.warn("file(%s): failed to open stream: "
"No such file or directory" % fname)
return space.w_False
|
d8a04244c90f3f730c297a8dbaa1372acd61993b
| 3,641,696
|
import numpy as np
def prepare_features(tx_nan, degree, mean_nan=None, mean=None, std=None):
"""Clean and prepare for learning. Mean imputing, missing value indicator, standardize."""
# Get column means, if necessary
if mean_nan is None: mean_nan = np.nanmean(tx_nan,axis=0)
# Replace NaNs
tx_val = np.where(np.isnan(tx_nan), mean_nan, tx_nan)
# Polynomial features
tx = build_poly(tx_val, degree)
const_col = tx.shape[1]-1
# Add NaN indicator columns
nan_cols = np.flatnonzero(np.any(np.isnan(tx_nan), axis=0))
    ind_cols = np.where(np.isnan(tx_nan[:,nan_cols]), 1, 0)
tx = np.c_[tx, ind_cols]
# Standardize
tx, mean, std = standardize_numpy(tx, mean, std)
tx[:,const_col] = 1.0
return tx, mean, std, mean_nan, nan_cols
|
2f9fd73cd04b40a85556573a62a083a0ffaa725c
| 3,641,697
|
def _write_matt2(model, name, mids, nmaterials, op2, op2_ascii, endian):
"""writes the MATT2"""
#Record - MATT2(803,8,102)
#Word Name Type Description
#1 MID I Material identification number
#2 TID(15) I TABLEMi entry identification numbers
#17 UNDEF None
key = (803, 8, 102)
nfields = 17
spack = Struct(endian + b'17i')
nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)
for mid in sorted(mids):
mat = model.MATT2[mid]
data = [
mat.mid,
mat.g11_table,
mat.g12_table,
mat.g13_table,
mat.g22_table,
mat.g23_table,
mat.g33_table,
mat.rho_table,
mat.a1_table,
mat.a2_table,
mat.a3_table,
0,
mat.ge_table,
mat.st_table,
mat.sc_table,
mat.ss_table,
0,
]
assert None not in data, data
#print('MATT2', data, len(data))
assert len(data) == nfields, len(data)
op2_ascii.write(' mid=%s data=%s\n' % (mid, data[1:]))
op2.write(spack.pack(*data))
return nbytes
|
607b94a6c1e3daf4b482acbb1df1ce967f1bce3b
| 3,641,698
|
def all_subclasses(cls):
"""Returns all known (imported) subclasses of a class."""
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in all_subclasses(s)]
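# A minimal usage example: subclasses of subclasses are returned as well.
class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

print(all_subclasses(Base))  # [<class '...Child'>, <class '...GrandChild'>]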
|
8b9a2ecd654b997b5001820d6b85e442af9cee3b
| 3,641,699
|