content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def wait_for_task(task, actionName='job', hideResult=False):
    """Wait for a vSphere task to finish and report its outcome.

    Polls ``task.info.state`` every 2 seconds until the task leaves the
    ``running`` state, then prints a status message.

    :param task: vSphere task object (pyVmomi ``vim.Task``).
    :param actionName: Human-readable name used in the status messages.
    :param hideResult: If True, omit ``task.info.result`` from the success
        message.
    :return: ``task.info.result`` on success.
    :raises: ``task.info.error`` if the task failed.
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)
    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
        else:
            out = '%s completed successfully.' % actionName
        print(out)
    else:
        # Print the failure message before raising; the original raised
        # first, which made its print statement unreachable. Also converted
        # the Python 2 print statements to print() calls.
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        print(out)
        raise task.info.error
    return task.info.result
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces.

    Lazily creates the module-wide ``_RandomNameSequence`` instance using
    double-checked locking so concurrent callers all share one sequence.
    """
    global _name_sequence
    if _name_sequence is None:
        with _once_lock:
            # Re-check under the lock: another thread may have won the race.
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
    return _name_sequence
def relative_difference(x: np.array, y: np.array) -> np.array:
    """Return the relative difference estimator for two Lagrange multipliers.

    The absolute difference is normalized by the largest magnitude among
    the inputs, the difference itself, and 1 (to avoid division by zero).
    """
    hi = np.max([x, y])
    lo = np.min([x, y])
    diff = hi - lo
    scale = np.max(np.abs([x, y, diff, 1.]))
    return np.abs(diff) / scale
def ConvertPngToYuvBarcodes(input_directory='.', output_directory='.'):
    """Converts PNG barcodes to YUV barcode images.

    Reads files matching the 'barcode_' prefix (barcode_xxxx.png) from the
    input directory and converts each to a YUV file in the output directory
    via _ConvertToYuvAndDelete, which uses ffmpeg.

    NOTE(review): the original docstring described frame_xxxx.png naming,
    but the code passes the 'barcode_' prefix and pattern to
    PerformActionOnAllFiles -- confirm which naming scheme is intended.

    Args:
      input_directory(string): The input directory to read the PNG barcodes from.
      output_directory(string): The output directory to write the YUV files to.

    Return:
      (bool): True if the conversion was without errors.
    """
    return helper_functions.PerformActionOnAllFiles(
        input_directory, 'barcode_', 'png', 0, _ConvertToYuvAndDelete,
        output_directory=output_directory, pattern='barcode_')
from pathlib import Path
def _ignore_on_copy(directory, contents): # pylint: disable=unused-argument
"""Provides list of items to be ignored.
Args:
directory (Path): The path to the current directory.
contents (list): A list of files in the current directory.
Returns:
list: A list of files to be ignored.
"""
# shutil passes strings, so ensure a Path
directory = Path(directory)
if directory.name == "material":
return ["mkdocs_theme.yml", "main.html", "404.html"]
if directory.name == "partials":
return ["integrations"]
if directory.name == "images":
return ["favicon.png"]
return [] | 3a551f6a252406b88fb19c0dc8180631cd5996ce | 28,704 |
def registDeptUser(request):
    """Register a Hometax cash-receipt department-user account with Popbill.

    - https://docs.popbill.com/htcashbill/python/api#RegistDeptUser
    """
    try:
        # Popbill member business registration number.
        corp_num = settings.testCorpNum
        # Hometax department-user account credentials.
        dept_user_id = "deptuserid"
        dept_user_pwd = "deptuserpwd"
        response = htCashbillService.registDeptUser(corp_num, dept_user_id, dept_user_pwd)
        context = {'code': response.code, 'message': response.message}
        return render(request, 'response.html', context)
    except PopbillException as PE:
        return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
import os
def generate_repository_path(object_id):
    """
    Generate the path of a cilantro (sub)object in the repository.

    This is based on the last 4/2 digits of the object_id, which should
    be a zenon or atom ID.

    E.g. object_id "JOURNAL-ZID1234567" is stored under
    "4500/4567/JOURNAL-ZID1234567".

    :param str object_id: The object_id of the cilantro object.
    :return str: The path where the object is stored in the repository
    """
    if len(object_id) < 4:
        raise InvalidObjectIdError(f"object_id '{object_id}' "
                                   f"has to have at least 4 characters")
    suffix = object_id[-4:]
    if not suffix.isdigit():
        raise InvalidObjectIdError(f"The last 4 characters of object_id "
                                   f"'{object_id}' have to be numeric")
    # e.g. suffix "4567" -> thousands bucket "4500".
    thousands_folder = suffix[:2] + "00"
    return os.path.join(thousands_folder, suffix, object_id)
import os
def write_sysctl(entry, value):
    """Write value to a sysctl entry. Return this value if successful"""
    # Translate dotted notation (net.ipv4.x) into the procfs path.
    path = SYSCTL_ENDPOINT + '/' + '/'.join(entry.split('.'))
    if not os.path.exists(path):
        logger.debug("{} does not exist".format(entry))
        return
    try:
        with open(path, 'w') as sysctl_file:
            sysctl_file.write(value)
    except IOError:
        logger.warning("unable to set {} to {}".format(entry, value))
        return
    logger.debug("entry {} set to {}".format(entry, value))
    return value
import httpx
import asyncio
async def get_rank(mode: str, num: int) -> list[dict]:
    """Fetch a Pixiv ranking page and download the ranked images.

    Args:
        mode (str): Ranking mode: "daily", "weekly" or "monthly".
        num (int): Number of entries to take from the ranking (1-50).

    Returns:
        list[dict]: One dict per illustration with its rank, date, title,
        pid, artist name/id, comma-joined tags, original-image URL, and
        "seq" holding an image message segment on success or a failure
        marker string when the download raised.

    Raises:
        Pixiv_api_Connect_Error: If the ranking endpoint does not return 200.
    """
    url = "https://www.pixiv.net/ranking.php"
    param = {"mode": mode, "p": 1, "format": "json"}
    # Query params set on the client are merged into every request it sends;
    # proxies and the pixiv.cat mirror come from the bot config.
    async with httpx.AsyncClient(proxies=config.proxies, params=param, timeout=25) as s:
        logger.debug(f"连接url:{url}")
        res = await s.get(url)
        logger.debug(f"status_code:{str(res.status_code)}")
        if res.status_code != 200:
            raise Pixiv_api_Connect_Error
        js = res.json()
        logger.debug(f"json-data:{str(js)}")
        im_data = []
        for index, x in enumerate(js["contents"]):
            if index < num:
                # Rewrite the 240x480 thumbnail URL into the
                # original-resolution image URL.
                urls = (
                    x["url"]
                    .replace("/c/240x480/img-master", "/img-original")
                    .replace("_master1200", "")
                )
                if config.proxy_pixiv:
                    # Route image downloads through the pixiv.cat mirror.
                    urls = urls.replace("i.pximg.net", "i.pixiv.cat")
                im_data.append(
                    {
                        "rank": x["rank"],
                        "date": x["date"],
                        "title": x["title"],
                        "pid": x["illust_id"],
                        "user_name": x["user_name"],
                        "user_id": x["user_id"],
                        "tags": ",".join(x["tags"]),
                        "url": urls,
                        "seq": "",
                    }
                )
            else:
                # Contents are rank-ordered, so stop once `num` are taken.
                break
        # Download all images concurrently; return_exceptions keeps one
        # failed download from cancelling the rest.
        coros = [utils.dl_image(x["url"]) for x in im_data]
        res = await asyncio.gather(*coros, return_exceptions=True)
        for result, item in zip(res, im_data):
            if isinstance(result, Exception):
                item["seq"] = "请求失败"
            else:
                item["seq"] = str(MessageSegment.image(r"file:///" + result))
        return im_data
import torch
def generate_fake_data_loader():
    """Generate a fake DataLoader: a list of four [samples, labels] batches,
    each batch holding three samples of four features."""
    feature_block_a = torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]])
    feature_block_b = torch.tensor([[1., 2., 3., 4.], [1., 1., 2., 2.], [2., 2., 2., 2.]])
    target_block_a = torch.tensor([0, 0, 1])
    target_block_b = torch.tensor([1, 1, 0])
    # All four sample/label pairings, in the original fixed order.
    return [
        [samples, labels]
        for samples in (feature_block_a, feature_block_b)
        for labels in (target_block_a, target_block_b)
    ]
def get_graph(node, seq):
    """Get the relaxed pose graph from the map server"""
    req = GraphRelaxation.Request()
    req.seq = int(seq)
    req.project = True  # TODO always True?
    req.update_graph = False  # TODO not used?
    return ros_service_request(node, "relaxed_graph", GraphRelaxation, req)
def get_RV_K(
    P_days,
    mp_Mearth,
    Ms_Msun,
    ecc=0.0,
    inc_deg=90.0,
    nsamples=10000,
    percs=[50, 16, 84],
    return_samples=False,
    plot=False,
):
    """Compute the RV semiamplitude in m/s via Monte Carlo.

    P_days : tuple
        median and 1-sigma error
    mp_Mearth : tuple
        median and 1-sigma error
    Ms_Msun : tuple
        median and 1-sigma error
    ecc : float
        orbital eccentricity
    inc_deg : float
        orbital inclination in degrees
    nsamples : int
        number of Monte Carlo samples
    percs : list
        percentiles summarizing the K distribution (median, lower, upper)
    return_samples : bool
        if True, also return the raw K samples
    plot : bool
        if True, plot a histogram of the K samples
    """
    # Bug fix: the original used `if (isinstance(...), isinstance(...), ...)`,
    # which builds a non-empty tuple that is always truthy, so scalar or
    # array inputs were always subscripted as tuples and crashed.
    if (
        isinstance(P_days, tuple)
        and isinstance(Ms_Msun, tuple)
        and isinstance(mp_Mearth, tuple)
    ):
        # generate samples
        # NOTE(review): np.random.rand is uniform on [0, 1), so this spreads
        # samples uniformly over [median, median + sigma) rather than drawing
        # a Gaussian -- confirm whether np.random.randn was intended.
        P_days = np.random.rand(nsamples) * P_days[1] + P_days[0]
        mp_Mearth = np.random.rand(nsamples) * mp_Mearth[1] + mp_Mearth[0]
        Ms_Msun = np.random.rand(nsamples) * Ms_Msun[1] + Ms_Msun[0]
    # Attach astropy units and convert everything to SI.
    P = P_days * u.day.to(u.second) * u.second
    Ms = Ms_Msun * u.Msun.to(u.kg) * u.kg
    mp = mp_Mearth * u.Mearth.to(u.kg) * u.kg
    inc = np.deg2rad(inc_deg)
    K_samples = (
        (2 * np.pi * c.G / (P * Ms * Ms)) ** (1.0 / 3)
        * mp
        * np.sin(inc)
        / unumpy.sqrt(1 - ecc ** 2)
    ).value
    K, K_lo, K_hi = np.percentile(K_samples, percs)
    K, K_siglo, K_sighi = K, K - K_lo, K_hi - K
    if plot:
        _ = hist(K_samples, bins="scott")
    if return_samples:
        return (K, K_siglo, K_sighi, K_samples)
    else:
        return (K, K_siglo, K_sighi)
def arc(color, start_angle, stop_angle, width, height,
        x=None, y=None, thickness=1, anchor='center', **kwargs):
    """
    Function to make an arc.

    :param color: color to draw arc
    :type color: str or List[str]
    :param start_angle: angle to start drawing arc at
    :type start_angle: int
    :param stop_angle: angle to stop drawing arc at
    :type stop_angle: int
    :param width: width of the arc's bounding box
    :type width: int
    :param height: height of the arc's bounding box
    :type height: int
    :param x: x coordinate of the arc's anchor position
    :type x: int
    :param y: y coordinate of the arc's anchor position
    :type y: int
    :param thickness: thickness of arc in pixels
    :type thickness: int
    :param anchor: how (x, y) anchors the arc (default 'center')
    :type anchor: str
    :param kwargs: extra options forwarded to the Arc constructor
    :return: Arc object created
    """
    return Arc((x, y), width, height, start_angle, stop_angle, anchor, color, thickness, **kwargs)
def calculate_shapley_value(g, prob_vals, maxIter=20000):
    """
    Monte Carlo estimate of the Shapley value of every node in a graph game.

    This algorithm is based on page 29 of the following paper:
    https://arxiv.org/ftp/arxiv/papers/1402/1402.0567.pdf

    For each of maxIter random node permutations, the marginal contribution
    of each node (coalition value with the node minus without it, as given
    by tshp.get_map_value) is accumulated, then averaged.

    :param g: the graph (networkx-style: len(), has_edge, neighbors)
    :param prob_vals: a list. it contains the weight of each node in the graph
    :param maxIter: maximum number of iterations. for 6-12 nodes, the value should be near 2000. for 1000 nodes, this value is
    around 200000
    :return: list of estimated Shapley values, indexed by node id
    """
    ## first block: initialization
    n_nodes = len(g)
    node_list = list(range(0, n_nodes))
    shapley_val_list = [0] * n_nodes
    ## second block: sample random permutations and accumulate marginals
    for i in range(0, maxIter):
        shuffle(node_list)
        P = []  # coalition built so far, in permutation order
        for node in node_list:
            ## forming the subgraph based on the nodes in P
            subgraph2 = nx.Graph()
            if P:
                subgraph2_nodes = P
                subgraph2.add_nodes_from(subgraph2_nodes)
                if len(subgraph2_nodes) > 1:
                    # Copy the edges of g induced by the coalition P.
                    for x in range(0, len(subgraph2_nodes)):
                        for y in range(x + 1, len(subgraph2_nodes)):
                            if g.has_edge(subgraph2_nodes[x], subgraph2_nodes[y]):
                                subgraph2.add_edge(subgraph2_nodes[x], subgraph2_nodes[y])
                # Coalition value WITHOUT the candidate node.
                map_val2 = tshp.get_map_value(subgraph2, prob_vals)
            else:
                map_val2 = 0
            ## adding extra node to get map value 1
            subgraph2.add_node(node)
            if len(subgraph2) > 1:
                # Connect the new node to its g-neighbors already in P.
                nbrs = set(g.neighbors(node))
                for nbr in nbrs - set([node]):
                    if subgraph2.has_node(nbr):
                        subgraph2.add_edge(node, nbr)
            # Coalition value WITH the candidate node.
            map_val1 = tshp.get_map_value(subgraph2, prob_vals)
            # Marginal contribution of `node` to the coalition P.
            shapley_val_list[node] += (map_val1 - map_val2)
            P.append(node)
    ## third block: average over the sampled permutations
    for i in range(0, n_nodes):
        shapley_val_list[i] = shapley_val_list[i]/float(maxIter)
    ## fourth block
    return shapley_val_list
from typing import List
from typing import Dict
from typing import Any
async def complete_multipart_upload(bucket: str, s3_key: str, parts: List, upload_id: str) -> Dict[str, Any]:
    """Complete multipart upload to s3.

    Args:
        bucket (str): s3 bucket
        s3_key (str): s3 prefix
        parts (List): all parts info
        upload_id (str): multipart upload Id

    Returns:
        Dict[str, Any]: response of operation
    """
    s3_client = S3['client']['obj']
    return await s3_client.complete_multipart_upload(
        Bucket=bucket,
        Key=s3_key,
        MultipartUpload={'Parts': parts},
        UploadId=upload_id,
    )
def build_mask(module='A', pixscale=0.03):
    """Create coronagraphic mask image.

    Return a truncated image of the full coronagraphic mask layout
    for a given module.

    +V3 is up, and +V2 is to the left.

    :param module: 'A' or 'B' -- selects the mask ordering for that module.
    :param pixscale: pixel scale passed through to coron_trans.
    :return: 2D array with the per-mask images concatenated horizontally.
    :raises ValueError: if module is not 'A' or 'B'.
    """
    if module == 'A':
        names = ['MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
    elif module == 'B':
        names = ['MASKSWB', 'MASKLWB', 'MASK430R', 'MASK335R', 'MASK210R']
    else:
        # Previously an unknown module fell through to a NameError on
        # `names`; fail with an explicit, catchable error instead.
        raise ValueError(f"module must be 'A' or 'B', got {module!r}")
    allims = [coron_trans(name, module, pixscale) for name in names]
    return np.concatenate(allims, axis=1)
async def update_login_me(
    *,
    password: str = Body(...),
    new_email: tp.Optional[EmailStr] = Body(None, alias='newEmail'),
    new_password: tp.Optional[str] = Body(None, alias='newPassword'),
    current_user: models.User = Depends(common.get_current_user),
    uow: IUnitOfWork = Depends(common.get_uow),
) -> models.User:
    """Updates the user's login credentials.

    Re-authenticates the caller with their current password, then applies
    the new email and/or new password if supplied. Responds with HTTP 400
    when the password is wrong or the user is inactive.
    """
    # Require the current password before allowing any credential change.
    user = uow.user.authenticate(current_user.email, password)
    if not user:
        raise HTTPException(
            status_code=400,
            detail="Incorrect password"
        )
    elif not user.is_active:
        raise HTTPException(
            status_code=400,
            detail="Inactive user",
        )
    # Build a partial update containing only the supplied fields.
    user_in = schema.UserUpdate()
    if new_email:
        user_in.email = new_email
    if new_password:
        user_in.password = new_password
    # Persist within the unit-of-work transaction scope.
    with uow:
        new_user = uow.user.update(obj=user, obj_in=user_in)
    return new_user
def comp_sharpness(is_stationary, signal, fs, method='din', skip=0):
    """ Acoustic sharpness calculation according to different methods:
        Aures, Von Bismarck, DIN 45692, Fastl

    Parameters:
    ----------
    is_stationary: boolean
        True if the signal is stationary, false if it is time varying
    signal: numpy.array
        time history values
    fs: integer
        sampling frequency
    method: string
        'din' by default,'aures', 'bismarck','fastl'
    skip : float
        number of second to be cut at the beginning of the analysis

    Outputs
    ------
    S : float
        sharpness value
    """
    if method not in ('din', 'aures', 'bismarck', 'fastl'):
        raise ValueError("ERROR: method must be 'din', 'aures', 'bismarck', 'fastl'")
    loudness = comp_loudness(is_stationary, signal, fs)
    if method == 'din':
        S = comp_sharpness_din(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'aures':
        S = comp_sharpness_aures(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'bismarck':
        S = comp_sharpness_bismarck(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'fastl':
        S = comp_sharpness_fastl(loudness['values'], loudness['specific values'], is_stationary)
    if is_stationary == False:
        # Cut transient effect at the start of the time-varying analysis.
        # Bug fix: the original called np.linspace(0, len(signal/fs, len(S)))
        # -- misplaced parentheses divided the signal array by fs and passed
        # a single (invalid) argument to len().
        time = np.linspace(0, len(signal) / fs, len(S))
        cut_index = np.argmin(np.abs(time - skip))
        S = S[cut_index:]
    output = {
        "name": "sharpness",
        "method": method,
        "values": S,
        "skip": skip
    }
    return output
def load_hobbies(path='data', extract=True):
    """
    Downloads the 'hobbies' dataset, saving it to the output
    path specified and returns the data.
    """
    dataset_name = 'hobbies'
    return _load_file_data(dataset_name, path, extract)
def is_viable(individual):
    """
    evaluate.evaluate() will set an individual's fitness to NaN and the
    attributes `is_viable` to False, and will assign any exception triggered
    during the individuals evaluation to `exception`. This just checks the
    individual's `is_viable`; if it doesn't have one, this assumes it is viable.

    :param individual: to be checked if viable
    :return: True if individual is viable
    """
    # Missing attribute means the individual was never flagged non-viable.
    return getattr(individual, 'is_viable', True)
def combine_counts(hits1,
                   hits2,
                   multipliers=None,
                   total_reads=0,
                   unmatched_1="Unknown",
                   unmatched_2="Unknown",
                   ):
    """ compile counts into nested dicts

    Cross-tabulates two read->hits maps into counts[hit1][hit2].

    :param hits1: dict mapping read id -> list of hit ids (first axis)
    :param hits2: dict mapping read id -> list of hit ids (second axis).
        WARNING: entries are consumed (popped) while processing.
    :param multipliers: optional dict mapping read id -> weight. When given,
        each count is a (raw_count, weighted_count) tuple instead of an int,
        and the dict is consumed via pop so leftover weights can be summed.
    :param total_reads: known total number of reads. When > 0 (or derived
        from multipliers), reads seen in neither map are tallied under
        (unmatched_1, unmatched_2).
    :param unmatched_1: axis-1 label for reads absent from hits1
    :param unmatched_2: axis-2 label for reads absent from hits2
    :return: (counts, types2) where counts is the nested dict and types2 is
        the set of all hit ids seen on the second axis
    """
    total_counted = 0
    counts = {}
    # keep track of all unique hit ids from set 2
    types2 = set()
    if multipliers is None:
        def _get_increment(read):
            # Unweighted mode: every read counts once.
            return 1

        def _update_hits(increment, counts, hit1, hit2):
            """ Just add 1 to get raw numbers """
            h1counts = counts.setdefault(hit1, {})
            h1counts[hit2] = h1counts.get(hit2, 0) + increment
    else:
        # if a mult table was given, use it to get total count
        if total_reads == 0:
            total_reads = len(multipliers)

        def _get_increment(read):
            """ get multiplier. Use pop to see leftovers """
            return multipliers.pop(read, 1)

        def _update_hits(increment, counts, hit1, hit2):
            """ count both raw numbers and multiplied """
            h1counts = counts.setdefault(hit1, {})
            count_tuple = h1counts.get(hit2, (0, 0))
            count_tuple = (count_tuple[0] + 1,
                           count_tuple[1] + increment)
            h1counts[hit2] = count_tuple
    # Start by using reads from hits1 as index
    for (read, hit_list1) in hits1.items():
        # remove hits from hits2 as we go, so we know what didn't match hits1
        # default to umatched_2
        total_counted += 1
        increment = _get_increment(read)
        hit_list2 = hits2.pop(read, [unmatched_2, ])
        for hit2 in hit_list2:
            for hit1 in hit_list1:
                _update_hits(increment, counts, hit1, hit2)
            types2.add(hit2)
    # remaining reads in hits2 counted as Unknown
    # we know these don't exist in hits1
    hit1 = unmatched_1
    for read, hit_list2 in hits2.items():
        total_counted += 1
        increment = _get_increment(read)
        for hit2 in hit_list2:
            _update_hits(increment, counts, hit1, hit2)
            types2.add(hit2)
    # if a total was given
    if total_reads > 0:
        unknown_counts = counts.setdefault(unmatched_1, {})
        if multipliers is None:
            unknown_counts[unmatched_2] = total_reads - total_counted
        else:
            # Leftover multipliers belong to reads never seen in either map.
            unknown_counts[unmatched_2] = (total_reads - total_counted,
                                           sum(multipliers.values()))
    return (counts, types2)
def root():
    """Serves the website home page"""
    home_page = render_template("index.html")
    return home_page
import string
import random
def getCookie():
    """
    Return a randomly generated JSESSIONID cookie string.

    The value is 31 characters drawn from lowercase letters and digits.
    Session identifiers are security-sensitive, so the token is generated
    with the `secrets` module (CSPRNG) rather than `random`, whose output
    is predictable.

    :return: A cookie of the form "JSESSIONID=<31 chars>"
    """
    import secrets  # local import: cryptographically strong choice()

    lettersAndDigits = string.ascii_lowercase + string.digits
    cookie = 'JSESSIONID='
    cookie += ''.join(secrets.choice(lettersAndDigits) for ch in range(31))
    return cookie
def get_authenticated_igramscraper(username: str, password: str):
    """Gets an authenticated igramscraper Instagram client instance."""
    instagram_client = Instagram()
    instagram_client.with_credentials(username, password)
    # Two-step verification is intentionally disabled for this login flow.
    #client.login(two_step_verificator=True)
    instagram_client.login(two_step_verificator=False)
    return instagram_client
def read_in_nn_path(path):
    """
    Read in NN from a specified path.

    Loads the weight/bias arrays, input scaling bounds and wavelength grid
    from an .npz archive and returns them as
    ((w0, w1, w2, b0, b1, b2, x_min, x_max), wavelength_payne).
    """
    coeff_keys = (
        "w_array_0", "w_array_1", "w_array_2",
        "b_array_0", "b_array_1", "b_array_2",
        "x_min", "x_max",
    )
    # Context manager closes the archive once the arrays are extracted.
    with np.load(path) as archive:
        wavelength_payne = archive["wavelength_payne"]
        NN_coeffs = tuple(archive[key] for key in coeff_keys)
    return NN_coeffs, wavelength_payne
def crop_zeros(array, remain=0, return_bound=False):
    """
    Crop the edge zero of the input array.

    Parameters
    ----------
    array : numpy.ndarray
        2D numpy array.
    remain : int
        The number of edges of all zeros which you want to remain.
    return_bound : str or bool
        Select the mode to manipulate the drawing.
        True: return array and bound.
        'only_bound': return bound.
        Others: return array.

    Returns
    -------
    out : np.ndarray, optional
        Cropped array.
    left_bound : int, optional
        The edge of the left cropping.
    right_bound : int, optional
        The edge of the right cropping.
    upper_bound : int, optional
        The edge of the upper cropping.
    lower_bound : int, optional
        The edge of the lower cropping.

    References
    ----------
    https://stackoverflow.com/questions/48987774/how-to-crop-a-numpy-2d-array-to-non-zero-values
    """
    row = array.any(1)  # True for each row that holds any non-zero value
    if row.any():
        row_size, col_size = array.shape
        col = array.any(0)  # True for each column that holds any non-zero value
        # argmax on a boolean array returns the index of the first True,
        # i.e. the first non-empty column/row; `remain` widens the crop and
        # the min/max clamp the bounds to the array edges.
        left_bound = np.max([col.argmax() - remain, 0])
        right_bound = np.min([col_size - col[::-1].argmax() + remain, col_size - 1])  # col[::-1] is reverse of col
        upper_bound = np.max([row.argmax() - remain, 0])
        lower_bound = np.min([row_size - row[::-1].argmax() + remain, row_size - 1])  # row[::-1] is reverse of row
        out = array[upper_bound:lower_bound, left_bound:right_bound]
    else:
        # All-zero input: no meaningful bounds; return an empty array.
        left_bound = None
        right_bound = None
        upper_bound = None
        lower_bound = None
        out = np.empty((0, 0), dtype=bool)
    if isinstance(return_bound, bool) and return_bound:
        return out, (left_bound, right_bound, upper_bound, lower_bound)
    elif return_bound == 'only_bound':
        return left_bound, right_bound, upper_bound, lower_bound
    else:
        return out
def go_info_running(data, info_name, arguments):
    """Returns "1" if go is running, otherwise "0".

    NOTE(review): the parameters are unused, and "running" is derived from
    whether a 'modifier' key exists in the module-level `hooks` mapping --
    confirm that this is the intended liveness signal.
    """
    return '1' if 'modifier' in hooks else '0'
def letra_mas_comun(cadena: str) -> str:
    """Return the most common letter in the given string.

    Parameters:
        cadena (str): The string whose most common letter is wanted.

    Returns:
        str: The most common letter. Ties are broken by returning the
        alphabetically later letter (by code point, so uppercase sorts
        before lowercase). Returns "" when the string has no letters.
    """
    conteo_letras = {}
    mayor_conteo = 0
    letra_moda = ""
    for caracter in cadena:
        # Count ASCII letters only. Bug fix: the original range test
        # "A" <= c <= "z" also matched the punctuation [ \ ] ^ _ ` that
        # sits between the upper- and lowercase ranges.
        if not (caracter.isascii() and caracter.isalpha()):
            continue
        conteo_letras[caracter] = conteo_letras.get(caracter, 0) + 1
        cuenta = conteo_letras[caracter]
        if cuenta > mayor_conteo:
            mayor_conteo = cuenta
            letra_moda = caracter
        elif cuenta == mayor_conteo and caracter > letra_moda:
            # Tie: keep the alphabetically later letter.
            letra_moda = caracter
    # Leftover debug prints removed (they were an unintended side effect).
    return letra_moda
def dwt2(image, wavelet, mode="symmetric", axes=(-2, -1)):
    """Computes single level wavelet decomposition for 2D images.

    Applies a 1D DWT along each of the two given axes and returns the
    approximation band plus the three detail bands as
    (caa, (cda, cad, cdd)).

    :param image: input array; dtype promoted via promote_arg_dtypes.
    :param wavelet: wavelet identifier accepted by ensure_wavelet_.
    :param mode: signal extension mode passed to dwt_axis.
    :param axes: the two axes to transform (negative indices allowed).
    :raises ValueError: if `axes` does not name exactly two dimensions.
    """
    wavelet = ensure_wavelet_(wavelet)
    image = promote_arg_dtypes(image)
    # Removed the unused dec_lo/dec_hi locals; dwt_axis takes the wavelet
    # object itself, so the filter taps were never read here.
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("Expected two dimensions")
    # make sure that axes are positive
    axes = [a + image.ndim if a < 0 else a for a in axes]
    # Transform along the first axis, then along the second axis of each band.
    ca, cd = dwt_axis(image, wavelet, axes[0], mode)
    caa, cad = dwt_axis(ca, wavelet, axes[1], mode)
    cda, cdd = dwt_axis(cd, wavelet, axes[1], mode)
    return caa, (cda, cad, cdd)
def recommended_global_tags_v2(release, base_tags, user_tags, metadata):
    """
    Determine the recommended set of global tags for the given conditions.

    This function is called by b2conditionsdb-recommend and it may be called
    by conditions configuration callbacks. While it is in principle not limited
    to the use case of end user analysis this is expected to be the main case
    as in the other cases the production manager will most likely set the
    global tags explicitly in the steering file.

    Parameters:
      release (str): The release version that the user has set up.
      base_tags (list(str)): The global tags of the input files or default global tags in case of no input.
      user_tags (list(str)): The global tags provided by the user.
      metadata (list): The EventMetaData objects of the input files or None in case of no input.

    Returns:
      A dictionary with the following keys:
      tags : list of recommended global tags (mandatory)
      message: a text message for the user (optional)
      release: a recommended release (optional)
    """
    # gather information that we may want to use for the decision about the recommended GT:
    # existing GTs, release used to create the input data
    # NOTE(review): existing_data_tags, existing_analysis_tags, data_release
    # and is_mc are computed but never read below -- confirm whether they are
    # leftovers or intended for a future decision rule.
    existing_master_tags = [tag for tag in base_tags if tag.startswith('master_') or tag.startswith('release-')]
    existing_data_tags = [tag for tag in base_tags if tag.startswith('data_')]
    existing_mc_tags = [tag for tag in base_tags if tag.startswith('mc_')]
    existing_analysis_tags = [tag for tag in base_tags if tag.startswith('analysis_')]
    data_release = metadata[0]['release'] if metadata else None
    # if this is run-independent MC we don't want to show data tags (all other cases, we do)
    if metadata:
        is_mc = bool(metadata[0]['isMC'])
        experiments = [int(metadata[0]['experimentLow']), int(metadata[0]['experimentHigh'])]
        is_run_independent_mc = experiments[0] == experiments[1] and experiments[0] in [0, 1002, 1003]
    else:
        is_run_independent_mc = False
    # now construct the recommendation
    result = {'tags': [], 'message': ''}
    # recommended release
    recommended_release = supported_release(release)
    if (release.startswith('release') or release.startswith('light')) and recommended_release != release:
        result['message'] += 'You are using %s, but we recommend to use %s.\n' % (release, recommended_release)
        result['release'] = recommended_release
    # tag to be used for (raw) data processing, depending on the release used for the processing
    # data_tags provides a mapping of supported release to the recommended data GT
    data_tags = {_supported_releases[-1]: 'data_reprocessing_proc9'}
    data_tag = data_tags.get(recommended_release, None)
    # tag to be used for run-dependent MC production, depending on the release used for the production
    # mc_tags provides a mapping of supported release to the recommended mc GT
    mc_tags = {_supported_releases[-1]: 'mc_production_mc12'}
    mc_tag = mc_tags.get(recommended_release, None)
    # tag to be used for analysis tools, depending on the release used for the analysis
    # analysis_tags provides a mapping of supported release to the recommended analysis GT
    analysis_tags = {_supported_releases[-1]: 'analysis_tools_light-2106-rhea'}
    analysis_tag = analysis_tags.get(recommended_release, None)
    # In case of B2BII we do not have metadata
    if metadata == []:
        result['tags'] = ['B2BII']
    else:
        # If we have a master GT this means either we are generating events
        # or we read a file that was produced with it. So we keep it as last GT.
        result['tags'] += existing_master_tags
        # Always use online GT
        result['tags'].insert(0, 'online')
        # Prepend the data GT if the file is not run-independent MC
        if metadata is None or not is_run_independent_mc:
            if data_tag:
                result['tags'].insert(0, data_tag)
            else:
                result['message'] += 'WARNING: There is no recommended data global tag.'
        # Prepend the MC GT if we generate events (no metadata)
        # or if we read a file that was produced with a MC GT
        if metadata is None or existing_mc_tags:
            if mc_tag:
                result['tags'].insert(0, mc_tag)
            else:
                result['message'] += 'WARNING: There is no recommended mc global tag.'
        # Prepend the analysis GT
        if analysis_tag:
            result['tags'].insert(0, analysis_tag)
        else:
            result['message'] += 'WARNING: There is no recommended analysis global tag.'
    # What else do we want to tell the user?
    if result['tags'] != base_tags:
        result['message'] += 'The recommended tags differ from the base tags: %s' % ' '.join(base_tags) + '\n'
        result['message'] += 'Use the default conditions configuration if you want to take the base tags.\n'
    return result
def plot_corelation_matrix(data):
    """
    Plot the correlation matrix of the dataset's numeric columns
    (float64/int64 only, with the first such column excluded) as a
    lower-triangle seaborn heatmap.

    :param data: pandas DataFrame.
    :return: the correlation matrix (DataFrame) that was plotted.
    """
    corr = data.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
    # Generate a mask for the upper triangle so each pair appears once.
    # Bug fix: the np.bool alias was removed in NumPy 1.24; use the builtin
    # bool dtype instead.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(22, 22))
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(
        corr, mask=mask, cmap=cmap, center=0.0,
        vmax=1, square=True, linewidths=.5, ax=ax
    )
    return corr
import urllib
from bs4 import BeautifulSoup
def product_by_id(product_id):
    """
    Get Product description by product id.

    :param product_id: Id of the product
    :return: (description, image_url) tuple when exactly one product matches;
        None (implicitly) when the page lists multiple items or none.
    """
    # Bug fix: urllib.urlopen is Python 2 only; in Python 3 the function
    # lives in urllib.request.
    from urllib.request import urlopen

    host = "https://cymax.com/"
    site_data = urlopen(host + str(product_id) + '--C0.htm').read()
    soup = BeautifulSoup(site_data)
    product = soup.find_all("div", class_="product-item")
    # if search result is more than one item,
    # it's most likely returning all items
    if len(product) == 1:
        product_description = product[0].find(class_="product-description").getText()
        product_img = product[0].find(class_="product-item-img")["src"]
        return product_description, product_img
def overlap(X, window_size, window_step):
    """
    Create an overlapped version of X

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        Input signal to window and overlap

    window_size : int
        Size of windows to take

    window_step : int
        Step size between windows

    Returns
    -------
    X_strided : shape=(n_windows, window_size)
        2D array of overlapped X
    """
    if window_size % 2 != 0:
        raise ValueError("Window size must be even!")
    # Zero-pad so the length becomes a multiple of the window size
    # (a full window of zeros is appended when it already is a multiple,
    # matching the historical behaviour).
    pad = np.zeros((window_size - len(X) % window_size))
    padded = np.hstack((X, pad))
    n_windows = (len(padded) - window_size) // window_step
    out = np.empty((n_windows, window_size), dtype=padded.dtype)
    for win_idx in range(n_windows):
        # "slide" the window along the samples
        start = win_idx * window_step
        out[win_idx] = padded[start:start + window_size]
    return out
def may_ozerov_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of May-Ozerov algorithm in depth 3 using Indyk-Motwani for NN search

    [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes.
    In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)

    expected weight distribution::

        +-------------------------+---------------------+---------------------+
        | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
        |         w - 2p          |         p           |         p           |
        +-------------------------+---------------------+---------------------+

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::

        >>> from .estimator import may_ozerov_depth_3_complexity
        >>> may_ozerov_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    r = _optimize_m4ri(n, k, mem)
    # Search windows and step sizes for the parameters (p, l, p2, p1);
    # the windows are widened whenever an optimum lies on the boundary.
    i_val = [20, 200, 20, 10]
    i_val_inc = [10, 10, 10, 10]
    params = [-1 for _ in range(4)]
    while True:
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
                k1 = (k + l) // 2
                for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), p + i_val[2], 2):
                    for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2),
                                    min(p2 + i_val[3], k1 - p2 // 2)):
                        # Base list size at the bottom of the depth-3 tree.
                        L1 = binom(k1, p1)
                        if log2(L1) > time:
                            continue
                        # Number of representations at the two merge levels.
                        reps1 = (binom(p2, p2 // 2) * binom(k1 - p2, p1 - p2 // 2)) ** 2
                        l1 = int(ceil(log2(reps1)))
                        if l1 > l:
                            continue
                        L12 = max(1, L1 ** 2 // 2 ** l1)
                        reps2 = (binom(p, p // 2) * binom(k1 - p, p2 - p // 2)) ** 2
                        L1234 = max(1, L12 ** 2 // 2 ** (l - l1))
                        tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r))
                        if tmp_mem > mem:
                            continue
                        # Expected number of permutations until the weight
                        # distribution matches.
                        Tp = max(
                            log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions,
                            0)
                        Tg = _gaussian_elimination_complexity(n, k, r)
                        # Cost of one merge tree: 4 level-1 merges, 2 level-2
                        # merges, and the final Indyk-Motwani NN search.
                        T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12,
                                                                                                      l - l1,
                                                                                                      hmap) + _indyk_motwani_complexity(
                            L1234,
                            n - k - l,
                            w - 2 * p,
                            hmap)
                        T_rep = int(ceil(2 ** (max(l - log2(reps2), 0) + 3 * max(l1 - log2(reps1), 0))))
                        tmp = Tp + log2(Tg + T_rep * T_tree)
                        tmp += __memory_access_cost(tmp_mem, memory_access)
                        if tmp < time:
                            time = tmp
                            memory = tmp_mem
                            params = [p, l, p2, p1]
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                # Optimum sits at the window edge: widen and search again.
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
    par = {"l": params[1], "p": params[0], "p1": params[3], "p2": params[2], "depth": 3}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
import json
import copy
def unpack_single_run_meta(storage, meta, molecules):
    """Expand a single-run metadata packet into per-molecule QC Schema inputs.

    Parameters
    ----------
    storage : DBSocket
        A live connection to the current database.
    meta : dict
        A JSON description of the metadata involved with the computation.
    molecules : list of str, dict
        A list of molecule IDs or full JSON molecules associated with the run.

    Returns
    -------
    tuple(dict, list)
        A dictionary of JSON task bodies keyed by formatted result index,
        and an (always empty) list.

    Examples
    --------
    >>> meta = {
        "procedure": "single",
        "driver": "energy",
        "method": "HF",
        "basis": "sto-3g",
        "options": "default",
        "program": "psi4",
    }
    >>> molecules = [{"geometry": [0, 0, 0], "symbols" : ["He"]}]
    >>> unpack_single_run_meta(storage, meta, molecules)
    """
    # Resolve molecule IDs / raw molecule documents through the storage layer.
    molecule_query = storage.mixed_molecule_get(dict(enumerate(molecules)))

    # Pull the keyword options for the requested program, dropping the
    # bookkeeping keys that are not part of the QC Schema keywords.
    if meta["options"] is None:
        keywords = {}
    else:
        keywords = storage.get_options(
            program=meta["program"], name=meta["options"], with_ids=False)["data"][0]
        keywords.pop("name")
        keywords.pop("program")

    # The shared header is serialized once; re-parsing it per molecule below
    # gives each task an independent deep copy.
    header = json.dumps({
        "schema_name": "qc_schema_input",
        "schema_version": 1,
        "program": meta["program"],
        "driver": meta["driver"],
        "keywords": keywords,
        "model": {
            "method": meta["method"],
            "basis": meta["basis"]
        },
        "qcfractal_tags": {
            "program": meta["program"],
            "options": meta["options"]
        }
    })

    tasks = {}
    indexer = copy.deepcopy(meta)
    for _, molecule in molecule_query["data"].items():
        body = json.loads(header)
        body["molecule"] = molecule
        indexer["molecule"] = molecule["id"]
        tasks[interface.schema.format_result_indices(indexer)] = body
    return tasks, []
def getKey(event):
    """Return the Key Identifier of the given event.

    Legacy ``keyIdentifier`` escape codes are normalised to ``"Escape"``.
    Available Codes: https://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413/keyset.html#KeySet-Set
    """
    if hasattr(event, "key"):
        return event.key
    if hasattr(event, "keyIdentifier"):
        identifier = event.keyIdentifier
        return "Escape" if identifier in ("Esc", "U+001B") else identifier
    return None
def get_long_season_name(short_name):
    """Convert a short season name of format 1718 to a long name like 2017-18.

    Past generations: sorry this doesn't work for 1999 and earlier!
    Future generations: sorry this doesn't work for the 2100s onwards!
    """
    start_year, end_year = short_name[:2], short_name[2:]
    return f"20{start_year}-{end_year}"
from typing import List
from typing import Any
from typing import Dict
def make_variables_snapshots(*, variables: List[Any]) -> str:
    """
    Make snapshots of specified variables.

    Only variables implementing ``RevertInterface`` are snapshotted, and each
    distinct object (by identity) is processed at most once. The snapshot name
    is obtained from the first eligible variable and reused for all of them.

    Parameters
    ----------
    variables : list
        Variables to make snapshots.

    Returns
    -------
    snapshot_name : str
        Snapshot name to be used (empty string if no variable is eligible).
    """
    seen_ids: Dict[int, bool] = {}
    snapshot_name: str = ''
    for target in variables:
        if not isinstance(target, RevertInterface):
            continue
        target_id: int = id(target)
        if target_id in seen_ids:
            continue
        if snapshot_name == '':
            snapshot_name = target._get_next_snapshot_name()
        target._run_all_make_snapshot_methods(
            snapshot_name=snapshot_name)
        seen_ids[target_id] = True
    return snapshot_name
def sext_to(value, n):
    """Extend `value` to length `n` by replicating the msb (`value[-1]`)."""
    extension_length = n - len(value)
    return sext(value, extension_length)
def parse_matching_pairs(pair_txt):
    """Get the mapping of image pairs for matching.

    Arg:
        pair_txt: file that contains image pairs and an essential
                  matrix, one per line, with format
                  image1 image2 sim w p q r x y z ess_vec

    Return:
        dict mapping (image1, image2) to a 3-tuple of
        (q=[wpqr], t=[xyz], 3x3 essential matrix), all float32.
    """
    im_pairs = {}
    # "with" guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on any exception).
    with open(pair_txt) as f:
        for line in f:
            cur = line.split()
            if not cur:
                continue  # tolerate blank/trailing lines
            im1, im2 = cur[0], cur[1]
            q = np.array([float(i) for i in cur[3:7]], dtype=np.float32)
            t = np.array([float(i) for i in cur[7:10]], dtype=np.float32)
            ess_mat = np.array([float(i) for i in cur[10:19]], dtype=np.float32).reshape(3, 3)
            im_pairs[(im1, im2)] = (q, t, ess_mat)
    return im_pairs
import threading
from typing import OrderedDict
def compile_repo_info(repos, all=False, fetch=False):
    """Compile all the information about found repos.

    Each repo is processed in its own worker thread; workers accumulate their
    results in the module-level ``git_info`` dict, which is then sorted by key
    and rendered into a table.

    :param repos: iterable of repos understood by ``process_repo``
    :param all: passed through to ``create_repo_table``
    :param fetch: whether workers should fetch before reporting status
    :return: the formatted output table from ``create_repo_table``
    """
    # typing.OrderedDict (imported at module top) is a generic alias and
    # cannot be instantiated; use the real container from collections.
    from collections import OrderedDict as _OrderedDict

    # global to allow worker threads to share their results
    global git_info
    git_info = {}
    threads = []
    for repo in repos:
        worker = threading.Thread(target=process_repo, args=(repo, fetch))
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()
    # Sort by repo name for deterministic table output.
    git_info = _OrderedDict(sorted(git_info.items(), key=lambda item: item[0]))
    output_table = create_repo_table(git_info, fetch, all)
    return output_table
def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
    """
    Computes the multidimensional histogram of some data.

    Note:
        Deprecated numpy argument `normed` is not supported.

    Args:
        x (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the x
            coordinates of the points to be histogrammed.
        y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y
            coordinates of the points to be histogrammed.
        bins (Union[int, tuple, list], optional): The bin specification:
            If int, the number of bins for the two dimensions ``(nx=ny=bins)``.
            If array_like, the bin edges for the two dimensions ``(x_edges=y_edges=bins)``.
            If [int, int], the number of bins in each dimension ``(nx, ny = bins)``.
            If [array, array], the bin edges in each dimension ``(x_edges, y_edges = bins)``.
            A combination [int, array] or [array, int], where int is the number of bins and
            array is the bin edges.
        range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost
            edges of the bins along each dimension (if not specified explicitly in the bins
            parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
            will be considered outliers and not tallied in the histogram.
        weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
            `w_i` weighing each sample `(x_i, y_i)`.
        density (boolean, optional): If False, the default, returns the number of samples
            in each bin. If True, returns the probability density function at the bin,
            ``bin_count / sample_count / bin_volume``.

    Returns:
        (Tensor, Tensor, Tensor), the values of the bi-directional histogram and the bin edges
        along the first and second dimensions.

    Raises:
        ValueError: if `range` does not have the same size as the number of samples.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> x = np.arange(5)
        >>> y = np.arange(2, 7)
        >>> print(np.histogram2d(x, y, bins=(2, 3)))
        (Tensor(shape=[2, 3], dtype=Float32, value=
        [[ 2.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]]),
        Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 2.00000000e+00, 4.00000000e+00]),
        Tensor(shape=[4], dtype=Float32, value=
        [ 2.00000000e+00, 3.33333349e+00, 4.66666698e+00, 6.00000000e+00]))
    """
    # Delegate to the N-dimensional histogram and unpack the two edge arrays.
    count, bin_edges = histogramdd((x, y), bins=bins, range=range, weights=weights, density=density)
    x_edges, y_edges = bin_edges[0], bin_edges[1]
    return count, x_edges, y_edges
def get_project_arg_details():
    """
    **get_project_arg_details**
        obtains project details from request arguments and then returns them

    :return: tuple of (cell, company, email, facebook, freelancing, names,
        project_description, project_id, project_status, project_title,
        project_type, start_date, twitter, website)
    """
    # Single lookup helper keeps the argument names next to their position
    # in the returned tuple.
    arg = request.args.get
    return (arg('cell'), arg('company'), arg('email'), arg('facebook'),
            arg('freelancing'), arg('names'), arg('project-description'),
            arg('project_id'), arg('project-status'), arg('project-title'),
            arg('project-type'), arg('start-date'), arg('twitter'),
            arg('website'))
def vibronic_ls(x, s, sigma, gamma, e_vib, kt=0, n_max=None, m_max=None):
    """
    Produce a vibronic (Frank-Condom) lineshape.

    The vibronic transition amplitude computed relative to 0 (ie: relative to
    the electronic transition energy). Lines are broadened using a voigt
    profile.

    Parameters
    ----------
    x : np.ndarray
        Energy values. x==0 is the 0->0 line (no vibrational quanta change)
    s : float
        Huang-Rhys parameter S
    e_vib : float
        Energy of a vibrational quanta
    sigma : float
        Width (1/e^2) of gaussian component
    gamma : float
        Width of Lorententzian component
    kt : float
        Thermal energy. If >0, will compute transitions from vibrationally
        excited states. Default 0.
    n_max : int
        Largest vibrational number in final manifold. If not supplied, a guess
        is provided, but may not be adequate.
    m_max : int
        Largest vibrational number in orginal manifold. If not supplied, a guess
        is provided, but may not be adequate.

    Returns
    -------
    np.ndarray
        The summed lineshape evaluated at `x` (same shape as `x`).
    """
    #determine n, m, values
    if m_max is None:
        m_max = 0 if kt==0 else int(kt/e_vib*10) # found that factor with my thumb
    if n_max is None:
        n_max = m_max + int(10*s)
    n = np.arange(n_max+1)
    m = np.arange(m_max+1)
    # Transition intensities for every m -> n pair (thermally weighted via kt).
    fcf = vibronic_intensity(m, n, s, e_vib, kt)
    n, m = np.meshgrid(n, m)
    # Net change in vibrational quanta for each transition; sets the line offset.
    dvib = n-m
    y = np.zeros_like(x)
    # Accumulate one Voigt line per transition, centred at d*e_vib with
    # amplitude given by the corresponding intensity.
    for d, f in zip(dvib.flatten(), fcf.flatten()):
        y += voigt(x, f, d*e_vib, sigma, gamma)
    return y
import re
import requests
def handleFunction(command,func):
    """
    Dispatch *command* to the handler selected by *func*.

    Supported handlers: "calculate" (evaluates the expression via ``eval``),
    "translate" (translates text into the language named by the last token),
    "temperature"/"weather" (temperature lookup for the city named by the
    last token), plus horoscope queries matched against ``ZODIAC_SIGNS``.

    Returns the handler's result (string, or eval result for "calculate"),
    or None when *func* is unrecognised or any handler raises.
    """
    try:
        # re.search(r"(?i)"+func,' '.join(SET_OF_FUNCTIONS))
        if("calculate" == func.lower()):
            # NOTE(review): this unpack expects exactly two tokens
            # ("calculate <expr>"); more tokens raise and fall through to None.
            func,command = command.split()
            try:
                # SECURITY: eval() on user-supplied text executes arbitrary
                # Python — consider ast.literal_eval or a safe math parser.
                return eval(command)
            except:
                return "Sorry! We are unable to calculate this expression."
        elif("translate" == func.lower()):
            command = re.split(r'\s',command)
            # Last token names the target language; findISO maps it to an ISO code.
            isoLan = findISO(command[len(command)-1])
            if isoLan == None:
                translation = "Sorry! we are unable to translate into this language"
                return translation
            translator= Translator(to_lang=isoLan)
            # Tokens between the command word and the trailing "<to> <language>"
            # pair form the text to translate.
            translation = translator.translate(' '.join(command[1:len(command)-2]))
            return translation
        elif("temperature" == func.lower() or "weather" == func.lower()):
            command = re.split(r'\s',command)
            cityName = (command[len(command)-1]).capitalize()
            temp = getTemp(cityName)
            if temp:
                # temp appears to be (kelvin, description) — TODO confirm getTemp.
                temp_in_celcius = "It is "+str(round(temp[0]-273,2))+" C, "+temp[1]
                return temp_in_celcius
            return "Sorry we are unable to calculate temperature at this moment. Please try after sometime."
        elif re.search(r"(.)* ?horoscope ?(.)*",command,re.I):
            for sign in ZODIAC_SIGNS:
                if re.search(r'\b'+sign+r'\b',command,re.I):
                    zodiac_sign = re.search(r'\b'+sign+r'\b',command,re.I).group(0)
                    API_response = requests.get(url = "http://horoscope-api.herokuapp.com/horoscope/today/"+zodiac_sign)
                    return API_response.json()['horoscope']
            return "Please choose appropriate zodiac sign"
        else:
            return None
    except:
        # Bare except: any unexpected failure yields None rather than crashing.
        return None
import json
def auth():
    """Return worker_id. !!!currently!!! does not have auth logic.

    On failure responds 401 with a JSON error body; on success returns the
    ``data`` payload of ``fl_events_auth`` as JSON.
    """
    status_code = 200
    try:
        auth_token = request.args.get("auth_token", None)
        resp = fl_events_auth({"auth_token": auth_token}, None)
        resp = json.loads(resp)["data"]
    except Exception as e:
        status_code = 401
        # str(e): exception objects are not JSON serializable, so the
        # original json.dumps(resp) below raised TypeError on this path.
        resp = {"error_auth_failed": str(e)}
    return Response(json.dumps(resp), status=status_code, mimetype="application/json")
def redo_a_task():
    """Mark a completed task as unfinished so the user can complete it again.

    Aborts with 403 when the fetched user does not match the session user.
    On POST, flips ``completed`` to False for the submitted task id, then
    re-renders the todo page in all cases.
    """
    user = get_user_info()
    # Only the owner of the session may modify their own tasks.
    if user['id'] != g.user['id']:
        abort(403)
    if request.method == 'POST':
        # Task id posted by the "redo" form control.
        redo = request.form['redoTask']
        # get the database connection
        with db.get_db() as con:
            # Begin the transaction
            with con.cursor() as cur:
                # Flag the task as not completed so it shows up as open again.
                cur.execute("""UPDATE todos
                SET completed = False
                WHERE id = %s
                """,
                (redo, )
                )
                con.commit()
    # Always re-render the full list (GET and POST alike).
    todos = display_todos()
    return render_template("todo.html", todos=todos, user=user)
def get_templates_environment(templates_dir):
    """Create and return a Jinja environment to deal with the templates."""
    loader = PackageLoader('charmcraft', 'templates/{}'.format(templates_dir))
    return Environment(
        loader=loader,
        autoescape=False,  # no need to escape things here :-)
        keep_trailing_newline=True,  # they're not text files if they don't end in newline!
        optimized=False,  # optimization doesn't make sense for one-offs
        undefined=StrictUndefined,  # fail on undefined
    )
import numpy as np
def return_U_given_sinusoidal_u1(i,t,X,u1,**kwargs):
    """
    Takes in current step (i), numpy.ndarray of time (t) of shape (N,), state numpy.ndarray (X) of shape (8,), and previous input scalar u1 and returns the input U (shape (2,)) for this time step.

    The second activation u2 is solved from the backstepping constraint
    Coefficient1*u1 + Coefficient2*u2 == Constraint1 at time t[i].

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    **kwargs
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    1) Bounds - must be a (2,2) list with each row in ascending order. Default is given by Activation_Bounds.
    """
    # Input validation: shapes and (string-compared) types of t, X, u1.
    assert (np.shape(t) == (len(t),)) and (str(type(t)) == "<class 'numpy.ndarray'>"),\
        "t must be a numpy.ndarray of shape (len(t),)."
    assert np.shape(X) == (8,) and str(type(X)) == "<class 'numpy.ndarray'>", "X must be a (8,) numpy.ndarray"
    assert str(type(u1)) in ["<class 'int'>","<class 'float'>","<class 'numpy.float64'>"], \
        "u1 must be an int or a float."
    Bounds = kwargs.get("Bounds",Activation_Bounds)
    assert type(Bounds) == list and np.shape(Bounds) == (2,2), "Bounds for Muscle Activation Control must be a (2,2) list."
    assert Bounds[0][0]<Bounds[0][1],"Each set of bounds must be in ascending order."
    assert Bounds[1][0]<Bounds[1][1],"Each set of bounds must be in ascending order."
    # Constraint variables for the current time step.
    Coefficient1,Coefficient2,Constraint1 = return_constraint_variables(t[i],X)
    assert Coefficient1!=0 and Coefficient2!=0, "Error with Coefficients. Shouldn't both be zero"
    # Feasibility: the sign pattern of the coefficients must allow a solution.
    if Constraint1 < 0:
        assert not(Coefficient1 > 0 and Coefficient2 > 0), "Infeasible activations. (Constraint1 < 0, Coefficient1 > 0, Coefficient2 > 0)"
    if Constraint1 > 0:
        assert not(Coefficient1 < 0 and Coefficient2 < 0), "Infeasible activations. (Constraint1 > 0, Coefficient1 < 0, Coefficient2 < 0)"
    # Solve the linear constraint for u2 given the chosen u1.
    u2 = (Constraint1 - Coefficient1*u1)/Coefficient2
    NextU = np.array([u1,u2])
    assert (Bounds[0][0]<=u1<=Bounds[0][1]) and (Bounds[1][0]<=u2<=Bounds[1][1]), "Error! Choice of u1 results in infeasible activation along backstepping constraint."
    return(NextU)
def verify(s):
    """
    Check if the cube definition string s represents a solvable cube.
    @param s is the cube definition string , see {@link Facelet}
    @return 0: Cube is solvable<br>
            -1: There is not exactly one facelet of each colour<br>
            -2: Not all 12 edges exist exactly once<br>
            -3: Flip error: One edge has to be flipped<br>
            -4: Not all 8 corners exist exactly once<br>
            -5: Twist error: One corner has to be twisted<br>
            -6: Parity error: Two corners or two edges have to be exchanged
    """
    tally = [0] * 6
    try:
        for i in range(54):
            facelet = s[i]
            assert facelet in colors
            tally[colors[facelet]] += 1
    except:
        # Any unknown facelet (or a too-short string) is a colour-count error.
        return -1
    if any(c != 9 for c in tally):
        return -1
    # Structural checks (edges, corners, flip/twist/parity) are delegated
    # to the cubie-level representation.
    return FaceCube(s).toCubieCube().verify()
from typing import cast
import copy
def copy_jsons(o: JSONs) -> MutableJSONs:
    """
    Return a new, mutable deep copy of a JSON array.

    >>> a = [{'a': [1, 2]}, {'b': 3}]
    >>> b = copy_jsons(a)
    >>> b[0]['a'].append(3)
    >>> b
    [{'a': [1, 2, 3]}, {'b': 3}]
    >>> a
    [{'a': [1, 2]}, {'b': 3}]
    """
    duplicate = copy.deepcopy(o)
    return cast(MutableJSONs, duplicate)
def string_to_dot(typed_value):
    # type: (TypedValue) -> Tuple[List[str], List[str]]
    """Serialize a String object to Graphviz format (nodes, edges)."""
    # Escape embedded double quotes so the record label stays valid DOT.
    escaped = f'{typed_value.value}'.replace('"', r'\"')
    node = f'_{typed_value.name} [shape="record", color="#A0A0A0", label="{{{{String | {escaped}}}}}"]'
    return [node], []
def resolve_country_subdivisions(_, info, alpha_2):
    """
    Country subdivisions resolver.

    :param info: GraphQL request context
    :param alpha_2: ISO 3166 alpha2 country code
    :return: subdivisions for the given country
    """
    return CountrySubdivision.list_for_country(country_code=alpha_2)
from typing import Tuple
def bool2bson(val: bool) -> Tuple[bytes, bytes]:
    """Encode bool as BSON Boolean (type tag, payload byte)."""
    assert isinstance(val, bool)
    payload = ONE if val else ZERO
    return BSON_BOOLEAN, payload
def isbn13_to_isbn10 (isbn_str, cleanse=True):
    """
    Convert an ISBN-13 to an ISBN-10.

    :Parameters:
        isbn_str : string
            The ISBN as a string, e.g. " 0-940016-73-6 ". It should be 13
            digits after normalisation.
        cleanse : boolean
            If true, formatting will be stripped from the ISBN before
            conversion.

    :Returns:
        A normalaised ISBN-10, e.g. "0940016736", or ``None`` if no conversion
        is possible.

    For example::

        >>> isbn13_to_isbn10 ("978-0-940016-73-6")
        '0940016737'
        >>> isbn13_to_isbn10 ("9780940016736", cleanse=False)
        '0940016737'
        >>> isbn13_to_isbn10 ("979-1-234-56789-6")
        >>> isbn13_to_isbn10 ("978-3-8055-7505-8")
        '380557505X'
        >>> isbn13_to_isbn10 ("978-0-940016-73-6", cleanse=False)
        Traceback (most recent call last):
        ...
        AssertionError: input '978-0-940016-73-6' is not 13 digits
    """
    ## Preconditions:
    if cleanse:
        isbn_str = clean_isbn (isbn_str)
    assert (len (isbn_str) == 13), "input '%s' is not 13 digits" % isbn_str
    # Only the 978 "Bookland" prefix maps back to an ISBN-10.
    if not isbn_str.startswith ('978'):
        return None
    ## Main:
    # Drop the 978 prefix and the ISBN-13 check digit, then append the
    # freshly computed ISBN-10 check digit.
    core = isbn_str[3:-1]
    isbn_str = core + isbn10_checksum (core)
    ## Return:
    assert (len (isbn_str) == 10), "output ISBN-10 is '%s'" % isbn_str
    return isbn_str
import json
def import_data():
    """Import datasets to internal memory.

    Returns the names, issues, disasters and options datasets, in that order.
    """
    paths = ('data/names.json', 'data/issues.json',
             'data/disasters.json', 'data/options.json')
    loaded = []
    for path in paths:
        with open(path) as handle:
            loaded.append(json.load(handle))
    return tuple(loaded)
def synthesize_ntf_dunn(order=3, osr=64, H_inf=1.5):
    """
    Alias of :func:`ntf_dunn`

    .. deprecated:: 0.11.0
        Function has been moved to the :mod:`NTFdesign` module with
        name :func:`ntf_dunn`.
    """
    deprecation_message = ("Function superseded by ntf_dunn in "
                           "NTFdesign module")
    warn(deprecation_message, PyDsmDeprecationWarning)
    return ntf_dunn(order, osr, H_inf)
def schedule_notification() -> str:
    """Refresh the global ``notifications`` list with news, COVID statistics
    and weather items.

    A news item is inserted only if no notification with the same title
    already exists (and the list holds fewer than 6 items); the COVID item is
    inserted once; the weather item is updated in place when present,
    otherwise inserted. Returns "Passed" when the refresh completes.
    """
    #NEWS
    news_title, news_content = get_news()
    notif_exists = False
    for notif in notifications:
        if notif["title"] == news_title:
            notif_exists = True
            break
    if not notif_exists:
        # Cap the column at 6 items; newest news goes to the top.
        if len(notifications) <6:
            notifications.insert(0,{"title":news_title, "content":news_content})
    #COVID NEWS
    covid_news_title, covid_news_content = get_covid_news()
    notif_covid_exists = False
    for notif in notifications:
        if notif["title"] == "COVID-19 Statistics":
            notif_covid_exists = True
            break
    if not notif_covid_exists:
        notifications.insert(0,{"title":covid_news_title, "content":covid_news_content})
    #WEATHER
    weather_notif_exists = False
    notif_content = get_forecast("Exeter")
    for notif in notifications:
        if notif["title"] == "Weather":
            # Update the existing weather card in place instead of duplicating it.
            weather_notif_exists = True
            notif["content"] = notif_content
            break
    if not weather_notif_exists:
        notifications.insert(0,{"title":"Weather", "content":notif_content})
    return "Passed"
def _parse_hexblob(blob: str) -> bytes:
    """
    Decode a hex string into raw bytes.

    Binary conversions from hexstring are handled by bytes(hstr2bin()).

    :param blob: hex-encoded string
    :return: decoded bytes
    """
    decoded = hstr2bin(blob)
    return bytes(decoded)
from typing import List
from typing import Dict
def _row_to_col_index_dict(headers: List[Cell]) -> Dict[str, int]:
    """Calculate a mapping of cell contents to column index.

    Args:
        headers: Header-row cells whose values name the worksheet columns.

    Returns:
        dict[str, int]: {MFP nutrient name: worksheet column index} mapping.
    """
    # Cell columns are 1-based; subtract 1 for a 0-based worksheet index.
    return {h.value: h.col - 1 for h in headers}
def preprocess_cat_cols(X_train, y_train, cat_cols=None, X_test=None,
                        one_hot_max_size=1, learning_task=LearningTask.CLASSIFICATION):
    """Preprocess categorial columns(cat_cols) in X_train
    and X_test(if specified) with cat-counting(the same as in catboost)
    or with one-hot-encoding,
    depends on number of unique labels(one_hot_max_size)

    Args:
        X_train (numpy.ndarray): train dataset
        y_train (numpy.ndarray): train labels
        cat_cols (None or list of columns indices): categorical columns
        X_test (None or numpy.ndarray): test dataset
        one_hot_max_size(int): max unique labels for one-hot-encoding
        learning_task (LearningTask): a type of learning task

    Returns:
        numpy.ndarray(, numpy.ndarray): transformed train and test datasets or
        only train, depends on X_test is None or not
    """
    # None instead of a mutable [] default avoids the shared-default pitfall.
    if cat_cols is None:
        cat_cols = []
    # Columns with few unique labels get one-hot encoding; the rest get
    # target-based counting (the catboost scheme).
    one_hot_cols = [col for col in cat_cols
                    if len(np.unique(X_train[:, col])) <= one_hot_max_size]
    cat_count_cols = list(set(cat_cols) - set(one_hot_cols))
    preprocess_counter_cols(X_train, y_train, cat_count_cols,
                            X_test, learning_task=learning_task)
    X_train, X_test = preprocess_one_hot_cols(X_train, one_hot_cols, X_test)
    if X_test is None:
        return X_train
    else:
        return X_train, X_test
def get_start_time(period, time_zone=None):
    """Return the start timestamp ('%Y-%m-%d %H:%M:%S') for *period*.

    'thisyear' maps to Jan 1st of the current year; keys of DAYS_MAPPER map
    to today minus that many days; anything else falls back to one day ago.
    """
    now = pd.Timestamp.today(tz=time_zone or 'Europe/Stockholm')
    fmt = '%Y-%m-%d %H:%M:%S'
    if period == 'thisyear':
        return pd.Timestamp(f'{now.year}0101').strftime(fmt)
    if period in DAYS_MAPPER:
        return (now - pd.Timedelta(days=DAYS_MAPPER.get(period))).strftime(fmt)
    # Default behaves like "day": one day back from now.
    return (now - pd.Timedelta(days=1)).strftime(fmt)
def setup_textbox(parent,
                  font="monospace",
                  width=70, height=12):
    """Build a Text widget inside *parent* wired to horizontal and vertical
    scrollbars, and return the Text widget."""
    # Scrollbars are packed first so they claim the window edges.
    h_scroll = ttk.Scrollbar(parent, orient="horizontal")
    h_scroll.pack(side=tk.BOTTOM, fill=tk.X)
    v_scroll = ttk.Scrollbar(parent)
    v_scroll.pack(side=tk.RIGHT, fill=tk.Y)
    textbox = tk.Text(parent,
                      xscrollcommand=h_scroll.set,
                      yscrollcommand=v_scroll.set,
                      font=font,
                      width=width, height=height,
                      wrap=tk.NONE)
    # Tab moves focus instead of inserting a tab character.
    textbox.bind("<Tab>", focus_next_widget)
    textbox.pack(side="top", fill="both", expand=True)
    # Complete the two-way binding between scrollbars and text view.
    h_scroll.config(command=textbox.xview)
    v_scroll.config(command=textbox.yview)
    return textbox
def generate_mock_statuses(naive_dt=True, datetime_fixtures=None):
    """
    Return a list of mock statuses, one per datetime fixture, with ids
    starting at 1. Useful for mocking an API response in ``Timeline``
    class testing.

    May be set to have a utc timezone with a ``False`` value for the
    ``naive_dt`` argument.
    (NOTE(review): naive_dt is not referenced in this function body —
    presumably consumed by generate_datetime_fixtures elsewhere; verify.)
    """
    if datetime_fixtures is None:
        datetime_fixtures = generate_datetime_fixtures()
    mock_statuses = []
    # enumerate replaces the manual len()+1 id counter.
    for identifier, dt in enumerate(datetime_fixtures, start=1):
        mock_status_text = 'Content for tweet mock status {0}'.format(identifier)
        mock_status = generate_mock_status(identifier, mock_status_text)
        mock_status.created_at = dt
        mock_statuses.append(mock_status)
    return mock_statuses
import os
def get_current_ingest_id():
    """Get the uuid of the active ingest.

    :return: the id of the active ingest (None when the variable is unset)
    :rtype: uuid
    """
    return os.environ.get('JETA_CURRENT_INGEST_ID')
import hashlib
def hashlib_mapper(algo):
    """
    Return a fresh hashlib hash object for the named algorithm.

    :param algo: string naming the algorithm, case-insensitive (e.g. "SHA256")
    :return: a new hash object from hashlib
    :raises Exception: if the algorithm is not supported

    algorithms available in python3 but not in python2:
    sha3_224 sha3_256, sha3_384, blake2b, blake2s, sha3_512, shake_256, shake_128
    """
    # Whitelist of supported constructors; replaces a 30-line if/elif chain.
    supported = {
        "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
        "sha3_224", "sha3_256", "sha3_384", "sha3_512",
        "blake2b", "blake2s", "shake_128", "shake_256",
    }
    algo = algo.lower()
    if algo not in supported:
        raise Exception("Unsupported hashing algorithm: %s" % algo)
    # hashlib exposes each guaranteed algorithm as a same-named constructor.
    return getattr(hashlib, algo)()
def partition_cells(config, cells, edges):
    """ Partition a set of cells

    - config -- unused in this body; kept for interface compatibility — TODO confirm
    - cells -- A DataFrame of cells (columns used: t, tw, n, e, S, B, w)
    - edges -- a list of edge times delimiting boundaries between cells

    Returns a DataFrame of combined cells, with times and widths adjusted to account for missing cells
    """
    # get indices of cell indexes just beyond each edge time
    ii = np.searchsorted(cells.t, edges)
    # Get the appropriate boundary times to apply to combined cells
    # this is complicated by missing cells, need to put boundary in gaps if ncessary
    ileft = ii[:-1]
    cleft = cells.iloc[ileft ]
    tleft  = (cleft.t - cleft.tw/2).values
    iright = ii[1:]-1
    cright = cells.iloc[iright ]
    tright = (cright.t+cright.tw/2).values
    # Midpoints between a partition's right edge and the next one's left edge.
    betweens   = 0.5*(tleft[1:] + tright[:-1])
    tboundary = np.append(np.insert(betweens, 0, tleft[0]), tright[-1])

    # now combine the cells,
    newcells = []
    for k in range(len(ii)-1):
        a,b = ii[k:k+2]
        check = cells.iloc[a:b]
        # Rows with NaN 'n' are missing cells; exclude them from aggregation.
        subset = check[~pd.isna(check.n)]
        # ca, cb = subset.iloc[0], subset.iloc[-1]
        # newcell = dict(t= 0.5*(ca.t-ca.tw/2 + cb.t+cb.tw/2) )
        tl, tr = tboundary[k:k+2]
        newcell = dict(t=0.5*(tl+tr),  tw=tr-tl)
        # Sum counts per column; 'e' becomes a mean over the surviving cells.
        for col in 'e n S B'.split():
            newcell[col] = subset[col].sum()
        newcell['e'] /= len(subset)
        # Concatenate the per-cell weight arrays into one.
        newcell['w'] = np.concatenate(list(subset.w.values)) #np.array(w, np.uint8)
        newcells.append(newcell)
    return pd.DataFrame(newcells)
from typing import List
from typing import Union
def timing_stats(results: List[Result]) -> List[str]:
    """Calculate and format lines with timings across completed results.

    Produces a header line plus one line each for connect, TTFB and total
    times, with mean, min, the 50/80/95/99th percentiles and max (all in ms;
    '-' where no samples exist). ``mean`` and ``Result`` come from elsewhere
    in this module.
    """
    def percentile(data: List[float], percent: int) -> Union[float, str]:
        if not data:
            return '-'
        data_sorted = sorted(data)
        # NOTE(review): index max(round(p/100*n + 0.5), 2) - 2 is an unusual
        # percentile formula — confirm it matches the intended definition.
        pos = max(int(round(percent / 100 * len(data) + 0.5)), 2)
        return data_sorted[pos - 2]

    def format_line(name: str, *values: Union[float, int, str]) -> str:
        # Floats are rounded to whole ms; '-' placeholders are right-aligned.
        line = f'{name:<10s}'
        for value in values:
            if isinstance(value, float):
                line += f' {value:6.0f}'
            else:
                line += f' {value:>6}'
        return line

    # Convert seconds to milliseconds, skipping results without a measurement.
    total_times = [r.total_time * 1000 for r in results if r.total_time]
    ttfb_times = [r.ttfb_time * 1000 for r in results if r.ttfb_time]
    conn_times = [r.conn_time * 1000 for r in results if r.conn_time]
    percentiles = (50, 80, 95, 99)
    lines = [
        format_line(
            '', 'Mean', 'Min', *(f'{p}%' for p in percentiles), 'Max',
        ),
        format_line(
            'Connect:',
            mean(conn_times) if conn_times else '-',
            min(conn_times) if conn_times else '-',
            *(percentile(conn_times, p) for p in percentiles),
            max(conn_times) if conn_times else '-',
        ),
        format_line(
            'TTFB:',
            mean(ttfb_times) if ttfb_times else '-',
            min(ttfb_times) if ttfb_times else '-',
            *(percentile(ttfb_times, p) for p in percentiles),
            max(ttfb_times) if ttfb_times else '-',
        ),
        format_line(
            'Total:',
            mean(total_times) if total_times else '-',
            min(total_times) if total_times else '-',
            *(percentile(total_times, p) for p in percentiles),
            max(total_times) if total_times else '-',
        ),
    ]
    return lines
import logging
def analyse_gamma(
    snps_object,
    output_summary_filename,
    output_logger,
    SWEEPS,
    TUNE,
    CHAINS,
    CORES,
    N_1kG,
    fix_intercept=False,
):
    """
    Bayesian hierarchical regression on the dataset with the gamma model.

    :param snps_object: snps instance
    :param output_summary_filename: output summary table
    :param output_logger: logger instance
    :param SWEEPS: samples for each chain
    :param TUNE: burn-in samples
    :param CHAINS: number of chains
    :param CORES: number of cores
    :param N1kG: number of SNPs
    :param fix_intercept: if True the model fixes the intercept.
    :return: pandas DataFrame with one row per gene: P(beta > mi/N_1kG),
        posterior summaries of the per-gene beta, and the heritability mi.
    """
    snp_dataset = snps_object.table.copy().reset_index(drop=True)
    n_patients = snps_object.n_patients
    nSNP = snps_object.n_snps
    # to run the regression as a mixed effect model, I need a vector (cat) to assign each SNP to its gene
    idx = 0
    cat = np.zeros(nSNP)
    Mg = []
    genes = []
    # g2 are all the SNPs inside the gene
    for k2, g2 in snp_dataset.groupby("gene"):
        cat[g2.index] = int(idx)
        idx += 1
        genes.append(k2)
        Mg.append(len(g2))
    cat = cat.astype(int)
    logging.info("Model Evaluation Started")
    logging.info("Average stats: %f" % np.mean(snps_object.table["stats"].values))
    # Hierarchical model: per-gene Gamma effect sizes, Beta-distributed
    # total heritability mi, and a near-fixed intercept e.
    with pm.Model() as model:
        e = pm.Normal("e", mu=1, sd=0.001)
        mi = pm.Beta("mi", 1, 1)
        beta = pm.Gamma("beta", alpha=mi, beta=N_1kG, shape=idx)
        # diff > 0 means the gene's effect exceeds the genome-wide average.
        diff = pm.Deterministic("diff", subtract(beta, mi / N_1kG))
        herTOT = pm.Deterministic("herTOT", tt.sum(beta * Mg))
        if fix_intercept:
            fixed_variable = pm.Normal(
                "fxd",
                mu=(n_patients) * beta[cat] * (snps_object.table["l"]) + 1,
                sd=np.sqrt(np.asarray(snps_object.table["l"])),
                observed=snps_object.table["stats"],
            )
        else:
            fixed_variable = pm.Normal(
                "fxd",
                mu=(n_patients) * beta[cat] * (snps_object.table["l"]) + e,
                sd=np.sqrt(np.asarray(snps_object.table["l"])),
                observed=snps_object.table["stats"],
            )
        # step = pm.Metropolis()
        trace = pm.sample(
            SWEEPS,
            tune=TUNE,
            chains=CHAINS,
            cores=CORES,
            nuts_kwargs=dict(target_accept=0.90),
        )
    # Convergence diagnostic needs at least two chains.
    if CHAINS > 1:
        logging.info("evaluating Gelman-Rubin")
        GR = pm.diagnostics.gelman_rubin(trace, varnames=["mi"])
        output_logger.info(
            "DIAGNOSTIC (gelman-rubin) "
            + str(GR)
            + "\n"
            + "(If this number is >> 1 the method has some convergence problem, \n try increasing the number of s and b)"
        )
    logging.info("Writing output")
    # save general stats to summary file
    su = pm.summary(
        trace,
        varnames=["mi", "herTOT", "e"],
        extend=True,
        stat_funcs=[trace_median, trace_quantiles],
    )
    su.to_csv(output_summary_filename, sep=",", mode="w")
    d = {}
    # Rescale beta samples back to the per-SNP scale.
    d["beta"] = N_1kG * trace["beta"]
    e_GW = np.mean(trace["e"])
    e_GW_sd = np.std(trace["e"])
    output_logger.info(" Intercept: " + str(e_GW) + " (sd= " + str(e_GW_sd) + ")\n")
    herTOT = np.median(trace["herTOT"])
    herTOT_sd = np.std(trace["herTOT"])
    output_logger.info(
        " heritability from genes: " + str(herTOT) + " (sd= " + str(herTOT_sd) + ")\n"
    )
    mi_mean = np.mean(trace["mi"], axis=0)
    mi_median = np.median(trace["mi"], axis=0)
    mi_std = np.std(trace["mi"], axis=0)
    mi_5perc = np.percentile(trace["mi"], 5, axis=0)
    mi_95perc = np.percentile(trace["mi"], 95, axis=0)
    output_logger.info(
        " Heritability: "
        + str(mi_mean)
        + " (std= "
        + str(mi_std)
        + ")\n"
        + "[ 5th perc= "
        + str(mi_5perc)
        + ","
        + " 95 perc= "
        + str(mi_95perc)
        + "]\n"
    )
    # Posterior probability that each gene's effect exceeds the average.
    Prob = (np.sum(trace["diff"] > 0, axis=0) / len(trace["diff"]))[:, np.newaxis]
    data = np.hstack((np.asarray(genes)[:, np.newaxis], Prob))
    df = pd.DataFrame(data, columns=("name", "P"))
    df["bg_mean"] = np.mean(d["beta"], axis=0)[:, np.newaxis]
    df["bg_median"] = np.median(d["beta"], axis=0)[:, np.newaxis]
    df["bg_var"] = np.var(d["beta"], axis=0)[:, np.newaxis]
    df["bg_5perc"] = np.percentile(d["beta"], 5, axis=0)[:, np.newaxis]
    df["bg_95perc"] = np.percentile(d["beta"], 95, axis=0)[:, np.newaxis]
    df["mi_mean"] = mi_mean
    df["mi_median"] = mi_median
    return df
def dpc_variant_to_string(variant: _DV) -> str:
    """Convert a Basix DPCVariant enum to a string.

    Args:
        variant: The DPC variant

    Returns:
        The DPC variant as a string.
    """
    variant_name = variant.name
    return variant_name
import secrets
from datetime import datetime
async def refresh_token(request: web.Request) -> web.Response:
    """Refresh an existing API token.

    Expects a JSON body ``{"token": "<current token>"}``.  On success the
    matching row receives a fresh random token with a one-day expiry and the
    new value is returned as ``{"token": "<new token>"}``.  A malformed body
    or an unknown token yields a 400 response.
    """
    try:
        payload = await request.json()
    except Exception:
        payload = {}
    if "token" not in payload:
        return web.json_response({"error": "Wrong data. Provide token."}, status=400)
    session = sessionmaker(bind=request.app["db_engine"])()
    row = session.query(Token).filter(Token.token == payload["token"]).first()
    if row is None:
        session.close()
        return web.json_response({"error": "Token not found. Provide correct token."}, status=400)
    # Rotate the token value and push the expiry one day out.
    new_token = secrets.token_hex(20)
    now = datetime.now()
    row.token = new_token
    row.expire = now + timedelta(days=1)
    row.updated_at = now
    session.commit()
    session.close()
    return web.json_response({"token": new_token})
def get_my_choices_projects():
    """Build choice tuples for every project in the system.

    Used by the project management page to populate a select widget.

    Returns:
        list of ``(index, project)`` tuples with the index starting at 1.
    """
    # enumerate(..., start=1) replaces the old hand-maintained counter loop.
    return list(enumerate(Project.objects.all(), start=1))
import platform
import locale
import sys
import struct
import os
def _get_sys_info() -> dict[str, JSONSerializable]:
    """
    Returns system information as a JSON serializable dictionary.
    """
    uname = platform.uname()
    language_code, encoding = locale.getlocale()
    info: dict[str, JSONSerializable] = {
        "commit": _get_commit_hash(),
        "python": ".".join([str(part) for part in sys.version_info]),
        # Pointer size in bits distinguishes 32- vs 64-bit interpreters.
        "python-bits": struct.calcsize("P") * 8,
        "OS": uname.system,
        "OS-release": uname.release,
        "Version": uname.version,
        "machine": uname.machine,
        "processor": uname.processor,
        "byteorder": sys.byteorder,
        "LC_ALL": os.environ.get("LC_ALL"),
        "LANG": os.environ.get("LANG"),
        "LOCALE": {"language-code": language_code, "encoding": encoding},
    }
    return info
import math
def normalDistributionBand(collection, band, mean=None, std=None,
                           name='normal_distribution'):
    """ Compute a normal distribution using a specified band, over an
    ImageCollection. For more see:
    https://en.wikipedia.org/wiki/Normal_distribution

    :param band: the name of the property to use
    :type band: str
    :param mean: the mean value. If None it will be computed from the source.
        defaults to None.
    :type mean: float
    :param std: the standard deviation value. If None it will be computed from
        the source. Defaults to None.
    :type std: float
    """
    # Fall back to collection-wide statistics when mean/std are not given.
    imean = ee.Image(collection.mean()) if mean is None \
        else ee.Image.constant(mean)
    istd = ee.Image(collection.reduce(ee.Reducer.stdDev())) if std is None \
        else ee.Image.constant(std)
    # Peak of the Gaussian: 1 / (std * sqrt(2 * pi)).
    ipi = ee.Image.constant(math.pi)
    imax = ee.Image(1) \
        .divide(istd.multiply(ee.Image.constant(2).multiply(ipi).sqrt()))
    return gaussFunctionBand(collection, band, mean=imean,
                             output_max=imax, std=istd, name=name)
import torch
def compute_ctrness_targets(reg_targets):
    """Compute FCOS-style centerness targets from (l, t, r, b) regressions.

    :param reg_targets: tensor of shape (N, 4) holding left/top/right/bottom
        distances per location.
    :return: tensor of shape (N,) with sqrt(min/max ratios); an empty input
        yields an empty tensor.
    """
    if len(reg_targets) == 0:
        return reg_targets.new_zeros(len(reg_targets))
    horizontal = reg_targets[:, [0, 2]]
    vertical = reg_targets[:, [1, 3]]
    ratio = (horizontal.amin(dim=-1) / horizontal.amax(dim=-1)) * \
            (vertical.amin(dim=-1) / vertical.amax(dim=-1))
    return ratio.sqrt()
import hashlib
import hmac
import base64
def create_hmac_signature(key: bytes, data_to_sign: str, hashmech=hashlib.sha256) -> str:
    """
    Creates an HMAC signature for the provided data string.

    @param key: HMAC key as bytes
    @param data_to_sign: The data that needs to be signed; encoded as UTF-8
        before signing
    @param hashmech: Hash constructor passed to hmac.new (e.g.
        hashlib.sha256).  The previous annotation (``hashlib``) named the
        module rather than a type, so it has been dropped.
    @return: Base64 encoded signature as a str
    """
    digest = hmac.new(key, data_to_sign.encode("utf-8"), hashmech).digest()
    return base64.b64encode(digest).decode("utf-8")
def CV_range(
    bit_depth: Integer = 10, is_legal: Boolean = False, is_int: Boolean = False
) -> NDArray:
    """
    Returns the code value :math:`CV` range for given bit depth, range legality
    and representation.

    Parameters
    ----------
    bit_depth
        Bit depth of the code value :math:`CV` range.
    is_legal
        Whether the code value :math:`CV` range is legal.
    is_int
        Whether the code value :math:`CV` range represents integer code values.

    Returns
    -------
    :class:`numpy.ndarray`
        Code value :math:`CV` range.

    Examples
    --------
    >>> CV_range(8, True, True)
    array([ 16, 235])
    >>> CV_range(8, True, False)  # doctest: +ELLIPSIS
    array([ 0.0627451...,  0.9215686...])
    >>> CV_range(10, False, False)
    array([ 0.,  1.])
    """
    if is_legal:
        # Legal range is the 8-bit [16, 235] span scaled to the bit depth.
        scale = 2 ** (bit_depth - 8)
        ranges = np.array([16 * scale, 235 * scale])
    else:
        ranges = np.array([0, 2 ** bit_depth - 1])

    if not is_int:
        # Normalise integer code values to floats in [0, 1].
        ranges = as_float_array(ranges) / (2 ** bit_depth - 1)

    return ranges
import torch
def bw_transform(x):
    """Collapse rgb-separated balls into a single color channel.

    Sums over color dimension 2, clips the result to [0, 1], then restores
    a singleton channel axis so the output rank matches the input rank.
    """
    return x.sum(dim=2).clamp(0, 1).unsqueeze(2)
def fom(A, b, x0=None, maxiter=None, residuals=None, errs=None):
    """Full orthogonalization method (FOM).

    Builds an Arnoldi basis of the Krylov space and solves the projected
    Hessenberg system to approximate the solution of ``A x = b``.

    Parameters
    ----------
    A : {array, matrix, sparse matrix, LinearOperator}
        n x n, linear system to solve; only needs to support ``A * v``
    b : {array, matrix}
        right hand side, shape is (n,) or (n,1)
    x0 : {array, matrix}
        initial guess, default is a vector of zeros
    maxiter : int
        maximum number of allowed iterations
    residuals : list
        if given, overwritten with the residual norm history, including the
        initial residual
    errs : list
        if given, overwritten with the error history measured as
        sqrt((A x, x)), so test the errors on Ax=0

    Returns
    -------
    (x, newh) : tuple
        approximate solution and the norm of the last Arnoldi vector
    """
    n = len(b)
    if maxiter is None:
        maxiter = n
    # Bug fix: the old code set x = ones but never assigned x0, so every
    # call with x0=None crashed below on ``x0 + z.ravel()``.  The default
    # now matches the documented zero initial guess.
    if x0 is None:
        x0 = np.zeros((n,))
    x = x0.copy()
    r = b - A * x
    beta = np.linalg.norm(r)
    if residuals is not None:
        residuals[:] = [beta]  # initial residual
    if errs is not None:
        errs[:] = [np.sqrt(np.dot(A * x, x))]
    V = np.zeros((n, maxiter))
    H = np.zeros((maxiter, maxiter))
    V[:, 0] = (1 / beta) * r

    def _projected_solution(j):
        """Solve the (j+1)x(j+1) Hessenberg system, map back to R^n."""
        e1 = np.zeros((j + 1, 1))
        e1[0] = beta
        y = np.linalg.solve(H[0:j + 1, 0:j + 1], e1)
        z = np.dot(V[:, 0:j + 1], y)
        return x0 + z.ravel(), y

    for j in range(0, maxiter):
        w = A * V[:, j]
        # Modified Gram-Schmidt against the previous Arnoldi vectors.
        for i in range(0, j + 1):
            H[i, j] = np.dot(w, V[:, i])
            w += -H[i, j] * V[:, i]
        newh = np.linalg.norm(w)
        if abs(newh) < 1e-13:
            # Krylov space became invariant; the final solve is exact.
            break
        elif j < (maxiter - 1):
            H[j + 1, j] = newh
            V[:, j + 1] = (1 / newh) * w
        # do some work to check the residual
        if residuals is not None:
            x, y = _projected_solution(j)
            residuals.append(abs(newh * y[j]))
        if errs is not None:
            x, _ = _projected_solution(j)
            errs.append(np.sqrt(np.dot(A * x, x)))
    x, _ = _projected_solution(j)
    return (x, newh)
def get_gprMax_materials(fname):
    """Return a mapping of material name -> relative permittivity.

    Parses the material lines of a gprMax ``.in`` file; 'pec' and
    'free_space' are pre-seeded with conventional values.
    """
    materials = {'pec': 1.0,  # Not defined, usually taken as 1.
                 'free_space': 1.000536}
    tokenised = (line.split() for line in get_lines(fname, 'material'))
    materials.update({tokens[-1]: float(tokens[0]) for tokens in tokenised})
    return materials
import random
def generate_utt_pairs(librispeech_md_file, utt_pairs, n_src):
    """Generate pairs of utterances for the mixtures."""
    # Map each speaker ID to the metadata row indices of their utterances.
    speakers = list(librispeech_md_file["speaker_ID"].unique())
    utt_dict = {
        spk: list(librispeech_md_file.index[librispeech_md_file["speaker_ID"] == spk])
        for spk in speakers
    }
    while len(speakers) >= n_src:
        # Draw n_src distinct speakers, then one unused utterance apiece.
        chosen = random.sample(speakers, n_src)
        pair = []
        for spk in chosen:
            utt = random.choice(utt_dict[spk])
            pair.append(utt)
            utt_dict[spk].remove(utt)
            if not utt_dict[spk]:  # speaker has no utterances left
                speakers.remove(spk)
        utt_pairs.append(pair)
    return utt_pairs
def simpson(so, spl: str, attr: str, *, local=True, key_added=None, graph_key='knn', inplace=True) -> None:
    """Compute the Simpson index at the observation or sample level.

    Args:
        so: SpatialOmics instance.
        spl: Sample for which to compute the metric.
        attr: Categorical feature in SpatialOmics.obs used for grouping.
        local: If True, compute per observation over the graph named by
            `graph_key`; otherwise once per sample.
        key_added: Base key for the result; defaults to 'simpson'.  The
            final key is '<key_added>_<attr>' plus '_<graph_key>' if local.
        graph_key: Graph representation in so.G[spl] used when local=True.
        inplace: Whether to modify `so` in place or return a new instance.
    """
    base = 'simpson' if key_added is None else key_added
    result_key = f'{base}_{attr}'
    if local:
        result_key = f'{result_key}_{graph_key}'
    return _compute_metric(so=so, spl=spl, attr=attr, key_added=result_key,
                           graph_key=graph_key, metric=_simpson,
                           kwargs_metric={},
                           local=local, inplace=inplace)
def _rk4(dparam=None, k0=None, y=None, kwdargs=None):
    """
    A traditional RK4 (Runge-Kutta order 4) step for the variable k0, with:
        - y = array of all variables
        - dparam = parameter dictionary (dt is contained within dparam)
    Returns the increment to add to y for this time step.
    """
    if 'itself' in dparam[k0]['kargs']:
        # The ODE function depends on its own variable: evaluate the four
        # RK4 stages at y, y + dy1/2, y + dy2/2 and y + dy3.
        # NOTE(review): the intermediate states add dy/2 without an explicit
        # dt factor; presumably 'func' already folds dt into its return
        # value -- confirm against the function definitions in dparam.
        dy1 = dparam[k0]['func'](itself=y, **kwdargs)
        dy2 = dparam[k0]['func'](itself=y+dy1/2., **kwdargs)
        dy3 = dparam[k0]['func'](itself=y+dy2/2., **kwdargs)
        dy4 = dparam[k0]['func'](itself=y+dy3, **kwdargs)
    else:
        # No self-dependence: all four stage evaluations receive identical
        # arguments, so the weighted sum below reduces to func(...) * dt
        # (at the cost of three redundant calls).
        dy1 = dparam[k0]['func'](**kwdargs)
        dy2 = dparam[k0]['func'](**kwdargs)
        dy3 = dparam[k0]['func'](**kwdargs)
        dy4 = dparam[k0]['func'](**kwdargs)
    # Classic RK4 weighting: (k1 + 2*k2 + 2*k3 + k4) / 6, scaled by dt.
    return (dy1 + 2*dy2 + 2*dy3 + dy4) * dparam['dt']['value']/6.
def generate_discord_markdown_string(lines):
    """
    Wrap a list of messages in a Discord markdown code block.

    :param [str] lines:
    :return: The wrapped string
    :rtype: str
    """
    return "\n".join(["```markdown", *lines, "```"])
from typing import Optional
def _sanitize_ndim(
    result: ArrayLike, data, dtype: Optional[DtypeObj], index: Optional[Index]
) -> ArrayLike:
    """
    Ensure we have a 1-dimensional result array.

    Parameters
    ----------
    result : candidate output array built from ``data``.
    data : the original user-passed data, used for error checks and for
        re-coercion when ``result`` came out with too many dimensions.
    dtype : requested dtype, if any.
    index : target index, used to broadcast a scalar result if needed.

    Raises
    ------
    ValueError
        If ``result`` is 0-dimensional, or if ndarray ``data`` is
        more than 1-dimensional.
    """
    if getattr(result, "ndim", 0) == 0:
        raise ValueError("result should be arraylike with ndim > 0")
    elif result.ndim == 1:
        # the result that we want; broadcast over the index when scalar-like
        result = _maybe_repeat(result, index)
    elif result.ndim > 1:
        if isinstance(data, np.ndarray):
            raise ValueError("Data must be 1-dimensional")
        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
            # i.e. PandasDtype("O")
            # Flatten nested data to a 1-d object array, then round-trip it
            # through the extension array type.
            result = com.asarray_tuplesafe(data, dtype=object)
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)
        else:
            result = com.asarray_tuplesafe(data, dtype=dtype)
    return result
from typing import Callable
from typing import Any
from re import T
from typing import List
from typing import Dict
def from_list_dict(f: Callable[[Any], T], x: Any) -> List[Dict[str, T]]:
    """Parse a list of dictionaries, applying `f` to every dictionary value.

    All items must be dictionaries; anything else trips the assertions.
    """
    assert isinstance(x, list)
    assert all(isinstance(d, dict) for d in x)
    return [{key: f(value) for (key, value) in entry.items()} for entry in x]
def TSTR_eICU(identifier, epoch):
    """Train-on-Synthetic, Test-on-Real (TSTR) evaluation on the eICU tasks.

    Loads synthetic data saved by experiment `identifier` at `epoch`, trains
    one classifier per label column on the synthetic set and evaluates it on
    the real eICU test set.  Also evaluates three OR-composed "extreme
    value" tasks (heartrate, respiration, MAP).

    Returns:
        (results, OR_results): lists of [accuracy, precision, recall],
        one entry per label / per OR task.
    """
    # get "train" data
    exp_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
    X_synth = exp_data['synth_data']
    Y_synth = exp_data['synth_labels']
    n_synth = X_synth.shape[0]
    # flatten each synthetic sample to a feature vector
    X_synth = X_synth.reshape(n_synth, -1)
    # get test data
    data = np.load('./data/eICU_task_data.npy').item()
    X_test = data['X_test']
    # NOTE(review): X_test is used without reshaping -- presumably it is
    # stored already flattened to 2-D; confirm against the data pipeline.
    Y_test = data['Y_test']
    # iterate over labels
    results = []
    for label in range(Y_synth.shape[1]):
        print('task:', data['Y_columns'][label])
        print('(', np.mean(Y_synth[:, label]), 'positive in train, ', np.mean(Y_test[:, label]), 'in test)')
        #model = RandomForestClassifier(n_estimators=50).fit(X_synth, Y_synth[:, label])
        model = SVC(gamma=0.001).fit(X_synth, Y_synth[:, label])
        predict = model.predict(X_test)
        print('(predicted', np.mean(predict), 'positive labels)')
        accuracy = sklearn.metrics.accuracy_score(Y_test[:, label], predict)
        precision = sklearn.metrics.precision_score(Y_test[:, label], predict)
        recall = sklearn.metrics.recall_score(Y_test[:, label], predict)
        print('\tacc:', accuracy, '\tprec:', precision, '\trecall:', recall)
        results.append([accuracy, precision, recall])
    # do the OR task: combine the low/high "extreme" label pairs into one
    # binary label per vital sign (1 if either extreme is set)
    extreme_heartrate_test = Y_test[:, 1] + Y_test[:, 4]
    extreme_respiration_test = Y_test[:, 2] + Y_test[:, 5]
    extreme_systemicmean_test = Y_test[:, 3] + Y_test[:, 6]
    Y_OR_test = np.vstack([extreme_heartrate_test, extreme_respiration_test, extreme_systemicmean_test]).T
    Y_OR_test = (Y_OR_test > 0)*1
    extreme_heartrate_synth = Y_synth[:, 1] + Y_synth[:, 4]
    extreme_respiration_synth = Y_synth[:, 2] + Y_synth[:, 5]
    extreme_systemicmean_synth = Y_synth[:, 3] + Y_synth[:, 6]
    Y_OR_synth = np.vstack([extreme_heartrate_synth, extreme_respiration_synth, extreme_systemicmean_synth]).T
    Y_OR_synth = (Y_OR_synth > 0)*1
    OR_names = ['extreme heartrate', 'extreme respiration', 'extreme MAP']
    OR_results = []
    for label in range(Y_OR_synth.shape[1]):
        print('task:', OR_names[label])
        print('(', np.mean(Y_OR_synth[:, label]), 'positive in train, ', np.mean(Y_OR_test[:, label]), 'in test)')
        # NOTE(review): the per-label loop above uses SVC while the OR tasks
        # use a random forest -- confirm this asymmetry is intentional.
        model = RandomForestClassifier(n_estimators=50).fit(X_synth, Y_OR_synth[:, label])
        predict = model.predict(X_test)
        print('(predicted', np.mean(predict), 'positive labels)')
        accuracy = sklearn.metrics.accuracy_score(Y_OR_test[:, label], predict)
        precision = sklearn.metrics.precision_score(Y_OR_test[:, label], predict)
        recall = sklearn.metrics.recall_score(Y_OR_test[:, label], predict)
        print(accuracy, precision, recall)
        OR_results.append([accuracy, precision, recall])
    return results, OR_results
def wizard_active(step, current):
    """
    Return the proper classname for the step div in the badge wizard.

    The current step gets 'selected'; the step immediately after it gets
    'next-selected' so the tip of the arrow is colored properly.  Any other
    step yields None (no extra class).
    """
    if current == step:
        return 'selected'
    if current + 1 == step:
        return 'next-selected'
    return None
def set_token(token: OAuth2Token):
    """Store the Dynamics client token in the cache from a worker thread,
    so the synchronous cache write can be performed in an async context.

    The cache entry expires 60 seconds before the token itself does.
    """
    def _store():
        ttl = int(token["expires_in"]) - 60
        cache.set("dynamics-client-token", token, ttl)

    with ThreadPoolExecutor() as executor:
        return executor.submit(_store).result()
def pFind_clumps(f_list, n_smooth=32, param=None, arg_string=None, verbose=True):
"""
A parallel implementation of find_clumps. Since SKID is not parallelized
this can be used to run find_clumps on a set of snapshots from one
simulation.
**ARGUMENTS**
f_list : list
A list containing the filenames of snapshots OR the tipsy snapshots
n_smooth : int (optional)
Number of nearest neighbors used for particle smoothing in the
simulation. This is used in the definition of a density threshold
for clump finding.
param : str (optional)
filename for a tipsy .param file
arg_string : str (optional)
Additional arguments to be passed to SKID. Cannot use -tau, -d, -m, -s, -o
verbose : bool
Verbosity flag. Default is True
**RETURNS**
clumpnum_list : list
A list containing the particle clump assignment for every snapshot in
f_list. clumps[i][j] gives the clump number for particle j in
snapshot i.
"""
# Number of processes to create = number of cores
n_proc = cpu_count()
# Set up the arguments for calls to find_clumps
arg_list = []
for i, f_name in enumerate(f_list):
arg_list.append([f_name, n_smooth, param, arg_string, i, verbose])
print arg_list
# Set up the pool
pool = Pool(n_proc)
# Run the job in parallel
results = pool.map(_parallel_find_clumps, arg_list, chunksize=1)
pool.close()
pool.join()
return results | 85e2c80f3fdb95f2c324b8b934550788faa6c5bb | 28,789 |
import math
def gamma_dis(x):
    """Gamma distribution density with the shape parameter fixed at 2.

    https://www.itl.nist.gov/div898/handbook/eda/section3/eda366b.htm
    Input and output are rounded to 14 decimal places.
    """
    x = round(x, 14)
    return round(x * math.exp(-x) / TAU_2, 14)
import os
import pprint
import json
import sys
def generate_schema_dictionary(source_type, csdl_schema_dirs, json_schema_dirs,
                               entity, schema_file_name, oem_entities=None,
                               oem_schema_file_names=None, profile=None, schema_url=None,
                               copyright=None):
    """ Generate the schema dictionary.
    Args:
        source_type: Type of schema file. local or remote.
        csdl_schema_dirs: List of CSDL schema directories.
        json_schema_dirs: List of JSON schema directories.
        entity: Schema entity name.
        schema_file_name: Schema file name.
        oem_entities: List of oem entities (default None).
        oem_schema_file_names: List of OEM schema file names (default None).
        profile: Schema profile (default None)
        schema_url: Schema URL. Used when source_type is remote (default None).
        copyright: Copyright string that should be appended to the binary dictionary
    Return:
        SchemaDictionary: Named tuple which has the following fields:
            dictionary - The schema dictionary.
            dictionary_byte_array - The schema dictionary in byte array.
            json_dictionary - Schema dictionary in JSON format.
    """
    global includeNamespaces
    global verbose
    # Initialize the global variables.
    doc_list = {}
    oem_sources = []
    oem_entity_type = ''
    entity_repo = {}
    entity_offset_map = {}
    includeNamespaces = {}
    # Validate source type.
    if source_type not in ['local', 'remote']:
        if verbose:
            print('Error, invalid source_type: {0}'.format(source_type))
        return (SchemaDictionary(dictionary=None,
                                 dictionary_byte_array=None,
                                 json_dictionary=None))
    # Set the source variable. If source_type is remote set source to schema_url.
    if source_type == 'remote':
        source = schema_url
    else:
        # compute source starting with the first csdl directory. The first one wins
        source = schema_file_name
        for csdl_dir in csdl_schema_dirs:
            if os.path.isfile(os.path.join(csdl_dir, schema_file_name)):
                source = os.path.join(csdl_dir, schema_file_name)
                break
    # Set oem sources and entity repo for oem schema file names.
    if oem_schema_file_names:
        for oem_schema_file in oem_schema_file_names:
            for csdl_dir in csdl_schema_dirs:
                if os.path.isfile(os.path.join(csdl_dir, oem_schema_file)):
                    oem_sources.append(os.path.join(csdl_dir, oem_schema_file))
        oem_entity_type = entity + '.Oem'
        # create a special entity for OEM and set the major entity's oem section to it
        entity_repo[oem_entity_type] = ('Set', [])
        # oem_entities entries are "Name=EntityType" pairs.
        for oemEntityPair in oem_entities:
            oemName, oem_entity = oemEntityPair.split('=')
            entity_repo[oem_entity_type][ENTITY_REPO_TUPLE_PROPERTY_LIST_INDEX].append(
                [oemName, 'Set', '', oem_entity])
    # Add namespaces.  These calls populate doc_list in place.
    add_namespaces(csdl_schema_dirs, source_type, source, doc_list)
    for oemSource in oem_sources:
        add_namespaces(csdl_schema_dirs, source_type, oemSource, doc_list)
    if verbose:
        pprint.PrettyPrinter(indent=3).pprint(doc_list)
    add_all_entity_and_complex_types(json_schema_dirs, source_type, doc_list, entity_repo)
    if verbose:
        pprint.PrettyPrinter(indent=3).pprint(entity_repo)
    # set the entity oem entry to the special OEM entity type
    # (note: the loop variable shadows the builtin `property`)
    if source_type == 'local' and oem_schema_file_names:
        for property in entity_repo[entity][ENTITY_REPO_TUPLE_PROPERTY_LIST_INDEX]:
            if property[PROPERTY_FIELD_STRING] == 'Oem':
                property[PROPERTY_OFFSET] = oem_entity_type
    # search for entity and build dictionary
    if entity in entity_repo:
        ver = ''
        dictionary = []
        if source_type == 'local':
            # truncate the entity_repo first if a profile is specified
            # NOTE(review): is_truncated is passed by value and never read
            # back afterwards -- confirm it has any effect downstream.
            is_truncated = False
            if profile:
                with open(profile) as file:
                    json_profile = json.load(file)
                # Fix up the profile
                profile_requirements = process_profile(json_profile, entity)
                if profile_requirements:
                    truncate_entity_repo(entity_repo, entity, profile_requirements, is_truncated)
                else:
                    if verbose:
                        print('Error parsing profile')
                    sys.exit(1)
            add_dictionary_entries(dictionary, entity_repo, entity, entity_offset_map, True, get_entity_name(entity))
            dictionary = generate_dictionary(dictionary, entity_repo, entity_offset_map)
            ver = get_latest_version_as_ver32(entity)
        if verbose:
            print(entity_offset_map)
        # Generate dictionary_byte_array.
        dictionary_byte_array = generate_byte_array(dictionary, ver, False, copyright)
        # Generate JSON dictionary.
        json_dictionary = generate_json_dictionary(json_schema_dirs, dictionary, dictionary_byte_array, entity)
        # Return the named tuple.
        return (SchemaDictionary(dictionary=dictionary,
                                 dictionary_byte_array=dictionary_byte_array,
                                 json_dictionary=json_dictionary))
    # Reached here means something went wrong. Return an empty named tuple.
    else:
        if verbose:
            print('Error, cannot find entity:', entity)
        return (SchemaDictionary(dictionary=None,
                                 dictionary_byte_array=None,
                                 json_dictionary=None))
import calendar
def get_month_number(year):
    """Prompt the user for a month number (1-12) and build its date range.

    :param year: the year the month belongs to (string or int)
    :returns: tuple ``(month_name, date1, date2)`` where ``month_name`` is
        e.g. ``'June_2021'`` and ``date1``/``date2`` are pandas Timestamps
        for the first and last day of the chosen month.
    """
    year = int(year)
    while True:
        val = input("Please, enter the number of month? (1-12)\n")
        try:
            month = int(val)
        except ValueError:
            print("Invalid number!")
            continue
        # Bug fix: the old range check used 'and' (month <= 0 and
        # month > 12), which can never be true, so out-of-range values
        # slipped through and crashed in calendar.monthrange.
        if month <= 0 or month > 12:
            continue
        # monthrange -> (weekday of the 1st, number of days in the month)
        week_day = calendar.monthrange(year, month)
        date1 = pd.Timestamp(date(year, month, 1))
        date2 = pd.Timestamp(date(year, month, week_day[1]))
        month_name = f'{date1.strftime("%B")}_{year}'  # get the name of month
        return month_name, date1, date2
def reconstruct_with_whole_molecules(struct):
    """Rebuild `struct` as its smallest whole-molecule representation."""
    rebuilt = Structure()
    rebuilt.set_lattice_vectors(struct.get_lattice_vectors())
    for molecule in get_molecules(struct):
        coords = molecule.get_geo_array()
        elements = molecule.geometry['element']
        for idx, xyz in enumerate(coords):
            rebuilt.append(xyz[0], xyz[1], xyz[2], elements[idx])
    return rebuilt
def _json_serialize_no_param(cls):
    """Class decorator adding JSON serialization support.

    Registers the class as a known type so it can be serialized and
    deserialized properly.
    """
    type_key = _get_type_key(cls)
    return _patch(cls, type_key, 0)
def aireTriangle(a, b, c):
    """
    Area of triangle abc in 3-D space.

    It is half the norm of the cross product ab x ac.
    """
    u, v = b - a, c - a
    cx = u[1]*v[2] - u[2]*v[1]
    cy = u[2]*v[0] - u[0]*v[2]
    cz = u[0]*v[1] - u[1]*v[0]
    return 0.5*sqrt(cx*cx + cy*cy + cz*cz)
def ez_admin(admin_client, admin_admin, skip_auth):
    """A Django test client that has been logged in as admin. When EZID endpoints are
    called via the client, a cookie for an active authenticated session is included
    automatically. This also sets the admin password to "admin".

    Note: Because EZID does not use a standard authentication procedure, it's also
    necessary to pull in skip_auth here.
    """
    # The admin_admin fixture is pulled in (though unused directly) so the
    # admin password is "admin" before the login below.
    admin_client.login(username='admin', password='admin')
    #log.info('cookies={}'.format(admin_client.cookies))
    return admin_client
import textwrap
def _template_message(desc, descriptor_registry):
    # type: (Descriptor, DescriptorRegistry) -> str
    """
    Render Python class-definition source text for a protobuf message
    descriptor, recursing into nested enums and messages, and register the
    descriptor in `descriptor_registry` under its identifier.
    """
    desc = SimpleDescriptor(desc)
    descriptor_registry[desc.identifier] = desc
    slots = desc.field_names
    # NOTE: the "pass" statement is a hack to provide a body when args is empty
    initialisers = ['pass']
    # Nested message fields instantiate the inner class defined on self.
    initialisers += [
        'self.{} = self.{}()'.format(field_name, field_type)
        for field_name, field_type in (desc.inner_fields)
    ]
    # Fields whose type lives in another module instantiate it directly.
    initialisers += [
        'self.{} = {}()'.format(field_name, field_type)
        for field_name, field_type in (desc.external_fields)
    ]
    initialisers += [
        'self.{} = []'.format(field_name)
        for field_name in desc.repeated_fields
    ]
    args = ['self'] + ['{}=None'.format(f) for f in slots]
    init_str = 'def __init__({argspec}):\n{initialisers}\n'.format(
        argspec=', '.join(args),
        initialisers=textwrap.indent('\n'.join(initialisers), '    '),
    )
    helpers = ""
    if desc.options.map_entry:
        # for map <key, value> fields
        # This mirrors the _IsMessageMapField check
        value_type = desc.fields_by_name['value']
        if value_type.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
            base_class = MessageMap
        else:
            base_class = ScalarMap
        # Rather than (key, value), use the attributes of the correct
        # MutableMapping type as the "slots"
        slots = tuple(m for m in dir(base_class) if not m.startswith("_"))
        helpers = 'def __getitem__(self, idx):\n    pass\n'
        helpers += 'def __delitem__(self, idx):\n    pass\n'
    # Recursively render nested enum and message definitions first.
    body = ''.join([
        _template_enum(d, descriptor_registry) for d in desc.enum_types
    ] + [
        _template_message(d, descriptor_registry) for d in desc.nested_types
    ])
    cls_str = (
        'class {name}(object):\n'
        '    {docstring!r}\n'
        '    __slots__ = {slots}\n'
        '{helpers}{body}{init}\n'
    ).format(
        name=desc.name,
        docstring="descriptor={}".format(desc.identifier),
        slots=slots,
        body=textwrap.indent(body, '    '),
        helpers=textwrap.indent(helpers, '    '),
        init=textwrap.indent(init_str, '    '),
    )
    return cls_str
import re
def parse_archive(path, objdump):
    """Parses a list of ObjectFiles from an objdump archive output.

    Args:
      path: String path to the archive.
      objdump: List of strings of lines of objdump output to parse.

    Returns:
      List of ObjectFile objects representing the objects contained within
      the archive.
    """
    # "name(member): file format ..." lines open a new object.  The [^\)]
    # anchor forces backtracking so the optional "(member)" group captures.
    header_re = re.compile(r'^(.*[^\)])(\((.+)\))?:\s+file format')
    object_files = []
    current_file = None
    for line in objdump:
        if not line:
            continue
        header = header_re.match(line)
        if header:
            filename = header.group(3) if header.group(3) else header.group(1)
            current_file = ObjectFile(filename, path)
            object_files.append(current_file)
            continue
        if not current_file:
            raise Exception('Archive does not specify object to attribute '
                            'symbols to ' + path)
        sym = parse_symbol(line)
        if not sym:
            # A non-symbol line after symbols closes the current object.
            if current_file.symbols:
                current_file = None
            continue
        current_file.symbols.append(sym)
    return object_files
async def async_validate_trigger_config(
    hass: HomeAssistant, config: ConfigType
) -> ConfigType:
    """Validate a device trigger config.

    Raises InvalidDeviceAutomationConfig when the device is unknown, its
    model is unsupported, or the trigger type is invalid for that model.
    """
    config = TRIGGER_SCHEMA(config)
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(config[CONF_DEVICE_ID])
    trigger = config[CONF_TYPE]
    if (
        not device
        or device.model not in DEVICES
        or trigger not in DEVICES[device.model]
    ):
        # Bug fix: when the device was missing entirely, the old f-string
        # dereferenced device.model on None and raised AttributeError
        # instead of the intended validation error.
        model = device.model if device else None
        raise InvalidDeviceAutomationConfig(f"Unsupported model {model}")
    return config
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.