content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def cmd_issuer_hash(cert):
    """Return the hash of the certificate's issuer name.

    :param cert: a certificate object exposing ``get_issuer()``
        (pyOpenSSL-style API).
    :return: the issuer name hash as produced by ``X509Name.hash()``.
    """
    issuer = cert.get_issuer()
    return issuer.hash()
def linearly_enhancing_details(alphas, numOfScales=2, window_scale=3):
    """Linearly enhance the high-frequency (detail) coefficients of an image.

    Applies a per-scale gain to Shearlet detail coefficients, then fits a
    local linear model (guided-filter style) so the enhancement adapts to
    the local variance of the coefficients.

    :param alphas: the high frequency component of the image calculated with
        the Shearlet transformation.  An ``i * j * (kl)`` array, where i, j
        equal the height and width of the original image and
        ``kl = 4 + 8 + 16 + ...`` for 1, 2, 3... scales.
    :param numOfScales: the number of scales of high frequency coefficients;
        must be 1, 2 or 3.
    :param window_scale: side length of the square averaging window.
    :return: the enhanced detail coefficients, same shape as ``alphas``.
    :raises ValueError: if ``numOfScales`` is not in {1, 2, 3}.
    """
    if numOfScales > 3 or numOfScales < 1:
        # Previously this only printed a warning and carried on, which left
        # the gain vector inconsistent with `alphas`; fail loudly instead.
        raise ValueError('Number of Scale must be in the set of {1, 2, 3} !!!')
    # Per-scale gain factors (coarse -> fine).
    gain_weight = np.array([0.9, 1.0, 1.1, 1.2, 1.5, 3.0])
    # Regularizer: large lam suppresses enhancement in low-variance regions.
    lam = 1000
    # Gain vector W: the 2**(i+1) directional bands of scale i share one gain.
    w_array = np.concatenate(
        [gain_weight[i] * np.ones(2 ** (i + 1)) for i in range(1, numOfScales + 1)]
    )
    betas = alphas * w_array
    a_times_b = alphas * betas
    kernel = np.ones((window_scale, window_scale))
    numerator_ab_part = 1.0 / (window_scale**2) * cv2.filter2D(a_times_b, -1, kernel=kernel)
    mu0 = cv2.filter2D(alphas, -1, kernel=kernel)
    mu1 = cv2.filter2D(betas, -1, kernel=kernel)
    # Fast local variance: mean(x^2) - mean(x)^2 instead of the direct
    # 1/n * sum((x - mean)^2) formulation.
    sigma2 = cv2.filter2D(alphas**2, -1, kernel) - (cv2.filter2D(alphas, -1, kernel)) ** 2
    # Local linear model gamma = pm * alpha + qm per pixel.
    pm = (numerator_ab_part - mu0 * mu1) / (lam + sigma2)
    qm = mu1 - pm * mu0
    gammai = pm * alphas + qm
    return gammai
def strip_c(buf, dia):
    """This is the ugliest python function I've ever written and I'm ashamed
    that it exists. Can you tell that it's an almost line for line translation
    of a C program? The two embedded functions were macros.

    Strips comments, #include directives and redundant whitespace from C
    source text in ``buf`` (a str) and returns the result as ``bytes``.
    ``dia`` is a diagnostics object; its ``error`` method is called (and
    None returned) on malformed input.
    """
    # Input is processed as bytes; output is accumulated in a fresh
    # bytearray that deliberately reuses the name `buf`.
    pos = bytes(buf, 'ascii', errors='replace')
    # State flags: inside single/double quotes, pending '/', pending
    # backslash escape, and "skip this output char".
    single_q = double_q = slash = escape = skip = False
    space = 0  # length of the current run of emitted whitespace
    buf = bytearray(len(pos) + 1)
    buf[0] = ord(b' ')  # sentinel so buf[x-1] is always a valid read
    i, x = 0, 1  # i: read index into pos; x: write index into buf
    def check_quote(tocheck, other):
        # Toggle one quote-state flag unless we are inside the *other*
        # kind of quote; sets `skip` when a quote closes. (Was a C macro.)
        nonlocal skip, escape
        if not other:
            if tocheck:
                if not escape:
                    tocheck = False
                    skip = True
            else:
                tocheck = True
        return tocheck, other
    def QUOTE():
        # True while inside any string/char literal. (Was a C macro.)
        nonlocal double_q, single_q
        return double_q or single_q
    while i < len(pos):
        ch = chr(pos[i])
        if ch == '/':
            if not QUOTE():
                if slash:
                    # Second '/' of a `//` comment: un-emit the first slash
                    # and jump past the rest of the line.
                    x -= 1
                    end = i + pos[i:].find(b'\n')
                    if end < 0:
                        # NOTE(review): find() returns -1 on failure, so
                        # end == i - 1 here; this error path can only fire
                        # when i == 0 — likely not what was intended.
                        dia.error("Failed to find end of comment")
                        return
                    while pos[end - 1] == '\\':
                        # NOTE(review): pos[end - 1] is an int in Python 3,
                        # so comparing to the str '\\' is always False; and
                        # the find() below yields a *relative* offset.
                        # Backslash-continued line comments are likely
                        # mishandled — confirm against the original C.
                        end = pos[end+1:].find(b'\n')
                    i = end
                    if chr(buf[x-1]) == '\n':
                        skip = True
                else:
                    # Remember the '/' — it may start a comment.
                    slash = True
        elif ch == '*':
            if not QUOTE() and slash:
                # `/*` block comment: un-emit the '/' and skip to `*/`.
                x -= 1
                end = i + pos[i:].find(b'*/')
                if end < 0:
                    # NOTE(review): same find()-returns--1 issue as above.
                    dia.error("Failed to find end of comment")
                    return
                i = end + 2
                try:
                    ch = chr(pos[i])
                except IndexError:
                    # Comment ran to the end of the input.
                    break
                if ch == '\n' and chr(buf[x-1]) == '\n':
                    skip = True
            slash = False
        elif ch == '\n':
            if not escape:
                # Unescaped newline ends `//` comments and (unterminated)
                # string literals.
                slash = double_q = False
            if (chr(buf[x-1]) == '\n'):
                # Collapse runs of blank lines.
                skip = True
        elif ch == '#':
            slash = False
            endln = i + pos[i+1:].find(b'\n')
            if chr(buf[x-1]) == '\n' and endln > 0:
                # '#' at start of a line: drop `#include` directives.
                tmp = i + 1
                if chr(pos[i+1]).isspace():
                    while chr(pos[tmp]).isspace() and tmp < endln:
                        tmp += 1
                thing = bytes(pos[tmp:tmp + 7])
                if thing == b'include':
                    i = endln + 2
                    continue
        elif ch == '\\':
            # Escape handling happens below, after the branch chain.
            pass
        elif ch == '"':
            double_q, single_q = check_quote(double_q, single_q)
            slash = False
        elif ch == "'":
            single_q, double_q = check_quote(single_q, double_q)
            slash = False
        else:
            slash = False
        # Shared bookkeeping: track escape state, suppress whitespace at
        # line starts, and cap emitted whitespace runs at one character.
        escape = not escape if (ch == '\\') else False
        skip = True if (skip) else (ch.isspace() and chr(buf[x-1]) == '\n')
        space = space + 1 if (ch.isspace() and not skip) else 0
        if skip:
            skip = False
        elif not QUOTE() and space < 2:
            buf[x] = ord(ch)
            x += 1
        i += 1
    return bytes(buf[:x])
def map_bool(to_bool) -> bool:
    """Convert the strings "true"/"false" (any case) to a boolean.

    Parameters
    ----------
    to_bool: str
        Value to be converted to boolean.

    Returns
    -------
    mapped_bool: bool
        Boolean value converted from string.

    Raises
    ------
    KeyError
        If the string is not "true" or "false" (case-insensitive).

    Example
    -------
    >>> map_bool("True")
    True
    """
    lookup = {"true": True, "false": False}
    normalized = to_bool.lower()
    if normalized not in lookup:
        raise KeyError("Boolean Value Expected got '{}'".format(to_bool))
    return lookup[normalized]
def _format_koff_text(properties, timeunit):
"""Format text for koff plot. """
tu = "ns" if timeunit == "ns" else r"$\mu$s"
text = "{:18s} = {:.3f} {:2s}$^{{-1}} $\n".format("$k_{{off1}}$", properties["ks"][0], tu)
text += "{:18s} = {:.3f} {:2s}$^{{-1}} $\n".format("$k_{{off2}}$", properties["ks"][1], tu)
text += "{:14s} = {:.4f}\n".format("$R^2$", properties["r_squared"])
ks_boot_avg = np.mean(properties["ks_boot_set"], axis=0)
cv_avg = 100 * np.std(properties["ks_boot_set"], axis=0) / np.mean(properties["ks_boot_set"], axis=0)
text += "{:18s} = {:.3f} {:2s}$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", ks_boot_avg[0],
tu, cv_avg[0])
text += "{:18s} = {:.3f} {:2s}$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", ks_boot_avg[1],
tu, cv_avg[1])
text += "{:14s} = {:.4f}\n".format("$R^2$$_{{boot}}$", np.mean(properties["r_squared_boot_set"]))
text += "{:18s} = {:.3f} {:2s}".format("$Res. Time$", properties["res_time"], tu)
return text | c657173779a7e63c149de364c3b5d3dfb27b4618 | 3,627,504 |
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    Annotate every cell of a heatmap with its value.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. Either a string
        usable with the string format method, e.g. "$ {x:.2f}", or a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of colors: the first for values below the threshold, the
        second for those above. Optional.
    threshold
        Value in data units separating the two text colors. If None (the
        default), the middle of the colormap is used. Optional.
    **textkw
        All other arguments are forwarded to each `text` call used to
        create the labels.

    Returns
    -------
    list of the created `matplotlib.text.Text` instances.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold into the image's color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max()) / 2.
    # Default to centered text; textkw may override.
    text_kwargs = dict(horizontalalignment="center",
                       verticalalignment="center")
    text_kwargs.update(textkw)
    # Wrap a plain format string into a ticker formatter.
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    # One Text per cell; color chosen by which side of the threshold the
    # normalized value falls on.
    texts = []
    for row in range(data.shape[0]):
        for col in range(data.shape[1]):
            above = im.norm(data[row, col]) > threshold
            text_kwargs.update(color=textcolors[int(above)])
            label = im.axes.text(col, row, valfmt(data[row, col], None), **text_kwargs)
            texts.append(label)
    return texts
def fmt_val(val, shorten=True):
    """Format a value for inclusion in an informative text string.

    The value is rendered with ``repr``; when *shorten* is true and the
    result exceeds 50 characters it is truncated with ``"..."`` and the
    original closing delimiter is re-appended so the text still reads like
    a (clipped) literal.

    :param val: any object to render.
    :param shorten: truncate long representations when True.
    :return: the (possibly shortened) ``repr`` string.
    """
    rendered = repr(val)
    max_len = 50  # renamed from `max`, which shadowed the builtin
    if shorten and len(rendered) > max_len:
        closing = rendered[-1]
        rendered = rendered[:max_len - 4] + "..."
        if closing in (">", "'", '"', ']', '}', ')'):
            rendered += closing
    return rendered
from typing import List, Optional
def start_nodes(aliases: Optional[List[str]] = None,
                ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE) -> bool:
    """
    Start indy-node service on a list of nodes.

    :param aliases: A list of node aliases. Defaults to an empty list.
        (The former mutable default ``[]`` was replaced with ``None``.)
    :type aliases: Optional[List[str]]
    :param ssh_config_file: The relative or absolute path to the SSH config
        file.
        Optional. (Default: chaosindy.common.DEFAULT_CHAOS_SSH_CONFIG_FILE)
    :type ssh_config_file: str
    :return: True iff every node in ``aliases`` was started successfully.
    """
    if aliases is None:
        aliases = []
    # Start all nodes listed in aliases list
    count = len(aliases)
    tried_to_start = 0
    are_alive = 0
    for alias in aliases:
        logger.debug("alias to start: %s", alias)
        if start_by_node_name(alias, ssh_config_file):
            are_alive += 1
        tried_to_start += 1
    logger.debug("are_alive: %s -- count: %s -- tried_to_start: %s -- " \
                 "len-aliases: %s", are_alive, count, tried_to_start,
                 len(aliases))
    # Success only when every alias reported alive.
    return are_alive == count
def get_rms_radius(ts):
    """
    Calculate the RMS radius at the different iterations of the timeseries.

    For each iteration, the weight-averaged RMS of the particle ``x``
    coordinates is computed; the result is scaled by 1e-6 (presumably a
    unit conversion — confirm against the data source).
    """
    rms_values = []
    for it in ts.iterations:
        x, w = ts.get_particle(['x', 'w'], iteration=it)
        rms_values.append(np.sqrt(np.average(x**2, weights=w)))
    return 1.e-6 * np.array(rms_values)
def encode_function_data(function=None, *args):
    """Encodes the function call so we can work with an initializer.

    Args:
        function ([brownie.network.contract.ContractTx], optional):
            The initializer function we want to call. Example: `box.store`.
            Defaults to None.
        args (Any, optional):
            The arguments to pass to the initializer function

    Returns:
        [bytes]: The encoded call, or empty bytes when there is no function
        or no arguments to encode.
    """
    if function and args:
        return function.encode_input(*args)
    return eth_utils.to_bytes(hexstr="0x")
def analyzeTarget(obj, targetPath):
    """
    Analyze the difference between the vertex positions of a mesh object
    and those recorded in a file on disk, and paint each vertex with a
    color encoding its displacement magnitude.

    This provides a graphical representation of the deformations with
    respect to the original positions as defined by the mesh object.
    Plugin developers may find this function useful for visualizing and
    checking integration plugins.

    Parameters
    ----------
    obj:
        *3D object*. The object possessing the 'original' vertex positions.
    targetPath:
        *string*. The file system path to the file containing the target
        vertex positions (one "index dx dy dz" record per line).

    Returns
    -------
    0 when the target file cannot be opened; None otherwise.
    """
    # The original used a Python 2 `print` statement and a bare `except`;
    # narrow to OSError and use the py3 print function.
    try:
        fileDescriptor = open(targetPath)
    except OSError:
        print('Unable to open %s' % (targetPath))
        return 0
    # Close the file promptly even if parsing below raises.
    try:
        targetData = fileDescriptor.readlines()
    finally:
        fileDescriptor.close()
    distMax = 0.00001
    # Step 1: find the maximum displacement length, used to normalize colors.
    for vData in targetData:
        vectorData = vData.split()
        targetsVector = [float(vectorData[1]), float(vectorData[2]), float(vectorData[3])]
        dist = aljabr.vlen(targetsVector)
        if dist > distMax:
            distMax = dist
    # Step 2: map each displacement to an RGBA color on the vertex.
    for vData in targetData:
        vectorData = vData.split()
        mainPointIndex = int(vectorData[0])
        targetsVector = [float(vectorData[1]), float(vectorData[2]), float(vectorData[3])]
        dist = aljabr.vlen(targetsVector)
        v = obj.verts[mainPointIndex]
        if dist == 0:
            # No displacement: white.
            v.color = [255, 255, 255, 255]
        else:
            # Red channel scales with relative displacement; green/blue dim
            # slightly so large displacements read as saturated red.
            R = int((dist / distMax) * 255)
            G = 255 - int(R / 10)
            B = 255 - int(R / 10)
            v.color = [R, G, B, 255]
        v.update(0, 0, 1)
def generate_ranklist(data, rerank_lists):
    """
    Create reranked lists based on the data and rerank document ids.

    Args:
        data: (Raw_data) the dataset that contains the raw data
        rerank_lists: (list<list<int>>) a list of rerank lists in which each
                        element represents the original rank of the documents
                        in the initial list.

    Returns:
        qid_list_map: (map<list<int>>) a map of qid with the reranked document id list.

    Raises:
        ValueError: if the number of rerank lists, or the length of any
            rerank list, does not match the initial lists.
    """
    if len(rerank_lists) != len(data.initial_list):
        # Bug fix: the % arguments were misparenthesized, so this branch
        # used to raise a TypeError instead of the intended ValueError.
        raise ValueError("The number of queries in rerank ranklists number must be equal to the initial list,"
                         " %d != %d." % (len(rerank_lists), len(data.initial_list)))
    qid_list_map = {}
    for i in range(len(data.qids)):
        if len(rerank_lists[i]) != len(data.initial_list[i]):
            raise ValueError("The number of docs in each rerank ranklists must be equal to the initial list,"
                             " %d != %d." % (len(rerank_lists[i]), len(data.initial_list[i])))
        # Remove duplicated doc indices, preserving first-seen order.
        index_list = []
        index_set = set()
        for idx in rerank_lists[i]:
            if idx not in index_set:
                index_set.add(idx)
                index_list.append(idx)
        # Doc idxs that haven't been observed in the rerank list are put at
        # the end of the list.
        for idx in range(len(rerank_lists[i])):
            if idx not in index_set:
                index_list.append(idx)
        # Build the new ranking, dropping padding documents (index < 0).
        qid = data.qids[i]
        did_list = []
        new_list = [data.initial_list[i][idx] for idx in index_list]
        for ni in new_list:
            if ni >= 0:
                did_list.append(data.dids[ni])
        qid_list_map[qid] = did_list
    return qid_list_map
def is_merge_brances_has_written(from_branch, to_branch, merge_msg="auto merge"):
    """
    Run ``git merge`` and report whether it produced changes.

    Returns True if the merge is successful and modified tracked files;
    returns False when git reports there was nothing to merge.

    NOTE(review): the function name contains typos ("brances") but is kept
    unchanged for API compatibility.
    """
    merge_output = git("merge", from_branch, to_branch, "-m", merge_msg,
                       _iter=True, _tty_out=False)
    return not any("Already up-to-date." in line for line in merge_output)
def desktop_extra_assigner(self, user):
    """Assign the extra packages name of the selected desktop.

    Arguments
    ---------
    user: "Dictionary containing user's answers"

    Returns
    -------
    "String containing question for the desktop extras"
    """
    extras = ('Gnome extra',
              'KDE applications',
              'Deepin extra',
              'Mate extra',
              'XFCE goodies')
    template = self.trad('Do you wish to install {extra}')
    return template.format(extra=extras[user['desktop']])
from typing import Dict
from typing import Tuple
from typing import List
def _get_openmm_parameters(
    force: openmm.Force,
) -> Dict[Tuple[int, ...], List[Tuple[unit.Quantity, ...]]]:
    """Collect the per-interaction parameters stored on an OpenMM force.

    Args:
        force: The force to retrieve the parameters from.

    Returns:
        A dictionary of the retrieved parameters where each key is a tuple
        of atom indices, and each value is a list of the parameter sets
        associated with those atom indices.
    """
    found_parameters = defaultdict(list)
    if isinstance(force, openmm.HarmonicBondForce):
        n_entries = force.getNumBonds()
        for bond_index in range(n_entries):
            atom_a, atom_b, *values = force.getBondParameters(bond_index)
            found_parameters[(atom_a, atom_b)].append(values)
    elif isinstance(force, openmm.HarmonicAngleForce):
        n_entries = force.getNumAngles()
        for angle_index in range(n_entries):
            atom_a, atom_b, atom_c, *values = force.getAngleParameters(angle_index)
            found_parameters[(atom_a, atom_b, atom_c)].append(values)
    elif isinstance(force, openmm.PeriodicTorsionForce):
        n_entries = force.getNumTorsions()
        for torsion_index in range(n_entries):
            (
                atom_a,
                atom_b,
                atom_c,
                atom_d,
                *values,
            ) = force.getTorsionParameters(torsion_index)
            found_parameters[(atom_a, atom_b, atom_c, atom_d)].append(values)
    else:
        raise NotImplementedError
    # Sanity check: every interaction was recorded exactly once.
    assert sum(len(v) for v in found_parameters.values()) == n_entries
    return found_parameters
def create_scene(info, color="cpk", scale=1.0, show_bonds=False):
    """Create a fresnel.Scene object.

    Adds geometries for particles, bonds, and box (or boundingbox).

    Parameters
    ----------
    info : list
        List containing N, types, typeids, positions, N_bonds, bonds, box
    color : str or dict, default "cpk"
        Color scheme to use
        ("cpk", "bsu", name of a matplotlib colormap, or a custom dictionary
        mapping particle type names to colors)
    scale : float, default 1.0
        Scaling factor for the particle radii, bond and box lengths
    show_bonds : bool, default False
        Whether to show bonds

    Returns
    -------
    fresnel.Scene

    Raises
    ------
    ValueError
        If `color` is a string that is neither "cpk", "bsu", nor a valid
        matplotlib colormap name.
    """
    N, types, typeids, positions, N_bonds, bonds, box = info
    color_array = np.empty((N, 3), dtype="float64")
    # isinstance (was `type(color) is dict`) also accepts dict subclasses
    # such as OrderedDict.
    if isinstance(color, dict):
        # Populate the color_array with colors based on particle name
        # if name is not defined in the dictionary, try using the cpk dictionary
        for i, n in enumerate(typeids):
            try:
                ncolor = color[types[n]]
                color_array[i, :] = fresnel.color.linear(
                    mplcolors.to_rgba(ncolor)
                )
            except KeyError:
                try:
                    color_array[i, :] = cpk_colors[types[n]]
                except KeyError:
                    color_array[i, :] = cpk_colors["default"]
    elif color == "cpk":
        # Populate the color_array with colors based on particle name
        # -- if name is not defined in the dictionary, use pink (the default)
        for i, n in enumerate(typeids):
            try:
                color_array[i, :] = cpk_colors[types[n]]
            except KeyError:
                color_array[i, :] = cpk_colors["default"]
    elif color == "bsu":
        # Populate the color array with the brand standard bsu colors
        # https://www.boisestate.edu/communicationsandmarketing/
        # brand-standards/colors/
        # if there are more unique particle names than colors,
        # colors will be reused
        for i, n in enumerate(typeids):
            color_array[i, :] = bsu_colors[n % len(bsu_colors)]
    else:
        # Populate the color_array with colors based on particle name
        # choose colors evenly distributed through a matplotlib colormap
        try:
            cmap = matplotlib.cm.get_cmap(name=color)
        except ValueError:
            print(
                "The 'color' argument takes either 'cpk', 'bsu', or the name "
                "of a matplotlib colormap."
            )
            raise
        mapper = matplotlib.cm.ScalarMappable(
            norm=mplcolors.Normalize(vmin=0, vmax=1, clip=True), cmap=cmap
        )
        N_types = len(types)
        v = np.linspace(0, 1, N_types)
        # Color by typeid
        for i in range(N_types):
            color_array[typeids == i] = fresnel.color.linear(
                mapper.to_rgba(v)[i]
            )
    # Make an array of the radii based on particle name
    # -- if name is not defined in the dictionary, use default
    rad_array = np.empty((N), dtype="float64")
    for i, n in enumerate(typeids):
        try:
            rad_array[i] = radii_dict[types[n]] * scale
        except KeyError:
            rad_array[i] = radii_dict["default"] * scale
    ## Start building the fresnel scene
    scene = fresnel.Scene()
    # Spheres for every particle in the system
    geometry = fresnel.geometry.Sphere(scene, N=N)
    geometry.position[:] = positions
    geometry.material = fresnel.material.Material(roughness=1.0)
    geometry.outline_width = 0.01 * scale
    # use color instead of material.color
    geometry.material.primitive_color_mix = 1.0
    geometry.color[:] = color_array
    # resize radii
    geometry.radius[:] = rad_array
    # bonds
    if N_bonds > 0 and show_bonds:
        bond_cyls = fresnel.geometry.Cylinder(scene, N=N_bonds)
        bond_cyls.material = fresnel.material.Material(roughness=0.5)
        bond_cyls.outline_width = 0.01 * scale
        # bonds are white
        bond_colors = np.ones((N_bonds, 3), dtype="float64")
        bond_cyls.material.primitive_color_mix = 1.0
        bond_cyls.points[:] = bonds
        bond_cyls.color[:] = np.stack(
            [
                fresnel.color.linear(bond_colors),
                fresnel.color.linear(bond_colors)
            ], axis=1
        )
        bond_cyls.radius[:] = [0.03 * scale] * N_bonds
    # Create box in fresnel
    fresnel.geometry.Box(scene, box, box_radius=0.008 * scale)
    # Set the initial camera position
    max_dist = np.max(positions) - np.min(positions)
    scene.camera.height = 1.5 * max_dist
    scene.camera.position = [max_dist, max_dist, max_dist]
    return scene
from bs4 import BeautifulSoup
def crawl_malware_domains(url):
    """Crawl the malware domain indicator page and collect dataset links
    to be downloaded and scraped later.

    The index page lists timestamped dataset files (names starting with a
    digit) and "removed-domains-*" files; both are resolved against *url*.

    @param
        url (string) url of the indicator web page
    @return
        dict with two lists of absolute links: "blocked" and "removed"
    """
    print('Crawling site: ', url)
    downloader = Downloader()
    print(url)
    html = downloader(url)
    soup = BeautifulSoup(html, 'html5lib')
    blocked_links = []
    removed_links = []
    for link in soup.find_all('a'):
        href = link.attrs.get('href')
        if not href:
            # Guard: anchors with a missing or empty href used to crash on
            # href[0] below.
            continue
        if href[0].isdigit():
            # Dataset files are named by timestamp, e.g. "20170211.txt".
            blocked_links.append('{}{}'.format(url, href))
        elif href.startswith('removed-domains-'):
            # Removed-IP lists.
            removed_links.append('{}{}'.format(url, href))
    return {"blocked": blocked_links, "removed": removed_links}
import asyncio
def ip_address_middleware(get_response):
    """A Middleware attaching the client IP address, its routability and
    the resolved country to the request object.

    Supports both synchronous and asynchronous ``get_response`` callables.
    """
    def _annotate(request):
        # Shared between the sync and async paths (previously duplicated).
        client_ip, is_routable = get_client_ip(request)
        request.client_ip = client_ip
        request.is_client_ip_routable = is_routable
        request.country = get_country_by_ip(client_ip)

    if asyncio.iscoroutinefunction(get_response):
        async def middleware(request):
            _annotate(request)
            return await get_response(request)
    else:
        def middleware(request):
            _annotate(request)
            return get_response(request)
    return middleware
from datetime import datetime
def parse_datetime(datetime_str: str) -> datetime.datetime:
    """Parse *datetime_str* against the formats in ``cfg.datetime_str_formats``.

    Tries each configured format in order and returns the first successful
    parse.  Returns ``None`` when no format matches (was an implicit
    fall-through before; callers must handle it).

    Note: the original annotation used ``Text``, which is not imported in
    this module; ``str`` is equivalent.
    """
    for str_format in cfg.datetime_str_formats:
        try:
            return datetime.datetime.strptime(datetime_str, str_format)
        except ValueError:
            # Wrong format for this string; try the next one.
            continue
    return None
import re
import logging
def get_gpu():
    """Returns video device as listed by WMI.

    Not cached as the GPU driver may change underneat.

    Returns:
        tuple: (dimensions, state) — sorted vendor/device id strings and
        sorted human-readable descriptions, or (None, None) when WMI is
        unavailable.
    """
    wbem = _get_wmi_wbem()
    if not wbem:
        return None, None
    _, pythoncom = _get_win32com()
    # Compiled once instead of per device (were re.search with a literal
    # pattern inside the loop). The PNP id looks like:
    # PCI\VEN_15AD&DEV_0405&SUBSYS_040515AD&REV_00\3&2B8E0B4B&0&78
    ven_re = re.compile(r'VEN_([0-9A-F]{4})')
    dev_re = re.compile(r'DEV_([0-9A-F]{4})')
    dimensions = set()
    state = set()
    # https://msdn.microsoft.com/library/aa394512.aspx
    try:
        for device in wbem.ExecQuery('SELECT * FROM Win32_VideoController'):
            pnp_string = device.PNPDeviceID
            ven_id = u'UNKNOWN'
            dev_id = u'UNKNOWN'
            match = ven_re.search(pnp_string)
            if match:
                ven_id = match.group(1).lower()
            match = dev_re.search(pnp_string)
            if match:
                dev_id = match.group(1).lower()
            dev_name = device.VideoProcessor or u''
            version = device.DriverVersion or u''
            ven_name, dev_name = gpu.ids_to_names(
                ven_id, u'Unknown', dev_id, dev_name)
            dimensions.add(ven_id)
            dimensions.add(u'%s:%s' % (ven_id, dev_id))
            if version:
                dimensions.add(u'%s:%s-%s' % (ven_id, dev_id, version))
                state.add(u'%s %s %s' % (ven_name, dev_name, version))
            else:
                state.add(u'%s %s' % (ven_name, dev_name))
    except pythoncom.com_error as e:
        # This generally happens when this is called as the host is shutting down.
        logging.error('get_gpu(): %s', e)
    return sorted(dimensions), sorted(state)
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
    """
    Search nodes matching `filter_` but stop at `maxlevel` or `stop`.

    Return tuple with matching nodes.

    Args:
        node: top node, start searching.

    Keyword Args:
        filter_: function called with every `node` as argument, `node` is returned if `True`.
        stop: stop iteration at `node` if `stop` function returns `True` for `node`.
        maxlevel (int): maximum descending in the node hierarchy.
        mincount (int): minimum number of nodes (a `CountError` is raised
            when fewer are found).
        maxcount (int): maximum number of nodes (a `CountError` is raised
            when more are found).

    Example tree:

    >>> from anytree import Node, RenderTree, AsciiStyle
    >>> f = Node("f")
    >>> b = Node("b", parent=f)
    >>> a = Node("a", parent=b)
    >>> d = Node("d", parent=b)
    >>> c = Node("c", parent=d)
    >>> e = Node("e", parent=d)
    >>> g = Node("g", parent=f)
    >>> i = Node("i", parent=g)
    >>> h = Node("h", parent=i)
    >>> findall(f, filter_=lambda node: node.name in ("a", "b"))
    (Node('/f/b'), Node('/f/b/a'))
    >>> findall(f, filter_=lambda node: d in node.path)
    (Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))

    The number of matches can be limited:

    >>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
    >>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
    """
    # Thin wrapper: the traversal and count validation live in _findall.
    search_options = dict(
        filter_=filter_,
        stop=stop,
        maxlevel=maxlevel,
        mincount=mincount,
        maxcount=maxcount,
    )
    return _findall(node, **search_options)
from typing import Union
from typing import Iterable
from typing import Mapping
from typing import Optional
from typing import Hashable
def concat(
    objs: Union[
        Iterable[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion]
    ],
    axis=0,
    join="outer",
    ignore_index: bool = False,
    keys=None,
    levels=None,
    names=None,
    verify_integrity: bool = False,
    sort: bool = False,
    copy: bool = True,
) -> FrameOrSeriesUnion:
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes.

    Can also add a layer of hierarchical indexing on the concatenation axis,
    which may be useful if the labels are the same (or overlapping) on
    the passed axis number.

    Parameters
    ----------
    objs : a sequence or mapping of Series or DataFrame objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected. Any None objects will be dropped silently unless
        they are all None in which case a ValueError will be raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    ignore_index : bool, default False
        If True, do not use the index values along the concatenation axis;
        the resulting axis will be labeled 0, ..., n - 1. Note the index
        values on the other axes are still respected in the join.
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct
        hierarchical index using the passed keys as the outermost level.
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : bool, default False
        Check whether the new concatenated axis contains duplicates. This can
        be very expensive relative to the actual data concatenation.
    sort : bool, default False
        Sort non-concatenation axis if it is not already aligned when `join`
        is 'outer'. This has no effect when ``join='inner'``, which already
        preserves the order of the non-concatenation axis.

        .. versionadded:: 0.23.0
        .. versionchanged:: 1.0.0
           Changed to not sort by default.
    copy : bool, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    object, type of objs
        When concatenating all ``Series`` along the index (axis=0), a
        ``Series`` is returned. When ``objs`` contains at least one
        ``DataFrame``, a ``DataFrame`` is returned. When concatenating along
        the columns (axis=1), a ``DataFrame`` is returned.

    See Also
    --------
    Series.append : Concatenate Series.
    DataFrame.append : Concatenate DataFrames.
    DataFrame.join : Join DataFrames using indexes.
    DataFrame.merge : Merge DataFrames by indexes or columns.

    Notes
    -----
    The keys, levels, and names arguments are all optional. A walkthrough of
    how this method fits in with other tools for combining pandas objects can
    be found `here
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__.

    Examples
    --------
    >>> s1 = pd.Series(['a', 'b'])
    >>> s2 = pd.Series(['c', 'd'])
    >>> pd.concat([s1, s2], ignore_index=True)
    0    a
    1    b
    2    c
    3    d
    dtype: object

    >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
    ...                    columns=['letter', 'number'])
    >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
    ...                    columns=['letter', 'number'])
    >>> pd.concat([df1, df2])
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4
    """
    # All validation, axis alignment and block management is delegated to
    # the _Concatenator helper.
    concatenator = _Concatenator(
        objs,
        axis=axis,
        ignore_index=ignore_index,
        join=join,
        keys=keys,
        levels=levels,
        names=names,
        verify_integrity=verify_integrity,
        copy=copy,
        sort=sort,
    )
    return concatenator.get_result()
import random
import string
def create_hash():
    """
    Create a unique 8-character alphanumeric hash for a URL.

    Regenerates until the hash does not collide with an existing record.
    (The previous implementation recursed on collision but discarded the
    recursive result, so it returned the colliding hash anyway.)

    NOTE(review): ``random`` is not cryptographically secure; switch to the
    ``secrets`` module if these hashes must be unguessable.

    :return: Hash
    """
    alphabet = string.ascii_letters + string.digits
    while True:
        _hash = ''.join(random.choice(alphabet) for _ in range(8))
        if not UrlShortenModel.find_by_hash(_hash):
            return _hash
def str_or_list_like(x):
    """Classify *x* as "str", "list_like" or "others" using duck-typing.

    Strings (and bytes) are detected via the presence of ``strip``; any
    other object supporting ``__getitem__`` or ``__iter__`` (list, tuple,
    set, dict, ...) counts as list-like; everything else is "others".

    | x                | type(x)          | x.strip | x.__getitem__ | x.__iter__ |
    | aa               | <class 'str'>    | True    | True          | True       |
    | ['a', 'b']       | <class 'list'>   | False   | True          | True       |
    | ('a', 'b')       | <class 'tuple'>  | False   | True          | True       |
    | {'b', 'a'}       | <class 'set'>    | False   | False         | True       |
    | {'a': 1, 'b': 2} | <class 'dict'>   | False   | True          | True       |
    """
    if hasattr(x, "strip"):
        return "str"
    if hasattr(x, "__getitem__"):
        return "list_like"
    if hasattr(x, "__iter__"):
        return "list_like"
    return "others"
from typing import Optional
def get_carrier_gateway(carrier_gateway_id: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCarrierGatewayResult:
    """
    An example resource schema demonstrating some basic constructs and validation rules.

    :param str carrier_gateway_id: The ID of the carrier gateway.
    """
    invoke_args = {'carrierGatewayId': carrier_gateway_id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws-native:ec2:getCarrierGateway',
                                   invoke_args, opts=opts,
                                   typ=GetCarrierGatewayResult).value
    return AwaitableGetCarrierGatewayResult(
        carrier_gateway_id=result.carrier_gateway_id,
        owner_id=result.owner_id,
        state=result.state,
        tags=result.tags)
def _bootstrap_dm(ids, dm, new_names=None):
    """Build a bootstrapped (resampled) copy of a distance matrix.

    Parameters
    ----------
    ids: array-like
        Ids to sample from the matrix; duplicates are allowed.
    dm : DistanceMatrix
        The distance matrix object to resample.
    new_names: array_like, optional
        Unique names for the resampled matrix; defaults to a numeric
        index when not given.

    Returns
    -------
    skbio.DistanceMatrix
        The resampled matrix labelled with ``new_names``.
    """
    if new_names is None:
        new_names = np.arange(0, len(ids))
    matrix_ids = dm.ids
    # Map each requested id to its row/column position, then take the
    # corresponding sub-matrix (rows first, then columns).
    positions = [matrix_ids.index(one_id) for one_id in ids]
    resampled = dm.data[positions][:, positions]
    return skbio.DistanceMatrix(resampled, new_names)
def ai(listAI, listHuman, list_all):
    """
    Compute the AI's next move position (Gomoku).

    The chosen coordinates are written into the module-level ``next_point``
    list by ``maxmin``; this function only seeds the shared state and
    returns the result.

    :param listAI: positions already played by the AI
    :param listHuman: positions already played by the human player
    :param list_all: all board positions (passed through to ``maxmin``)
    :return: (x, y) coordinates of the chosen move
    """
    # Opening move: no human stones yet, play the board center (7, 7).
    if len(listHuman) == 0:
        next_point[0] = 7
        next_point[1] = 7
    else:
        # NOTE(review): listAIAndHuman is a module-level global that is
        # extended on every call without being cleared here — confirm the
        # caller resets it, otherwise stones accumulate across moves.
        for i in range(len(listAI)):
            listAIAndHuman.append(listAI[i])
        for i in range(len(listHuman)):
            listAIAndHuman.append(listHuman[i])
    # NOTE(review): maxmin runs even on the opening move (it is outside the
    # else branch) and may overwrite the (7, 7) seeded above — confirm
    # this is intended.
    maxmin(True, DEPTH, -99999999, 99999999, listAI, listHuman, listAIAndHuman, list_all)
    return next_point[0], next_point[1]
import os
def readprobes(path, probes_name="probes", time_name="0", name="U"):
    """Read the data contained in an OpenFOAM probes file.

    Args:
        path: str, path of the OpenFOAM case\n
        probes_name: str, name of the probes function object\n
        time_name: str ('latestTime' and 'mergeTime' are supported)\n
        name: str, name of the sampled field
    Returns:
        array: array of time values and array of probes data;
    A way you might use me is:\n
        probe_data = readprobes('path_of_OpenFoam_case', 'probes', '0', 'U')
    """
    path_probes_name = os.path.join(path, "postProcessing", probes_name)
    # Bug fix: use == instead of `is` — identity comparison against a
    # string literal only works by accident of CPython interning (and is a
    # SyntaxWarning on Python >= 3.8); it fails for strings built at runtime.
    if time_name == "latestTime":
        time_name = _find_latesttime(path_probes_name)
    elif time_name == "mergeTime":
        # Collect every time directory, read each one recursively and
        # append only samples newer than what was already merged.
        time_list = []
        dir_list = os.listdir(path + "/postProcessing/" + probes_name)
        for directory in dir_list:
            try:
                float(directory)
                time_list.append(directory)
            except ValueError:
                # Narrowed from a bare except: only skip names that are
                # not parseable as a time value.
                pass
        time_list.sort(key=float)
        time_list = np.array(time_list)
        for timename in time_list:
            time_vect, tab = readprobes(path, probes_name, timename, name)
            if "tab_merge" in locals():
                # Find the first sample strictly after the merged range.
                for jj in range(np.size(time_vect[:])):
                    if time_vect[jj] > timevect_merge[-1]:
                        break
                    else:
                        continue
                if jj + 1 < np.size(time_vect[:]):
                    timevect_merge = np.concatenate(
                        [timevect_merge, time_vect[jj:]]
                    )
                    tab_merge = np.concatenate([tab_merge, tab[jj:, :]])
            else:
                timevect_merge = time_vect
                tab_merge = tab
        return timevect_merge, tab_merge
    with open(os.path.join(path_probes_name, time_name, name), "rb") as f:
        content = f.readlines()
    j = 0
    header = True
    for dummy, line in enumerate(content):
        if "#".encode() in line:
            # Header lines (probe locations etc.) start with '#'.
            j += 1
        elif "#".encode() not in line and header:
            # First data line: detect the number of components per probe
            # (dim) and allocate the output arrays.
            header = False
            line = line.replace(b")", b"")
            line = line.split(b"(")
            try:
                dim = len(line[1].split())
            except IndexError:
                dim = 1  # scalar field: no parentheses in the line
            line = line[0].split()
            time_vect = np.zeros(len(content) - j)
            time_vect[0] = line[0]
            tab = np.zeros([len(content) - j, len(line) - 1, dim], dtype=float)
            print(
                "Reading file "
                + os.path.join(
                    path, "postProcessing", probes_name, time_name, name
                )
            )
            print(
                str(len(line) - 1)
                + " probes over "
                + str(len(tab[:, 0, 0]))
                + " timesteps"
            )
            for k, probedata in enumerate(line[1:]):
                values = probedata.split()
                for l, vect in enumerate(values):
                    tab[0, k, l] = np.array(vect, dtype=float)
            j = 0
        else:
            # Subsequent data lines: time value followed by one
            # parenthesized (or bare, for scalars) group per probe.
            j += 1
            line = line.replace(b")", b"")
            line = line.split(b"(")
            try:
                time_vect[j] = line[0]
            except ValueError:
                line = line[0].split()
                time_vect[j] = line[0]
            for k, probedata in enumerate(line[1:]):
                values = probedata.split()
                for l, vect in enumerate(values):
                    tab[j, k, l] = np.array(vect, dtype=float)
    return time_vect, tab
import itertools
def list_as_range_strings(values):
    """
    Format a list of single-range strings from a list of values.

    Note: sorts *values* in place so that consecutive numbers can be
    grouped into runs before formatting each run.
    """
    values.sort()  # make sure numbers are consecutive
    counter = itertools.count()
    # n - next(counter) is constant across a run of consecutive numbers,
    # so groupby splits the sorted values into those runs.
    runs = itertools.groupby(values, key=lambda n: n - next(counter))
    return [list_as_range_string(run) for _, run in runs]
def squared_distance(v: Vector, w: Vector) -> float:
    """Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
    return sum((v_i - w_i) ** 2 for v_i, w_i in zip(v, w))
def fp2tan(xfp, yfp):
    """
    Convert focal plane to tangent plane coordinates.

    Args:
        xfp, yfp: CS5 focal plane coordinates in mm

    Returns (xtan, ytan) where xtan = sin(theta)*cos(phi) and
    ytan = sin(theta)*sin(phi); phi = 0 is aligned with
    +xtan = -RA = +HA = +xfp.
    """
    azimuth = np.arctan2(yfp, xfp)
    radius = np.sqrt(xfp**2 + yfp**2)
    # radius2theta maps focal-plane radius (mm) to field angle (deg).
    theta = radius2theta(radius)
    sin_theta = np.sin(np.radians(theta))
    return sin_theta * np.cos(azimuth), sin_theta * np.sin(azimuth)
def bounding_box(fixtures, pose):
    """Get the axis aligned bounding box of the fixtures.

    Args:
        fixtures (iterable): fixtures to bound
        pose (tuple): an (x, y, theta) tuple; every fixture is
            transformed by this pose before bounding.

    Returns:
        b2.aabb or None: the smallest axis-aligned bounding box enclosing
        all transformed fixtures, or None when no fixtures are supplied
        (rather than an empty bounding box).
    """
    xf = b2.transform()
    xf.position = pose[0:2]
    xf.angle = pose[2]
    box = None
    for fixture in fixtures:
        # 0 is the 'child index' expected by getAABB; its meaning is not
        # described in the pybox2d documentation.
        fixture_box = fixture.shape.getAABB(xf, 0)
        if box is None:
            box = fixture_box
        else:
            box.Combine(fixture_box)
    return box
def dir_key(dir_name):
    """Sort key for result directories: 1000*abits + 100*wbits + 100*r.

    Unparseable directory names sort first (key 0).
    """
    parsed = parse_dir(dir_name)
    if parsed is None:
        return 0
    abits, wbits, r = parsed
    return 1000 * abits + 100 * wbits + int(100 * r)
def get_impact_from_xmlfile(element):
    """Gets the impact value of a step/testcase/suite from the
    testcase.xml/testsuite.xml/project.xml file """
    # Thin delegation to the module-level TCOBJ helper; kept so callers
    # can access the functionality from this module's namespace.
    return TCOBJ.get_impact_from_xmlfile(element)
def readlist(infile):
    """Read a text file and return its lines (newlines preserved) as a list.

    :param infile: path of the file to read
    :return: list of lines, each retaining its trailing newline
    """
    # readlines() already returns a list, so the original's identity
    # comprehension around it was redundant.
    with open(infile, 'r') as f:
        return f.readlines()
def is_indvar(expr):
    """
    Check whether *expr* is an individual variable: a single lowercase
    character other than 'e', followed by zero or more digits.

    :param expr: str
    :return: bool True if expr is of the correct form
    """
    assert isinstance(expr, string_types), "%s is not a string" % expr
    match = INDVAR_RE.match(expr)
    return match is not None
from scipy import signal
def estimate_ringing_samples(system, max_try=100000):
    """Estimate filter ringing.
    Parameters
    ----------
    system : tuple | ndarray
        A tuple of (b, a) or ndarray of second-order sections coefficients.
    max_try : int
        Approximate maximum number of samples to try.
        This will be changed to a multiple of 1000.
    Returns
    -------
    n : int
        The approximate ringing, in samples.
    """
    if isinstance(system, tuple): # TF
        kind = 'ba'
        b, a = system
        zi = [0.] * (len(a) - 1)
    else:
        kind = 'sos'
        sos = system
        zi = [[0.] * 2] * len(sos)
    n_per_chunk = 1000
    n_chunks_max = int(np.ceil(max_try / float(n_per_chunk)))
    # Unit impulse input; later chunks feed zeros and rely on the filter
    # state zi carried across chunks, so h is the impulse response.
    x = np.zeros(n_per_chunk)
    x[0] = 1
    last_good = n_per_chunk
    thresh_val = 0
    for ii in range(n_chunks_max):
        if kind == 'ba':
            h, zi = signal.lfilter(b, a, x, zi=zi)
        else:
            h, zi = signal.sosfilt(sos, x, zi=zi)
        x[0] = 0 # for subsequent iterations we want zero input
        h = np.abs(h)
        # Threshold is 0.1% of the largest response magnitude seen so far.
        thresh_val = max(0.001 * np.max(h), thresh_val)
        idx = np.where(np.abs(h) > thresh_val)[0]
        if len(idx) > 0:
            last_good = idx[-1]
        else: # this iteration had no sufficiently large values
            idx = (ii - 1) * n_per_chunk + last_good
            break
    else:
        # for-else: loop exhausted without the response ever dying out.
        warn('Could not properly estimate ringing for the filter')
        idx = n_per_chunk * n_chunks_max
    return idx
def read_popularity(path):
    """
    Read object popularity ids from a file of '<id>,<rank>' rows.

    Rows whose leading field is not an integer are skipped.

    :param path: a path of popularity file. A file contains '<id>,<rank>' rows.
    :return: a set of popularity object ids
    """
    ids = set()
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(path) as fh:
        for line in fh:
            try:
                ident = int(line.split(",", maxsplit=1)[0])
            except ValueError:
                # Bug fix: int() raises ValueError on a malformed id, which
                # the original's (AttributeError, IndexError) never caught
                # (split() always yields at least one field), so bad rows
                # crashed instead of being skipped as intended.
                continue
            ids.add(ident)
    return ids
def classify(tree, input):
    """Classify *input* by walking the given decision tree.

    A tree is either a boolean leaf, or a tuple of
    (attribute, subtree_dict). Inputs missing an attribute (or carrying
    an unseen value) follow the None-keyed subtree.
    """
    # Leaf node: the tree itself is the classification.
    if tree in [True, False]:
        return tree
    attribute, branches = tree
    branch_key = input.get(attribute)  # None if input is missing attribute
    if branch_key not in branches:
        branch_key = None  # fall back to the None subtree
    return classify(branches[branch_key], input)
def fmt_null_obj(obj):
    """Convert a null-ish object to an empty string.

    List/tuple elements and dict values are normalized via the module
    helpers; any other type is returned unchanged (after the null check).

    :param obj: the input object
    :return: the normalized object, or '' when obj is considered null
    """
    # __check_null returns falsy for "null" input -> map it to ''.
    if not __check_null(obj): return ''
    if type(obj) in (list, tuple):
        # Change 'None' to ''
        obj_new = __fmt_null_ListTuple(obj)
    elif type(obj) == dict:
        # Change None's value to ''
        obj_new = __fmt_null_Dict(obj)
    else:
        obj_new = obj
    return obj_new
def email_loeschen(request):
    """
    Decrements the number of e-mail forms in the mitgliedBearbeitenView or
    mitgliedErstellenView after a form has been deleted.

    Tasks:
    * Track the number of e-mail forms (module-global ``emailnum``)
    * Access restriction: only authenticated superusers may trigger this

    :param request: The Ajax request that triggered this view.
    :return: HTTP response ("Permission denied", or empty on success)
    """
    if not request.user.is_authenticated:
        return HttpResponse("Permission denied")
    if not request.user.is_superuser:
        return HttpResponse("Permission denied")
    # Shared counter used by the member create/edit views.
    global emailnum
    emailnum-=1
    return HttpResponse()
def extendDynaForm(dynaform, dynainclude=None, dynaexclude=None,
                   dynaproperties=None, append=False):
    """Extends an existing dynaform.
    If any of dynainclude, dynaexclude or dynaproperties are not present,
    they are retrieved from dynaform (if present in it's Meta class).
    While it is rather useless to extend from a dynaform that does not have
    a Meta class, it is allowed, the resulting DynaForm is the same as if
    newDynaForm was called with all extendDynForm's keyword arguments.
    If append is True, the form's original values for include and
    exclude will be appended to the supplied dynainclude and
    dynaexclude, which both are still allowed to be None.
    """
    # Try to retrieve the Meta class from the existing dynaform
    meta = getattr(dynaform, 'Meta', None)
    # If we find one, we can use it to 'extend' from
    if meta:
        dynamodel = getattr(meta, 'model', None)
        originclude = getattr(meta, 'include', [])
        origexclude = getattr(meta, 'exclude', [])
        # Caller-supplied lists win; the originals are emptied so that the
        # `append` branch below does not duplicate them.
        if not dynainclude:
            dynainclude = originclude
            originclude = []
        if not dynaexclude:
            dynaexclude = origexclude
            origexclude = []
        if append:
            dynainclude += originclude
            dynaexclude += origexclude
        # The most interesting parameter, the 'extra fields' dictionary
        dynaconf = getattr(meta, 'dynaconf', {})
        dynaproperties = dicts.merge(dynaproperties, dynaconf)
    # NOTE(review): when dynaform has no Meta class, `dynamodel` is never
    # bound and the call below raises NameError — this contradicts the
    # docstring's claim that Meta-less forms are allowed; confirm.
    # Create a new DynaForm, using the properties we extracted
    return newDynaForm(
        dynamodel=dynamodel,
        dynabase=dynaform,
        dynainclude=dynainclude,
        dynaexclude=dynaexclude,
        dynaproperties=dynaproperties)
def _parse_write_checkpoint(write_checkpoint):
""" Returns the appropriate value of ``write_checkpoint``. """
if isinstance(write_checkpoint, bool):
if not write_checkpoint:
write_checkpoint = "NONE"
else:
write_checkpoint = "ALL"
if write_checkpoint.upper() not in ("NONE", "MINIMAL", "ALL", "BOTH", "FULL"):
LOG.warning(
f"Invalid value of `write_checkpoint` provided: {write_checkpoint}, using 'ALL'"
)
write_checkpoint = "ALL"
return write_checkpoint | 752db50a4782fb13584ee0daf691f37edcb095eb | 3,627,542 |
def compute_shape_features(df_samples, sig, center='trough'):
    """Compute shape features for each spike.
    Parameters
    ---------
    df_samples : pandas.DataFrame
        Contains cycle points locations for each spike.
    sig : 1d array
        Voltage time series.
    center : {'trough', 'peak'}
        Center extrema of the spike.
    Returns
    -------
    df_shape_features : pd.DataFrame
        Dataframe containing spike shape features. Each row is one cycle. Columns:
        - time_decay : time between trough and start
        - time_rise : time between trough and next peak
        - time_decay_sym : fraction of cycle in the first decay period
        - time_rise_sym : fraction of cycle in the rise period
        - volt_trough : Voltage at the trough.
        - volt_last_peak : Voltage at the last peak.
        - volt_next_peak : Voltage at the next peak.
        - volt_decay : Voltage at the decay before the trough.
        - volt_rise : Voltage at the rise after the trough.
        - period : The period of each spike.
        - time_trough : Time between zero-crossings adjacent to trough.
    """
    # NOTE(review): the `center` parameter is not referenced anywhere in
    # this function body — confirm whether it is still needed.
    # Compute durations
    period, time_trough = compute_durations(df_samples)
    # Compute extrema and zero-crossing voltage
    volts = compute_voltages(df_samples, sig)
    volt_trough, volt_last_peak, volt_next_peak, volt_decay, volt_rise, = volts
    # Compute symmetry characteristics
    sym_features = compute_symmetry(df_samples)
    # Organize shape features into a dataframe
    shape_features = {}
    shape_features['period'] = period
    shape_features['time_trough'] = time_trough
    shape_features['volt_trough'] = volt_trough
    # NOTE(review): the next two assignments cross the unpacked values
    # (the 'volt_last_peak' column receives volt_next_peak and vice
    # versa). This looks like a swap bug — verify against the return
    # order of compute_voltages before relying on these columns.
    shape_features['volt_last_peak'] = volt_next_peak
    shape_features['volt_next_peak'] = volt_last_peak
    shape_features['volt_decay'] = volt_decay
    shape_features['volt_rise'] = volt_rise
    shape_features['time_decay'] = sym_features['time_decay']
    shape_features['time_rise'] = sym_features['time_rise']
    shape_features['time_decay_sym'] = sym_features['time_decay_sym']
    shape_features['time_rise_sym'] = sym_features['time_rise_sym']
    df_shape_features = pd.DataFrame.from_dict(shape_features)
    return df_shape_features
def uCSIsCyrillic(code):
    """Check whether the character is part of Cyrillic UCS Block """
    # Thin wrapper over the native libxml2 binding.
    return libxml2mod.xmlUCSIsCyrillic(code)
def ping():
    """
    Send a ping query
    ---
    tags:
      - ping
    parameters:
      - in: query
        name: time
        description: timestamp
        required: false
        type: integer
      - in: states
        name: states
        description: states
        required: false
        type: string
    responses:
      200:
        description: pong received
    """
    # NOTE: the YAML docstring above is a Swagger/flasgger spec consumed at
    # runtime — edit with care. The declared query parameters are currently
    # ignored by the handler, which always answers with plain-text "Pong".
    return Response("Pong", mimetype="text/plain")
import string
def names_to_usernames(names):
    """
    Take the given list of names and convert it to usernames.
    "John Doe" -> "john.doe"

    Each name is stripped before conversion, then split by spaces.
    Empty lines and lines starting with '#' are skipped.

    :param names: iterable of full-name strings
    :return: list of unique usernames (order unspecified)
    :raises ValueError: if a name contains characters other than ASCII
        letters and spaces, has fewer than two parts, or produces a
        duplicate username
    """
    allowed_chars = set(string.ascii_letters + " ")
    usernames = set()
    for name in names:
        name = name.strip()
        # Empty or comment.
        if not name or name.startswith("#"):
            continue
        # Illegal characters. ValueError (an Exception subclass, so
        # backward compatible) replaces the original bare Exception for
        # more precise handling by callers.
        if not set(name).issubset(allowed_chars):
            raise ValueError("Invalid characters found: %s" % name)
        name_parts = name.lower().split()
        # Invalid name format (expected full name).
        if len(name_parts) <= 1:
            raise ValueError("Too few parts: %s" % name_parts)
        # Convert to username.
        username = ".".join(name_parts)
        if username in usernames:
            raise ValueError("Duplicate: %s" % username)
        usernames.add(username)
    return list(usernames)
def measurement_chain_with_equipment() -> MeasurementChain:
    """Get a default measurement chain with attached equipment.

    Builds a current-measurement chain of three stages — analog source,
    A/D conversion and calibration — each attached to its own
    MeasurementEquipment instance.
    """
    # Analog source producing a voltage signal with 1 % error.
    source = SignalSource(
        "Current measurement",
        output_signal=Signal(signal_type="analog", units="V"),
        error=Error(Q_(1, "percent")),
    )
    # Linear A/D conversion (a*x + b), assumed error-free.
    ad_conversion = SignalTransformation(
        "AD conversion current measurement",
        error=Error(Q_(0, "percent")),
        func=MathematicalExpression(
            expression="a*x+b", parameters=dict(a=Q_(1, "1/V"), b=Q_(1, ""))
        ),
    )
    # Linear calibration to amperes with 1.2 % error.
    calibration = SignalTransformation(
        "Current measurement calibration",
        error=Error(Q_(1.2, "percent")),
        func=MathematicalExpression(
            expression="a*x+b", parameters=dict(a=Q_(1, "A"), b=Q_(1, "A"))
        ),
    )
    # Wrap each stage in its own equipment item.
    eq_source = MeasurementEquipment(
        name="Source Equipment",
        sources=[source],
    )
    eq_ad_conversion = MeasurementEquipment(
        name="AD Equipment", transformations=[ad_conversion]
    )
    eq_calibration = MeasurementEquipment(
        name="Calibration Equipment", transformations=[calibration]
    )
    # Assemble the chain: source first, then the two transformations.
    mc = MeasurementChain.from_equipment("Measurement chain", eq_source)
    mc.add_transformation_from_equipment(eq_ad_conversion)
    mc.add_transformation_from_equipment(eq_calibration)
    return mc
def get_all():
    """
    Get All Profiles
    ---
    /api/users/profiles_all:
      get:
        summary: Get all profiles Function
        security:
          - APIKeyHeader: []
        tags:
          - Profile
        responses:
          '200':
            description: Returns all profiles
          '400':
            description: User not found
          '401':
            description: Permission denied
    """
    # Resolve the requesting user; g.user is populated by the auth layer.
    post_user= UserModel.get_one_user(g.user.get('id'))
    if not post_user:
        return custom_response({'error': 'user not found'}, 400)
    data_user= UserSchema.dump(post_user).data
    # Only admins may list all profiles.
    if data_user.get('role') != 'Admin':
        return custom_response({'error': 'permission denied'}, 401)
    posts = profileModel.get_all_profiles()
    data = Profile_schema.dump(posts, many=True).data
    return custom_response(data, 200)
import numpy
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    The result composes (left to right) perspective, translation,
    rotation (static 'sxyz' Euler angles), shear and scale; any part left
    as None contributes the identity. The matrix is normalized so that
    matrix[3, 3] == 1.
    """
    matrix = numpy.identity(4)
    if perspective is not None:
        part = numpy.identity(4)
        part[3, :] = perspective[:4]
        matrix = numpy.dot(matrix, part)
    if translate is not None:
        part = numpy.identity(4)
        part[:3, 3] = translate[:3]
        matrix = numpy.dot(matrix, part)
    if angles is not None:
        rotation = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
        matrix = numpy.dot(matrix, rotation)
    if shear is not None:
        part = numpy.identity(4)
        part[1, 2] = shear[2]
        part[0, 2] = shear[1]
        part[0, 1] = shear[0]
        matrix = numpy.dot(matrix, part)
    if scale is not None:
        part = numpy.identity(4)
        part[0, 0] = scale[0]
        part[1, 1] = scale[1]
        part[2, 2] = scale[2]
        matrix = numpy.dot(matrix, part)
    matrix /= matrix[3, 3]
    return matrix
def value_to_cpp(type_, value):
    """
    Convert a python value into a string representing that value in C++.

    Equivalent to primitive_value_to_cpp but also handles array values
    (still primitive element types only).
    @param type_: a ROS IDL type
    @type type_: builtin.str
    @param value: the value to convert
    @type value: python builtin (bool, int, float, str or list)
    @returns: a string containing the C++ representation of the value
    """
    assert not isinstance(type_, NamespacedType), \
        "Could not convert non-primitive type '%s' to CPP" % (type_)
    assert value is not None, "Value for type '%s' must not be None" % (type_)
    if not isinstance(type_, AbstractNestedType):
        return primitive_value_to_cpp(type_, value)
    is_string_array = isinstance(type_.value_type, AbstractGenericString)
    rendered = []
    for element in value:
        literal = primitive_value_to_cpp(type_.value_type, element)
        # String elements need their own braces for aggregate init.
        rendered.append('{%s}' % literal if is_string_array else literal)
    cpp_value = '{%s}' % ', '.join(rendered)
    if len(rendered) > 1 and not is_string_array:
        # Only wrap in a second set of {} if the array length is > 1.
        # This avoids "warning: braces around scalar initializer"
        cpp_value = '{%s}' % cpp_value
    return cpp_value
def make_reference(x, crop_size, ref_type):
    """
    Build per-player, per-frame reference coordinates for cropping.

    ref_type: {'bmf', 'tb'}
    bmf: Ball-Mid-Frame, normalize by where the ball is at mid-frame
    tb: Track-Ball, normalize by where the ball is at each frame

    x is expected to be an (N, 11, T, 2) array of (ball + 10 player)
    trajectories; crop_size is a pair of odd ints — TODO confirm units.
    Returns an array shaped like x giving, for every entry, the reference
    point shifted by half the crop window.
    """
    print('Running DataUtils:make_reference')
    assert(crop_size[0] % 2 == 1 and crop_size[1] % 2 == 1)
    # x.shape = (N, 11, T, 2)
    if ref_type == 'bmf':
        ball_mid_frame = x[:, 0, x.shape[2] // 2]  # shape = (N,2)
        # Broadcast the single mid-frame ball position to every player
        # and frame: shape = (11, T, N, 2)
        ball_mid_frame = np.tile(ball_mid_frame, (11, x.shape[2], 1, 1))
        ball_mid_frame = np.rollaxis(
            ball_mid_frame, 0, 3)  # shape = (T, N, 11, 2)
        ball_mid_frame = np.rollaxis(
            ball_mid_frame, 0, 3)  # shape = (N, 11, T, 2)
    elif ref_type == 'tb':
        # Per-frame ball position, broadcast to all 11 entities.
        ball_mid_frame = x[:, 0]  # shape = (N,T,2)
        ball_mid_frame = np.tile(ball_mid_frame, (11, 1, 1, 1))
        ball_mid_frame = np.rollaxis(ball_mid_frame, 0, 2)
    else:
        raise Exception(
            'either unknown reference type, or just dont use "crop" in config')
    reference = ball_mid_frame
    # Shift by half the crop window (+1) so crops are centered on the ball.
    r0 = np.ceil(crop_size[0] / 2).astype('int32') + 1
    r1 = np.ceil(crop_size[1] / 2).astype('int32') + 1
    reference = reference - np.tile(np.array([r0, r1]),(x.shape[0], x.shape[1], x.shape[2], 1))
    return reference
def vessel_tip_coupling_data_to_str(data_list):
    """Render a list of vessel tip data elements as a multi-line string."""
    lines = []
    for tip in data_list:
        lines.append('VesselTipData(')
        lines.append(' p = Point(x={}, y={}, z={}),'.format(tip.p.x, tip.p.y, tip.p.z))
        lines.append(' vertex_id = {},'.format(tip.vertex_id))
        lines.append(' pressure = {},'.format(tip.pressure))
        lines.append(' concentration = {},'.format(tip.concentration))
        lines.append(' R2 = {},'.format(tip.R2))
        lines.append(' radius_first = {},'.format(tip.radius_first))
        lines.append(' radius_last = {},'.format(tip.radius_last))
        lines.append(' level = {}'.format(tip.level))
        lines.append('),')
    return '\n'.join(lines)
def user_labels_insert(*args):
    """
    user_labels_insert(map, key, val) -> user_labels_iterator_t
    Insert new (int, qstring) pair into user_labels_t.
    @param map (C++: user_labels_t *)
    @param key (C++: const int &)
    @param val (C++: const qstring &)
    """
    # SWIG-generated thin wrapper; the work happens in the native
    # _ida_hexrays extension module.
    return _ida_hexrays.user_labels_insert(*args)
def RequestReasonInterceptor():
    """Returns an interceptor that adds a request reason header."""
    # Delegates header construction to the module-level
    # _GetRequestReasonHeader factory.
    return HeaderAdderInterceptor(_GetRequestReasonHeader)
from typing import Union
from typing import Dict
from typing import Set
from typing import Any
def mem_usage_pd(pd_obj: Union[pd.DataFrame, pd.Series], index: bool = True, deep: bool = True,
                 details: bool = True) -> Dict[str, Union[str, Set[Any]]]:
    """
    Calculate the memory usage of a pandas object.

    If `details`, returns a dictionary with the memory usage and type of
    each column (DataFrames only). Key=column, value=(memory, type).
    Else returns a dictionary with the total memory usage. Key=`total`, value=memory.

    Parameters
    ----------
    pd_obj : pd.DataFrame or pd.Series
        DataFrame or Series to calculate the memory usage.
    index : bool, default True
        If True, include the memory usage of the index.
    deep : bool, default True
        If True, introspect the data deeply by interrogating object dtypes for system-level
        memory consumption.
    details : bool, default True
        If True and a DataFrame is given, give the detail (memory and type) of each column.

    Returns
    -------
    dict of str to str
        Dictionary with the column or total as key and the memory usage as value (with 'MB').

    Raises
    ------
    AttributeError
        If argument is not a pandas object.
    """
    try:
        usage_b = pd_obj.memory_usage(index=index, deep=deep)
    except AttributeError as e:
        raise AttributeError('Object does not have a `memory_usage` function, '
                             'use only pandas objects.') from e
    # Convert bytes to megabytes.
    usage_mb = usage_b / 1024 ** 2
    res: Dict[str, Union[str, Set[Any]]] = {}
    if details:
        if isinstance(pd_obj, pd.DataFrame):
            # Compatibility fix: Series.iteritems() was removed in pandas
            # 2.0 — .items() is the supported equivalent in all versions.
            res.update({idx: {f'{value:03.2f} MB',
                              pd_obj[idx].dtype if idx != 'Index' else 'Index type'}
                        for (idx, value) in usage_mb.items()})
        else:
            LOGGER.warning('Details is only available for DataFrames.')
    # Sum the memory usage of the columns if this is a DataFrame.
    if isinstance(pd_obj, pd.DataFrame):
        usage_mb = usage_mb.sum()
    res['total'] = f'{usage_mb:03.2f} MB'
    return res
import re
def LF_DG_METHOD_DESC(c):
    """
    Label function that looks for phrases implying a sentence describes
    an experimental design (methods text), labelling it negative (-1).
    """
    #TODO FIX for words that change the sentence meaning from methods to results
    tagged = get_tagged_text(c)
    if "we found" in tagged:
        return 0
    # Match any method-indication phrase, case-insensitively.
    return -1 if re.search(ltp(method_indication), tagged, flags=re.I) else 0
import torch
def gumbel_binary(theta, temperature=0.5, hard=False):
    """Binary-concrete (Gumbel-sigmoid) relaxation of Bernoulli samples.

    theta is a tensor of unnormalized probabilities (logits).

    Returns:
        A tensor of values in (0, 1) that becomes binary as the
        temperature --> 0. With ``hard=True`` the forward pass is rounded
        to {0, 1} while gradients flow through the soft values
        (straight-through estimator).
    """
    # Modernization: torch.rand replaces the deprecated autograd.Variable
    # wrapper, and torch.sigmoid replaces the deprecated F.sigmoid.
    u = torch.rand(theta.size())
    # Logistic noise: log(u / (1 - u)) is a sample from Logistic(0, 1).
    z = theta + torch.log(u / (1 - u))
    a = torch.sigmoid(z / temperature)
    if hard:
        a_hard = torch.round(a)
        # Straight-through: forward uses a_hard, backward uses a's gradient.
        return (a_hard - a).detach() + a
    return a
import re
def price_quantity_us_number(price):
    """Extract the numeric quantity of the price,
    assuming the number uses dot for decimal and comma for thousands, etc.

    Keeps only digits and dots; currency symbols, commas and whitespace
    are dropped.
    """
    kept = [ch for ch in price.strip() if ch in '0123456789.']
    return ''.join(kept)
import typing
import re
def build_global_regexes() -> typing.Dict[str, typing.Pattern]:
    """
    Build the compiled index-expression regexes, keyed by label:
    'ip' matches the unquoted form, 'sp' the bracket-quoted list form.
    """
    number = r'-?\d+'
    # One slice atom: n, n:, :n, n:n, n-n, ':' — all optional.
    atom = r'(({0})|({0}:)|(:{0})|({0}:{0})|({0}-{0})|(:))?'.format(number)
    ip_pattern = r'^{0}(,{0})*$'.format(atom)
    quoted = r'^\[{0}(,{0})*\]$'.format(atom)
    # sp is a comma-separated list of bracketed groups (quoted form
    # with its anchors stripped and re-anchored around the whole list).
    sp_pattern = r'^{0}(,{0})*$'.format(quoted[1:-1])
    return {
        'ip': re.compile(ip_pattern),  # unquoted form only, quoted form is-a sp
        'sp': re.compile(sp_pattern),
    }
import os
def register_api(provider: str, api_dir: str = '.') -> object:
    """
    Class decorator that (optionally) generates an API file for a domain class.

    When the global ``context['register_api']`` flag is truthy, a file named
    ``<tablename>.api`` is written to *api_dir*, rendered from the
    module-level ``api_template`` with the class import line, table name,
    provider and class name. The decorated class is returned unchanged.

    :param provider: provider identifier substituted into the template
    :type provider: str
    :param api_dir: directory the generated ``.api`` file is written to
    :type api_dir: str
    :return: the decorator
    :rtype: callable
    """
    def generate(cls):
        # Generation is gated by a global switch so importing the domain
        # module has no file-system side effects in normal runs.
        if context['register_api']:
            import_str = 'from {} import {}'.format(context['domain_module'], cls.__name__)
            the_func = api_template.format(import_str, cls.__tablename__, provider, cls.__name__)
            with open(os.path.join(api_dir, f'{cls.__tablename__}.api'), "w") as myfile:
                myfile.write(the_func)
                myfile.write('\n')
        return cls
    return generate
def _ratio_sample_rate(ratio):
"""
:param ratio: geodesic distance ratio to Euclid distance
:return: value between 0.008 and 0.144 for ration 1 and 1.1
"""
return 20 * (ratio - 0.98) ** 2 | 1cd2989937a992e2f558b01be6fadebc66c50782 | 3,627,561 |
def normalize_boolean(val):
    """Returns None if val is None, otherwise ensure value
    converted to boolean"""
    return None if val is None else ensure_boolean(val)
def make_braced_expr(tokens):
    """Make a braced expr from a recursive, nested list of tokens.

    ``tokens[0]`` and ``tokens[-1]`` are the opening/closing brace tokens;
    everything in between is flattened and concatenated into the body.
    """
    # Bug fix: the original initialized ``result`` as a string yet called
    # ``result.append(...)`` on it (AttributeError for any non-list token),
    # and indexed the closing brace as tokens[2] instead of tokens[-1].
    parts = []
    for element in tokens[1:-1]:
        if isinstance(element, list):
            parts.append("".join(getattr(t, 'value', t) for t in flatten(element)))
        else:
            parts.append(element.value)
    contents = ''.join(getattr(part, 'content', part) for part in parts)
    return tokens[0].value + contents + tokens[-1].value
def basic_stats(G, area=None, clean_intersects=False, tolerance=15, circuity_dist="gc"):
    """
    Calculate basic descriptive metric and topological stats for a graph.
    For an unprojected lat-lng graph, tolerance and graph units should be in
    degrees, and circuity_dist should be 'gc'. For a projected graph,
    tolerance and graph units should be in meters (or similar) and
    circuity_dist should be 'euclidean'.
    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    area : numeric
        the area covered by the street network, in square meters (typically
        land area); if none, will skip all density-based metrics
    clean_intersects : bool
        if True, calculate consolidated intersections count (and density, if
        area is provided) via consolidate_intersections function
    tolerance : numeric
        tolerance value passed along if clean_intersects=True, see
        consolidate_intersections function documentation for details and usage
    circuity_dist : string
        'gc' or 'euclidean', how to calculate straight-line distances for
        circuity measurement; use former for lat-lng networks and latter for
        projected networks
    Returns
    -------
    stats : dict
        dictionary of network measures containing the following elements (some
        keys may not be present, based on the arguments passed into the function):
          - n = number of nodes in the graph
          - m = number of edges in the graph
          - k_avg = average node degree of the graph
          - intersection_count = number of intersections in graph, that is,
            nodes with >1 street emanating from them
          - streets_per_node_avg = how many streets (edges in the undirected
            representation of the graph) emanate from each node (ie,
            intersection or dead-end) on average (mean)
          - streets_per_node_counts = dict, with keys of number of streets
            emanating from the node, and values of number of nodes with this
            count
          - streets_per_node_proportion = dict, same as previous, but as a
            proportion of the total, rather than counts
          - edge_length_total = sum of all edge lengths in the graph, in meters
          - edge_length_avg = mean edge length in the graph, in meters
          - street_length_total = sum of all edges in the undirected
            representation of the graph
          - street_length_avg = mean edge length in the undirected
            representation of the graph, in meters
          - street_segments_count = number of edges in the undirected
            representation of the graph
          - node_density_km = n divided by area in square kilometers
          - intersection_density_km = intersection_count divided by area in
            square kilometers
          - edge_density_km = edge_length_total divided by area in square
            kilometers
          - street_density_km = street_length_total divided by area in square
            kilometers
          - circuity_avg = edge_length_total divided by the sum of the great
            circle distances between the nodes of each edge
          - self_loop_proportion = proportion of edges that have a single node
            as its two endpoints (ie, the edge links nodes u and v, and u==v)
          - clean_intersection_count = number of intersections in street
            network, merging complex ones into single points
          - clean_intersection_density_km = clean_intersection_count divided by
            area in square kilometers
    """
    sq_m_in_sq_km = 1e6  # there are 1 million sq meters in 1 sq km
    # calculate the number of nodes, n, and the number of edges, m, in the graph
    n = len(G)
    m = len(G.edges())
    # calculate the average degree of the graph
    k_avg = 2 * m / n
    if "streets_per_node" in G.graph:
        # get the degrees saved as a graph attribute (from an undirected
        # representation of the graph). this is not the degree of the nodes in
        # the directed graph, but rather represents the number of streets
        # (unidirected edges) emanating from each node. see
        # count_streets_per_node function.
        spn = G.graph["streets_per_node"]
    else:
        # count how many street segments emanate from each node in this graph
        spn = utils_graph.count_streets_per_node(G)
    # count number of intersections in graph, as nodes with >1 street emanating
    # from them
    node_ids = set(G.nodes())
    intersection_count = sum(
        1 for node, count in spn.items() if (count > 1) and (node in node_ids)
    )
    # calculate streets-per-node average: the average number of streets
    # (unidirected edges) incident to each node
    spna = sum(spn.values()) / n
    # calculate streets-per-node counts: dict where key = number of streets
    # (unidirected edges) incident to each node, and value = how many nodes
    # are of this number in the graph.
    # PERF: tally degrees once (O(n)) instead of calling list.count inside
    # the comprehension (O(n^2)), as the original did.
    degree_tally = {}
    for count in spn.values():
        degree_tally[count] = degree_tally.get(count, 0) + 1
    spnc = {num: degree_tally.get(num, 0) for num in range(max(spn.values()) + 1)}
    # calculate streets-per-node proportion
    # degree proportions: dict where key = each degree and value = what
    # proportion of nodes are of this degree in the graph
    spnp = {num: count / n for num, count in spnc.items()}
    # calculate the total and average edge lengths
    edge_length_total = sum(d["length"] for u, v, d in G.edges(data=True))
    edge_length_avg = edge_length_total / m
    # calculate the total and average street segment lengths (so, edges without
    # double-counting two-way streets).
    # FIX: removed the dead "Gu = None" / "if Gu is None:" guard — Gu was
    # always None at that point, so the check was a leftover.
    Gu = utils_graph.get_undirected(G)
    street_length_total = sum(d["length"] for u, v, d in Gu.edges(data=True))
    street_segments_count = len(Gu.edges(keys=True))
    street_length_avg = street_length_total / street_segments_count
    # calculate clean intersection counts
    if clean_intersects:
        points = simplification.consolidate_intersections(G, tolerance, False, False)
        clean_intersection_count = len(points)
    else:
        clean_intersection_count = None
    # we can calculate density metrics only if area is not null
    if area is not None:
        area_km = area / sq_m_in_sq_km
        # calculate node density as nodes per sq km
        node_density_km = n / area_km
        # calculate intersection density as nodes with >1 street emanating from
        # them, per sq km
        intersection_density_km = intersection_count / area_km
        # calculate edge density as linear meters per sq km
        edge_density_km = edge_length_total / area_km
        # calculate street density as linear meters per sq km
        street_density_km = street_length_total / area_km
        if clean_intersects:
            clean_intersection_density_km = clean_intersection_count / area_km
        else:
            clean_intersection_density_km = None
    else:
        # if area is None, then we cannot calculate density
        node_density_km = None
        intersection_density_km = None
        edge_density_km = None
        street_density_km = None
        clean_intersection_density_km = None
    # average circuity: sum of edge lengths divided by sum of straight-line
    # distance between edge endpoints. first load all the edges origin and
    # destination coordinates as a dataframe, then calculate the straight-line
    # distance
    coords = np.array(
        [
            [G.nodes[u]["y"], G.nodes[u]["x"], G.nodes[v]["y"], G.nodes[v]["x"]]
            for u, v, k in G.edges(keys=True)
        ]
    )
    df_coords = pd.DataFrame(coords, columns=["u_y", "u_x", "v_y", "v_x"])
    if circuity_dist == "gc":
        gc_distances = distance.great_circle_vec(
            lat1=df_coords["u_y"],
            lng1=df_coords["u_x"],
            lat2=df_coords["v_y"],
            lng2=df_coords["v_x"],
        )
    elif circuity_dist == "euclidean":
        gc_distances = distance.euclidean_dist_vec(
            y1=df_coords["u_y"], x1=df_coords["u_x"], y2=df_coords["v_y"], x2=df_coords["v_x"]
        )
    else:
        raise ValueError('circuity_dist must be "gc" or "euclidean"')
    gc_distances = gc_distances.fillna(value=0)
    try:
        circuity_avg = edge_length_total / gc_distances.sum()
    except ZeroDivisionError:
        circuity_avg = np.nan
    # percent of edges that are self-loops, ie both endpoints are the same node
    self_loops_count = sum(1 for u, v, k in G.edges(keys=True) if u == v)
    self_loop_proportion = self_loops_count / m
    # assemble the results
    stats = {
        "n": n,
        "m": m,
        "k_avg": k_avg,
        "intersection_count": intersection_count,
        "streets_per_node_avg": spna,
        "streets_per_node_counts": spnc,
        "streets_per_node_proportion": spnp,
        "edge_length_total": edge_length_total,
        "edge_length_avg": edge_length_avg,
        "street_length_total": street_length_total,
        "street_length_avg": street_length_avg,
        "street_segments_count": street_segments_count,
        "node_density_km": node_density_km,
        "intersection_density_km": intersection_density_km,
        "edge_density_km": edge_density_km,
        "street_density_km": street_density_km,
        "circuity_avg": circuity_avg,
        "self_loop_proportion": self_loop_proportion,
        "clean_intersection_count": clean_intersection_count,
        "clean_intersection_density_km": clean_intersection_density_km,
    }
    # return the results
    return stats
from collections import Counter
def trip_finder(hand):
    """Detect a plain three of a kind in a 5-card hand.

    Cards are two-character strings (rank then suit); ranks are the even
    positions of the concatenated hand string.

    :param hand: iterable of 5 two-character card strings
    :return: ``[6, trip_rank, 0, 0, 0, 0]`` when the hand is exactly three
        of a kind (the next most common rank appears once), otherwise
        ``[0, 0, 0, 0, 0, 0]`` (e.g. for a full house).
    """
    ranks = ''.join(hand)[::2]
    # FIX: import Counter from collections (typing.Counter is a deprecated
    # alias) and count the ranks once instead of three separate times.
    common = Counter(ranks).most_common(2)
    if common[0][1] == 3 and common[1][1] == 1:
        return [6, common[0][0], 0, 0, 0, 0]
    return [0, 0, 0, 0, 0, 0]
def crossfadein(clip, duration):
    """Return a copy of ``clip`` whose mask fades in over ``duration``
    seconds, so the clip appears progressively.

    Only effective when the clip is included in a CompositeVideoClip.
    """
    faded = clip.copy()
    faded.mask = clip.mask.fx(fadein, duration)
    return faded
import torch
import time
def get_feats(model, loader, logger, opt):
    """Run ``model`` over every sample in ``loader`` and collect features
    and labels for the whole dataset.

    Model outputs are L2-normalized and accumulated (on CPU) into tensors
    preallocated once the feature dimension is known.

    Returns:
        (feats, labels): float tensor of shape (N, D) and long tensor (N,),
        where N == len(loader.dataset).
    """
    batch_time = AverageMeterV2('Time', ':6.3f')
    progress = ProgressMeter(len(loader), [batch_time], prefix='Test: ')
    # inference mode: freeze dropout / batch-norm statistics
    model.eval()
    feats = None
    labels = None
    ptr = 0
    with torch.no_grad():
        tic = time.time()
        for batch_idx, (_, images, target, _) in enumerate(loader):
            images = images.cuda(non_blocking=True)
            batch_labels = target.cpu()
            batch_feats = normalize(model(images)).cpu()
            batch_size, feat_dim = batch_feats.shape
            rows = torch.arange(batch_size) + ptr
            if feats is None:
                # lazily allocate once the feature dimension is known
                feats = torch.zeros((len(loader.dataset), feat_dim)).float()
                labels = torch.zeros(len(loader.dataset)).long()
            feats.index_copy_(0, rows, batch_feats)
            labels.index_copy_(0, rows, batch_labels)
            ptr += batch_size
            # measure elapsed time
            batch_time.update(time.time() - tic)
            tic = time.time()
            if batch_idx % opt.print_freq == 0:
                if logger is not None:
                    logger.info(progress.display(batch_idx))
                else:
                    progress.display(batch_idx)
    return feats, labels
from .mastercatalog import MasterCatalog
def match(*args, verbose=True, threshold=0.036*u.arcsec):
    """
    Find sources that match up between any number of dendrocat objects.
    The inputs are combined pairwise, left to right; sources closer than
    ``threshold`` on the sky are merged into a single entry.
    Parameters
    ----------
    *args : `~dendrocat.Radiosource`, `~dendrocat.Mastercatalog`, or `~astropy.table.Table` object
        Two or more catalogs with which to compare radio sources.
    verbose : bool, optional
        If enabled, output is fed to the console.
    threshold : `~astropy.units.Quantity`, optional
        Maximum on-sky separation for two sources to count as the same
        object. Defaults to 0.036 arcsec.
    Returns
    ----------
    `~dendrocat.MasterCatalog` object
        The union of all input catalogs with matched sources merged.
    """
    # original threshold was 1e-5 degrees
    threshold = threshold.to(u.deg).value
    current_arg = args[0]
    for k in range(len(args)-1):
        arg1 = current_arg
        arg2 = args[k+1]
        all_colnames = set(arg1.catalog.colnames + arg2.catalog.colnames)
        stack = vstack([arg1.catalog, arg2.catalog])
        all_colnames.add('_index')
        try:
            stack.add_column(Column(range(len(stack)), name='_index'))
        except ValueError:
            # column already exists — overwrite it instead
            stack['_index'] = range(len(stack))
        stack = stack[sorted(list(all_colnames))]
        rejected = np.where(stack['rejected'] == 1)[0]
        if verbose:
            print('Combining matches')
            pb = ProgressBar(len(stack) - len(rejected))
        i = 0
        while True:
            if i >= len(stack) - 1:
                break
            if i in rejected:
                i += 1
                continue
            teststar = stack[i]
            # offsets of every other non-rejected source from the test star
            delta_p = deepcopy(stack[stack['rejected']==0]['_idx', '_index', 'x_cen', 'y_cen'])
            delta_p.remove_rows(np.where(delta_p['_index']==teststar['_index'])[0])
            delta_p['x_cen'] = np.abs(delta_p['x_cen'] - teststar['x_cen'])
            delta_p['y_cen'] = np.abs(delta_p['y_cen'] - teststar['y_cen'])
            delta_p.sort('x_cen')
            found_match = False
            dist_col = MaskedColumn(length=len(delta_p), name='dist',
                                    mask=True)
            # only the 10 nearest-in-x candidates get a full distance check
            for j in range(min(10, len(delta_p))):
                dist_col[j] = np.sqrt(delta_p[j]['x_cen']**2. +
                                      delta_p[j]['y_cen']**2)
                if dist_col[j] <= threshold:
                    found_match = True
            delta_p.add_column(dist_col)
            delta_p.sort('dist')
            if found_match:
                match_index = np.where(stack['_index'] == delta_p[0]['_index'])
                match = deepcopy(stack[match_index])
                stack.remove_row(match_index[0][0])
                # Find the common bounding ellipse
                new_x_cen = np.average([match['x_cen'], teststar['x_cen']])
                new_y_cen = np.average([match['y_cen'], teststar['y_cen']])
                # Find new ellipse properties
                new_maj, new_min, new_pa = commonbeam(
                    float(match['major_fwhm']),
                    float(match['minor_fwhm']),
                    float(match['position_angle']),
                    float(teststar['major_fwhm']),
                    float(teststar['minor_fwhm']),
                    float(teststar['position_angle'])
                )
                # Replace properties of test star
                stack[i]['x_cen'] = new_x_cen
                stack[i]['y_cen'] = new_y_cen
                stack[i]['major_fwhm'] = new_maj.value
                stack[i]['minor_fwhm'] = new_min.value
                stack[i]['position_angle'] = new_pa.value
                # Replace masked data with available values from the match.
                # NOTE(review): this reuses the outer loop variable ``k``;
                # harmless because the outer ``for`` reassigns it each pass,
                # but worth renaming eventually.
                for k, masked in enumerate(stack.mask[i]):
                    colname = stack.colnames[k]
                    if masked:
                        stack[i][colname] = match[colname]
            i += 1
            if verbose:
                pb.update()
        # Fill masked detection column fields with 'False'
        for colname in stack.colnames:
            if 'detected' in colname:
                stack[colname].fill_value = 0
        stack['_index'] = range(len(stack))
        current_arg = MasterCatalog(arg1, arg2, catalog=stack)
    return current_arg
import re
def show_user(func):
    """Register ``func`` in the global ``USER_FUNCTIONS`` menu.

    The menu key is the first ``[bracketed]`` fragment of the function's
    docstring; when the docstring is missing or has no bracketed fragment,
    the function name is used (and the error is printed).
    """
    global USER_FUNCTIONS
    try:
        key = re.search(r'\[(.+?)\]', func.__doc__).group(1)
    except AttributeError as err:
        # no docstring, or no [bracketed] fragment in it
        key = func.__name__
        print(err)
    USER_FUNCTIONS[key] = func
    return func
def check_balancing_time_gran(param_name,
                              granmap,
                              entity,
                              comp='coarser',
                              find_EC_=lambda x, y: y[-1]):
    """Check that a granularity map's time granularity relates correctly to
    the balancing time of each entity's energy carrier.

    Parameters
    ----------
    param_name: str
        parameter name of the granularity map (used in error messages)
    granmap: pd.DataFrame
        data for ``param_name``; must have an ``entity`` column and a
        ``TimeGranularity`` column
    entity: str
        one of EnergyCarrier, EnergyConvTech or EnergyStorTech
    comp: str, default 'coarser'
        'coarser' or 'finer' — the required relation of the data
        granularity to the carrier's balancing time
    find_EC_: callable, optional
        maps (entity, row index) to the energy carrier

    Returns
    -------
    bool
        True when every row satisfies the requested relation, else False
        (with an error logged for the first offending row).
    """
    granmap = granmap.set_index(entity)
    for entity_ in granmap.index:
        ec = find_EC_(entity, entity_)
        # granularity is compared by the number of time columns it implies
        balancing_cols = len(constant.TIME_COLUMNS[balancing_time(ec)])
        data_cols = len(constant.TIME_COLUMNS[granmap.loc[entity_]['TimeGranularity']])
        if comp == "finer":
            if balancing_cols > data_cols:
                logger.error(
                    f"For {param_name} time granularity for {entity},{entity_} is incorrect. It should be finer than balancing time of {ec}")
                return False
        elif balancing_cols < data_cols:
            logger.error(
                f"For {param_name} time granularity for {entity},{entity_} is incorrect. It should be coarser than balancing time of {ec}")
            return False
    return True
def plot_integer_part(xs, ns, alpha, show=True):
    """Plot the integer part of real numbers mod ``alpha``.

    X ticks are placed at multiples of ``alpha`` covering the data range,
    formatted with ``PiFormatter``.

    :param xs: sample positions
    :param ns: integer parts at those positions
    :param alpha: modulus / tick spacing
    :param show: call ``plt.show()`` before returning
    :return: the matplotlib (figure, axes) pair
    """
    fig = plt.figure()
    ax = plt.gca()
    # snap the tick range to multiples of alpha around the data
    lo = alpha * (xs[0] // alpha)
    hi = alpha * (xs[-1] // alpha) + alpha
    ticks = np.linspace(lo, hi, int((hi - lo) // alpha) + 1)
    ax.xaxis.set_major_formatter(PiFormatter())
    plt.plot(xs, ns, ".")
    plt.title("Integer Part")
    plt.xlabel("$x$")
    plt.xticks(ticks)
    plt.ylabel(r"$\mathrm{int}(x)$")
    if show:
        plt.show()
    return fig, ax
def array_to_sentence(vocab_dict, array: np.array, cut_at_eos=True):
    """
    Convert an array of token IDs into a list of token strings, optionally
    stopping at the first end-of-sequence token.
    :param vocab_dict: mapping from index to token string
    :param array: 1D array (or sequence) of indices
    :param cut_at_eos: cut the decoded sentence at the first <eos>
    :return: list of strings (tokens)
    """
    tokens = []
    for idx in array:
        token = vocab_dict[idx]
        if cut_at_eos and token == EOS_TOKEN:
            break
        tokens.append(token)
    return tokens
import keras
def make_time_scheme(dt, trend):
    """Wrap a trend network in a classical RK4 time-stepping scheme.

    Builds a Keras model computing ``state + dt/6 * (k1 + 2*k2 + 2*k3 + k4)``
    where the ``k_i`` are the usual Runge-Kutta stages of ``trend``.

    :param dt: integration time step
    :param trend: Keras model estimating the tendency d(state)/dt
    :return: a Keras model mapping a state to the state one step later
    """
    state = keras.layers.Input(shape=trend.input_shape[1:])
    # helper: a Lambda layer multiplying its input by a constant factor
    scale = lambda factor: keras.layers.Lambda(lambda x: factor * x)
    # RK4 stages
    k1 = trend(state)
    k2 = trend(keras.layers.add([state, scale(0.5 * dt)(k1)]))
    k3 = trend(keras.layers.add([state, scale(0.5 * dt)(k2)]))
    k4 = trend(keras.layers.add([state, scale(dt)(k3)]))
    # weighted sum k1 + 2*(k2 + k3) + k4, scaled by dt/6
    doubled_mid = scale(2.)(keras.layers.add([k2, k3]))
    increment = scale(dt / 6.)(keras.layers.add([k1, doubled_mid, k4]))
    output = keras.layers.add([state, increment])
    return keras.models.Model(inputs=[state], outputs=[output])
def print_parameters(opt):
    """
    Generate a string with the options pretty-printed (used in the --verbose mode).

    :param opt: options object, rendered via ``cg.ParamBlock``
    :return: the pretty-printed options string
    """
    return str(cg.ParamBlock(opt, ''))
import os
def read_scores(scores_dir, targets):
    """
    Return a pandas DataFrame containing scores of all decoys for all
    targets in ``targets``.

    Each target's label file ``<scores_dir>/<target>.dat`` is read as a
    whitespace-delimited table, rows with missing values are dropped, and
    the per-target frames are merged.

    :param scores_dir: directory containing the ``.dat`` label files
    :param targets: iterable of target names
    :return: merged DataFrame of all targets' scores
    """
    frames = []
    for target in targets:
        path = os.path.join(scores_dir, '{:}.dat'.format(target))
        # FIX: raw string for the delimiter — '\s' in a plain literal is an
        # invalid escape sequence (SyntaxWarning on modern Python).
        df = pd.read_csv(path, delimiter=r'\s+', engine='python').dropna()
        frames.append(df)
    return dt.merge_dfs(frames)
def compute_regularizer_fft(n, weight_tv, weight_l2):
    """Precompute 2D filter regularizer (total variation + L2) in Fourier domain.
    This function implements w^2 in eq. 23 of the paper:
    Fast Fourier Color Constancy, Barron and Tsai, CVPR 2017
    https://arxiv.org/abs/1611.07596
    Args:
      n: specifies the square filter size in one of the dimensions.
      weight_tv: weight for the total variation term, can be a list or scalar.
      weight_l2: weight for the l2 term, can be a list of scalar.
    Returns:
      A numpy array containing an n-by-n regularizer for FFTs, with a shape of
      [n, n, channel]. The channel size is max(len(weight_tv), len(weight_l2)).
    """
    weight_tv = np.atleast_1d(weight_tv)
    weight_l2 = np.atleast_1d(weight_l2)

    def _squared_magnitude(z):
        # |z|^2 of a complex array, kept real
        return np.real(z) ** 2 + np.imag(z) ** 2

    # squared magnitudes of the horizontal and vertical gradient filters
    grad_x_sq = _squared_magnitude(np.fft.fft2([[-1, 1]], s=(n, n)))
    grad_y_sq = _squared_magnitude(np.fft.fft2([[-1], [1]], s=(n, n)))
    grad_sq = grad_x_sq + grad_y_sq
    return grad_sq[:, :, np.newaxis] * weight_tv + weight_l2
from pathlib import Path
from typing import Optional
import json
async def read_data(*, file_path: Path) -> Optional[Box]:
    """Read JSON data from ``file_path`` under a file lock.

    :param file_path: path of the JSON file to read
    :return: a ``Box`` with the parsed contents, or ``None`` when the file
        does not exist or cannot be parsed (a warning is logged).
    """
    if not file_path.is_file():
        return None
    lock = FileLock(f"{file_path.as_posix()}.lck")
    with lock.acquire():
        async with aiofiles.open(file_path) as handle:  # type: ignore
            contents = ""
            async for chunk in handle:
                contents += chunk
        try:
            parsed = json.loads(contents)
        except Exception as e:
            log.warning(f"Could not read and parse data from {file_path}: {e}.")
            return None
    return Box(parsed)
def savefacedata(request):
    """Save or update the current user's face data and collect training data.

    GET renders the upload form; POST updates the user's existing FaceData
    record (or creates one when none exists), then triggers training-data
    collection. When collection fails, the stored face data is re-saved so
    the attempt can be retried.
    """
    if request.method == "GET":
        return render(request, "savefacedata.html", {"form": FaceDataForm()})
    try:
        # update the existing record when the user already has face data
        fd = get_object_or_404(FaceData, user=request.user)
        form = FaceDataForm(request.POST, request.FILES, instance=fd)
        form.save()
    except Exception:
        # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed. Lookup failed (e.g. Http404):
        # create a fresh record for this user instead.
        form = FaceDataForm(request.POST, request.FILES)
        newfacedata = form.save(commit=False)
        newfacedata.user = request.user
        newfacedata.save()
    resp = collectTrainingData(str(request.user.id))
    if not resp["Success"]:
        fd = FaceData.objects.get(user=request.user)
        fd.save()
    return JsonResponse(resp)
import urllib.parse
def addToCal(url, date_from, date_end, summary):
    """Add a CalDAV calendar entry spanning ``date_from`` to ``date_end``.

    :param url: CalDAV principal/calendar URL
    :param date_from: first day of the event
    :param date_end: last day of the event (inclusive; DTEND is exclusive,
        hence the one-day offset below)
    :param summary: event summary text
    :return: the percent-quoted URL of the created event, or ``False`` when
        the principal has no calendars
    """
    vcal_entry = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:Pyvac Calendar
BEGIN:VEVENT
SUMMARY:%s
DTSTART;VALUE=DATE:%s
DTEND;VALUE=DATE:%s
END:VEVENT
END:VCALENDAR
"""
    client = caldav.DAVClient(url)
    # use url parameter to force calendar to use
    principal = caldav.Principal(client, url)
    calendars = principal.calendars()
    if not len(calendars):
        return False
    vcal_entry = vcal_entry % (summary,
                               date_from.strftime('%Y%m%d'),
                               (date_end + relativedelta(days=1)).strftime('%Y%m%d'))
    calendar = calendars[0]
    log.info('Using calendar %r' % calendar)
    log.info('Using entry: %s' % vcal_entry)
    event = caldav.Event(client, data=vcal_entry, parent=calendar).save()
    log.info('Event %s created' % event)
    url_obj = event.url
    url_obj = str(url_obj)
    # FIX: ``import urllib`` alone does not load the ``urllib.parse``
    # submodule; import it explicitly (done above) before using quote().
    url_obj = urllib.parse.quote(url_obj, safe='/:')
    return url_obj
import os
def get_bool_from_environment(env, default):
    """Read an environment variable as a boolean.

    Accepts the case-insensitive strings ``"true"`` and ``"false"``; a
    missing variable or any other value yields ``default`` (with a warning
    logged).

    :param env: the environment variable name
    :param default: the default value
    :return: the parsed boolean, or ``default``
    """
    # FIX: the original placed ``return v`` inside ``finally``, which
    # silently swallowed any exception; rewritten with explicit returns.
    try:
        raw = os.environ[env].lower()
    except KeyError:
        logger.warning('Not found. Using default value: {}={}'.format(env, default))
        return default
    if raw == 'true':
        value = True
    elif raw == 'false':
        value = False
    else:
        logger.warning('Failed to parse. Using default value: {}={}'.format(env, default))
        return default
    logger.info('Reading from environment: {}'.format(env))
    return value
def SetIamPolicy(zone_ref, policy):
    """Send a SetIamPolicy request for a Dataplex zone.

    :param zone_ref: resource reference of the target zone
    :param policy: the IAM policy to apply
    :return: the service response
    """
    messages = dataplex_api.GetMessageModule()
    request = messages.DataplexProjectsLocationsLakesZonesSetIamPolicyRequest(
        resource=zone_ref.RelativeName(),
        googleIamV1SetIamPolicyRequest=messages.GoogleIamV1SetIamPolicyRequest(
            policy=policy))
    client = dataplex_api.GetClientInstance()
    return client.projects_locations_lakes_zones.SetIamPolicy(request)
def sticky_attribute_assignment(trackable, name, value):
    """Add dependencies for an attribute assignment; called from __setattr__.

    Shared between Trackable and Model. Respects NoDependency indicators,
    wraps common data structures so they become trackable, and registers
    Trackable values on ``trackable`` under the attribute name.

    Args:
      trackable: The object to add dependencies to (generally the one having
        an attribute assigned).
      name: The attribute name being assigned.
      value: The value being assigned. Not necessarily a trackable object.

    Returns:
      The value which should be stored in the attribute (unwrapped from a
      NoDependency object if necessary).
    """
    # Wrap/unwrap first; the NoDependency check must look at the raw value.
    wrapped = _wrap_or_unwrap(value)
    if isinstance(value, NoDependency):
        # Caller explicitly opted out of dependency tracking.
        return wrapped
    if isinstance(wrapped, base.Trackable):
        trackable._track_trackable(  # pylint: disable=protected-access
            wrapped, name=name,
        # Allow the user to switch the Trackable which is tracked by this
        # name, since assigning a new variable to an attribute has
        # historically been fine (e.g. Adam did this).
            overwrite=True)
    return wrapped
def raw_to_pos_prob(raw):
    """Convert raw model outputs to positive-class probabilities.

    Each element of ``raw`` is either a 1D logit vector (typical outputs)
    or a 2D array of per-window logits (sliding-window outputs), in which
    case the windows' positive-class probabilities are averaged.

    :param raw: iterable of raw model outputs
    :return: list of positive-class probabilities, one per element
    """
    positives = []
    for logits in raw:
        logits = np.array(logits)
        if logits.ndim == 1:
            positives.append(softmax(logits)[1])
        elif logits.ndim == 2:
            # sliding windows: average prob over all window predictions
            positives.append(softmax(logits, axis=1)[:, 1].mean())
        else:
            raise Exception(f"Unclear how to deal with raw dimension: {logits.shape}")
    return positives
def is_tt_object(arg) -> bool:
    """Determine whether ``arg`` is a `TT-Tensor`, `TT-Matrix`, or a
    `WrappedTT` holding one of them.

    :return: ``True`` if `TT-object`, ``False`` otherwise
    :rtype: bool
    """
    if is_tt_tensor(arg):
        return True
    return is_tt_matrix(arg)
def get_nltk_builder(languages):
    """Return a lunr Builder whose pipeline trims and stop-word-filters text
    for every language in ``languages``.

    English uses Lunr's bundled defaults; other languages get generated
    stop-word filters and contribute their word characters to a combined
    trimmer. Stemming is intentionally left out of the pipeline.

    Args:
        languages (list): A list of supported languages.
    """
    stopword_filters = []
    word_characters = set()
    for language in languages:
        if language == "en":
            # use Lunr's defaults
            stopword_filters.append(stop_word_filter)
            word_characters.add(r"\w")
        else:
            stopwords, characters = _get_stopwords_and_word_characters(language)
            stopword_filters.append(
                generate_stop_word_filter(stopwords, language=language)
            )
            word_characters.update(characters)
    builder = Builder()
    multi_trimmer = generate_trimmer("".join(sorted(word_characters)))
    Pipeline.register_function(
        multi_trimmer, "lunr-multi-trimmer-{}".format("-".join(languages))
    )
    builder.pipeline.reset()
    for fn in [multi_trimmer] + stopword_filters:
        builder.pipeline.add(fn)
    return builder
from shapely.geometry import Point
def from_edge_geoms_to_node_geoms(edge_network, logger=None):
    """
    Infer node coordinates from a DataFrame describing edge geometries as
    Linestrings, assuming each linestring runs 'from' -> 'to'; conflicting
    geometries inferred for the same node are reported.

    Arguments:
        *edge_network* (Pandas DataFrame) :
            This df should have the columns:
                *from_id* (float/int) : indices of the 'from' nodes
                *to_id* (float/int) : indices of the 'to' nodes
                *geometry* (Shapely linestrings) : geometry of the edge

    Returns:
        *result* (dict) : keys are node IDs (sorted); values are shapely
        Points with the inferred coordinates
    """
    # endpoints of each linestring (boundary/coords[0] work even for 2-point lines)
    edge_network['from_point_geom'] = edge_network['geometry'].apply(lambda geom: Point(geom.coords[0]))
    edge_network['to_point_geom'] = edge_network['geometry'].apply(lambda geom: Point(geom.coords[-1]))
    node_geoms = {}  # node id -> shapely Point

    def _collect(id_geom_pairs):
        # record each node's geometry; report when a node is inferred with
        # two different geometries from different edges
        for node_id, geom in id_geom_pairs:
            if node_id not in node_geoms:
                node_geoms[node_id] = geom
            elif not node_geoms[node_id] == geom:
                report_conflict(node_id, node_geoms[node_id], geom, logger=logger)

    # 'from' endpoints first, then 'to' endpoints (matching original order)
    _collect(edge_network[['from_id', 'from_point_geom']].values)
    _collect(edge_network[['to_id', 'to_point_geom']].values)
    return dict(sorted(node_geoms.items()))
import _datetime
from datetime import datetime
def from_timestamp(
    timestamp, tz=UTC
):  # type: (Union[int, float], Union[str, _Timezone]) -> DateTime
    """
    Create a DateTime instance from a POSIX timestamp.

    The timestamp is interpreted in UTC; the result is converted to ``tz``
    only when a non-UTC timezone is requested.
    """
    dt = _datetime.datetime.utcfromtimestamp(timestamp)
    dt = datetime(
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond
    )
    # FIX: the original condition used ``or``, which is true for every tz
    # (including UTC itself), so the conversion always ran. ``and`` skips
    # the conversion exactly when UTC is requested, as intended.
    if tz is not UTC and tz != "UTC":
        dt = dt.in_timezone(tz)
    return dt
def _check_shape_(joint_positions):
    """Validate the pose-data array shape and return it unchanged.

    Expected shape is (n_cameras, n_frames, n_tracked_points, n_dims), e.g.
    (7, 1000, 38, 2): 7 camera images (some may be all zero if a camera did
    not record), the number of frames, the 38 skeleton features (legs,
    antennae, ... — see skeleton.py), and 2 pose dimensions.

    :raises ValueError: when any non-frame dimension does not match the setup
    """
    shape = joint_positions.shape
    wrong = (shape[0] != SetupConfig.value('n_cameras')
             or shape[2] != len(skeleton.tracked_points)
             or shape[3] != SetupConfig.value('n_recorded_dimesions'))
    if wrong:
        raise ValueError(f"shape of pose data is wrong, it's {joint_positions.shape}")
    return joint_positions
def image_read(path):
    """
    Load an image from disk in grayscale, abstracting over the OpenCV 2
    versus OpenCV 3/4 flag-name difference.

    Parameters
    ----------
    path : string
        Path to be loaded

    Returns
    -------
    image : opencv image (grayscale)
    """
    # only the chosen branch's flag attribute is evaluated, so the legacy
    # name is safe to reference under old cv2 builds
    flag = cv2.IMREAD_GRAYSCALE if (CV_V3 or CV_V4) else cv2.CV_LOAD_IMAGE_GRAYSCALE
    return cv2.imread(path, flag)
def mpg(miles, gallons):
    """Compute a car's fuel efficiency in miles per gallon.

    Prints the MPG value (and its km-per-liter conversion) and returns the
    MPG value.

    :param miles: number of miles driven
    :param gallons: number of gallons used (must be non-zero)
    :return: miles per gallon as a float
    """
    mpg_value = float(miles) / gallons
    km_per_liter = mpg_value * 0.425
    # FIX: the original used a Python 2 print statement, which is a
    # SyntaxError under Python 3.
    print(mpg_value, "mpg or in km/liter:", km_per_liter)
    return mpg_value
def add_genesets_name(df: pd.DataFrame) -> pd.DataFrame:
    """
    Prepend a ``geneset`` column derived from the index. Used for genesets
    analysis.

    The geneset name is the part of each index label before the first
    underscore. The frame is modified in place and also returned.
    """
    names = df.index.map(lambda label: label.split("_")[0])
    df.insert(loc=0, column="geneset", value=names)
    return df
import sys
import os
import json
def write_index_files(files, regex, hdrnum, print_trace, content_mode="translated",
                      outpath=None, outstream=sys.stdout, errstream=sys.stderr):
    """Process each file and create JSON index file(s).

    By default an ``_index.json`` file is written to every directory in
    which matching files are found; with ``outpath`` set, a single combined
    index is written there instead.

    Parameters
    ----------
    files : iterable of `str`
        The files or directories from which the headers are to be read.
    regex : `str`
        Regular expression used to filter files when a directory is scanned.
    hdrnum : `int`
        The HDU number to read. The primary header is always read and merged
        with the specified header.
    print_trace : `bool`
        If `True`, report a full traceback on read errors; otherwise print a
        one line summary of the error condition.
    content_mode : `str`
        Form of data to write in the index file: ``translated`` (default)
        for ObservationInfo, or ``metadata`` for native metadata headers.
    outpath : `str`, optional
        If specified, a single combined index file is written here; must end
        in ``.json``. If `None`, one index per directory is written.
    outstream : `io.StringIO`, optional
        Output stream for standard messages. Defaults to `sys.stdout`.
    errstream : `io.StringIO`, optional
        Stream for messages that would normally go to standard error.
        Defaults to `sys.stderr`.

    Returns
    -------
    okay : `list` of `str`
        All the files that were processed successfully.
    failed : `list` of `str`
        All the files that could not be processed.
    """
    if content_mode not in ("translated", "metadata"):
        raise ValueError(f"Unrecognized content mode {content_mode}")
    if outpath is not None:
        _, ext = os.path.splitext(outpath)
        if ext != ".json":
            raise ValueError(f"Override output file must end in .json but given {outpath}")
    found = find_files(files, regex)
    okay = []
    failed = []
    # Bucket files by directory, unless a single combined index is requested.
    if outpath is not None:
        by_directory = {".": list(found)}
    else:
        by_directory = {}
        for path in found:
            directory, filename = os.path.split(path)
            by_directory.setdefault(directory, []).append(filename)
    # Extract metadata and write one index per bucket.
    for directory, names in by_directory.items():
        output, dir_okay, dir_failed = index_files(names, directory, hdrnum, print_trace,
                                                   content_mode, outstream, errstream)
        failed.extend(dir_failed)
        okay.extend(dir_okay)
        if outpath is None:
            index_file = os.path.join(directory, "_index.json")
        else:
            index_file = outpath
        with open(index_file, "w") as fd:
            print(json.dumps(output), file=fd)
        log.info("Wrote index file to %s", index_file)
    return okay, failed
def set_relation_hierarchy(items_query, type_relation):
    """Create hierarchical relations and prerequisites between items.

    Parameters
    ----------
    items_query : dict
        Maps an item name to a comma-separated (", "-delimited) string
        of related item names.
    type_relation
        Relation type, passed through unchanged to ``set_relation``.

    Returns
    -------
    Response
        HTTP 200 when every relation was stored, HTTP 400 if any item
        lookup or relation write failed.
    """
    try:
        for item_name, related_csv in items_query.items():
            related_names = related_csv.split(', ')
            related_items = Items.objects.filter(name__in=related_names)
            # May raise Items.DoesNotExist if the name is unknown;
            # that is reported to the caller as a 400 below.
            parent_item = Items.objects.get(name=item_name)
            set_relation(parent_item, related_items, type_relation)
        return Response(status=200)
    except Exception:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception so only genuine errors
        # are mapped to a 400 response.
        return Response(status=400)
import unittest
def makeTestSuiteV201004():
  """Build the test suite for API version v201004.

  Returns:
    TestSuite containing all v201004 report service tests.
  """
  tests = unittest.makeSuite(ReportServiceTestV201004)
  return unittest.TestSuite(tests)
def copy_data_to_csv(data=None):
    """Write data downloaded from Feishu into the spreadsheet template.

    Only the first field of each row is stored (as text) in the
    "完成情况" (completion status) column, then the file is saved back
    to ``cfg.excel_path``.

    Parameters
    ----------
    data : list, optional
        Rows whose first element is written to the status column.
        Defaults to an empty list.

    Returns
    -------
    bool
        True on success, False if reading or writing the file failed.
    """
    # Avoid the mutable-default-argument pitfall (a ``data=[]`` default
    # is a single list object shared across calls); use None instead.
    if data is None:
        data = []
    csv_path = cfg.excel_path
    try:
        csv_df = open_excel(csv_path)
        csv_df["完成情况"] = [str(item[0]) for item in data]
        # Persist the updated sheet in place.
        csv_df.to_excel(csv_path, index=False, encoding='utf-8')
        return True
    except Exception as e:
        # Best-effort operation: log the failure and signal it to the
        # caller through the boolean return value.
        logger.exception(e)
        return False
def add(x, y):
    """Return the sum of `x` and `y`."""
    total = x + y
    return total
def beale(position):
    """Beale benchmark function.

    Global optimum at (3.0, 0.5), where the value is 0.

    :param position: pair (x, y)
    :return: function value at `position`
    """
    x, y = position
    residuals = (
        1.5 - x + x * y,
        2.25 - x + x * y ** 2,
        2.625 - x + x * y ** 3,
    )
    return sum(r ** 2 for r in residuals)
def cvReleaseHist(*args):
    """cvReleaseHist(PyObject obj)

    Thin wrapper that forwards all positional arguments to the native
    `_cv.cvReleaseHist` extension binding and returns its result.
    """
    result = _cv.cvReleaseHist(*args)
    return result
def naive_log_ctz(x: int) -> int:
    """Count the trailing zeros of `x` in O(log(zeros)) steps.

    Phase 1 grows a low-order all-ones mask (1, 2, 4, ... bits wide)
    by repeated doubling until it covers a nonzero bit of `x`; phase 2
    then binary-searches inside that window for the exact count. Only
    the low bits of `x` are ever inspected, so the cost depends on the
    number of trailing zeros, not on the total size of `x`.

    By convention the result for 0 is 0. Negative ints are treated as
    infinite two's-complement bit strings, matching Python's bitwise
    semantics, so ctz(-n) == ctz(n).

    Args:
        x: An int.

    Returns:
        The number of trailing zeros in x, as an int.

    >>> naive_log_ctz(0)
    0
    >>> naive_log_ctz(1)
    0
    >>> naive_log_ctz(-2)
    1
    >>> naive_log_ctz(40)  # 0b101000 = 2**3 * 5
    3
    >>> naive_log_ctz(37 << 100)
    100
    """
    if x == 0:
        return 0

    # Phase 1: doubling search for a low-bit window containing a set bit.
    mask = 1
    width = 1
    window = x & mask
    while window == 0:
        mask |= mask << width
        width <<= 1
        window = x & mask

    # Step back to the widest all-zero prefix examined so far.
    width >>= 1
    mask >>= width

    # Phase 2: binary search within the window, halving the probe each
    # time; whenever the probed half is all zeros, shift it away and
    # add its width to the running count.
    count: int = 0
    while width > 0:
        if window & mask == 0:
            window >>= width
            count += width
        width >>= 1
        mask >>= width
    return count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.