| content (string, length 22-815k) | id (int64, 0-4.91M) |
|---|---|
def handle_mpd_rm(bot, ievent):
""" arguments: <playlist> - remove playlist. """
handle_mpd_playlist_manipulation(bot, ievent, 'rm')
| 5,336,600
|
def createOutputBuffer(file, encoding):
"""Create a libxml2 output buffer from a Python file """
ret = libxml2mod.xmlCreateOutputBuffer(file, encoding)
if ret is None:raise treeError('xmlCreateOutputBuffer() failed')
return outputBuffer(_obj=ret)
| 5,336,601
|
def get_table_name(yaml_path):
"""gives how the yaml file name should be in the sql query"""
table_name = os.path.basename(yaml_path)
table_name = os.path.splitext(table_name)[0]
return table_name
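A quick usage sketch of the helper above; the path is illustrative:

import os

# "/configs/tables/users.yaml" -> basename "users.yaml" -> table name "users"
assert get_table_name("/configs/tables/users.yaml") == "users"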
| 5,336,602
|
def wait_for_proof(node, proofid_hex, timeout=60, expect_orphan=None):
"""
Wait for the proof to be known by the node. If expect_orphan is set, the
proof should match the orphan state, otherwise it's a don't care parameter.
"""
def proof_found():
try:
wait_for_proof.is_orphan = node.getrawavalancheproof(proofid_hex)[
"orphan"]
return True
except JSONRPCException:
return False
wait_until_helper(proof_found, timeout=timeout)
if expect_orphan is not None:
assert_equal(expect_orphan, wait_for_proof.is_orphan)
| 5,336,603
|
def check(verbose=1):
"""
Runs a couple of functions to check the module is working.
    :param verbose: 0 to hide the standard output
:return: list of dictionaries, result of each test
"""
return []
| 5,336,604
|
def cylinder_sideways():
"""
sideways cylinder for poster
"""
    call_separator('cylinder sideways')
T1 = .1
#gs = gridspec.GridSpec(nrows=2,ncols=3,wspace=-.1,hspace=.5)
fig = plt.figure(figsize=(5,4))
ax11 = fig.add_subplot(111,projection='3d')
#ax12 = fig.add_subplot(gs[0,2])
#ax22 = fig.add_subplot(gs[1,2])
a = lubrication(phi1=.57,Rp=0.96,Rc=1.22,base_radius=1.22,
pi3=1,pi4=4.7,pi5=0.1,pi6=10,
mu=1.2,T=T1,constriction='piecewise',U0=0.2,
dt=0.02,eps=1,
F0=50,method='euler')
a.Z0 = -5/a.Rp
z = np.linspace(-7,7,100) # dimensional
r = a.pi1(z)
th = np.linspace(0,2*np.pi,100)
radius_al = 0.25
# draw arrow going into spine
ar1 = Arrow3D([-5,-1.5],[0,0],[0,0],
mutation_scale=10,
lw=2, arrowstyle="-|>", color="k")
ax11.add_artist(ar1)
# A
# draw spine
Z,TH = np.meshgrid(z,th)
#Z,TH = np.mgrid[-7:7:.1, 0:2*np.pi:.1]
X = np.zeros_like(Z)
Y = np.zeros_like(Z)
#print(np.shape(Z))
for i in range(len(Z[:,0])):
X[i,:] = a.pi1(Z[i,:])*np.cos(TH[i,:])
Y[i,:] = a.pi1(Z[i,:])*np.sin(TH[i,:])
ax11.plot_surface(Z,Y,X,alpha=.25)
shifts = np.array([-6,0,-4])
names = ['z','y','x']
size = 2
for i in range(3):
coords = np.zeros((3,2))
coords[:,0] += shifts
coords[:,1] += shifts
coords[i][1] += size
arx = Arrow3D(*list(coords),
mutation_scale=5,
lw=2, arrowstyle="-|>", color="k")
ax11.text(*list(coords[:,1]),names[i],horizontalalignment='center')
ax11.add_artist(arx)
# draw sphere for cap
b = a.base_radius
r = np.sqrt(b**2+7**2)
th2 = np.linspace(0,np.arctan(b/7),100)
phi = np.linspace(0,2*np.pi,100)
TH2,PHI = np.meshgrid(th2,phi)
X = r*np.sin(TH2)*np.cos(PHI)
Y = r*np.sin(TH2)*np.sin(PHI)
Z = r*np.cos(TH2)
ax11.plot_surface(Z,Y,X,color='tab:blue',alpha=.5)
# draw sphere vesicle
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
X = np.cos(u)*np.sin(v)
Y = np.sin(u)*np.sin(v)
Z = np.cos(v)
ax11.plot_surface(Z,Y,X,color='gray',alpha=.5)
# label spine head and base
ax11.text(7,0,-2,r'\setlength{\parindent}{0pt}Spine Head\\(Closed End)')
ax11.text(-4,0,3,r'\setlength{\parindent}{0pt}Spine Base\\(Open End)')
# set equal aspect ratios
#ax11.set_aspect('auto') # only auto allowed??
ax11.set_box_aspect((np.ptp(X), np.ptp(Y), np.ptp(Z)))
ax11.set_axis_off()
lo = -4.4
hi = 4.4
dx = -.5
ax11.set_xlim(lo-dx,hi+dx)
ax11.set_ylim(lo-dx,hi+dx)
ax11.set_zlim(lo,hi)
ax11.view_init(20,65)
return fig
| 5,336,605
|
def test(dir):
"""Discover and run unit tests."""
from unittest import TestLoader, TextTestRunner
testsuite = TestLoader().discover(f'./{dir}')
TextTestRunner(verbosity=2, buffer=True).run(testsuite)
| 5,336,606
|
def get_generic_or_msg(intent, result):
""" The master method. This method takes in the
intent and the result dict structure
and calls the proper interface method. """
return Msg_Fn_Dict[intent](result)
| 5,336,607
|
def s3_example_tile(gtiff_s3):
"""Example tile for fixture."""
return (5, 15, 32)
| 5,336,608
|
def execute_list_of_commands(command_list):
"""
INPUT:
- ``command_list`` -- a list of strings or pairs
OUTPUT:
For each entry in command_list, we attempt to run the command.
If it is a string, we call ``os.system()``. If it is a pair [f, v],
we call f(v).
If the environment variable :envvar:`SAGE_NUM_THREADS` is set, use
that many threads.
"""
t = time.time()
# Determine the number of threads from the environment variable
# SAGE_NUM_THREADS, which is set automatically by sage-env
try:
nthreads = int(os.environ['SAGE_NUM_THREADS'])
except KeyError:
nthreads = 1
# normalize the command_list to handle strings correctly
command_list = [ [run_command, x] if isinstance(x, str) else x for x in command_list ]
# No need for more threads than there are commands, but at least one
nthreads = min(len(command_list), nthreads)
nthreads = max(1, nthreads)
def plural(n,noun):
if n == 1:
return "1 %s"%noun
return "%i %ss"%(n,noun)
print "Executing %s (using %s)"%(plural(len(command_list),"command"), plural(nthreads,"thread"))
execute_list_of_commands_in_parallel(command_list, nthreads)
print "Time to execute %s: %s seconds"%(plural(len(command_list),"command"), time.time() - t)
| 5,336,609
|
def get_transceiver_diagnostics(baseurl, cookie_header, transceiver):
"""
    Get the diagnostics of a given transceiver in the switch
    :param baseurl: imported baseurl variable
    :param cookie_header: Parse cookie resulting from successful loginOS.login_os(baseurl)
    :param transceiver: data parsed to specify a transceiver in switch
    :return: transceiver's diagnostics information in JSON format
"""
url = baseurl + 'transceivers/' + transceiver + '/diagnostics'
headers = {'cookie': cookie_header}
response = requests.get(url, verify=False, headers=headers)
if response.status_code == 200:
return response.json()
| 5,336,610
|
def plot_cross_sections(obj, paths, xs_label_size=12, xs_color='black',
map_style='contour', scale='lin', n=101, x_labeling='distance',
show_max=True, fp_max=True, fp_text=True, cmap='viridis_r',
max_fig_width=12, max_fig_height=8, legend_padding=6, **kw):
"""Generate a map style plot (either contour or pcolormesh) with
cross sections labeled on it and generate plots of the fields corresponding
to the cross sections
args:
obj - Results object or Model object, because the grid of fields
in a Results object must be interpolated to compute fields
along each cross section, passing a Model object instead will
yield smoother profiles of the fields along each cross section.
The Model object will, however, be used to compute a grid of
fields for the map style plot (contour or pcolormesh), so
passing a Model object could be slower.
paths - An iterable of iterables of x,y pairs representing paths through
the results domain to plot as cross sections. For example,
([(1,2), (3,5)], [(2,5), (9,3), (4,7)], [(5,3), (9,2)])
optional args:
xs_label_size - int, fontsize of text labels on the map style figure
xs_color - any matplotlib compatible color definition
map_style - str, 'contour' or 'pcolormesh', determines which map style
plot is generated with the cross sections labeled on it,
default is 'contour'
scale - str, can be 'log' or 'lin', only applies if map_style is
'contour' (default is 'lin')
n - integer, number of points sampled along the sections (default 101)
x_labeling - 'distance' or 'location', for x axis ticks on the cross
section plots labeled according to the distance along
the segment or with the (x,y) coordinates of sample
points, default is 'distance'
show_max - bool, toggle labeling of the maximum field location,
default is True
fp_max - bool, toggle whether the maximum fields along footprints
"of concern" are labeled
fp_text - bool, toggle footprint group labeling with text
cmap - str, name of matplotlib colormap, see:
http://matplotlib.org/examples/color/colormaps_reference.html
max_fig_width - float/int, inches, maximum width of figure
max_fig_height - float/int, inches, maximum height of figure
legend_padding - float/int, inches, width left for legend area
kw:
prefix - string prepended to the file names of saved plots
suffix - string appended to the file names of saved plots
and
any keyword arguments that can be passed to plot_contour(),
plot_pcolormesh(), or plot_segment()
note: Only a directory name can be passed to the 'path' keyword.
File names aren't accepted, which prevents saved plots from
overwriting each other. File names are created automatically.
returns:
A tuple of tuples of plotting objects. The first tuple contains the
return arguments of the map plot (contour or pcolormesh) and the
other tuples contain the return arguments of plot_path, for however
many cross sections are created."""
#deal with Model vs Results input
if(type(obj) is subcalc_class.Results):
res = obj
elif(type(obj) is subcalc_class.Model):
res = obj.calculate()
#separate saving kw from others
save_kw = {}
for k in ['save', 'path', 'format', 'prefix', 'suffix']:
if(k in kw):
save_kw[k] = kw[k]
kw.pop(k)
#deal with the saving kw
if('prefix' in save_kw):
save_kw['save'] = True
fn_prefix = save_kw['prefix']
if(fn_prefix[-1] != '-'):
fn_prefix = fn_prefix + '-'
else:
fn_prefix = ''
if('suffix' in save_kw):
save_kw['save'] = True
fn_suffix = save_kw['suffix']
if(fn_suffix[0] != '-'):
fn_suffix = '-' + fn_suffix
else:
fn_suffix = ''
if('save' in save_kw):
save = save_kw['save']
elif('path' in save_kw):
if(not os.path.isdir(save_kw['path'])):
raise(subcalc_class.EMFError('The path keyword argument to plot_cross_sections must be a directory path. Plot names are created automatically, with some control available through the prefix and suffix keyword arguments.'))
save_kw['save'] = True
save = True
else:
save = False
#check inputs
if(len(paths) > 26):
raise(subcalc_class.EMFError('There cannot be more than 26 cross sections on a single figure. Make sure that your input for the "points" argument has the correct number of levels (sublists).'))
#list of return arguments
R = []
#plot the map style figure
if(map_style == 'contour'):
r = plot_contour(res, scale, show_max, fp_max, fp_text, cmap,
max_fig_width, max_fig_height, legend_padding, **kw)
R.append(r)
fig, ax = r[0], r[1]
fn = fn_prefix + 'contour-with-cross-sections' + fn_suffix
else:
r = plot_pcolormesh(res, show_max, fp_max, fp_text, cmap, max_fig_width,
max_fig_height, legend_padding, **kw)
R.append(r)
fig, ax = r[0], r[1]
fn = fn_prefix + 'pcolormesh-with-cross-sections' + fn_suffix
#draw cross section traces on the figure
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i, path in enumerate(paths):
#get x,y
x, y = zip(*path)
xb, xe, yb, ye = x[0], x[-1], y[0], y[-1]
#plot the trace
ax.plot(x, y, color=xs_color)
#label the trace
hab, vab = _get_text_alignment(path[1], path[0])
hae, vae = _get_text_alignment(path[-2], path[-1])
ax.text(xb, yb, alphabet[i], ha=hab, va=vab,
color=xs_color, fontsize=xs_label_size)
ax.text(xe, ye, alphabet[i] + "'", ha=hae, va=vae,
color=xs_color, fontsize=xs_label_size)
#save or don't
if(save):
_save_fig(fn, fig, **save_kw)
#plot the cross sections
for i, path in enumerate(paths):
r = plot_path(obj, path, n, x_labeling, scale, cmap, **kw)
R.append(r)
fig, ax = r
c = alphabet[i]
ax.set_title("Cross Section %s-%s'" % (c, c))
if(save):
fn = '%scross-section%s' % (fn_prefix, c + fn_suffix)
_save_fig(fn, fig, **save_kw)
return(tuple(R))
| 5,336,611
|
def mask_valid_boxes(boxes, return_mask=False):
"""
    :param boxes: array of rows in (cx, cy, w, h, *_) format
    :param return_mask: if True, return the boolean validity mask instead of the filtered boxes
    :return: the boolean mask, or boxes[mask] when return_mask is False
"""
w = boxes[:,2]
h = boxes[:,3]
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
mask = (w > 2) & (h > 2) & (ar < 30)
if return_mask:
return mask
else:
return boxes[mask]
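A small sketch of the filter on made-up (cx, cy, w, h) rows; boxes with width or height of 2 or less, or an aspect ratio of 30 or more, are dropped.

import numpy as np

boxes = np.array([
    [50., 50., 10., 20.],   # kept: both sides > 2, aspect ratio 2
    [50., 50.,  1., 20.],   # dropped: width <= 2
    [50., 50., 90.,  2.5],  # dropped: aspect ratio 36 >= 30
])
print(mask_valid_boxes(boxes))                    # only the first row survives
print(mask_valid_boxes(boxes, return_mask=True))  # [ True False False]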
| 5,336,612
|
def import_buffer_to_hst(buf):
"""Import content from buf and return an Hy AST."""
return tokenize(buf + "\n")
| 5,336,613
|
def main():
"""Start web application."""
host = os.environ.get("HOST")
if not host:
host = "0.0.0.0" if os.getenv("DYNO") else "127.0.0.1" # noqa: S104
port = os.getenv("PORT", "")
port = int(port) if port.isdigit() else 5000
uvicorn.run("syndio_backend_test.api:API", host=host, port=port, log_level="info")
| 5,336,614
|
def parse_args():
""" Parse command line arguments.
"""
parser = argparse.ArgumentParser(description="Deep SORT")
parser.add_argument(
"--sequence_dir", help="Path to MOTChallenge sequence directory",
default=None, required=False)
parser.add_argument(
"--detection_file", help="Path to custom detections.", default=None,
required=False)
parser.add_argument(
"--output_file", help="Path to the tracking output file. This file will"
" contain the tracking results on completion.",
default="/tmp/hypotheses.txt")
parser.add_argument(
"--min_confidence", help="Detection confidence threshold. Disregard "
"all detections that have a confidence lower than this value.",
default=0.8, type=float)
parser.add_argument(
"--min_detection_height", help="Threshold on the detection bounding "
"box height. Detections with height smaller than this value are "
"disregarded", default=0, type=int)
parser.add_argument(
"--nms_max_overlap", help="Non-maxima suppression threshold: Maximum "
"detection overlap.", default=1.0, type=float)
parser.add_argument(
"--max_cosine_distance", help="Gating threshold for cosine distance "
"metric (object appearance).", type=float, default=0.2)
parser.add_argument(
"--max_frame_idx", help="Maximum size of the frame ids.", type=int, default=None)
parser.add_argument(
"--display", help="Show intermediate tracking results",
default=True, type=bool_string)
parser.add_argument('--min-box-area', type=float, default=50, help='filter out tiny boxes')
parser.add_argument('--cfg_file', default='aic_mcmt.yml', help='Config file for mcmt')
parser.add_argument('--seq_name', default='c041', help='Seq name')
return parser.parse_args()
| 5,336,615
|
def load_lane_segments_from_xml(map_fpath: _PathLike) -> Mapping[int, LaneSegment]:
"""
Load lane segment object from xml file
Args:
map_fpath: path to xml file
Returns:
        lane_objs: Mapping from lane segment ID to LaneSegment object
"""
tree = ET.parse(os.fspath(map_fpath))
root = tree.getroot()
logger.info(f"Loaded root: {root.tag}")
all_graph_nodes = {}
lane_objs = {}
# all children are either Nodes or Ways
for child in root:
if child.tag == "node":
node_obj = extract_node_from_ET_element(child)
all_graph_nodes[node_obj.id] = node_obj
elif child.tag == "way":
lane_obj, lane_id = extract_lane_segment_from_ET_element(child, all_graph_nodes)
lane_objs[lane_id] = lane_obj
else:
logger.error("Unknown XML item encountered.")
raise ValueError("Unknown XML item encountered.")
return lane_objs
| 5,336,616
|
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
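For reference, Keras' mode='tf' preprocessing rescales pixel values from [0, 255] to [-1, 1]; the sketch below reproduces that scaling directly rather than calling the library.

import numpy as np

def tf_mode_scaling(x):
    # Equivalent of mode='tf': divide by 127.5, then subtract 1.
    return x / 127.5 - 1.0

batch = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype("float32")
scaled = tf_mode_scaling(batch)
print(scaled.min(), scaled.max())  # values now lie in roughly [-1, 1]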
| 5,336,617
|
def build_gauss_kernel(sigma_x, sigma_y, angle):
"""
Build the rotated anisotropic gaussian filter kernel
Parameters
----------
sigma_x : numpy.float64
sigma in x-direction
sigma_y: numpy.float64
sigma in y-direction
angle: int
        angle in degrees of the needle holder measured with respect to 'vertical' transducer axis
Returns
-------
kernel: numpy.ndarray
        rotated filter kernel
"""
angle = np.pi/2-np.deg2rad(angle)
# Calculate gaussian kernel
kernel = ascon.Gaussian2DKernel(sigma_x, sigma_y, 0)
# Extract size and kernel values
x_size = kernel.shape[0]; y_size = kernel.shape[1]
kernel = kernel.array
# Rotate
kernel = ndimage.rotate(kernel,np.rad2deg(-angle), reshape=False)
# Parameters for cropping
max_in_kernel = np.amax(abs(kernel))
threshold = 0.05*max_in_kernel
# Crop the kernel to reduce its size
x_start = 0;
for i in range(0, x_size, 1):
if abs(max(kernel[i,:])) > threshold:
x_start = i
break
x_end = (x_size-1)-x_start
y_start = 0;
for i in range(0, y_size, 1):
if abs(max(kernel[:,i])) > threshold:
y_start = i
break
y_end = (y_size-1)-y_start
kernel = kernel[x_start:x_end, y_start:y_end]
return kernel
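A hedged usage sketch; it assumes the surrounding module imports astropy.convolution as ascon, scipy.ndimage as ndimage, and numpy as np, as the function body implies.

import numpy as np

# Anisotropic kernel elongated along x, needle holder at 30 degrees.
kernel = build_gauss_kernel(np.float64(3.0), np.float64(1.0), 30)
print(kernel.shape)  # cropped extent depends on the 5% amplitude threshold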
| 5,336,618
|
def parse_tle(fileobj):
"""Parse a file of TLE satellite element sets.
Builds an Earth satellite from each pair of adjacent lines in the
file that start with "1 " and "2 " and have 69 or more characters
each. If the preceding line is exactly 24 characters long, then it
is parsed as the satellite's name. For each satellite found, yields
a tuple `(names, sat)` giving the name(s) on the preceding line (or
`None` if no name was found) and the satellite object itself.
An exception is raised if the attempt to parse a pair of candidate
lines as TLE elements fails.
"""
b0 = b1 = b''
for b2 in fileobj:
if (b1.startswith(b'1 ') and len(b1) >= 69 and
b2.startswith(b'2 ') and len(b2) >= 69):
b0 = b0.rstrip(b'\n\r')
if len(b0) == 24: # Celestrak
name = b0.decode('ascii').rstrip()
names = [name]
elif b0.startswith(b'0 '): # Spacetrack 3-line format
name = b0[2:].decode('ascii').rstrip()
names = [name]
else:
name = None
names = ()
line1 = b1.decode('ascii')
line2 = b2.decode('ascii')
sat = EarthSatellite(line1, line2, name)
if name and ' (' in name:
# Given a name like `ISS (ZARYA)` or `HTV-6 (KOUNOTORI
# 6)`, also support lookup by the name inside or outside
# the parentheses.
short_name, secondary_name = name.split(' (')
secondary_name = secondary_name.rstrip(')')
names.append(short_name)
names.append(secondary_name)
yield names, sat
b0 = b1
b1 = b2
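A hedged usage sketch: the generator expects an iterable of byte strings, such as a TLE file opened in binary mode, and EarthSatellite is assumed to come from the same package.

# "stations.txt" is a hypothetical Celestrak-style TLE file.
with open("stations.txt", "rb") as f:
    for names, sat in parse_tle(f):
        print(names, sat)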
| 5,336,619
|
def kwargs_to_flags(**kwargs):
"""Convert `kwargs` to flags to pass on to CLI."""
flag_strings = []
for (key, val) in kwargs.items():
if isinstance(val, bool):
if val:
flag_strings.append(f"--{key}")
else:
flag_strings.append(f"--{key}={val}")
return " ".join(flag_strings)
| 5,336,620
|
def extractBananas(item):
"""
Parser for 'Bananas'
"""
badwords = [
'iya na kao manga chapters',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('isekai joushu chapters', 'Struggling Hard As The Lord Of A Castle In A Different World', 'translated'),
('dungeon harem wn chapters', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
('erufu seidorei wn', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
chp_prefixes = [
('AARASL', 'An A-ranked Adventurer’s “Slow-living”', 'translated'),
('Isekai Taneuma', 'Isekai Taneuma', 'translated'),
('Gang of Yuusha', 'Gang of Yusha', 'translated'),
('Gang of Yusha', 'Gang of Yusha', 'translated'),
('The Revenge of the Soul Eater', 'Soul Eater of the Rebellion', 'translated'),
('Soul Eater of the Rebellion', 'Soul Eater of the Rebellion', 'translated'),
('Sparta Teikoku ', 'Sparta Teikoku Kenkoku Senki ', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 5,336,621
|
def main():
"""Make a jazz noise here"""
args = get_args()
text = args.text
for line in args.text.splitlines():
print(' '.join(map(calc, line.split())))
| 5,336,622
|
def BuildSymbolToFileAddressMapping():
"""
Constructs a map of symbol-string -> [ (file_id, address), ... ] so that each
symbol is associated with all the files and addresses where it occurs.
"""
result = defaultdict(list)
# Iterate over all the extracted_symbols_*.txt files.
for filename in os.listdir(FLAGS.work_directory):
if fnmatch.fnmatch(filename, "extracted_symbols_*.txt"):
contents = open( FLAGS.work_directory + "/" + filename, "rt" ).readlines()
for line in contents:
file_id, filename, address, symbol, vuln = line.split()
result[symbol].append((file_id, address))
return result
| 5,336,623
|
def get_template_parameters_s3(template_key, s3_resource):
"""
  Checks for existence of a parameters object in S3 against the supported suffixes and returns the parameters file key if found
Args:
template_key: S3 key for template file. omit bucket.
s3_resource: a boto3 s3 resource
Returns:
filename of parameters file if it exists
"""
for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
parameters_key = template_key.replace("/templates", "/parameters") + suffix
try:
obj = s3_resource.Object(EFConfig.S3_CONFIG_BUCKET, parameters_key)
obj.get()
return parameters_key
except ClientError:
continue
return None
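A hedged usage sketch; the bucket, key layout, and suffix list come from the module's EFConfig, and the template key below is purely illustrative.

import boto3

s3 = boto3.resource("s3")
# The helper swaps "/templates" for "/parameters" and tries each suffix in
# EFConfig.PARAMETER_FILE_SUFFIXES until one object exists.
params_key = get_template_parameters_s3("cloudformation/templates/web-service.json", s3)
if params_key:
    print("found parameters file:", params_key)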
| 5,336,624
|
def cell_from_system(sdict):
"""
Function to obtain cell from namelist SYSTEM read from PW input.
Args:
        sdict (dict): Dictionary generated from namelist SYSTEM of PW input.
Returns:
ndarray with shape (3,3):
Cell is 3x3 matrix with entries::
[[a_x b_x c_x]
[a_y b_y c_y]
[a_z b_z c_z]],
where a, b, c are crystallographic vectors,
and x, y, z are their coordinates in the cartesian reference frame.
"""
ibrav = sdict.get('ibrav', None)
if ibrav == 0:
return None
params = ['a', 'b', 'c', 'cosab', 'cosac', 'cosbc']
celldm = [sdict.get(f'celldm({i + 1})', 0) for i in range(6)]
if not any(celldm):
abc = [sdict.get(a, 0) for a in params]
celldm = celldms_from_abc(ibrav, abc)
if not any(celldm):
return None
if ibrav == 1:
cell = np.eye(3) * celldm[0]
return cell
elif ibrav == 2:
v1 = celldm[0] / 2 * np.array([-1, 0, 1])
v2 = celldm[0] / 2 * np.array([0, 1, 1])
v3 = celldm[0] / 2 * np.array([-1, 1, 0])
elif ibrav == 3:
v1 = celldm[0] / 2 * np.array([1, 1, 1])
v2 = celldm[0] / 2 * np.array([-1, 1, 1])
v3 = celldm[0] / 2 * np.array([-1, -1, 1])
elif ibrav == -3:
v1 = celldm[0] / 2 * np.array([-1, 1, 1])
v2 = celldm[0] / 2 * np.array([1, -1, 1])
v3 = celldm[0] / 2 * np.array([1, 1, -1])
elif ibrav == 4:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([-1 / 2, np.sqrt(3) / 2, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 5:
term_1 = np.sqrt(1 + 2 * celldm[3])
term_2 = np.sqrt(1 - celldm[3])
v1 = celldm[0] * np.array([term_2 / np.sqrt(2), -term_2 / np.sqrt(6), term_1 / np.sqrt(3)])
v2 = celldm[0] * np.array([0, term_2 * np.sqrt(2 / 3), term_1 / np.sqrt(3)])
v3 = celldm[0] * np.array([-term_2 / np.sqrt(2), -term_2 / np.sqrt(6), term_1 / np.sqrt(3)])
elif ibrav == -5:
term_1 = np.sqrt(1 + 2 * celldm[3])
term_2 = np.sqrt(1 - celldm[3])
v1 = celldm[0] * np.array([(term_1 - 2 * term_2) / 3, (term_1 + term_2) / 3, (term_1 + term_2) / 3])
v2 = celldm[0] * np.array([(term_1 + term_2) / 3, (term_1 - 2 * term_2) / 3, (term_1 + term_2) / 3])
v3 = celldm[0] * np.array([(term_1 + term_2) / 3, (term_1 + term_2) / 3, (term_1 - 2 * term_2) / 3])
elif ibrav == 6:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, 1, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 7:
v1 = celldm[0] / 2 * np.array([1, -1, celldm[2]])
v2 = celldm[0] / 2 * np.array([1, 1, celldm[2]])
v3 = celldm[0] / 2 * np.array([-1, -1, celldm[2]])
elif ibrav == 8:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 9:
v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == -9:
v1 = celldm[0] / 2 * np.array([1, -celldm[1], 0])
v2 = celldm[0] / 2 * np.array([+1, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 91:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] / 2 * np.array([0, celldm[1], -celldm[2]])
v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
elif ibrav == 10:
v1 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
v2 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
elif ibrav == 11:
v1 = celldm[0] / 2 * np.array([1, celldm[1], celldm[2]])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], celldm[2]])
v3 = celldm[0] / 2 * np.array([-1, -celldm[1], celldm[2]])
elif ibrav == 12:
sen = np.sqrt(1 - celldm[3] ** 2)
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([celldm[1] * celldm[3], celldm[1] * sen, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == -12:
sen = np.sqrt(1 - celldm[4] ** 2)
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, celldm[1], 0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0, celldm[2] * sen])
elif ibrav == 13:
sen = np.sqrt(1 - celldm[3] ** 2)
v1 = celldm[0] / 2 * np.array([1, 0, -celldm[2]])
v2 = celldm[0] * np.array([celldm[1] * celldm[3], celldm[1] * sen, 0])
v3 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
elif ibrav == -13:
sen = np.sqrt(1 - celldm[4] ** 2)
v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0, celldm[2] * sen])
elif ibrav == 14:
singam = np.sqrt(1 - celldm[5] ** 2)
term = (1 + 2 * celldm[3] * celldm[4] * celldm[5] - celldm[3] ** 2 - celldm[4] ** 2 - celldm[5] ** 2)
term = np.sqrt(term / (1 - celldm[5] ** 2))
v1 = celldm[0] * np.array([1,
0,
0])
v2 = celldm[0] * np.array([celldm[1] * celldm[5],
celldm[1] * singam,
0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4],
celldm[2] * (celldm[3] - celldm[4] * celldm[5]) / singam,
celldm[2] * term])
else:
raise ValueError('Unsupported ibrav')
cell = np.stack([v1, v2, v3], axis=1)
return cell
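A small check of the simplest branch: for a simple cubic lattice (ibrav=1) with celldm(1)=a, the cell is a times the identity. The lattice parameter below is hypothetical.

import numpy as np

sdict = {'ibrav': 1, 'celldm(1)': 10.2}   # a = 10.2 bohr, illustrative
cell = cell_from_system(sdict)
assert np.allclose(cell, 10.2 * np.eye(3))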
| 5,336,625
|
def _exit(msg):
""" Exits with red output """
exit(CFAIL + msg + CEND)
| 5,336,626
|
def main():
"""Execute a rbpkg command.
The registered console script will call this when executing
:command:`rbpkg`. This will locate the appropriate command and execute it.
"""
parser = argparse.ArgumentParser(
prog='rbpkg',
usage='%(prog)s [--version] <command> [options] [<args>]',
add_help=False)
parser.add_argument('-v', '--version',
action='version',
version='rbpkg %s' % get_version_string())
parser.add_argument('-h', '--help',
action='store_true',
dest='help',
default=False)
parser.add_argument('command',
nargs=argparse.REMAINDER,
help='The command to execute, and any arguments. '
'(See below.)')
opt = parser.parse_args()
if not opt.command:
_show_help([], parser)
command_name = opt.command[0]
args = opt.command[1:]
if command_name == 'help':
_show_help(args, parser)
elif opt.help or '--help' in args or '-h' in args:
_show_help(opt.command, parser)
# Attempt to retrieve the command class from the entry points.
command = _get_command(command_name)
if command:
command.run_from_argv(args)
else:
parser.error('"%s" is not a valid command.' % command_name)
| 5,336,627
|
def A12_6_3_2(FAxial, eta, Pp, Pu, Muey , Muez, Muay, Muaz,
Ppls, Mby, Mbz, GammaRPa, GammaRPb):
"""
A.12.6.3.2 Interaction equation approach
where :
Pu is the applied axial force in a member due to factored actions,
determined in an analysis that includes Pu effects (see A.12.4);
Ppls is the representative local axial strength of a non-circular
prismatic member,
Pp is the representative axial strength of a non-circular prismatic
member,
Muey is the corrected bending moment due to factored actions about
the member y-axis from A.12.4;
Muez is the corrected bending moment due to factored actions about
the member z-axis from A.12.4;
Muay is the amplified bending moment due to factored actions about
the member y-axis from A.12.4;
Muaz is the amplified bending moment due to factored actions about
the member z-axis from A.12.4;
Mby is the representative bending moment strength about the member
y-axis, as defined in A.12.6.2.5 or A.12.6.2.6.
"""
# Each non-circular prismatic structural member should satisfy
    # the following conditions in Equations (A.12.6-38) to (A.12.6-40)
# at all cross-sections along its length. When the shear due to
# factored actions is greater than 60 percent of the shear strength,
# the bending moment strength should be reduced parabolically to zero
# when the shear equals the shear strength (Pv in A.12.6.3.4).
#
# Local strength check (for all members):
# (A.12.6-38)
_UR1 = ((GammaRPa * Pu / Ppls)
+ pow((pow((GammaRPb * Muey / Mby),eta)
+ pow((GammaRPb * Muez / Mbz),eta)), 1.0 / eta))
print("")
print("A.12.6.3.2 Interaction equation approach")
print("Uint [Local strength check ] = {: 1.4f}".format(_UR1))
_UR2 = 0
if FAxial == 'compression':
# and beam-column check (for members subject to axial compression):
if GammaRPa * Pu / Pp > 0.20:
# after AISC[A.12.5-1], Equation H1-1a (A.12.6-39)
_UR2 = ((GammaRPa * Pu / Pp)
+ (8.0 / 9.0) * pow((pow((GammaRPb * Muay / Mby),eta)
+ pow((GammaRPb * Muaz / Mbz),eta)), 1.0 / eta))
#
else:
# after AISC[A.12.5-1], Equation H1-1b (A.12.6-40)
_UR2 = ((GammaRPa * Pu / (2.0 * Pp))
+ pow((pow((GammaRPb * Muay / Mby),eta)
+ pow((GammaRPb * Muaz / Mbz),eta)), 1.0/eta))
print("Uint [beam-column check ] = {: 1.4f}".format(_UR2))
print("-----------------")
#
#
#
return _UR1, _UR2
#
| 5,336,628
|
def gain_deploy_data():
"""
    @api {get} /v1/deploy/new_data Get the information for the current deploy_id
    @apiName deployNew_data
    @apiGroup Deploy
    @apiDescription Get the information for the current deploy_id
@apiParam {int} project_id 项目id
@apiParam {int} flow_id 流程id
@apiParam {int} deploy_id 部署id
@apiParamExample {json} Request-Example:
{
"project_id": 45,
"flow_id": 1,
"deploy_id": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"branch": "develop",
"deploy_id": 160,
"flow_id": 232,
"id": 179,
"node_id": 31,
"node_name": "yn-244",
"project_id": 4,
"result": 1,
"server_id": 45,
"server_name": "submarine-test",
"status": 0,
"version": "1.1.75"
}
],
"message": "成功"
}
"""
data = DeployRecordBusiness.query_deploy_id_json()
combine_data = {'is_one_Key': 1, 'data': data}
single_data = DeployRecordBusiness.is_one_key()
if len(single_data) == 0:
combine_data['is_one_Key'] = 0
return json_detail_render(0, combine_data)
| 5,336,629
|
def topk_mask(score, k):
"""Efficient implementation of topk_mask for TPUs.
This is a more efficient implementation of the following snippet with support
for higher rank tensors. It has the limitation that it only supports float32
as element type. The mask only contains k elements even if other elements
have the same value as the kth largest.
def topk_mask(score, k):
_, indices = tf.nn.top_k(score, k=k)
return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
tf.squeeze(score).shape.as_list())
The implementation binary searches for the kth value along each row of the
input and once the kth value is found it creates the mask via a single select
instruction. This approach is more than 100x faster on TPUs for large inputs
compared with the above snippet.
Args:
score: 1-D or higher Tensor with last dimension at least k.
k: Number of top elements to look for along the last dimension (along each
row for matrices).
"""
last_dim_size = score.get_shape().as_list()[-1]
  # Choose top k+epsilon where epsilon is the number of times the k'th largest
  # element is present in the input.
topk_mask_with_duplicate = topk_mask_internal(score, k)
  # Calculate the number of redundant duplicate values to discard.
select_num = tf.cast(
tf.reduce_sum(topk_mask_with_duplicate, axis=-1, keepdims=True), tf.int32)
redudant_num = select_num - k
# softmax cross entropy value range [0, 1].
# k's largest value is the smallest value being selected.
k_th_value = tf.reduce_min(
tf.where(
tf.cast(topk_mask_with_duplicate, tf.bool), score,
tf.ones_like(score) * 2.0),
axis=-1,
keepdims=True)
# Mask to indicate if score equals k th largest value.
equal_k_th_value = tf.equal(score, k_th_value)
  # Creates a tensor where the value is 1 if the value is equal to kth largest
# value, otherwise, 0.
k_th_value = tf.where(equal_k_th_value, tf.ones_like(score, dtype=tf.int32),
tf.zeros_like(score, dtype=tf.int32))
index = tf.range(last_dim_size)
k_th_value_index = tf.multiply(k_th_value, index)
duplicate_mask = topk_mask_internal(
tf.cast(k_th_value_index, tf.float32), redudant_num)
return tf.where(
tf.cast(duplicate_mask, tf.bool), tf.zeros_like(topk_mask_with_duplicate),
topk_mask_with_duplicate)
| 5,336,630
|
def _generateGroundTruth(uids, COURSEDESCRIPTIONS):
"""Generate the ground truths from pre-stored bert model results given unique id lists
:param uids: list of unique ids
:type uids: list
:param COURSEDESCRIPTIONS: dictionary of course Descriptions
:type COURSEDESCRIPTIONS: dict
:return: a dictionary with (uid, ground truth similarity) as key-val pair
:rtype: dict
"""
gt = {}
# _pseudoGroundTruth(COURSEDESCRIPTIONS)
bertVecs = np.load("data/bert_vecs.npy")
cidLists = list(COURSEDESCRIPTIONS.keys())
for uid in uids:
twoids = uid.split("-")
id1, id2 = twoids[0], twoids[1]
vec1, vec2 = bertVecs[cidLists.index(
id1)], bertVecs[cidLists.index(id2)]
sim = np.dot(vec1, vec2)/np.linalg.norm(vec1)/np.linalg.norm(vec2)
# augment to [0,1]
gt[uid] = sim
# to ensure the similarities are comparable, 0-center the similarity value
# ? variance TODO
ave = np.mean(list(gt.values()))
for key in gt.keys():
gt[key] -= ave
return gt
| 5,336,631
|
def test_common_Generator():
"""Test Generator module class."""
scale = MajorScale('C')
chord_generator = Generator(
pitches = scale.getPitches('C','B')
)
generated_chords = set()
for chord in chord_generator.run():
generated_chords.add(
chordSymbolFigure(chord, inversion=0)
)
assert generated_chords <= reference_chords
assert generated_chords >= reference_chords
| 5,336,632
|
def test_get_option_env_var(os_env):
"""
If an option is given as an environment variable, then it should be
returned.
"""
os_env['DJANGO_ADMIN_USERNAME'] = 'foo'
assert createadmin.Command.get_option('username') == 'foo'
| 5,336,633
|
def valueSearch(stat_type,op,value,**kwargs):
"""Quick function to designate a value, and the days or months where the
attribute of interest exceeded, equalled, or was less than the passed
value
valueSearch("attribute","operator",value,**{sortmonth=False})
* "attribute" must be in ["prcp","snow","snwd","tavg","tmax","tmin"] (other
values are accepted, but these are what are assessed
* "operator" must be in ["<=","<","==","!=",">",">="]
* value must be an integer or a float
OPT **kwarg: sortmonth = True --> If set to true, it will do a value
search based on monthly data instead of
daily (no snwd data is available for
months though)
EXAMPLE: valueSearch("prcp",">=",5) --> returns a list of all days on
record where 5+ inches of rain
fell
"""
#operator=">", year=1984, month=12,season="winter"
# v, args[rain,prcp,snow,temp,avgtemp,tmax,avgtmax,tmin,avgtmin], kwargs[condition,year,metyear,season,month]
valid_yrs = sorted([x for x in clmt.keys() if type(x) == int])
valid_metyrs = sorted([x for x in metclmt.keys() if type(x) == int])
# ERROR HANDLING
if stat_type.lower() not in ["rain","prcp","precip","snow","snwd","temp","temps","temperature","temperatures","avgtemp","tavg","tempavglist","tmax","hi","high","tmin","lo","low"]:
return print("OOPS! {} is not a supported stat category. Try again!".format(stat_type))
if op not in ["<","<=","==",">",">="]: return print("OOPS! '{}' is not a supported operator. Try again!".format(op))
if type(value) not in [int,float]: return print("OOPS! Only integers or floats are supported for value intake")
# Format passed variables
stat_type = stat_type.lower() # Convert to lower-case for homogeniety
if stat_type in ["rain","prcp","precip"]: stat_type = "prcp"
if stat_type in ["snow"]: stat_type = "snow"
if stat_type in ["snwd"]: stat_type = "snwd"
if stat_type in ["avgtemp","tavg","tempavglist","temp","temps","temperature","temperatures"]: stat_type = "tavg"
if stat_type in ["tmax","hi","high"]: stat_type = "tmax"
if stat_type in ["tmin","lo","low"]: stat_type = "tmin"
if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
CLMTDICT = clmt_vars_months
stype = "month"
else: # Just sorting indv days
CLMTDICT = clmt_vars_days
stype = "day"
results = []
for VAR in CLMTDICT[stat_type]:
for DAY in CLMTDICT[stat_type][VAR]:
if op == "<":
if stype == "month":
if VAR < value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
else:
if VAR < value: results.append(DAY)
elif op == "<=":
if stype == "month":
if VAR <= value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
else:
if VAR <= value: results.append(DAY)
elif op == "!=":
if VAR != value: results.append(DAY)
elif op == "==":
if VAR == value: results.append(DAY)
elif op == ">=":
if VAR >= value: results.append(DAY)
elif op == ">":
if VAR > value: results.append(DAY)
results.sort()
if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
if stat_type == "prcp": print("Total months where the Total Rainfall {} {}: {}".format(op,value,len(results)))
elif stat_type == "snow": print("Total months where the Total Snowfall {} {}: {}".format(op,value,len(results)))
elif stat_type in ["tmax","tmin"]:
print("Total months where the Average {} {} {}: {}".format(stat_type.upper(),op,value,len(results)))
elif stat_type == "tavg":
print("Total months where the Average Temperature {} {}: {}".format(op,value,len(results)))
else:
return print("*** valueSearch does not report on monthly variations of {} ***".format(stat_type))
if len(results) <= 50: stillprint = True
else:
stillpr = input("print results? ('y'/'n'): ")
if stillpr == "y": stillprint = True
else: stillprint = False
if stillprint == True:
if stat_type == "prcp":
for x in results: print("{:6.2f}: {} {}".format(round(sum(clmt[x.year][x.month]["prcp"]),2),calendar.month_abbr[x.month],x.year))
if stat_type == "snow":
for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snow"]),1),calendar.month_abbr[x.month],x.year))
#if stat_type == "snwd":
#for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snwd"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tavg":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tempAVGlist"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tmax":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmax"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tmin":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmin"]),1),calendar.month_abbr[x.month],x.year))
else: # Just assessing individual days
print("Total days where '{}' {} {}: {}".format(stat_type,op,value,len(results)))
if len(results) <= 50: stillprint = True
else:
stillpr = input("print results? ('y'/'n'): ")
if stillpr == "y": stillprint = True
else: stillprint = False
if stillprint == True:
if stat_type == "prcp":
for x in results: print("{:>5.2f}: {}".format(float(clmt[x.year][x.month][x.day].prcp),x))
if stat_type == "snow":
for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snow),x))
if stat_type == "snwd":
for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snwd),x))
if stat_type == "tmax":
for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmax,x))
if stat_type == "tmin":
for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmin,x))
print("")
| 5,336,634
|
def get_gnid(rec):
"""
Use geonames API (slow and quota limit for free accounts)
"""
if not any("http://www.geonames.org" in s for s in rec.get("sameAs")) and rec["geo"].get("latitude") and rec["geo"].get("longitude"):
changed = False
r = requests.get("http://api.geonames.org/findNearbyJSON?lat="+rec["geo"].get(
"latitude")+"&lng="+rec["geo"].get("longitude")+"&username=slublod")
if r.ok and isiter(r.json().get("geonames")):
for geoNameRecord in r.json().get("geonames"):
if rec.get("name") in geoNameRecord.get("name") or geoNameRecord.get("name") in rec.get("name"): # match!
newSameAs = {'@id': "https://sws.geonames.org/"+str(geoNameRecord.get("geonameId"))+"/",
'publisher': {'abbr': "geonames",
'preferredName': "GeoNames",
"isBasedOn": {"@type": "Dataset",
"@id": "https://sws.geonames.org/"+str(record.get("id"))+"/"
}
}
}
rec["sameAs"] = litter(rec.get("sameAs"), newSameAs)
changed = True
else:
if r.json().get("status").get("message").startswith("the hourly limit") or r.json().get("status").get("message").startswith("the daily limit"):
eprint("Limit exceeded!\n")
exit(0)
if changed:
return rec
| 5,336,635
|
def handle_message(message):
"""Handles every message and creates the reply"""
if re_vpncheck_short.search(message.body) or re_vpncheck_long.search(message.body):
"""Checks for VPN Connectivity"""
servername = None
protocol = None
if re_vpncheck_short.search(message.body):
servername = (re_vpncheck_short.search(message.body).group(2) + "#" + re_vpncheck_short.search(
message.body).group(4).lstrip("0")).upper()
if re_vpncheck_short.search(message.body).group(5) != None:
protocol = re_vpncheck_short.search(message.body).group(5).strip().lower()
else:
protocol = "udp"
elif re_vpncheck_long.search(message.body):
servername = (re_vpncheck_long.search(message.body).group(3) + "-" + re_vpncheck_long.search(
message.body).group(5) + "#" + re_vpncheck_long.search(message.body).group(7).lstrip("0")).upper()
if re_vpncheck_long.search(message.body).group(8) != None:
protocol = re_vpncheck_long.search(message.body).group(8).strip().lower()
else:
protocol = "udp"
ServerID = get_vpnserver_id(servername)
if ServerID != None:
res = test_vpn(servername, ServerID, protocol)
return res
else:
if servername != None:
logger.debug("Server {} not found".format(servername))
return "Server {} not found".format(servername)
else:
return
if re_vpncheck_random.search(message.body):
return test_vpn("FillerServername", "FillerServerID", rand=True)
if re_mailcheck_login.search(message.body):
return test_pm_login()
| 5,336,636
|
def test_list_g_month_enumeration_1_nistxml_sv_iv_list_g_month_enumeration_2_3(mode, save_output, output_format):
"""
Type list/gMonth is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/gMonth/Schema+Instance/NISTSchema-SV-IV-list-gMonth-enumeration-2.xsd",
instance="nistData/list/gMonth/Schema+Instance/NISTXML-SV-IV-list-gMonth-enumeration-2-3.xml",
class_name="NistschemaSvIvListGMonthEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,336,637
|
def allowed_file(filename):
""" Is file extension allowed for upload"""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
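A quick sketch; ALLOWED_EXTENSIONS is assumed to be a module-level set like the illustrative one below.

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "gif"}   # illustrative set

print(allowed_file("photo.PNG"))    # True: extension is lower-cased before the check
print(allowed_file("archive.tar"))  # False: not in the allowed set
print(allowed_file("README"))       # False: no '.' in the name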
| 5,336,638
|
def parse_tsv(filename, name_dict):
"""
"""
output_matrix = []
    with open(filename, 'r') as handle:
curr_protein = []
for line in handle:
if line[0] == "#" or line[0] == "-" or len(line.strip('\n')) < 1:
continue
if re.match("Protein", line):
continue
arow = line.strip('\n').split()
if arow[0] == "pos":
continue
arow[12] = float(arow[12])
if len(arow[10].split('-')) == 3:
#arow = arow[:10] + arow[10].split('_') + arow[11:]
arow = arow[:10] + name_dict[arow[10]].split('-') + arow[11:]
#print arow
output_matrix.append(arow)
return output_matrix
| 5,336,639
|
def get_orlist(site=DEFAULT_SITE, namespace="0|6|10|14|100|828", redirects="nonredirects"):
"""Get list of oldreviewed pages."""
request = Request(site=site,
action="query",
list="oldreviewedpages",
ornamespace=namespace,
orfilterredir=redirects,
orlimit="5000")
result = []
while True:
answer = request.submit()
result += [page["title"] for page in answer["query"]["oldreviewedpages"]]
if "query-continue" in answer:
request["orstart"] = answer["query-continue"]["oldreviewedpages"]["orstart"]
else:
break
return result
| 5,336,640
|
def meshVolume(verts, norm, tri):
"""Compute the Volume of a mesh specified by vertices, their normals, and
indices of triangular faces
"""
# TEST
zeronorms = []
for i, n in enumerate(norm):
#if n == [0., 0., 0.] or n == (0., 0., 0.):
if n[0] == 0 and n[1] == 0 and n[2] == 0:
#print "normal %d is zero!" % i, n
zeronorms.append(i)
#print "in meshVolume, zeronorms length: ", len(zeronorms), "normals length:", len(norm)
# Initialize
volSum = 0.0
oneThird = 1./3.
# Compute face normals
trinorm = []
for t in tri:
n1 = norm[t[0]]
n2 = norm[t[1]]
n3 = norm[t[2]]
tn = [ (n1[0]+n2[0]+n3[0])*oneThird,
(n1[1]+n2[1]+n3[1])*oneThird,
(n1[2]+n2[2]+n3[2])*oneThird ]
trinorm.append(tn)
# print trinorm # TEST
# Compute volume
for t,tn in zip(tri, trinorm):
s1 = verts[t[0]]
s2 = verts[t[1]]
s3 = verts[t[2]]
area = triangleArea(s1,s2,s3)
g = [ (s1[0]+s2[0]+s3[0])*oneThird,
(s1[1]+s2[1]+s3[1])*oneThird,
(s1[2]+s2[2]+s3[2])*oneThird ]
volSum += (g[0]*tn[0] + g[1]*tn[1] + g[2]*tn[2])*area
return volSum*oneThird
| 5,336,641
|
def _groupby_clause(uuid=None, owner=None, human_name=None, processing_name=None):
"""
Build the groupby clause. Simply detect which fields are set, and group by those.
Args:
uuid:
owner:
human_name:
processing_name:
Returns:
(str): "field, ..., field"
"""
gbc = ''
clauses = []
if uuid is not None:
clauses.append('uuid')
if owner is not None:
clauses.append('owner')
if human_name is not None:
clauses.append('human_name')
if processing_name is not None:
clauses.append('processing_name')
if len(clauses) > 0:
gbc = ','.join(clauses)
return gbc
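A worked example: only the fields passed as non-None end up in the clause, joined by commas.

print(_groupby_clause(uuid="abc", processing_name="resize"))  # -> "uuid,processing_name"
print(_groupby_clause())                                      # -> "" (nothing to group by)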
| 5,336,642
|
async def test_response_body_can_be_missing(mock_app):
"""
The TestClient should allow for a response with no headers in the message
https://asgi.readthedocs.io/en/latest/specs/www.html#response-body-send-event
"""
async def custom_http_request(scope, receive, send, msg):
await send({"type": "http.response.start", "headers": [], "status": 200})
# A http.response.body is NOT required to have a body key; if missing, defaults to b"".
await send({"type": "http.response.body", "more_body": False})
mock_app.http_request = custom_http_request
async with TestClient(mock_app) as client:
resp = await client.get("/")
assert resp.status_code == 200
| 5,336,643
|
def internalize_lexicon(mode, to_add):
"""Reads all entries from the lexicons specified in to_add
and puts them in sql. They may then be edited.
    The ES index is not affected and this operation may hence
    be run at any time.
"""
ok = 0
es = conf_mgr.elastic(mode)
sql_bulk = None
for lex in to_add:
print("Internalize", lex)
# Go through each lexicon separately
query = {"query": {"term": {"lexiconName": lex}}}
# scan and scroll
ans = es_helpers.scan(
es,
query=query,
scroll="3m",
raise_on_error=True,
preserve_order=False,
index=mode,
request_timeout=30,
)
sql_bulk = []
for hit in ans: # ans is an iterator of objects from in hits.hits
_id = hit.get("_id")
source = hit.get("_source")
if not isinstance(source, dict):
source = json.loads(source)
sql_doc = document.doc_to_sql(source, lex, "bulk")
sql_bulk.append(
(
_id,
json.dumps(sql_doc),
"admin",
"entry automatically added or reloaded",
lex,
"imported",
)
)
db_loaded, db_error = db.update_bulk(lex, sql_bulk)
if db_error:
raise Exception(db_error)
ok += db_loaded
if sql_bulk:
print("will load %s entries, starting with %s" % (len(sql_bulk), sql_bulk[0]))
if not ok:
_logger.warning("No data. 0 documents uploaded.")
raise Exception("No data")
print("Ok. %s documents loaded to sql\n" % ok)
| 5,336,644
|
def closeConn(client):
"""Remove client data and close connection with client"""
logMsg('Closing client connection')
# Clear client data
logout(client, False)
client.factory.clients.remove(client)
# Disconnect
client.abortConnection()
| 5,336,645
|
def _coeff_mod_wfe_drift(self, wfe_drift, key='wfe_drift'):
"""
Modify PSF polynomial coefficients as a function of WFE drift.
"""
# Modify PSF coefficients based on WFE drift
if wfe_drift==0:
cf_mod = 0 # Don't modify coefficients
elif (self._psf_coeff_mod[key] is None):
_log.warning("You must run `gen_wfedrift_coeff` first before setting the wfe_drift parameter.")
_log.warning("Will continue assuming `wfe_drift=0`.")
cf_mod = 0
else:
_log.info("Generating WFE drift modifications...")
psf_coeff = self.psf_coeff
cf_fit = self._psf_coeff_mod[key]
lxmap = self._psf_coeff_mod['wfe_drift_lxmap']
# Fit function
cf_fit_shape = cf_fit.shape
cf_fit = cf_fit.reshape([cf_fit.shape[0], -1])
cf_mod = jl_poly(np.array([wfe_drift]), cf_fit, use_legendre=True, lxmap=lxmap)
cf_mod = cf_mod.reshape(cf_fit_shape[1:])
# Pad cf_mod array with 0s if undersized
if not np.allclose(psf_coeff.shape, cf_mod.shape):
new_shape = psf_coeff.shape[1:]
cf_mod_resize = np.asarray([pad_or_cut_to_size(im, new_shape) for im in cf_mod])
cf_mod = cf_mod_resize
return cf_mod
| 5,336,646
|
def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None):
""" Returns the GitHub webhook JSON payload transformed into our own payload
format. If the gh_payload is not valid, returns None.
"""
try:
validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA)
except Exception as exc:
raise InvalidPayloadException(exc.message)
payload = JSONPathDict(gh_payload)
if payload['head_commit'] is None:
raise SkipRequestException
config = SafeDictSetter()
config['commit'] = payload['head_commit.id']
config['ref'] = payload['ref']
config['default_branch'] = payload['repository.default_branch'] or default_branch
config['git_url'] = payload['repository.ssh_url']
config['commit_info.url'] = payload['head_commit.url']
config['commit_info.message'] = payload['head_commit.message']
config['commit_info.date'] = payload['head_commit.timestamp']
config['commit_info.author.username'] = payload['head_commit.author.username']
config['commit_info.author.url'] = payload.get('head_commit.author.html_url')
config['commit_info.author.avatar_url'] = payload.get('head_commit.author.avatar_url')
config['commit_info.committer.username'] = payload.get('head_commit.committer.username')
config['commit_info.committer.url'] = payload.get('head_commit.committer.html_url')
config['commit_info.committer.avatar_url'] = payload.get('head_commit.committer.avatar_url')
# Note: GitHub doesn't always return the extra information for users, so we do the lookup
# manually if possible.
if (lookup_user and not payload.get('head_commit.author.html_url') and
payload.get('head_commit.author.username')):
author_info = lookup_user(payload['head_commit.author.username'])
if author_info:
config['commit_info.author.url'] = author_info['html_url']
config['commit_info.author.avatar_url'] = author_info['avatar_url']
if (lookup_user and
payload.get('head_commit.committer.username') and
not payload.get('head_commit.committer.html_url')):
committer_info = lookup_user(payload['head_commit.committer.username'])
if committer_info:
config['commit_info.committer.url'] = committer_info['html_url']
config['commit_info.committer.avatar_url'] = committer_info['avatar_url']
return config.dict_value()
| 5,336,647
|
def get_retweeted_tweet(tweet):
"""
Get the retweeted Tweet and return it as a dictionary
If the Tweet is not a Retweet, return None
Args:
tweet (Tweet or dict): A Tweet object or a dictionary
Returns:
dict: A dictionary representing the retweeted status
        or None if there is no retweeted status. \n
        - For original format, this is the value of "retweeted_status" \n
        - For activity streams, if the Tweet is a Retweet this is the value of the key "object"
"""
if get_tweet_type(tweet) == "retweet":
if is_original_format(tweet):
return tweet["retweeted_status"]
else:
return tweet["object"]
else:
return None
| 5,336,648
|
def listminus(c1, c2):
"""Return a list of all elements of C1 that are not in C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
        if delta not in s2:
c.append(delta)
return c
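A worked example; order follows c1 and elements must be hashable.

print(listminus([1, 2, 3, 4], [2, 4]))  # -> [1, 3]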
| 5,336,649
|
def distribute_quantity_skew(batch_size, grouped_data, distributed_dataset, groupings, p=0.5, scalar=1.5):
"""
Adds quantity skew to the data distribution. If p=0. or scalar=1., no skew is applied and the data are divided
evenly among the workers in each label group.
:param batch_size: the batch size for training
:param grouped_data: a dictionary containing the data for each label skew group, key is the label integer and value
is the data
:param distributed_dataset: an initialized empty dictionary that will be filled with data for each worker
:param groupings: a dictionary of the groupings for each worker id, key is the label integer and value is a list of
worker ids
:param p: the portion of workers within each group that will receive higher data quantities, p=0 indicates no skew
:param scalar: the factor used to multiply the size of datasets for high quantity workers, e.g. if scalar=1.5 then
each worker with high quantity skew has 1.5x as many data points as the low quantity workers in their group
:return: the distributed dataset
"""
for n, group in groupings.items():
high_quantity = random.sample(group, k=int(p*len(group)))
low_quantity = [i for i in group if i not in high_quantity]
base_k = int(len(grouped_data[n])/len(group))
print(f"Base K: {base_k}")
print(f"Length of grouped data: {len(grouped_data[n])}")
if p > 0.:
low_k = int(len(grouped_data[n]) / (len(low_quantity) + len(high_quantity) * scalar))
high_k = int(low_k * scalar)
print(f"High Quantity Skew: {high_quantity}")
print(f"High Quantity K: {high_k}")
print(f"Low Quantity Skew: {low_quantity}")
print(f"Low Quantity K: {low_k}")
else:
low_k = base_k
assert len(high_quantity) == 0, "Quantity skew with probability 0 should have no high quantity clients"
print(f"High Quantity Skew: {high_quantity}")
print(f"Low Quantity Skew: {low_quantity}")
print(f"Base K: {base_k}")
for worker in high_quantity:
selected = random.sample(list(range(len(grouped_data[n]))), k=high_k)
temp = [grouped_data[n][i] for i in selected]
# This would need to be changed if the number of samples is not divisible by batch size
worker_vals = []
for i in range(len(temp) // batch_size):
ix = i * batch_size
vals = temp[ix:ix + batch_size]
targets = []
inputs = []
for j in vals:
targets.append(int(j[1].numpy()))
inputs.append(j[0].numpy())
worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
distributed_dataset[worker].extend(worker_vals)
grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
for nx, worker in enumerate(low_quantity):
if nx+1 == len(low_quantity):
print(f"Length of remaining data = {len(grouped_data[n])}\nLow_k = {low_k}")
temp = grouped_data[n]
else:
selected = random.sample(list(range(len(grouped_data[n]))), k=low_k)
temp = [grouped_data[n][i] for i in selected]
# This would need to be changed if the number of samples is not divisible by batch size
worker_vals = []
for i in range(len(temp) // batch_size):
ix = i * batch_size
vals = temp[ix:ix + batch_size]
targets = []
inputs = []
for j in vals:
targets.append(int(j[1].numpy()))
inputs.append(j[0].numpy())
worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
distributed_dataset[worker].extend(worker_vals)
if nx+1 != len(low_quantity):
grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
return distributed_dataset
| 5,336,650
|
def xmkpy3_tpf_get_coordinates_v1():
"""Unit test"""
import numpy as np
import lightkurve as lk
print(lk.__version__, "=lk.__version__")
def msg(ok, tag_): # helper function
print("***" + tag_ + ": ", end="")
if ok:
print("PASS***")
else:
print("FAIL***")
# fed
tpf = lk.search_targetpixelfile(
target="kepler-138b", mission="kepler", cadence="long", quarter=10
).download(quality_bitmask=0)
w = tpf.wcs # alias
ll_x0 = 0
ll_y0 = 0
print(ll_x0, "=ll_x0")
print(ll_y0, "=ll_y0")
origin0 = 0
ra_ll_x0, dec_ll_y0 = w.wcs_pix2world(ll_x0, ll_y0, origin0)
print(ra_ll_x0, dec_ll_y0, "=ra_ll_x0, dec_ll_y0")
print()
x0_ra_ll_x0, y0_dec_ll_y0 = w.wcs_world2pix(ra_ll_x0, dec_ll_y0, origin0)
print(
x0_ra_ll_x0, y0_dec_ll_y0, "=x0_ra_ll_x0, y0_dec_ll_y0 [should be about (0,0)]"
)
ra_x0_ra_ll_x0, dec_y0_dec_ll_y0 = w.wcs_pix2world(
x0_ra_ll_x0, y0_dec_ll_y0, origin0
)
print(ra_x0_ra_ll_x0, dec_y0_dec_ll_y0, "=ra_x0_ra_ll_x0, dec_y0_dec_ll_y0")
print("\nra_x0_ra_ll_x0 is_close_to ra_ll_x0 ?")
ok = np.abs(ra_x0_ra_ll_x0 - ra_ll_x0) < 0.000001
msg(ok, "TEST1")
print("^--- THIS BETTER PASS!")
print("\ndec_y0_dec_ll_y0 is_close_to dec_ll_y0 ?")
ok = np.abs(dec_y0_dec_ll_y0 - dec_ll_y0) < 0.000001
msg(ok, "TEST2")
print("^--- THIS BETTER PASS!")
print()
frame0 = 0
# Set one of the next 3 if statements to TRUE depending on the function to
# be tested
if False:
print("---> check tpf.get_coordinates()")
rax_ll_x0 = tpf.get_coordinates()[0][frame0][0][0]
decx_ll_y0 = tpf.get_coordinates()[1][frame0][0][0]
print(
"NOTE: next two tests will PASS --- if the tpf.get_coordinates "
"bug has been fixed"
)
# fi
if True:
print("---> check mkpy3_tpf_get_coordinates_v1()")
rax_ll_x0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf)[0][frame0][0][0]
decx_ll_y0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf)[1][frame0][0][0]
print("NOTE: next two tests should PASS")
# fi
if False:
print("---> check mkpy3_tpf_get_coordinates_v1(...,recreate_bug=True)")
rax_ll_x0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf, recreate_bug=True)[0][frame0][
0
][0]
decx_ll_y0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf, recreate_bug=True)[1][
frame0
][0][0]
print("NOTE: next two tests should FAIL")
# fi
print(rax_ll_x0, decx_ll_y0, "=rax_ll_x0, decx_ll_y0")
print()
x0_rax_ll_x0, y0_decx_ll_y0 = w.wcs_world2pix(rax_ll_x0, decx_ll_y0, origin0)
print(x0_rax_ll_x0, y0_decx_ll_y0, "=x0_rax_ll_x0, y_decx_ll_y0")
tpf_pos_corr1_frame0 = tpf.pos_corr1[frame0]
tpf_pos_corr2_frame0 = tpf.pos_corr2[frame0]
print(
tpf_pos_corr1_frame0,
tpf_pos_corr2_frame0,
"=tpf_pos_corr1_frame0, tpf_pos_corr2_frame0",
)
xx0_rax_ll_x0 = x0_rax_ll_x0 - tpf_pos_corr1_frame0
yy0_decx_ll_y0 = y0_decx_ll_y0 - tpf_pos_corr2_frame0
print(
xx0_rax_ll_x0,
yy0_decx_ll_y0,
"=xx0_rax_ll_x0, yy0_decx_ll_y0 [should be about (0,0)]",
)
ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0 = w.wcs_pix2world(
xx0_rax_ll_x0, yy0_decx_ll_y0, origin0
)
print(ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0, "=ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0")
print("\nra_xx0_rax_ll_x0 is_close_to ra_ll_x0 ?")
ok = np.abs(ra_xx0_rax_ll_x0 - ra_ll_x0) < 0.000001
msg(ok, "TEST3")
print("\ndec_yy0_decx_ll_y0 is_close_to dec_ll_y0 ?")
ok = np.abs(dec_yy0_decx_ll_y0 - dec_ll_y0) < 0.000001
msg(ok, "TEST4")
return None
# fed
| 5,336,651
|
def __loadModule(modulePath):
# type: (str) -> module
""" Load module
Args:
modulePath (str): Full path to the python module
Return:
mod (module object): command module
None: if path doesn't exist
"""
    # Create module names for import, for example ...
#
# "rush/template"
# "animation/animate"
# "common/create"
# "common/display"
normPath = os.path.normpath(modulePath)
if sys.platform == "win32":
name = os.path.splitext(normPath)[0].split("\\")
else:
name = os.path.splitext(normPath)[0].split("/")
name = "/".join(name[-2:])
# If arnold is not loaded or installed, ignore modules for arnold
if name.startswith("Arnold"):
hasArnold = cmds.pluginInfo("mtoa", q=True, loaded=True)
if not hasArnold:
return None
try:
mod = imp.load_source(name, modulePath)
return mod
except Exception:
print("Failed to load module : %s" % modulePath)
return None
| 5,336,652
|
def get_dtype(names, array_dtype=DEFAULT_FLOAT_DTYPE):
"""
Get a list of tuples containing the dtypes for the structured array
Parameters
----------
names : list of str
Names of parameters
array_dtype : optional
dtype to use
Returns
-------
list of tuple
Dtypes as tuples with (field, dtype)
"""
return [(n, array_dtype) for n in names] \
+ [('logP', array_dtype), ('logL', LOGL_DTYPE)]
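# Usage sketch (hedged): with the module-level DEFAULT_FLOAT_DTYPE and LOGL_DTYPE in scope,
#     get_dtype(['x', 'y'])
# returns [('x', DEFAULT_FLOAT_DTYPE), ('y', DEFAULT_FLOAT_DTYPE),
#          ('logP', DEFAULT_FLOAT_DTYPE), ('logL', LOGL_DTYPE)],
# which can be passed straight to numpy, e.g. np.zeros(100, dtype=get_dtype(['x', 'y'])).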
| 5,336,653
|
def test_unhexlify():
"""
Ensure that we can get the script back out using unhexlify and that the
result is a properly decoded string.
"""
hexlified = uflash.hexlify(TEST_SCRIPT)
unhexlified = uflash.unhexlify(hexlified)
assert unhexlified == TEST_SCRIPT.decode('utf-8')
| 5,336,654
|
def print_progress_table(col_headers, col_widths = None, col_init_data = None,
col_format_specs = None, skip_header=False):
""" Live updates on progress with NUPACK and Multistrand computations.
    Note: This table has two rows: a header row and a data row that the returned
    update function overwrites in place.
Args:
col_headers (list(str)): The header of the table.
        col_widths (list(int), optional): Spacing of the table columns. Strings are
            clipped to width-1.
        col_init_data (list(), optional): Prints initial data into the first row.
        col_format_specs (list(str), optional): Format strings applied to each
            column's data; defaults to '{}' for every column.
Returns:
A progress update function which overwrites the data row (or the last line on screen).
"""
def update_progress(col_data, inline=True):
"""Print new data to your progress table."""
str_data = [('{:<'+str(w-1)+'}').format(f.format(d))[:w-1] for d,w,f in zip(col_data, col_widths, col_format_specs)]
print("# {}{}".format(' '.join(str_data), "\r" if inline else "\n"), end='')
sys.stdout.flush()
if col_widths is None:
col_widths = [max(len(h)+1, 8) for h in col_headers]
else:
assert len(col_widths) == len(col_headers)
if col_format_specs is None:
col_format_specs = ['{}'] * len(col_headers)
else:
assert len(col_format_specs) == len(col_headers)
header = ' '.join([(h+' '*(w-1))[:w-1] for h,w in zip(col_headers, col_widths)])
if not skip_header:
print("# {}".format(header))
if col_init_data is not None:
update_progress(col_init_data)
return update_progress
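# Usage sketch: a two-column progress table whose data row is rewritten in place.
# (Assumes `sys` is imported at module level, since update_progress uses sys.stdout.)
update = print_progress_table(['step', 'loss'], col_init_data=[0, 1.0],
                              col_format_specs=['{}', '{:.3f}'])
for step in range(1, 4):
    update([step, 1.0 / step])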
| 5,336,655
|
def main():
"""
Centralized virtual environments.
"""
| 5,336,656
|
def remove_stroke(settings):
"""Removes the stroke (i.e. border in Faint) from the settings object,
preserving fill.
"""
if settings.fillstyle == 'border':
settings.fillstyle = 'none'
elif settings.fillstyle == 'fill+border':
settings.fg = settings.bg
settings.fillstyle = 'fill'
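# Usage sketch with a stand-in settings object (the real one comes from Faint's API):
class _FakeSettings:
    fillstyle = 'fill+border'
    fg = 'red'
    bg = 'blue'
s = _FakeSettings()
remove_stroke(s)
print(s.fillstyle, s.fg)  # -> fill blue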
| 5,336,657
|
def if_analyser(string):
"""调用python的eval函数计算True false"""
trans = sign_transform(string.strip().lower())
# print('if_analyser>>', trans)
boool = eval(trans)
boool = 1 if boool else 0
return boool
| 5,336,658
|
def zzX_trunc(f, p):
"""Reduce Z[X] polynomial modulo polynomial p. """
return zzX_strip([ zzX_rem(g, p) for g in f ])
| 5,336,659
|
def update_points_constellation(
points_in_range, new_constelation, point_to_constellation):
"""Update point_to_constellation mapping for points_in_range."""
for point_in_range in points_in_range:
point_to_constellation[point_in_range] = new_constelation
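# Usage sketch: reassign two points to a newly formed constellation.
mapping = {'p1': 0, 'p2': 0, 'p3': 1}
update_points_constellation(['p1', 'p3'], 2, mapping)
print(mapping)  # -> {'p1': 2, 'p2': 0, 'p3': 2}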
| 5,336,660
|
def all_instances(clazz):
"""Return all subjects."""
logging.info(
"Starting aggregation for all AnVIL workspaces, this will take several minutes.")
print("Starting aggregation for all AnVIL workspaces, this will take several minutes.")
consortiums = (
('CMG', 'AnVIL_CMG_.*'),
('CCDG', 'AnVIL_CCDG_.*'),
('GTEx', '^AnVIL_GTEx_V8_hg38$'),
('ThousandGenomes', '^1000G-high-coverage-2019$')
)
for item in reconcile_all(user_project=os.environ['GOOGLE_PROJECT'], consortiums=consortiums):
if isinstance(item, Sample):
append_drs(item)
if clazz is None or isinstance(item, clazz):
yield item
| 5,336,661
|
def _format_bin_intervals(bins_arr: np.ndarray) -> List[str]:
"""
Auxillary function to format bin intervals in a histogram
Parameters
----------
bins_arr: np.ndarray
Bin endpoints to format into intervals
Returns
-------
List of formatted bin intervals
"""
bins_arr = np.round(bins_arr, 3)
intervals = [f"[{bins_arr[i]},{bins_arr[i+1]})" for i in range(len(bins_arr) - 2)]
intervals.append(f"[{bins_arr[-2]},{bins_arr[-1]}]")
return intervals
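# Usage sketch: turn numpy histogram bin edges into interval labels.
import numpy as np
edges = np.array([0.0, 0.5, 1.0, 1.5])
print(_format_bin_intervals(edges))  # -> ['[0.0,0.5)', '[0.5,1.0)', '[1.0,1.5]']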
| 5,336,662
|
def log(message, prefix_newline=False):
"""Logging function, provides a hook to suppress or redirect log messages."""
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
| 5,336,663
|
def createDataset(businessIDs, dataset):
"""Writes a new json file containing subset of given Yelp dataset given a dictionary of business IDs to specify which businesses"""
writer = jsonlines.open(dataset + 'Dataset.json', mode='w')
with open(dataset + '.json', 'r', encoding='utf8') as dataJson:
for jsonObjects in dataJson:
tempDict = json.loads(jsonObjects)
if tempDict['business_id'] in businessIDs:
writer.write(tempDict)
writer.close()
| 5,336,664
|
def main():
"""Zeigt den Link zum Repository in der Konsole an."""
print("Dieses Tool stellt übergreifende, grundlegende Funktionalitäten zur Verfügung.\n"
"Für weitergehende Informationen: https://github.com/MZH-bust/general_helpers")
| 5,336,665
|
def get_embedder_functions(corpus: List[str]) -> Dict[str, Callable[[List[str]], List[float]]]:
"""
Returns a list of the available embedders.
#! If updated, update next function too
"""
embedders = {
# 'Bag of Words': bow_embedder(corpus),
'FastText (CBOW)': fasttext_embedder(corpus, model_type="cbow"),
'FastText (Skipgram)': fasttext_embedder(corpus, model_type="skipgram"),
'Doc2Vec': doc2vec_embedder(corpus),
'GPT2 Small Spanish': bert_embedder(model_name="datificate/gpt2-small-spanish"),
'BERT: TinyBERT-spanish-uncased-finetuned-ner':
bert_embedder(model_name='mrm8488/TinyBERT-spanish-uncased-finetuned-ner'),
'BERT: paraphrase-xlm-r-multilingual-v1': bert_embedder(model_name='paraphrase-xlm-r-multilingual-v1'),
'BERT: distiluse-base-multilingual-cased-v2': bert_embedder(model_name='distiluse-base-multilingual-cased-v2'),
}
reduced_embedders = {}
for name, embedder in embedders.items():
reduced_embedders[f"{name} (50-d)"] = reduce_dimensionality(embedder)
return {**embedders, **reduced_embedders}
| 5,336,666
|
def test_stimeit():
""" Test the stimeit function """
dummy_function = lambda x: x + 2
@vtime.stimeit(logging.info)
def stimeit_function(x):
return dummy_function(x)
assert dummy_function(42) == stimeit_function(42)
| 5,336,667
|
def tag_dataset(client, short_name, tag, description, force=False):
"""Creates a new tag for a dataset."""
dataset_ = client.load_dataset(short_name)
if not dataset_:
raise ParameterError('Dataset not found.')
try:
dataset = client.add_dataset_tag(dataset_, tag, description, force)
except ValueError as e:
raise ParameterError(e)
dataset.to_yaml()
| 5,336,668
|
def main_aster(folder_name):
"""
@Input
folder_name : name of the folder where training data are stored.
@Output
trained parameters are stored in 'params' folder
"""
# arguments are stored in pred_params.py
from pred_params import Get_ocr_args
args = Get_ocr_args()
print('Evaluation : '+str(args.eval))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
if args.cuda:
print('using cuda.')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
print('using cpu.')
torch.set_default_tensor_type('torch.FloatTensor')
# Create Character dict & max seq len
args, char2id_dict , id2char_dict= Create_char_dict(args)
print(id2char_dict)
rec_num_classes = len(id2char_dict)
# Get rec num classes / max len
print('max len : '+str(args.max_len))
# Get file list for train set
filenames = glob.glob('./data/' + folder_name + '/*/*.xml')
filenames = [x[:-4] for x in filenames]
print('file len : '+str(len(filenames)))
    # files are not split into train/valid sets.
train_list = Create_data_list_byfolder(args, char2id_dict, id2char_dict, filenames)
encoder = ResNet_ASTER(with_lstm = True, n_group = args.n_group, use_cuda = args.cuda)
encoder_out_planes = encoder.out_planes
decoder = AttentionRecognitionHead(num_classes = rec_num_classes,
in_planes = encoder_out_planes,
sDim = args.decoder_sdim,
attDim = args.attDim,
max_len_labels = args.max_len,
use_cuda = args.cuda)
# Load pretrained weights
if not args.eval:
if args.use_pretrained:
# use pretrained model
pretrain_path = './data/demo.pth.tar'
if args.cuda:
pretrained_dict = torch.load(pretrain_path)['state_dict']
else:
pretrained_dict = torch.load(pretrain_path, map_location='cpu')['state_dict']
encoder_dict = {}
decoder_dict = {}
for i, x in enumerate(pretrained_dict.keys()):
if 'encoder' in x:
encoder_dict['.'.join(x.split('.')[1:])] = pretrained_dict[x]
elif 'decoder' in x:
decoder_dict['.'.join(x.split('.')[1:])] = pretrained_dict[x]
encoder.load_state_dict(encoder_dict)
decoder.load_state_dict(decoder_dict)
print('pretrained model loaded')
else:
# init model parameters
def init_weights(m):
if type(m) == nn.Linear:
                    torch.nn.init.xavier_uniform_(m.weight)
#m.bias.data.fill_(0.01)
encoder.apply(init_weights)
decoder.apply(init_weights)
print('Random weight initialized!')
else:
# loading parameters for inference
if args.cuda:
encoder.load_state_dict(torch.load('params/encoder_final'))
decoder.load_state_dict(torch.load('params/decoder_final'))
else:
encoder.load_state_dict(torch.load('params/encoder_final', map_location=torch.device('cpu')))
decoder.load_state_dict(torch.load('params/decoder_final', map_location=torch.device('cpu')))
print('fine-tuned model loaded')
# Training Phase
rec_crit = SequenceCrossEntropyLoss()
    if args.cuda and torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
encoder.to(device)
decoder.to(device)
param_groups = encoder.parameters()
param_groups = filter(lambda p: p.requires_grad, param_groups)
optimizer = torch.optim.Adadelta(param_groups, lr = args.lr, weight_decay = args.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = [4,5], gamma = 0.1)
train_loader = DataLoader(train_list,
batch_size = args.batch_size,
shuffle = False,
collate_fn = AlignCollate(
imgH = args.height, imgW = args.width, keep_ratio = True)
)
for epoch in range(args.n_epochs):
for batch_idx, batch in enumerate(train_loader):
x, rec_targets, rec_lengths = batch[0], batch[1], batch[2]
x = x.to(device)
encoder_feats = encoder(x) # bs x w x C
rec_pred = decoder([encoder_feats, rec_targets, rec_lengths])
loss_rec = rec_crit(rec_pred, rec_targets, rec_lengths)
if batch_idx == 0:
print('train Loss : '+str(loss_rec))
rec_pred_idx = np.argmax(rec_pred.detach().cpu().numpy(), axis = -1)
print(rec_pred[:3])
print(rec_pred_idx[:5])
optimizer.zero_grad()
loss_rec.backward()
optimizer.step()
# Training phase ends
# this is where trained model parameters are saved
torch.save(encoder.state_dict(), 'params/encoder_final')
torch.save(decoder.state_dict(), 'params/decoder_final')
| 5,336,669
|
def parse_thermal_properties(f):
"""thermal_properties.yaml parser."""
thermal_properties = {
"temperatures": [],
"free_energy": [],
"entropy": [],
"heat_capacity": [],
}
data = yaml.load(f, Loader=Loader)
for tp in data["thermal_properties"]:
thermal_properties["temperatures"].append(tp["temperature"])
thermal_properties["entropy"].append(tp["entropy"])
thermal_properties["free_energy"].append(tp["free_energy"])
thermal_properties["heat_capacity"].append(tp["heat_capacity"])
for key in thermal_properties:
thermal_properties[key] = np.array(thermal_properties[key])
tprops = get_thermal_properties(thermal_properties)
return tprops
| 5,336,670
|
async def test_add_invalid_repository_file(coresys, store_manager):
"""Test add custom repository."""
current = coresys.config.addons_repositories
with patch("supervisor.store.repository.Repository.load", return_value=None), patch(
"pathlib.Path.read_text",
return_value=json.dumps({"name": "Awesome repository"}),
), patch("pathlib.Path.exists", return_value=False):
await store_manager.update_repositories(current + ["http://example.com"])
assert not store_manager.get_from_url("http://example.com").validate()
assert "http://example.com" in coresys.config.addons_repositories
assert coresys.resolution.suggestions[-1].type == SuggestionType.EXECUTE_REMOVE
| 5,336,671
|
def convert(ts, new_freq, include_partial=True, **kwargs):
"""
This function converts a timeseries to another frequency. Conversion only
works from a higher frequency to a lower frequency, for example daily to
monthly.
NOTE: add a gatekeeper for invalid kwargs.
"""
new_ts = ts.clone()
series_dir = ts.series_direction()
new_ts.sort_by_date(reverse=True)
freq_idx = HIERARCHY.index(ts.frequency)
    new_idx = HIERARCHY.index(new_freq)
daily_idx = HIERARCHY.index(FREQ_D)
if freq_idx > new_idx:
raise ValueError(
"Cannot convert from %s to %s." % (ts.frequency, new_freq)
)
dates = new_ts.datetime_series()
date_series_type = ts.get_date_series_type()
if date_series_type == TS_ORDINAL:
selected = _filter_dates(dates, new_freq, kwargs)
elif date_series_type == TS_TIMESTAMP:
selected = _filter_idates(
dates, new_freq, end_of_period=ts.end_of_period
)
else:
raise ValueError("Invalid date series type: %s" % (date_series_type))
if selected.shape[0] > 0:
if new_ts.end_of_period:
selected += 1 # shift to start of next period
if include_partial or freq_idx > daily_idx:
if selected[0] != 0:
# insert most recent date
# selected = np.insert(selected, 0, 0)
# np.insert(arr, obj, values, axis=None)
selected = np.insert(selected, 0, 0)
if freq_idx > daily_idx:
# already processed (probably)
if selected[-1] != len(dates) - 1:
selected = np.append(selected, len(dates) - 1)
new_ts.tseries = new_ts.tseries[selected.flatten()]
new_ts.frequency = new_freq
if new_freq == FREQ_D:
# convert dates from timestamp to ordinal
new_ts.dseries = np.fromiter(
[date.toordinal() for date in np.array(dates)[selected]],
dtype=np.int32,
)
else:
new_ts.dseries = new_ts.dseries[selected]
new_ts.dseries = new_ts.dseries.flatten()
if series_dir != new_ts.series_direction():
new_ts.reverse()
return new_ts
| 5,336,672
|
def _get_image_info(name: str) -> versions.Image:
"""Retrieve an `Image` information by name from the versions listing."""
try:
return versions.CONTAINER_IMAGES_MAP[name]
except KeyError:
raise ValueError(
'Missing version for container image "{}"'.format(name)
)
| 5,336,673
|
def _mother_proc_cpp_stat(
amplitude_distribution, t_stop, rate, t_start=0 * pq.ms):
"""
Generate the hidden ("mother") Poisson process for a Compound Poisson
Process (CPP).
Parameters
----------
amplitude_distribution : np.ndarray
CPP's amplitude distribution :math:`A`. `A[j]` represents the
probability of a synchronous event of size `j` among the generated
spike trains. The sum over all entries of :math:`A` must be equal to
one.
t_stop : pq.Quantity
The stopping time of the mother process
rate : pq.Quantity
Homogeneous rate of the n spike trains that will be generated by the
CPP function
t_start : pq.Quantity, optional
The starting time of the mother process
Default: 0 pq.ms
Returns
-------
Poisson spike train representing the mother process generating the CPP
"""
n_spiketrains = len(amplitude_distribution) - 1
# expected amplitude
exp_amplitude = np.dot(
amplitude_distribution, np.arange(n_spiketrains + 1))
# expected rate of the mother process
exp_mother_rate = (n_spiketrains * rate) / exp_amplitude
return StationaryPoissonProcess(
rate=exp_mother_rate, t_stop=t_stop, t_start=t_start
).generate_spiketrain()
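# Worked example of the rate computation above (comments only, hedged):
# with amplitude_distribution = [0, 0.9, 0.1] there are n = 2 spike trains,
# the expected amplitude is 0*0 + 1*0.9 + 2*0.1 = 1.1, so the mother Poisson
# process fires at exp_mother_rate = (2 * rate) / 1.1.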
| 5,336,674
|
def main():
"""
Main execution flow.
"""
try:
args = process_arguments()
config = utilities.read(args.config_file)
manifest = utilities.read(args.manifest_file)
# TODO: Refactor
core.config = config
utilities.TOKENIZER = core.Tokenizer()
database = Database(config['options']['datasource'])
globaloptions = {
'today': config['options']['today'],
'timeout': config['options']['timeout']
}
attributes = Attributes(
manifest['attributes'], database, args.cleanup, args.key_string,
**globaloptions
)
if not os.path.exists(args.repositories_root):
os.makedirs(args.repositories_root, exist_ok=True)
table = 'reaper_results'
if args.goldenset:
table = 'reaper_goldenset'
_run = run.Run(
args.repositories_root, attributes, database,
config['options']['threshold'], args.num_processes
)
_run.run([int(line) for line in args.repositories_sample], table)
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
| 5,336,675
|
def get_service(hass, config):
"""Get the Telegram notification service."""
import telegram
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_API_KEY, 'chat_id']},
_LOGGER):
return None
try:
bot = telegram.Bot(token=config[CONF_API_KEY])
username = bot.getMe()['username']
_LOGGER.info("Telegram bot is '%s'.", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token.")
return None
return TelegramNotificationService(config[CONF_API_KEY], config['chat_id'])
| 5,336,676
|
def has_numbers(input_str: str):
""" Check if a string has a number character """
return any(char.isdigit() for char in input_str)
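# Usage sketch:
print(has_numbers("abc123"))  # -> True
print(has_numbers("abcdef"))  # -> False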
| 5,336,677
|
def test_pyfs_delete_fail(pyfs, pyfs_testpath):
"""Test init of files."""
pyfs.save(BytesIO(b'somedata'))
os.rename(pyfs_testpath, join(dirname(pyfs_testpath), 'newname'))
pytest.raises(DirectoryNotEmpty, pyfs.delete)
| 5,336,678
|
def _validate_LIMS_data(input, field_label, selectedTemplate, planObj):
"""
No validation but LIMS data with leading/trailing blanks in the input will be trimmed off
"""
errorMsg = None
if input:
data = input.strip()
try:
if planObj.get_planObj().metaData:
logger.debug(
"plan_csv_validator._validator_LIMS_data() B4 planObj.get_planObj().metaData=%s"
% (planObj.get_planObj().metaData)
)
else:
planObj.get_planObj().metaData = {}
if len(planObj.get_planObj().metaData.get("LIMS", [])) == 0:
planObj.get_planObj().metaData["LIMS"] = []
planObj.get_planObj().metaData["LIMS"].append(data)
logger.debug(
"EXIT plan_csv_validator._validator_LIMS_data() AFTER planObj.get_planObj().metaData=%s"
% (planObj.get_planObj().metaData)
)
except Exception:
logger.exception(format_exc())
errorMsg = "Internal error during LIMS data processing"
# self.metaData["Status"] = status
# self.metaData["Date"] = "%s" % timezone.now()
# self.metaData["Info"] = info
# self.metaData["Comment"] = comment
#
# Try to read the Log entry, if it does not exist, create it
# if len(self.metaData.get("Log",[])) == 0:
# self.metaData["Log"] = []
# self.metaData["Log"].append({"Status":self.metaData.get("Status"), "Date":self.metaData.get("Date"), "Info":self.metaData.get("Info"), "Comment":comment})
return errorMsg
| 5,336,679
|
def show(ctx, advisory, yaml, json):
""" Show RPMDiff failures for an advisory.
"""
runtime = ctx.obj # type: Runtime
if not advisory:
runtime.initialize()
advisory = runtime.group_config.advisories.get("rpm", 0)
if not advisory:
raise ElliottFatalError("No RPM advisory number configured in ocp-build-data.")
else:
runtime.initialize(no_group=True)
logger = runtime.logger
logger.info("Fetching RPMDiff runs from Errata Tool for advisory {}...".format(advisory))
rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))
# "good" means PASSED, INFO, or WAIVED
good_runs = []
# "bad" means NEEDS_INSPECTION or FAILED
bad_runs = []
incomplete_runs = []
for rpmdiff_run in rpmdiff_runs:
attr = rpmdiff_run['attributes']
if attr["status"] in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
good_runs.append(rpmdiff_run)
elif attr["status"] in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
bad_runs.append(rpmdiff_run)
else:
incomplete_runs.append(rpmdiff_run)
util.green_prefix("good: {}".format(len(good_runs)))
click.echo(", ", nl=False)
util.red_prefix("bad: {}".format(len(bad_runs)))
click.echo(", ", nl=False)
util.yellow_print("incomplete: {}".format(len(incomplete_runs)))
if not bad_runs:
return
logger.info("Fetching detailed information from RPMDiff for bad RPMDiff runs...")
rpmdiff_client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
rpmdiff_client.authenticate()
if yaml or json:
_structured_output(bad_runs, rpmdiff_client, yaml)
else:
_unstructured_output(bad_runs, rpmdiff_client)
| 5,336,680
|
def get_report_permission(report: Report, user: User) -> Permission:
"""Get permission of given user for the report.
:param report: The report
:type report: Report
:param user: The user whose permissions are to be checked
:type user: User
:return: The user's permissions for the report
:rtype: Permission
"""
if 'reports' in session and str(report.id) in session['reports']:
return session['reports'][str(report.id)]
rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
if rp is None and user.Role == 's':
return ADMIN_DEFAULT_PERMISSION
if rp is None:
return 'n'
return rp.Type
| 5,336,681
|
def peak_time_from_sxs(
sxs_format_waveform,
metadata,
extrapolation_order='Extrapolated_N2'):
"""Returns the time when the sum of the squared amplitudes of an
SXS-format waveform is largest. Note: this is not necessarily the time of
the peak of the l=m=2 mode."""
extrap = extrapolation_order + ".dir"
# All modes have the same time, so just look at the l=m=2 mode to get the
# times
    times = sxs_format_waveform[extrap]['Y_l2_m2.dat'][:, 0]
start = first_index_before_reference_time(times, metadata)
sum_amp_squared = waveform_norm_squared(
sxs_format_waveform, extrapolation_order)
index_peak = start + sum_amp_squared[start:].argmax()
return sxs_format_waveform[extrap]['Y_l2_m2.dat'][index_peak][0]
| 5,336,682
|
def create_trackhub_resource(project_dir, api_client, create_user_resource, create_genome_assembly_dump_resource):
"""
This fixture is used to create a temporary trackhub using POST API
The created trackhub will be used to test GET API
"""
_, token = create_user_resource
api_client.credentials(HTTP_AUTHORIZATION='Token ' + str(token))
submitted_hub = {
'url': 'file:///' + str(project_dir) + '/' + 'samples/JASPAR_TFBS/hub.txt'
}
response = api_client.post('/api/trackhub/', submitted_hub, format='json')
return response
| 5,336,683
|
def create_node(x, y):
"""Create a node along the network.
Parameters
----------
    x : sequence of {float, int}
        The x coordinate(s) of the point; only the first value is used.
    y : sequence of {float, int}
        The y coordinate(s) of the point; only the first value is used.
    Returns
    -------
    _node : shapely.geometry.Point
Instantiated node.
"""
_node = Point(list(zip(x, y))[0])
return _node
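# Usage sketch (assumes shapely's Point is imported at module level, as used above):
# the coordinates are passed as sequences and only the first (x, y) pair is used.
node = create_node([1.5], [2.5])
print(node)  # -> POINT (1.5 2.5)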
| 5,336,684
|
def get_matchroom_name(match: Match) -> str:
"""Get a new unique channel name corresponding to the match.
Parameters
----------
match: Match
The match whose info determines the name.
Returns
-------
str
The name of the channel.
"""
name_prefix = match.matchroom_name
cut_length = len(name_prefix) + 1
largest_postfix = 1
found = False
for channel in server.server.channels:
if channel.name.startswith(name_prefix):
found = True
try:
val = int(channel.name[cut_length:])
largest_postfix = max(largest_postfix, val)
except ValueError:
pass
return name_prefix if not found else '{0}-{1}'.format(name_prefix, largest_postfix + 1)
| 5,336,685
|
def unidecode_name(uname):
"""
unidecode() of cjk ideograms can produce strings which contain spaces.
Strip leading and trailing spaces, and reduce double-spaces to single.
For some other ranges, unidecode returns all-lowercase names; fix these
up with capitalization.
"""
# Fix double spacing
name = unidecode.unidecode(uname)
if name == uname:
return name
name = re.sub(' +', ' ', name.strip().replace('@', '').replace('"', ''))
name = re.sub(r'(\w)\.(\w)', r'\1\2', name)
# Fix all-upper and all-lower names:
# Check for name particles -- don't capitalize those
m = name_particle_match(name)
particle = m.group(1) if m else None
# Get the name parts
prefix, first, middle, last, suffix = name_parts(name)
# Capitalize names
first = first.title()
middle = ' '.join([ capfirst(p) for p in middle.split() ])
last = ' '.join([ capfirst(p) for p in last.split() ])
if len(last) == 1:
last = (last+last).capitalize()
# Restore the particle, if any
if particle and last.startswith(capfirst(particle)+' '):
last = ' '.join([ particle, last[len(particle)+1:] ])
# Recombine the parts
parts = prefix, first, middle, last, suffix
name = ' '.join([ p for p in parts if p and p.strip() != '' ])
name = re.sub(' +', ' ', name)
return name
| 5,336,686
|
def scnet50(**kwargs):
"""
SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=50, model_name="scnet50", **kwargs)
| 5,336,687
|
def xception_block(inputs, depth_list, prefix, skip_connect_type, stride, rate=1,
depth_activation=False, return_skip=False):
"""用于构建xception,同样用到了残差结构,但是将卷积换成了 深度可分离卷积(depthwise + pointwise + conv 1x1)"""
residual = inputs
for i in range(3):
# depthwise + pointwise + conv2d
residual = sep_layer(residual, depth_list[i], prefix + '_separable_conv{}'.format(i + 1),
stride=stride if stride == 2 else 1, rate=rate, depth_activation=depth_activation)
if i == 1:
            skip = residual  # main-path output after the first two separable convs (depthwise + conv2d)
if skip_connect_type == 'conv':
        # skip connection ('conv'): pass the input through a side conv, then add it to the main-path output
shortcut = conv_same_layer(inputs, depth_list[-1], prefix + '_shortcut', k_size=1, stride=stride)
shortcut = layers.BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
output = layers.Add()([residual, shortcut])
elif skip_connect_type == 'sum':
        # skip connection ('sum'): add the input directly to the main-path output
        output = layers.Add()([residual, inputs])
elif skip_connect_type == 'none':
        # no skip connection
output = residual
if return_skip:
        # output is the whole block's output; skip is the main-path output after the first two separable convs
return output, skip
else:
return output
| 5,336,688
|
def auth_test():
"""
Test's the endpoint authenticiation works.
:return:
"""
return "hello"
| 5,336,689
|
def cnn_2x_lstm_siamese(voc_size, max_len, dropout=0.5):
"""Two siamese branches, each embedding a statement.
Binary classifier on top.
Args:
voc_size: size of the vocabulary for the input statements.
max_len: maximum length for the input statements.
dropout: Fraction of units to drop.
Returns:
A Keras model instance.
"""
pivot_input = layers.Input(shape=(max_len,), dtype='int32')
statement_input = layers.Input(shape=(max_len,), dtype='int32')
x = layers.Embedding(
output_dim=256,
input_dim=voc_size,
input_length=max_len)(pivot_input)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(3)(x)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
embedded_pivot = layers.LSTM(256)(x)
encoder_model = Model(pivot_input, embedded_pivot)
embedded_statement = encoder_model(statement_input)
concat = layers.merge([embedded_pivot, embedded_statement], mode='concat')
x = layers.Dense(256, activation='relu')(concat)
x = layers.Dropout(dropout)(x)
prediction = layers.Dense(1, activation='sigmoid')(x)
model = Model([pivot_input, statement_input], prediction)
return model
| 5,336,690
|
def play_throne_room(game):
"""
You may play an Action card from your hand twice.
"""
ps = game.player_state
if 'Action' not in set(c.Type for c in ps.hand):
print('ThroneRoom: No actions cards in hand')
return
card_name = game._respond(game.player, 'ThroneRoom')
card_class = get_card_class(card_name)
if card_class.Type != 'Action':
print(f'ThroneRoom: {card_class.Name()} is not an action card')
return
    # Play the requested action card for the first time
ps.actions += 1
result = game.play_action_card(card_name)
if result:
# Return the action card from the play area to the hand and play it again
card = ps.play_area.pop()
ps.actions += 1
if card.Name() != card_name:
raise RuntimeError('Something went wrong during throne room!')
ps.hand.append(card)
game.play_action_card(card_name)
| 5,336,691
|
def SerialWrite(serialPort, Message):
""" Write message on serial port."""
serialPort.flushInput()
serialPort.write(Message)
time.sleep(3)
| 5,336,692
|
def flatten_images_data(images, layer_path_segments=0, _test=False):
"""
Yield mapping for each layer of each image of an `images` list of Image.
This is a flat data structure for CSV and tabular output.
Keep only ``layer_path_segments`` trailing layer location segments (or keep
the locations unmodified if ``layer_path_segments`` is 0)
"""
for img in images:
img_extracted_location = img.extracted_location
base_data = dict(
image_extracted_location='' if _test else img_extracted_location,
image_archive_location='' if _test else img.archive_location,
image_id=img.image_id,
image_tags=','.join(img.tags),
)
for layer in img.layers:
layer_data = dict(base_data)
layer_data['is_empty_layer'] = layer.is_empty_layer
layer_data['layer_id'] = layer.layer_id
layer_data['layer_sha256'] = layer.sha256
layer_data['author'] = layer.author
layer_data['created_by'] = layer.created_by
layer_data['created'] = layer.created
layer_data['comment'] = layer.comment
lay_extracted_location = layer.extracted_location
lay_archive_location = layer.archive_location
if layer_path_segments:
lay_extracted_location = get_trimmed_path(
location=lay_extracted_location,
num_segments=layer_path_segments,
)
lay_archive_location = get_trimmed_path(
location=lay_archive_location,
num_segments=layer_path_segments,
)
layer_data['layer_archive_location'] = lay_archive_location
layer_data['layer_extracted_location'] = lay_extracted_location
yield layer_data
| 5,336,693
|
def roll_function(positions, I, angular_velocity):
"""
Due to how the simulations are generated where the first point of the simulation
is at the smallest x value and the subsequent positions are in a clockwise
(counterclockwise) direction when the vorticity is positive (negative), the first
point of the simulated intensity might lie in the middle of an intensity trace.
This needs to be compensated for by rolling array elements. Simulations come onto
the screen from one of 4 sides. Which side the sim comes onto the screen and
which side the sim leaves the screen defines how to roll the intensity as a function
of time such that the first returned position is at the entrance and the final returned
position is at the exit.
Args:
positions (array): position of Particle
I (array): intensities calculated as a function of position
        angular_velocity (float): Particle angular velocity
Returns:
p (array): position of Particle, adjusted to preserve order of peaks
I (array): intensities calculated as a function of p, adjusted to preserve order of peaks
"""
p = positions.T
x_0 = p[0][0]
y_0 = p[1][0]
clockwise = True
if angular_velocity < 0:
clockwise = False
roll = 0
if clockwise:
if (x_0>0) and (y_0>0) and (y_0<616):
# need to roll
if 616/2 > y_0: # orbit starts in upper half of screen
try:
rollval = -np.argwhere(p[1][:(len(p[1])//4+1)]==0)[0]
except IndexError: #if none of the points is actually equal to 0
rollval = -np.abs(p[1][:(len(p[1])//4+1)]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
else: #orbit starts in middle or lower half of screen
try:
rollval = np.argwhere(p[1]==616)[0]+len(p[1])//2
except IndexError: #if none of the points is actually equal to 0
rollval = np.abs(p[1][3*(len(p[1])//4):]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
else:
print('need to implement this still... rolling for counterclockwise vorticity.')
raise ValueError
return p.T, I
| 5,336,694
|
def run_exkeys(hosts, capture=False):
"""
Runs gpssh-exkeys for the given list of hosts. If capture is True, the
(returncode, stdout, stderr) from the gpssh-exkeys run is returned;
otherwise an exception is thrown on failure and all stdout/err is untouched.
"""
host_opts = []
for host in hosts:
host_opts.extend(['-h', host])
args = [ 'gpssh-exkeys', '-v' ] + host_opts
if not capture:
subprocess.check_call(args)
return
# Capture stdout/err for later use, while routing it through tee(1) so that
# developers can still see the live stream output.
#
# XXX This is a very heavy-weight solution, using pipes.Template() for the
# creation of shell pipeline processes. It's also platform-specific as it
# relies on the functionality of /dev/stdout and /dev/stderr.
#
# The overview: we open up two shell processes running tee(1), using
# pipes.Template(), and connect their standard output to the stdout/err of
# the current Python process using Template.open(). We then connect the
# stdout/stderr streams of subprocess.call() to the stdin of those tee
# pipelines. tee(1) will duplicate all output to temporary files, which we
# read after the subprocess call completes. NamedTemporaryFile() then cleans
# up those files when we return.
with tempfile.NamedTemporaryFile() as temp_out, tempfile.NamedTemporaryFile() as temp_err:
pipe_out = pipes.Template()
pipe_out.append('tee %s' % pipes.quote(temp_out.name), '--')
pipe_err = pipes.Template()
pipe_err.append('tee %s' % pipes.quote(temp_err.name), '--')
with pipe_out.open('/dev/stdout', 'w') as out, pipe_err.open('/dev/stderr', 'w') as err:
ret = subprocess.call(args, stdout=out, stderr=err)
stored_out = temp_out.read()
stored_err = temp_err.read()
return ret, stored_out, stored_err
| 5,336,695
|
def ip_address(value):
"""
Validate whether or not the given string is a valid IP address.
Args:
value (str): the value to validate.
Raises:
`~serde.exceptions.ValidationError`: when the value is not a valid IP
address.
"""
if not validators.ipv4(value) and not validators.ipv6(value):
raise ValidationError('{!r} is not a valid IP address'.format(value))
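# Usage sketch (assumes `validators` and ValidationError are importable here,
# since the function above already relies on both):
ip_address('192.168.0.1')   # valid -> returns silently
try:
    ip_address('not-an-ip')
except ValidationError as exc:
    print(exc)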
| 5,336,696
|
def top10():
"""Renders the top 10 page."""
top10_urls = ShortURL.query.order_by(ShortURL.hits.desc()).limit(10)
return render_template("top10.html", urls=top10_urls)
| 5,336,697
|
def rename_group(str_group2=None):
"""
Rename OFF food group (pnns_group_2) to a standard name
Args:
str_group2 (str): OFF food group name
Returns:
conv_group (str): standard food group name
"""
#convert_group1 = {'Beverage':['Beverages'],
# 'Cereals':['Cereals and potatoes'],
# 'Meal':['Composite foods'],
# 'Fat':['Fat and sauces'],
# 'Meat':['Fish Meat Eggs'],
# 'Fruits and vegetables':['Fruits and vegetables','fruits-and-vegetables'],
# 'Dairy':['Milk and dairy products'],
# 'Snack':['Salty snacks','Sugary snacks','sugary-snacks'],
# None:[None,'unknown','']}
convert_group2 = {'Beverage':['Alcoholic beverages','Artificially sweetened beverages',
'Fruit juices','Fruit nectars','Non-sugared beverages',
'Sweetened beverages'],
'Cereals':['Bread','Breakfast cereals','Cereals','Legumes','Patatoes'],
'Meal':['One-dish meals','Pizza pies and quiche','Sandwich'],
'Fat':['Dressings and sauces','Fats'],
'Meat':['Tripe dishes','Eggs','Fish and seafood','Meat','Processed meat','Nuts'],
'Fruit':['Fruits','fruits','Dried fruits'],
'Vegetable':['Soups','Vegetables','vegetables'],
'Dairy':['Cheese','Dairy desserts','Ice cream','Milk and yogurt'],
'Snack':['Appetizers','Salty and fatty products','Biscuits and cakes',
'Chocolate products','Sweets','pastries'],
None:[None,'unknown','']}
conv_group = [key for (key, value) in convert_group2.items() if (str_group2 in value)]
conv_group = [None] if not conv_group else conv_group
return conv_group[0]
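# Usage sketch: map OFF pnns_group_2 labels onto the standard groups.
print(rename_group('Cheese'))        # -> Dairy
print(rename_group('Fruit juices'))  # -> Beverage
print(rename_group('unknown'))       # -> None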
| 5,336,698
|
def save_png(fig: Figure, path: Union[None, str, pathlib.Path],
width: Union[int, float] = None, height: Union[int, float] = None, unit: str = 'px',
print_info: bool = False) -> Union[str, io.BytesIO]:
"""
Save PNG image of the figure.
:param fig: Figure to save.
:param path: Full path of the image to save. If directory (string ending in slash - '/' or '\\') then
the figure window title is used as a file name. If `None`, in-memory :class:`io.BytesIO`
file will be generated and returned.
:param width: Image width in `unit`. If not provided it will be left as it is.
:param height: Image height in `unit`. If not provided it will be left as it is.
:param unit: Unit of the image width and height, one of: 'px' (pixels), 'cm' (centimeters), 'in' (inch).
:param print_info: Whether to print information about saved file.
:return: Full path of the generated image if `path` was provided or in-memory :class:`io.BytesIO` file.
"""
if path:
directory, file_name = os.path.split(path)
# Create the directory if not existent
os.makedirs(directory, exist_ok=True)
# If the provided path is only a directory, use window title as filename
if not file_name:
file_name = get_window_title(fig)
# Image path must have .png extension!
if os.path.splitext(file_name)[1] != ".png":
file_name += ".png"
path = os.path.join(directory, file_name)
dpi = fig.get_dpi()
if width or height:
size = fig.get_size_inches()
if unit == 'px':
fig.set_size_inches((width or size[0]) / dpi, (height or size[1]) / dpi)
elif unit in ('mm', 'cm', 'in', 'inch'):
            if unit == 'mm':
                # guard against None so a missing dimension keeps its current size
                width = width / 25.4 if width is not None else None
                height = height / 25.4 if height is not None else None
            elif unit == 'cm':
                width = width / 2.54 if width is not None else None
                height = height / 2.54 if height is not None else None
# Unit is inches.
fig.set_size_inches(width or size[0], height or size[1])
else:
raise ValueError(f"Unsupported size unit '{unit}'")
width = fig.get_figwidth()
height = fig.get_figheight()
width_px = int(round(width * dpi))
height_px = int(round(height * dpi))
width_mm = width * 25.4
height_mm = height * 25.4
if path:
fig.savefig(path, dpi=dpi)
ret = path
if print_info:
print(f"Saved plot ({width_px}x{height_px} px = {width_mm:.1f}x{height_mm:.1f} mm @ {dpi} dpi)"
f" to '{os.path.normpath(path)}'")
else:
file = io.BytesIO()
fig.savefig(file, dpi=dpi)
file.seek(0)
ret = file
return ret
| 5,336,699
|