| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import jinja2
def render_template(path, ctx):
"""Render a Jinja2 template"""
with path.open() as f:
content = f.read()
tmpl = jinja2.Template(content)
return html_minify(tmpl.render(**ctx))
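# Minimal illustration of the jinja2 rendering step used above; html_minify is
# assumed to be provided elsewhere in this module, so it is not exercised here.
_demo_tmpl = jinja2.Template("Hello {{ name }}!")
assert _demo_tmpl.render(name="world") == "Hello world!"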
|
0eb4b2a73a645283998260cdadbab37da32d6784
| 3,644,200
|
import os
import subprocess
def linux_compute_tile_singlecore(optimsoc_buildroot):
"""
Module-scoped fixture: build a Linux image for a single-core compute tile
"""
# Get the buildroot base directory from the optimsoc_buildroot() fixture.
# Note that this directory is cached between pytest runs. Make sure the
# commands executed as part of this test can deal with that and rebuild
# artifacts as needed.
src_optimsoc_buildroot = optimsoc_buildroot.join('optimsoc-buildroot')
src_buildroot = optimsoc_buildroot.join('buildroot')
config = 'optimsoc_computetile_singlecore_defconfig'
# buildroot doesn't like our OpTiMSoC compiler being in the path. Error is:
# ---
# You seem to have the current working directory in your
# LD_LIBRARY_PATH environment variable. This doesn't work.
# support/dependencies/dependencies.mk:21: recipe for target 'dependencies' failed
# ---
env = dict(os.environ, LD_LIBRARY_PATH='', PATH='/bin:/usr/bin:/usr/local/bin')
cmd = ['make',
'-C', str(src_buildroot),
'BR2_EXTERNAL='+str(src_optimsoc_buildroot),
config]
subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
universal_newlines=True)
cmd = ['make',
'-C', str(src_buildroot)]
env = dict(os.environ, LD_LIBRARY_PATH='')
subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
universal_newlines=True)
linux_img = src_buildroot.join('output/images/vmlinux')
return linux_img
|
9eec134fb48c678eb18a25290d82427648c8ea31
| 3,644,201
|
def reverse( sequence ):
"""Return the reverse of any sequence
"""
return sequence[::-1]
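# Quick examples: reverse() works on any sliceable sequence (lists, tuples, strings).
assert reverse([1, 2, 3]) == [3, 2, 1]
assert reverse("abc") == "cba"
assert reverse((1, 2)) == (2, 1)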
|
f08ae428844347e52d8dbf1cd8ad07cfbf4ef597
| 3,644,202
|
def createOutputBuffer(file, encoding):
"""Create a libxml2 output buffer from a Python file """
ret = libxml2mod.xmlCreateOutputBuffer(file, encoding)
if ret is None:raise treeError('xmlCreateOutputBuffer() failed')
return outputBuffer(_obj=ret)
|
28ece9b710362d710ff6df25f426d91a0b318ebf
| 3,644,203
|
import os
def get_table_name(yaml_path):
"""gives how the yaml file name should be in the sql query"""
table_name = os.path.basename(yaml_path)
table_name = os.path.splitext(table_name)[0]
return table_name
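# Example: the table name is the yaml file's base name without its extension.
assert get_table_name("configs/tables/users.yaml") == "users"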
|
5181e1e68a844bc529573da02a78f034092def46
| 3,644,204
|
def wait_for_proof(node, proofid_hex, timeout=60, expect_orphan=None):
"""
Wait for the proof to be known by the node. If expect_orphan is set, the
proof should match the orphan state, otherwise it's a don't care parameter.
"""
def proof_found():
try:
wait_for_proof.is_orphan = node.getrawavalancheproof(proofid_hex)[
"orphan"]
return True
except JSONRPCException:
return False
wait_until_helper(proof_found, timeout=timeout)
if expect_orphan is not None:
assert_equal(expect_orphan, wait_for_proof.is_orphan)
|
f8f390424fe084bf8bf62bf1d16ac780d5c5df69
| 3,644,205
|
def check(verbose=1):
"""
Runs a couple of functions to check the module is working.
:param verbose: 0 to hide the standout output
:return: list of dictionaries, result of each test
"""
return []
|
4ecf144fc64a165b5b0f9766b76eb6b703eba130
| 3,644,206
|
def cylinder_sideways():
"""
sideways cylinder for poster
"""
    call_separator('cylinder sideways')
T1 = .1
#gs = gridspec.GridSpec(nrows=2,ncols=3,wspace=-.1,hspace=.5)
fig = plt.figure(figsize=(5,4))
ax11 = fig.add_subplot(111,projection='3d')
#ax12 = fig.add_subplot(gs[0,2])
#ax22 = fig.add_subplot(gs[1,2])
a = lubrication(phi1=.57,Rp=0.96,Rc=1.22,base_radius=1.22,
pi3=1,pi4=4.7,pi5=0.1,pi6=10,
mu=1.2,T=T1,constriction='piecewise',U0=0.2,
dt=0.02,eps=1,
F0=50,method='euler')
a.Z0 = -5/a.Rp
z = np.linspace(-7,7,100) # dimensional
r = a.pi1(z)
th = np.linspace(0,2*np.pi,100)
radius_al = 0.25
# draw arrow going into spine
ar1 = Arrow3D([-5,-1.5],[0,0],[0,0],
mutation_scale=10,
lw=2, arrowstyle="-|>", color="k")
ax11.add_artist(ar1)
# A
# draw spine
Z,TH = np.meshgrid(z,th)
#Z,TH = np.mgrid[-7:7:.1, 0:2*np.pi:.1]
X = np.zeros_like(Z)
Y = np.zeros_like(Z)
#print(np.shape(Z))
for i in range(len(Z[:,0])):
X[i,:] = a.pi1(Z[i,:])*np.cos(TH[i,:])
Y[i,:] = a.pi1(Z[i,:])*np.sin(TH[i,:])
ax11.plot_surface(Z,Y,X,alpha=.25)
shifts = np.array([-6,0,-4])
names = ['z','y','x']
size = 2
for i in range(3):
coords = np.zeros((3,2))
coords[:,0] += shifts
coords[:,1] += shifts
coords[i][1] += size
arx = Arrow3D(*list(coords),
mutation_scale=5,
lw=2, arrowstyle="-|>", color="k")
ax11.text(*list(coords[:,1]),names[i],horizontalalignment='center')
ax11.add_artist(arx)
# draw sphere for cap
b = a.base_radius
r = np.sqrt(b**2+7**2)
th2 = np.linspace(0,np.arctan(b/7),100)
phi = np.linspace(0,2*np.pi,100)
TH2,PHI = np.meshgrid(th2,phi)
X = r*np.sin(TH2)*np.cos(PHI)
Y = r*np.sin(TH2)*np.sin(PHI)
Z = r*np.cos(TH2)
ax11.plot_surface(Z,Y,X,color='tab:blue',alpha=.5)
# draw sphere vesicle
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
X = np.cos(u)*np.sin(v)
Y = np.sin(u)*np.sin(v)
Z = np.cos(v)
ax11.plot_surface(Z,Y,X,color='gray',alpha=.5)
# label spine head and base
ax11.text(7,0,-2,r'\setlength{\parindent}{0pt}Spine Head\\(Closed End)')
ax11.text(-4,0,3,r'\setlength{\parindent}{0pt}Spine Base\\(Open End)')
# set equal aspect ratios
#ax11.set_aspect('auto') # only auto allowed??
ax11.set_box_aspect((np.ptp(X), np.ptp(Y), np.ptp(Z)))
ax11.set_axis_off()
lo = -4.4
hi = 4.4
dx = -.5
ax11.set_xlim(lo-dx,hi+dx)
ax11.set_ylim(lo-dx,hi+dx)
ax11.set_zlim(lo,hi)
ax11.view_init(20,65)
return fig
|
98c0ed70c11ffe619d28623a5c5f4c4e2be40889
| 3,644,207
|
def get_generic_or_msg(intent, result):
""" The master method. This method takes in the
intent and the result dict structure
and calls the proper interface method. """
return Msg_Fn_Dict[intent](result)
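# Usage sketch: Msg_Fn_Dict is assumed to be a module-level mapping from intent
# names to handler functions; the toy mapping below exists only for illustration.
Msg_Fn_Dict = {'greet': lambda result: "Hello, {}!".format(result.get('name', 'there'))}
assert get_generic_or_msg('greet', {'name': 'Ada'}) == 'Hello, Ada!'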
|
00853e2e74892a6d01ba1c6986e72f6436c88a92
| 3,644,208
|
def s3_example_tile(gtiff_s3):
"""Example tile for fixture."""
return (5, 15, 32)
|
a4b7e35fc6f7bf51a551ac8cb18003c23ff35a01
| 3,644,209
|
def execute_list_of_commands(command_list):
"""
INPUT:
- ``command_list`` -- a list of strings or pairs
OUTPUT:
For each entry in command_list, we attempt to run the command.
If it is a string, we call ``os.system()``. If it is a pair [f, v],
we call f(v).
If the environment variable :envvar:`SAGE_NUM_THREADS` is set, use
that many threads.
"""
t = time.time()
# Determine the number of threads from the environment variable
# SAGE_NUM_THREADS, which is set automatically by sage-env
try:
nthreads = int(os.environ['SAGE_NUM_THREADS'])
except KeyError:
nthreads = 1
# normalize the command_list to handle strings correctly
command_list = [ [run_command, x] if isinstance(x, str) else x for x in command_list ]
# No need for more threads than there are commands, but at least one
nthreads = min(len(command_list), nthreads)
nthreads = max(1, nthreads)
def plural(n,noun):
if n == 1:
return "1 %s"%noun
return "%i %ss"%(n,noun)
print "Executing %s (using %s)"%(plural(len(command_list),"command"), plural(nthreads,"thread"))
execute_list_of_commands_in_parallel(command_list, nthreads)
print "Time to execute %s: %s seconds"%(plural(len(command_list),"command"), time.time() - t)
|
79247f8dc15cc790b6f1811e3cb79de47c514bc4
| 3,644,210
|
import requests
def get_transceiver_diagnostics(baseurl, cookie_header, transceiver):
"""
Get the diagnostics of a given transceivers in the switch
:param baseurl: imported baseurl variable
:param cookie_header: Parse cookie resulting from successful loginOS.login_os(baseurl)
:param transceiver: data parsed to specify a transceiver in switch
:return return transceiver's diagnostics information in json format
"""
url = baseurl + 'transceivers/' + transceiver + '/diagnostics'
headers = {'cookie': cookie_header}
response = requests.get(url, verify=False, headers=headers)
if response.status_code == 200:
return response.json()
|
c2863b54b03ae3bdcf779fbd18a50e2bcdb2edd7
| 3,644,211
|
import os
def plot_cross_sections(obj, paths, xs_label_size=12, xs_color='black',
map_style='contour', scale='lin', n=101, x_labeling='distance',
show_max=True, fp_max=True, fp_text=True, cmap='viridis_r',
max_fig_width=12, max_fig_height=8, legend_padding=6, **kw):
"""Generate a map style plot (either contour or pcolormesh) with
cross sections labeled on it and generate plots of the fields corresponding
to the cross sections
args:
obj - Results object or Model object, because the grid of fields
in a Results object must be interpolated to compute fields
along each cross section, passing a Model object instead will
yield smoother profiles of the fields along each cross section.
The Model object will, however, be used to compute a grid of
fields for the map style plot (contour or pcolormesh), so
passing a Model object could be slower.
paths - An iterable of iterables of x,y pairs representing paths through
the results domain to plot as cross sections. For example,
([(1,2), (3,5)], [(2,5), (9,3), (4,7)], [(5,3), (9,2)])
optional args:
xs_label_size - int, fontsize of text labels on the map style figure
xs_color - any matplotlib compatible color definition
map_style - str, 'contour' or 'pcolormesh', determines which map style
plot is generated with the cross sections labeled on it,
default is 'contour'
scale - str, can be 'log' or 'lin', only applies if map_style is
'contour' (default is 'lin')
n - integer, number of points sampled along the sections (default 101)
x_labeling - 'distance' or 'location', for x axis ticks on the cross
section plots labeled according to the distance along
the segment or with the (x,y) coordinates of sample
points, default is 'distance'
show_max - bool, toggle labeling of the maximum field location,
default is True
fp_max - bool, toggle whether the maximum fields along footprints
"of concern" are labeled
fp_text - bool, toggle footprint group labeling with text
cmap - str, name of matplotlib colormap, see:
http://matplotlib.org/examples/color/colormaps_reference.html
max_fig_width - float/int, inches, maximum width of figure
max_fig_height - float/int, inches, maximum height of figure
legend_padding - float/int, inches, width left for legend area
kw:
prefix - string prepended to the file names of saved plots
suffix - string appended to the file names of saved plots
and
any keyword arguments that can be passed to plot_contour(),
plot_pcolormesh(), or plot_segment()
note: Only a directory name can be passed to the 'path' keyword.
File names aren't accepted, which prevents saved plots from
overwriting each other. File names are created automatically.
returns:
A tuple of tuples of plotting objects. The first tuple contains the
return arguments of the map plot (contour or pcolormesh) and the
other tuples contain the return arguments of plot_path, for however
many cross sections are created."""
#deal with Model vs Results input
if(type(obj) is subcalc_class.Results):
res = obj
elif(type(obj) is subcalc_class.Model):
res = obj.calculate()
#separate saving kw from others
save_kw = {}
for k in ['save', 'path', 'format', 'prefix', 'suffix']:
if(k in kw):
save_kw[k] = kw[k]
kw.pop(k)
#deal with the saving kw
if('prefix' in save_kw):
save_kw['save'] = True
fn_prefix = save_kw['prefix']
if(fn_prefix[-1] != '-'):
fn_prefix = fn_prefix + '-'
else:
fn_prefix = ''
if('suffix' in save_kw):
save_kw['save'] = True
fn_suffix = save_kw['suffix']
if(fn_suffix[0] != '-'):
fn_suffix = '-' + fn_suffix
else:
fn_suffix = ''
if('save' in save_kw):
save = save_kw['save']
elif('path' in save_kw):
if(not os.path.isdir(save_kw['path'])):
raise(subcalc_class.EMFError('The path keyword argument to plot_cross_sections must be a directory path. Plot names are created automatically, with some control available through the prefix and suffix keyword arguments.'))
save_kw['save'] = True
save = True
else:
save = False
#check inputs
if(len(paths) > 26):
raise(subcalc_class.EMFError('There cannot be more than 26 cross sections on a single figure. Make sure that your input for the "points" argument has the correct number of levels (sublists).'))
#list of return arguments
R = []
#plot the map style figure
if(map_style == 'contour'):
r = plot_contour(res, scale, show_max, fp_max, fp_text, cmap,
max_fig_width, max_fig_height, legend_padding, **kw)
R.append(r)
fig, ax = r[0], r[1]
fn = fn_prefix + 'contour-with-cross-sections' + fn_suffix
else:
r = plot_pcolormesh(res, show_max, fp_max, fp_text, cmap, max_fig_width,
max_fig_height, legend_padding, **kw)
R.append(r)
fig, ax = r[0], r[1]
fn = fn_prefix + 'pcolormesh-with-cross-sections' + fn_suffix
#draw cross section traces on the figure
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i, path in enumerate(paths):
#get x,y
x, y = zip(*path)
xb, xe, yb, ye = x[0], x[-1], y[0], y[-1]
#plot the trace
ax.plot(x, y, color=xs_color)
#label the trace
hab, vab = _get_text_alignment(path[1], path[0])
hae, vae = _get_text_alignment(path[-2], path[-1])
ax.text(xb, yb, alphabet[i], ha=hab, va=vab,
color=xs_color, fontsize=xs_label_size)
ax.text(xe, ye, alphabet[i] + "'", ha=hae, va=vae,
color=xs_color, fontsize=xs_label_size)
#save or don't
if(save):
_save_fig(fn, fig, **save_kw)
#plot the cross sections
for i, path in enumerate(paths):
r = plot_path(obj, path, n, x_labeling, scale, cmap, **kw)
R.append(r)
fig, ax = r
c = alphabet[i]
ax.set_title("Cross Section %s-%s'" % (c, c))
if(save):
fn = '%scross-section%s' % (fn_prefix, c + fn_suffix)
_save_fig(fn, fig, **save_kw)
return(tuple(R))
|
1617695199087a79ed0388f6673204a37f5d0e0e
| 3,644,212
|
def mask_valid_boxes(boxes, return_mask=False):
"""
:param boxes: (cx, cy, w, h,*_)
:return: mask
"""
w = boxes[:,2]
h = boxes[:,3]
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
mask = (w > 2) & (h > 2) & (ar < 30)
if return_mask:
return mask
else:
return boxes[mask]
|
3a3c00f934dabce78ee8a28f0ece2105d79f9f3f
| 3,644,213
|
from hy.lex import tokenize
def import_buffer_to_hst(buf):
"""Import content from buf and return an Hy AST."""
return tokenize(buf + "\n")
|
4571bac8987911bf9b9a277590be6204be6120ab
| 3,644,214
|
import argparse
def parse_args():
""" Parse command line arguments.
"""
parser = argparse.ArgumentParser(description="Deep SORT")
parser.add_argument(
"--sequence_dir", help="Path to MOTChallenge sequence directory",
default=None, required=False)
parser.add_argument(
"--detection_file", help="Path to custom detections.", default=None,
required=False)
parser.add_argument(
"--output_file", help="Path to the tracking output file. This file will"
" contain the tracking results on completion.",
default="/tmp/hypotheses.txt")
parser.add_argument(
"--min_confidence", help="Detection confidence threshold. Disregard "
"all detections that have a confidence lower than this value.",
default=0.8, type=float)
parser.add_argument(
"--min_detection_height", help="Threshold on the detection bounding "
"box height. Detections with height smaller than this value are "
"disregarded", default=0, type=int)
parser.add_argument(
"--nms_max_overlap", help="Non-maxima suppression threshold: Maximum "
"detection overlap.", default=1.0, type=float)
parser.add_argument(
"--max_cosine_distance", help="Gating threshold for cosine distance "
"metric (object appearance).", type=float, default=0.2)
parser.add_argument(
"--max_frame_idx", help="Maximum size of the frame ids.", type=int, default=None)
parser.add_argument(
"--display", help="Show intermediate tracking results",
default=True, type=bool_string)
parser.add_argument('--min-box-area', type=float, default=50, help='filter out tiny boxes')
parser.add_argument('--cfg_file', default='aic_mcmt.yml', help='Config file for mcmt')
parser.add_argument('--seq_name', default='c041', help='Seq name')
return parser.parse_args()
|
318c5dd4d3c62c730f49c4a19fce9d782651499f
| 3,644,215
|
from typing import Mapping
import os
def load_lane_segments_from_xml(map_fpath: _PathLike) -> Mapping[int, LaneSegment]:
"""
Load lane segment object from xml file
Args:
map_fpath: path to xml file
Returns:
lane_objs: List of LaneSegment objects
"""
tree = ET.parse(os.fspath(map_fpath))
root = tree.getroot()
logger.info(f"Loaded root: {root.tag}")
all_graph_nodes = {}
lane_objs = {}
# all children are either Nodes or Ways
for child in root:
if child.tag == "node":
node_obj = extract_node_from_ET_element(child)
all_graph_nodes[node_obj.id] = node_obj
elif child.tag == "way":
lane_obj, lane_id = extract_lane_segment_from_ET_element(child, all_graph_nodes)
lane_objs[lane_id] = lane_obj
else:
logger.error("Unknown XML item encountered.")
raise ValueError("Unknown XML item encountered.")
return lane_objs
|
57819b11e51dace02464e7c3e3436dc82d629564
| 3,644,216
|
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
|
ca81dff57f51184042899849dff6623d32e475c0
| 3,644,217
|
def build_gauss_kernel(sigma_x, sigma_y, angle):
"""
Build the rotated anisotropic gaussian filter kernel
Parameters
----------
sigma_x : numpy.float64
sigma in x-direction
sigma_y: numpy.float64
sigma in y-direction
angle: int
        angle in degrees of the needle holder measured with respect to 'vertical' transducer axis
Returns
-------
kernel: numpy.ndarray
        rotated filter kernel
"""
angle = np.pi/2-np.deg2rad(angle)
# Calculate gaussian kernel
kernel = ascon.Gaussian2DKernel(sigma_x, sigma_y, 0)
# Extract size and kernel values
    x_size = kernel.shape[0]
    y_size = kernel.shape[1]
kernel = kernel.array
# Rotate
kernel = ndimage.rotate(kernel,np.rad2deg(-angle), reshape=False)
# Parameters for cropping
max_in_kernel = np.amax(abs(kernel))
threshold = 0.05*max_in_kernel
# Crop the kernel to reduce its size
    x_start = 0
for i in range(0, x_size, 1):
if abs(max(kernel[i,:])) > threshold:
x_start = i
break
x_end = (x_size-1)-x_start
    y_start = 0
for i in range(0, y_size, 1):
if abs(max(kernel[:,i])) > threshold:
y_start = i
break
y_end = (y_size-1)-y_start
kernel = kernel[x_start:x_end, y_start:y_end]
return kernel
|
14dd4143ad94bcdfa3298b4acf9b2d4c2bd0b7e6
| 3,644,218
|
def kwargs_to_flags(**kwargs):
"""Convert `kwargs` to flags to pass on to CLI."""
flag_strings = []
for (key, val) in kwargs.items():
if isinstance(val, bool):
if val:
flag_strings.append(f"--{key}")
else:
flag_strings.append(f"--{key}={val}")
return " ".join(flag_strings)
|
aa672fe26c81e7aaf8a6e7c38354d1649495b8df
| 3,644,219
|
def extractBananas(item):
"""
Parser for 'Bananas'
"""
badwords = [
'iya na kao manga chapters',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('isekai joushu chapters', 'Struggling Hard As The Lord Of A Castle In A Different World', 'translated'),
('dungeon harem wn chapters', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
('erufu seidorei wn', 'The Dungeon Harem I Built With My Elf Sex Slave', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
chp_prefixes = [
('AARASL', 'An A-ranked Adventurer’s “Slow-living”', 'translated'),
('Isekai Taneuma', 'Isekai Taneuma', 'translated'),
('Gang of Yuusha', 'Gang of Yusha', 'translated'),
('Gang of Yusha', 'Gang of Yusha', 'translated'),
('The Revenge of the Soul Eater', 'Soul Eater of the Rebellion', 'translated'),
('Soul Eater of the Rebellion', 'Soul Eater of the Rebellion', 'translated'),
('Sparta Teikoku ', 'Sparta Teikoku Kenkoku Senki ', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
f06167a0d379ec3b1921bb7ad8146b0bca9fd8aa
| 3,644,220
|
import os
import fnmatch
from collections import defaultdict
def BuildSymbolToFileAddressMapping():
"""
Constructs a map of symbol-string -> [ (file_id, address), ... ] so that each
symbol is associated with all the files and addresses where it occurs.
"""
result = defaultdict(list)
# Iterate over all the extracted_symbols_*.txt files.
for filename in os.listdir(FLAGS.work_directory):
if fnmatch.fnmatch(filename, "extracted_symbols_*.txt"):
contents = open( FLAGS.work_directory + "/" + filename, "rt" ).readlines()
for line in contents:
file_id, filename, address, symbol, vuln = line.split()
result[symbol].append((file_id, address))
return result
|
386ce1c15af09295fb098286f87ceb020bbb3b3d
| 3,644,221
|
def get_template_parameters_s3(template_key, s3_resource):
"""
    Checks for existence of a parameters object in S3 against supported suffixes and returns the parameters file key if found
Args:
template_key: S3 key for template file. omit bucket.
s3_resource: a boto3 s3 resource
Returns:
filename of parameters file if it exists
"""
for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
parameters_key = template_key.replace("/templates", "/parameters") + suffix
try:
obj = s3_resource.Object(EFConfig.S3_CONFIG_BUCKET, parameters_key)
obj.get()
return parameters_key
except ClientError:
continue
return None
|
3b68dc9c1fa8636bd0d066780aab43a6e55ecf2f
| 3,644,222
|
import numpy as np
def cell_from_system(sdict):
"""
Function to obtain cell from namelist SYSTEM read from PW input.
Args:
        sdict (dict): Dictionary generated from namelist SYSTEM of PW input.
Returns:
ndarray with shape (3,3):
Cell is 3x3 matrix with entries::
[[a_x b_x c_x]
[a_y b_y c_y]
[a_z b_z c_z]],
where a, b, c are crystallographic vectors,
and x, y, z are their coordinates in the cartesian reference frame.
"""
ibrav = sdict.get('ibrav', None)
if ibrav == 0:
return None
params = ['a', 'b', 'c', 'cosab', 'cosac', 'cosbc']
celldm = [sdict.get(f'celldm({i + 1})', 0) for i in range(6)]
if not any(celldm):
abc = [sdict.get(a, 0) for a in params]
celldm = celldms_from_abc(ibrav, abc)
if not any(celldm):
return None
if ibrav == 1:
cell = np.eye(3) * celldm[0]
return cell
elif ibrav == 2:
v1 = celldm[0] / 2 * np.array([-1, 0, 1])
v2 = celldm[0] / 2 * np.array([0, 1, 1])
v3 = celldm[0] / 2 * np.array([-1, 1, 0])
elif ibrav == 3:
v1 = celldm[0] / 2 * np.array([1, 1, 1])
v2 = celldm[0] / 2 * np.array([-1, 1, 1])
v3 = celldm[0] / 2 * np.array([-1, -1, 1])
elif ibrav == -3:
v1 = celldm[0] / 2 * np.array([-1, 1, 1])
v2 = celldm[0] / 2 * np.array([1, -1, 1])
v3 = celldm[0] / 2 * np.array([1, 1, -1])
elif ibrav == 4:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([-1 / 2, np.sqrt(3) / 2, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 5:
term_1 = np.sqrt(1 + 2 * celldm[3])
term_2 = np.sqrt(1 - celldm[3])
v1 = celldm[0] * np.array([term_2 / np.sqrt(2), -term_2 / np.sqrt(6), term_1 / np.sqrt(3)])
v2 = celldm[0] * np.array([0, term_2 * np.sqrt(2 / 3), term_1 / np.sqrt(3)])
v3 = celldm[0] * np.array([-term_2 / np.sqrt(2), -term_2 / np.sqrt(6), term_1 / np.sqrt(3)])
elif ibrav == -5:
term_1 = np.sqrt(1 + 2 * celldm[3])
term_2 = np.sqrt(1 - celldm[3])
v1 = celldm[0] * np.array([(term_1 - 2 * term_2) / 3, (term_1 + term_2) / 3, (term_1 + term_2) / 3])
v2 = celldm[0] * np.array([(term_1 + term_2) / 3, (term_1 - 2 * term_2) / 3, (term_1 + term_2) / 3])
v3 = celldm[0] * np.array([(term_1 + term_2) / 3, (term_1 + term_2) / 3, (term_1 - 2 * term_2) / 3])
elif ibrav == 6:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, 1, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 7:
v1 = celldm[0] / 2 * np.array([1, -1, celldm[2]])
v2 = celldm[0] / 2 * np.array([1, 1, celldm[2]])
v3 = celldm[0] / 2 * np.array([-1, -1, celldm[2]])
elif ibrav == 8:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 9:
v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == -9:
v1 = celldm[0] / 2 * np.array([1, -celldm[1], 0])
v2 = celldm[0] / 2 * np.array([+1, celldm[1], 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == 91:
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] / 2 * np.array([0, celldm[1], -celldm[2]])
v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
elif ibrav == 10:
v1 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
v2 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v3 = celldm[0] / 2 * np.array([0, celldm[1], celldm[2]])
elif ibrav == 11:
v1 = celldm[0] / 2 * np.array([1, celldm[1], celldm[2]])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], celldm[2]])
v3 = celldm[0] / 2 * np.array([-1, -celldm[1], celldm[2]])
elif ibrav == 12:
sen = np.sqrt(1 - celldm[3] ** 2)
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([celldm[1] * celldm[3], celldm[1] * sen, 0])
v3 = celldm[0] * np.array([0, 0, celldm[2]])
elif ibrav == -12:
sen = np.sqrt(1 - celldm[4] ** 2)
v1 = celldm[0] * np.array([1, 0, 0])
v2 = celldm[0] * np.array([0, celldm[1], 0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0, celldm[2] * sen])
elif ibrav == 13:
sen = np.sqrt(1 - celldm[3] ** 2)
v1 = celldm[0] / 2 * np.array([1, 0, -celldm[2]])
v2 = celldm[0] * np.array([celldm[1] * celldm[3], celldm[1] * sen, 0])
v3 = celldm[0] / 2 * np.array([1, 0, celldm[2]])
elif ibrav == -13:
sen = np.sqrt(1 - celldm[4] ** 2)
v1 = celldm[0] / 2 * np.array([1, celldm[1], 0])
v2 = celldm[0] / 2 * np.array([-1, celldm[1], 0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4], 0, celldm[2] * sen])
elif ibrav == 14:
singam = np.sqrt(1 - celldm[5] ** 2)
term = (1 + 2 * celldm[3] * celldm[4] * celldm[5] - celldm[3] ** 2 - celldm[4] ** 2 - celldm[5] ** 2)
term = np.sqrt(term / (1 - celldm[5] ** 2))
v1 = celldm[0] * np.array([1,
0,
0])
v2 = celldm[0] * np.array([celldm[1] * celldm[5],
celldm[1] * singam,
0])
v3 = celldm[0] * np.array([celldm[2] * celldm[4],
celldm[2] * (celldm[3] - celldm[4] * celldm[5]) / singam,
celldm[2] * term])
else:
raise ValueError('Unsupported ibrav')
cell = np.stack([v1, v2, v3], axis=1)
return cell
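# Usage sketch: a simple cubic cell (ibrav=1) with lattice constant celldm(1)=10.2
# yields 10.2 times the 3x3 identity matrix.
demo_cell = cell_from_system({'ibrav': 1, 'celldm(1)': 10.2})
assert np.allclose(demo_cell, np.eye(3) * 10.2)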
|
fbd6e034f738f42be45d7e5304892a9e69a8493b
| 3,644,223
|
def A12_6_3_2(FAxial, eta, Pp, Pu, Muey , Muez, Muay, Muaz,
Ppls, Mby, Mbz, GammaRPa, GammaRPb):
"""
A.12.6.3.2 Interaction equation approach
where :
Pu is the applied axial force in a member due to factored actions,
determined in an analysis that includes Pu effects (see A.12.4);
Ppls is the representative local axial strength of a non-circular
prismatic member,
Pp is the representative axial strength of a non-circular prismatic
member,
Muey is the corrected bending moment due to factored actions about
the member y-axis from A.12.4;
Muez is the corrected bending moment due to factored actions about
the member z-axis from A.12.4;
Muay is the amplified bending moment due to factored actions about
the member y-axis from A.12.4;
Muaz is the amplified bending moment due to factored actions about
the member z-axis from A.12.4;
Mby is the representative bending moment strength about the member
y-axis, as defined in A.12.6.2.5 or A.12.6.2.6.
"""
# Each non-circular prismatic structural member should satisfy
    # the following conditions in Equations [A.12.6-38] to [A.12.6-40]
# at all cross-sections along its length. When the shear due to
# factored actions is greater than 60 percent of the shear strength,
# the bending moment strength should be reduced parabolically to zero
# when the shear equals the shear strength (Pv in A.12.6.3.4).
#
# Local strength check (for all members):
# (A.12.6-38)
_UR1 = ((GammaRPa * Pu / Ppls)
+ pow((pow((GammaRPb * Muey / Mby),eta)
+ pow((GammaRPb * Muez / Mbz),eta)), 1.0 / eta))
print("")
print("A.12.6.3.2 Interaction equation approach")
print("Uint [Local strength check ] = {: 1.4f}".format(_UR1))
_UR2 = 0
if FAxial == 'compression':
# and beam-column check (for members subject to axial compression):
if GammaRPa * Pu / Pp > 0.20:
# after AISC[A.12.5-1], Equation H1-1a (A.12.6-39)
_UR2 = ((GammaRPa * Pu / Pp)
+ (8.0 / 9.0) * pow((pow((GammaRPb * Muay / Mby),eta)
+ pow((GammaRPb * Muaz / Mbz),eta)), 1.0 / eta))
#
else:
# after AISC[A.12.5-1], Equation H1-1b (A.12.6-40)
_UR2 = ((GammaRPa * Pu / (2.0 * Pp))
+ pow((pow((GammaRPb * Muay / Mby),eta)
+ pow((GammaRPb * Muaz / Mbz),eta)), 1.0/eta))
print("Uint [beam-column check ] = {: 1.4f}".format(_UR2))
print("-----------------")
#
#
#
return _UR1, _UR2
#
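# Hypothetical worked example; all forces, moments and strengths below are
# illustrative numbers only, not values taken from any design code.
ur_local, ur_beam_col = A12_6_3_2(FAxial='compression', eta=1.7,
                                  Pp=5000.0, Pu=1500.0,
                                  Muey=120.0, Muez=80.0, Muay=130.0, Muaz=90.0,
                                  Ppls=5500.0, Mby=400.0, Mbz=300.0,
                                  GammaRPa=1.05, GammaRPb=1.05)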
|
7a36ec489681100f99563f9c336df1306363851d
| 3,644,224
|
def gain_deploy_data():
"""
    @api {get} /v1/deploy/new_data Get information for the current deploy_id
    @apiName deployNew_data
    @apiGroup Deploy
    @apiDescription Get information for the current deploy_id
    @apiParam {int} project_id Project id
    @apiParam {int} flow_id Flow id
    @apiParam {int} deploy_id Deployment id
@apiParamExample {json} Request-Example:
{
"project_id": 45,
"flow_id": 1,
"deploy_id": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"branch": "develop",
"deploy_id": 160,
"flow_id": 232,
"id": 179,
"node_id": 31,
"node_name": "yn-244",
"project_id": 4,
"result": 1,
"server_id": 45,
"server_name": "submarine-test",
"status": 0,
"version": "1.1.75"
}
],
"message": "成功"
}
"""
data = DeployRecordBusiness.query_deploy_id_json()
combine_data = {'is_one_Key': 1, 'data': data}
single_data = DeployRecordBusiness.is_one_key()
if len(single_data) == 0:
combine_data['is_one_Key'] = 0
return json_detail_render(0, combine_data)
|
9dc5e5faa53235ac6c5d8f0d37a2989b15ead477
| 3,644,225
|
def topk_mask(score, k):
"""Efficient implementation of topk_mask for TPUs.
This is a more efficient implementation of the following snippet with support
for higher rank tensors. It has the limitation that it only supports float32
as element type. The mask only contains k elements even if other elements
have the same value as the kth largest.
def topk_mask(score, k):
_, indices = tf.nn.top_k(score, k=k)
return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
tf.squeeze(score).shape.as_list())
The implementation binary searches for the kth value along each row of the
input and once the kth value is found it creates the mask via a single select
instruction. This approach is more than 100x faster on TPUs for large inputs
compared with the above snippet.
Args:
score: 1-D or higher Tensor with last dimension at least k.
k: Number of top elements to look for along the last dimension (along each
row for matrices).
"""
last_dim_size = score.get_shape().as_list()[-1]
  # Choose top k+epsilon where epsilon is the number of times the k'th largest
  # element is present in the input.
topk_mask_with_duplicate = topk_mask_internal(score, k)
  # Calculate the number of redundant duplicate values to discard.
select_num = tf.cast(
tf.reduce_sum(topk_mask_with_duplicate, axis=-1, keepdims=True), tf.int32)
  redundant_num = select_num - k
# softmax cross entropy value range [0, 1].
# k's largest value is the smallest value being selected.
k_th_value = tf.reduce_min(
tf.where(
tf.cast(topk_mask_with_duplicate, tf.bool), score,
tf.ones_like(score) * 2.0),
axis=-1,
keepdims=True)
# Mask to indicate if score equals k th largest value.
equal_k_th_value = tf.equal(score, k_th_value)
  # Creates a tensor where the value is 1 if the value is equal to kth largest
# value, otherwise, 0.
k_th_value = tf.where(equal_k_th_value, tf.ones_like(score, dtype=tf.int32),
tf.zeros_like(score, dtype=tf.int32))
index = tf.range(last_dim_size)
k_th_value_index = tf.multiply(k_th_value, index)
  duplicate_mask = topk_mask_internal(
      tf.cast(k_th_value_index, tf.float32), redundant_num)
return tf.where(
tf.cast(duplicate_mask, tf.bool), tf.zeros_like(topk_mask_with_duplicate),
topk_mask_with_duplicate)
|
0a33dc6d5b9c621ab3fbd86c54c9ec90ac00f21f
| 3,644,226
|
def _generateGroundTruth(uids, COURSEDESCRIPTIONS):
"""Generate the ground truths from pre-stored bert model results given unique id lists
:param uids: list of unique ids
:type uids: list
:param COURSEDESCRIPTIONS: dictionary of course Descriptions
:type COURSEDESCRIPTIONS: dict
:return: a dictionary with (uid, ground truth similarity) as key-val pair
:rtype: dict
"""
gt = {}
# _pseudoGroundTruth(COURSEDESCRIPTIONS)
bertVecs = np.load("data/bert_vecs.npy")
cidLists = list(COURSEDESCRIPTIONS.keys())
for uid in uids:
twoids = uid.split("-")
id1, id2 = twoids[0], twoids[1]
vec1, vec2 = bertVecs[cidLists.index(
id1)], bertVecs[cidLists.index(id2)]
sim = np.dot(vec1, vec2)/np.linalg.norm(vec1)/np.linalg.norm(vec2)
# augment to [0,1]
gt[uid] = sim
# to ensure the similarities are comparable, 0-center the similarity value
# ? variance TODO
ave = np.mean(list(gt.values()))
for key in gt.keys():
gt[key] -= ave
return gt
|
623803815d3016989d26cf6841750e2cbd55bc83
| 3,644,227
|
import calendar
def valueSearch(stat_type,op,value,**kwargs):
"""Quick function to designate a value, and the days or months where the
attribute of interest exceeded, equalled, or was less than the passed
value
valueSearch("attribute","operator",value,**{sortmonth=False})
* "attribute" must be in ["prcp","snow","snwd","tavg","tmax","tmin"] (other
values are accepted, but these are what are assessed
* "operator" must be in ["<=","<","==","!=",">",">="]
* value must be an integer or a float
OPT **kwarg: sortmonth = True --> If set to true, it will do a value
search based on monthly data instead of
daily (no snwd data is available for
months though)
EXAMPLE: valueSearch("prcp",">=",5) --> returns a list of all days on
record where 5+ inches of rain
fell
"""
#operator=">", year=1984, month=12,season="winter"
# v, args[rain,prcp,snow,temp,avgtemp,tmax,avgtmax,tmin,avgtmin], kwargs[condition,year,metyear,season,month]
valid_yrs = sorted([x for x in clmt.keys() if type(x) == int])
valid_metyrs = sorted([x for x in metclmt.keys() if type(x) == int])
# ERROR HANDLING
if stat_type.lower() not in ["rain","prcp","precip","snow","snwd","temp","temps","temperature","temperatures","avgtemp","tavg","tempavglist","tmax","hi","high","tmin","lo","low"]:
return print("OOPS! {} is not a supported stat category. Try again!".format(stat_type))
if op not in ["<","<=","==",">",">="]: return print("OOPS! '{}' is not a supported operator. Try again!".format(op))
if type(value) not in [int,float]: return print("OOPS! Only integers or floats are supported for value intake")
# Format passed variables
    stat_type = stat_type.lower() # Convert to lower-case for homogeneity
if stat_type in ["rain","prcp","precip"]: stat_type = "prcp"
if stat_type in ["snow"]: stat_type = "snow"
if stat_type in ["snwd"]: stat_type = "snwd"
if stat_type in ["avgtemp","tavg","tempavglist","temp","temps","temperature","temperatures"]: stat_type = "tavg"
if stat_type in ["tmax","hi","high"]: stat_type = "tmax"
if stat_type in ["tmin","lo","low"]: stat_type = "tmin"
if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
CLMTDICT = clmt_vars_months
stype = "month"
else: # Just sorting indv days
CLMTDICT = clmt_vars_days
stype = "day"
results = []
for VAR in CLMTDICT[stat_type]:
for DAY in CLMTDICT[stat_type][VAR]:
if op == "<":
if stype == "month":
if VAR < value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
else:
if VAR < value: results.append(DAY)
elif op == "<=":
if stype == "month":
if VAR <= value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
else:
if VAR <= value: results.append(DAY)
elif op == "!=":
if VAR != value: results.append(DAY)
elif op == "==":
if VAR == value: results.append(DAY)
elif op == ">=":
if VAR >= value: results.append(DAY)
elif op == ">":
if VAR > value: results.append(DAY)
results.sort()
if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
if stat_type == "prcp": print("Total months where the Total Rainfall {} {}: {}".format(op,value,len(results)))
elif stat_type == "snow": print("Total months where the Total Snowfall {} {}: {}".format(op,value,len(results)))
elif stat_type in ["tmax","tmin"]:
print("Total months where the Average {} {} {}: {}".format(stat_type.upper(),op,value,len(results)))
elif stat_type == "tavg":
print("Total months where the Average Temperature {} {}: {}".format(op,value,len(results)))
else:
return print("*** valueSearch does not report on monthly variations of {} ***".format(stat_type))
if len(results) <= 50: stillprint = True
else:
stillpr = input("print results? ('y'/'n'): ")
if stillpr == "y": stillprint = True
else: stillprint = False
if stillprint == True:
if stat_type == "prcp":
for x in results: print("{:6.2f}: {} {}".format(round(sum(clmt[x.year][x.month]["prcp"]),2),calendar.month_abbr[x.month],x.year))
if stat_type == "snow":
for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snow"]),1),calendar.month_abbr[x.month],x.year))
#if stat_type == "snwd":
#for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snwd"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tavg":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tempAVGlist"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tmax":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmax"]),1),calendar.month_abbr[x.month],x.year))
if stat_type == "tmin":
for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmin"]),1),calendar.month_abbr[x.month],x.year))
else: # Just assessing individual days
print("Total days where '{}' {} {}: {}".format(stat_type,op,value,len(results)))
if len(results) <= 50: stillprint = True
else:
stillpr = input("print results? ('y'/'n'): ")
if stillpr == "y": stillprint = True
else: stillprint = False
if stillprint == True:
if stat_type == "prcp":
for x in results: print("{:>5.2f}: {}".format(float(clmt[x.year][x.month][x.day].prcp),x))
if stat_type == "snow":
for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snow),x))
if stat_type == "snwd":
for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snwd),x))
if stat_type == "tmax":
for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmax,x))
if stat_type == "tmin":
for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmin,x))
print("")
|
94b55a362d179f6acce705b002eb99f330a5427b
| 3,644,228
|
import requests
def get_gnid(rec):
"""
Use geonames API (slow and quota limit for free accounts)
"""
if not any("http://www.geonames.org" in s for s in rec.get("sameAs")) and rec["geo"].get("latitude") and rec["geo"].get("longitude"):
changed = False
r = requests.get("http://api.geonames.org/findNearbyJSON?lat="+rec["geo"].get(
"latitude")+"&lng="+rec["geo"].get("longitude")+"&username=slublod")
if r.ok and isiter(r.json().get("geonames")):
for geoNameRecord in r.json().get("geonames"):
if rec.get("name") in geoNameRecord.get("name") or geoNameRecord.get("name") in rec.get("name"): # match!
newSameAs = {'@id': "https://sws.geonames.org/"+str(geoNameRecord.get("geonameId"))+"/",
'publisher': {'abbr': "geonames",
'preferredName': "GeoNames",
"isBasedOn": {"@type": "Dataset",
"@id": "https://sws.geonames.org/"+str(record.get("id"))+"/"
}
}
}
rec["sameAs"] = litter(rec.get("sameAs"), newSameAs)
changed = True
else:
if r.json().get("status").get("message").startswith("the hourly limit") or r.json().get("status").get("message").startswith("the daily limit"):
eprint("Limit exceeded!\n")
exit(0)
if changed:
return rec
|
ab9d5e50e45217e3742f1d1ca7f58326ed3bf6f6
| 3,644,229
|
def handle_message(message):
"""Handles every message and creates the reply"""
if re_vpncheck_short.search(message.body) or re_vpncheck_long.search(message.body):
"""Checks for VPN Connectivity"""
servername = None
protocol = None
if re_vpncheck_short.search(message.body):
servername = (re_vpncheck_short.search(message.body).group(2) + "#" + re_vpncheck_short.search(
message.body).group(4).lstrip("0")).upper()
if re_vpncheck_short.search(message.body).group(5) != None:
protocol = re_vpncheck_short.search(message.body).group(5).strip().lower()
else:
protocol = "udp"
elif re_vpncheck_long.search(message.body):
servername = (re_vpncheck_long.search(message.body).group(3) + "-" + re_vpncheck_long.search(
message.body).group(5) + "#" + re_vpncheck_long.search(message.body).group(7).lstrip("0")).upper()
if re_vpncheck_long.search(message.body).group(8) != None:
protocol = re_vpncheck_long.search(message.body).group(8).strip().lower()
else:
protocol = "udp"
ServerID = get_vpnserver_id(servername)
if ServerID != None:
res = test_vpn(servername, ServerID, protocol)
return res
else:
if servername != None:
logger.debug("Server {} not found".format(servername))
return "Server {} not found".format(servername)
else:
return
if re_vpncheck_random.search(message.body):
return test_vpn("FillerServername", "FillerServerID", rand=True)
if re_mailcheck_login.search(message.body):
return test_pm_login()
|
cc95a1f088edd8e815c533e43ad56205bf4747d6
| 3,644,230
|
def allowed_file(filename):
""" Is file extension allowed for upload"""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
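# Usage sketch: ALLOWED_EXTENSIONS is assumed to be a module-level set of lowercase
# extensions; the toy set below exists only for this illustration.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
assert allowed_file('photo.JPG')
assert not allowed_file('notes.txt')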
|
3d0a3a15eecf8f6b0d76b52935a14628f1655328
| 3,644,231
|
import re
def parse_tsv(filename, name_dict):
"""
"""
output_matrix = []
with open(filename, 'rU') as handle:
curr_protein = []
for line in handle:
if line[0] == "#" or line[0] == "-" or len(line.strip('\n')) < 1:
continue
if re.match("Protein", line):
continue
arow = line.strip('\n').split()
if arow[0] == "pos":
continue
arow[12] = float(arow[12])
if len(arow[10].split('-')) == 3:
#arow = arow[:10] + arow[10].split('_') + arow[11:]
arow = arow[:10] + name_dict[arow[10]].split('-') + arow[11:]
#print arow
output_matrix.append(arow)
return output_matrix
|
12aa31ab3ff033ecc514518700c22ea467f01ef6
| 3,644,232
|
def get_orlist(site=DEFAULT_SITE, namespace="0|6|10|14|100|828", redirects="nonredirects"):
"""Get list of oldreviewed pages."""
request = Request(site=site,
action="query",
list="oldreviewedpages",
ornamespace=namespace,
orfilterredir=redirects,
orlimit="5000")
result = []
while True:
answer = request.submit()
result += [page["title"] for page in answer["query"]["oldreviewedpages"]]
if "query-continue" in answer:
request["orstart"] = answer["query-continue"]["oldreviewedpages"]["orstart"]
else:
break
return result
|
8253b2ac8ea72690086fa7864e5ca4ffcc33de50
| 3,644,233
|
def meshVolume(verts, norm, tri):
"""Compute the Volume of a mesh specified by vertices, their normals, and
indices of triangular faces
"""
# TEST
zeronorms = []
for i, n in enumerate(norm):
#if n == [0., 0., 0.] or n == (0., 0., 0.):
if n[0] == 0 and n[1] == 0 and n[2] == 0:
#print "normal %d is zero!" % i, n
zeronorms.append(i)
#print "in meshVolume, zeronorms length: ", len(zeronorms), "normals length:", len(norm)
# Initialize
volSum = 0.0
oneThird = 1./3.
# Compute face normals
trinorm = []
for t in tri:
n1 = norm[t[0]]
n2 = norm[t[1]]
n3 = norm[t[2]]
tn = [ (n1[0]+n2[0]+n3[0])*oneThird,
(n1[1]+n2[1]+n3[1])*oneThird,
(n1[2]+n2[2]+n3[2])*oneThird ]
trinorm.append(tn)
# print trinorm # TEST
# Compute volume
for t,tn in zip(tri, trinorm):
s1 = verts[t[0]]
s2 = verts[t[1]]
s3 = verts[t[2]]
area = triangleArea(s1,s2,s3)
g = [ (s1[0]+s2[0]+s3[0])*oneThird,
(s1[1]+s2[1]+s3[1])*oneThird,
(s1[2]+s2[2]+s3[2])*oneThird ]
volSum += (g[0]*tn[0] + g[1]*tn[1] + g[2]*tn[2])*area
return volSum*oneThird
|
018818ab558b64b9699250bf6f45f0a1c47f92c8
| 3,644,234
|
def _groupby_clause(uuid=None, owner=None, human_name=None, processing_name=None):
"""
Build the groupby clause. Simply detect which fields are set, and group by those.
Args:
uuid:
owner:
human_name:
processing_name:
Returns:
(str): "field, ..., field"
"""
gbc = ''
clauses = []
if uuid is not None:
clauses.append('uuid')
if owner is not None:
clauses.append('owner')
if human_name is not None:
clauses.append('human_name')
if processing_name is not None:
clauses.append('processing_name')
if len(clauses) > 0:
gbc = ','.join(clauses)
return gbc
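# Example: only the fields that were passed (non-None) end up in the clause.
assert _groupby_clause(uuid='abc', owner='me') == 'uuid,owner'
assert _groupby_clause() == ''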
|
21546efa19e841661ed3a7ad8a84cf9a9a76d416
| 3,644,235
|
def _coeff_mod_wfe_drift(self, wfe_drift, key='wfe_drift'):
"""
Modify PSF polynomial coefficients as a function of WFE drift.
"""
# Modify PSF coefficients based on WFE drift
if wfe_drift==0:
cf_mod = 0 # Don't modify coefficients
elif (self._psf_coeff_mod[key] is None):
_log.warning("You must run `gen_wfedrift_coeff` first before setting the wfe_drift parameter.")
_log.warning("Will continue assuming `wfe_drift=0`.")
cf_mod = 0
else:
_log.info("Generating WFE drift modifications...")
psf_coeff = self.psf_coeff
cf_fit = self._psf_coeff_mod[key]
lxmap = self._psf_coeff_mod['wfe_drift_lxmap']
# Fit function
cf_fit_shape = cf_fit.shape
cf_fit = cf_fit.reshape([cf_fit.shape[0], -1])
cf_mod = jl_poly(np.array([wfe_drift]), cf_fit, use_legendre=True, lxmap=lxmap)
cf_mod = cf_mod.reshape(cf_fit_shape[1:])
# Pad cf_mod array with 0s if undersized
if not np.allclose(psf_coeff.shape, cf_mod.shape):
new_shape = psf_coeff.shape[1:]
cf_mod_resize = np.asarray([pad_or_cut_to_size(im, new_shape) for im in cf_mod])
cf_mod = cf_mod_resize
return cf_mod
|
345d07a8850ec702d42f5c527fae0311f50a69b1
| 3,644,236
|
def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None):
""" Returns the GitHub webhook JSON payload transformed into our own payload
format. If the gh_payload is not valid, returns None.
"""
try:
validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA)
except Exception as exc:
raise InvalidPayloadException(exc.message)
payload = JSONPathDict(gh_payload)
if payload['head_commit'] is None:
raise SkipRequestException
config = SafeDictSetter()
config['commit'] = payload['head_commit.id']
config['ref'] = payload['ref']
config['default_branch'] = payload['repository.default_branch'] or default_branch
config['git_url'] = payload['repository.ssh_url']
config['commit_info.url'] = payload['head_commit.url']
config['commit_info.message'] = payload['head_commit.message']
config['commit_info.date'] = payload['head_commit.timestamp']
config['commit_info.author.username'] = payload['head_commit.author.username']
config['commit_info.author.url'] = payload.get('head_commit.author.html_url')
config['commit_info.author.avatar_url'] = payload.get('head_commit.author.avatar_url')
config['commit_info.committer.username'] = payload.get('head_commit.committer.username')
config['commit_info.committer.url'] = payload.get('head_commit.committer.html_url')
config['commit_info.committer.avatar_url'] = payload.get('head_commit.committer.avatar_url')
# Note: GitHub doesn't always return the extra information for users, so we do the lookup
# manually if possible.
if (lookup_user and not payload.get('head_commit.author.html_url') and
payload.get('head_commit.author.username')):
author_info = lookup_user(payload['head_commit.author.username'])
if author_info:
config['commit_info.author.url'] = author_info['html_url']
config['commit_info.author.avatar_url'] = author_info['avatar_url']
if (lookup_user and
payload.get('head_commit.committer.username') and
not payload.get('head_commit.committer.html_url')):
committer_info = lookup_user(payload['head_commit.committer.username'])
if committer_info:
config['commit_info.committer.url'] = committer_info['html_url']
config['commit_info.committer.avatar_url'] = committer_info['avatar_url']
return config.dict_value()
|
26e645219b816405521ddb6033a0a44c2ab7bba5
| 3,644,237
|
def get_retweeted_tweet(tweet):
"""
Get the retweeted Tweet and return it as a dictionary
If the Tweet is not a Retweet, return None
Args:
tweet (Tweet or dict): A Tweet object or a dictionary
Returns:
dict: A dictionary representing the retweeted status
        or None if there is no retweeted status. \n
- For original format, this is the value of "retweeted_status" \n
- For activity streams, If the Tweet is a Retweet this is the value of the key "object"
"""
if get_tweet_type(tweet) == "retweet":
if is_original_format(tweet):
return tweet["retweeted_status"]
else:
return tweet["object"]
else:
return None
|
f852d45deadb1622687d097f2c724bdaef72ccc9
| 3,644,238
|
def listminus(c1, c2):
"""Return a list of all elements of C1 that are not in C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
        if delta not in s2:
c.append(delta)
return c
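# Example: the relative order of the surviving C1 elements is preserved.
assert listminus([1, 2, 3, 2], [2]) == [1, 3]
assert listminus(['a', 'b'], []) == ['a', 'b']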
|
829c347343d6a305fef2ad2f71539d7267b5a973
| 3,644,239
|
import random
import torch
def distribute_quantity_skew(batch_size, grouped_data, distributed_dataset, groupings, p=0.5, scalar=1.5):
"""
Adds quantity skew to the data distribution. If p=0. or scalar=1., no skew is applied and the data are divided
evenly among the workers in each label group.
:param batch_size: the batch size for training
:param grouped_data: a dictionary containing the data for each label skew group, key is the label integer and value
is the data
:param distributed_dataset: an initialized empty dictionary that will be filled with data for each worker
:param groupings: a dictionary of the groupings for each worker id, key is the label integer and value is a list of
worker ids
:param p: the portion of workers within each group that will receive higher data quantities, p=0 indicates no skew
:param scalar: the factor used to multiply the size of datasets for high quantity workers, e.g. if scalar=1.5 then
each worker with high quantity skew has 1.5x as many data points as the low quantity workers in their group
:return: the distributed dataset
"""
for n, group in groupings.items():
high_quantity = random.sample(group, k=int(p*len(group)))
low_quantity = [i for i in group if i not in high_quantity]
base_k = int(len(grouped_data[n])/len(group))
print(f"Base K: {base_k}")
print(f"Length of grouped data: {len(grouped_data[n])}")
if p > 0.:
low_k = int(len(grouped_data[n]) / (len(low_quantity) + len(high_quantity) * scalar))
high_k = int(low_k * scalar)
print(f"High Quantity Skew: {high_quantity}")
print(f"High Quantity K: {high_k}")
print(f"Low Quantity Skew: {low_quantity}")
print(f"Low Quantity K: {low_k}")
else:
low_k = base_k
assert len(high_quantity) == 0, "Quantity skew with probability 0 should have no high quantity clients"
print(f"High Quantity Skew: {high_quantity}")
print(f"Low Quantity Skew: {low_quantity}")
print(f"Base K: {base_k}")
for worker in high_quantity:
selected = random.sample(list(range(len(grouped_data[n]))), k=high_k)
temp = [grouped_data[n][i] for i in selected]
# This would need to be changed if the number of samples is not divisible by batch size
worker_vals = []
for i in range(len(temp) // batch_size):
ix = i * batch_size
vals = temp[ix:ix + batch_size]
targets = []
inputs = []
for j in vals:
targets.append(int(j[1].numpy()))
inputs.append(j[0].numpy())
worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
distributed_dataset[worker].extend(worker_vals)
grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
for nx, worker in enumerate(low_quantity):
if nx+1 == len(low_quantity):
print(f"Length of remaining data = {len(grouped_data[n])}\nLow_k = {low_k}")
temp = grouped_data[n]
else:
selected = random.sample(list(range(len(grouped_data[n]))), k=low_k)
temp = [grouped_data[n][i] for i in selected]
# This would need to be changed if the number of samples is not divisible by batch size
worker_vals = []
for i in range(len(temp) // batch_size):
ix = i * batch_size
vals = temp[ix:ix + batch_size]
targets = []
inputs = []
for j in vals:
targets.append(int(j[1].numpy()))
inputs.append(j[0].numpy())
worker_vals.append((torch.Tensor(inputs), torch.Tensor(targets)))
distributed_dataset[worker].extend(worker_vals)
if nx+1 != len(low_quantity):
grouped_data[n] = [grouped_data[n][i] for i in range(len(grouped_data[n])) if i not in selected]
return distributed_dataset
|
b4ebd1d6058550d2e32cedd62a56b50441d93b4c
| 3,644,240
|
import numpy as np
import lightkurve as lk
def xmkpy3_tpf_get_coordinates_v1():
"""Unit test"""
print(lk.__version__, "=lk.__version__")
def msg(ok, tag_): # helper function
print("***" + tag_ + ": ", end="")
if ok:
print("PASS***")
else:
print("FAIL***")
# fed
tpf = lk.search_targetpixelfile(
target="kepler-138b", mission="kepler", cadence="long", quarter=10
).download(quality_bitmask=0)
w = tpf.wcs # alias
ll_x0 = 0
ll_y0 = 0
print(ll_x0, "=ll_x0")
print(ll_y0, "=ll_y0")
origin0 = 0
ra_ll_x0, dec_ll_y0 = w.wcs_pix2world(ll_x0, ll_y0, origin0)
print(ra_ll_x0, dec_ll_y0, "=ra_ll_x0, dec_ll_y0")
print()
x0_ra_ll_x0, y0_dec_ll_y0 = w.wcs_world2pix(ra_ll_x0, dec_ll_y0, origin0)
print(
x0_ra_ll_x0, y0_dec_ll_y0, "=x0_ra_ll_x0, y0_dec_ll_y0 [should be about (0,0)]"
)
ra_x0_ra_ll_x0, dec_y0_dec_ll_y0 = w.wcs_pix2world(
x0_ra_ll_x0, y0_dec_ll_y0, origin0
)
print(ra_x0_ra_ll_x0, dec_y0_dec_ll_y0, "=ra_x0_ra_ll_x0, dec_y0_dec_ll_y0")
print("\nra_x0_ra_ll_x0 is_close_to ra_ll_x0 ?")
ok = np.abs(ra_x0_ra_ll_x0 - ra_ll_x0) < 0.000001
msg(ok, "TEST1")
print("^--- THIS BETTER PASS!")
print("\ndec_y0_dec_ll_y0 is_close_to dec_ll_y0 ?")
ok = np.abs(dec_y0_dec_ll_y0 - dec_ll_y0) < 0.000001
msg(ok, "TEST2")
print("^--- THIS BETTER PASS!")
print()
frame0 = 0
# Set one of the next 3 if statements to TRUE depending on the function to
# be tested
if False:
print("---> check tpf.get_coordinates()")
rax_ll_x0 = tpf.get_coordinates()[0][frame0][0][0]
decx_ll_y0 = tpf.get_coordinates()[1][frame0][0][0]
print(
"NOTE: next two tests will PASS --- if the tpf.get_coordinates "
"bug has been fixed"
)
# fi
if True:
print("---> check mkpy3_tpf_get_coordinates_v1()")
rax_ll_x0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf)[0][frame0][0][0]
decx_ll_y0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf)[1][frame0][0][0]
print("NOTE: next two tests should PASS")
# fi
if False:
print("---> check mkpy3_tpf_get_coordinates_v1(...,recreate_bug=True)")
rax_ll_x0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf, recreate_bug=True)[0][frame0][
0
][0]
decx_ll_y0 = mkpy3_tpf_get_coordinates_v1(tpf=tpf, recreate_bug=True)[1][
frame0
][0][0]
print("NOTE: next two tests should FAIL")
# fi
print(rax_ll_x0, decx_ll_y0, "=rax_ll_x0, decx_ll_y0")
print()
x0_rax_ll_x0, y0_decx_ll_y0 = w.wcs_world2pix(rax_ll_x0, decx_ll_y0, origin0)
    print(x0_rax_ll_x0, y0_decx_ll_y0, "=x0_rax_ll_x0, y0_decx_ll_y0")
tpf_pos_corr1_frame0 = tpf.pos_corr1[frame0]
tpf_pos_corr2_frame0 = tpf.pos_corr2[frame0]
print(
tpf_pos_corr1_frame0,
tpf_pos_corr2_frame0,
"=tpf_pos_corr1_frame0, tpf_pos_corr2_frame0",
)
xx0_rax_ll_x0 = x0_rax_ll_x0 - tpf_pos_corr1_frame0
yy0_decx_ll_y0 = y0_decx_ll_y0 - tpf_pos_corr2_frame0
print(
xx0_rax_ll_x0,
yy0_decx_ll_y0,
"=xx0_rax_ll_x0, yy0_decx_ll_y0 [should be about (0,0)]",
)
ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0 = w.wcs_pix2world(
xx0_rax_ll_x0, yy0_decx_ll_y0, origin0
)
print(ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0, "=ra_xx0_rax_ll_x0, dec_yy0_decx_ll_y0")
print("\nra_xx0_rax_ll_x0 is_close_to ra_ll_x0 ?")
ok = np.abs(ra_xx0_rax_ll_x0 - ra_ll_x0) < 0.000001
msg(ok, "TEST3")
print("\ndec_yy0_decx_ll_y0 is_close_to dec_ll_y0 ?")
ok = np.abs(dec_yy0_decx_ll_y0 - dec_ll_y0) < 0.000001
msg(ok, "TEST4")
return None
# fed
|
c01e7440f4e48fb922bf683185d76ecc1eb349b6
| 3,644,241
|
import os
import sys
import imp
from maya import cmds
def __loadModule(modulePath):
# type: (str) -> module
""" Load module
Args:
modulePath (str): Full path to the python module
Return:
mod (module object): command module
None: if path doesn't exist
"""
    # Create module names for import, for example ...
#
# "rush/template"
# "animation/animate"
# "common/create"
# "common/display"
normPath = os.path.normpath(modulePath)
if sys.platform == "win32":
name = os.path.splitext(normPath)[0].split("\\")
else:
name = os.path.splitext(normPath)[0].split("/")
name = "/".join(name[-2:])
# If arnold is not loaded or installed, ignore modules for arnold
if name.startswith("Arnold"):
hasArnold = cmds.pluginInfo("mtoa", q=True, loaded=True)
if not hasArnold:
return None
try:
mod = imp.load_source(name, modulePath)
return mod
except Exception:
print("Failed to load module : %s" % modulePath)
return None
|
df043a8f5bb189aaa55060266bf6a0bb2a8b77f2
| 3,644,242
|
def get_dtype(names, array_dtype=DEFAULT_FLOAT_DTYPE):
"""
Get a list of tuples containing the dtypes for the structured array
Parameters
----------
names : list of str
Names of parameters
array_dtype : optional
dtype to use
Returns
-------
list of tuple
Dtypes as tuples with (field, dtype)
"""
return [(n, array_dtype) for n in names] \
+ [('logP', array_dtype), ('logL', LOGL_DTYPE)]
|
9f29dae78b3839429f13b8513293e9ce4c240e2f
| 3,644,243
|
import sys
def print_progress_table(col_headers, col_widths = None, col_init_data = None,
col_format_specs = None, skip_header=False):
""" Live updates on progress with NUPACK and Multistrand computations.
    Note: This table has two rows: a header row and a single data row that the
    returned update function overwrites in place.
Args:
col_headers (list(str)): The header of the table.
col_widths (list(int), optional): Spacing of the table columns. Strings are
clipped to width-1. (?)
        col_init_data (list(), optional): Prints initial data into the first row.
        col_format_specs (list(str), optional): Format strings (e.g. '{:6.2f}') applied
            to each column's data; defaults to '{}' for every column.
Returns:
A progress update function which overwrites the data row (or the last line on screen).
"""
def update_progress(col_data, inline=True):
"""Print new data to your progress table."""
str_data = [('{:<'+str(w-1)+'}').format(f.format(d))[:w-1] for d,w,f in zip(col_data, col_widths, col_format_specs)]
print("# {}{}".format(' '.join(str_data), "\r" if inline else "\n"), end='')
sys.stdout.flush()
if col_widths is None:
col_widths = [max(len(h)+1, 8) for h in col_headers]
else:
assert len(col_widths) == len(col_headers)
if col_format_specs is None:
col_format_specs = ['{}'] * len(col_headers)
else:
assert len(col_format_specs) == len(col_headers)
header = ' '.join([(h+' '*(w-1))[:w-1] for h,w in zip(col_headers, col_widths)])
if not skip_header:
print("# {}".format(header))
if col_init_data is not None:
update_progress(col_init_data)
return update_progress
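A brief usage sketch showing a two-column table whose data row is overwritten in place:

update = print_progress_table(['step', 'energy'],
                              col_widths=[8, 12],
                              col_format_specs=['{}', '{:.3f}'])
for step in range(5):
    update([step, step * 0.1])   # rewrites the data row in place
update([5, 0.5], inline=False)   # final call moves on to a new line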
|
605e916a67e5dfa68cbf25dd261fd7710416ae39
| 3,644,244
|
def if_analyser(string):
"""调用python的eval函数计算True false"""
trans = sign_transform(string.strip().lower())
# print('if_analyser>>', trans)
boool = eval(trans)
boool = 1 if boool else 0
return boool
|
a27469a6c23a53f0131e8135600c6dc7d596cdbb
| 3,644,245
|
def zzX_trunc(f, p):
"""Reduce Z[X] polynomial modulo polynomial p. """
return zzX_strip([ zzX_rem(g, p) for g in f ])
|
9e80862a229b1a0689dea01fef865997ee87d1f9
| 3,644,246
|
from typing import List
def _format_bin_intervals(bins_arr: np.ndarray) -> List[str]:
"""
Auxillary function to format bin intervals in a histogram
Parameters
----------
bins_arr: np.ndarray
Bin endpoints to format into intervals
Returns
-------
List of formatted bin intervals
"""
bins_arr = np.round(bins_arr, 3)
intervals = [f"[{bins_arr[i]},{bins_arr[i+1]})" for i in range(len(bins_arr) - 2)]
intervals.append(f"[{bins_arr[-2]},{bins_arr[-1]}]")
return intervals
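A usage sketch against the bin edges returned by np.histogram (the exact labels depend on the data):

import numpy as np

_, edges = np.histogram(np.random.rand(100), bins=4)
labels = _format_bin_intervals(edges)
# e.g. ['[0.011,0.256)', '[0.256,0.501)', '[0.501,0.746)', '[0.746,0.991]']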
|
96d3a89fc3427bf33abe5c44a04061694ae7b2b3
| 3,644,247
|
from typing import List
from typing import Dict
from typing import Callable
def get_embedder_functions(corpus: List[str]) -> Dict[str, Callable[[List[str]], List[float]]]:
"""
Returns a list of the available embedders.
#! If updated, update next function too
"""
embedders = {
# 'Bag of Words': bow_embedder(corpus),
'FastText (CBOW)': fasttext_embedder(corpus, model_type="cbow"),
'FastText (Skipgram)': fasttext_embedder(corpus, model_type="skipgram"),
'Doc2Vec': doc2vec_embedder(corpus),
'GPT2 Small Spanish': bert_embedder(model_name="datificate/gpt2-small-spanish"),
'BERT: TinyBERT-spanish-uncased-finetuned-ner':
bert_embedder(model_name='mrm8488/TinyBERT-spanish-uncased-finetuned-ner'),
'BERT: paraphrase-xlm-r-multilingual-v1': bert_embedder(model_name='paraphrase-xlm-r-multilingual-v1'),
'BERT: distiluse-base-multilingual-cased-v2': bert_embedder(model_name='distiluse-base-multilingual-cased-v2'),
}
reduced_embedders = {}
for name, embedder in embedders.items():
reduced_embedders[f"{name} (50-d)"] = reduce_dimensionality(embedder)
return {**embedders, **reduced_embedders}
|
6e1d4ddd41725a26b940c7d108ea552366ab6c9b
| 3,644,248
|
import logging
def test_stimeit():
""" Test the stimeit function """
dummy_function = lambda x: x + 2
@vtime.stimeit(logging.info)
def stimeit_function(x):
return dummy_function(x)
assert dummy_function(42) == stimeit_function(42)
|
6d5cf6d261871cb466b71e93dbecfafff1731727
| 3,644,249
|
import yaml
def parse_thermal_properties(f):
"""thermal_properties.yaml parser."""
thermal_properties = {
"temperatures": [],
"free_energy": [],
"entropy": [],
"heat_capacity": [],
}
data = yaml.load(f, Loader=Loader)
for tp in data["thermal_properties"]:
thermal_properties["temperatures"].append(tp["temperature"])
thermal_properties["entropy"].append(tp["entropy"])
thermal_properties["free_energy"].append(tp["free_energy"])
thermal_properties["heat_capacity"].append(tp["heat_capacity"])
for key in thermal_properties:
thermal_properties[key] = np.array(thermal_properties[key])
tprops = get_thermal_properties(thermal_properties)
return tprops
|
4cc0020849e6ec1202fd2138f0bc86e5abfadf3b
| 3,644,250
|
def convert(ts, new_freq, include_partial=True, **kwargs):
"""
This function converts a timeseries to another frequency. Conversion only
works from a higher frequency to a lower frequency, for example daily to
monthly.
NOTE: add a gatekeeper for invalid kwargs.
"""
new_ts = ts.clone()
series_dir = ts.series_direction()
new_ts.sort_by_date(reverse=True)
freq_idx = HIERARCHY.index(ts.frequency)
new_idx = HIERARCHY.index(FREQ_Q)
daily_idx = HIERARCHY.index(FREQ_D)
if freq_idx > new_idx:
raise ValueError(
"Cannot convert from %s to %s." % (ts.frequency, new_freq)
)
dates = new_ts.datetime_series()
date_series_type = ts.get_date_series_type()
if date_series_type == TS_ORDINAL:
selected = _filter_dates(dates, new_freq, kwargs)
elif date_series_type == TS_TIMESTAMP:
selected = _filter_idates(
dates, new_freq, end_of_period=ts.end_of_period
)
else:
raise ValueError("Invalid date series type: %s" % (date_series_type))
if selected.shape[0] > 0:
if new_ts.end_of_period:
selected += 1 # shift to start of next period
if include_partial or freq_idx > daily_idx:
if selected[0] != 0:
# insert most recent date
# selected = np.insert(selected, 0, 0)
# np.insert(arr, obj, values, axis=None)
selected = np.insert(selected, 0, 0)
if freq_idx > daily_idx:
# already processed (probably)
if selected[-1] != len(dates) - 1:
selected = np.append(selected, len(dates) - 1)
new_ts.tseries = new_ts.tseries[selected.flatten()]
new_ts.frequency = new_freq
if new_freq == FREQ_D:
# convert dates from timestamp to ordinal
new_ts.dseries = np.fromiter(
[date.toordinal() for date in np.array(dates)[selected]],
dtype=np.int32,
)
else:
new_ts.dseries = new_ts.dseries[selected]
new_ts.dseries = new_ts.dseries.flatten()
if series_dir != new_ts.series_direction():
new_ts.reverse()
return new_ts
|
a6b8daf6092052c0d7872d4b9a75edbe10bc15e5
| 3,644,251
|
def _get_image_info(name: str) -> versions.Image:
"""Retrieve an `Image` information by name from the versions listing."""
try:
return versions.CONTAINER_IMAGES_MAP[name]
except KeyError:
raise ValueError(
'Missing version for container image "{}"'.format(name)
)
|
4a328d6924adc3c826a6a01c46a27e6380d5d89a
| 3,644,252
|
def _mother_proc_cpp_stat(
amplitude_distribution, t_stop, rate, t_start=0 * pq.ms):
"""
Generate the hidden ("mother") Poisson process for a Compound Poisson
Process (CPP).
Parameters
----------
amplitude_distribution : np.ndarray
CPP's amplitude distribution :math:`A`. `A[j]` represents the
probability of a synchronous event of size `j` among the generated
spike trains. The sum over all entries of :math:`A` must be equal to
one.
t_stop : pq.Quantity
The stopping time of the mother process
rate : pq.Quantity
Homogeneous rate of the n spike trains that will be generated by the
CPP function
t_start : pq.Quantity, optional
The starting time of the mother process
Default: 0 pq.ms
Returns
-------
Poisson spike train representing the mother process generating the CPP
"""
n_spiketrains = len(amplitude_distribution) - 1
# expected amplitude
exp_amplitude = np.dot(
amplitude_distribution, np.arange(n_spiketrains + 1))
# expected rate of the mother process
exp_mother_rate = (n_spiketrains * rate) / exp_amplitude
return StationaryPoissonProcess(
rate=exp_mother_rate, t_stop=t_stop, t_start=t_start
).generate_spiketrain()
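A usage sketch, assuming elephant's StationaryPoissonProcess and the quantities package are available as in the surrounding module:

import numpy as np
import quantities as pq

# Amplitude distribution for 2 spike trains: 90% single spikes, 10% pairwise synchrony.
A = np.array([0.0, 0.9, 0.1])
mother = _mother_proc_cpp_stat(A, t_stop=1000 * pq.ms, rate=5 * pq.Hz)
# Expected mother-process rate: (2 * 5 Hz) / 1.1 ~= 9.09 Hz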
|
90ea9272c1a5541ea5c278960369ea301b31d01a
| 3,644,253
|
import telegram
import urllib.error
def get_service(hass, config):
"""Get the Telegram notification service."""
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_API_KEY, 'chat_id']},
_LOGGER):
return None
try:
bot = telegram.Bot(token=config[CONF_API_KEY])
username = bot.getMe()['username']
_LOGGER.info("Telegram bot is '%s'.", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token.")
return None
return TelegramNotificationService(config[CONF_API_KEY], config['chat_id'])
|
474efeccaef641ba50042a036d5edf6d6e86f90c
| 3,644,254
|
def has_numbers(input_str: str):
""" Check if a string has a number character """
return any(char.isdigit() for char in input_str)
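Two quick examples of the check:

has_numbers("route66")   # True
has_numbers("abc")       # False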
|
5038cb737cdcfbad3a7bd6ac89f435559b67cebc
| 3,644,255
|
def _validate_LIMS_data(input, field_label, selectedTemplate, planObj):
"""
No validation but LIMS data with leading/trailing blanks in the input will be trimmed off
"""
errorMsg = None
if input:
data = input.strip()
try:
if planObj.get_planObj().metaData:
logger.debug(
"plan_csv_validator._validator_LIMS_data() B4 planObj.get_planObj().metaData=%s"
% (planObj.get_planObj().metaData)
)
else:
planObj.get_planObj().metaData = {}
if len(planObj.get_planObj().metaData.get("LIMS", [])) == 0:
planObj.get_planObj().metaData["LIMS"] = []
planObj.get_planObj().metaData["LIMS"].append(data)
logger.debug(
"EXIT plan_csv_validator._validator_LIMS_data() AFTER planObj.get_planObj().metaData=%s"
% (planObj.get_planObj().metaData)
)
except Exception:
logger.exception(format_exc())
errorMsg = "Internal error during LIMS data processing"
# self.metaData["Status"] = status
# self.metaData["Date"] = "%s" % timezone.now()
# self.metaData["Info"] = info
# self.metaData["Comment"] = comment
#
# Try to read the Log entry, if it does not exist, create it
# if len(self.metaData.get("Log",[])) == 0:
# self.metaData["Log"] = []
# self.metaData["Log"].append({"Status":self.metaData.get("Status"), "Date":self.metaData.get("Date"), "Info":self.metaData.get("Info"), "Comment":comment})
return errorMsg
|
2ca3f271c546e4506e7e2490fb8b60fc0ef03f35
| 3,644,256
|
def get_report_permission(report: Report, user: User) -> Permission:
"""Get permission of given user for the report.
:param report: The report
:type report: Report
:param user: The user whose permissions are to be checked
:type user: User
:return: The user's permissions for the report
:rtype: Permission
"""
if 'reports' in session and str(report.id) in session['reports']:
return session['reports'][str(report.id)]
rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
if rp is None and user.Role == 's':
return ADMIN_DEFAULT_PERMISSION
if rp is None:
return 'n'
return rp.Type
|
bb09d744c133a4c9212ab6c6e2ba345bb9c8f78f
| 3,644,257
|
def peak_time_from_sxs(
sxs_format_waveform,
metadata,
extrapolation_order='Extrapolated_N2'):
"""Returns the time when the sum of the squared amplitudes of an
SXS-format waveform is largest. Note: this is not necessarily the time of
the peak of the l=m=2 mode."""
extrap = extrapolation_order + ".dir"
# All modes have the same time, so just look at the l=m=2 mode to get the
# times
times = sxs_format_waveform[extrapolation_order +
".dir"]['Y_l2_m2.dat'][:, 0]
start = first_index_before_reference_time(times, metadata)
sum_amp_squared = waveform_norm_squared(
sxs_format_waveform, extrapolation_order)
index_peak = start + sum_amp_squared[start:].argmax()
return sxs_format_waveform[extrap]['Y_l2_m2.dat'][index_peak][0]
|
1ad4b593db3aa3d74170056f2c32d4108ec05a48
| 3,644,258
|
def create_trackhub_resource(project_dir, api_client, create_user_resource, create_genome_assembly_dump_resource):
"""
This fixture is used to create a temporary trackhub using POST API
The created trackhub will be used to test GET API
"""
_, token = create_user_resource
api_client.credentials(HTTP_AUTHORIZATION='Token ' + str(token))
submitted_hub = {
'url': 'file:///' + str(project_dir) + '/' + 'samples/JASPAR_TFBS/hub.txt'
}
response = api_client.post('/api/trackhub/', submitted_hub, format='json')
return response
|
a81db1e7c9c95355457d9f6c4ec4c6428e1a77a7
| 3,644,259
|
def create_node(x, y):
"""Create a node along the network.
Parameters
----------
x : array-like
Sequence of x coordinates; only the first value is used.
y : array-like
Sequence of y coordinates; only the first value is used.
Returns
-------
_node : shapely.geoemtry.Point
Instantiated node.
"""
_node = Point(list(zip(x, y))[0])
return _node
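A usage sketch passing single-element sequences, since only the first coordinate pair is used:

node = create_node([3.0], [4.0])
node.x, node.y   # (3.0, 4.0)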
|
d8645b77a3d843bf3855522c63156916432ae899
| 3,644,260
|
from typing import Match
def get_matchroom_name(match: Match) -> str:
"""Get a new unique channel name corresponding to the match.
Parameters
----------
match: Match
The match whose info determines the name.
Returns
-------
str
The name of the channel.
"""
name_prefix = match.matchroom_name
cut_length = len(name_prefix) + 1
largest_postfix = 1
found = False
for channel in server.server.channels:
if channel.name.startswith(name_prefix):
found = True
try:
val = int(channel.name[cut_length:])
largest_postfix = max(largest_postfix, val)
except ValueError:
pass
return name_prefix if not found else '{0}-{1}'.format(name_prefix, largest_postfix + 1)
|
404d21b6f88918204aa287c9227640c03f47b916
| 3,644,261
|
import re
def unidecode_name(uname):
"""
unidecode() of cjk ideograms can produce strings which contain spaces.
Strip leading and trailing spaces, and reduce double-spaces to single.
For some other ranges, unidecode returns all-lowercase names; fix these
up with capitalization.
"""
# Fix double spacing
name = unidecode.unidecode(uname)
if name == uname:
return name
name = re.sub(' +', ' ', name.strip().replace('@', '').replace('"', ''))
name = re.sub(r'(\w)\.(\w)', r'\1\2', name)
# Fix all-upper and all-lower names:
# Check for name particles -- don't capitalize those
m = name_particle_match(name)
particle = m.group(1) if m else None
# Get the name parts
prefix, first, middle, last, suffix = name_parts(name)
# Capitalize names
first = first.title()
middle = ' '.join([ capfirst(p) for p in middle.split() ])
last = ' '.join([ capfirst(p) for p in last.split() ])
if len(last) == 1:
last = (last+last).capitalize()
# Restore the particle, if any
if particle and last.startswith(capfirst(particle)+' '):
last = ' '.join([ particle, last[len(particle)+1:] ])
# Recombine the parts
parts = prefix, first, middle, last, suffix
name = ' '.join([ p for p in parts if p and p.strip() != '' ])
name = re.sub(' +', ' ', name)
return name
|
16676453059b53b3e397f33630e00de66f3585b9
| 3,644,262
|
def scnet50(**kwargs):
"""
SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=50, model_name="scnet50", **kwargs)
|
c900c5a0da1f4f0960ced2ba36fb9785a7340f4a
| 3,644,263
|
def xception_block(inputs, depth_list, prefix, skip_connect_type, stride, rate=1,
depth_activation=False, return_skip=False):
"""用于构建xception,同样用到了残差结构,但是将卷积换成了 深度可分离卷积(depthwise + pointwise + conv 1x1)"""
residual = inputs
for i in range(3):
# depthwise + pointwise + conv2d
residual = sep_layer(residual, depth_list[i], prefix + '_separable_conv{}'.format(i + 1),
stride=stride if stride == 2 else 1, rate=rate, depth_activation=depth_activation)
if i == 1:
skip = residual # output after the second separable conv (depthwise + conv2d)
if skip_connect_type == 'conv':
# Skip connection: pass the input through a side 1x1 conv, then add it to the main-path output
shortcut = conv_same_layer(inputs, depth_list[-1], prefix + '_shortcut', k_size=1, stride=stride)
shortcut = layers.BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
output = layers.Add()([residual, shortcut])
elif skip_connect_type == 'sum':
# Skip connection: add the input directly to the main-path output
output = layers.Add()([residual, inputs])
elif skip_connect_type == 'none':
# No skip connection
output = residual
if return_skip:
# output is the result of the whole block; skip is the main-path output after the first two sep_conv layers
return output, skip
else:
return output
|
3a8eaf7cb73216039411ec8fddd7bfb8c81604a6
| 3,644,264
|
def auth_test():
"""
Tests that endpoint authentication works.
:return:
"""
return "hello"
|
7c65897d83b0af41307aec28d7f2ce3d6852f8b7
| 3,644,265
|
def cnn_2x_lstm_siamese(voc_size, max_len, dropout=0.5):
"""Two siamese branches, each embedding a statement.
Binary classifier on top.
Args:
voc_size: size of the vocabulary for the input statements.
max_len: maximum length for the input statements.
dropout: Fraction of units to drop.
Returns:
A Keras model instance.
"""
pivot_input = layers.Input(shape=(max_len,), dtype='int32')
statement_input = layers.Input(shape=(max_len,), dtype='int32')
x = layers.Embedding(
output_dim=256,
input_dim=voc_size,
input_length=max_len)(pivot_input)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(3)(x)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
embedded_pivot = layers.LSTM(256)(x)
encoder_model = Model(pivot_input, embedded_pivot)
embedded_statement = encoder_model(statement_input)
concat = layers.merge([embedded_pivot, embedded_statement], mode='concat')
x = layers.Dense(256, activation='relu')(concat)
x = layers.Dropout(dropout)(x)
prediction = layers.Dense(1, activation='sigmoid')(x)
model = Model([pivot_input, statement_input], prediction)
return model
|
d0eec28b8e91bed77fbc84bd085cae337da04c61
| 3,644,266
|
def roll_function(positions, I, angular_velocity):
"""
Due to how the simulations are generated where the first point of the simulation
is at the smallest x value and the subsequent positions are in a clockwise
(counterclockwise) direction when the vorticity is positive (negative), the first
point of the simulated intensity might lie in the middle of an intensity trace.
This needs to be compensated for by rolling array elements. Simulations come onto
the screen from one of 4 sides. Which side the sim comes onto the screen and
which side the sim leaves the screen defines how to roll the intensity as a function
of time such that the first returned position is at the entrance and the final returned
position is at the exit.
Args:
positions (array): position of Particle
I (array): intensities calculated as a function of position
angular_velocity (float): Particle angular velocity
Returns:
p (array): position of Particle, adjusted to preserve order of peaks
I (array): intensities calculated as a function of p, adjusted to preserve order of peaks
"""
p = positions.T
x_0 = p[0][0]
y_0 = p[1][0]
clockwise = True
if angular_velocity < 0:
clockwise = False
roll = 0
if clockwise:
if (x_0>0) and (y_0>0) and (y_0<616):
# need to roll
if 616/2 > y_0: # orbit starts in upper half of screen
try:
rollval = -np.argwhere(p[1][:(len(p[1])//4+1)]==0)[0]
except IndexError: #if none of the points is actually equal to 0
rollval = -np.abs(p[1][:(len(p[1])//4+1)]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
else: #orbit starts in middle or lower half of screen
try:
rollval = np.argwhere(p[1]==616)[0]+len(p[1])//2
except IndexError: #if none of the points is actually equal to 616
rollval = np.abs(p[1][3*(len(p[1])//4):]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
else:
print('need to implement this still... rolling for counterclockwise vorticity.')
raise ValueError
return p.T, I
|
79e9e3fbcdd2bfc1f2f9108f17aeeeb13fc6339d
| 3,644,267
|
import subprocess
import tempfile
import pipes
def run_exkeys(hosts, capture=False):
"""
Runs gpssh-exkeys for the given list of hosts. If capture is True, the
(returncode, stdout, stderr) from the gpssh-exkeys run is returned;
otherwise an exception is thrown on failure and all stdout/err is untouched.
"""
host_opts = []
for host in hosts:
host_opts.extend(['-h', host])
args = [ 'gpssh-exkeys', '-v' ] + host_opts
if not capture:
subprocess.check_call(args)
return
# Capture stdout/err for later use, while routing it through tee(1) so that
# developers can still see the live stream output.
#
# XXX This is a very heavy-weight solution, using pipes.Template() for the
# creation of shell pipeline processes. It's also platform-specific as it
# relies on the functionality of /dev/stdout and /dev/stderr.
#
# The overview: we open up two shell processes running tee(1), using
# pipes.Template(), and connect their standard output to the stdout/err of
# the current Python process using Template.open(). We then connect the
# stdout/stderr streams of subprocess.call() to the stdin of those tee
# pipelines. tee(1) will duplicate all output to temporary files, which we
# read after the subprocess call completes. NamedTemporaryFile() then cleans
# up those files when we return.
with tempfile.NamedTemporaryFile() as temp_out, tempfile.NamedTemporaryFile() as temp_err:
pipe_out = pipes.Template()
pipe_out.append('tee %s' % pipes.quote(temp_out.name), '--')
pipe_err = pipes.Template()
pipe_err.append('tee %s' % pipes.quote(temp_err.name), '--')
with pipe_out.open('/dev/stdout', 'w') as out, pipe_err.open('/dev/stderr', 'w') as err:
ret = subprocess.call(args, stdout=out, stderr=err)
stored_out = temp_out.read()
stored_err = temp_err.read()
return ret, stored_out, stored_err
|
1ecb634e76b2ed68966457a0baac2122da00270f
| 3,644,268
|
def top10():
"""Renders the top 10 page."""
top10_urls = ShortURL.query.order_by(ShortURL.hits.desc()).limit(10)
return render_template("top10.html", urls=top10_urls)
|
781c7e65b94894e1292e626c163dfecb1d966678
| 3,644,269
|
def rename_group(str_group2=None):
"""
Rename OFF food group (pnns_group_2) to a standard name
Args:
str_group2 (str): OFF food group name
Returns:
conv_group (str): standard food group name
"""
#convert_group1 = {'Beverage':['Beverages'],
# 'Cereals':['Cereals and potatoes'],
# 'Meal':['Composite foods'],
# 'Fat':['Fat and sauces'],
# 'Meat':['Fish Meat Eggs'],
# 'Fruits and vegetables':['Fruits and vegetables','fruits-and-vegetables'],
# 'Dairy':['Milk and dairy products'],
# 'Snack':['Salty snacks','Sugary snacks','sugary-snacks'],
# None:[None,'unknown','']}
convert_group2 = {'Beverage':['Alcoholic beverages','Artificially sweetened beverages',
'Fruit juices','Fruit nectars','Non-sugared beverages',
'Sweetened beverages'],
'Cereals':['Bread','Breakfast cereals','Cereals','Legumes','Patatoes'],
'Meal':['One-dish meals','Pizza pies and quiche','Sandwich'],
'Fat':['Dressings and sauces','Fats'],
'Meat':['Tripe dishes','Eggs','Fish and seafood','Meat','Processed meat','Nuts'],
'Fruit':['Fruits','fruits','Dried fruits'],
'Vegetable':['Soups','Vegetables','vegetables'],
'Dairy':['Cheese','Dairy desserts','Ice cream','Milk and yogurt'],
'Snack':['Appetizers','Salty and fatty products','Biscuits and cakes',
'Chocolate products','Sweets','pastries'],
None:[None,'unknown','']}
conv_group = [key for (key, value) in convert_group2.items() if (str_group2 in value)]
conv_group = [None] if not conv_group else conv_group
return conv_group[0]
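A few illustrative calls:

rename_group('Fruit juices')    # -> 'Beverage'
rename_group('Cheese')          # -> 'Dairy'
rename_group('not-a-group')     # -> None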
|
31b52f600fe3a087f8b230c880ae55f0dd63264e
| 3,644,270
|
from typing import Union
import pathlib
import io
import os
def save_png(fig: Figure, path: Union[None, str, pathlib.Path],
width: Union[int, float] = None, height: Union[int, float] = None, unit: str = 'px',
print_info: bool = False) -> Union[str, io.BytesIO]:
"""
Save PNG image of the figure.
:param fig: Figure to save.
:param path: Full path of the image to save. If directory (string ending in slash - '/' or '\\') then
the figure window title is used as a file name. If `None`, in-memory :class:`io.BytesIO`
file will be generated and returned.
:param width: Image width in `unit`. If not provided it will be left as it is.
:param height: Image height in `unit`. If not provided it will be left as it is.
:param unit: Unit of the image width and height, one of: 'px' (pixels), 'mm' (millimeters), 'cm' (centimeters), 'in'/'inch' (inches).
:param print_info: Whether to print information about saved file.
:return: Full path of the generated image if `path` was provided or in-memory :class:`io.BytesIO` file.
"""
if path:
directory, file_name = os.path.split(path)
# Create the directory if not existent
os.makedirs(directory, exist_ok=True)
# If the provided path is only a directory, use window title as filename
if not file_name:
file_name = get_window_title(fig)
# Image path must have .png extension!
if os.path.splitext(file_name)[1] != ".png":
file_name += ".png"
path = os.path.join(directory, file_name)
dpi = fig.get_dpi()
if width or height:
size = fig.get_size_inches()
if unit == 'px':
fig.set_size_inches((width or size[0]) / dpi, (height or size[1]) / dpi)
elif unit in ('mm', 'cm', 'in', 'inch'):
if unit == 'mm':
width /= 25.4
height /= 25.4
elif unit == 'cm':
width /= 2.54
height /= 2.54
# Unit is inches.
fig.set_size_inches(width or size[0], height or size[1])
else:
raise ValueError(f"Unsupported size unit '{unit}'")
width = fig.get_figwidth()
height = fig.get_figheight()
width_px = int(round(width * dpi))
height_px = int(round(height * dpi))
width_mm = width * 25.4
height_mm = height * 25.4
if path:
fig.savefig(path, dpi=dpi)
ret = path
if print_info:
print(f"Saved plot ({width_px}x{height_px} px = {width_mm:.1f}x{height_mm:.1f} mm @ {dpi} dpi)"
f" to '{os.path.normpath(path)}'")
else:
file = io.BytesIO()
fig.savefig(file, dpi=dpi)
file.seek(0)
ret = file
return ret
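A usage sketch covering both the on-disk and in-memory paths (the output directory 'out/' is an arbitrary example):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
save_png(fig, 'out/example.png', width=1200, height=800, unit='px', print_info=True)
buffer = save_png(fig, None, width=8, height=6, unit='in')   # io.BytesIO with the PNG bytes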
|
113973e0633bdce6464d725f50ed7d04a47f6834
| 3,644,271
|
def parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
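A worked example of the expected name layout (prefix, watched tensor, debug-op index, debug op):

parse_debug_node_name("__dbg_Add:0_1_DebugIdentity")
# -> ('Add', 0, 1, 'DebugIdentity')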
|
523f00841d9352725b3561f401ee827274aaa05b
| 3,644,272
|
def run_basic():
"""Check that the windows all open ok (i.e. is GUI functioning?)."""
_initialize()
s = 'Simulation'
p = 'Plots'
menu_paths = [ (s,'Test Pattern'),
(s,'Model Editor'),
(p,'Activity'),
(p,'Connection Fields'),
(p,'Projection'),
(p,'Projection Activity'),
(p,'Preference Maps','Orientation Preference'),
(p,'Tuning Curves','Orientation Tuning') ]
return ft.run([_menu_item_fn(*x) for x in menu_paths],"Running basic GUI tests...")
|
e90546b7312b9c7de5fd812784d91c5ef1c9f22f
| 3,644,273
|
def node_to_evenly_discretized(node):
"""
Parses the evenly discretized mfd node to an instance of the
:class: openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD,
or to None if not all parameters are available
"""
if not all([node.attrib["minMag"], node.attrib["binWidth"],
node.nodes[0].text]):
return None
# Text to float
rates = [float(x) for x in node.nodes[0].text.split()]
return mfd.evenly_discretized.EvenlyDiscretizedMFD(
float(node.attrib["minMag"]),
float(node.attrib["binWidth"]),
rates)
|
168bf8efcacac4eaf5832bbab4b3708e8187d5dd
| 3,644,274
|
from typing import Collection
def delete_comment(request, collection_id, comment_id):
"""Delete comment if the staff or comment owner want to delete."""
collection = get_object_or_404(Collection, id=collection_id)
comment = get_object_or_404(Comment, id=comment_id, collection=collection)
if not request.user.is_authenticated:
messages.error(request, "Stop there! How dare you delete a comment without logging in?")
return redirect('collection', collection_id=collection.id)
if not request.user.is_staff and not request.user.is_superuser and request.user != comment.user:
messages.error(request, "Wait! This is not yours! You can't delete this comment!")
return redirect('collection', collection_id=collection.id)
# After this point, everything is valid now.
# It is safe to delete the comment
comment.delete()
messages.success(request, f"Delete comment successfully!")
return redirect('collection', collection_id=collection.id)
|
e8de0e5b8fb1ca8d6b6f27009f34ef0b8678c7cd
| 3,644,275
|
import numpy
def layers_weights_as_vector(model, initial=True):
"""
Creates a list holding the weights of each layer (Conv and Dense) in the CNN as a vector.
model: A reference to the instance from the cnn.Model class.
initial: When True, the function returns the initial weights of the CNN. When False, the trained weights of the CNN layers are returned. The initial weights are only needed before network training starts. The trained weights are needed to predict the network outputs.
Returns a list (network_weights) holding the weights of the CNN layers as a vector.
"""
network_weights = []
layer = model.last_layer
while "previous_layer" in layer.__init__.__code__.co_varnames:
if type(layer) in [Conv2D, Dense]:
# If the 'initial' parameter is True, append the initial weights. Otherwise, append the trained weights.
if initial == True:
vector = numpy.reshape(layer.initial_weights, newshape=(layer.initial_weights.size))
# vector = pygad.nn.DenseLayer.to_vector(matrix=layer.initial_weights)
network_weights.extend(vector)
elif initial == False:
vector = numpy.reshape(layer.trained_weights, newshape=(layer.trained_weights.size))
# vector = pygad.nn.DenseLayer.to_vector(array=layer.trained_weights)
network_weights.extend(vector)
else:
raise ValueError("Unexpected value to the 'initial' parameter: {initial}.".format(initial=initial))
# Go to the previous layer.
layer = layer.previous_layer
# If the first layer in the network is not an input layer (i.e. an instance of the Input2D class), raise an error.
if not (type(layer) is Input2D):
raise TypeError("The first layer in the network architecture must be an input layer.")
# Currently, the weights of the layers are in the reverse order. In other words, the weights of the first layer are at the last index of the 'network_weights' list while the weights of the last layer are at the first index.
# Reversing the 'network_weights' list to order the layers' weights according to their location in the network architecture (i.e. the weights of the first layer appears at index 0 of the list).
network_weights.reverse()
return numpy.array(network_weights)
|
3a13d44868cb67c8ba757db3ceed8b6cf01bfbfb
| 3,644,276
|
def calculate_moist_adiabatic_lapse_rate(t, p):
"""calculate moist adiabatic lapse rate from pressure, temperature
p: pressure in hPa
t: temperature in Kelvin
returns: moist adiabatic lapse rate in Kelvin/m
"""
es = 611.2*np.exp(17.67*(t-273.15)/(t-29.65)) # Bolton formula, es in Pa
qs = 0.622*es/(p*100-0.378*es)
num = 1 + lv*qs/(Rdry*t)
denom = 1 + lv**2*qs/(cp*Rvap*t**2)
gamma = g/cp*(1-num/denom)
return gamma
|
4a20b15bdee1ce72f10d85b74a68358fa7934093
| 3,644,277
|
def read_cat_file(genomeCatFile):
""" Read in genome categories and create dictionary of category name and
genomes in that category"""
inFile = open(genomeCatFile, 'r')
catDict = {}
for line in inFile:
line = line.strip()
entries = line.split()
genome = entries[0]
cat = entries[1]
if cat in catDict:
catDict[cat].add(genome)
else:
catDict[cat] = {genome}
inFile.close()
return catDict
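A usage sketch with a small whitespace-delimited category file (the file name is illustrative):

with open('categories.txt', 'w') as fh:
    fh.write("genomeA pathogen\ngenomeB commensal\ngenomeC pathogen\n")

read_cat_file('categories.txt')
# -> {'pathogen': {'genomeA', 'genomeC'}, 'commensal': {'genomeB'}}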
|
23a30f29cb62d56a3e0763be34cad45717421815
| 3,644,278
|
import matplotlib.pyplot as plt
def _check_axes(axes):
"""Check if "axes" is an instance of an axis object. If not, use `gca`."""
if axes is None:
axes = plt.gca()
elif not isinstance(axes, Axes):
raise ValueError(
"`axes` must be an instance of matplotlib.axes.Axes. "
"Found type(axes)={}".format(type(axes))
)
return axes
|
c615b622dbf23b7f7f963256cab028c2d1a18706
| 3,644,279
|
import sys
def gauss_method_mpc(filename, bodyname, obs_arr=None, r2_root_ind_vec=None, refiters=0, plot=True):
"""Gauss method high-level function for minor planets (asteroids, comets,
etc.) orbit determination from MPC-formatted ra/dec tracking data. Roots of
8-th order Gauss polynomial are computed using np.roots function. Note that
if `r2_root_ind_vec` is not specified by the user, then the first positive
root returned by np.roots is used by default.
Args:
filename (string): path to MPC-formatted observation data file
bodyname (string): user-defined name of minor planet
obs_arr (int vector): line numbers in data file to be processed
refiters (int): number of refinement iterations to be performed
r2_root_ind_vec (1xlen(obs_arr) int array): indices of Gauss polynomial roots.
plot (bool): if True, plots data.
Returns:
x (tuple): set of Keplerian orbital elements {(a, e, taup, omega, I, omega, T),t_vec[-1]}
"""
# load MPC data for a given NEA
mpc_object_data = load_mpc_data(filename)
#load MPC data of listed observatories (longitude, parallax constants C, S)
mpc_observatories_data = load_mpc_observatories_data('../station_observatory_data/mpc_observatories.txt')
#definition of the astronomical unit in km
# au = cts.au.to(uts.Unit('km')).value
# Sun's G*m value
# mu_Sun = 0.295912208285591100E-03 # au^3/day^2
mu = mu_Sun # cts.GM_sun.to(uts.Unit("au3 / day2")).value
# handle default behavior for obs_arr
# load JPL DE432s ephemeris SPK kernel
# 'de432s.bsp' is automatically loaded by astropy, via jplephem
# 'de432s.bsp' is about 10MB in size and will be automatically downloaded if not present yet in astropy's cache
# for more information, see astropy.coordinates.solar_system_ephemeris documentation
print("")
questions = [
inquirer.List('Ephemerides',
message="Select ephemerides[de432s(default,small in size,faster)','de430(more precise)]:",
choices=['de432s','de430'],
),
]
answers = inquirer.prompt(questions)
global x_ephem
x_ephem=answers["Ephemerides"]
solar_system_ephemeris.set(answers["Ephemerides"])
if obs_arr is None:
obs_arr = list(range(1, len(mpc_object_data)+1))
#the total number of observations used
nobs = len(obs_arr)
# if r2_root_ind_vec was not specified, then use always the first positive root by default
if r2_root_ind_vec is None:
r2_root_ind_vec = np.zeros((nobs-2,), dtype=int)
#auxiliary arrays
x_vec = np.zeros((nobs,))
y_vec = np.zeros((nobs,))
z_vec = np.zeros((nobs,))
a_vec = np.zeros((nobs-2,))
e_vec = np.zeros((nobs-2,))
taup_vec = np.zeros((nobs-2,))
I_vec = np.zeros((nobs-2,))
W_vec = np.zeros((nobs-2,))
w_vec = np.zeros((nobs-2,))
n_vec = np.zeros((nobs-2,))
x_Ea_vec = np.zeros((nobs,))
y_Ea_vec = np.zeros((nobs,))
z_Ea_vec = np.zeros((nobs,))
t_vec = np.zeros((nobs,))
# Speed of light constant
c= 299792.458
print("Consider light propogation time?[y/n]")
check=input()
if(check!='y' and check!='n'):
print("Invalid input.Exiting...\n")
sys.exit()
for j in range (0,nobs-2):
# Apply Gauss method to three elements of data
inds = [obs_arr[j]-1, obs_arr[j+1]-1, obs_arr[j+2]-1]
print('Processing observation #', j)
r1, r2, r3, v2, R, rho1, rho2, rho3, rho_1_sr, rho_2_sr, rho_3_sr, Ea_hc_pos, obs_t = \
gauss_iterator_mpc(mpc_object_data, mpc_observatories_data, inds, refiters=refiters, r2_root_ind=r2_root_ind_vec[j])
# Consider light propagation time
if(check=='y'):
#print(obs_t[0])
#print(obs_t[1])
obs_t[0]= obs_t[0]-(rho_1_sr/c)
obs_t[1]= obs_t[1]-(rho_2_sr/c)
obs_t[2]= obs_t[2]-(rho_3_sr/c)
#print(rho_1_sr)
if j==0:
t_vec[0] = obs_t[0]
x_vec[0], y_vec[0], z_vec[0] = np.matmul(rot_equat_to_eclip, r1)
x_Ea_vec[0], y_Ea_vec[0], z_Ea_vec[0] = np.matmul(rot_equat_to_eclip, earth_ephemeris(obs_t[0])/au)
if j==nobs-3:
t_vec[nobs-1] = obs_t[2]
x_vec[nobs-1], y_vec[nobs-1], z_vec[nobs-1] = np.matmul(rot_equat_to_eclip, r3)
x_Ea_vec[nobs-1], y_Ea_vec[nobs-1], z_Ea_vec[nobs-1] = np.matmul(rot_equat_to_eclip, earth_ephemeris(obs_t[2])/au)
r2_eclip = np.matmul(rot_equat_to_eclip, r2)
v2_eclip = np.matmul(rot_equat_to_eclip, v2)
a_num = semimajoraxis(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
e_num = eccentricity(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
f_num = trueanomaly5(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
n_num = meanmotion(mu, a_num)
a_vec[j] = a_num
e_vec[j] = e_num
taup_vec[j] = taupericenter(obs_t[1], e_num, f_num, n_num)
w_vec[j] = np.rad2deg( argperi(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu) )
I_vec[j] = np.rad2deg( inclination(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2]) )
W_vec[j] = np.rad2deg( longascnode(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2]) )
n_vec[j] = n_num
t_vec[j+1] = obs_t[1]
x_vec[j+1] = r2_eclip[0]
y_vec[j+1] = r2_eclip[1]
z_vec[j+1] = r2_eclip[2]
Ea_hc_pos_eclip = np.matmul(rot_equat_to_eclip, Ea_hc_pos[1])
x_Ea_vec[j+1] = Ea_hc_pos_eclip[0]
y_Ea_vec[j+1] = Ea_hc_pos_eclip[1]
z_Ea_vec[j+1] = Ea_hc_pos_eclip[2]
a_mean = np.mean(a_vec) #au
e_mean = np.mean(e_vec) #dimensionless
taup_mean = np.mean(taup_vec) #deg
w_mean = np.mean(w_vec) #deg
I_mean = np.mean(I_vec) #deg
W_mean = np.mean(W_vec) #deg
n_mean = np.mean(n_vec) #sec
print('\n*** ORBIT DETERMINATION: GAUSS METHOD ***')
print('Observational arc:')
print('Number of observations: ', len(obs_arr))
print('First observation (UTC) : ', Time(t_vec[0], format='jd').iso)
print('Last observation (UTC) : ', Time(t_vec[-1], format='jd').iso)
print('\nAVERAGE ORBITAL ELEMENTS (ECLIPTIC, MEAN J2000.0): a, e, taup, omega, I, Omega, T')
print('Semi-major axis (a): ', a_mean, 'au')
print('Eccentricity (e): ', e_mean)
# print('Time of pericenter passage (tau): ', Time(taup_mean, format='jd').iso, 'JDTDB')
print('Pericenter distance (q): ', a_mean*(1.0-e_mean), 'au')
print('Apocenter distance (Q): ', a_mean*(1.0+e_mean), 'au')
print('Argument of pericenter (omega): ', w_mean, 'deg')
print('Inclination (I): ', I_mean, 'deg')
print('Longitude of Ascending Node (Omega): ', W_mean, 'deg')
print('Orbital period (T): ', 2.0*np.pi/n_mean, 'days')
# PLOT
if plot:
npoints = 500 # number of points in orbit
theta_vec = np.linspace(0.0, 2.0*np.pi, npoints)
t_Ea_vec = np.linspace(t_vec[0], t_vec[-1], npoints)
x_orb_vec = np.zeros((npoints,))
y_orb_vec = np.zeros((npoints,))
z_orb_vec = np.zeros((npoints,))
x_Ea_orb_vec = np.zeros((npoints,))
y_Ea_orb_vec = np.zeros((npoints,))
z_Ea_orb_vec = np.zeros((npoints,))
for i in range(0,npoints):
x_orb_vec[i], y_orb_vec[i], z_orb_vec[i] = xyz_frame2(a_mean, e_mean, theta_vec[i],
np.deg2rad(w_mean), np.deg2rad(I_mean), np.deg2rad(W_mean))
xyz_Ea_orb_vec_equat = earth_ephemeris(t_Ea_vec[i])/au
xyz_Ea_orb_vec_eclip = np.matmul(rot_equat_to_eclip, xyz_Ea_orb_vec_equat)
x_Ea_orb_vec[i], y_Ea_orb_vec[i], z_Ea_orb_vec[i] = xyz_Ea_orb_vec_eclip
ax = plt.axes(aspect='auto', projection='3d')
# Sun-centered orbits: Computed orbit and Earth's
ax.scatter3D(0.0, 0.0, 0.0, color='yellow', label='Sun')
ax.scatter3D(x_Ea_vec, y_Ea_vec, z_Ea_vec, color='blue', marker='.', label='Earth orbit')
ax.plot3D(x_Ea_orb_vec, y_Ea_orb_vec, z_Ea_orb_vec, color='blue', linewidth=0.5)
ax.scatter3D(x_vec, y_vec, z_vec, color='red', marker='+', label=bodyname+' orbit')
ax.plot3D(x_orb_vec, y_orb_vec, z_orb_vec, 'red', linewidth=0.5)
plt.legend()
ax.set_xlabel('x (au)')
ax.set_ylabel('y (au)')
ax.set_zlabel('z (au)')
xy_plot_abs_max = np.max((np.amax(np.abs(ax.get_xlim())), np.amax(np.abs(ax.get_ylim()))))
ax.set_xlim(-xy_plot_abs_max, xy_plot_abs_max)
ax.set_ylim(-xy_plot_abs_max, xy_plot_abs_max)
ax.set_zlim(-xy_plot_abs_max, xy_plot_abs_max)
ax.legend(loc='center left', bbox_to_anchor=(1.04,0.5)) #, ncol=3)
ax.set_title('Angles-only orbit determ. (Gauss): '+bodyname)
plt.show()
return a_mean, e_mean, taup_mean, w_mean, I_mean, W_mean, 2.0*np.pi/n_mean,t_vec[-1]
|
6450a4bb5b2fc66b89edf6ec6189f1a446bfc982
| 3,644,280
|
def removeCable(n, edges):
"""
@param n number of nodes (roads)
@param edges connectivity: list of (x, y, w) edges
"""
fa = initFa(n)
totalW, nodes = 0, []
for x, y, w in edges:
node = Node(x, y, w)
nodes.append(node)
totalW += w
def getW(node):
return node.w
nodes.sort(key=getW)
tmpW = 0
for node in nodes:
if find(fa, node.x) == find(fa, node.y):
continue
fa[find(fa, node.x)] = find(fa, node.y)
tmpW += node.w
return totalW - tmpW
|
4b43cc0ddd1ea89113a95a6771dea97d2b21a0fb
| 3,644,281
|
from read_file import read_selected, narrow
def upload():
"""POST route through which downloading sequence is triggered
:param checked: which pins were selected by user
:returns: log of arrays with pins, files downloaded counts, and notes
"""
DASHRlut = findSNs(compCrawl())
checked = request.get_json()
chosen = narrow(checked, DASHRlut)
log = read_selected(chosen)
return jsonify(log), 200
|
99a00d173574b789e8e6681a0373c2b805a42e39
| 3,644,282
|
def verify_credentials():
"""Verify credentials to gdrive for the current user"""
if 'credentials' not in flask.session:
return flask.redirect(flask.url_for('authorize_app', _external=True))
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if credentials.access_token_expired:
return flask.redirect(flask.url_for('authorize_app', _external=True))
return None
|
96ebe13a7e04f245fa432f0dcbfecdb490367ad9
| 3,644,283
|
def _gaussian_blur(heatmaps, kernel=11):
"""Modulate heatmap distribution with Gaussian.
sigma = 0.3*((kernel_size-1)*0.5-1)+0.8
sigma~=3 if k=17
sigma=2 if k=11;
sigma~=1.5 if k=7;
sigma~=1 if k=3;
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
Returns:
np.ndarray[N, K, H, W]: Modulated heatmap distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
height = heatmaps.shape[2]
width = heatmaps.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmaps[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border),
dtype=np.float32)
dr[border:-border, border:-border] = heatmaps[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmaps[i, j] = dr[border:-border, border:-border].copy()
heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j])
return heatmaps
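A usage sketch on random heatmaps, assuming cv2 and numpy are imported at module level as the function requires:

import numpy as np

heatmaps = np.random.rand(1, 2, 64, 48).astype(np.float32)   # N=1, K=2, H=64, W=48
blurred = _gaussian_blur(heatmaps.copy(), kernel=11)
blurred.shape   # (1, 2, 64, 48); per-joint maxima are rescaled back to the originals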
|
e49edc97eefc2f0de5200e4c8aee794642cb6a1f
| 3,644,284
|
def pose_vec2mat(vec):
"""Converts 6DoF parameters to transformation matrix
Args:
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
Returns:
A transformation matrix -- [B, 4, 4]
"""
# batch_size, _ = vec.get_shape().as_list()
batch_size = tf.shape(vec)[0]
translation = tf.slice(vec, [0, 0], [-1, 3])
translation = tf.expand_dims(translation, -1)
rx = tf.slice(vec, [0, 3], [-1, 1])
ry = tf.slice(vec, [0, 4], [-1, 1])
rz = tf.slice(vec, [0, 5], [-1, 1])
rot_mat = euler2mat(rz, ry, rx)
rot_mat = tf.squeeze(rot_mat, axis=[1])
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch_size, 1, 1])
transform_mat = tf.concat([rot_mat, translation], axis=2)
transform_mat = tf.concat([transform_mat, filler], axis=1)
return transform_mat
|
1ecfb0461bc7ec19c1e730e4499510a890474b33
| 3,644,285
|
import collections
def get_gradients_through_compute_gradients(optimizer, loss, activations):
"""Compute gradients to send to TPU embedding.
Args:
optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer.
Used to call compute_gradients().
loss: a Tensor to call optimizer.compute_gradients() on.
activations: an OrderedDict mapping feature_name to Tensors of activations.
Returns:
An OrderedDict mapping from feature name Strings to Tensors of gradients of
the loss wrt the activations of the features.
"""
activation_list = activations.values()
grads_and_vars = optimizer.compute_gradients(loss, activation_list)
grads = [grad for grad, _ in grads_and_vars]
feature_to_gradient_dict = collections.OrderedDict(
zip(activations.keys(), grads))
return feature_to_gradient_dict
|
2a2ebca1e6024e11f541e3ccaf1fee4acd7ab745
| 3,644,286
|
def distance_to_mesh(mesh, pts, engine="auto", bvh=None):
""" Compute the distance from a set of points to a mesh.
Args:
mesh (:class:`Mesh`): A input mesh.
pts (:class:`numpy.ndarray`): A :math:`N \\times dim` array of query
points.
engine (``string``): BVH engine name. Valid choices are "cgal",
"geogram", "igl" if all dependencies are used. The default is
"auto" where an available engine is automatically picked.
bvh (:class:`BVH`): BVH engine instance (optional)
Returns:
Three values are returned.
* ``squared_distances``: squared distances from each point to mesh.
* ``face_indices`` : the closest face to each point.
* ``closest_points``: the point on mesh that is closest to each
query point.
"""
if not bvh:
bvh = BVH(engine, mesh.dim)
bvh.load_mesh(mesh)
squared_distances, face_indices, closest_points = bvh.lookup(pts)
return squared_distances, face_indices, closest_points
|
c44230d7e9cd18c2d992a85e2fba04a890b55ed8
| 3,644,287
|
from typing import Any
from typing import Tuple
from typing import Dict
def parse_config(settings: Any) -> Tuple[Dict[str, Queue], Dict[str, dict]]:
"""
SAQ configuration parsing.
Args:
settings: The settings (can be pydantic.BaseSettings).
Returns:
Tuple[Dict[str, Queue], Dict[str, dict]]: The SAQ queues and the queue settings.
"""
saq_queues: Dict[str, dict] = getattr(settings, "SAQ_QUEUES", {})
if not isinstance(saq_queues, dict):
raise RuntimeError("SAQ_QUEUES must be a dict, got {}".format(type(saq_queues)))
queue_maps = {}
queue_settings = {}
for q_name, q_param in saq_queues.items():
url = q_param.get("url", None)
if not url:
raise RuntimeError("No url specified for queue {}".format(q_name))
queue = Queue.from_url(url, q_name)
queue_maps[q_name] = queue
queue_settings[q_name] = q_param
return queue_maps, queue_settings
|
d2711efedff319fb181d062593338f32271f39d1
| 3,644,288
|
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Output`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
val = array_ops.zeros(primary.get_shape().as_list(), dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
|
ac940b8d92e4de025a2fc83695adb66a611935ea
| 3,644,289
|
def AdditionalMedicareTax(e00200, MARS,
AMEDT_ec, sey, AMEDT_rt,
FICA_mc_trt, FICA_ss_trt,
ptax_amc, payrolltax):
"""
Computes Additional Medicare Tax (Form 8959) included in payroll taxes.
Notes
-----
Tax Law Parameters:
AMEDT_ec : Additional Medicare Tax earnings exclusion
AMEDT_rt : Additional Medicare Tax rate
FICA_ss_trt : FICA Social Security tax rate
FICA_mc_trt : FICA Medicare tax rate
Taxpayer Characteristics:
e00200 : Wages and salaries
sey : Self-employment income
Returns
-------
ptax_amc : Additional Medicare Tax
payrolltax : payroll tax augmented by Additional Medicare Tax
"""
line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
max(0., line8 - line11))
payrolltax += ptax_amc
return (ptax_amc, payrolltax)
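A worked example with illustrative (not authoritative) parameter values for a single filer with $250,000 of wages and no self-employment income:

ptax_amc, payrolltax = AdditionalMedicareTax(
    e00200=250000., MARS=1,
    AMEDT_ec=[200000., 250000., 125000., 200000., 200000.],
    sey=0., AMEDT_rt=0.009,
    FICA_mc_trt=0.029, FICA_ss_trt=0.124,
    ptax_amc=0., payrolltax=0.)
# ptax_amc == 0.009 * (250000 - 200000) == 450.0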
|
de0e35fbe5c7c09de384e1302cba082149ea5930
| 3,644,290
|
import copy
def append_step_list(step_list, step, value, go_next, mode, tag):
"""from step_list, append the number of times a step needs to be repeated
if runmode or retry is present
:Arguments:
step_list = Ordered list of steps to be executed
step = Current step
value = attempts in runmode/retry
go_next = value of the real next step
mode = runmode or retry
tag = In runmode it is value, in retry it is count
:Return:
step_list = New step list formed by appending the replicated steps
"""
for i in range(0, value):
copy_step = copy.deepcopy(step)
copy_step.find(mode).set(tag, go_next)
copy_step.find(mode).set("attempt", i + 1)
copy_step.find(mode).set(mode+"_val", value)
step_list.append(copy_step)
return step_list
|
b8b5b3614fea0709b484df087ffa3ee2861532c4
| 3,644,291
|
def load_license(request, project_slug):
"""
Reload the license input queryset with the right options for the
access form's current access policy choice. Called via ajax.
"""
user = request.user
project = ActiveProject.objects.filter(slug=project_slug)
if project:
project = project.get()
else:
raise Http404()
form = forms.AccessMetadataForm(instance=project)
form.set_license_queryset(access_policy=int(request.GET['access_policy']))
return render(request, 'project/license_input.html', {'form':form})
|
59fb710cfccfaaf642283e6fb26631f56a39cc1e
| 3,644,292
|
def wt():
"""Return default word tokenizer."""
return WordTokenizer()
|
d9e9a9c3cb99f1c3846ee54b38184d39d67051a7
| 3,644,293
|
import warnings
def epochplot(epochs, *, ax=None, height=None, fc='0.5', ec='0.5',
alpha=0.5, hatch='////', label=None, hc=None,**kwargs):
"""Docstring goes here.
"""
if ax is None:
ax = plt.gca()
ymin, ymax = ax.get_ylim()
if height is None:
height = ymax - ymin
if hc is not None:
try:
hc_before = mpl.rcParams['hatch.color']
mpl.rcParams['hatch.color']=hc
except KeyError:
warnings.warn("Hatch color not supported for matplotlib <2.0")
for ii, (start, stop) in enumerate(zip(epochs.starts, epochs.stops)):
ax.add_patch(
patches.Rectangle(
(start, ymin), # (x,y)
width=stop - start , # width
height=height, # height
hatch=hatch,
facecolor=fc,
edgecolor=ec,
alpha=alpha,
label=label if ii == 0 else "_nolegend_",
**kwargs
)
)
ax.set_xlim([epochs.start, epochs.stop])
if hc is not None:
try:
mpl.rcParams['hatch.color'] = hc_before
except UnboundLocalError:
pass
return ax
|
2e4f993ac48e6f054cd8781f4356b7afed41b369
| 3,644,294
|
def run_dag(
dag_id,
run_id=None,
conf=None,
replace_microseconds=True,
execution_date=None,
):
"""Runs DAG specified by dag_id
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param replace_microseconds: whether microseconds should be zeroed
:return: first dag run - even if more than one Dag Runs were present or None
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))
dagbag = DagBag(dag_folder=dag_model.fileloc)
"""
dagbag = DagBag()
dag_run = DagRun()
runs = _run_dag(
dag_id=dag_id,
dag_run=dag_run,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
replace_microseconds=replace_microseconds,
execution_date=execution_date,
)
return runs[0] if runs else None
|
124954d350a09d576b32e80fae4c56d1c0b2c141
| 3,644,295
|
from typing import Any
import hashlib
import glob
import os
import requests
import html
import re
import uuid
import io
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert is_url(url)
assert num_attempts >= 1
# Lookup from cache.
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
if cache_dir is not None:
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
if len(cache_files) == 1:
return open(cache_files[0], "rb")
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if len(res.content) < 8192:
content_str = res.content.decode("utf-8")
if "download_warning" in res.headers.get("Set-Cookie", ""):
links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
if "Google Drive - Quota exceeded" in content_str:
raise IOError("Google Drive quota exceeded")
match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Save to cache.
if cache_dir is not None:
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
os.makedirs(cache_dir, exist_ok=True)
with open(temp_file, "wb") as f:
f.write(url_data)
os.replace(temp_file, cache_file) # atomic
# Return data as file object.
return io.BytesIO(url_data)
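A usage sketch; the URL is a placeholder, and the is_url helper and caching behave as defined above:

with open_url("https://example.com/data.bin", cache_dir="cache", num_attempts=3) as f:
    payload = f.read()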
|
d2c478c7c9e64423c6d494016e0c46e9ca642984
| 3,644,296
|
def get_function_handle(method, var):
"""
Return a function handle to a given calculation method.
Parameters
----------
method : str
Identifier of the calculation method to return a handle to.
var : dict
Local variables needed in the mu update method.
Returns
-------
f_handle : function
Handle to the calculation method defined in this globals scope.
"""
return globals()['wrap_calculate_using_' + method](var)
|
e9f363908be5e628e2e17a781a3626737a3d3879
| 3,644,297
|
def build_receiver_model(params, ds_meta, utt_len: int, vocab_size: int, pre_conv=None) -> ReceiverModel:
"""
given the size of images from a dataset, and a desired vocab size and utterance length,
creates a ReceiverModel, which will take in images, and utterances, and classify
the images as being consistent with the utterances or not.
"""
p = params
if pre_conv is None:
pre_conv = pre_conv_lib.build_preconv(params=p, ds_meta=ds_meta)
multimodal_classifier = multimodal_classifiers.build_multimodal_classifier(
params=p, pre_conv=pre_conv, ds_meta=ds_meta)
linguistic_encoder = linguistic_encoders.build_linguistic_encoder(
params=p, utt_len=utt_len, vocab_size=vocab_size)
receiver_model = ReceiverModel(
pre_conv=pre_conv,
multimodal_classifier=multimodal_classifier,
linguistic_encoder=linguistic_encoder)
return receiver_model
|
59904d83fa48d390f472b69b5324005d1a28e9c6
| 3,644,298
|
import math
def fermi_fitness(strategy_pair, N, i, utilities, selection_intensity=1):
"""
Return the fermi fitness of a strategy pair in a population with
N total individuals and i individuals of the first type.
"""
F, G = [math.exp(k) for k in fitness(strategy_pair, N, i, utilities)]
return F / (F + G), G / (F + G)
|
fc2631d85ad0fa8ce879ff1fffd6976b1a1e1abf
| 3,644,299
|