| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def _has_access_to_course(user, access_level, course_key):
"""
Returns True if the given user has access_level (= staff or
instructor) access to the course with the given course_key.
This ensures the user is authenticated and checks whether they are global staff or have staff / instructor access to the course.
access_level = string, either "staff" or "instructor"
"""
if user is None or (not user.is_authenticated):
debug("Deny: no user or anon user")
return ACCESS_DENIED
if is_masquerading_as_student(user, course_key):
return ACCESS_DENIED
global_staff, staff_access, instructor_access = administrative_accesses_to_course_for_user(user, course_key)
if global_staff:
debug("Allow: user.is_staff")
return ACCESS_GRANTED
if access_level not in ('staff', 'instructor'):
log.debug("Error in access._has_access_to_course access_level=%s unknown", access_level)
debug("Deny: unknown access level")
return ACCESS_DENIED
if staff_access and access_level == 'staff':
debug("Allow: user has course staff access")
return ACCESS_GRANTED
if instructor_access and access_level in ('staff', 'instructor'):
debug("Allow: user has course instructor access")
return ACCESS_GRANTED
debug("Deny: user did not have correct access")
return ACCESS_DENIED
|
c6e09f58a849e9ffc0335441144c9246326f2d3c
| 3,647,500
|
import tempfile
import os
import tarfile
import pathlib
def download_and_mosaic_through_ftps(file_list, tmp_path, cds_url, cds_path,
cds_sso, cds_pw, bbox, crs, geoTransform):
""" Download Copernicus DEM tiles and create mosaic according to satellite
imagery tiling scheme
file_list : list with strings
list of DEM tile filenames
tmp_path : string
work path where to download and untar DEM tiles
cds_url : string
Copernicus Data Service url
cds_path : string
data directory of interest
cds_sso : string
single sign-on
cds_pw : string
password
bbox : list
bounding box formatted as (x_min, y_min, x_max, y_max)
crs : string
coordinate reference string
geoTransform : tuple, size=(6,1)
affine transformation coefficients of the DEM tile
Returns
-------
dem_clip : DataArray object
retiled DEM
"""
with tempfile.TemporaryDirectory(dir=tmp_path) as tmpdir:
for file_name in file_list:
get_file_from_ftps(cds_url, cds_sso, cds_pw,
cds_path, file_name, tmpdir)
tar_tiles_filenames = [f for f in os.listdir(tmpdir) if f.endswith('.tar')]
for tar_fname in tar_tiles_filenames:
tar_file = tarfile.open(os.path.join(tmpdir, tar_fname), mode="r|")
tar_file.extractall(members=copDEM_files(tar_file),
path=tmpdir)
tar_file.close()
dem_tiles_filename = pathlib.Path(tmpdir).glob("**/*_DEM.tif")
dem_clip = mosaic_tiles(dem_tiles_filename, bbox, crs, geoTransform)
# sometimes out of bound tiles are still present,
# hence rerun a clip to be sure
# dem_clip = dem_clip.rio.clip_box(*bbox)
return dem_clip
|
dbe1a34f2ac866dfcf9147228b04937bc8bbb4db
| 3,647,501
|
def chi2(observed, expected):
"""
Return the chi2 sum of the provided observed and expected values.
:param observed: list of floats.
:param expected: list of floats.
:return: chi2 (float).
"""
if 0 in expected:
return 0.0
return sum((_o - _e) ** 2 / _e ** 2 for _o, _e in zip(observed, expected))
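# A hedged usage sketch of chi2() as implemented above; note that each term is
# normalized by expected**2 rather than by expected as in the textbook statistic.
observed = [10.0, 12.0]
expected = [11.0, 11.0]
chi2(observed, expected)  # ((10-11)**2 + (12-11)**2) / 11**2 ~= 0.0165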
|
6050e98a823671de4a518d584a6e39bc519fa610
| 3,647,502
|
import math
def range_bearing(p1: LatLon, p2: LatLon, R: float = NM) -> tuple[float, Angle]:
"""Rhumb-line course from :py:data:`p1` to :py:data:`p2`.
See :ref:`calc.range_bearing`.
This is the equirectangular approximation, without even the minimal corrections for a non-spherical Earth.
:param p1: a :py:class:`LatLon` starting point
:param p2: a :py:class:`LatLon` ending point
:param R: radius of the earth in appropriate units;
default is nautical miles.
Values include :py:data:`KM` for kilometers,
:py:data:`MI` for statute miles and :py:data:`NM` for nautical miles.
:returns: 2-tuple of range and bearing from p1 to p2.
"""
d_NS = R * (p2.lat.radians - p1.lat.radians)
d_EW = (
R
* math.cos((p2.lat.radians + p1.lat.radians) / 2)
* (p2.lon.radians - p1.lon.radians)
)
d = math.hypot(d_NS, d_EW)
tc = math.atan2(d_EW, d_NS) % (2 * math.pi)
theta = Angle(tc)
return d, theta
|
68860efbea6d8f1b36ff9e7b91a2a3779a57e611
| 3,647,503
|
import json
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def cf_model_to_life(first_best, update_prod=False, pr_cache=False):
"""
We simulate the response of several variables to a shock to z and x.
We fixed the cross-section distribution of (X,Z) and set rho to rho_start
We apply a permanent shock to either X or Z, and fix the employment relationship, as well as (X,Z)
We then simulate forward the Rho, and the wage, and report several different variable of interest.
"""
nt = 20*4
np.random.seed(JMP_CONF['seeds']['model_to_life'])
# we load the model
model = wd.FullModel.load("res_main_model.pkl")
p = model.p
p.tax_expost_tau = p.tax_tau
p.tax_expost_lambda = p.tax_lambda
# we simulate from the model to get a cross-section
sim = wd.Simulator(model, p)
sdata = sim.simulate().get_sdata()
# we construct the different starting values
tm = sdata['t'].max()
d0 = sdata.query('e==1 & t==@tm')[['x','z','h','r']]
# we start at target rho
R0 = model.target_rho[ (d0['z'],d0['x']) ]
# starting with X and Z shocks
def get_z_pos(pr):
Z1_pos = np.minimum(sdata['z'].max(), d0['z'] + 1)
Z1_pos = np.where(np.random.uniform(size=len(Z1_pos)) > pr, Z1_pos, d0['z'] )
return(Z1_pos)
def get_z_neg(pr):
Z1_neg = np.maximum(0, d0['z'] - 1)
Z1_neg = np.where(np.random.uniform(size=len(Z1_neg)) > pr, Z1_neg, d0['z'] )
return(Z1_neg)
def get_x_pos(pr):
Xtrans_pos = np.array([1,2,3,4,4,6,7,8,9,9,11,12,13,14,14],int)
X1_pos = Xtrans_pos[d0['x']]
X1_pos = np.where(np.random.uniform(size=len(X1_pos)) > pr, X1_pos, d0['x'] )
return(X1_pos)
def get_x_neg(pr):
Xtrans_neg = np.array([0,0,1,2,3, 5,5,6,7,8, 10,10,11,12,13],int)
X1_neg = Xtrans_neg[d0['x']]
X1_neg = np.where( np.random.uniform(size=len(X1_neg)) > pr, X1_neg, d0['x'] )
return(X1_neg)
# simulate a control group
var_name = {'x':r'worker productivity $x$',
'w':r'log earnings $\log w$',
'W1':'worker promised value $V$',
'lceq':'worker cons. eq.',
'Pi':r'firm present value $J(x,z,V)$',
'y':r'log match output $\log f(x,z)$',
'pr_j2j':'J2J probability',
'pr_e2u':'E2U probability',
'target_wage':r'log of target wage $\log w^*(x,z)$',
'vs':'worker search decision $v_1$',
'effort':'effort cost $c(e)$'}
var_list = { k:'mean' for k in var_name.keys() }
def sim_agg(dd):
# compute consumption equivalent for W1
dd['lceq'] = model.pref.log_consumption_eq(dd['W1'])
dd['lpeq'] = model.pref.log_profit_eq(dd['W1'])
return(dd.groupby('t').agg(var_list))
if first_best:
model_fb = wd.FullModel.load("res_main_model_fb.pkl")
for iz in range(model_fb.p.num_z):
for ix in range(model_fb.p.num_x):
model_fb.rho_star[iz,:,ix] = model_fb.rho_grid
sim.model = model_fb
# let's find rho_star for the first best model
I=range(p.num_v)[::-1]
R0_fb = np.zeros((p.num_z,p.num_x))
for ix in range(p.num_x):
for iz in range(p.num_z):
R0_fb[iz,ix] = np.interp( 0.0,
model_fb.Vf_J[iz,I,ix],
model_fb.rho_grid[I])
R0 = R0_fb[ (d0['z'],d0['x']) ]
sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=False, update_z=False, pb=True))
# we run for a grid of probabilities
if pr_cache:
with open("res_cf_pr_fb{}.json".format(first_best)) as f:
all = json.load(f)
else:
all = []
vec = np.linspace(0,1,10)
for i in range(len(vec)):
logging.info("simulating {}/{}".format(i, len(vec)))
res = {}
res['pr'] = vec[i]
pr = vec[i]
res['x_pos'] = sim.simulate_force_ee(
get_x_pos(pr), d0['z'],d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['x_neg'] = sim.simulate_force_ee(
get_x_neg(pr), d0['z'],d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['z_pos'] = sim.simulate_force_ee(
d0['x'], get_z_pos(pr), d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
res['z_neg'] = sim.simulate_force_ee(
d0['x'], get_z_neg(pr), d0['h'],R0, nt,
update_x=False, update_z=False, pb=True)['y'].mean()
all.append(res)
# save to file!
# with open("res_cf_pr_fb{}.json".format(first_best), 'w') as fp:
# json.dump(all, fp)
df = pd.DataFrame(all)
df = df.sort_values(['x_pos'])
pr_x_pos = np.interp( sdata0['y'].mean() + 0.1, df['x_pos'] , df['pr'] )
df = df.sort_values(['x_neg'])
pr_x_neg = np.interp( sdata0['y'].mean() - 0.1, df['x_neg'] , df['pr'] )
df = df.sort_values(['z_pos'])
pr_z_pos = np.interp( sdata0['y'].mean() + 0.1, df['z_pos'] , df['pr'] )
df = df.sort_values(['z_neg'])
pr_z_neg = np.interp( sdata0['y'].mean() - 0.1, df['z_neg'] , df['pr'] )
logging.info(" chosen probability x pos:{}".format(pr_x_pos))
logging.info(" chosen probability x neg:{}".format(pr_x_neg))
logging.info(" chosen probability z pos:{}".format(pr_z_pos))
logging.info(" chosen probability z neg:{}".format(pr_z_neg))
sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=update_prod, update_z=update_prod, pb=True))
# finaly we simulate at the probabilities that we have chosen.
sdata_x_pos = sim_agg(sim.simulate_force_ee(
get_x_pos(pr_x_pos),d0['z'],d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_x_neg = sim_agg(sim.simulate_force_ee(
get_x_neg(pr_x_neg),d0['z'],d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_z_pos = sim_agg(sim.simulate_force_ee(
d0['x'],get_z_pos(pr_z_pos),d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
sdata_z_neg = sim_agg(sim.simulate_force_ee(
d0['x'],get_z_neg(pr_z_neg),d0['h'],R0, nt,
update_x=update_prod, update_z=update_prod,pb=True))
# preparing the lead and lag plots
pp0 = lambda v : np.concatenate([ np.zeros(5), v ])
ppt = lambda v : np.concatenate([ [-4,-3,-2,-1,0], v ])
to_plot = {'w','pr_j2j','pr_e2u','vs','effort','Pi','y','W1','target_wage'}
to_plot = {k:v for k,v in var_name.items() if k in to_plot}
# Z shock response
plt.clf()
# plt.rcParams["figure.figsize"]=12,12
plt.figure(figsize=(12, 12), dpi=80)
for i,name in enumerate(to_plot.keys()):
plt.subplot(3, 3, i+1)
plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos[name] - sdata0[name]) )
plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_neg[name] - sdata0[name]), linestyle='--')
#plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos_fb[name] - sdata0[name]), linestyle='dashdot')
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) )
plt.axhline(0,linestyle=':',color="black")
plt.xlabel(var_name[name])
#plt.xlabel('years')
plt.xticks(range(0,21,5))
plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
if first_best:
plt.savefig('../figures/figurew6-ir-zshock-fb.pdf', bbox_inches='tight')
else:
plt.savefig('../figures/figure4-ir-zshock.pdf', bbox_inches='tight')
plt.clf()
# plt.rcParams["figure.figsize"]=12,12
plt.figure(figsize=(12, 12), dpi=80)
for i,name in enumerate(to_plot.keys()):
plt.subplot(3, 3, i+1)
plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) ,ls='--')
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) )
#plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) )
plt.axhline(0,linestyle=':',color="black")
plt.xlabel(var_name[name])
plt.xticks(range(0,21,5))
plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
if first_best:
plt.savefig('../figures/figurew5-ir-xshock-fb.pdf', bbox_inches='tight')
else:
plt.savefig('../figures/figure3-ir-xshock.pdf', bbox_inches='tight')
|
131fd2a0edb202adacafd9a6416fecb7a1f77dc7
| 3,647,504
|
import geopandas as gpd
import numpy as np
from scipy import stats
def kde_interpolation(poi, bw='scott', grid=None, resolution=1, area=None, return_contour_geojson=False):
"""Applies kernel density estimation to a set points-of-interest
measuring the density estimation on a grid of places (arbitrary points
regularly spaced).
Parameters
----------
poi : GeoDataFrame.
Corresponds to input data.
bw : 'scott', 'silverman' or float.
The bandwidth for kernel density estimation. See the `scipy docs`_ for the bw_method parameter of gaussian_kde.
grid : GeoDataFrame or None, default is None.
If a grid is not given, then it is provided according to the area parameter
and resolution.
resolution : float, default is 1.
Space in kilometers between the arbitrary points of resulting grid.
area : GeoDataFrame or None, default is None.
If area is given, grid will be bounded accordingly with the GeoDataFrame passed.
return_contour_geojson : bool, default is False.
If True, it returns the result of the kde as a contourplot in the geojson format.
Returns
-------
GeoDataFrame with a grid of points regularly spaced with the respective
density values for the input points-of-interest given.
Example
-------
>>> import geohunter as gh
>>> poi = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)',
amenity=['hospital' , 'police'], natural='*')
>>> neighborhood = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)',
largest_geom=True,
name='Ponta Negra')
>>> result = kde_interpolation(poi, bw='scott', area=neighborhood, resolution=0.5)
>>> ax = area.plot(edgecolor='black', color='white')
>>> result.plot(column='density', ax=ax)
.. _scipy docs:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html
"""
lonv, latv = None, None
if grid is None and area is None:
raise ValueError('grid or area must be given.')
if grid is None and isinstance(area, gpd.GeoDataFrame):
grid, lonv, latv = make_gridpoints(area, resolution, return_coords=True)
assert isinstance(poi, gpd.GeoDataFrame)
kernel = stats.gaussian_kde(np.vstack([poi.centroid.x, poi.centroid.y]),
bw_method=bw)
grid_ = grid[:]
grid_['density'] = kernel(grid[['lon', 'lat']].values.T)
if return_contour_geojson:
assert lonv is not None and latv is not None, \
"grid should not be passed for this operation. Try to pass area and pick a resolution level."
return contour_geojson(grid_['density'], lonv, latv,
cmin=grid_['density'].min(),
cmax=grid_['density'].max())
else:
return grid_
|
f0473e459e42075a3ad4070325aecb229b6b2d89
| 3,647,505
|
def nums2tcrs(nums):
"""Converts a list containing lists of numbers to amino acid sequences. Each number is considered to be an index of the alphabet."""
tcrs_letter=[]
n=len(nums)
for i in range(n):
num=nums[i]
tcr=''
for j in range(len(num)):
tcr+=alphabet[num[j]]
tcrs_letter.append(tcr)
return tcrs_letter
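# Hedged usage sketch: `alphabet` is a module-level global in the source, so a
# hypothetical amino-acid alphabet is defined here purely for illustration.
alphabet = 'ACDEFGHIKLMNPQRSTVWY'
nums2tcrs([[0, 1, 2], [3, 3]])  # -> ['ACD', 'EE']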
|
3f366e0bd593b799c7e88c84d583e7c6aeee066f
| 3,647,506
|
def extract_columns(data):
""" EXTRACTS COLUMNS TO USE IN `DictWriter()` """
columns = []
column_headers = data[0]
for key in column_headers:
columns.append(key)
return columns
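# Minimal sketch of feeding extract_columns() into csv.DictWriter; the sample
# rows and the output filename are hypothetical.
import csv

rows = [{"name": "alice", "score": 10}, {"name": "bob", "score": 7}]
with open("out.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=extract_columns(rows))
    writer.writeheader()
    writer.writerows(rows)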
|
6df143107612d311ab3c8870b9eccd3528ac3802
| 3,647,507
|
import numpy
def cylindric_grid(dr, dz, origin_z=None, layer=False, material="dfalt"):
"""
Generate a cylindric mesh as a radial XZ structured grid.
Parameters
----------
dr : array_like
Grid spacing along X axis.
dz : array_like
Grid spacing along Z axis.
origin_z : scalar, optional, default None
Depth of origin point.
layer : bool, optional, default False
If `True`, mesh will be generated by layers.
material : str, optional, default 'dfalt'
Default material name.
Returns
-------
toughio.Mesh
Output cylindric mesh.
"""
if not isinstance(dr, (list, tuple, numpy.ndarray)):
raise TypeError()
if not isinstance(dz, (list, tuple, numpy.ndarray)):
raise TypeError()
if not (origin_z is None or isinstance(origin_z, (int, float))):
raise TypeError()
if not isinstance(material, str):
raise TypeError()
dr = numpy.asarray(dr)
dz = numpy.asarray(dz)
if not (dr > 0.0).all():
raise ValueError()
if not (dz > 0.0).all():
raise ValueError()
origin_z = origin_z if origin_z is not None else -dz.sum()
mesh = structured_grid(
dr,
[1.0],
dz,
origin=[0.0, -0.5, origin_z],
layer=layer,
material=material,
)
return CylindricMesh(
dr,
dz,
layer,
points=mesh.points,
cells=mesh.cells,
point_data=mesh.point_data,
cell_data=mesh.cell_data,
field_data=mesh.field_data,
)
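# Hedged usage sketch: 10 radial cells of 1 m and 5 vertical cells of 2 m.
# The material name is hypothetical; structured_grid and CylindricMesh are
# assumed to come from the surrounding module.
mesh = cylindric_grid(dr=numpy.ones(10), dz=numpy.full(5, 2.0), material="ROCK1")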
|
bf710bc212068ec76eb19edce3e8493689535697
| 3,647,508
|
import urllib
def get_clip_preview_feedback(program, event, classifier, start_time, audio_track, reviewer):
"""
Gets the feedback provided by a user for a Segment's clip
Returns:
Feedback if present. Empty dictionary if no feedback exists.
"""
event = urllib.parse.unquote(event)
program = urllib.parse.unquote(program)
classifier = urllib.parse.unquote(classifier)
start_time = Decimal(urllib.parse.unquote(start_time))
tracknumber = urllib.parse.unquote(audio_track)
clip_preview_table = ddb_resource.Table(CLIP_PREVIEW_FEEDBACK_TABLE_NAME)
response = clip_preview_table.query(
KeyConditionExpression=Key("PK").eq(
f"{program}#{event}#{classifier}#{str(start_time)}#{str(tracknumber)}#{reviewer}")
)
if "Items" not in response or len(response["Items"]) == 0:
return {}
return response["Items"][0]
|
578952869606951057b8b8797698c320a02d1d00
| 3,647,509
|
import ast
import numpy
def interp(specStr, t):
"""Return the current value of t using linear interpolation.
<specStr> is a string containing a list of pairs e.g. '[[0,20],[30,65],[60,50],[90,75]]'
The first element of each pair is DAYS. The second is a NUMBER.
<t> is time in seconds"""
specList = ast.literal_eval(specStr)
X = [i[0] for i in specList]
Y = [i[1] for i in specList]
day = t/(60*60*24.0)
return numpy.interp(day,X,Y)
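# Worked example: the spec ramps from 20 at day 0 to 65 at day 30, so at
# t = 15 days the interpolated value is halfway between, i.e. 42.5.
interp('[[0,20],[30,65],[60,50],[90,75]]', 15 * 24 * 60 * 60)  # -> 42.5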
|
bc60affe122f2d17044e01a01509231e71eda47d
| 3,647,510
|
from bs4 import BeautifulSoup
def time_is(location):
"""
Retrieves the time in a location by parsing the time element in the HTML from Time.is.
:param location: str location of the place you want to find time (works for small towns as well).
:return: time str or None on failure.
"""
if BeautifulSoup:
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/51.0.2704.106 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Referrer': 'http://time.is/',
}
post_url = 'http://time.is/' + str(location)
time_data = util.web.http_get(post_url, header=header)
time_html = time_data['content']
soup = BeautifulSoup(time_html, "html.parser")
time = ''
try:
for hit in soup.findAll(attrs={'id': 'twd'}):
time = hit.contents[0].strip()
except KeyError:
pass
return time
else:
return None
|
e8f6675199f070fcad7eead98187683b48417757
| 3,647,511
|
import logging
def _generate_template_context(arguments: PackagingResourceArguments,
manifest: OdahuProjectManifest,
output_folder: str) -> DockerTemplateContext:
"""
Generate Docker packager context for templates
"""
logging.info('Building context for template')
return DockerTemplateContext(
model_name=manifest.model.name,
model_version=manifest.model.version,
odahuflow_version=manifest.odahuflowVersion,
timeout=arguments.timeout,
host=arguments.host,
port=arguments.port,
workers=arguments.workers,
threads=arguments.threads,
pythonpath=output_folder,
wsgi_handler=f'{HANDLER_MODULE}:{HANDLER_APP}',
model_location=ODAHU_SUB_PATH_NAME,
entrypoint_target=ENTRYPOINT_TEMPLATE,
handler_file=f'{HANDLER_MODULE}.py',
base_image=arguments.dockerfileBaseImage,
conda_file_name=CONDA_FILE_NAME,
conda_server_file_name=CONDA_SERVER_FILE_NAME,
entrypoint_docker=ENTRYPOINT_TEMPLATE
)
|
e973a44949d6d2df8bfcbf0be42b8214d1c95352
| 3,647,512
|
def get_records(fname):
"""
Read the records of an IRAF database file into a python list
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
f = open(fname)
dtb = f.read()
f.close()
recs = dtb.split('begin')[1:]
records = [Record(r) for r in recs]
return records
|
a1eb4500afcd1379db1efe8674c1ff256f2861b5
| 3,647,513
|
from typing import List
def get_all_clips_matching_filter(fid: int) -> List[Clip]:
"""
Gets all the clips that are part of the project and match the filter
:param fid: The id of the filter the clips should match
:return: A list of all clips that are part of the project and match the filter
"""
filter = get_filter_by_id(fid)
assert filter is not None
clips = get_all_clips_in_project(filter.project.id)
res = []
for clip in clips:
if clip.clip_match_filter(filter):
res.append(clip)
return res
|
eb69bf40ad397e970d85b425d4c2c0b25ee345fc
| 3,647,514
|
def get_gushim():
"""
get gush_id metadata
"""
detailed = request.args.get('detailed', '') == 'true'
gushim = helpers._get_gushim(fields={'gush_id': True, 'last_checked_at': True, '_id': False})
if detailed:
# Flatten list of gushim into a dict
g_flat = dict((g['gush_id'], {"gush_id": g['gush_id'],
"last_checked_at": g['last_checked_at'],
"plan_stats": {}}) for g in gushim)
# Get plan statistics from DB
stats = helpers._get_plan_statistics()
# Merge stats into gushim dict
for g in stats['result']:
try:
gush_id = g['_id']['gush_id']
status = g['_id']['status']
g_flat[gush_id]['plan_stats'][status] = g['count']
except KeyError as e:
# Gush has plans but is missing from list of gushim?
app.logger.warn("Gush #%d has plans but is not listed in the Gushim list", gush_id)
app.log_exception(e)
# De-flatten our dict
gushim = g_flat.values()
return helpers._create_response_json(gushim)
|
93a941090f515bb726e305856ec6e0ea644b5a34
| 3,647,515
|
def dump_source(buf, id):
"""Dump BASIC source."""
if id == ID_SP5030:
line_end_code = 0x0d
src_end_code = 0x0000
kind = "SP-5030"
elif id == ID_SBASIC:
line_end_code = 0x00
src_end_code = 0x0000
kind = "S-BASIC"
elif id == ID_HUBASIC:
line_end_code = 0x00
src_end_code = 0x0000
kind = "Hu-BASIC"
else:
return 1
if not found_word_endcode(buf, src_end_code):
print("Not found %s end code (0x%04X)" % (kind, src_end_code))
return 1
p = 0
while True:
line_length = get_word(buf, p)
if line_length == src_end_code:
# Found Source end code
break
# get 1 line data
line = buf[p:p + line_length]
if get_last_byte(line) != line_end_code:
print("Not found %s line end code (0x%02X)" % (kind, line_end_code))
return 1
line_number = get_word(line, 2)
if id == ID_SP5030:
lstr = get_line_sp5030(line, 4, line_end_code)
elif id == ID_SBASIC:
lstr = get_line_sbasic(line, 4, line_end_code)
elif id == ID_HUBASIC:
lstr = get_line_hubasic(line, 4, line_end_code)
if jp_flag:
# print("%d %s" % (line_number, lstr.encode('utf-8')))
print("%d %s" % (line_number, lstr.encode('cp932')))
else:
print("%d %s" % (line_number, lstr))
p += line_length
return 0
|
598fe1d9dd4be6f1c651be4f81bc9f8290496c3a
| 3,647,516
|
def dense_layers(sequences, training, regularizer, initializer,
num_layers=3, activation=tf.nn.relu):
"""
Create a chain of dense (fully-connected) neural network layers.
Args:
sequences (tf.Tensor): Input sequences.
training (bool): Whether the mode is training or not.
regularizer: TF weight reqularizer.
initializer: TF weight initializer.
num_layers (int): Number of dense layers to stack.
activation (function): TF activation function.
Returns:
tf.Tensor: Output tensor.
"""
with tf.variable_scope('dense'):
output = sequences
for _ in range(num_layers):
output = tf.layers.dense(output, FLAGS.num_units_dense,
activation=activation,
kernel_initializer=initializer,
kernel_regularizer=regularizer)
output = tf.minimum(output, FLAGS.relu_cutoff)
output = tf.layers.dropout(output, rate=FLAGS.dense_dropout_rate, training=training)
# output = [batch_size, time, num_units_dense]
return output
|
72cebd7eb6487555c3efe8e6c14954dc2886e0c3
| 3,647,517
|
def apply_cst(im, cst):
""" Applies CST matrix to image.
Args:
im: input ndarray image (height x width x channel).
cst: a 3x3 CST matrix.
Returns:
transformed image.
"""
result = im.copy()  # copy so each output channel is computed from the original values
for c in range(3):
result[:, :, c] = (cst[c, 0] * im[:, :, 0] + cst[c, 1] * im[:, :, 1] +
cst[c, 2] * im[:, :, 2])
return result
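# Minimal sanity-check sketch with a hypothetical random image: using the
# identity matrix as the CST should leave the image unchanged.
import numpy as np

im_demo = np.random.rand(8, 8, 3)
assert np.allclose(apply_cst(im_demo, np.eye(3)), im_demo)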
|
7c63d07413bad5fcebf2dfe5f83f205d16280957
| 3,647,518
|
from typing import Tuple
import torch
from torch import nn
import torch.utils.data as data_utils
from sklearn.metrics import roc_auc_score, precision_recall_curve
def show_binary_classification_accuracy(best_m: nn.Module, local_loader: data_utils.DataLoader, chatty = False) -> Tuple:
"""
Given the model and dataloader, calculate the classification accuracy.
Returns accuracy, true_positives, true_negatives, false_positives, false_negatives, roc_auc, pr, pred_list, lab_list for use elsewhere.
:param best_m:
:param local_loader:
:return:
"""
correct = 0; total = 0
false_positives = 0
false_negatives = 0
true_positives = 0
true_negatives = 0
pred_list = []
lab_list = []
with torch.no_grad():
for data, labels in local_loader:
outputs = best_m(data)
predicted = torch.argmax(outputs, dim=1)
#print(predicted)
#print(labels.shape[0])
total += labels.shape[0]
#print(labels.shape[0])
#print(labels)
correct += int((predicted == labels).sum())
pred_list.extend(predicted.detach().flatten().numpy())
lab_list.extend(labels.detach().flatten().numpy())
#Calculate false positives, etc.
for kt in zip(predicted, labels):
if kt[0] == kt[1] ==1:
true_positives+=1
elif kt[0] == kt[1] == 0:
true_negatives+=1
elif kt[0] == 1 and kt[1] == 0:
false_positives+=1
elif kt[0] == 0 and kt[1] == 1:
false_negatives+=1
accuracy = correct/total
print("Accuracy: %f" % (accuracy))
auc = roc_auc_score(lab_list, pred_list)
pr = precision_recall_curve(lab_list, pred_list)
if chatty:
print("True Positives", true_positives, " False Positives", false_positives, f" at {false_positives/(total-correct):.2f}")
print("True Negatives", true_negatives, " False Negatives", false_negatives, f" at {false_negatives/(total-correct):.2f}")
return accuracy, true_positives, true_negatives, false_positives, false_negatives, auc, pr, pred_list, lab_list
|
7743c51a8f64c46c625ccc3b8737b9553f79334f
| 3,647,519
|
import argparse
def validate_accelerator_count(accel: Accelerator, count: int) -> int:
"""Raises an error if the count isn't valid for the supplied accelerator, else
returns the count.
"""
is_gpu = accel in GPU
ucase = accelerator_name(is_gpu)
valid_counts = accelerator_counts(accel)
if not _AccelCountMT[accel].get(count):
raise argparse.ArgumentTypeError(
with_advice_suffix(
accel, "{} {}s of type {} aren't available \
for any machine type. Try one of the following counts: {}\n".format(
count, ucase, accel.name, valid_counts)))
return count
|
b3f422710827eaa5cc95e3f896aefed353f8de1d
| 3,647,520
|
from typing import Tuple
import os
from typing import Dict
from typing import Any
import torch
def objective(trial: optuna.trial.Trial, log_dir: str, device, backbone) -> Tuple[float, int, float]:
"""Optuna objective.
Args:
trial
Returns:
float: score1 (e.g. f1 score)
int: score2 (e.g. number of params)
float: score3 (mean inference time)
"""
hyperparams = search_hyperparam(trial)
if backbone:
model = Efficientnet_b0()
model_path = os.path.join(log_dir, "best.pt") # result model will be saved in this path
print(f"Model save path: {model_path}")
model.to(device)
else:
model_config: Dict[str, Any] = {}
model_config["input_channel"] = 3
img_size = hyperparams["IMG_SIZE"]
model_config["INPUT_SIZE"] = [img_size, img_size]
model_config["depth_multiple"] = trial.suggest_categorical(
"depth_multiple", [0.25, 0.5, 0.75, 1.0]
)
model_config["width_multiple"] = trial.suggest_categorical(
"width_multiple", [0.25, 0.5, 0.75, 1.0]
)
model_config["backbone"], module_info = search_model(trial)
model = Model(model_config, verbose=True)
model_path = os.path.join(log_dir, "best.pt") # result model will be saved in this path
print(f"Model save path: {model_path}")
model.to(device)
model.model.to(device)
# check ./data_configs/data.yaml for config information
data_config: Dict[str, Any] = {}
data_config["DATA_PATH"] = DATA_PATH
data_config["DATASET"] = "TACO"
data_config["IMG_SIZE"] = hyperparams["IMG_SIZE"]
data_config["AUG_TRAIN"] = "randaugment_train"
data_config["AUG_TEST"] = "simple_augment_test"
data_config["AUG_TRAIN_PARAMS"] = {
"n_select": hyperparams["n_select"],
}
data_config["AUG_TEST_PARAMS"] = None
data_config["BATCH_SIZE"] = hyperparams["BATCH_SIZE"]
data_config["EPOCHS"] = hyperparams["EPOCHS"]
data_config["VAL_RATIO"] = 0.2
data_config["INIT_LR"] = hyperparams["INIT_LR"]
data_config["FP16"] = True
data_config["SUBSET_SAMPLING_RATIO"] = 0.5 # 0 means full data
data_config["LOSS"] = 'CrossEntropy_Weight'
trial.set_user_attr('hyperparams', hyperparams)
if backbone:
mean_time = check_runtime(
model,
[3]+[224, 224],
device,
)
else:
trial.set_user_attr('model_config', model_config)
mean_time = check_runtime(
model.model,
[model_config["input_channel"]] + model_config["INPUT_SIZE"],
device,
)
trial.set_user_attr('data_config', data_config)
for key, value in trial.params.items():
print(f" {key}:{value}")
model_info(model, verbose=True)
train_loader, val_loader, test_loader = create_dataloader(data_config)
weights = get_weights(data_config["DATA_PATH"])
criterion = get_loss(data_config["LOSS"], data_config["FP16"], weight=weights, device=device)
if hyperparams["OPTIMIZER"] == "SGD":
optimizer = torch.optim.SGD(model.parameters(), lr=hyperparams["INIT_LR"])
else:
optimizer = getattr(optim, hyperparams["OPTIMIZER"])(model.parameters(), lr=hyperparams["INIT_LR"])
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=hyperparams["INIT_LR"],
steps_per_epoch=len(train_loader),
epochs=hyperparams["EPOCHS"],
pct_start=0.05,
cycle_momentum=True if hyperparams["OPTIMIZER"] == "SGD" else False
)
# Amp loss scaler
scaler = (
torch.cuda.amp.GradScaler() if data_config["FP16"] and device != torch.device("cpu") else None
)
trainer = TorchTrainer(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
scaler=scaler,
model_path=model_path,
device=device,
verbose=1,
)
trainer.train(train_loader, hyperparams["EPOCHS"], val_dataloader=val_loader)
loss, f1_score, acc_percent = trainer.test(model, test_dataloader=val_loader)
params_nums = count_model_params(model)
model_info(model, verbose=True)
print('='*50)
return f1_score, params_nums, mean_time
|
59a8273634eec116d7ff16c0ac6b7dfdcbe24cf8
| 3,647,521
|
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
|
2ad0fcc50721fe30e4d48f691420393748bf9df3
| 3,647,522
|
def feedback(olsys,H=1):
"""Calculate the closed-loop transfer function
olsys
cltf = --------------
1+H*olsys
where olsys is the transfer function of the open loop
system (Gc*Gp) and H is the transfer function in the feedback
loop (H=1 for unity feedback)."""
clsys=olsys/(1.0+H*olsys)
return clsys
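# Quick numeric sketch using scalar gains as stand-ins for transfer functions:
feedback(4.0)         # 4 / (1 + 4)       = 0.8
feedback(4.0, H=0.5)  # 4 / (1 + 0.5 * 4) = 1.333...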
|
ca78d05196068746a225038c0f401faad24c5f65
| 3,647,523
|
from typing import List
import numpy as np
def get_sigma_grid(
init_sigma: float = 1.0, factor: int = 2, n_grid_points: int = 20
) -> List[float]:
"""Get a standard parameter grid for the cross validation strategy.
Parameters
----------
init_sigma : float, default=1.0
The initial sigma to use to populate the grid points.
factor : int, default=2
The log scale factor to use for both the beginning and end of the grid.
n_grid_points : int, default=20
The number of grid points to use.
Returns
-------
param_grid : List[float]
The parameter grid as per the specifications
Example
-------
>> param_grid = get_sigma_grid()
>> param_grid = get_sigma_grid(10.0, 3, 1_000)
"""
# create bounds for search space (logscale)
init_space = 10 ** (-factor)
end_space = 10 ** (factor)
# create param grid
param_grid = np.logspace(
np.log10(init_sigma * init_space),
np.log10(init_sigma * end_space),
n_grid_points,
)
return param_grid
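# Example under the defaults (np is numpy, as imported above): five log-spaced
# points spanning init_sigma * 10**(-2) to init_sigma * 10**2.
get_sigma_grid(init_sigma=1.0, factor=2, n_grid_points=5)
# -> array([1.e-02, 1.e-01, 1.e+00, 1.e+01, 1.e+02])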
|
33e49127bb2e116b8c209446ad1f614c44e5e128
| 3,647,524
|
def parse_csv(value_column):
"""Parses a CSV file based on the provided column types."""
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(ALL_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
classes = tf.cast(label, tf.int32) - 1
return features, classes
|
11d0f0508fd369ab50df45f71340d8336da676c0
| 3,647,525
|
def on_over_limit():
""" This is called when the rate limit is reached """
return jsonify(status='error', error=[_('Whoa, calm down and wait a bit before posting again.')])
|
f954abb1de5746ca49bbdff02894c1fe75fed106
| 3,647,526
|
def comment(strng,indent=''):
"""return an input string, commented out"""
template = indent + '# %s'
lines = [template % s for s in strng.splitlines(True)]
return ''.join(lines)
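# Example: prefix every line of a two-line string with an indented '# '.
print(comment("first line\nsecond line", indent="    "))
#     # first line
#     # second line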
|
42386b7ed8de9127d7224481a5f5315d39b6ae97
| 3,647,527
|
def square(number):
"""
Calculates how many grains were on each square
:param number:
:return:
"""
if number <= 0 or not number or number > 64:
raise ValueError(ERROR)
return 2**(number - 1)
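# Worked example of the wheat-and-chessboard doubling:
square(1)                             # 1 grain on the first square
square(64)                            # 2**63 = 9223372036854775808
sum(square(n) for n in range(1, 65))  # 2**64 - 1 = 18446744073709551615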
|
dd8d6f9dc95632effaf7bc8a705ffddd1de6c825
| 3,647,528
|
import os
from datetime import datetime
def get_doc(name=None, filename=None, url=None, parsed=True, start=0, end=None,
localfile=None, params=None, cookies=None, **kwargs):
"""
Retrieve an IDE file from either a file or URL.
Note: `name`, `filename`, and `url` are mutually exclusive arguments.
One and only one must be specified. Attempting to supply more than one
will generate an error.
Example usage::
get_doc("my_recording.ide")
get_doc("https://example.com/remote_recording.ide")
get_doc(filename="my_recording.ide")
get_doc(url="https://example.com/remote_recording.ide")
get_doc(filename="my_recording.ide", start="1:23")
The `start` and `end` times, if used, may be specified in several
ways:
* `int`/`float` (Microseconds from the recording start)
* `str` (formatted as a time from the recording start, e.g., `MM:SS`,
`HH:MM:SS`, `DDd HH:MM:SS`). More examples:
* ``":01"`` or ``":1"`` or ``"1s"`` (1 second)
* ``"22:11"`` (22 minutes, 11 seconds)
* ``"3:22:11"`` (3 hours, 22 minutes, 11 seconds)
* ``"1d 3:22:11"`` (1 day, 3 hours, 22 minutes, 11 seconds)
* `datetime.timedelta` or `pandas.Timedelta` (time from the
recording start)
* `datetime.datetime` (an explicit UTC time)
:param name: The name or URL of the IDE. The method of fetching it will
be automatically chosen based on how it is formatted.
:param filename: The name of an IDE file. Supplying a name this way will
force it to be read from a file, avoiding the possibility of
accidentally trying to retrieve it via URL.
:param url: The URL of an IDE file. Supplying a name this way will force
it to be read from a URL, avoiding the possibility of accidentally
trying to retrieve it from a local file.
:param parsed: If `True` (default), the IDE will be fully parsed after it
is fetched. If `False`, only the file metadata will be initially
loaded, and a later call to `idelib.importer.readData()` will be
required to load the rest. This can save time.
:param start: The starting time. Defaults to the start of the
recording. Only applicable if `parsed` is `True`.
:param end: The ending time. Defaults to the end of the recording. Only
applicable if `parsed` is `True`.
:param localfile: The name of the file to which to write data received
from a URL. If none is supplied, a temporary file will be used. Only
applicable when opening a URL.
:param params: Additional URL request parameters. Only applicable when
opening a URL.
:param cookies: Additional browser cookies for use in the URL request.
Only applicable when opening a URL.
:return: The fetched IDE data.
Additionally, `get_doc()` will accept the keyword arguments for
`idelib.importer.importFile()` or `idelib.importer.openFile()`
"""
if len([x for x in (name, filename, url) if x]) != 1:
raise TypeError("Only one source can be specified: name, filename, or url")
original = name or filename or url # For error reporting
stream = None
parsed_url = None
if name:
if os.path.isfile(name):
filename = name
else:
parsed_url = urlparse(name.replace('\\', '/'))
if not parsed_url.scheme or parsed_url.scheme == "file":
filename = parsed_url.path
else:
url = name
if filename:
filename = os.path.abspath(os.path.expanduser(filename))
stream = open(filename, 'rb')
elif url:
kwargs.setdefault('name', url)
parsed_url = parsed_url or urlparse(url)
if parsed_url.scheme.startswith('http'):
stream, _total = _get_url(url, localfile=localfile, params=params, cookies=cookies)
else:
# future: more fetching schemes before this `else` (ftp, etc.)?
raise ValueError(f"Unsupported transfer scheme: {parsed_url.scheme}")
if stream:
if not validate(stream):
stream.close()
raise ValueError(f"Could not read a Dataset from '{original}'"
f"(not an IDE file?)")
# Separate `openFile()` and `readData` kwargs, remove ones that aren't shared
open_kwargs = kwargs.copy()
read_kwargs = kwargs.copy()
for k in ('startTime', 'endTime', 'channels', 'source', 'total',
'bytesRead', 'samplesRead'):
open_kwargs.pop(k, None)
doc = openFile(stream, **open_kwargs)
if parsed:
for k in ('defaults', 'name', 'quiet'):
read_kwargs.pop(k, None)
session_start = doc.lastSession.utcStartTime
if session_start:
session_start = datetime.utcfromtimestamp(session_start)
if start:
read_kwargs['startTime'] = parse_time(start, session_start)
if end:
read_kwargs['endTime'] = parse_time(end, session_start)
readData(doc, **read_kwargs)
return doc
raise ValueError(f"Could not read data from '{original}'")
|
30b713188fbcc6b8a23193f05114b1b692a2869d
| 3,647,529
|
def health_check() -> ControllerResponse:
"""
Retrieve the current health of service integrations.
Returns
-------
dict
Response content.
int
HTTP status code.
dict
Response headers.
"""
status = {}
for name, obj in _getServices():
logger.info('Getting status of %s' % name)
status[name] = _healthy_session(obj)
return status, 200, {}
|
1915deb5283aac2c0ced935c66dbd3d1f5564e33
| 3,647,530
|
def GeoSim(hss_0, pow_law_exp, lat1, lon1, lat2, lon2):
""" In order to make the Similarity adimensional I have to add a scale to the game.
This scale is hss, i.e. the scale after which the similairty is damped by a factor 2.
:param pow_law_exp: is the exponent of the power law
"""
# @TODO: measure power operator performance vs `math.pow`
return (float(hss_0)/(hss_0 + GeoDist(lat1, lon1, lat2, lon2)))**pow_law_exp
|
dc133428d29b03dd1a6d78565350de6765d2197c
| 3,647,531
|
from openpype.scripts import publish_filesequence
def _get_script():
"""Get path to the image sequence script"""
try:
    from openpype.scripts import publish_filesequence
except Exception:
    raise RuntimeError("Expected module 'publish_filesequence' "
                       "to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
|
8efa4f24ed070b859a8e406275feb1c989d6fb6c
| 3,647,532
|
def residual_unit(data, nchw_inshape, num_filter, stride, dim_match, name, bottle_neck=True,
workspace=256, memonger=False, conv_layout='NCHW', batchnorm_layout='NCHW',
verbose=False, cudnn_bn_off=False, bn_eps=2e-5, bn_mom=0.9, conv_algo=-1,
fuse_bn_relu=False, fuse_bn_add_relu=False, cudnn_tensor_core_only=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
nchw_inshape : tuple of int
Input minibatch shape in (n, c, h, w) format independent of actual layout
num_filter : int
Number of output channels
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
Returns
-------
(sym, nchw_outshape)
sym : the model symbol (up to this point)
nchw_outshape : tuple
(batch_size, features, height, width)
"""
nchw_shape = nchw_inshape
act = 'relu' if fuse_bn_relu else None
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1))
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn2', cudnn_off=cudnn_bn_off, act_type=act)
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2') if not fuse_bn_relu else bn2
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv3, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn3 = batchnorm(data=conv3, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3'),
nchw_shape)
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv2, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn2 + shortcut, act_type='relu', name=name + '_relu2'),
nchw_shape)
|
a67edaf2a40a75619b389a6de8e8d20397b4df20
| 3,647,533
|
def dwa_control(x, config, goal, ob):
"""
Dynamic Window Approach control
"""
dw = calc_dynamic_window(x, config)
u, trajectory = calc_final_input(x, dw, config, goal, ob)
return u, trajectory
|
788d7c5427017436766d86cf0408eeabc4361d7e
| 3,647,534
|
import os
def GetCoverageDirectory(fuzzer):
"""Get a coverage report directory for a fuzzer
Args:
fuzzer: The fuzzer to get the coverage report directory for.
Returns:
The location of the coverage report directory for the |fuzzer|.
"""
relative_path = os.path.join(COVERAGE_REPORT_DIRECTORY_NAME, fuzzer)
return GetScriptStoragePath(relative_path)
|
098b237718389e3296186c177f4ac84c80ebc299
| 3,647,535
|
def decompress_bytes(inp_bytes: bytes, verbose=False) -> bytearray:
"""
Main function to decompress input bytes by extracting the Huffman map
and using the map to replace the encoded sequences with the original
characters.
:param inp_bytes: Input data to be compressed
:param verbose: set to True for printing console outputs
:return: decompressed bytearray data
"""
huff_map: HuffCode
rem: int
huff_map, rem = extract_huff_map(inp_bytes, verbose=verbose)
inp_bytes = inp_bytes[:-rem]
rev_seq: str = reverse_final_sequence(inp_bytes, verbose=verbose)
res: bytearray = reverse_huff_sequence(huff_map, rev_seq, verbose=verbose)
return res
|
9d3287ff1e481f04edcbe9eb8e06989d5ac83bd6
| 3,647,536
|
def filter_nans(data,
threshold = 3,
threshold_type = "data"):
"""
=================================================================================================
filter_nans(data, threshold, threshold_type)
This function is meant to filter out the nan values from a list, based on the input arguments.
=================================================================================================
Arguments:
data -> A list (or iterable) of data points. The points are assumed to be numbers.
threshold -> An integer describing the minimum value requirement.
threshold_type -> A string describing how the threshold integer will be applied:
either relative to the data ("data"/"on_data"/"on data") or to the NaNs ("nan"/"on_nan"/"on nan").
=================================================================================================
Returns: The filtered list, or an empty list if the threshold requirements were not met.
=================================================================================================
"""
# Make sure the user gave a valid thresholding option
assert threshold_type.lower() in ["data",
"on_data",
"on data",
"nan",
"on_nan",
"on nan"], "Threshold is either relative to NaN or data."
assert type(data) == list, "The data should be in a list"
# Filter NaNs, as they do not equal themselves
filtered = [val for val in data if val == val]
# Keep data if there are at least <threshold> data points
if threshold_type.lower() in ["data", "on_data", "on data"]:
if len(filtered) >= threshold:
return filtered
else:
return []
# Keep data if there are no more than <threshold> nans
elif threshold_type.lower() in ["nan", "on_nan", "on nan"]:
if len(data) - len(filtered) <= threshold:
return filtered
else:
return []
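# Small sketch of both thresholding modes:
demo = [1.0, float("nan"), 2.0, float("nan"), 3.0]
filter_nans(demo, threshold=3, threshold_type="data")    # [1.0, 2.0, 3.0]
filter_nans(demo, threshold=1, threshold_type="on_nan")  # [] (two NaNs exceed the allowed one)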
|
fe84ae2d638102e05db68f0c0062ee036be1a63b
| 3,647,537
|
def edit_seq2seq_config(config, frameworks=FULL_FRAMEWORKS, no_attn=False):
"""Rotate frameworks and optionally remove attention."""
configs = []
for fw in frameworks:
c = deepcopy(config)
c['backend'] = fw
configs.append(c)
if not no_attn:
new_configs = []
# Run the non attention version
for config in configs:
c = deepcopy(config)
c['model']['model_type'] = 'default'
new_configs.append(c)
new_configs.append(config)
configs = new_configs
return configs
|
bca93003cf67cc1c0ec14ba1dfa83664b10191fb
| 3,647,538
|
from typing import Optional
def get_bioportal_prefix(prefix: str) -> Optional[str]:
"""Get the Bioportal prefix if available."""
return _get_mapped_prefix(prefix, "bioportal")
|
f68ec16b8de886ab76319b06d4cf68c14a90fc53
| 3,647,539
|
def _obtain_rapt(request, access_token, requested_scopes):
"""Given an http request method and reauth access token, get rapt token.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
access_token (str): reauth access token
requested_scopes (Sequence[str]): scopes required by the client application
Returns:
str: The rapt token.
Raises:
google.auth.exceptions.ReauthError: if reauth failed
"""
msg = _get_challenges(
request,
list(challenges.AVAILABLE_CHALLENGES.keys()),
access_token,
requested_scopes,
)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
if not (
msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
):
raise exceptions.ReauthFailError(
"Reauthentication challenge failed due to API error: {}".format(
msg["status"]
)
)
if not is_interactive():
raise exceptions.ReauthFailError(
"Reauthentication challenge could not be answered because you are not"
" in an interactive session."
)
msg = _run_next_challenge(msg, request, access_token)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
# If we got here it means we didn't get authenticated.
raise exceptions.ReauthFailError("Failed to obtain rapt token.")
|
8c430df5c4198af8d044bd3151cdb7af605c14b1
| 3,647,540
|
def argunique(items, key=None):
"""
Returns indices corresponding to the first instance of each unique item.
Args:
items (Sequence[VT]): indexable collection of items
key (Callable[[VT], Any], default=None): custom normalization function.
If specified returns items where ``key(item)`` is unique.
Returns:
Iterator[int] : indices of the unique items
Example:
>>> import ubelt as ub
>>> items = [0, 2, 5, 1, 1, 0, 2, 4]
>>> indices = list(ub.argunique(items))
>>> assert indices == [0, 1, 2, 3, 7]
>>> indices = list(ub.argunique(items, key=lambda x: x % 2 == 0))
>>> assert indices == [0, 2]
"""
if key is None:
return unique(range(len(items)), key=lambda i: items[i])
else:
return unique(range(len(items)), key=lambda i: key(items[i]))
|
fd7af970578aac1a13a3123f13aac9daef1a4b7a
| 3,647,541
|
def promote_cvals(*vals):
"""
Promote Python values into the most general dshape containing
all of them. Only defined over simple CType instances.
>>> promote_cvals(1, 2.)
dshape("float64")
>>> promote_cvals(1, 2, 3j)
dshape("complex128")
"""
promoted = np.result_type(*vals)
datashape = CType.from_dtype(promoted)
return datashape
|
3a928ca061bdc8fedf1cb6e125994c4b7167e0c7
| 3,647,542
|
def load_directory_metadata(directory_path, return_copy=True):
"""
Get stored metadata for files in path. This currently only stores bookmarks.
If no metadata is available, return an empty dictionary.
This is a hidden file in the directory which stores metadata for all files
in the directory, as well as the directory itself. This has a bunch of
advantages over putting the data in each file:
- Every file format has its own way of storing metadata, and there are no
robust libraries that handle all of them.
- We don't have to modify the user's files, so there's no chance of us screwing
up and causing data loss.
- Opening each file during a refresh is extremely slow. It's much faster to
have a single file that we only read once per directory scan.
- We can use Windows Search to search this data if we format it properly. Use
a file extension that it indexes by default (we use .txt), and we can insert
keywords in the file that we can search for. Windows Search will index metadata
for some file types, but it's hit-or-miss (it handles JPEGs much better than PNGs).
"""
with _metadata_lock:
return _load_directory_metadata_locked(directory_path, return_copy=return_copy)
|
4033c1fae5c5330ef1254a13c97f33af43e39984
| 3,647,543
|
def _traverse_tree_and_group_all_objects_by_oclass(root_obj, result=None):
"""Traverses the tree once and groups all objects by oclass
:param root_obj: The root object where to start the traversion
:type root_obj: CUDS
:param result: The current results of the recursion, defaults to None
:type result: dict, optional
:return: All CUDS objects in the tree, grouped by oclass.
:rtype: dict
"""
if result is None:
result = {str(root_obj.oclass): [root_obj]}
for neighbour in root_obj.iter():
if str(neighbour.oclass) not in result:
result[str(neighbour.oclass)] = [neighbour]
else:
result[str(neighbour.oclass)].append(neighbour)
_traverse_tree_and_group_all_objects_by_oclass(neighbour, result)
return result
|
3ae139313ea7b5e92f0d9231a4e64efc87acc5ac
| 3,647,544
|
def check_measurement(m_info, filters):
"""
Determine whether a given measurement should be included based on the
filters.
Inputs:
m_info - A dictionary containing the configuration parameters for an
individual measurement.
filters - A dictionary containing a set of configuration parameter
values that should be included
Output:
include - Boolean indicating whether to include the given measurement
"""
include = True
for filter_field, filter_values in filters.items():
try:
iter(filter_values)
except TypeError:
filter_values = [filter_values]
if not m_info[filter_field] in filter_values:
include = False
return include
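# Small sketch with hypothetical configuration fields:
m_demo = {"frequency": 2.4, "antenna": "A"}
check_measurement(m_demo, {"frequency": [2.4, 5.0], "antenna": "A"})  # True
check_measurement(m_demo, {"antenna": "B"})                           # False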
|
374be08c315a63d09faadc9c963a49a89b04b3ed
| 3,647,545
|
import numpy as np
from pydub import AudioSegment
def audiosegment2wav(data: AudioSegment):
"""
Convert a pydub.AudioSegment into a wav signal (numpy array scaled by the int16 maximum).
:param data:
:return:
"""
wav = np.array(data.get_array_of_samples()) / _int16_max
return wav
|
44f75bf26ae0f3e11c3d9480aee38c2ad943ae86
| 3,647,546
|
def embargo(cand_times, test_times, embargo_table):
"""
"Embargo" observations from the training set.
Args:
cand_times(Series): times of candidates to be the "embargoed set"
index: t0(start time)
value: t1(end time)
test_times(Series): times of the test set
index: t0(start time)
value: t1(end time)
embargo_table(Series): embargo times table returned by get_embargo_table()
Returns:
embargoed_times(Series): times of embargoed training set
index: t0(start time)
value: t1(end time)
"""
first_test_start = test_times.index[0]
final_test_start = test_times.index[-1]
final_embargo_start = embargo_table[final_test_start] # end time of the embargo
to_embargo_idx = cand_times.loc[first_test_start:final_embargo_start].index
embargoed_times = cand_times.drop(to_embargo_idx)
return embargoed_times
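# Toy sketch with integer timestamps; both Series map start times to end times
# and the embargo table is assumed to come from get_embargo_table().
import pandas as pd

cand_demo = pd.Series({1: 2, 3: 4, 5: 6, 7: 8})
test_demo = pd.Series({3: 4, 5: 6})
table_demo = pd.Series({3: 5, 5: 7})
embargo(cand_demo, test_demo, table_demo)  # keeps only the candidate starting at 1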
|
6fb97816c32fc73661905af27613bef0c6ac0726
| 3,647,547
|
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the WiZ Light platform from config_flow."""
# Assign configuration variables.
wiz_data = hass.data[DOMAIN][entry.entry_id]
wizbulb = WizBulbEntity(wiz_data.bulb, entry.data.get(CONF_NAME), wiz_data.scenes)
# Add devices with defined name
async_add_entities([wizbulb], update_before_add=True)
return True
|
c65665220f81a5c918cf8eac7839159b4296a968
| 3,647,548
|
import os
import pandas
def handle_uploaded_file(file, filename):
"""
Обработка файла csv спарсенного с online.edu.ru
"""
if not os.path.exists('upload/'):
os.mkdir('upload/')
path = 'upload/' + filename
with open(path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
df = pandas.read_csv(path, sep=',', encoding='utf-8')
df.dropna(subset=['Направления подготовки'], inplace=True)
df = df.drop(['Unnamed: 0'], axis=1)
return df
|
2b24081bf7b4d42c60ff17f500c7da0d81e11ceb
| 3,647,549
|
async def check_account():
"""
A check that checks if the user has an account and if not creates one for them.
"""
async def check(ctx) -> bool:
conn = get_db()
cur = conn.cursor()
cur.execute("SELECT * FROM economy WHERE user_id = ?", (ctx.author.id,))
if cur.fetchone() is None:
cur.execute("INSERT INTO economy (user_id) VALUES (?)", (ctx.author.id,))
cur.execute("INSERT INTO cooldowns (user_id) VALUES (?)", (ctx.author.id,))
conn.commit()
cur.close()
conn.close()
return True
return check
|
205e39405eb52b57f743dfabca11c04cf11f0f34
| 3,647,550
|
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
|
0c70aac1ffe03eea62d581a6a4ab6b84495af079
| 3,647,551
|
import pandas as pd
import os
def edc_t(path):
"""EPICA Dome C Ice Core 800KYr Temperature Estimates
Temperature record, using Deuterium as a proxy, from the EPICA (European
Project for Ice Coring in Antarctica) Dome C ice core covering 0 to 800
kyr BP.
A data frame with 5788 observations on the following 5 variables.
`Bag`
Bag number
`ztop`
Top depth (m)
`Age`
Years before 1950
`Deuterium`
Deuterium dD data
`dT`
Temperature difference from the average of the last 1000 years ~
-54.5degC
http://www.ncdc.noaa.gov/paleo/icecore/antarctica/domec/domec_epica_data.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `edc_t.csv`.
Returns:
Tuple of np.ndarray `x_train` with 5788 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'edc_t.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/edcT.csv'
maybe_download_and_extract(path, url,
save_file_name='edc_t.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
ac92d5b39bfa99b9fdea62e4684868c5593d52bf
| 3,647,552
|
def init_group_prams(net):
"""Initialize group_prams."""
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': 0.0001},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
return group_params
|
be078603c4ae42163f66668dcc16a0a77d899805
| 3,647,553
|
def nni_differences_parameters(nni=None, rpeaks=None):
"""Computes basic statistical parameters from a series of successive NN interval differences (mean, min, max, standard deviation).
Parameters
----------
nni : array
NN intervals in [ms] or [s].
rpeaks : array
R-peak times in [ms] or [s].
Returns (biosppy.utils.ReturnTuple Object)
------------------------------------------
[key : format]
Description.
nni_diff_mean: float
Mean NN interval difference [ms].
nni_diff_min : float
Minimum NN interval difference [ms].
nni_diff_max : float
Maximum NN interval difference [ms].
Notes
-----
.. Only one type of input data is required.
.. If both 'nni' and 'rpeaks' are provided, 'nni' will be chosen over the 'rpeaks'
.. NN and R-peak series provided in [s] format will be converted to [ms] format.
"""
# Check input
nn = tools.check_input(nni, rpeaks)
# Get NN interval differences
nnd = tools.nni_diff(nn)
# output
args = (float(nnd.mean()), int(nnd.min()), int(nnd.max()), )
names = ('nni_diff_mean', 'nni_diff_min', 'nni_diff_max', )
return utils.ReturnTuple(args, names)
|
aadea3b440fe4ac3c06cbd88cde69e11566e861f
| 3,647,554
|
def contextualize_model(model, cell_line, genes):
"""Contextualize model at the level of a PySB model."""
# Here we just make a PysbAssembler to be able
# to apply set_context on the model being passed in
model.name = cell_line
cell_line_ccle = cell_line + '_SKIN'
pa = PysbAssembler()
pa.model = model
pa.set_context(cell_line_ccle)
# Set initial conditions for missense mutations
variants = read_ccle_variants(genes)
mutations = variants['missense'][cell_line_ccle]
for gene, mut_list in mutations.items():
for fres, loc, tres in mut_list:
site_name = fres + loc
for ic in model.initial_conditions:
if ic[0].monomer_patterns[0].monomer.name == gene:
sc = ic[0].monomer_patterns[0].site_conditions
if site_name in sc:
sc[site_name] = tres
return pa.model
|
7f0018b0e1308a354529893fcd8ac54bb9fa7642
| 3,647,555
|
import numpy as np

def _quaternionInverse(quat):
    """ Inverts an array of quaternions stored row-wise in (x, y, z, w) order.
    """
    quat_ = np.empty((quat.shape[0], 4))
# For every quaternion
for i in range(quat.shape[0]):
mag = quat[i,0]**2 + quat[i,1]**2 + quat[i,2]**2 + quat[i,3]**2
quat_[i,0] = -quat[i,0]/mag
quat_[i,1] = -quat[i,1]/mag
quat_[i,2] = -quat[i,2]/mag
quat_[i,3] = quat[i,3]/mag
return quat_
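# Hedged usage sketch (added for illustration, not part of the original source):
# quaternions are stored row-wise as (x, y, z, w); the inverse negates the
# vector part and divides by the squared norm.
_q = np.array([[0., 0., 0., 1.],                                  # identity
               [0., 0., np.sin(np.pi / 4), np.cos(np.pi / 4)]])   # 90 deg about z
print(np.allclose(_quaternionInverse(_q)[0], [0., 0., 0., 1.]))   # -> True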
|
a70868d3b38fe087c83a52c1a7cabc32f05310dc
| 3,647,556
|
from typing import Union
def load_dataset(files: list[str]) -> Union[list[int], list[list[list[int]]]]:
"""load the images and labels of the test dataset
Args:
files (list[str]): list of files path for images and label dataset
Returns:
Union[list[int], list[list[list[int]]]]: list of labels and list of int matrixes
"""
print("loading the dataset...")
with open(files[0], "rb") as image_file:
        magic_number = int.from_bytes(image_file.read(4), 'big', signed=True)  # header value, read but unused
number_of_images = int.from_bytes(image_file.read(4), 'big', signed=True)
rows = int.from_bytes(image_file.read(4), 'big', signed=True)
cols = int.from_bytes(image_file.read(4), 'big', signed=True)
images = []
for _ in range(number_of_images):
matrix = []
for _ in range(rows):
row = []
for _ in range(cols):
row.append(int.from_bytes(image_file.read(1), 'big', signed=False))
matrix.append(row)
images.append(matrix)
with open(files[1], "rb") as label_file:
        magic_number = int.from_bytes(label_file.read(4), 'big', signed=True)  # header value, read but unused
number_of_labels = int.from_bytes(label_file.read(4), 'big', signed=True)
labels = []
for _ in range(number_of_labels):
labels.append(int.from_bytes(label_file.read(1), 'big', signed=False))
return labels, images
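# Hedged usage sketch (added for illustration, not part of the original source).
# The two paths are assumed to point at IDX-format MNIST files, images first and
# labels second, e.g.:
#   labels, images = load_dataset(["t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte"])
#   len(labels) == len(images); each image is a rows x cols list of pixel ints (0-255)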
|
e9635b8b9a4f92d96df8e0dea97a569a1b49b02d
| 3,647,557
|
def get_minion_node_ips(boot_conf, hb_conf):
"""
    Returns a list of IPs for all minion nodes
:param boot_conf: the snaps-boot configuration dict
:param hb_conf: the adrenaline configuration dict
:return: a list of IP addresses
"""
return __get_node_ips(boot_conf, hb_conf, 'minions')
|
c36ccc30043d2bb7a43314f6665b35ae9e1c47f4
| 3,647,558
|
def _normalize_sql(sql, maxlen=150):
"""Collapse whitespace and middle-truncate if needed."""
out = ' '.join(sql.split())
if len(out) > maxlen:
i = int(maxlen / 2 - 4)
        out = out[:i] + ' . . . ' + out[-i:]
return out
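# Hedged usage sketch (added for illustration, not part of the original source):
# whitespace is collapsed and long statements are middle-truncated with ' . . . '.
print(_normalize_sql("SELECT  *\n   FROM   users"))           # -> 'SELECT * FROM users'
_long_sql = "SELECT " + ", ".join("col_%d" % i for i in range(100)) + " FROM t"
print(len(_normalize_sql(_long_sql)) <= 150)                  # -> True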
|
f85efb0c367b448d2e363d9c1f8bf62a2bdb600e
| 3,647,559
|
from typing import Dict
def utt_non_punct_dialog(dialog: Dict):
"""
Used by: book_skill
"""
dialog = utils.get_last_n_turns(dialog)
dialog = utils.remove_clarification_turns_from_dialog(dialog)
return [{"dialogs": [dialog]}]
|
6ef4bf4fee0d8a4bba9fe140e476682e84064060
| 3,647,560
|
import numpy as np
from scipy.interpolate import griddata

def griddata_easy(xx, yy, data, xi=None, yi=None, dx=None, dy=None, nx=10, ny=10, method='nearest', fill_value=None):
"""
    Generate gridded data from scattered data z=f(x, y)
    ... Wrapper of scipy.interpolate.griddata
Parameters
----------
xx: nd array-like
x-coordinate of scattered data
yy: nd array-like
y-coordinate of scattered data
data: nd array-like
values of scattered data
xi: 1d array
x-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, xi = np.arange(xmin, xmax, dx)
yi: 1d array
y-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, yi = np.arange(ymin, ymax, dy)
dx: float
spacing of 'xi' if 'xi' is not given
dy: float
        spacing of 'yi' if 'yi' is not given
nx: int
if 'dx' were not given, dx is set as (xmax-xmin)/nx
ny: int
        if 'dy' were not given, dy is set as (ymax-ymin)/ny
method: method of 2D interpolation
... Options: 'nearest', 'linear', 'cubic'
Returns
-------
xxi: 2d array
x-coordinate of the grid
yyi: 2d array
        y-coordinate of the grid
data_i: 2d array
values on the grid
"""
xx, yy, data = np.asarray(xx), np.asarray(yy), np.asarray(data)
if not xx.shape == yy.shape == data.shape:
print('x.shape, y.shape, and data.shape must match. ', xx.shape, yy.shape, data.shape)
raise ValueError('shapes of x, y, and data do not match.')
x, y, data1d = xx.flatten(), yy.flatten(), data.flatten()
if xi is None:
xmin, xmax = np.nanmin(x), np.nanmax(x)
if dx is None:
dx = (xmax - xmin) / nx
xi = np.arange(xmin, xmax, dx)
if yi is None:
ymin, ymax = np.nanmin(y), np.nanmax(y)
if dy is None:
dy = (ymax - ymin) / ny
yi = np.arange(ymin, ymax, dy)
xxi, yyi = np.meshgrid(xi, yi)
# interpolate
data_i = griddata((x, y), data1d, (xxi, yyi), method=method, fill_value=fill_value)
return xxi, yyi, data_i
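# Hedged usage sketch (added for illustration, not part of the original source):
# regridding 200 scattered samples of z = x + y with nearest-neighbour interpolation.
_rng = np.random.default_rng(0)
_xx = _rng.uniform(0, 1, 200)
_yy = _rng.uniform(0, 1, 200)
_xxi, _yyi, _zi = griddata_easy(_xx, _yy, _xx + _yy, nx=10, ny=10, method='nearest')
print(_xxi.shape, _yyi.shape, _zi.shape)  # three equally shaped ~(10, 10) grids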
|
77c5c92e5176c62252f7c6814e3483d8a1323925
| 3,647,561
|
def emit_cover(ctx, go_toolchain,
source = None,
mode = None,
importpath = ""):
"""See go/toolchains.rst#cover for full documentation."""
if source == None: fail("source is a required parameter")
if mode == None: fail("mode is a required parameter")
if not importpath: fail("importpath is a required parameter")
stdlib = go_toolchain.stdlib.get(ctx, go_toolchain, mode)
covered = []
cover_vars = []
for s in source.entries:
if not s.want_coverage:
covered.append(s)
continue
outputs = []
for src in s.srcs:
if not src.basename.endswith(".go"):
outputs.append(src)
continue
cover_var = "Cover_" + src.basename[:-3].replace("-", "_").replace(".", "_")
cover_vars.append("{}={}={}".format(cover_var, src.short_path, importpath))
out = declare_file(ctx, path=cover_var, ext='.cover.go')
outputs.append(out)
args = ctx.actions.args()
add_go_env(args, stdlib, mode)
args.add(["--", "--mode=set", "-var=%s" % cover_var, "-o", out, src])
ctx.actions.run(
inputs = [src] + stdlib.files,
outputs = [out],
mnemonic = "GoCover",
executable = go_toolchain.tools.cover,
arguments = [args],
)
members = structs.to_dict(s)
members["srcs"] = outputs
covered.append(GoSource(**members))
return GoSourceList(entries=covered), cover_vars
|
d390f534e723a893ca5e8b23a90ae4008abf79fe
| 3,647,562
|
def shortdate(date=None):
"""turn (timestamp, tzoff) tuple into iso 8631 date."""
return datestr(date, format='%Y-%m-%d')
|
9478c96e8abd95a8cc5822b111b139572693ac8b
| 3,647,563
|
from datetime import datetime, date, time
import numpy
def default_fram( object_to_serialize):
"""
Python json api custom serializer function for FRAM Warehouse API
per:'Specializing JSON object encoding', https://simplejson.readthedocs.org
>>> import simplejson as json
>>> json.dumps({'Without':[1,'a',datetime(1999, 1, 1),'Serializer']})
Traceback (most recent call last):
...
TypeError: datetime.datetime(1999, 1, 1, 0, 0) is not JSON serializable
>>> dict2 = {'With':[1,'a',datetime(1999, 1, 1),'Serializer']}
>>> json.dumps( dict2, default=default_fram)
'{"With": [1, "a", "1999-01-01T00:00:00Z", "Serializer"]}'
>>> dict3 = {'With':[1,'a',date(1999, 1, 1),'Serializer']}
>>> json.dumps( dict3, default=default_fram)
'{"With": [1, "a", "1999-01-01", "Serializer"]}'
>>> dict4 = {'With':[1,'a',time(4, 5, 6),'Serializer']}
>>> json.dumps( dict4, default=default_fram)
'{"With": [1, "a", "1970-01-01T04:05:06Z", "Serializer"]}'
>>> numpy_64bit_int = {'With':[1,numpy.int64(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_32bit_int = {'With':[1,numpy.int32(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_16bit_int = {'With':[1,numpy.int16(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
"""
#Bake datetime objects into Strings
if isinstance( object_to_serialize, datetime):
if object_to_serialize.utcoffset() is None:
#Append 'Z', to conform to ISO8601 date spec
return object_to_serialize.isoformat()+'Z'
#Else, TZ offset present. TZ info will be automatically included per
# docs.python.org/3/library/datetime.html#datetime.datetime.isoformat
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, date):
# No Timezone info available,
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, time):
#No date available.Prefix:'1970-01-01T',to conform to ISO8601 date spec
isoformat = '1970-01-01T'+object_to_serialize.isoformat()
if object_to_serialize.utcoffset() is None:
# No Timezone info available,
# Append 'Z',to conform to ISO8601 date spec
return isoformat+'Z'
#else, TZ offset has already been added to string.
return isoformat
if isinstance(object_to_serialize, numpy.integer):
return int(object_to_serialize) #per Python issue24313, no support for numpy Ints
    # Else, it wasn't a datetime/date/time or numpy integer and we don't handle anything else, so:
raise TypeError(repr(object_to_serialize) + " is not JSON serializable")
|
bb345b01b7ba86e2e47515addda854d16983f036
| 3,647,564
|
import random
import numpy as np
def read_random_stack_for_multiple_pickles(all_spectra, all_sequence, num_spectra, stack_size):
"""TODO(nh2tran): docstring."""
print("read_random_stack_for_multiple_pickles()")
  random_idx = random.sample(range(num_spectra[-1]), min(stack_size, num_spectra[-1]))
random_locations = []
f_idx = np.array(num_spectra)
for i in random_idx:
idx = np.where((f_idx > i))[0][0]
if idx == 0:
random_locations.append([idx, i])
else:
random_locations.append([idx, i-num_spectra[idx-1]])
return read_spectra_from_multiple_pickles(all_spectra, all_sequence, random_locations)
|
36a0baf3d111f262ebb4845379747e054c5e728f
| 3,647,565
|
import logging
def perfect_pattern(dict_class_counts, distinct_classes, pattern, supporting_items, results_dir):
"""
Performs checking whether the pattern is perfect and a common class can be found directly without constructing the
relative class hierarchy.
:param dict_class_counts: the count of each distinct class combinations to weight the superclass
:param distinct_classes: the distinct class combinations of relative hierarchy level 0
:param pattern: the pattern to evaluate
:param supporting_items: the classes to which the supporting items belong to
:param results_dir: the directory to store the results
:return:
"""
base_case = False
superclasses = list()
for key in dict_class_counts:
for subkey in key.split():
logging.debug('Subkey:\t%s', set([subkey]))
subkey_class = True
for cl in distinct_classes:
if set([subkey]).issubset(set(cl)):
pass # do nothing
else:
logging.debug('%s is no subset of %s', set([subkey]), set(cl))
subkey_class = False
break
if subkey_class and subkey not in superclasses:
logging.debug('Setting base_case to True!')
base_case = True
superclasses.append(subkey)
weights = [0 for superclass in superclasses]
weights_indexes = [[0] for superclass in superclasses]
if base_case:
logging.info('Found pattern on first hierarchy level!')
construct_result(pattern, {}, weights, weights_indexes, superclasses, dict_class_counts)
write_to_csv(pattern, distinct_classes, weights, superclasses, weights_indexes,
get_distribution(supporting_items), True, results_dir)
return base_case
|
cbedf060049466d46689af49cb31be95dd2ecc3b
| 3,647,566
|
from timeit import timeit
def _benchmark_grep(filename, pattern):
"""Benchmarks grep.
Args:
- filename: The name of the file to be searched.
- pattern: The pattern we are searching for in the file.
"""
time_taken = timeit(setup=BENCHMARK_SETUP, number=SINGLE_STRING_TESTS,
stmt='subprocess.call(%s)' % GREP_CALL_ARGS.format(pattern, filename))
return time_taken / SINGLE_STRING_TESTS
|
f1d3a4b9f6d5f7867f49a6eb3bdc6236111d5277
| 3,647,567
|
import pathlib
def inotify_test(
test_paths: dict[str, pathlib.Path], tmp_path: pathlib.Path
) -> InotifyTest:
"""Generate a pre-configured test instance of `inotify_simple.INotify`.
Parameters
----------
test_paths: dict[str, pathlib.Path]
The test fixture that generates test files based on configuration
(:obj:`test_paths`).
tmp_path: pathlib.Path
The pytest `tmp_path` fixture providing a path object to a temporary
directory which is unique to each test function
(:obj:`_pytest.tmpdir.tmp_path`).
Returns
-------
inotify_simple: InotifyTest
A pre-configured `InotifyTest` object with the specified test paths.
"""
inotify = InotifyTest(tmp_path)
for key, path in test_paths.items():
inotify.add_watch(path)
return inotify
|
e64975dc2765e3c887194cbf88a0f47ef3d5311e
| 3,647,568
|
def set_system_bios( context, settings, system_id = None ):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system( context, system_id )
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings object
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get( bios_uri )
etag = bios.getheader( "ETag" )
if "@Redfish.Settings" in bios.dict:
bios_uri = bios.dict["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
bios_settings = context.get( bios_uri )
etag = bios_settings.getheader( "ETag" )
# Update the settings
payload = { "Attributes": settings }
headers = None
if etag is not None:
headers = { "If-Match": etag }
response = context.patch( bios_uri, body = payload, headers = headers )
verify_response( response )
return response
|
68ceeb63ec74f3459f8cfea1eb6eb9d668bff15e
| 3,647,569
|
def create() -> UserSecurityModel:
"""
Creates a new instance of the USM
"""
return UserSecurityModel()
|
1e07d9bc6359a2ca000b886de416147d85720c9c
| 3,647,570
|
from skimage.morphology import skeletonize, skeletonize_3d

def clDice(v_p, v_l):
"""[this function computes the cldice metric]
Args:
v_p ([bool]): [predicted image]
v_l ([bool]): [ground truth image]
Returns:
[float]: [cldice metric]
"""
if len(v_p.shape)==2:
tprec = cl_score(v_p,skeletonize(v_l))
tsens = cl_score(v_l,skeletonize(v_p))
    elif len(v_p.shape)==3:
        tprec = cl_score(v_p,skeletonize_3d(v_l))
        tsens = cl_score(v_l,skeletonize_3d(v_p))
    else:
        raise ValueError("clDice expects a 2D or 3D input, got shape %s" % (v_p.shape,))
    return 2*tprec*tsens/(tprec+tsens)
|
f8a6947ca1487878e9e33c5c7aed3604565801e3
| 3,647,571
|
import re
def validate_regex(regex_str):
"""
Checks if a given string is valid regex
    :param str regex_str: a suspicious string that may or may not be valid regex
:rtype: bool
:return: True if valid regex was give, False in case of TypeError or re.error
"""
# another of those super basic function where i am not sure if there isn't an easier way
try:
re.compile(regex_str)
return True
except re.error:
return False
except TypeError: # for the string not being one
return False
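# Hedged usage sketch (added for illustration, not part of the original source):
print(validate_regex(r"^\d{4}-\d{2}$"))   # -> True
print(validate_regex("(unclosed group"))  # -> False (re.error)
print(validate_regex(12345))              # -> False (TypeError: not a string)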
|
97c6e2338eb67c2d4be74e3a18a4393a1eb36242
| 3,647,572
|
import json
from os.path import isfile
def load_stats_from_file(date):
"""
Load stats data from a stat file.
Params:
date -- a `datetime` instance.
"""
file_path = _build_stats_file_path(date)
if not isfile(file_path):
        raise FileNotFoundError(file_path)  # more precise than the bare IOError used under Python 2
with open(file_path, 'r') as fin:
return json.loads(fin.read())
|
b2bb85f6a492ca26441271222f10373e200497e1
| 3,647,573
|
def null_gt_null(left, right):
""":yaql:operator >
Returns false. This function is called when left and right are null.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null > null
false
"""
return False
|
f99a985ae1b0e678afb315ed441d33064dd281b0
| 3,647,574
|
def read_header(file):
""" Read the information in an OpenFOAM file header.
Parameters
----------
file : str
Name (path) of OpenFOAM file.
Returns
-------
info : dictionary
The information in the file header.
"""
with open(file, 'r') as f:
content = f.read()
info = {}
info['file'] = file
# read logo
logo_info = _read_logo(content)
info['foam_version'] = logo_info['Version']
info['website'] = logo_info['Website']
# read header
header_info = _read_header_info(content)
info['foam_class'] = header_info['foam_class']
info['name'] = header_info['name']
info['location'] = header_info['location']
return info
|
91446555ed31953ea4290e76db51872eb1ef3ae9
| 3,647,575
|
from Crypto.PublicKey import ECC  # pycryptodome; int_from_b58 and CURVE are module-level helpers

def point_from_b58(b):
"""Return b58 decoded P."""
x, y = [int_from_b58(t) for t in b.split(",")]
return ECC.EccPoint(x=x, y=y, curve=CURVE)
|
4f5b9dfe60c745b17ffb54535a8994273d07c675
| 3,647,576
|
def _cp_embeds_into(cp1, cp2):
"""Check that any state in ComplexPattern2 is matched in ComplexPattern1.
"""
# Check that any state in cp2 is matched in cp1
# If the thing we're matching to is just a monomer pattern, that makes
# things easier--we just need to find the corresponding monomer pattern
# in cp1
if cp1 is None or cp2 is None:
return False
cp1 = as_complex_pattern(cp1)
cp2 = as_complex_pattern(cp2)
if len(cp2.monomer_patterns) == 1:
mp2 = cp2.monomer_patterns[0]
# Iterate over the monomer patterns in cp1 and see if there is one
# that has the same name
for mp1 in cp1.monomer_patterns:
if _mp_embeds_into(mp1, mp2):
return True
return False
|
67e410eb3ba1131f144829b724ad7099807d4e4e
| 3,647,577
|
def get_tags_for_message(khoros_object, msg_id):
"""This function retrieves the tags for a given message.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param msg_id: The Message ID for the message from which to retrieve tags
:type msg_id: str, int
:returns: A list of tags associated with the message
"""
tag_list = []
query = f"SELECT text FROM tags WHERE messages.id = '{msg_id}'" # nosec
response = liql.perform_query(khoros_object, liql_query=query, verify_success=True)
entries = api.get_items_list(response)
for entry in entries:
tag_list.append(entry['text'])
return tag_list
|
563df4344f9291d9114450a994145610ef79ae8f
| 3,647,578
|
def _build_hierarchical_histogram_computation(
lower_bound: float, upper_bound: float, num_bins: int,
aggregation_factory: factory.UnweightedAggregationFactory):
"""Utility function creating tff computation given the parameters and factory.
Args:
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
aggregation_factory: The aggregation factory used to construct the federated
computation.
Returns:
A tff federated computation function.
"""
@computations.tf_computation(computation_types.SequenceType(tf.float32))
def client_work(client_data):
return _discretized_histogram_counts(client_data, lower_bound, upper_bound,
num_bins)
aggregator = aggregation_factory.create(client_work.type_signature.result)
@computations.federated_computation(
computation_types.at_clients(client_work.type_signature.parameter))
def hierarchical_histogram_computation(federated_client_data):
# Work done at clients.
client_histogram = intrinsics.federated_map(client_work,
federated_client_data)
# Aggregation to server.
return aggregator.next(aggregator.initialize(), client_histogram).result
return hierarchical_histogram_computation
|
38d5c711bcd6d6cd8965f7e8e85b0933363a2a7b
| 3,647,579
|
import inspect
from functools import wraps
def check_endpoint(func):
"""Check available endpoint."""
@wraps(func)
def wrapper(*args, **kwargs):
sig = inspect.signature(func)
args_value = sig.bind(*args, **kwargs)
endpoint = args_value.arguments["endpoint"]
if endpoint not in AVAILABLE_ENDPOINTS:
raise ClientException(f"Unavailable endpoints: {endpoint}")
return func(*args, **kwargs)
return wrapper
|
1e833dc8c3d43b6c09bd2b3bc89846ce29952cbd
| 3,647,580
|
def read_sql_one(id):
"""
This function responds to a request for api/reviews/{id}
with one matching review from reviews
:param id: id of the review
:return: review matching the id
"""
response = Response.query.filter_by(id=id).one_or_none()
if response is not None:
# serialize the data for the response
response_schema = ResponseSchema()
return response_schema.dump(response).data
else:
abort(404, f"Review {id} not found.")
|
d54abca40fb6d44adf0988bc44484da3af3efb22
| 3,647,581
|
from typing import Tuple

import numpy as np
from torch.utils.data import Dataset

def ds_to_numpy(ds: Dataset) -> Tuple[np.ndarray, np.ndarray]:
"""Transform torch dataset to numpy arrays
Parameters
----------
ds : Dataset
COVID dataset
Returns
-------
Tuple[np.ndarray, np.ndarray]
Flattened images + labels
"""
imgs = []
labels = []
for img, label in ds:
imgs.append(img.detach().cpu().numpy().flatten()[np.newaxis, ])
labels.append(label)
return np.concatenate(imgs), np.array(labels)
|
218eaf582b36a562920bc2e8808b3524a900b8ef
| 3,647,582
|
import base64
def _b64(b):
"""Helper function base64 encode for jose spec."""
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
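# Hedged usage sketch (added for illustration, not part of the original source):
# URL-safe base64 with the '=' padding stripped, as the JOSE/ACME specs require.
print(_b64(b'{"alg":"RS256"}'))  # -> eyJhbGciOiJSUzI1NiJ9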
|
4777d4f47de2c72b8dd95b765fc54d1abc6763f0
| 3,647,583
|
import arff  # liac-arff
from scipy import sparse

def load_from_arff(filename, label_count, label_location="end",
input_feature_type='float', encode_nominal=True, load_sparse=False,
return_attribute_definitions=False):
"""Method for loading ARFF files as numpy array
Parameters
----------
filename : str
path to ARFF file
    label_count: integer
        number of labels in the ARFF file
    label_location: str {"start", "end"} (default is "end")
        whether the ARFF file contains labels at the beginning of the
        attributes list ("start", MEKA format)
        or at the end ("end", MULAN format)
input_feature_type: numpy.type as string (default is "float")
the desire type of the contents of the return 'X' array-likes,
default 'i8', should be a numpy type,
see http://docs.scipy.org/doc/numpy/user/basics.types.html
encode_nominal: bool (default is True)
whether convert categorical data into numeric factors - required
for some scikit classifiers that can't handle non-numeric
input features.
load_sparse: boolean (default is False)
whether to read arff file as a sparse file format, liac-arff
breaks if sparse reading is enabled for non-sparse ARFFs.
return_attribute_definitions: boolean (default is False)
whether to return the definitions for each attribute in the
dataset
Returns
-------
X : :mod:`scipy.sparse.lil_matrix` of `input_feature_type`, shape=(n_samples, n_features)
input feature matrix
y : :mod:`scipy.sparse.lil_matrix` of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
names of attributes : List[str]
list of attribute names from ARFF file
"""
if not load_sparse:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.DENSE
)
matrix = sparse.csr_matrix(
arff_frame['data'], dtype=input_feature_type
)
else:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.COO
)
data = arff_frame['data'][0]
row = arff_frame['data'][1]
col = arff_frame['data'][2]
matrix = sparse.coo_matrix(
(data, (row, col)), shape=(max(row) + 1, max(col) + 1)
)
if label_location == "start":
X, y = matrix.tocsc()[:, label_count:].tolil(), matrix.tocsc()[:, :label_count].astype(int).tolil()
feature_names = arff_frame['attributes'][label_count:]
label_names = arff_frame['attributes'][:label_count]
elif label_location == "end":
X, y = matrix.tocsc()[:, :-label_count].tolil(), matrix.tocsc()[:, -label_count:].astype(int).tolil()
feature_names = arff_frame['attributes'][:-label_count]
label_names = arff_frame['attributes'][-label_count:]
else:
        # unknown label_location
return None
if return_attribute_definitions:
return X, y, feature_names, label_names
else:
return X, y
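# Hedged usage sketch (added for illustration, not part of the original source).
# Loading a MULAN-style multi-label ARFF file whose last 6 attributes are labels:
#   X, y = load_from_arff("scene.arff", label_count=6, label_location="end")
#   X is a (n_samples, n_features) lil_matrix, y a binary (n_samples, 6) lil_matrix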
|
d203b6360d3212e7e6a37f0ff434e17dfacfe6a0
| 3,647,584
|
def gapfill_to_ensemble(model, iterations=1, universal=None, lower_bound=0.05,
penalties=None, exchange_reactions=False,
demand_reactions=False, integer_threshold=1e-6):
"""
Performs gapfilling on model, pulling reactions from universal.
Any existing constraints on base_model are maintained during gapfilling, so
these should be set before calling gapfill_to_ensemble (e.g. secretion of
metabolites, choice of objective function etc.).
Currently, only iterative solutions are supported with accumulating
penalties (i.e. after each iteration, the penalty for each reaction
doubles).
Parameters
----------
model : cobra.Model
The model to perform gap filling on.
universal : cobra.Model
A universal model with reactions that can be used to complete the
model.
lower_bound : float, 0.05
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
integer_threshold : float, 1e-6
The threshold at which a value is considered non-zero (aka
integrality threshold). If gapfilled models fail to validate,
you may want to lower this value. However, picking a threshold that is
too low may also result in reactions being added that are not essential
to meet the imposed constraints.
exchange_reactions : bool, False
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool, False
Consider adding demand reactions for all metabolites.
Returns
-------
ensemble : medusa.core.Ensemble
The ensemble object created from the gapfill solutions.
"""
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions,
integer_threshold=integer_threshold)
solutions = gapfiller.fill(iterations=iterations)
print("finished gap-filling. Constructing ensemble...")
ensemble = _build_ensemble_from_gapfill_solutions(model,solutions,
universal=universal)
return ensemble
|
1e5b2c6e413afc1b745867f931d4fbc7c33babcc
| 3,647,585
|
import torch
def reparameterize(mu, logvar, n_samples=1):
"""Reparameterization trick.
Args:
mu (torch.Tensor): Mean.
logvar (torch.Tensor): Logarithm of variation.
n_samples (int): The number of samples.
Returns:
torch.Tensor: Samples drawn from the given Gaussian distribution.
The shape is equal to mu if n_samples is 1,
and (n_samples, *mu.shape) if n_samples is larger than 1.
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn(n_samples, *std.size(), device=std.device)
z = mu + eps * std
return z.squeeze(0)
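# Hedged usage sketch (added for illustration, not part of the original source):
# a batch of 4 diagonal Gaussians over 8 dimensions with zero mean and unit variance.
_mu = torch.zeros(4, 8)
_logvar = torch.zeros(4, 8)
print(reparameterize(_mu, _logvar).shape)                # -> torch.Size([4, 8])
print(reparameterize(_mu, _logvar, n_samples=5).shape)   # -> torch.Size([5, 4, 8])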
|
726473147ee28f470ad7d543e2b36bc512ffd0ae
| 3,647,586
|
import numpy as np

def rotationMatrixFromNormals(v0, v1, tol=1e-20):
    """
    Performs the minimum rotation that maps the direction of the vector v0 onto the direction of v1.
    The axis of rotation is v0 x v1
    https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
    :param numpy.array v0: vector of length 3
    :param numpy.array v1: vector of length 3
    :param tol = 1e-20: tolerance. If the norm of the cross product between the two vectors is below this, no rotation is performed
    :rtype: numpy.array, 3x3
    :return: rotation matrix which rotates the frame so that v0 is aligned with v1
    """
    # ensure both vectors have three components
    assert len(v0) == 3, "Length of v0 should be 3"
    assert len(v1) == 3, "Length of v1 should be 3"
# ensure both are true normals
n0 = v0*1./np.linalg.norm(v0)
n1 = v1*1./np.linalg.norm(v1)
n0dotn1 = n0.dot(n1)
# define the rotation axis, which is the cross product of the two vectors
rotAx = crossProd(n0,n1)
if np.linalg.norm(rotAx) < tol:
return np.eye(3,dtype=float)
rotAx *= 1./np.linalg.norm(rotAx)
cosT = n0dotn1/(np.linalg.norm(n0)*np.linalg.norm(n1))
sinT = np.sqrt(1.-n0dotn1**2)
ux = np.array([[0., -rotAx[2], rotAx[1]], [rotAx[2], 0., -rotAx[0]], [-rotAx[1], rotAx[0], 0.]],dtype=float)
return np.eye(3,dtype=float) + sinT*ux + (1.-cosT)*(ux.dot(ux))
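# Hedged usage sketch (added for illustration, not part of the original source).
# It assumes crossProd is a module-level helper equivalent to numpy.cross.
_v0 = np.array([1., 0., 0.])
_v1 = np.array([1., 1., 0.]) / np.sqrt(2.)
_R = rotationMatrixFromNormals(_v0, _v1)
print(np.allclose(_R.dot(_v0), _v1))  # -> True: the rotation maps v0 onto v1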
|
946110994a3567871df4b60a3c6814f9ab092ad1
| 3,647,587
|
import numpy as np

def P_to_array(P: "NestedDicts") -> np.ndarray:  # NestedDicts: module-level alias for the nested-dict transition format
""" Converts a transition matrix in nested dictionary format to a numpy array.
P is usually given as starting state -> action -> ending state w/ data, we reorder this to
action -> starting state -> ending state -> transition probability.
"""
# Action, Starting State, Ending State, value is probability
out = np.zeros(shape=(len(P[0]), len(P), len(P)))
for start_state, actions in P.items():
for action, results in actions.items():
for prob, end_state, _, __ in results:
out[action, start_state, end_state] += prob
return out
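# Hedged usage sketch (added for illustration, not part of the original source):
# a 2-state, 2-action MDP in the gym-style nested-dict format
# start_state -> action -> [(prob, end_state, reward, done), ...].
_P = {
    0: {0: [(1.0, 0, 0.0, False)],
        1: [(0.5, 0, 0.0, False), (0.5, 1, 1.0, True)]},
    1: {0: [(1.0, 1, 0.0, True)],
        1: [(1.0, 0, 0.0, False)]},
}
_T = P_to_array(_P)
print(_T.shape)   # -> (2, 2, 2): (action, start state, end state)
print(_T[1, 0])   # -> [0.5 0.5]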
|
3a107b3cff6b46b8afc93705bebef84bcbcad6ca
| 3,647,588
|
def get_available_smc_versions():
"""
Return list of available SMC versions. SMC versioning is done by
d70/smc:v6.1.2. Version returned is after the colon.
"""
return [repotag for image in get_images(filter='d70/smc')
for repotag in image.get('RepoTags')]
|
3ddb2908501ebf2ce648f7ebfe00000eb429ffad
| 3,647,589
|
def boolean_fn2(a, b, c):
""" Return the truth value of (a ∧ b) ∨ (-a ∧ -b) """
return a and b or not a and not b
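# Hedged usage sketch (added for illustration, not part of the original source):
# the expression is logical equivalence (XNOR) of a and b; the c argument is unused.
for _a in (False, True):
    for _b in (False, True):
        print(_a, _b, boolean_fn2(_a, _b, None))
# True only when a and b have the same truth value.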
|
c1ef37b3503866e9460fb95c4ab609278c6cff52
| 3,647,590
|
from utils.ica_base import get_configuration
def get_ica_gds_configuration() -> libgds.Configuration:
"""
    Get the configuration object for ica gds
:return:
"""
return get_configuration(libgds.Configuration)
|
9e2efd47bca098fb8a03dd4412269a18663e8dfa
| 3,647,591
|
import torch
import time
import cv2
import numpy as np
def retry_load_images(image_paths, retry=10, backend="pytorch"):
"""
This function is to load images with support of retrying for failed load.
Args:
image_paths (list): paths of images needed to be loaded.
retry (int, optional): maximum time of loading retrying. Defaults to 10.
backend (str): `pytorch` or `cv2`.
Returns:
imgs (list): list of loaded images.
"""
for i in range(retry):
imgs = []
for image_path in image_paths:
with g_pathmgr.open(image_path, "rb") as f:
img_str = np.frombuffer(f.read(), np.uint8)
img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
imgs.append(img)
if all(img is not None for img in imgs):
if backend == "pytorch":
imgs = torch.as_tensor(np.stack(imgs))
return imgs
else:
            logger.warning("Reading failed. Will retry.")
time.sleep(1.0)
if i == retry - 1:
raise Exception("Failed to load images {}".format(image_paths))
|
5a34ababc157548c6d9f673c3ff0934df9eccb3d
| 3,647,592
|
def b2p(exts):
"""Convert two points of a polygon into its bounding box.
(Rectangular polygon parallel with axes.)
"""
p0x = exts[0][0]
p0y = exts[0][1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = exts[0][2]
p1y = exts[0][3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
i = []
if exts[1] is not None:
for h in exts[1]:
p0x = h[0]
p0y = h[1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = h[2]
p1y = h[3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
return e, i
|
11a51cffb8143b01b60904bef4c92e6f7335dc1d
| 3,647,593
|
from subprocess import getoutput

def unix_to_human_time(utime, alt_format=0):
"""convert Unix time to Human readable time"""
try:
fraction = utime - int(utime)
except OverflowError as err:
t = 'Unix time %s too long to convert, substituting 0' % utime
# TODO log this time issue
        print('NEED TO LOG THIS TIME ISSUE:', t)
fraction = utime = 0
# handle special case of -1 (not handled correctly by 'date')
    if int(utime) == -1:
return 1969, 12, 31, 23, 59, 59
cmd = 'date -u -d "1970-01-01 %d sec" +"%%Y %%m %%d %%H %%M %%S"' % int(utime)
try:
result = getoutput(cmd)
# s = split(result)
s = result.split()
# s[5] = atoi(s[5]) + fraction
s[5] = int(s[5]) + fraction
except ValueError as err:
t = 'date conversion error\ndate command was: %sdate command returned: %s' % (cmd, result)
# TODO log this time issue
        print('NEED TO LOG THIS TIME ISSUE:', t)
raise ValueError(err)
if alt_format == 1:
return "%s_%s_%s_%s_%s_%06.3f" % tuple(s)
elif alt_format == 0:
return "%s/%s/%s %s:%s:%06.3f" % tuple(s)
else: # i.e. alt_format == 2
        s[0:5] = list(map(int, s[0:5]))
return tuple(s)
|
0f296ec2f394568fb973ce8371cc262c2e21ffc8
| 3,647,594
|
import re
def read_conf_file_interface(config_name):
"""
Get interface settings.
@param config_name: Name of WG interface
@type config_name: str
@return: Dictionary with interface settings
@rtype: dict
"""
conf_location = WG_CONF_PATH + "/" + config_name + ".conf"
with open(conf_location, 'r', encoding='utf-8') as file_object:
file = file_object.read().split("\n")
data = {}
for i in file:
if not regex_match("#(.*)", i):
if len(i) > 0:
if i != "[Interface]":
tmp = re.split(r'\s*=\s*', i, 1)
if len(tmp) == 2:
data[tmp[0]] = tmp[1]
return data
|
7f51585d05472fa7fbc26e89b150e540f7013be1
| 3,647,595
|
import re
def book_transformer(query_input, book_dict_input):
"""grabs the book and casts it to a list"""
sample_version = versions_dict.versions_dict()
query_input[1] = query_input[1].replace('[', '').replace(']', '').lstrip().rstrip().upper()
for i in list(book_dict_input.keys()):
result = re.search(i, query_input[1])
if result is not None:
book = book_dict_input[result.group(0)]
reduced_query = query_input[1].replace(result.group(0), '')
return [query_input[0], book, reduced_query]
return [sample_version['KJV'], 'error book not found']
|
259e5520aa762749169b0d529c8f1e8836815a16
| 3,647,596
|
import json
from flask import Response
def custom_response(message, status, mimetype):
"""handle custom errors"""
resp = Response(json.dumps({"message": message, "status_code": status}),
status=status,
mimetype=mimetype)
return resp
|
6ec8aa2784e6dd0420c3d246ab5a2a2b6e20db1e
| 3,647,597
|
def embed_network(input_net, layers, reuse_variables=False):
"""Convolutional embedding."""
n_layers = int(len(layers)/3)
tf.logging.info('Number of layers: %d' % n_layers)
# set normalization and activation functions
normalizer_fn = None
activation_fn = tf.nn.softplus
tf.logging.info('Softplus activation')
net = input_net
for ilayer in range(n_layers):
tf.logging.info('Building layer: %d, %d, %d'
% (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
int(layers[ilayer*3 + 2])))
net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
int(layers[ilayer*3]),
stride=int(layers[ilayer*3 + 2]),
scope='layer_wt_%d' % ilayer,
reuse=reuse_variables,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn)
return net
|
d525dbf59ce860af6e0bc0de6c21fa55454c3f55
| 3,647,598
|
def sample_variance(sample1, sample2):
"""
    Calculate the pooled sample variance of two samples. After learn.co
"""
n_1, n_2 = len(sample1), len(sample2)
var_1, var_2 = variance(sample1), variance(sample2)
return (var_1 + var_2)/((n_1 + n_2)-2)
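# Hedged worked example (added for illustration, not part of the original source).
# Assuming the module-level variance() helper returns the sum of squared
# deviations about the mean, the result is the pooled sample variance
#     s_p^2 = (SS_1 + SS_2) / (n_1 + n_2 - 2).
# For sample1 = [2, 4, 6] (SS_1 = 8) and sample2 = [1, 3] (SS_2 = 2):
#     sample_variance(sample1, sample2) = (8 + 2) / (3 + 2 - 2) = 10 / 3 ≈ 3.33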
|
e464ac7434139409a430341bb39b107d9a15eacf
| 3,647,599
|