content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def cache_mixin(cache, session):
    """Build a ``CacheMixinBase`` subclass bound to *cache* and *session*.

    The returned class carries the event hook, cache client and DB
    session as class attributes, ready to be mixed into a model.
    """
    class _Cache(CacheMixinBase):
        _hook = EventHook([cache], session)
        _cache_client = cache
        _db_session = session
    return _Cache
|
79368b4cc2680ff95be520c9d877dcca5a6a1eef
| 3,639,000
|
def _read_output(path):
    """Read CmdStan output csv file.
    Parameters
    ----------
    path : str
    Returns
    -------
    Dict[str, Any]
    """
    columns, data, comments = _read_output_file(path)
    config = _process_configuration(comments)
    # Number of stored warmup draws: zero unless save_warmup was enabled.
    n_warmup = (
        int(config.get("save_warmup", 0))
        * int(config.get("num_warmup", 0))
        // int(config.get("thin", 1))
    )
    warmup_draws, posterior_draws = data[:n_warmup], data[n_warmup:]
    # Columns ending in "__" are sampler diagnostics; the rest are parameters.
    stats_cols = {name: idx for name, idx in columns.items() if name.endswith("__")}
    param_cols = {name: idx for name, idx in columns.items() if name not in stats_cols}
    return {
        "sample": posterior_draws,
        "sample_warmup": warmup_draws,
        "sample_columns": param_cols,
        "sample_stats_columns": stats_cols,
        "configuration_info": config,
    }
|
aba1fe156de9f2fe9f595d5e5e64994b9eab539b
| 3,639,001
|
def linear_chance_constraint_noinit(a,M,N,risk,num_gpcpoly,n_states,n_uncert,p):
    """
    Pr{a^T x + b <= 0} >= 1 - eps, converted to an SOCP constraint.

    Returns the flattened mean-term vector ``a_hat`` (rounded to 5
    decimals) and the deterministic covariance factor
    ``Sigma_det = N^T U^T`` (also rounded).
    """
    dim = num_gpcpoly * n_states
    a_hat = np.kron(a.T, M)
    # Diagonal matrix holding the first column of `a`.
    diag_a = np.zeros((n_states, n_states))
    for k in range(n_states):
        diag_a[k, k] = a[k, 0]
    #print(diag_a)
    U = np.kron(diag_a, np.identity(num_gpcpoly))
    # Sigma_det = U*N*N.T*U.T
    Sigma_det = N.T * U.T
    a_flat = np.reshape(np.round(np.array(a_hat, dtype=float), 5), dim)
    return a_flat, np.round(np.array(Sigma_det, dtype=float), 5)
|
9b6421213f3f2251824a45fc04b69146cfeebbaa
| 3,639,002
|
import fastapi
def patroni(response: responses.Response,
            session: sqlalchemy.orm.Session = fastapi.Depends(models.patroni.get_session)):
    """Health-check endpoint reporting reachability of the Patroni database."""
    service_name = 'Patroni'
    return db_health(response, session, service_name)
|
fabaf09e2754e0e89ff17312e9a3f86d48972dc0
| 3,639,003
|
def authorization_code_grant_step1(request):
    """
    Shortcut for step 1 of the authorization-code grant.

    Wraps the Django request for the OAuth2 server, runs it through a
    ``CodeGrant`` and returns the authorization result containing the code.
    """
    wrapped_request = oauth2_request_class()(request)
    return CodeGrant(oauth2_server, wrapped_request).authorization()
|
5227158127b5313b17c27fb0f351f8294faec840
| 3,639,004
|
async def activity(
    guild_id: int,
    discord_id: int,
    activity_input: DestinyActivityInputModel,
    db: AsyncSession = Depends(get_db_session),
):
    """Return information about the user their stats in the supplied activity ids"""
    # Resolve the requesting user's profile first.
    profile = await discord_users.get_profile_from_discord_id(discord_id)
    handler = DestinyActivities(db=db, user=profile)
    # Make sure the activity DB entries are current before computing stats.
    await handler.update_activity_db()
    return await handler.get_activity_stats(
        activity_ids=activity_input.activity_ids,
        mode=activity_input.mode,
        character_class=activity_input.character_class,
        character_ids=activity_input.character_ids,
        start_time=activity_input.start_time,
        end_time=activity_input.end_time,
    )
|
25a2eb719648cbdb6161baa2b1de1570e07a42ea
| 3,639,005
|
def put_topoverlays(image, rects, alpha=0.3):
    """
    a function for drawing some rectangles with random color
    Args:
        image: an opencv image with format of BGR
        rects: a list of opencv rectangle
        alpha: a float, blend level
    Return:
        An opencv image and the list of drawn boxes
    """
    height, width, _ = image.shape
    canvas = np.ones(shape=image.shape).astype(np.uint8)
    boxes = []
    for rect in rects:
        # Widen to the right and shift the top edge up (clamped to image).
        left = int(rect[0])
        right = int(min(rect[0] + 1.7 * (rect[2] - rect[0]), width))
        top = int(rect[1])
        bottom = int(max(rect[1] - 0.2 * (rect[3] - rect[1]), 0))
        boxes.append([left, top, right, bottom])
        cv2.rectangle(canvas, (left, top), (right, bottom), (100, 100, 0), -1)
        cv2.rectangle(canvas, (left, top), (right, bottom), (0, 100, 255), 2)
    image = cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0, image)
    return image, boxes
|
9f6cf0cfd33214503905a16384fade2976bad190
| 3,639,006
|
def extract_next_token(link):
    """Extract the pagination token pointing at the next page of data.

    ``link`` is a Link-header style value such as
    ``'<url?token=...>; rel="next"'``. The token arrives percent-encoded,
    so it is unquoted before being passed back as a request param.
    """
    url = link.split(";")[0].strip("<>")
    token = url.split("?token=")[1]
    return unquote(token)
|
f0eae72cf8d99e816dfff1c6345f0a9b73abdd21
| 3,639,007
|
import urllib
import json
def get_host_country(host_ip):
    """Return the country code of the target IP, looked up via ipinfo.io.

    Best-effort: returns 'NOT DEFINED' when the lookup fails for any
    expected reason (network error, malformed response, missing key).
    """
    country = 'NOT DEFINED'
    try:
        # Bounded timeout so a dead network cannot hang the caller.
        response_body = urllib.request.urlopen(
            f'https://ipinfo.io/{host_ip}', timeout=10).read().decode('utf8')
        country = json.loads(response_body)['country']
    except (OSError, ValueError, KeyError):
        # OSError covers URLError/HTTPError/socket errors; ValueError
        # covers JSON decode and invalid-URL errors. Deliberately a
        # best-effort fallback — never raise to the caller.
        pass
    return country
|
05df2c54dd275c654631cb188cbfdaa0d8e15ed9
| 3,639,008
|
import traceback
def astng_wrapper(func, modname):
    """wrapper to give to ASTNGManager.project_from_files"""
    # NOTE: Python 2 syntax throughout (print statement, `except X, exc`).
    print 'parsing %s...' % modname
    try:
        return func(modname)
    except ASTNGBuildingException, exc:
        # Expected parse failure: report it and fall through (returns None).
        print exc
    except Exception, exc:
        # Unexpected failure: dump the full traceback but keep going.
        traceback.print_exc()
|
fbb1b3090bfc7b93258fbbcefea1a8d1463dded2
| 3,639,009
|
def clean(s):
    """Normalize repeated vowels in *s*, then apply the pattern list."""
    fixed = liblang.fixRepetedVowel(s)
    return patList.do(fixed)
|
d81f16dfb35b4f218af5017de1eef8a005d3e7de
| 3,639,010
|
import sys
def filename(s, errors="strict"):
    """Decode *s* using the filesystem encoding.

    Shorthand for ``force_unicode(s, sys.getfilesystemencoding(), errors)``.
    """
    fs_encoding = sys.getfilesystemencoding()
    return force_unicode(s, fs_encoding, errors)
|
bd8a178cc1216e04f95699418a90dbe52a2f708f
| 3,639,011
|
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
    """Elastic deformation of image as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    assert image.ndim == 3
    plane_shape = image.shape[:2]
    # Smoothed random displacement fields in [-alpha, alpha].
    dx = alpha * gaussian_filter(random_state.rand(*plane_shape) * 2 - 1,
                                 sigma, mode="constant", cval=0)
    dy = alpha * gaussian_filter(random_state.rand(*plane_shape) * 2 - 1,
                                 sigma, mode="constant", cval=0)
    grid_x, grid_y = np.meshgrid(np.arange(plane_shape[0]),
                                 np.arange(plane_shape[1]), indexing='ij')
    coords = [np.reshape(grid_x + dx, (-1, 1)), np.reshape(grid_y + dy, (-1, 1))]
    # Warp each channel independently with the same displacement field.
    warped = np.empty_like(image)
    for channel in range(image.shape[2]):
        warped[:, :, channel] = map_coordinates(
            image[:, :, channel], coords, order=spline_order, mode=mode).reshape(plane_shape)
    return warped
|
ddabb6a15deba901398f799352216b2c89652296
| 3,639,012
|
import json
def format_search_log(json_string):
    """
    Template filter returning the sorted list of attributes selected in a
    search-log entry.
    usage example {{ model_object|format_search_log }}
    """
    query_json = json.loads(json_string)
    # `or []` tolerates entries with a missing or null `_source` clause
    # (the original crashed with TypeError on sorted(None)).
    # The original also built an unused `context` dict; removed as dead code.
    return sorted(query_json.get('_source') or [])
|
eb5aa21590474acaee7b2b94a1cfdc52c080d017
| 3,639,013
|
def set_variable(value,variable=None):
    """Load some value into session memory by creating a new variable.
    If an existing variable is given, load the value into the given variable.
    """
    sess = get_session()
    if variable is None:
        # No target given: create a fresh variable and initialize it.
        variable = tf.Variable(initial_value=value)
        sess.run([tf.variables_initializer([variable])])
    else:
        # Overwrite the existing variable in place.
        sess.run([tf.assign(variable, value)])
    return variable
|
8256a27c2a446e600e6cfe818c5e4c60e18f1d04
| 3,639,014
|
def matrix ( mtrx , i , j ) :
    """Get i,j element from matrix-like object
    >>> mtrx = ...
    >>> value = matrix ( m , 1 , 2 )
    Tries, in order: TMatrix accessor, callable access ``mtrx(i, j)``,
    ``mtrx[i, j]`` and ``mtrx[i][j]``.
    Raises TypeError when no access pattern succeeds.
    """
    if isinstance ( mtrx , ROOT.TMatrix ) :
        if i < mtrx.GetNrows () and j < mtrx.GetNcols () :
            return mtrx ( i , j )
    if callable ( mtrx ) :
        try :
            return mtrx ( i , j )
        except :
            pass
    # BUG FIX: the original referenced an undefined name `m` in the two
    # fallbacks below, so they always failed silently (NameError swallowed
    # by the bare except).
    try :
        return mtrx [ i , j ]
    except :
        pass
    try :
        return mtrx [ i ] [ j ]
    except :
        pass
    # BUG FIX: the original *returned* the TypeError instance instead of
    # raising it, so callers silently received an exception object.
    raise TypeError ( "Can't get m(%d,%d) for m=%s" % ( i , j , mtrx ) )
|
1101d5bd4bf569f11ec7ee41700171906e58e743
| 3,639,015
|
def check_type(instance, *classes):
    """Check if object is instance of given class"""
    # Match by class *name*: either the concrete type or any base in its MRO.
    wanted = set(classes)
    if type(instance).__name__ in wanted:
        return True
    return any(base.__name__ in wanted for base in getmro(type(instance)))
|
1761be42fd1a781ef5b6d94b42006fdcb2789e8b
| 3,639,016
|
def test_get_earth_imperative_solution(solar_system):
"""
## Imperative Solution
The first example uses flow control statements to define a
[Imperative Solution]( https://en.wikipedia.org/wiki/Imperative_programming). This is a
very common approach to solving problems.
"""
def get_planet_by_name(name, the_solar_system):
try:
planets = the_solar_system['star']['planets']
for arc in planets.values():
for planet in arc:
if name == planet.get('name', None):
return planet
except KeyError:
pass
return None
actual = get_planet_by_name('Earth', solar_system)
expected = {'Number of Moons': '1', 'diameter': 12756, 'has-moons': True, 'name': 'Earth'}
assert actual == expected
|
f966886e3384547803106c404a21e2bb7ecd8fa9
| 3,639,017
|
import os
def parse(request):
    """
    Render a form that lets an authorized user import and parse the data
    files in the incoming directory.
    """
    incoming = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'incoming')
    if request.method == 'POST':
        parse_form = forms.Form(request.POST)
        if parse_form.is_valid():
            options = electionaudits.parsers.set_options(["-c", "-s"])
            electionaudits.parsers.parse([incoming], options)
    else:
        parse_form = forms.Form()
    return render_to_response('electionaudits/parse.html', {
        'parse_form': parse_form,
        'parse': os.listdir(incoming),
    })
|
9645436986ae644db5f8537e2c6c1ebee1d91e94
| 3,639,018
|
import glob
import tqdm
def animate(map, time, phase0=0.0, res=75, interval=75):
    """Animate TESS's orbit around the Earth alongside a rendered Earth map.

    Parameters
    ----------
    map : starry map object (parameter deliberately shadows the builtin
        `map`; kept for interface compatibility with existing callers)
    time : array of times (TJD offsets) to animate over
    phase0 : float, initial rotational phase offset in degrees
    res : int, rendered image resolution (res x res)
    interval : int, frame interval in ms for FuncAnimation

    Returns
    -------
    The per-frame integrated flux, ``np.nansum(Z, axis=(1, 2))``.
    """
    # Load the SPICE data
    ephemFiles = glob.glob('../data/TESS_EPH_PRE_LONG_2018*.bsp')
    tlsFile = '../data/tess2018338154046-41240_naif0012.tls'
    solarSysFile = '../data/tess2018338154429-41241_de430.bsp'
    #print(spice.tkvrsn('TOOLKIT'))
    for ephFil in ephemFiles:
        spice.furnsh(ephFil)
    spice.furnsh(tlsFile)
    spice.furnsh(solarSysFile)
    # JD time range
    allTJD = time + TJD0
    nT = len(allTJD)
    # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float`
    # is what the alias always meant.
    allET = np.zeros((nT,), dtype=float)
    for i, t in enumerate(allTJD):
        allET[i] = spice.unitim(t, 'JDTDB', 'ET')
    # Calculate positions of TESS, the Earth, and the Sun
    tess = np.zeros((3, len(allET)))
    sun = np.zeros((3, len(allET)))
    for i, et in enumerate(allET):
        outTuple = spice.spkezr('Mgs Simulation', et, 'J2000', 'NONE', 'Earth')
        tess[0, i] = outTuple[0][0] * REARTH
        tess[1, i] = outTuple[0][1] * REARTH
        tess[2, i] = outTuple[0][2] * REARTH
        outTuple = spice.spkezr('Sun', et, 'J2000', 'NONE', 'Earth')
        sun[0, i] = outTuple[0][0] * REARTH
        sun[1, i] = outTuple[0][1] * REARTH
        sun[2, i] = outTuple[0][2] * REARTH
    # Figure setup
    fig = plt.figure(figsize=(8, 8))
    ax = np.zeros((2, 2), dtype=object)
    ax[0, 0] = plt.subplot(221)
    ax[0, 1] = plt.subplot(222)
    ax[1, 0] = plt.subplot(223, sharex=ax[0, 0], sharey=ax[0, 0])
    ax[1, 1] = plt.subplot(224, sharex=ax[0, 0], sharey=ax[0, 0])
    for axis in [ax[0, 0], ax[1, 0], ax[1, 1]]:
        axis.set_aspect(1)
        axis.set_xlim(-65, 65)
        axis.set_ylim(-65, 65)
        for tick in axis.xaxis.get_major_ticks() + axis.yaxis.get_major_ticks():
            tick.label.set_fontsize(10)
    i = 0
    # Orbit xz
    ax[0, 0].plot(tess[0], tess[2], "k.", ms=1, alpha=0.025)
    txz, = ax[0, 0].plot(tess[0, i], tess[2, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
    x = sun[0, i] * norm
    y = sun[2, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxz = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxz = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[0, 0].add_artist(dayxz)
    ax[0, 0].add_artist(nightxz)
    ax[0, 0].set_ylabel("z", fontsize=16)
    # Orbit xy
    ax[1, 0].plot(tess[0], tess[1], "k.", ms=1, alpha=0.025)
    txy, = ax[1, 0].plot(tess[0, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
    x = sun[0, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 0].add_artist(dayxy)
    ax[1, 0].add_artist(nightxy)
    ax[1, 0].set_xlabel("x", fontsize=16)
    ax[1, 0].set_ylabel("y", fontsize=16)
    # Orbit zy
    ax[1, 1].plot(tess[2], tess[1], "k.", ms=1, alpha=0.025)
    tzy, = ax[1, 1].plot(tess[2, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
    x = sun[2, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayzy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightzy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 1].add_artist(dayzy)
    ax[1, 1].add_artist(nightzy)
    ax[1, 1].set_xlabel("z", fontsize=16)
    # Render the image
    t = (time - time[0]) / (time[-1] - time[0])
    t = 2 * (t - 0.5)
    Z = np.empty((len(time), res, res))
    north_pole = np.empty((len(time), 3))
    y = np.array(map[:, :, :])
    for i in tqdm(range(len(time))):
        # Reset the map and rotate it to the correct phase
        # in the mean equatorial (J2000) frame
        map[:, :, :] = y
        '''
        map.axis = [0, 1, 0]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        '''
        # Rotate so that TESS is along the +z axis
        r = np.sqrt(np.sum(tess[:, i] ** 2))
        costheta = np.dot(tess[:, i], [0, 0, r])
        axis = np.cross(tess[:, i], [0, 0, r])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        R = starry.RAxisAngle(axis, theta)
        north_pole[i] = np.dot(R, [0, 0, 1])
        source = np.dot(R, sun[:, i])
        source /= np.sqrt(np.sum(source ** 2, axis=0))
        '''
        map.axis = axis
        map.rotate(theta)
        '''
        # Align the pole of the Earth with the "north" direction
        costheta = np.dot([0, 1, 0], north_pole[i])
        axis = np.cross([0, 1, 0], north_pole[i])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        map.axis = axis
        map.rotate(theta)
        # Rotate to the correct phase
        map.axis = north_pole[i]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        # Finally, rotate the image so that north always points up
        # This doesn't actually change the integrated flux!
        map.axis = [0, 0, 1]
        theta = 180. / np.pi * np.arctan2(north_pole[i, 0], north_pole[i, 1])
        map.rotate(theta)
        R = starry.RAxisAngle([0, 0, 1], theta)
        north_pole[i] = np.dot(R, north_pole[i])
        source = np.dot(R, source)
        # Render the image
        Z[i] = map.render(t=t[i], source=source, res=res)[0]
    # Reset the map
    map[:, :, :] = y
    map.axis = [0, 1, 0]
    # Image
    vmin = 0.0
    vmax = np.nanmax(Z)
    cmap.set_under(cmap(vmin))
    image = ax[0, 1].imshow(Z[0], extent=(-1, 1, -1, 1),
                            origin="lower", cmap=cmap,
                            vmin=vmin, vmax=vmax)
    npl, = ax[0, 1].plot(north_pole[0, 0], north_pole[0, 1], marker=r"$N$", color="r")
    spl, = ax[0, 1].plot(-north_pole[0, 0], -north_pole[0, 1], marker=r"$S$", color="b")
    if north_pole[0, 2] > 0:
        npl.set_visible(True)
        spl.set_visible(False)
    else:
        npl.set_visible(False)
        spl.set_visible(True)
    ax[0, 1].axis("off")
    ax[0, 1].set_xlim(-1.1, 1.1)
    ax[0, 1].set_ylim(-1.1, 1.1)
    # Function to animate each frame
    def update(i):
        # Update orbit
        txz.set_xdata(tess[0, i])
        txz.set_ydata(tess[2, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
        x = sun[0, i] * norm
        y = sun[2, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxz.set_theta1(theta - 90)
        dayxz.set_theta2(theta + 90)
        nightxz.set_theta1(theta + 90)
        nightxz.set_theta2(theta + 270)
        txy.set_xdata(tess[0, i])
        txy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
        x = sun[0, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxy.set_theta1(theta - 90)
        dayxy.set_theta2(theta + 90)
        nightxy.set_theta1(theta + 90)
        nightxy.set_theta2(theta + 270)
        tzy.set_xdata(tess[2, i])
        tzy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
        x = sun[2, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayzy.set_theta1(theta - 90)
        dayzy.set_theta2(theta + 90)
        nightzy.set_theta1(theta + 90)
        nightzy.set_theta2(theta + 270)
        image.set_data(Z[i])
        npl.set_xdata(north_pole[i, 0])
        npl.set_ydata(north_pole[i, 1])
        spl.set_xdata(-north_pole[i, 0])
        spl.set_ydata(-north_pole[i, 1])
        if north_pole[i, 2] > 0:
            npl.set_visible(True)
            spl.set_visible(False)
        else:
            npl.set_visible(False)
            spl.set_visible(True)
        return txz, dayxz, nightxz, txy, dayxy, nightxy, \
            tzy, dayzy, nightzy, image, npl, spl
    # Generate the animation
    ani = FuncAnimation(fig, update, frames=len(time), interval=interval,
                        blit=False)
    try:
        # In a Jupyter (zmq) shell, embed the animation as HTML5 video.
        if 'zmqshell' in str(type(get_ipython())):
            plt.close()
            display(HTML(ani.to_html5_video()))
        else:
            raise NameError("")
    except NameError:
        plt.show()
        plt.close()
    return np.nansum(Z, axis=(1, 2))
|
0fa39a0299a8d8cd75b0475f45e49caa731925a7
| 3,639,019
|
import functools
def get_params_from_ctx(func=None, path=None, derive_kwargs=None):
    """
    Derive parameters for this function from ctx, if possible.
    :param str path:
        A path in the format ``'ctx.arbitraryname.unpackthistomyparams'``
        to use to find defaults for the function.
        Default: ``'ctx.mymodulename.myfuncname'``
        It's good to pass this explicitly to make it clear where your arguments
        are coming from.
    :param Callable derive_kwargs:
        Overkill. Is passed ``ctx`` as first arg, expected to
        return dict of the format ``{'argname': 'defaultvalueforparam'}``.
    **Examples**::
        @get_params_from_ctx(path='ctx.randompath')  # just 'ctx' works as well
        def myfuncname(ctx, requiredparam1, namedparam2='trulyoptional'):
            print(requiredparam1, namedparam2)
    If your default is a callable we will call it with ``args[0]``. This is how
    :meth:`invoke.config.Lazy` works under the hood.
    That is, this is a valid function::
        @get_params_from_ctx
        def myfuncname(ctx,
                       namedparam0=Lazy('ctx.mynamedparam0'),
                       namedparam1=lambda ctx: ctx.myvalue * 4):
            print(namedparam1)  # 4, if myvalue == 1 :)
    **Why do I need this?** Suppose this configuration:
    ``ctx = {"myfuncname" : {"requiredparam1" : 392, "namedparam2" : 199}}``
    And this task, where it's important that we always have a value
    for some parameter, but we don't always want to supply
    it from the command-line::
        @task
        def myfuncname(ctx, requiredparam=None, namedparam2=None):
            requiredparam1 = requiredparam1 or ctx.myfuncname.requiredparam1
            if not requiredparam1:
                raise ValueError("Need a value for requiredparam1, but didn't want
                user to always have to give one.")
            namedparam2 = namedparam2 or ctx.myfuncname.namedparam2
            print(requiredparam1, namedparam2)
    This task can be invoked from the command line like so::
        $ invoke myfuncname
        (392, 199)
    Other functions/tasks can re-use our task with custom parameters, and
    the cmd-line user can override our config's defaults if he or she wishes.
    However, the semantics of this function are hidden behind the boilerplate
    of finding values for each argument.
    ``Requiredparam1`` and ``namedparam2`` are really required, we just can't
    reveal that in the function signature, or ```invoke``` will force the user
    to give one every time they call our task, even though we have a default in
    the config we defined.
    One solution is something like this::
        def myfuncname(ctx, requiredparam1, namedparam2):
            print(param1, requiredparam1)::
        @task(name=myfuncname)
        def myfuncname_task(ctx, requiredparam1=None, namedparam2=None)
            requiredparam1 = namedparam1 or ctx.myfuncname.namedparam1
            namedparam2 = namedparam2 or ctx.myfuncname.namedparam2
            myfuncname(ctx, requiredparam1, namedparam2)
    This solution decouples the core of your code from invoke, which
    could be seen as a plus. However, if we were going to write this much
    boiler-plate and passing stuff around, we could have stuck with argparse.
    Also, notice that each parameter name appears *6 times*, and the function name
    appears *3 times*. Maybe it's not the worst nightmare for maintainability,
    but it sure gives writing a new re-usable task quite a lot of friction, so
    most just won't do it. They'll write the task, and you'll either get runtime
    ``Nones`` because you forgot to load a newly added param from the ctx, or you'll
    have a cmd-line experience so painful that people generate calls to your
    task from their own configs and scripts.
    Here's a better solution. It mirrors the logic of the above pair of functions,
    but with a simple decorator instead.::
        @task
        @get_params_from_ctx
        def myfuncname(ctx, requiredparam1, namedparam2='trulyoptional')
            print(requiredparam1, namedparam2)
        ns.configure({"tasks": {"myfuncname" : {"requiredparam1" : 392}}})
    The semantics of the raw python function now match the cmd-line task:
    * You can call it with no arguments, and as long as a proper value is
    found in ctx or in the signature of the function, it will run just like
    you called it from the cmd-line.
    * If no value was passed, and no default can be found, you will get a normal
    Python error.
    The cascading order for finding an argument value is as follows:
    1. directly passed (i.e. ``task(ctx, 'arghere')`` or ``--arg arghere`` on cmd line
    2. from config (``ctx`` arg) (defaults to ctx.__module__.func.__name__)
    3. function defaults (``def myfunc(ctx, default=1)``) - default parameter values
    that are callable are called
    with callable(ctx) to get the value that should be used for a default.
    .. versionadded:: 0.1
    """
    if func is None:  # Dirty hack taken from the wrapt documentation :)
        return partial(
            get_params_from_ctx, derive_kwargs=derive_kwargs, path=path
        )
    # Only up here to we can use it to generate ParseError when decorated func gets called.
    sig = signature(func)
    func_name = _get_full_name(func)
    func.ctx_path = path or 'ctx.{}'.format(func_name)
    debug("Set {}() param ctx-path to {!r}".format(func_name, func.ctx_path))
    if path:
        if path.endswith("."):
            raise ValueError(
                "Path can't end in .! Try 'ctx' instead of 'ctx.'."
            )
        if path.split(".")[0] not in names_for_ctx:
            raise ValueError(
                "Path {!r} into ctx for {}()'s args must start with 'ctx.' or 'c.'".format(
                    path, func_name
                )
            )
    user_passed_path = (
        path  # Necessary because otherwise doesn't go into closure on py2.
    )
    @functools.wraps(func)
    def customized_default_decorator(*args, **kwargs):
        """
        Creates a decorated function with the same argument list,
        but with almost every parameter optional. When called,
        looks for actually required params in ctx. Finally, calls
        original function.
        """
        # Will throw here if too many args/kwargs
        directly_passed = get_directly_passed(func, sig, args, kwargs)
        # Task.__call__ will error before us if ctx wasn't passed
        # Might want a non-task to be skippable, so just try to carry on without ctx.
        ctx = args[0] if args else None
        class fell_through:  # Cheapest sentinel I can come up with
            pass
        cache = {'derived': {}, 'ctx_argdict': {}}  # Don't have nonlocal in py2
        def get_directly_passed_arg(param_name):
            # Pop so that leftover entries don't shadow later lookups.
            return directly_passed.pop(param_name, fell_through)
        def call_derive_kwargs_or_error(param_name):
            if not derive_kwargs:
                return fell_through
            if not cache['derived']:
                cache['derived'] = derive_kwargs(ctx)
            result = cache['derived']
            return result.get(param_name, fell_through)
        def traverse_path_for_argdict():
            # Could just use eval(path) with a similar trick to invoke.Lazy.
            if user_passed_path is None and not ctx:
                return {}  # that's fine
            elif user_passed_path and not ctx:
                # If explicitly ask us to traverse (with a path), but
                # don't give ctx, what can we do?
                # msg = "You gave path {!r} for {!r} args but 'ctx' (arg[0]) was {!r}.".format(path, func_name, ctx)
                msg = "'ctx' (arg[0]) was {!r}. Cannot get dict from {} for args of {!r}.".format(
                    ctx, user_passed_path, func_name
                )
                raise DerivingArgsError(msg)
            path = func.ctx_path
            seq = path.split(".")
            looking_in = ctx.get('config', ctx)  # Gracefully handle Configs (not usual Contexts)
            seq.pop(0)
            while seq:
                key = seq.pop(0)
                try:
                    looking_in = looking_in[key]
                except (KeyError, AttributeError) as e:
                    # NOTE(review): the trailing comma below makes `msg` a
                    # 1-tuple, not a string — confirm this is intended.
                    msg = "while traversing path {!r} for {}() args.".format(path, func_name),
                    if user_passed_path:
                        reraise_with_context(
                            e,
                            msg,
                            DerivingArgsError
                        )
                    else:
                        debug("Ignoring {!r} {}".format(type(e).__name__, msg))
                        return {}
            return looking_in
        def get_from_ctx(param_name):
            # Traverse at most once per call; cached for later params.
            if not cache['ctx_argdict']:
                cache['ctx_argdict'] = traverse_path_for_argdict()
            return cache['ctx_argdict'].get(param_name, fell_through)
        param_name_to_callable_default = {
            param_name: param.default
            for param_name, param in signature(func).parameters.items()
            if param.default is not param.empty and callable(param.default)
        }
        def call_callable_default(param_name):
            if param_name in param_name_to_callable_default:
                return param_name_to_callable_default[param_name](ctx)
            return fell_through
        # Decide through cascading what to use as the value for each parameter
        args_passing = {}
        expecting = sig.parameters
        for param_name in expecting:
            possibilities = (
                # First, positionals and kwargs
                get_directly_passed_arg,
                # Then check ctx
                get_from_ctx,
                call_derive_kwargs_or_error,  # Not really used/tested
                call_callable_default,
            )
            passing = fell_through
            for p in possibilities:
                try:
                    passing = p(param_name)
                except Exception as e:
                    if type(e) is DerivingArgsError:
                        raise
                    reraise_with_context(
                        e,
                        "in {!r} step of deriving args for param {!r} of {}()".format(
                            p.__name__, param_name, func_name
                        ),
                        DerivingArgsError
                    )
                if passing is not fell_through:
                    debug("{}(): {} found value {:.25}... for param {!r}".format(
                        func_name, p.__name__, str(passing), param_name)
                    )
                    break
                else:
                    debug("{}(): {} failed to find value for param {!r}".format(func_name, p.__name__, param_name))
            if passing is not fell_through:
                args_passing[param_name] = passing
        # Now, bind and supply defaults to see if any are still missing.
        # Partial bind and then error because funcsigs error msg succ.
        ba = sig.bind_partial(**args_passing)
        # getcallargs isn't there on funcsig version.
        missing = []
        for param in sig.parameters.values():
            if param.name not in ba.arguments and param.default is param.empty:
                missing.append(param.name)
        # TODO contribute these improved error messages back to funcsigs
        if missing:
            # NOTE(review): `param_name` here is the leaked loop variable
            # (last parameter), so the suggested path may name the wrong arg.
            msg = ("{!r} did not receive required positional arguments: {!r}. "
                   "Looked in arguments passed directly to function and then {!r}.").format(
                func_name,
                ", ".join(
                    missing
                ),
                '{}.{}'.format(func.ctx_path, param_name)
            )
            raise TypeError(msg)
        # Now that we've generated a kwargs dict that is everything we know about how to call
        # this function, call it!
        # debug("Derived params {}".format({a: v for a, v in args_passing.items()
        #                                   if a != 'ctx' and a != 'c'}))
        # TODO We get an 'unexpected kwarg clean' here in Py2 if try to use it.
        # Funcsigs bug of not respecting __signature__? Review both sources
        return func(**args_passing)
    # myparams = (ctx=None, arg1=None, optionalarg1=olddefault)
    myparams = [
        p.replace(default=None) if p.default is p.empty else p
        for p in sig.parameters.values()
    ]
    if not myparams or myparams[0].name not in names_for_ctx:
        raise ValueError("Can't have a derive_kwargs_from_ctx function that doesn't have a context arg!")
    # Don't provide default for ctx
    myparams[0] = list(sig.parameters.values())[0]
    mysig = sig.replace(parameters=myparams)
    generated_function = customized_default_decorator
    generated_function.__signature__ = mysig
    # print('sig here ', mysig.parameters)
    return generated_function
|
9b1a24a17ec0653804752f08f43e7cf615de679e
| 3,639,020
|
def bresenham(points):
    """ Apply Bresenham algorithm for a list points.
    More info: https://en.wikipedia.org/wiki/Bresenham's_line_algorithm
    # Arguments
        points: ndarray. Array of points with shape (N, 2) with N being the number
            if points and the second coordinate representing the (x, y)
            coordinates.
    # Returns
        ndarray: Array of points after having applied the bresenham algorithm.
    """
    # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is
    # what the alias always meant.
    points = np.asarray(points, dtype=int)
    def line(x0, y0, x1, y1):
        """ Bresenham line algorithm.
        """
        d_x = x1 - x0
        d_y = y1 - y0
        x_sign = 1 if d_x > 0 else -1
        y_sign = 1 if d_y > 0 else -1
        d_x = np.abs(d_x)
        d_y = np.abs(d_y)
        if d_x > d_y:
            xx, xy, yx, yy = x_sign, 0, 0, y_sign
        else:
            # Swap axes so we always iterate along the dominant direction.
            d_x, d_y = d_y, d_x
            xx, xy, yx, yy = 0, y_sign, x_sign, 0
        D = 2 * d_y - d_x
        y = 0
        line = np.empty((d_x + 1, 2), dtype=points.dtype)
        for x in range(d_x + 1):
            line[x] = [x0 + x * xx + y * yx, y0 + x * xy + y * yy]
            if D >= 0:
                y += 1
                D -= 2 * d_x
            D += 2 * d_y
        return line
    nb_points = len(points)
    # Fewer than two points: nothing to rasterize.
    if nb_points < 2:
        return points
    new_points = []
    for i in range(nb_points - 1):
        p = points[i:i + 2].ravel().tolist()
        new_points.append(line(*p))
    new_points = np.concatenate(new_points, axis=0)
    return new_points
|
9c49edd9eda3113855582ec3cc35c4d40d056dd9
| 3,639,021
|
def radialBeamProfile_flatTop(x,y,a):
    """Top hat beam profile.
    \param[in] x x-position for profile computation
    \param[in] y y-position for profile computation
    \param[in] a radial extension of flat-top component
    \param[out] isp radial irradiation source profile: 1.0 inside
                radius a (inclusive), 0.0 outside
    """
    inside = (x ** 2 + y ** 2) <= a * a
    return 1.0 if inside else 0.0
|
699d214c499d8cbcf1c0ed26a5d0d00cf2813f3f
| 3,639,022
|
def _split_data(x, y, k_idx, k, perm_indices):
"""Randomly and coordinates splits two indexable items.
Splits items in accordiance with k-fold cross-validatoin.
Arguments:
x: [?]
indexable item
y: [?]
indexable item
k_idx: int
index of the k-fold partition to use
k: int
number of partitions for k-fold cross-validation
perm_indices: np.ndarray, int
array of indices representing a permutation of the samples with
shape (num_sample, )
Returns:
x_majority: [?]
majority partition of indexable item
y_majority: [?]
majority partition of indexable item
x_minority: [?]
minority partition of indexable item
y_minority: [?]
minority partition of indexable item
"""
assert k > 0
assert k_idx >= 0
assert k_idx < k
N = len(x)
partition_size = int(ceil(N / k))
# minority group is the single selected partition
# majority group is the other partitions
minority_start = k_idx * partition_size
minority_end = minority_start + partition_size
minority_indices = perm_indices[minority_start:minority_end]
majority_indices = np.append(perm_indices[0:minority_start],
perm_indices[minority_end:])
assert np.array_equal(np.sort(np.append(minority_indices, majority_indices)),
np.array(range(N)))
x_majority = [x[i] for i in majority_indices]
y_majority = [y[i] for i in majority_indices]
x_minority = [x[i] for i in minority_indices]
y_minority = [y[i] for i in minority_indices]
return (x_majority, y_majority), (x_minority, y_minority)
|
7e53d6a172335b7777887ed493ec41ecb6833461
| 3,639,023
|
def set_neighborhood(G, nodes):
    """Return a list of all neighbors of every node in nodes.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    nodes :
        An iterable container of nodes in G.
    Returns
    -------
    list
        A list containing all nodes that are a neighbor of some node in nodes.
    See Also
    --------
    set_closed_neighborhood
    """
    # TODO: write unit test
    neighbors = set()
    for node in nodes:
        neighbors.update(neighborhood(G, node))
    return list(neighbors)
|
e6ce89162307fecead69c9bdc67bc9c9f8ff40e8
| 3,639,024
|
from typing import Any
from typing import Optional
def resolve_Log(
    parent: Any,
    info: gr.ResolveInfo,
    id: Optional[int] = None,
    uuid: Optional[str] = None,
) -> ENTITY_DICT_TYPE:
    """Resolve a ``Log`` entity by numeric id or uuid."""
    # Delegate to the generic entity resolver for the Log model.
    resolved = resolve_entity(Log, info, id, uuid)
    return resolved
|
eecd46296e9d1c0ce55dc31ee1249b4c3b512b15
| 3,639,025
|
from typing import Union
from pathlib import Path
def _get_path_size(source: Union[Path, ZipInfo]) -> int:
"""
A helper method that returns the file size for the given source
:param source: the source object to get the file size for.
:return: the source's size.
"""
return source.stat().st_size if isinstance(source, Path) else source.file_size
|
2981b2b88e776cfd2315785fea8ba1e1ec63c7cf
| 3,639,026
|
def get_graph_subsampling_dataset(
    prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data,
    max_nodes, max_edges,
    **subsampler_kwargs):
    """Returns tf_dataset for online sampling."""
    def generator():
        # Root nodes: the labeled split for this prefix, optionally mixed
        # with randomly-drawn unlabeled papers.
        labeled_indices = arrays[f"{prefix}_indices"]
        if ratio_unlabeled_data_to_labeled_data > 0:
            num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data *
                                            labeled_indices.shape[0])
            unlabeled_indices = np.random.choice(
                NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False)
            root_node_indices = np.concatenate([labeled_indices, unlabeled_indices])
        else:
            root_node_indices = labeled_indices
        if shuffle_indices:
            # Copy before shuffling so the caller's array is not mutated.
            root_node_indices = root_node_indices.copy()
            np.random.shuffle(root_node_indices)
        for index in root_node_indices:
            # One bounded subgraph per root node.
            graph = sub_sampler.subsample_graph(
                index,
                arrays["author_institution_index"],
                arrays["institution_author_index"],
                arrays["author_paper_index"],
                arrays["paper_author_index"],
                arrays["paper_paper_index"],
                arrays["paper_paper_index_t"],
                paper_years=arrays["paper_year"],
                max_nodes=max_nodes,
                max_edges=max_edges,
                **subsampler_kwargs)
            graph = add_nodes_label(graph, arrays["paper_label"])
            graph = add_nodes_year(graph, arrays["paper_year"])
            graph = tf_graphs.GraphsTuple(*graph)
            yield graph
    # One sample graph is drawn up-front to derive the output signature.
    # NOTE(review): this consumes np.random state (choice/shuffle above) —
    # confirm that's acceptable for reproducibility.
    sample_graph = next(generator())
    return tf.data.Dataset.from_generator(
        generator,
        output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))
|
da31aff7064c3516f95fb5597f2ee757ee35fa25
| 3,639,027
|
def check_comment_exists(comment_id_required=True):
    """
    Decorator to check if a given comment exists. If it does not, it returns an
    HTTP 400 error. Must be called with (), and may pass the optional argument
    of whether the id is required. If the id is passed, it will be checked
    against entities of the Comment kind, and a 400 error will be returned if
    it is not found. If the id is not passed, an error will be returned, unless
    the argument is False.

    :param comment_id_required: when True (default), a missing comment_id is
        rejected with HTTP 400 instead of calling the handler with None.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, comment_id=''):
            comment_entity = None
            if comment_id:
                comment_key = db.Key.from_path('Comment', int(comment_id))
                comment_entity = db.get(comment_key)
                if not comment_entity:
                    # bad request: id given but no such comment
                    return self.error(400)
            elif comment_id_required:
                # bad request: id required but not supplied
                return self.error(400)
            # Propagate the handler's return value (it was previously
            # discarded, while the error paths above did return).
            return func(self, comment_entity)
        return wrapper
    return decorator
|
2c2dd4bd1149ee9f0e87b5d426211a3d5bba78c0
| 3,639,028
|
def GeoMoonState(time):
    """Calculates equatorial geocentric position and velocity of the Moon at a given time.

    The position and velocity are of the Moon's center relative to the
    Earth's center: positions (x, y, z) in AU, velocities (vx, vy, vz) in
    AU/day, oriented to the Earth's equator at the J2000 epoch (EQJ).
    If only the position is needed, #GeoMoon is much more efficient.

    Parameters
    ----------
    time : Time
        The date and time for which to calculate the Moon's position and velocity.

    Returns
    -------
    StateVector
        The Moon's position and velocity vectors in J2000 equatorial coordinates (EQJ).
    """
    # Central-difference hack: deriving an analytic time derivative of
    # CalcMoon() would be extremely painful, so sample the geocentric
    # position just before and just after the requested time, then average
    # for position and difference for velocity.
    half_step = 1.0e-5  # days (0.864 seconds)
    before = GeoMoon(time.AddDays(-half_step))
    after = GeoMoon(time.AddDays(+half_step))
    return StateVector(
        (before.x + after.x) / 2,
        (before.y + after.y) / 2,
        (before.z + after.z) / 2,
        (after.x - before.x) / (2 * half_step),
        (after.y - before.y) / (2 * half_step),
        (after.z - before.z) / (2 * half_step),
        time
    )
|
50c523a2f838e7730546fac4e4b8ed2a13eefe0a
| 3,639,029
|
import platform
def get_machine_name():
    """
    Portable replacement for the ``hostname`` shell command.

    Regarding docker containers: when running inside the docker-dev
    environment, ``$(hostname)`` returns the container id by default; that
    behaviour is kept here and can be overridden in the future by passing
    --hostname to the container. See
    https://docs.docker.com/config/containers/container-networking/#ip-address-and-hostname

    :return: Unique name for a node in the cluster
    """
    return platform.node()
|
ae5a7090846164a97cafd07af4701dcfcc25070e
| 3,639,030
|
def pass_through_formatter(value):
    """Identity formatter: hand back *value* unchanged (no-op)."""
    return value
|
202ea761db9e1fa858718c61df3a7fd18f02826c
| 3,639,031
|
from re import U
def instantiate(decoder, model=None, dataset=None):
    """Instantiate a full decoder config, e.g. handle a list of configs.

    Note that arguments are added in reverse order compared to the encoder
    (model first, then dataset).
    """
    configs = utils.to_list(decoder)
    parts = [_instantiate(cfg, model=model, dataset=dataset) for cfg in configs]
    return U.TupleSequential(*parts)
|
238b97eab9a653200d0f82b92342a64bbbbc6336
| 3,639,032
|
def un_normalize(stdevs, arrList):
    """
    Return an arrayList with the ith column multiplied by scalar stdevs[i]
    if stdevs[i] is not zero, and unmodified if it is zero.

    Args:
        stdevs: A list of numbers (should be the list output by normalize).
        arrList: A list of list of numbers that is the (normalized) data.
    Returns:
        list: A list of list of numbers of the same dimensions as arrlist.
    >>> un_normalize([0.5, 2],[[1, 2], [3,4]])
    [[0.5, 4], [1.5, 8]]
    >>> un_normalize([0.0, 2],[[1, 2], [3,4]])
    [[1, 4], [3, 8]]
    """
    # A zero standard deviation means "leave the column alone", i.e. scale by 1.
    scales = [s if s != 0.0 else 1 for s in stdevs]
    return scalarMultCols(scales, arrList)
|
a87aa89b2f591d46b077ea26d877f1d3459df2b3
| 3,639,033
|
import json
def convert_graph(input_path):
    """
    Converts a CRED-like graph into a graph format supported by the igraph library. The input graph must have been
    generated by cli2 CRED command (look for credResult.json)

    :param input_path: The path to the CRED graph to convert (credResult.json)
    :return: a directed igraph ``Graph`` whose vertices/edges carry the
        attributes copied below; dangling edges are skipped.
    """
    with open(input_path, encoding="utf8") as f:
        cred_file = json.load(f)
    # Locating important elements in the graph
    cred_data = cred_file[1]['credData']
    graph = cred_file[1]['weightedGraph'][1]['graphJSON'][1]
    cred_node_addresses = graph['sortedNodeAddresses']
    # Summary of edges/nodes and also a reminder about dangling edges
    print(f'Found cred summary data for {len(cred_data["nodeSummaries"])} nodes and {len(cred_data["edgeSummaries"])} edges')
    print(f'The graph has {len(graph["nodes"])} nodes, {len(graph["edges"])} edges and {len(graph["sortedNodeAddresses"])} node addresses')
    print(f'Dangling edges expected: {len(graph["edges"]) - len(cred_data["edgeSummaries"])}')
    g = Graph(directed=True)
    # Collecting nodes
    for i, cred_node in enumerate(graph['nodes']):
        cred_node_address = cred_node_addresses[cred_node['index']]
        # Label is "<type>-<first 7 chars of the last address component>".
        igraph_node_atts = {'label': cred_node_address[2]+'-'+cred_node_address[-1][:7],
                            'type': cred_node_address[2],
                            'timestamp': cred_node['timestampMs'] if cred_node['timestampMs'] is not None else 0,
                            'totalCred': cred_data['nodeSummaries'][i]['cred'],
                            'index': cred_node['index'],
                            }
        g.add_vertex(name=str(cred_node['index']), **igraph_node_atts)
    # Collecting edges
    dangling_edges = []
    idx = 0
    for cred_edge in graph['edges']:
        # Checking if the edges is a dangling one. If so, we skip.
        if len(g.vs.select(name_eq=str(cred_edge['srcIndex']))) + len(g.vs.select(name_eq=str(cred_edge['dstIndex']))) < 2:
            dangling_edges.append({"srcIndex": cred_edge['srcIndex'], "dstIndex": cred_edge['dstIndex']})
            continue
        igraph_edge_atts = {'address': '-'.join(cred_edge['address']),
                            'timestamp': cred_edge['timestampMs'],
                            'backwardFlow': cred_data['edgeSummaries'][idx]['backwardFlow'],
                            'forwardFlow': cred_data['edgeSummaries'][idx]['forwardFlow'],
                            }
        g.add_edge(str(cred_edge['srcIndex']), str(cred_edge['dstIndex']), **igraph_edge_atts)
        # Only non-dangling edges consume an entry from edgeSummaries.
        idx += 1
    # Reporting the number of dangling edges found
    print(f"Dangling edges found: {len(dangling_edges)}")
    return g
|
57037a662d033a422965f3efe13f321a5bf7f128
| 3,639,034
|
from typing import List
def get_valid_classes_from_class_input(
        class_graph: class_dependency.JavaClassDependencyGraph,
        class_names_input: str) -> List[str]:
    """Parses classes given as input into fully qualified, valid classes.

    Input is a comma-separated list of class names; the split list is
    validated against the dependency graph by the helper below.
    """
    requested = class_names_input.split(',')
    return get_valid_classes_from_class_list(class_graph, requested)
|
e93edea9692ab9c461ed744a8727effbf705fdea
| 3,639,035
|
def us_ppop(ppop):
    """ Determines if the ppop is in a valid format to be in the US """
    # Null or wrong length can never be a valid 7-character US ppop code.
    if not ppop or len(ppop) != 7:
        return False
    code = ppop.upper()
    # The first two characters must match a known state code or FIPS code.
    return code[:2] in g_state_by_code or code[:2] in g_state_code_by_fips
|
afef4e7634034709f870379cd684a37a793c7ec5
| 3,639,036
|
def get_pygments_lexer(location):
    """
    Given an input file location, return a Pygments lexer appropriate for
    lexing this file content.

    Returns None (implicitly) for binary files and when no lexer can be
    determined at all.
    """
    try:
        # Prefer the pre-computed registry entry for this location, if any.
        T = _registry[location]
        if T.is_binary:
            return
    except KeyError:
        # Not in the registry: sniff the file content directly.
        if binaryornot.check.is_binary(location):
            return
    try:
        # FIXME: Latest Pygments versions should work fine
        # win32_bug_on_s_files = dejacode.on_windows and location.endswith('.s')
        # NOTE: we use only the location for its file name here, we could use
        # lowercase location may be
        lexer = get_lexer_for_filename(location, stripnl=False, stripall=False)
        return lexer
    except LexerClassNotFound:
        try:
            # if Pygments does not guess we should not carry forward
            # read the first 4K of the file
            with open(location, 'rb') as f:
                content = f.read(4096)
            guessed = guess_lexer(content)
            return guessed
        except LexerClassNotFound:
            return
|
53521a3a8b297733cab9af444eff92a76d799a4d
| 3,639,037
|
def get_robotstxt_parser(url, session=None):
    """Get a RobotFileParser for the given robots.txt URL.

    Any fetch failure or HTTP error >= 400 is treated as an absent
    robots.txt (everything allowed); only a 200 response is parsed.
    """
    parser = RobotFileParser()
    try:
        response = urlopen(url, session, max_content_bytes=MaxContentBytes,
                           allow_errors=range(600))
    except Exception:
        # connect or timeout errors are treated as an absent robots.txt
        parser.allow_all = True
        return parser
    if response.status_code >= 400:
        parser.allow_all = True
    elif response.status_code == 200:
        parser.parse(response.text.splitlines())
    return parser
|
f838f8284b250133a1c5f0ca5d514756ff4f1eb0
| 3,639,038
|
def init_model(config, checkpoint=None, device='cuda:0'):
    """Initialize a model from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str, optional): Device to place the model (and extractor) on.
            Defaults to 'cuda:0'.

    Returns:
        nn.Module: The constructed model.
        (nn.Module, None): The constructed extractor model
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    config.data.test.test_mode = True
    model = build_architecture(config.model)
    if checkpoint is not None:
        # load model checkpoint
        load_checkpoint(model, checkpoint, map_location=device)
    # save the config in the model for convenience
    model.cfg = config
    model.to(device)
    model.eval()
    extractor = None
    # Video estimators additionally need a separate feature extractor.
    if config.model.type == 'VideoBodyModelEstimator':
        extractor = build_backbone(config.extractor.backbone)
        if config.extractor.checkpoint is not None:
            # load model checkpoint
            load_checkpoint(extractor, config.extractor.checkpoint)
        extractor.cfg = config
        extractor.to(device)
        extractor.eval()
    return model, extractor
|
494cbcb012978d49905318d92c136bc7c6241a79
| 3,639,039
|
def estimate_period(time, y, y_err, clip=True, plot=True, **kwargs):
    """
    Run a Lomb-Scargle Periodogram to find periodic signals. It's recommended
    to use the allesfitter.time_series functions sigma_clip and slide_clip beforehand.

    Parameters
    ----------
    time : array of float
        e.g. time array (usually in days)
    y : array of float
        e.g. flux or RV array (usually as normalized flux or RV in km/s)
    y_err : array of float
        e.g. flux or RV error array (usually as normalized flux or RV in km/s)
    clip : bool, optional
        Automatically clip the input data with sigma_clip(low=4, high=4)
        and slide_clip(window_length=1, low=4, high=4). The default is True.
    plot : bool, optional
        To plot or not, that is the question. The default is True.
    **kwargs : collection of keyword arguments
        Recognized keys are 'minperiod' and 'maxperiod' (defaulted below);
        they bound the frequency grid of the periodogram.

    Returns
    -------
    best_period : float
        The best period found.
    FAP : float
        The false alarm probability for the best period.
    fig : matplotlib.figure object, optional
        The summary figure. Only returned if plot is True.
    """
    #==========================================================================
    #::: clean the inputs
    #==========================================================================
    time, y, y_err = clean(time, y, y_err)
    # Save the flag before the nested plot() function below shadows the name.
    plot_bool = plot
    if clip:
        y = sigma_clip(time, y, low=4, high=4)
        y = slide_clip(time, y, window_length=1, low=4, high=4)
        time, y, y_err = clean(time, y, y_err)
    #==========================================================================
    #::: handle inputs
    #==========================================================================
    cadence = np.nanmedian(np.diff(time))
    if kwargs is None: kwargs = {}
    if 'minperiod' not in kwargs: kwargs['minperiod'] = 10. * cadence
    if 'maxperiod' not in kwargs: kwargs['maxperiod'] = time[-1]-time[0]
    minfreq = 1./kwargs['maxperiod']
    maxfreq = 1./kwargs['minperiod']
    #==========================================================================
    #::: now do the periodogram
    #==========================================================================
    ls = LombScargle(time, y) #Analyze our dates and s-index data using the AstroPy Lomb Scargle module
    frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
    best_power = np.nanmax(power)
    best_frequency = frequency[np.argmax(power)]
    best_period = 1./best_frequency
    FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
    #==========================================================================
    #::: plot
    #==========================================================================
    def plot():
        # Build the 4-panel summary figure: periodogram, two phase-folded
        # views of the data, and the autocorrelation.
        peak_loc=round(float(1./best_frequency),2)
        FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
        FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
        fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
        #::: plot the periodogram
        ax = axes[0]
        ax.semilogx(1./frequency,power,color='b')
        ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
        ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
        ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
        ax.hlines(FAP_levels, kwargs['minperiod'], kwargs['maxperiod'], color='grey', lw=1)
        ax.text(kwargs['maxperiod'], FAP_levels[0],'0.5% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[1],'0.1% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[2],'0.01% FAP ', ha='right')
        ax.set(xlabel='Period (days)', ylabel='L-S power')
        ax.tick_params(axis='both',which='major')
        #::: plot the phase-folded data
        ax = axes[1]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylim=[np.nanmin(y), np.nanmax(y)], ylabel='Data (clipped; phased)')
        #::: plot the phase-folded data, zoomed
        ax = axes[2]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylabel='Data (clipped; phased; y-zoom)')
        #::: plot the autocorrelation of the data
        ax = axes[3]
        plot_acf(pd.Series(y, index=time), ax=ax, lags=np.linspace(start=1,stop=2*best_period/cadence,num=100,dtype=int))
        ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
        return fig
    #==========================================================================
    #::: return
    #==========================================================================
    if plot_bool:
        fig = plot()
        return best_period, FAP, fig
    else:
        return best_period, FAP
|
23cc58d910ff5541847fa4d5892979aa312d1609
| 3,639,040
|
def get_test_packages():
    """Determine which package directories need their tests run.

    Selection order:
      1. Packages passed as positional command-line arguments.
      2. A diff against the remote branch configured via the local remote /
         local branch environment variables.
      3. On Travis, the subset changed by the current Pull Request ("push"
         builds to branches may not have any filtering).
      4. Otherwise, every package.

    When a diff is computed (cases 2 and 3), the package dependency graph is
    used to add any additional packages which depend on the changed packages.

    :rtype: list
    :returns: A list of all package directories where tests
              need be run.
    """
    all_packages = get_package_directories()
    diff_base = local_diff_branch()
    args = get_parser().parse_args()
    if args.packages is not UNSET_SENTINEL:
        verify_packages(args.packages, all_packages)
        return sorted(args.packages)
    if diff_base is not None:
        changed = get_changed_packages('HEAD', diff_base, all_packages)
        return follow_dependencies(changed, all_packages)
    if in_travis():
        changed = get_travis_directories(all_packages)
        return follow_dependencies(changed, all_packages)
    return all_packages
|
302a3136ec84e81a68348e5ff1bffa9c916f36a1
| 3,639,041
|
def decoding_character(morse_character):
    """
    Input:
        - morse_character : a string that is guaranteed (per the
          get_morse_code_dict table) to map to a single alphabet letter.
    Output:
        - The alphabet letter corresponding to the given Morse code.
    Examples:
        >>> import morsecode as mc
        >>> mc.decoding_character("-")
        'T'
        >>> mc.decoding_character(".")
        'E'
        >>> mc.decoding_character(".-")
        'A'
        >>> mc.decoding_character("...")
        'S'
        >>> mc.decoding_character("....")
        'H'
        >>> mc.decoding_character("-.-")
        'K'
    """
    # Look the code up in the reversed (code -> letter) table.
    code_to_char = get_char_code_dict()
    return code_to_char.get(morse_character)
|
29c3f99da372a713d349a0c7640403ae32c08aba
| 3,639,042
|
def SparsityParametersAddDimMetadata(builder, dimMetadata):
    """This method is deprecated. Please switch to AddDimMetadata."""
    # Backwards-compatibility shim: forwards both arguments unchanged to
    # the non-prefixed helper.
    return AddDimMetadata(builder, dimMetadata)
|
5a7604ca44fbf3f2a1d520018269c472340511e5
| 3,639,043
|
def check_branch(payload, branch):
    """
    Check if a push was on configured branch.

    :param payload: Payload from web hook.
    :param branch: Name of branch to trigger action on.
    :return: True if push was on configured branch, False otherwise.
    """
    # A push payload carries the target ref; anything else is not a match.
    if "ref" in payload and payload["ref"] == branch:
        return True
    return False
|
88bd0ebae330ee169e97a40aee208b2f92ee4a32
| 3,639,044
|
from typing import Union
def convert(q: Quantity, new_unit: Union[str, Unit], equivalencies=None) -> Quantity:
    """Convert quantity to a new unit.

    :param q: the quantity to convert.
    :param new_unit: target unit (name or Unit object).
    :param equivalencies: optional astropy equivalencies to apply.
    :raises InvalidUnit: When target unit does not exist.
    :raises InvalidUnitConversion: If the conversion is invalid.

    Customized to be a bit more universal than the original quantities.
    """
    try:
        return q.to(new_unit, equivalencies or [])
    except u.UnitConversionError:
        # Temperature scales need an explicit equivalency; retry with it
        # before declaring the conversion invalid.
        if q.unit.physical_type == "temperature":
            return q.to(new_unit, u.temperature())
        else:
            # `from None` hides the astropy traceback from the caller.
            raise InvalidUnitConversion(
                f"Cannot convert unit '{q.unit}' to '{new_unit}'."
            ) from None
    except ValueError as err:
        # Raised when the unit string cannot be parsed at all.
        raise InvalidUnit(f"Unit '{new_unit}' does not exist.") from None
|
7d28a40d3da4a6189aeb9efb252f50088838a1f3
| 3,639,045
|
def randomized_pairwise_t_test(arr1, arr2, output=True):
    """
    Perform a randomized pairwise t-test on two arrays
    of values of equal size.

    see Cohen, P.R., Empirical Methods for Artificial Intelligence, p. 168

    :param arr1: first sequence of paired observations.
    :param arr2: second sequence, same length as ``arr1``.
    :param output: if True, print the mean difference and significance.
    :return: tuple ``(mean_difference, p_value)``.
    """
    # Make sure both arrays are the same length
    assert len(arr1) == len(arr2)
    # Cast to floats eagerly. Under Python 3, ``map`` returns a lazy
    # iterator that supports neither len() nor indexing, so the previous
    # ``map(float, ...)`` version crashed here -- build real lists instead.
    arr1 = [float(x) for x in arr1]
    arr2 = [float(x) for x in arr2]
    # Pairwise (signed) differences and their observed mean
    diffs = [a - b for a, b in zip(arr1, arr2)]
    originalMean = sum(diffs) / float(len(diffs))
    numLess = 0
    # 10000 randomization trials: flip the sign of each difference at
    # random and count how often the resulting mean is <= the observed one.
    for _ in range(10000):
        running_sum = 0.
        for d in diffs:
            if choice([True, False]):
                running_sum += d
            else:
                running_sum -= d
        mean = running_sum / float(len(diffs))
        if mean <= originalMean:
            numLess += 1
    # Two-sided p-value estimate with add-one smoothing.
    ratio = float(numLess + 1) / float(10001)
    ratio = min(ratio, 1 - ratio)
    if output:
        print("mean difference: %f\nsignificant at p <= %f" % (originalMean, ratio))
    return originalMean, ratio
|
92ceb071fcc03dd952a15ffe08f2bd305c603a39
| 3,639,046
|
from typing import Dict
from datetime import datetime
import uuid
def update_metadata(radar, longitude: np.ndarray, latitude: np.ndarray) -> Dict:
    """
    Update metadata of the gridded products.

    Parameter:
    ==========
    radar: pyart.core.Grid
        Radar data.
    longitude: np.ndarray
        Gridded longitudes; only min/max are used for the bounding box.
    latitude: np.ndarray
        Gridded latitudes; only min/max are used for the bounding box.

    Returns:
    ========
    metadata: dict
        Output metadata dictionnary.
    """
    today = datetime.datetime.utcnow()
    # Decode the CF-style time axis into python datetimes.
    dtime = cftime.num2pydate(radar.time["data"], radar.time["units"])
    maxlon = longitude.max()
    minlon = longitude.min()
    maxlat = latitude.max()
    minlat = latitude.min()
    metadata = {
        "comment": "Gridded radar volume using Barnes et al. ROI",
        "field_names": ", ".join([k for k in radar.fields.keys()]),
        # WKT polygon of the horizontal footprint (counter-clockwise, closed).
        "geospatial_bounds": f"POLYGON(({minlon:0.6} {minlat:0.6},{minlon:0.6} {maxlat:0.6},{maxlon:0.6} {maxlat:0.6},{maxlon:0.6} {minlat:0.6},{minlon:0.6} {minlat:0.6}))",
        "geospatial_lat_max": f"{maxlat:0.6}",
        "geospatial_lat_min": f"{minlat:0.6}",
        "geospatial_lat_units": "degrees_north",
        "geospatial_lon_max": f"{maxlon:0.6}",
        "geospatial_lon_min": f"{minlon:0.6}",
        "geospatial_lon_units": "degrees_east",
        "geospatial_vertical_min": np.int32(radar.origin_altitude["data"][0]),
        "geospatial_vertical_max": np.int32(20000),
        "geospatial_vertical_positive": "up",
        "history": f"created by Valentin Louf on gadi.nci.org.au at {today.isoformat()} using Py-ART",
        "processing_level": "b2",
        "time_coverage_start": dtime[0].isoformat(),
        "time_coverage_end": dtime[-1].isoformat(),
        "uuid": str(uuid.uuid4()),
    }
    return metadata
|
ae4b26372221262426803f40394caa06245d5afb
| 3,639,047
|
import copy
def idxsel2xsel(file, isel, dimensions, order):
    """ convert a index space selection object to an xSelect object

    Parameters:
        file: object exposing ``cf_dimensions`` (axis name -> size), used
            to expand open-ended or missing slices.
        isel: idxSelect -- the index-space selection to convert; anything
            else raises TypeError.
        dimensions: ordered iterable of axis names to process.
        order: list of int, MODIFIED IN PLACE -- the position of any axis
            that collapses to a scalar is removed and later positions are
            shifted down by one.

    Returns:
        xSelect with ``isbasic``, ``interp``, ``masked`` and ``order``
        attributes filled in.
    """
    if not isinstance(isel, idxSelect):
        raise TypeError('wrong argument type')
    xsel = {}        # per-axis selection (slice / scalar / array)
    xsel_size = {}   # per-axis result length (0 for collapsed axes)
    xsel_dims = {}   # per-axis coordinate dimensions (None = simple axis)
    isarray = False
    interp = False
    masked = False
    multidim = False
    i = 0  # running position of the current axis within `order`
    for axis in dimensions:
        inc_i = True
        try:
            idx = isel[axis]
            if idx.interp: interp = True
            if idx.isarray:
                isarray = True
                if idx.dims is not None: multidim = True
            if isinstance(idx.v, N.ma.MaskedArray): masked = True
            xsel_dims[axis] = idx.dims
            idx = idx.v
            if isinstance(idx, slice):
                # Normalize the slice: fill in missing start/stop/step from
                # the axis size, honouring negative steps.
                dimsize = file.cf_dimensions[axis]
                res = [idx.start, idx.stop, idx.step]
                if (idx.step is not None and idx.step < 0):
                    if idx.start is None: res[0] = dimsize - 1
                    if idx.stop is None: res[1] = None
                else:
                    if idx.start is None: res[0] = 0
                    if idx.stop is None: res[1] = dimsize
                if idx.step is None: res[2] = 1
                xsel[axis] = slice(res[0], res[1], res[2])
            elif N.isscalar(idx):
                # Scalar index: the axis collapses, so drop its position
                # from `order` and shift the later positions down.
                xsel[axis] = idx
                if len(order) > 0:
                    order.remove(i)
                    for val in order:
                        if val > i:
                            order[order.index(val)] = val - 1
                inc_i = False
            else:
                #xsel[axis] = idx.copy()
                xsel[axis] = copy.copy(idx)
                # 0-d (or length-1-shaped) arrays also collapse the axis.
                if len(idx.shape) == 0 or idx.shape == 1:
                    if len(order) > 0:
                        order.remove(i)
                        for val in order:
                            if val > i:
                                order[order.index(val)] = val - 1
                    inc_i = False
        except KeyError:
            # Axis not selected at all: take the full extent.
            dimsize = file.cf_dimensions[axis]
            xsel[axis] = (slice(0, dimsize, 1))
            xsel_dims[axis] = None
        if inc_i:
            i += 1
    if isarray:
        # convert slices to 1d-arrays and determine result size
        for axis in dimensions:
            idx = xsel[axis]
            if isinstance(idx, slice):
                xsel[axis] = N.arange(idx.start, idx.stop, idx.step)
            if xsel_dims[axis] is None:
                if is_scalar(xsel[axis]):
                    xsel_size[axis] = 0
                else:
                    xsel_size[axis] = len(xsel[axis])
            else:
                xsel_size[axis] = isel[axis].axlen
        # determine shape of xsel
        dim_ret = []
        for axis in dimensions:
            if xsel_size[axis] != 0:
                dim_ret.append(xsel_size[axis])
        ndim_ret = len(dim_ret)
        # all 1d arrays
        if not multidim:
            # Reshape each 1-d index array so the arrays broadcast against
            # each other (outer-product style fancy indexing).
            i = 0
            for axis in dimensions:
                if xsel_size[axis] != 0:
                    idx_shape = N.ones(ndim_ret, dtype="int32")
                    idx_shape[i] = dim_ret[i]
                    xsel[axis].shape = idx_shape
                    i += 1
        # at least one multidimensional coordinate
        else:
            i = 0
            for axis in dimensions:
                if xsel_dims[axis] is None:
                    if xsel_size[axis] != 0:
                        idx_shape = N.ones(ndim_ret, dtype="int32")
                        idx_shape[i] = dim_ret[i]
                        xsel[axis].shape = idx_shape
                        i += 1
                else:
                    # Multidimensional coordinate: derive its shape from the
                    # sizes of the dimensions it spans.
                    idx_shape2 = {}
                    for axis2 in dimensions:
                        if xsel_size[axis2] != 0:
                            if axis2 in xsel_dims[axis]:
                                idx_shape2[axis2] = isel[axis].dimsize(axis2)
                            else:
                                idx_shape2[axis2] = 1
                    idx_shape = []
                    for axis2 in dimensions:
                        if axis2 in idx_shape2:
                            idx_shape.append(idx_shape2[axis2])
                    if isel[axis].type != 'scalar':
                        i += 1
    # check if we only need basic slicing
    if not isarray and not interp:
        isbasic = True
    else:
        isbasic = False
    ret = []
    for axis in dimensions:
        ret.append(xsel[axis])
    ret = xSelect(ret)
    ret.isbasic = isbasic
    ret.interp = interp
    ret.masked = masked
    ret.order = order
    return ret
|
ff00a7705a9ae1f633e7ec19682367ccfea2b7bf
| 3,639,048
|
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1000, 1)
|
e1c0a710131289e457f3c15da411a7f8d17fdfc7
| 3,639,049
|
def user_detail(request, id, format=None):
    """
    Retrieve, update or delete a User instance.

    GET returns the serialized user; PUT validates and saves the submitted
    data (400 on validation errors); DELETE requires the 'ops.delete_user'
    permission (403 otherwise). 404 if the user id does not exist.
    """
    try:
        snippet = User.objects.get(id=id)
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UserSerializer(snippet)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = UserSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        # Deletion is gated by an explicit model permission.
        if not request.user.has_perm('ops.delete_user'):
            return Response(status=status.HTTP_403_FORBIDDEN)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
9339c85cec0b271d5eeb8a1caec976992869174a
| 3,639,050
|
def _gsmooth_img(args):
    """
    HELPER FUNCTION: private!
    Smooth an image with a gaussian in 2d.

    `args` is a single packed tuple (image, kernel, use_fft flag, extra
    kwargs) so the helper can be dispatched through a worker map.
    """
    img, kernel, use_fft, kwargs = args
    convolver = convolve_fft if use_fft else convolve
    return convolver(img, kernel, normalize_kernel=True, **kwargs)
|
313a0c4475935665cb0e4c55bea343adf3a9fab4
| 3,639,051
|
import argparse
def ParseArgs():
    """Parses command line arguments.

    Returns:
        args from argparse.parse_args().
    """
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Handle Whale button click event.')
    arg_parser.add_argument('-d', '--debug', action='store_true', default=False,
                            help='enable debug messages')
    arg_parser.add_argument('--rpc_debug', action='store_true', default=False,
                            help='enable debug messages for XMLRPC call')
    arg_parser.add_argument('--nouse_dolphin', action='store_false', default=True,
                            dest='use_dolphin', help='whether to skip dolphin control'
                            ' (remote server). default: %(default)s')
    arg_parser.add_argument('--use_polld', action='store_true', default=False,
                            help='whether to use polld (for polling GPIO port on '
                            'remote server) or poll local GPIO port, default: '
                            '%(default)s')
    arg_parser.add_argument('--host', default='127.0.0.1', type=str,
                            help='hostname of server, default: %(default)s')
    arg_parser.add_argument('--dolphin_port', default=9997, type=int,
                            help='port that dolphin_server listens, default: '
                            '%(default)d')
    arg_parser.add_argument('--polld_port', default=9998, type=int,
                            help='port that polld listens, default: %(default)d')
    arg_parser.add_argument('--servod_port', default=9999, type=int,
                            help='port that servod listens, default: %(default)d')
    arg_parser.add_argument('--polling_wait_secs', default=5, type=int,
                            help=('# seconds for polling button clicking event, '
                                  'default: %(default)d'))
    return arg_parser.parse_args()
|
304a453995de9586467756cbd9e974786033e794
| 3,639,052
|
def __logs_by_scan_id(scan_id, language):
    """
    select all events by scan id hash

    Args:
        scan_id: scan id hash
        language: language

    Returns:
        an array with JSON events or an empty array
    """
    # SECURITY NOTE(review): scan_id is interpolated directly into the SQL
    # text; if it can ever carry untrusted input this is an injection risk.
    # Prefer a parameterized query if send_read_query supports one.
    try:
        rows = send_read_query(
            "select host,username,password,port,type,date,description from hosts_log where scan_id=\"{0}\"".format(
                scan_id), language)
        return [
            {
                "SCAN_ID": scan_id,
                "HOST": row[0],
                "USERNAME": row[1],
                "PASSWORD": row[2],
                "PORT": row[3],
                "TYPE": row[4],
                "TIME": row[5],
                "DESCRIPTION": row[6]
            }
            for row in rows
        ]
    except Exception:
        # Previously a bare `except:`; keep the best-effort contract
        # (empty list on any query failure) but let SystemExit and
        # KeyboardInterrupt propagate.
        return []
|
26ef72dd2e0ed974a84f2ddc67e61fd90f769f17
| 3,639,053
|
def docs():
    """Redirect to documentation on Github

    Route: /docs
    Methods: GET

    Return: redirect to webpage
    """
    # Documentation is hosted on GitHub Pages; just send the client there.
    return redirect("https://kinsaurralde.github.io/ws_281x-lights/#/")
|
18fbbf2e4d53c66545bdf1129de5d1d4ac5944fd
| 3,639,054
|
def std(a, weights=None, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Compute the weighted standard deviation along the specified axis.

    :param a: Array containing numbers whose standard deviation is desired. If `a` is not an
        array, a conversion is attempted.
    :param weights: Array containing weights for the elements of `a`. If `weights` is not an
        array, a conversion is attempted. When None, this is plain `np.std`.
    :param axis: Axis or axes along which the means are computed. The default is to
        compute the mean of the flattened array. Type is None or int or tuple of ints, optional.
    :param dtype: data type to use in computing the mean.
    :param int ddof: Delta Degrees of Freedom. The divisor used in calculations
        is ``W - ddof``, where ``W`` is the sum of weights (or number of elements
        if `weights` is None). By default `ddof` is zero
    :param bool keepdims: If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one.
    :return: np.ndarray
    """
    if weights is None:
        # Unweighted case delegates straight to numpy.
        return np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
    wts = np.array(weights)
    # Weighted mean kept with keepdims=True so it broadcasts below.
    center = mean(a, weights=wts, axis=axis, keepdims=True)
    weighted_sq_dev = wts * (np.array(a) - center) ** 2
    numerator = np.sum(weighted_sq_dev, axis=axis, dtype=dtype, keepdims=keepdims)
    denominator = np.sum(wts, axis=axis, dtype=dtype, keepdims=keepdims) - ddof  # noqa: W504
    return np.sqrt(numerator / denominator)
|
758421b85657197413ab4fe2713bf18da2ac184a
| 3,639,055
|
import logging
def createOneHourCandles(markets, database):
    """
    Function that creates tables for one hour candles.

    SECURITY NOTE(review): the connection credentials are hardcoded below;
    they should be moved to configuration / environment variables.

    :param markets: iterable of market symbols; one table is (re)created per symbol.
    :param database: name of the database to connect to.
    :return: True once all tables have been (re)created.
    """
    conn = pymysql.connect(host='localhost',
                           user='jan',
                           password='17051982',
                           database=database)
    func_logging = logging.getLogger("bittrex_database." + str(__name__) + ".createOneHourCandles()")
    for ix in markets:
        with conn.cursor() as cur:
            # CREATE OR REPLACE drops any pre-existing table of the same
            # name, i.e. existing candle data for that market is discarded.
            comm = f'CREATE OR REPLACE TABLE `{ix}` (' \
                   f'symbol CHAR(20) NOT NULL, ' \
                   f'ttime FLOAT NOT NULL, ' \
                   f'oopen DOUBLE NOT NULL, ' \
                   f'hhigh DOUBLE NOT NULL, ' \
                   f'llow DOUBLE NOT NULL, ' \
                   f'cclose DOUBLE NOT NULL, ' \
                   f'base_vol DOUBLE NOT NULL, ' \
                   f'quote_vol DOUBLE NOT NULL, ' \
                   f'usd_vol DOUBLE NOT NULL, ' \
                   f'PRIMARY KEY (symbol, ttime)' \
                   f') ENGINE=Maria'
            cur.execute(comm)
        conn.commit()
    func_logging.info("One hour candles database have been initiated with empty columns.")
    return True
|
c800570a4dd17acb0b588064938ccf098e6c53bb
| 3,639,056
|
def coaddspectra(splist, plotsp=True, outf=None, sn_smooth_npix=10):
    """ Coadd spectra

    Parameters
    ----------
    splist : list of XSpectrum1D objects
        List of spectra to coadd
    plotsp : bool
        If True, plot the coadded spectrum
    outf : str
        Output file
    sn_smooth_npix : float
        Parameter in coadd1d.combspec function that defines
        number of pixels to median filter by when computing S/N used to decide how to scale and weight spectra

    Returns
    -------
    sp : XSpectrum1D
        A spectrum that represents coadded spectra from the splist list
    """
    waves = []
    fluxes = []
    ivars = []
    masks = []
    for isp in splist:
        waves.append(isp.wavelength)
        fluxes.append(isp.flux)
        # NOTE(review): pixels with sig == 0 divide by zero here; they are
        # masked out below, but the ivar entry is still inf/nan.
        ivars.append(1. / (isp.sig) ** 2.)
        imask = np.repeat(True, len(isp.flux))
        # Mask pixels that carry no data (flux and sig both exactly zero).
        j = np.where((isp.flux == 0) & (isp.sig == 0))[0]
        imask[j] = False
        masks.append(imask)
    # Transpose so spectra run along the second axis -- presumably the
    # layout coadd1d.combspec expects; confirm against its docs.
    waves = np.ndarray.transpose(np.asarray(waves))
    fluxes = np.ndarray.transpose(np.asarray(fluxes))
    ivars = np.ndarray.transpose(np.asarray(ivars))
    masks = np.ndarray.transpose(np.asarray(masks))
    wave_stack, flux_stack, ivar_stack, mask_stack = coadd1d.combspec(
        waves, fluxes, ivars, masks, sn_smooth_npix, show=plotsp)
    # Keep only entries with a positive stacked wavelength.
    ii = np.where(wave_stack > 0)[0]
    coadded_waves = wave_stack[ii]
    coadded_fluxes = flux_stack[ii]
    coadded_sigs = 1 / (np.sqrt(ivar_stack[ii]))
    # write and return the spectrum
    sp = xspec.XSpectrum1D(coadded_waves, coadded_fluxes, coadded_sigs)
    if outf is not None:
        sp.write_to_fits(outf)
    return sp
|
1e0c312389f566a34cca878251b7d808968e175c
| 3,639,057
|
def get_rel_sim(relation, question, dataset):
    """
    Get max cosine distance for relations.

    Returns the best cosine similarity between the relation embedding and
    any n-gram of the question, or 0.0 if no score exceeds 0.5.

    :param relation: relation phrase
    :param question: question text
    :param dataset: dataset identifier forwarded to the embedding lookup
    :return: float similarity score
    """
    phrase_vecs = [get_avg_word2vec(phrase, dataset)
                   for phrase in generate_ngrams(question)]
    rel_vec = get_avg_word2vec(relation, dataset)
    sims = [cosine_similarity(rel_vec, vec)[0][0] for vec in phrase_vecs]
    if sims and np.max(sims) > 0.5:
        return np.max(sims)
    return 0.0
|
63c313fac32ec2483979585c60cea916979aaf5d
| 3,639,058
|
def mk_request(bits, cn):
    """
    Create a X509 request with the given number of bits in the key.

    Args:
        bits -- number of RSA key bits
        cn -- common name in the request

    Returns a X509 request and the private key (EVP)
    """
    pk = EVP.PKey()
    x = X509.Request()
    # 65537 is the conventional RSA public exponent; the no-op lambda
    # silences the key-generation progress callback.
    rsa = RSA.gen_key(bits, 65537, lambda: None)
    pk.assign_rsa(rsa)
    x.set_pubkey(pk)
    # Fill the subject from the [ca] config section; only the common name
    # is caller-supplied.
    name = x.get_subject()
    name.C = config.get('ca', 'cert_country')
    name.CN = cn
    name.ST = config.get('ca', 'cert_state')
    name.L = config.get('ca', 'cert_locality')
    name.O = config.get('ca', 'cert_organization')
    name.OU = config.get('ca', 'cert_org_unit')
    # Sign the request with its own key using SHA-256.
    x.sign(pk, 'sha256')
    return x, pk
|
f6ac4fe385caba149b85599fa6f48fc3d0dc7ccf
| 3,639,059
|
import re
def nice(name):
    """Generate a nice name based on the given string.
    Examples:
        >>> names = [
        ...     "simple_command",
        ...     "simpleCommand",
        ...     "SimpleCommand",
        ...     "Simple command",
        ... ]
        >>> for name in names:
        ...     nice(name)
        'Simple Command'
        'Simple Command'
        'Simple Command'
        'Simple Command'
    Arguments:
        name (str): The string from which generate the nice name.
    Returns:
        str: The generated nice name.
    """
    # Insert a space before every interior upper-case letter (the lookbehind
    # keeps a leading capital untouched), then normalise underscores and case.
    spaced = re.sub(r"(?<!^)([A-Z])", r" \1", name)
    return spaced.replace("_", " ").title()
|
ab96675423812a85744bb76e7f62d08bbbac2eea
| 3,639,060
|
def get_outputs():
    """Get the available outputs, excluding outputs in the EXCLUDED_OUTPUTS variable."""
    active = []
    for node in connection.get_tree():
        # Skip non-output nodes and explicitly excluded outputs.
        if node.type != "output" or node.name in EXCLUDED_OUTPUTS:
            continue
        workspaces = node.nodes[1].nodes
        if workspaces:
            active.append((node, workspaces))
    return active
|
6db1ea83252a7a6f4fd7f731c206c5d4a738a282
| 3,639,061
|
def get_user_owner_mailboxes_tuples(user):
    """Return a generator of (id, email_address) tuples for the mailboxes owned by ``user``."""
    return (
        (mailbox.id, mailbox.email_address)
        for mailbox in get_user_owner_mailboxes_query(user)
    )
|
e7db6658497678387f4a93237b686d29bc27d91f
| 3,639,062
|
def modinv(a, m):
    """Modular Multiplicative Inverse.

    Returns x such that (a * x) % m == 1.

    Args:
        a: value to invert
        m: modulus
    Raises:
        Exception: if gcd(a, m) != 1, i.e. the inverse does not exist.
    """
    a = a % m
    g, x, _ = _extended_gcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m


def _extended_gcd(a, b):
    """Iterative extended Euclid: return (g, x, y) with a*x + b*y == g == gcd(a, b)."""
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return old_r, old_s, old_t
|
9ddea93398f8c96f828a8efaea36f21f6b8dd13e
| 3,639,063
|
import socket
def get_ip():
    """Get the ip of the host computer.

    "Connecting" a UDP socket to a public address sends no packets but makes
    the OS pick the outbound interface, whose address is then read back.
    Falls back to loopback when no route is available.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('1.1.1.1', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
|
fb4f79eaa25573d7078f69c5d5ad71c51c9d1c44
| 3,639,064
|
def available_structure_info():
    """ Lists available attributes for :func:`abagen.mouse.get_structure_info`

    A copy is returned so callers cannot mutate the module-level registry.
    """
    attributes = _STRUCTURE_ATTRIBUTES.copy()
    return attributes
|
14591c89c9f7212440da282f50408459692d1fc4
| 3,639,065
|
def centos(function):
    """Decorator to set the Linux distribution to CentOS 7.

    Args:
        function: the callable to wrap; it runs with hpccm globals forced
            to CentOS 7 for the duration of the call.
    Returns:
        The wrapped callable (metadata preserved via functools.wraps).
    """
    from functools import wraps

    # wraps preserves __name__/__doc__ so test discovery and introspection
    # still see the original function's identity.
    @wraps(function)
    def wrapper(*args, **kwargs):
        hpccm.config.g_linux_distro = linux_distro.CENTOS
        hpccm.config.g_linux_version = StrictVersion('7.0')
        return function(*args, **kwargs)
    return wrapper
|
9c54a7aac46bd30d490c625afa4392d5127a2be7
| 3,639,066
|
def Normalized2(p):
    """Return vector p normlized by dividing by its squared length.
    Return (0.0, 1.0) if the result is undefined.

    Args:
        p: a (x, y) pair of numbers.
    Returns:
        A unit-length (x, y) tuple, or (0.0, 1.0) for (near-)zero or
        numerically degenerate input.
    """
    (x, y) = p
    sqrlen = x * x + y * y
    # Treat vanishingly small vectors as degenerate to avoid dividing by ~0.
    if sqrlen < 1e-100:
        return (0.0, 1.0)
    try:
        d = sqrt(sqrlen)
        return (x / d, y / d)
    # Catch only the numeric failures sqrt/division can raise (e.g. huge
    # integer inputs overflowing float conversion) instead of a bare except
    # that would also swallow KeyboardInterrupt and programming errors.
    except (OverflowError, ValueError, ZeroDivisionError):
        return (0.0, 1.0)
|
42cc78350f264226c624a81ca5b0bd6457d353b0
| 3,639,067
|
def findwskeyword(keyword, sol):
    """Find and return a value for a keyword in the list of the wavelength solution.

    Locates the first occurrence of ``keyword`` in ``sol``, takes the text up
    to the following newline, and returns the stripped value after '='.
    Raises ValueError (from str.index) if the keyword or newline is missing.
    """
    start = sol.index(keyword)
    stop = sol.index('\n', start)
    entry = sol[start:stop]
    return entry.split('=')[1].strip()
|
b3cc028415d74ecfd7ec3868ae591d7b4d3b8860
| 3,639,068
|
import numpy
from typing import Tuple
from typing import Callable
from typing import Union
from typing import List
def algorithm(array: numpy.ndarray, start: Tuple[int, int], end: Tuple[int, int],
              heuristic: Callable = manhattan) -> Union[List, None]:
    """
    Returns a list of all points, for the path between `start` and `end`
    :param array: a numpy array of Node instances
    :param start: a tuple (or list) of points corresponding to where to start on array
    :param end: like start, but for the end
    :param heuristic: a function that represents the heuristic (default: manhattan heuristic)
    Example:
        >>> test = numpy.array(
            [[0, 0, 0, 0, 0, 1],
            [0, 1, 1, 1, 0, 1],
            [0, 1, 0, 0, 0, 1],
            [0, 1, 0, 1, 1, 1],
            [0, 0, 0, 0, 1, 0],
            [1, 1, 1, 0, 0, 0]]
            )
        >>> print(algorithm(test, (0, 0), (5, 5)))

    A* search: returns None when no path exists.
    """
    # Convert the raw 0/1 grid into Node instances (hashable, carry .pos).
    array = array_to_class(array)
    actual_start = array[start[0], start[1]]
    actual_end = array[end[0], end[1]]
    # `count` is a monotonically increasing tie-breaker so the queue never
    # compares Node instances directly when f-scores are equal.
    count = 0
    open_set = PriorityQueue()
    open_set.put((0, count, actual_start))
    came_from = {}
    # g: cost from start; f: g + heuristic estimate to goal.
    g_score = {node: inf for row in array for node in row}
    f_score = {node: inf for row in array for node in row}
    g_score[actual_start] = 0
    f_score[actual_start] = heuristic(start, end)
    # Mirrors queue membership for O(1) "already queued?" checks.
    open_set_hash = {actual_start}
    while not open_set.empty():
        current = open_set.get()[2]
        current_pos = current.pos
        open_set_hash.remove(current)
        if current == actual_end:
            # NOTE(review): reconstruct_path is handed the (row, col) tuples
            # while came_from is keyed by Node instances — confirm it
            # resolves nodes internally.
            return reconstruct_path(came_from, start, end)
        for neighbor in get_neighbors(array, current_pos):
            neighbor_instance = array[neighbor[0], neighbor[1]]
            # Uniform edge cost of 1 between adjacent cells.
            temp_g_score = g_score[current] + 1
            if temp_g_score < g_score[neighbor_instance]:
                came_from[neighbor_instance] = current
                g_score[neighbor_instance] = temp_g_score
                f_score[neighbor_instance] = temp_g_score + heuristic(neighbor, end)
                # NOTE(review): if the node is already queued, its improved
                # f_score is not re-pushed, so it may be popped with a stale
                # priority.
                if neighbor_instance not in open_set_hash:
                    count += 1
                    open_set.put((f_score[neighbor_instance], count, neighbor_instance))
                    open_set_hash.add(neighbor_instance)
    # Open set exhausted without reaching the goal: no path.
    return None
|
9a91e27bc0dbe78f7b2801dbdfbef6747a845aa7
| 3,639,069
|
from . import logger
def MFString(string_list):
    """
    input a list of unicode strings
    output: a unicode string formed by encoding, enclosing each
    item in double quotes, and concatenating
    27 Nov 2016: The complete case is as yet unimplemented,
    to avoid sending bad X3D into the world will instead fail with
    a Exception if any of the elements of list contain a XML special case in '"&<>
    """
    forbidden = u"\'\"&<>"
    assert len(forbidden) == 5
    # type(u"") keeps this working on both Python 2 (unicode) and 3 (str).
    text_type = type(u"")
    for entry in string_list:
        if type(entry) is not text_type:
            logger.warn("Non unicode entry for MFString: %s" % (repr(entry),))
        for special in forbidden:
            if special in entry:
                raise ValueError("Unimplemented case: special character in MFString item: %s" % (repr(entry),))
    return " ".join([u'"%s"' % entry for entry in string_list])
|
a068c25ab157b5537ea47e625ca8ed9aecd0f4e5
| 3,639,070
|
def re_allocate_memory(ptr: VoidPtr, size: int) -> VoidPtr:
    """
    Internal memory reallocation wrapper around raylib's MemRealloc.
    ptr: Pointer to a memory block previously returned by allocate_memory.
    size: The new size of the memory block in bytes.
    Returns a pointer to the resized block.
    """
    new_size = _to_int(size)
    return _rl.MemRealloc(ptr, new_size)
|
806c17a6863db3af8c5b42474fe05c624685757c
| 3,639,071
|
import json
def get_task_manager(setup_file, **kwargs):
    """ Create a task manager of a correct type.
    Parameters
    ----------
    setup_file : string
        File name of the setup file.
    kwargs : dict
        Additional kwargs.
    Returns
    -------
    manager : TaskManager
        Created task manager.
    Raises
    ------
    ValueError
        If the setup file names an unknown manager type.
    """
    # Context manager so the setup file handle is closed (the original
    # leaked an open file object).
    with open(setup_file) as setup_fp:
        setup = json.load(setup_fp)
    manager = setup['manager'].lower()
    if manager == 'slurm':
        return SlurmTaskManager(setup_file, **kwargs)
    elif manager == 'sge':
        return SgeTaskManager(setup_file, **kwargs)
    elif manager == 'local':
        return LocalTaskManager(setup_file, **kwargs)
    else:
        # The original passed the value as a second positional argument
        # (logging style), so the message was never formatted.
        raise ValueError('Unknown task manager: %s' % manager)
|
a297937fd4520549df034a22739250461cbf2c0e
| 3,639,072
|
def format_time(time):
    """ Converts datetimes to the format expected in SAML2 XMLs. """
    # datetime.__format__ delegates to strftime, so format() is equivalent.
    return format(time, "%Y-%m-%dT%H:%M:%SZ")
|
03651b72aa0b177ac1ac3f1ccafdba6fe967a11a
| 3,639,073
|
def get_delivery_voucher_discount(voucher, total_price, delivery_price):
    """Calculate discount value for a voucher of delivery type.

    Validates the minimum-spend requirement against the order total, then
    computes the discount amount from the delivery price.
    """
    voucher.validate_min_amount_spent(total_price)
    discount = voucher.get_discount_amount_for(delivery_price)
    return discount
|
8ede095730c1d29d01949dff47b4a2893d29720c
| 3,639,074
|
def has_admin_access(request):
    # type: (Request) -> bool
    """
    Verifies if the authenticated user doing the request has administrative access.
    .. note::
        Any request view that does not explicitly override ``permission`` by another value than the default
        :envvar:`MAGPIE_ADMIN_PERMISSION` will already automatically guarantee that the request user is an
        administrator since HTTP [403] Forbidden would have been otherwise replied. This method is indented
        for operations that are more permissive and require conditional validation of administrator access.
    .. seealso::
        Definitions in :class:`magpie.models.RootFactory` and :class:`magpie.models.UserFactory` define
        conditional principals and :term:`ACL` based on the request.
    """
    admin_permission = get_constant("MAGPIE_ADMIN_PERMISSION", request)
    policy = request.registry.queryUtility(IAuthorizationPolicy)  # noqa
    outcome = policy.permits(models.RootFactory(request), get_principals(request), admin_permission)
    return isinstance(outcome, ACLAllowed)
|
54a109375c60354759d98177a2db275f627034b2
| 3,639,075
|
import os
import errno
def safe_remove(path: str) -> bool:
    """Removes a file or directory
    This will remove a file if it exists, and will
    remove a directory if the directory is empty.
    Args:
       path: The path to remove
    Returns:
       True if `path` was removed or did not exist, False
       if `path` was a non empty directory.
    Raises:
       UtilError: In the case of unexpected system call failures
    """
    try:
        # lstat so a symlink to a directory is unlinked, not descended into.
        mode = os.lstat(path).st_mode
        if S_ISDIR(mode):
            os.rmdir(path)
        else:
            os.unlink(path)
        return True
    except OSError as err:
        if err.errno == errno.ENOENT:
            # Nothing to do: path already gone
            return True
        if err.errno == errno.ENOTEMPTY:
            # Directory still has contents
            return False
        raise UtilError("Failed to remove '{}': {}".format(path, err))
|
3f8388f03a38f5933c52323f64cffe189a6652f1
| 3,639,076
|
def model_selection(modelname, num_out_classes=2, pretrain_path=None):
    """Build a TransferModel for the requested architecture.

    :param modelname: architecture identifier passed through as ``modelchoice``
    :param num_out_classes: number of output classes (default binary)
    :param pretrain_path: optional path to pretrained weights
    :return: the constructed TransferModel
    """
    model = TransferModel(modelchoice=modelname,
                          num_out_classes=num_out_classes,
                          pretrain_path=pretrain_path)
    return model
|
ef80dd1c5c52bc0d090801ebb1d5e17f303e48ad
| 3,639,077
|
def stack1(x, filters, blocks, stride1=2, dilation=1, name=None):
    """A set of stacked residual blocks.
    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer in a block.
        blocks: integer, blocks in the stacked blocks.
        stride1: default 2, stride of the first layer in the first block.
        dilation: dilation rate passed to all blocks after the first.
        name: string, stack label.
    # Returns
        Output tensor for the stacked blocks.
    """
    # Only the first block applies the stride (and a conv shortcut).
    x = block1(x, filters, stride=stride1, name=name + '_block1')
    for block_index in range(2, blocks + 1):
        block_name = name + '_block' + str(block_index)
        x = block1(x, filters, conv_shortcut=False, dilation=dilation,
                   name=block_name)
    return x
|
43103a2bcad203b1b32f33e352960bdea8d526c9
| 3,639,078
|
def _CreateDynamicDisplayAdSettings(media_service, opener):
    """Creates settings for dynamic display ad.
    Args:
        media_service: a SudsServiceProxy instance for AdWords's MediaService.
        opener: an OpenerDirector instance.
    Returns:
        The dynamic display ad settings.
    """
    # Upload the logo image first so its mediaId can be referenced below.
    image = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')
    return {
        'landscapeLogoImage': {
            'type': 'IMAGE',
            'mediaId': image['mediaId'],
            'xsi_type': 'Image'
        },
        'pricePrefix': 'as low as',
        'promoText': 'Free shipping!',
        'xsi_type': 'DynamicSettings',
    }
|
c79145ec39a7aed97eea7efe9145eab5c706b146
| 3,639,079
|
def contacts_per_person_normal_00x30():
    """contacts per person normal 00x30

    Constant translated from the Vensim model.
    Original equation: 10
    Units: contact/Day
    Limits: (None, None)
    Type: constant
    """
    return 10
|
1d0f7caaa4cceafbc34045b2983e388cd1169f8b
| 3,639,080
|
def _get_scripts_shell(script_file):  # type: (pathlib.Path) -> str
    """
    Returns the shell used in the passed script file. If no shell is recognized exception is raised.
    Depended on presence of shebang.
    Supported shells: Bash, Fish, Zsh
    :param script_file:
    :return:
    :raises exceptions.UnknownShell: If no shell is recognized
    """
    # Only the first line (the shebang) matters for shell detection.
    with script_file.open('r') as handle:
        first_line = handle.readline().lower()
    for candidate in SUPPORTED_SHELLS:
        if candidate in first_line:
            return candidate
    raise exceptions.UnknownShell('It seems that the currently used post-commit '
                                  'hook uses shebang that is not known to Gitrack: ' + first_line)
|
74332334d9b3caf1be720d656ca6e64f4971e35e
| 3,639,081
|
from shutil import which
def is_cmd_tool(name):
    """
    Check whether `name` is on PATH and marked as executable.
    From: https://stackoverflow.com/a/34177358
    """
    # shutil.which returns the resolved path or None.
    return bool(which(name))
|
a35f84f1bf46aedac488a31402996f075fbe80e2
| 3,639,082
|
import pickle
def load_model(model: Model, language=()):
    """Load geo model and return as dict.

    Builds a lookup from lowercased place name (canonical or alternative)
    to a set of (name, latitude, longitude, country, population) records.
    ``language`` optionally restricts which alternative-name languages are
    indexed; an empty tuple means all languages.
    """
    log.info("Reading geomodel: %s", model)
    with open(model.path, "rb") as infile:
        raw = pickle.load(infile)
    index = defaultdict(set)
    for _geonameid, entry in list(raw.items()):
        record = (entry["name"], entry["latitude"], entry["longitude"],
                  entry["country"], entry["population"])
        index[entry["name"].lower()].add(record)
        for lang in entry["alternative_names"]:
            if not language or lang in language:
                for altname in entry["alternative_names"][lang]:
                    index[altname.lower()].add(record)
    log.info("Read %d geographical names", len(index))
    return index
|
af77d0e0835b8be6b7b87b142141f4c50082a0ae
| 3,639,083
|
def saml_metadata_generator(sp, validated=True, privacypolicy=False, tree=None, disable_entity_extensions=False):
    """
    Generates metadata for single SP.
    sp: ServiceProvider object
    validated: if false, using unvalidated metadata
    privacypolicy: fill empty privacypolicy URLs with default value
    tree: use as root if given, generate new root if not
    return tree
    """
    entity, history, validation_date = get_entity(sp, validated)
    if not entity:
        return tree
    if tree is None:
        namespaces = {"ds": 'http://www.w3.org/2000/09/xmldsig#',
                      "mdattr": 'urn:oasis:names:tc:SAML:metadata:attribute',
                      "mdui": 'urn:oasis:names:tc:SAML:metadata:ui',
                      "saml": 'urn:oasis:names:tc:SAML:2.0:assertion',
                      "xmlns": 'urn:oasis:names:tc:SAML:2.0:metadata',
                      "xsd": 'http://www.w3.org/2001/XMLSchema',
                      "xsi": 'http://www.w3.org/2001/XMLSchema-instance'}
        entity_descriptor = etree.Element("EntityDescriptor",
                                          entityID=entity.entity_id,
                                          nsmap=namespaces)
    else:
        entity_descriptor = etree.SubElement(tree, "EntityDescriptor", entityID=entity.entity_id)
    # Prefer the validated history snapshot over the live SP when available.
    source = history if history else sp
    if not disable_entity_extensions:
        metadata_entity_extensions(entity_descriptor, source)
    metadata_spssodescriptor(entity_descriptor, sp, history, validation_date, privacypolicy)
    metadata_contact(entity_descriptor, sp, validation_date)
    metadata_organization(entity_descriptor, source)
    return tree if tree is not None else entity_descriptor
|
78f065fe7962e7221626c41b81b550ceaa9e7370
| 3,639,084
|
import logging
import sqlite3
def import_from_afd(import_list, vlb_path, working_path, conn):
    """Imports an Armada Fleets Designer list into a Fleet object.

    Parses the pasted AFD text line by line: lines before the "===" marker
    are ignored; "·"-prefixed lines are upgrades attached to the most recent
    ship; lines without a cost "(...)" are objectives; everything else is
    looked up in the pieces DB as a squadron or ship.

    Returns (True, Fleet) on success or (False, last_parsed_line) on error.

    NOTE(review): vlb_path and working_path are accepted but never used here.
    """
    f = Fleet("Food", conn=conn)
    # Parsing only begins after the "===" separator line is seen.
    start = False
    # Objectives arrive in assault -> defense -> navigation order.
    obj_category = "assault"
    # shipnext = False
    for line in import_list.strip().split("\n"):
        try:
            # Remembered so a failure can report the offending line.
            last_line = line.strip()
            # Strip any leading "<count> x " multiplier.
            card_name = line.strip().split(" x ", 1)[-1]
            logging.info(card_name)
            if card_name.startswith("==="):
                start = True
            elif start and len(card_name) > 0:
                if card_name[0] == "·":
                    # Upgrade card: "· Name (cost)" attached to current ship.
                    upgrade, cost = card_name.split("(")
                    upgrade = scrub_piecename(upgrade)
                    cost = cost.split(")")[0]
                    if upgrade in nomenclature_translation:
                        translated = nomenclature_translation[upgrade]
                        logging.info(
                            "[-] Translated {} to {} - AFD.".format(upgrade, translated)
                        )
                        upgrade = translated
                    if (upgrade, cost) in ambiguous_names:
                        upgrade_new = ambiguous_names[(upgrade, cost)][0]
                        logging.info(
                            "Ambiguous name {} ({}) translated to {}.".format(
                                upgrade, cost, upgrade_new
                            )
                        )
                        upgrade = upgrade_new
                    # NOTE(review): `s` is only bound after a ship line has
                    # been parsed; an upgrade appearing first would raise
                    # NameError (caught by the broad except below).
                    _ = s.add_upgrade(upgrade)
                elif "(" not in card_name:
                    # No cost in parentheses: treat as an objective card.
                    logging.info("Hit the conditional for {}.".format(card_name))
                    card_name = scrub_piecename(str(card_name))
                    f.add_objective(obj_category, card_name)
                    # TODO: retool the objs to not care about categories... :/
                    if obj_category == "assault":
                        obj_category = "defense"
                    else:
                        obj_category = "navigation"
                else:
                    # Ship or squadron: "Name (cost)" or "Name (n x cost)".
                    card_name, cost = card_name.split(" (", 1)
                    cost = cost.split(" x ")[-1].split(")")[0]
                    issquadron = False
                    isship = False
                    card_name = scrub_piecename(card_name)
                    try:
                        if card_name in nomenclature_translation:
                            t = nomenclature_translation[card_name]
                            logging.info(
                                "[-] Translated {} to {} - AFD.".format(card_name, t)
                            )
                            card_name = t
                        if (card_name, cost) in ambiguous_names:
                            card_name_new = ambiguous_names[(card_name, cost)][0]
                            logging.info(
                                "Ambiguous name {} ({}) translated to {}.".format(
                                    card_name, cost, card_name_new
                                )
                            )
                            card_name = card_name_new
                        logging.info(
                            "Searching for AFD piece {} in {}".format(
                                scrub_piecename(card_name), str(conn)
                            )
                        )
                        with sqlite3.connect(conn) as connection:
                            issquadron = connection.execute(
                                """SELECT * FROM pieces
                                                    WHERE piecetype='squadroncard'
                                                    AND piecename LIKE ?;""",
                                ("%" + scrub_piecename(card_name) + "%",),
                            ).fetchall()
                    except ValueError as err:
                        logging.exception(err)
                    try:
                        logging.info(
                            "Searching for AFD piece {} in {}".format(
                                card_name, str(conn)
                            )
                        )
                        with sqlite3.connect(conn) as connection:
                            isship = connection.execute(
                                """SELECT * FROM pieces
                                                    WHERE piecetype='shipcard'
                                                    AND piecename LIKE ?;""",
                                ("%" + card_name,),
                            ).fetchall()
                    except ValueError as err:
                        logging.exception(err)
                    # Non-empty fetchall result means a match was found.
                    if bool(issquadron):
                        _ = f.add_squadron(card_name)
                    elif bool(isship):
                        # Remember the ship so following upgrades attach to it.
                        s = f.add_ship(card_name)
                    else:
                        logging.info(
                            "{}{} IS FUCKED UP, YO{}".format(
                                "=" * 40, card_name, "=" * 40
                            )
                        )
        except Exception as err:
            # Any parse failure aborts the import and reports the bad line.
            logging.exception(err)
            return (False, last_line)
    return (True, f)
|
65ca2daac7aa798f1bc768ae50409b06129d46c8
| 3,639,085
|
import re
import os
def CWPProfileToVersionTuple(url):
    """Convert a CWP profile url to a version tuple
    Args:
      url: for example, gs://chromeos-prebuilt/afdo-job/cwp/chrome/
           R65-3325.65-1519323840.afdo.xz
    Returns:
      A tuple of (milestone, major, minor, timestamp)
    """
    fn_mat = (CWP_CHROME_PROFILE_NAME_PATTERN %
              tuple(r'([0-9]+)' for _ in xrange(0, 4)))
    # str.replace returns a new string; the original discarded the result,
    # leaving the pattern's dots as regex wildcards instead of literal
    # separators. The capture groups contain no dots, so escaping after
    # substitution is safe.
    fn_mat = fn_mat.replace('.', '\\.')
    return map(int, re.match(fn_mat, os.path.basename(url)).groups())
|
550a76de62482a0b5c1b631bd4484d6edafab106
| 3,639,086
|
def normalize_not_found(wrapped):
    """View decorator to make 404 error messages more readable"""
    def wrapper(context, request):
        # Discard the incoming 404 context and substitute one carrying a
        # sensible message before delegating to the wrapped view.
        return wrapped(_standard_not_found(), request)
    return wrapper
|
2a9a696c98b777e4f7295015840fbff6235092e7
| 3,639,087
|
import functools
import os
import concurrent
def run(cfg, cfg2=None):
    """ Start preprocessing.

    Reads the eBPF ROS2 trace CSVs (send/recv/cls_bpf) from the log
    directories in ``cfg`` (sender side) and ``cfg2`` (receiver side,
    defaults to ``cfg``), reconstructs per-message traces, computes the
    various ROS/DDS/OS latency series in parallel, and packages each series
    as a SOFATrace for plotting.

    Returns a list of SOFATrace objects.
    """
    # Read all log files generated by ebpf_ros2_*
    # TODO: convert addr and port to uint32, uint16
    read_csv = functools.partial(pd.read_csv, dtype={
        'pid':'Int32', 'seqnum':'Int64', 'subscriber':'Int64', 'publisher':'Int64'})
    send_log = 'send_log.csv'
    recv_log = 'recv_log.csv'
    other_log = ['cls_bpf_log.csv']
    # NOTE(review): if cfg is None AND cfg2 is None, the elif leaves cfg2
    # as None and the cfg2.logdir access below raises — confirm callers
    # never pass (None, None).
    if cfg is None:
        cfg = sofa_config.SOFA_Config()
    elif cfg2 is None:
        cfg2 = cfg
    # Clock offsets recorded at trace time, used to align the two hosts.
    with open(os.path.join(cfg.logdir, 'unix_time_off.txt')) as f:
        lines = f.readlines()
        cfg.unix_time_off = float(lines[0])
        sofa_print.print_hint('unix time offset:' + str(cfg.unix_time_off) + ' in ' + cfg.logdir)
    with open(os.path.join(cfg2.logdir, 'unix_time_off.txt')) as f:
        lines = f.readlines()
        # NOTE(review): this overwrites cfg.unix_time_off with cfg2's file
        # and then prints cfg2.unix_time_off, which was never assigned —
        # likely should be `cfg2.unix_time_off = float(lines[0])`.
        cfg.unix_time_off = float(lines[0])
        sofa_print.print_hint('unix time offset:' + str(cfg2.unix_time_off) + ' in ' + cfg2.logdir)
    send_log = os.path.join(cfg.logdir, cfg.ros2logdir, 'send_log.csv')
    recv_log = os.path.join(cfg2.logdir, cfg2.ros2logdir, 'recv_log.csv')
    print(send_log, recv_log)
    # Collect the auxiliary (cls_bpf) logs from both sides, tagged with
    # the config they belong to.
    cvs_files_others = []
    for idx in range(len(other_log)):
        print(os.path.join(cfg.logdir, cfg.ros2logdir, other_log[idx]))
        cvs_files_others.append(
            (cfg, os.path.join(cfg.logdir, cfg.ros2logdir, other_log[idx])))
    if cfg2 is not cfg:
        for idx in range(len(other_log)):
            print(os.path.join(cfg2.logdir, cfg2.ros2logdir, other_log[idx]))
            cvs_files_others.append(
                (cfg2, os.path.join(cfg2.logdir, cfg2.ros2logdir, other_log[idx]))
            )
    df_send = (cfg, read_csv(send_log))
    df_recv = (cfg2, read_csv(recv_log))
    df_others = []
    for cfg_to_pass, csv_file in cvs_files_others:
        try:
            df_others.append((cfg_to_pass, read_csv(csv_file)))
        except pd.errors.EmptyDataError as e:
            print(csv_file + ' is empty')
    # Group raw rows into per-message traces keyed by topic.
    all_msgs = extract_individual_rosmsg(df_send, df_recv, *df_others)
    print(all_msgs)
    # TODO: Filiter topics
    # Calculate ros latency for all topics
    # Each of the following blocks fans one analysis function out over all
    # topics with a thread pool and collects the per-topic DataFrames.
    res = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(ros_msgs_trace_read, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            res.append(future.result())
    print(res)
    # res = ros_msgs_trace_read(next(iter(all_msgs.items())), cfg=cfg)
    ros_lat_send = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(ros_msgs_trace_read_ros_lat_send, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            ros_lat_send.append(future.result())
    print(ros_lat_send)
    # Calculate time spent in OS for all topics
    os_lat_send = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(ros_msgs_trace_read_os_lat_send, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            os_lat_send.append(future.result())
    print(os_lat_send)
    dds_lat_send = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(ros_msgs_trace_read_dds_lat_send, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            dds_lat_send.append(future.result())
    print(dds_lat_send)
    os_lat_recv = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(ros_msgs_trace_read_os_lat_recv, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            os_lat_recv.append(future.result())
    print(os_lat_recv)
    dds_lat_recv = []
    ros_executor_recv = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        # This analysis returns a (dds_latency, executor_latency) pair.
        future_res = {executor.submit(ros_msgs_trace_read_dds_ros_lat_recv, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            print(future.result())
            # topic = item[0]
            dds_lat_recv.append(future.result()[0])
            ros_executor_recv.append(future.result()[1])
    print(dds_lat_recv)
    print(ros_executor_recv)
    retransmissions = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(find_retransmissions, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            retransmissions.append(future.result())
    print(retransmissions)
    sample_drop = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        future_res = {executor.submit(find_sample_drop, item, cfg=cfg): item for item in all_msgs.items()}
        for future in concurrent.futures.as_completed(future_res):
            item = future_res[future]
            topic = item[0]
            sample_drop.append(future.result())
    print(sample_drop)
    # Wrap each latency series in a SOFATrace for the visualizer.
    sofatrace = sofa_models.SOFATrace()
    sofatrace.name = 'ros2_latency'
    sofatrace.title = 'ros2_latency'
    sofatrace.color = 'DeepPink'
    sofatrace.x_field = 'timestamp'
    sofatrace.y_field = 'duration'
    sofatrace.data = pd.concat(res) # TODO:
    sofatrace_ros_lat_send = sofa_models.SOFATrace()
    sofatrace_ros_lat_send.name = 'ros2_lat_send'
    sofatrace_ros_lat_send.title = 'ros2_lat_send'
    sofatrace_ros_lat_send.color = '#D15817'
    sofatrace_ros_lat_send.x_field = 'timestamp'
    sofatrace_ros_lat_send.y_field = 'duration'
    sofatrace_ros_lat_send.data = pd.concat(ros_lat_send)
    sofatrace_ros_executor_recv = sofa_models.SOFATrace()
    sofatrace_ros_executor_recv.name = 'ros2_executor_recv'
    sofatrace_ros_executor_recv.title = 'ros2_executor_recv'
    sofatrace_ros_executor_recv.color = next(color_recv)
    sofatrace_ros_executor_recv.x_field = 'timestamp'
    sofatrace_ros_executor_recv.y_field = 'duration'
    sofatrace_ros_executor_recv.data = pd.concat(ros_executor_recv)
    sofatrace_dds_lat_send = sofa_models.SOFATrace()
    sofatrace_dds_lat_send.name = 'dds_send_latency'
    sofatrace_dds_lat_send.title = 'dds_send_latency'
    sofatrace_dds_lat_send.color = next(color_send)
    sofatrace_dds_lat_send.x_field = 'timestamp'
    sofatrace_dds_lat_send.y_field = 'duration'
    sofatrace_dds_lat_send.data = pd.concat(dds_lat_send)
    sofatrace_dds_lat_recv = sofa_models.SOFATrace()
    sofatrace_dds_lat_recv.name = 'dds_recv_latency'
    sofatrace_dds_lat_recv.title = 'dds_recv_latency'
    sofatrace_dds_lat_recv.color = next(color_recv)
    sofatrace_dds_lat_recv.x_field = 'timestamp'
    sofatrace_dds_lat_recv.y_field = 'duration'
    sofatrace_dds_lat_recv.data = pd.concat(dds_lat_recv)
    sofatrace_os_lat_send = sofa_models.SOFATrace()
    sofatrace_os_lat_send.name = 'os_send_latency'
    sofatrace_os_lat_send.title = 'os_send_latency'
    sofatrace_os_lat_send.color = next(color_send)
    sofatrace_os_lat_send.x_field = 'timestamp'
    sofatrace_os_lat_send.y_field = 'duration'
    sofatrace_os_lat_send.data = pd.concat(os_lat_send)
    sofatrace_os_lat_recv = sofa_models.SOFATrace()
    sofatrace_os_lat_recv.name = 'os_recv_latency'
    sofatrace_os_lat_recv.title = 'os_recv_latency'
    sofatrace_os_lat_recv.color = next(color_recv)
    sofatrace_os_lat_recv.x_field = 'timestamp'
    sofatrace_os_lat_recv.y_field = 'duration'
    sofatrace_os_lat_recv.data = pd.concat(os_lat_recv)
    sofatrace_retransmissions = sofa_models.SOFATrace()
    sofatrace_retransmissions.name = 'retransmissions'
    sofatrace_retransmissions.title = 'retransmissions'
    sofatrace_retransmissions.color = 'Crimson'
    sofatrace_retransmissions.x_field = 'timestamp'
    sofatrace_retransmissions.y_field = 'duration'
    sofatrace_retransmissions.data = pd.concat(retransmissions)
    sofatrace_retransmissions.highlight = True
    sofatrace_sample_drop = sofa_models.SOFATrace()
    sofatrace_sample_drop.name = 'sample_drop'
    sofatrace_sample_drop.title = 'sample_drop'
    sofatrace_sample_drop.color = 'DarkCyan'
    sofatrace_sample_drop.x_field = 'timestamp'
    sofatrace_sample_drop.y_field = 'duration'
    sofatrace_sample_drop.data = pd.concat(sample_drop)
    sofatrace_sample_drop.highlight = True
    # Flag statistical outliers against the overall ros2_latency trace.
    sofatrace_targets = find_outliers(
        [sofatrace, sofatrace_ros_executor_recv, \
        sofatrace_dds_lat_send, sofatrace_dds_lat_recv, \
        sofatrace_os_lat_send, sofatrace_os_lat_recv], sofatrace)
    # cmd_vel = all_msgs['/cmd_vel']
    # cmd_vel_msgids = [('1.f.c5.ba.f4.30.0.0.1.0.0.0|0.0.10.3', num) for num in [46, 125, 170, 208, 269, 329, 545, 827, 918, 1064, 1193, 1228, 1282]]
    # print(cmd_vel[('1.f.c5.ba.f4.30.0.0.1.0.0.0|0.0.10.3', 45)])
    # res2 = ros_msgs_trace_read(('/cmd_vel', {msgid:cmd_vel[msgid] for msgid in cmd_vel_msgids}), cfg=cfg)
    # highlight = sofa_models.SOFATrace()
    # highlight.name = 'update_cmd_vel'
    # highlight.title = 'Change velocity event'
    # highlight.color = next(color)
    # highlight.x_field = 'timestamp'
    # highlight.y_field = 'duration'
    # highlight.data = pd.concat([res2])
    return [sofatrace,
            sofatrace_ros_lat_send, sofatrace_ros_executor_recv,
            sofatrace_dds_lat_send, sofatrace_dds_lat_recv,
            sofatrace_os_lat_send, sofatrace_os_lat_recv,
            sofatrace_targets, sofatrace_retransmissions, sofatrace_sample_drop]
1ce20090d3227feab3fb2cbb211798a518c6da38
| 3,639,088
|
def process_results(unprocessed, P, R, G):
    """Process the results returned by the worker pool, sorting them by
    policy and run e.g. results[i][j][k] are the results from policy i
    on run j on graph k. Parameters:
      - unprocessed: Unprocessed results (as returned by the worker pool)
      - P: number of policies
      - R: number of runs
      - G: number of graphs/SCMs/test cases
    """
    # The flat list is laid out policy-major, then run-major, G items each.
    return [
        [unprocessed[(i * G * R + G * r):(i * G * R + G * (r + 1))]
         for r in range(R)]
        for i in range(P)
    ]
|
24c2854723b3fc33c3fee58595f84d789e861fbc
| 3,639,089
|
def make_inline_table(data):
    """Create an inline table from the given data.

    Builds an empty tomlkit inline table and fills it from ``data``.
    """
    inline = tomlkit.inline_table()
    inline.update(data)
    return inline
|
c70352de9a716ad5d3f1f33b33ea65c10ebc8f98
| 3,639,090
|
def _mi_dc(x, y, k):
    """
    Calculates the mututal information between a continuous vector x and a
    disrete class vector y.
    This implementation can calculate the MI between the joint distribution of
    one or more continuous variables (X[:, 1:3]) with a discrete variable (y).
    Thanks to Adam Pocock, the author of the FEAST package for the idea.
    Brian C. Ross, 2014, PLOS ONE
    Mutual Information between Discrete and Continuous Data Sets
    """
    y = y.flatten()
    n_samples = x.shape[0]
    knn = NearestNeighbors(n_neighbors=k)
    # distance from each sample to its kth nearest neighbour within its class
    kth_dist = np.empty(n_samples)
    # per-sample count of points sharing that sample's class
    class_sizes = [np.sum(y == label) for label in y]
    # fit a per-class kNN to get the kth in-class neighbour distance
    for cls in np.unique(y):
        members = np.where(y == cls)[0]
        knn.fit(x[members, :])
        kth_dist[members] = knn.kneighbors()[0][:, -1]
    # count all points (any class) within that per-sample radius
    knn.fit(x)
    neighbourhoods = knn.radius_neighbors(radius=kth_dist, return_distance=False)
    neighbour_counts = [hood.shape[0] for hood in neighbourhoods]
    # calculate MI based on Equation 2 in Ross 2014
    return (psi(n_samples) - np.mean(psi(class_sizes))
            + psi(k) - np.mean(psi(neighbour_counts)))
|
35b1295739d9df390980db11b7f03976c5ada3de
| 3,639,091
|
def get_new_deals_intent_handler(handler_input):
    """
    Purpose:
        Handler for getting new deals
    Args:
        handler_input (Dict): Input data from the Alexa Skill
    Return:
        alexa_reponse (Dict): Reponse for Alexa Skill to handle
    """
    deals = get_top_slickdeals(get_slickdeals_feed(SLICKDEALS_URL))
    speech_text = "There are {0} deals. The first deal is {1}".format(
        len(deals), deals[0]
    )
    builder = handler_input.response_builder.speak(speech_text)
    builder = builder.set_card(SimpleCard("Slick Deals", speech_text))
    builder = builder.set_should_end_session(True)
    return builder.response
|
7c30af6414a99193d5a7f97f58285b06571c85fa
| 3,639,092
|
import argparse
import os
def make_parser():
    """Returns the command-line argument parser for sage-spkg-uninstall."""
    # The module docstring doubles as the CLI help text: first line is the
    # summary, the rest becomes the epilog.
    usage_lines = __doc__.strip().splitlines()
    summary = usage_lines[0]
    details = '\n'.join(usage_lines[1:]).strip()
    parser = argparse.ArgumentParser(
        description=summary,
        epilog=details,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('spkg', type=spkg_type, help='the spkg to uninstall')
    parser.add_argument('sage_local', type=dir_type, nargs='?',
                        default=os.environ.get('SAGE_LOCAL'),
                        help='the SAGE_LOCAL path (default: the $SAGE_LOCAL '
                             'environment variable if set)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output showing all files removed')
    parser.add_argument('-k', '--keep-files', action='store_true',
                        help="only delete the package's installation record, "
                             "but do not remove files installed by the "
                             "package")
    parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
    return parser
|
09d03af43b1c1d37a73d319fccb400227e77fed6
| 3,639,093
|
def analyse_dataset(imgs, lbls, name=None):
    """Analyse labelled dataset
    # Arguments:
        imgs: ndarray, a set of images
        lbls: ndarray, labels for a set of images
        name: optional dataset name printed and stored in the summary
    # Returns:
        dict summarising sample counts and per-label statistics
    """
    if name is not None:
        print('Dataset: {}'.format(name))
    labels, counts = np.unique(lbls, return_counts=True)
    summary = {
        'name': name,
        'n_samples': imgs.shape[0],
        'samples_shape': imgs.shape[1:],
        'n_unique_labels': len(counts),
        'unique_labels': labels,
        'min_samples': min(counts),
        'max_samples': max(counts),
        'average_samples': round(np.mean(counts), 0),
        'std_dev': round(np.std(counts), 2),
    }
    for key, value in summary.items():
        print('{}: {}'.format(key, value))
    return summary
|
a6eabfab49b4bdc8590b64275ee2d0bcd19b9a0b
| 3,639,094
|
def transform(doc, *, sort_keys=False):
    """reorder

    Moves the heavyweight definition sections to the end of the mapping,
    preserving the relative order of everything else.
    """
    heavy = ["definitions", "schemas", "responses", "parameters", "paths"]
    reordered = make_dict()
    # Light keys first, in original order.
    for key, value in doc.items():
        if key not in heavy:
            reordered[key] = value
    # Then the heavy sections, in canonical order.
    for key in heavy:
        if key in doc:
            reordered[key] = doc[key]
    if sort_keys:
        reordered = str_dict(reordered)  # side effect
    return reordered
|
3b939ac3185cdae147709bab1709dd1a39d426c9
| 3,639,095
|
from typing import Optional
from typing import List
def plot_card(
        box: str,
        title: str,
        data: PackedRecord,
        plot: Plot,
        events: Optional[List[str]] = None,
        commands: Optional[List[Command]] = None,
) -> PlotCard:
    """Create a card displaying a plot.

    Args:
        box: A string indicating how to place this component on the page.
        title: The title for this card.
        data: Data for this card.
        plot: The plot to be displayed in this card.
        events: The events to capture on this card.
        commands: Contextual menu commands for this component.
    Returns:
        A `h2o_wave.types.PlotCard` instance.
    """
    # Forward every argument by keyword for clarity.
    return PlotCard(
        box=box,
        title=title,
        data=data,
        plot=plot,
        events=events,
        commands=commands,
    )
|
fe1816d045bcf59cb28e29c90e517c79df82c621
| 3,639,096
|
def get_cluster_id(url):
    """Return Google Scholar's cluster identifier from *url*, or None.

    Google assigns a cluster identifier to a group of web documents
    that appear to be the same publication in different places on the web.
    How they do this is a bit of a mystery, but this identifier is
    important since it uniquely identifies the publication.

    Checks the ``cluster`` query parameter first, then falls back to
    ``cites``; a parameter only counts when it has exactly one value.
    """
    # Parse the query string once instead of once per parameter,
    # and drop the leftover debug print of the fallback values.
    query = parse_qs(urlparse(url).query)
    for param in ("cluster", "cites"):
        vals = query.get(param, [])
        if len(vals) == 1:
            return vals[0]
    return None
|
95a5f554560fd219cd07cbd8c8e251e9c8bd4d5e
| 3,639,097
|
from typing import List
def vol_allocation_factory(covs:List, pres:List=None)->[float]:
    """ Allocate capital between portfolios using either cov or pre matrices

    Weights each portfolio by the inverse of its variance and normalizes;
    on any failure falls back to the diagonal allocation.

    :param covs: List of covariance matrices
    :param pres: List of precision matrices
    :return: Capital allocation vector
    """
    if pres is None:
        pres = []
    try:
        # Inverse-variance weights; pres may be shorter than covs.
        weights = [1 / vol_portfolio_variance(cov=cov, pre=pre)
                   for cov, pre in zip_longest(covs, pres, fillvalue=None)]
        return normalize(weights)
    except Exception:
        print('vol allocation failed')
        return diagonal_allocation_factory(covs=covs, pres=pres)
|
82e707f6d79e0c2b02c5c6f5acb4c6cce130bd4c
| 3,639,098
|
import requests
def get_inspection_page(**kwargs):
    """Fetch inspection data from the King County endpoint.

    Keyword arguments whose names appear in INSPECTION_PARAMS override
    the default query parameters; unrecognized names are ignored.

    Returns:
        Tuple of (response body bytes, response encoding).

    Raises:
        requests.HTTPError: if the server returns an error status.
    """
    url = KING_COUNTY_DOMAIN + DATA_PATH
    # Copy so the module-level defaults are never mutated.
    params = INSPECTION_PARAMS.copy()
    for key, val in kwargs.items():
        # Removed leftover debug print(key); only accept known params.
        if key in INSPECTION_PARAMS:
            params[key] = val
    resp = requests.get(url, params=params)
    resp.raise_for_status()
    return resp.content, resp.encoding
|
70c2b95ea6e829f4231c887a59f717a68ede9327
| 3,639,099
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.