| column | dtype | values / lengths |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
| field | value |
|---|---|
| hexsha | dbcaddd126925caf34132904b77e4e858bb11b0f |
| size | 467 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | 015_LatticePaths.py |
| repo name (stars/issues/forks) | joetache4/ProjectEuler |
| repo head hexsha (stars/issues/forks) | f101e927d73dbafa11af1b208992bf0d830c88b1 |
| licenses (stars/issues/forks) | ["MIT"] |
| star/issue/fork counts and event datetimes | null |

content:
'''
Joe Walter
difficulty: 5%
run time: 0:00
answer: 137846528820
***
015 Lattice Paths
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''
# perm(n) == n!
from math import perm
h = 20
w = 20
# number of distinct orderings of h rights and w downs: (h+w)! / (h! * w!)
ans = perm(h + w) // perm(h) // perm(w)
print(ans)
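# Equivalently, the answer is the binomial coefficient C(h+w, h). A quick
# sanity check (illustrative), using math.comb from Python 3.8+:
#   from math import comb
#   assert ans == comb(h + w, h)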
avg_line_length: 17.296296 | max_line_length: 152 | alphanum_fraction: 0.674518
| field | value |
|---|---|
| hexsha | 8a25807a92a529be3d4308e2fe03aae462c343eb |
| size | 999 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | ZeroMQ/filecode/examples/Python/tornado_ioloop/pathosub.py |
| repo name (stars/issues/forks) | JailbreakFox/LightWeightRepository |
| repo head hexsha (stars/issues/forks) | 710dc8cacf934930b8f91b2cfe93cba0f1765094 |
| licenses (stars/issues/forks) | ["BSD-2-Clause"] |
| star/issue/fork counts and event datetimes | null |

content:
#!/usr/bin/env python
"""
synopsis:
Pathological subscriber
    Subscribes to a single topic (hard-coded to b"005" below) and prints received messages
usage:
python pathosub.py
"""
import sys
import zmq
from zmq.eventloop.future import Context, Poller
from zmq.eventloop.ioloop import IOLoop
from tornado import gen
Url = 'tcp://127.0.0.1:5555'
Ctx = Context()
@gen.coroutine
def run():
subscriber = Ctx.socket(zmq.SUB)
subscriber.connect(Url)
subscription = b"%03d" % 5
subscriber.setsockopt(zmq.SUBSCRIBE, subscription)
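    # ZeroMQ subscriptions are byte-prefix filters: this receives any message
    # whose topic frame starts with b"005".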
    # A Poller is created here, but the loop below simply awaits recv_multipart() directly.
    poller = Poller()
    poller.register(subscriber, zmq.POLLIN)  # SUB sockets receive, so POLLIN is the relevant event
while True:
topic, data = yield subscriber.recv_multipart()
#assert topic == subscription
print(data)
def main():
args = sys.argv[1:]
if len(args) != 0:
sys.exit(__doc__)
try:
loop = IOLoop.current()
loop.run_sync(lambda: run())
except KeyboardInterrupt:
print('\nFinished (interrupted)')
if __name__ == '__main__':
main()
avg_line_length: 20.387755 | max_line_length: 63 | alphanum_fraction: 0.65966
| field | value |
|---|---|
| hexsha | e0193cfb4e5adc6e70aecea82f794908e78aca46 |
| size | 98,502 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | stompy/model/hydro_model.py |
| repo name (stars/issues/forks) | rustychris/stompy |
| repo head hexsha (stars/issues/forks) | 4efb78824804edc68555bced275e37842f98ba1f |
| licenses (stars/issues/forks) | ["MIT"] |
| max_stars_count | 17 (2017-10-12T14:53:25.000Z to 2022-02-26T01:24:52.000Z) |
| max_issues_count | 6 (2018-03-12T12:43:14.000Z to 2021-09-04T17:44:31.000Z) |
| max_forks_count | 6 (2017-09-29T21:20:11.000Z to 2020-09-28T21:29:23.000Z) |

content:
"""
Base class for managing hydrodynamic model runs
"""
import os,shutil,glob,inspect
import six
import logging
log=logging.getLogger('HydroModel')
import copy
import numpy as np
import xarray as xr
from shapely import geometry
import stompy.model.delft.io as dio
from stompy import xr_utils
from stompy.io.local import noaa_coops, hycom
from stompy import utils, filters, memoize
from stompy.spatial import wkb2shp, proj_utils
#from stompy.model.delft import dfm_grid
import stompy.grid.unstructured_grid as ugrid
import re
#from . import io as dio
class BC(object):
name=None
# Having the CF standard name can help match up variables
standard_name=None
_geom=None
# set geom_type in subclasses to limit the matching geometries
# to just 'Point', 'LineString', etc. Avoids conflicts if
# there are multiple features with the same name. Should be a list
# since some BCs (source/sink) can be defined with either a Point
# or a LineString
geom_type=[]
# not sure if I'll keep these -- may be better to query at time of use
grid_edge=None
grid_cell=None
# but these are more general, and can vastly speedup MultiBC
grid_edges=None
grid_cells=None
# some BCs allow 'add', which just applies a delta to a previously
# set BC.
mode='overwrite'
on_insufficient_data='exception'
# extend the data before/after the model period by this much
pad=np.timedelta64(24,'h')
def __init__(self,name,model=None,**kw):
"""
Create boundary condition object. Note that no work should be done
here, as the rest of the model data is not yet in place, and this
instance does not even have access yet to its geometry or other
shapefile attributes. model should either be passed in, or assigned
immediately by caller, since most later steps rely on access to a model
object.
"""
self.model=model # may be None!
self.name=name
self.filters=[]
utils.set_keywords(self,kw)
for f in self.filters:
f.setup(self)
# A little goofy - the goal is to make geometry lazily
# fetched against the model gazetteer, but it makes
# get/set operations awkward
@property
def geom(self):
if (self._geom is None) and (self.model is not None):
kw={}
if self.geom_type is not None:
kw['geom_type']=self.geom_type
self._geom=self.model.get_geometry(name=self.name,**kw)
return self._geom
@geom.setter
def geom(self,g):
if isinstance(g,np.ndarray):
if g.ndim==1:
g=geometry.Point(g)
elif g.ndim==2:
g=geometry.LineString(g)
else:
raise Exception("Not sure how to convert %s to a shapely geometry"%g)
self._geom=g
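    # Illustrative use of the setter above (coordinates hypothetical):
    #   bc.geom = np.array([[x0, y0], [x1, y1]])   # becomes a LineString
    #   bc.geom = np.array([x0, y0])               # becomes a Point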
# Utilities for specific types of BCs which need more information
# about the grid
def get_inward_normal(self,grid_edge=None):
"""
Query the grid based on self.grid_edge to find the unit
normal vector for this velocity BC, positive pointing into
the domain.
"""
if grid_edge is None:
grid_edge=self.grid_edge
assert grid_edge is not None
return self.model.grid.edges_normals(grid_edge,force_inward=True)
def get_depth(self,grid_edge=None):
"""
Estimate the water column depth associated with this BC.
This is currently limited to a constant value,
by default calculated for self.grid_edge.
For the purposes here, this is a strictly positive quantity.
"""
if grid_edge is None:
grid_edge=self.grid_edge
assert grid_edge is not None
# This feels like it should be somewhere else, maybe in DFlowModel?
h=-self.model.edge_depth(self.grid_edge,datum='eta0')
if h<=0:
log.warning("Depth for velocity BC is %f, should be >0"%h)
return h
# Below are more DFM specific methods which have not yet been
# refactored
def write(self):
log.info("Writing feature: %s"%self.name)
self.write_pli()
self.write_config()
self.write_data()
def write_config(self):
log.warning("Boundary condition '%s' has no write_config method"%self.name)
def write_data(self):
log.warning("Boundary condition '%s' has no write_data method"%self.name)
def filename_base(self):
"""
filename base (no extension, relative to model run_dir) used to construct
other filenames.
"""
return self.name
def pli_filename(self):
"""
Name of polyline file, relative to model run_dir
"""
return self.filename_base() + '.pli'
def write_pli(self):
if self.geom is not None:
assert self.geom.type=='LineString'
pli_data=[ (self.name, np.array(self.geom.coords)) ]
pli_fn=os.path.join(self.model.run_dir,self.pli_filename())
dio.write_pli(pli_fn,pli_data)
def default_tim_fn(self):
"""
full path for a time file matched to the first node of the pli.
This is only used as a default tim output path when none is
specified.
"""
return os.path.join(self.model.run_dir,self.filename_base() + "_0001.tim")
def default_t3d_fn(self):
"""
same as above, but for t3d
"""
return os.path.join(self.model.run_dir,self.filename_base() + "_0001.t3d")
def write_tim(self,da,fn=None):
"""
Write a DFM tim file based on the timeseries in the DataArray.
da must have a time dimension. No support yet for vector-values here.
"""
ref_date,start,stop = self.model.mdu.time_range()
dt=np.timedelta64(60,'s') # always minutes
# self.model.mdu.t_unit_td64()
elapsed_time=(da.time.values - ref_date)/dt
data=np.c_[elapsed_time,da.values]
if fn is None:
fn=self.default_tim_fn()
np.savetxt(fn,data)
def write_t3d(self,da,z_bed,fn=None):
"""
Write a 3D boundary condition for a feature from a vertical profile (likely
ROMS or HYCOM data)
- most of the time writing boundaries is here
- DFM details for rev52184:
the LAYERS line is silently truncated to 100 characters.
LAYER_TYPE=z assumes a coordinate of 0 at the bed, positive up
we assume that the incoming data has no nan, has a positive-up
z coordinate with 0 being model datum (i.e. NAVD88)
"""
ref_date,t_start,t_stop = self.model.mdu.time_range()
# not going to worry about 3D yet. see ocean_dfm.py
# for some hints.
assert da.ndim==2
# new code gets an xr dataset coming in with z coordinate.
# old code did some cleaning on ROMS data. no more.
# Do sort the vertical
dz=np.diff(da.z.values)
if np.all(dz>0):
log.debug("Vertical order ok")
elif np.all(dz<0):
            log.debug("3D velocity: flipping vertical order")
da=da.isel(z=slice(None,None,-1))
if np.median(da.z.values) > 0:
log.warning("Weak sign check suggests t3d input data has wrong sign on z")
max_line_length=100 # limitation in DFM on the whole LAYERS line
# 7 is '_2.4567'
# -1 for minor bit of safety
max_layers=(max_line_length-len("LAYERS=")) // 7 - 1
# This should be the right numbers, but reverse order
# that's probably not right now...
sigma = (z_bed - da.z.values) / z_bed
# Force it to span the full water column
# used to allow it to go slightly beyond, but
# in trying to diagnose a 3D profile run in 52184, limit
# to exactly 0,1
# well, maybe that's not necessary -- before trying to do any resampling
# here, maybe go ahead and let it span too far
bed_samples=np.nonzero(sigma<=0)[0]
surf_samples=np.nonzero(sigma>=1.0)[0]
slc=slice(bed_samples[-1],surf_samples[0]+1)
da=da.isel(z=slc)
sigma=sigma[slc]
sigma[0]=0.0 # min(0.0,sigma[0])
sigma[-1]=1.0 # max(1.0,sigma[-1])
assert np.all(np.diff(sigma)>0),"Need more sophisticated treatment of sigma in t3d file"
assert len(sigma)<=max_layers
# remapper=lambda y: np.interp(np.linspace(0,1,max_layers),
# np.linspace(0,1,len(sigma)),y)
# # Just because the use of remapper below is not compatible
# # with vector quantities at this time.
# assert da_sub.ndim-1 == 1
sigma_str=" ".join(["%.4f"%s for s in sigma])
# This line is truncated at 100 characters in DFM r52184.
layer_line="LAYERS=%s"%sigma_str
assert len(layer_line)<max_line_length
# NB: this is independent of the TUnit setting in the MDU, because
# it is written out in the file (see below).
elapsed_minutes=(da.time.values - ref_date)/np.timedelta64(60,'s')
ref_date_str=utils.to_datetime(ref_date).strftime('%Y-%m-%d %H:%M:%S')
if fn is None:
fn=self.default_t3d_fn()
assert da.dims[0]=='time' # for speed-up of direct indexing
# Can copy this to other node filenames if necessary
with open(fn,'wt') as fp:
fp.write("\n".join([
"LAYER_TYPE=sigma",
layer_line,
"VECTORMAX=%d"%(da.ndim-1), # default, but be explicit
"quant=velocity",
"quantity1=velocity", # why is this here?
"# start of data",
""]))
for ti,t in enumerate(elapsed_minutes):
fp.write("TIME=%g minutes since %s\n"%(t,ref_date_str))
# Faster direct indexing:
# The ravel will interleave components - unclear if that's correct.
data=" ".join( ["%.3f"%v for v in da.values[ti,:].ravel()] )
fp.write(data)
fp.write("\n")
def as_data_array(self,data,quantity='value'):
"""
Convert several types into a consistent DataArray ready to be
post-processed and then written out.
Conversion rules:
dataarray => no change
dataset => pull just the data variable, either based on quantity, or if there
is a single data variable that is not a coordinate, use that.
constant => wrap in a DataArray with no time dimension.
used to create a two-point timeseries, but if that is needed it should be moved
to model specific code.
"""
if isinstance(data,xr.DataArray):
data.attrs['mode']=self.mode
return data
elif isinstance(data,xr.Dataset):
if len(data.data_vars)==1:
                # some xarray versions allow an integer index to get the first item;
                # 0.10.9 requires casting to a list first.
da=data[list(data.data_vars)[0]]
da.attrs['mode']=self.mode
return da
else:
raise Exception("Dataset has multiple data variables -- not sure which to use: %s"%( str(data.data_vars) ))
elif isinstance(data,(np.integer,np.floating,int,float)):
# # handles expanding a constant to the length of the run
# ds=xr.Dataset()
# ds['time']=('time',),np.array( [self.data_start,self.data_stop] )
# ds[quantity]=('time',),np.array( [data,data] )
# da=ds[quantity]
da=xr.DataArray(data)
da.attrs['mode']=self.mode
return da
else:
raise Exception("Not sure how to cast %s to be a DataArray"%data)
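    # Illustrative examples of the conversions above (data hypothetical):
    #   self.as_data_array(2.5)                               -> 0-d DataArray with attrs['mode'] set
    #   self.as_data_array(xr.Dataset({'flow': ('time', q)})) -> the single 'flow' DataArray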
# Not all BCs have a time dimension, but enough do that we have some general utility
# getters/setters at this level
# Note that data_start, data_stop are from the point of view of the data source,
# e.g. a model starting on 2015-01-01 could have a 31 day lag, such that
# data_start is actually 2014-12-01.
_data_start=None
_data_stop =None
@property
def data_start(self):
if self._data_start is None and self.model is not None:
return self.transform_time_input(self.model.run_start-self.pad)
else:
return self._data_start
@data_start.setter
def data_start(self,v):
self._data_start=v
@property
def data_stop(self):
if self._data_stop is None and self.model is not None:
return self.transform_time_input(self.model.run_stop+self.pad)
else:
return self._data_stop
@data_stop.setter
def data_stop(self,v):
self._data_stop=v
def transform_time_input(self,t):
for filt in self.filters:
t=filt.transform_time_input(t)
return t
def transform_output(self,da):
"""
Apply filter stack to da, including model-based time zone
correction of model is set.
"""
for filt in self.filters[::-1]:
da=filt.transform_output(da)
da=self.to_model_timezone(da)
return da
def to_model_timezone(self,da):
if 'time' in da.dims and self.model is not None:
# da.time.values[:]=self.model.utc_to_native(da.time.values)
# Create new da and replace time data instead of mutating data.
# da.time may be shared by multiple instances. This way the
# call is idempotent and avoids nasty bugs
da_new=da.copy()
da_new['time']=('time',), self.model.utc_to_native(da.time.values)
return da_new
return da
def src_data(self):
raise Exception("src_data must be set in subclass")
def data(self):
da=self.src_data()
da=self.as_data_array(da)
if 'time' in da.dims:
#on_insufficient_data='exception'
data_start=da.time.values.min()
data_stop=da.time.values.max()
if ( (data_start > self.model.run_start) or
(data_stop < self.model.run_stop) ):
msg="Run: %s -- %s, but BC data for %s is %s -- %s"%(
self.model.run_start,self.model.run_stop,self.name,
data_start,data_stop)
if self.on_insufficient_data=='exception':
# raise Exception(msg)
log.warning(msg)
pass
elif self.on_insufficient_data=='log':
log.warning(msg)
elif self.on_insufficient_data=='ignore':
pass
else:
                    raise Exception("Bad setting for on_insufficient_data='%s'"%self.on_insufficient_data)
da=self.transform_output(da)
return da
# if True, bokeh plot will include time series for intermediate
# data as filters are applied
bokeh_show_intermediate=True
def write_bokeh(self,filename=None,path=".",title=None,mode='cdn'):
"""
Write a bokeh html plot for this dataset.
path: folder in which to place the plot.
filename: relative or absolute filename. defaults to path/{self.name}.html
mode: this is passed to bokeh, 'cdn' yields small files but requires an internet
connection to view them. 'inline' yields self-contained, larger (~800k) files.
"""
try:
import bokeh.io as bio # output_notebook, show, output_file
import bokeh.plotting as bplt
except ModuleNotFoundError:
log.warning("Bokeh not found. Will not generate bokeh plots")
return
bplt.reset_output()
if title is None:
title="Name: %s"%self.name
p = bplt.figure(plot_width=750, plot_height=350,
title=title,
active_scroll='wheel_zoom',
x_axis_type="datetime")
if self.bokeh_show_intermediate:
da=self.as_data_array(self.src_data())
if da is not None:
self.plot_bokeh(da,p,label="src")
for filt in self.filters[::-1]:
da=filt.transform_output(da)
self.plot_bokeh(da,p,label=filt.label())
da=self.to_model_timezone(da)
self.plot_bokeh(da,p)
else:
log.warning("No src_data => no bokeh plot for %s"%str(self))
else:
da=self.data()
if da is not None:
self.plot_bokeh(da,p)
else:
log.warning("No src_data => no bokeh plot for %s"%str(self))
if filename is None:
filename="bc_%s.html"%self.name
output_fn=os.path.join(path,filename)
bio.output_file(output_fn,
title=title,
mode=mode)
bio.save(p) # show the results
    # annoying, but bokeh does not cycle colors automatically
_colors=None
def get_color(self):
if self._colors is None:
from bokeh.palettes import Dark2_5 as palette
import itertools
self._colors=itertools.cycle(palette)
return six.next(self._colors)
def plot_bokeh(self,da,plot,label=None):
"""
Generic plotting implementation -- will have to override for complicated
datatypes
"""
plot.yaxis.axis_label = da.attrs.get('units','n/a')
if label is None:
label=self.name
if 'time' in da.dims:
plot.line( da.time.values.copy(), da.values.copy(), legend_label=label,
color=self.get_color())
else:
from bokeh.models import Label
label=Label(x=70, y=70, x_units='screen', y_units='screen',
text="No plotting for %s (%s)"%(label,self.__class__.__name__))
plot.add_layout(label)
class BCFilter(object):
"""
Transformation/translations that can be applied to
a BC
"""
def __init__(self,**kw):
utils.set_keywords(self,kw)
def setup(self,bc):
"""
This is where you might increase the pad
"""
self.bc=bc
def transform_time_input(self,t):
"""
Transform the externally requested time to what the data source
should provide
"""
return t
def transform_output(self,da):
"""
Whatever dataarray comes back from the source, apply the necessary
transformations (including the inverse of the time_input transform)
"""
return da
def label(self):
return self.__class__.__name__
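# A minimal custom-filter sketch (illustrative, not part of the library):
# scale the data by a constant factor using the BCFilter hooks above.
#   class Scale(BCFilter):
#       factor=1.0
#       def transform_output(self,da):
#           da.values[...] = self.factor*da.values
#           return da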
class LowpassGodin(BCFilter):
min_pad=np.timedelta64(5*24,'h')
def setup(self,bc):
super(LowpassGodin,self).setup(bc)
if self.bc.pad<self.min_pad:
self.bc.pad=self.min_pad
def transform_output(self,da):
assert da.ndim==1,"Only ready for simple time series"
from .. import filters
da.values[:]=filters.lowpass_godin(da.values,
utils.to_dnum(da.time))
return da
class Lowpass(BCFilter):
cutoff_hours=None
# if true, replace any nans by linear interpolation, or
# constant extrapolation at ends
fill_nan=True
def transform_output(self,da):
assert da.ndim==1,"Only ready for simple time series"
from .. import filters
        assert self.cutoff_hours is not None,"Must specify lowpass cutoff_hours"
dt_h=24*np.median(np.diff(utils.to_dnum(da.time.values)))
log.debug("Lowpass: data time step is %.2fh"%dt_h)
data_in=da.values
if np.any(~np.isfinite(data_in)):
if self.fill_nan:
log.info("Lowpass: %d of %d data values will be filled"%( np.sum(~np.isfinite(data_in)),
len(data_in) ))
data_in=utils.fill_invalid(data_in,ends='constant')
else:
log.error("Lowpass: %d of %d data values are not finite"%( np.sum(~np.isfinite(data_in)),
len(data_in) ))
da.values[:]=filters.lowpass(data_in,cutoff=self.cutoff_hours,dt=dt_h)
assert np.all(np.isfinite(da.values)),("Lowpass: %d of %d output data values are not finite"%
( np.sum(~np.isfinite(da.values)),
len(da.values) ))
return da
class FillTidal(BCFilter):
def transform_output(self,da):
return utils.fill_tidal_data(da)
class Lag(BCFilter):
def __init__(self,lag):
self.lag=lag
def transform_time_input(self,t):
return t+self.lag
def transform_output(self,da):
da.time.values[:]=da.time.values-self.lag
return da
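# Illustrative usage (da hypothetical): apply a constant time shift to the
# source data before it is written out.
#   bc=StageBC(name='ocean', water_level=da, filters=[Lag(np.timedelta64(-2,'h'))])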
class Transform(BCFilter):
def __init__(self,fn=None, fn_da=None, units=None):
"""
fn: a function which takes the data values and returns
transformed data values.
fn_da: a function which takes the data array, and
returns a transformed data array.
this will apply both, but either can be omitted from
the parameters. fn_da is applied first.
units: if fn changes the units, specify the new units here
"""
self.fn=fn
self.fn_da=fn_da
self.units=units
def transform_output(self,da):
if self.fn_da:
da=self.fn_da(da)
if self.fn:
# use ellipsis in case da.values is scalar
da.values[...]=self.fn(da.values)
if self.units is not None:
da.attrs['units']=self.units
return da
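# Illustrative usage (da_cfs hypothetical): convert a flow record from cfs to m3/s.
#   bc=FlowBC(name='river', flow=da_cfs,
#             filters=[Transform(fn=lambda q: 0.3048**3*q, units='m3 s-1')])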
class FillGaps(BCFilter):
"""
Attempt to fill small gaps of missing data.
Not currently complete.
This will probably only handle the basic case of short
periods of missing data which can be linearly interpolated.
Anything more complicated needs a special case filter, like
filling with tidal data, or detecting periods of zero.
"""
max_gap_interp_s=2*60*60
large_gap_value=0.0
def transform_output(self,da):
# have self.bc, self.bc.model
# self.bc.data_start, self.bc.data_stop
if da.ndim==0: # scalar -- no series to fill
return da
if len(da)==0:
log.warning("FillGaps called with no input data")
da=xr.DataArray(self.large_gap_value)
return da
if not np.any(np.isnan(da.values)):
return da
else:
            # This is not smart! It doesn't use time, just linearly interpolates
            # over gaps based on index.
da_filled=da.copy()
da_filled.values[:] = utils.fill_invalid(da_filled.values)
return da_filled
class RoughnessBC(BC):
shapefile=None
data_array=None # xr.DataArray
def __init__(self,shapefile=None,**kw):
if 'name' not in kw:
kw['name']='roughness'
super(RoughnessBC,self).__init__(**kw)
self.shapefile=shapefile
def write_config(self):
with open(self.model.ext_force_file(),'at') as fp:
lines=["QUANTITY=frictioncoefficient",
"FILENAME=%s"%self.xyz_filename(),
"FILETYPE=7",
"METHOD=4",
"OPERAND=O",
"\n"
]
fp.write("\n".join(lines))
def xyz_filename(self):
return self.filename_base()+".xyz"
def src_data(self):
if self.shapefile is not None:
shp_data=wkb2shp.shp2geom(self.shapefile)
coords=np.array( [np.array(pnt) for pnt in shp_data['geom'] ] )
n=shp_data['n']
da=xr.DataArray(n,dims=['location'],name='n')
da=da.assign_coords(x=xr.DataArray(coords[:,0],dims='location'))
da=da.assign_coords(y=xr.DataArray(coords[:,1],dims='location'))
da.attrs['long_name']='Manning n'
elif self.data_array is not None:
da=self.data_array
return da
def write_data(self):
data_fn=os.path.join(self.model.run_dir,self.xyz_filename())
xyz=self.data()
np.savetxt(data_fn,xyz)
def write_bokeh(self,filename=None,path=".",title=None,mode='cdn'):
"""
Write a bokeh html plot for this dataset. RoughnessBC has specific
needs here.
path: folder in which to place the plot.
filename: relative or absolute filename. defaults to path/{self.name}.html
mode: this is passed to bokeh, 'cdn' yields small files but requires an internet
connection to view them. 'inline' yields self-contained, larger (~800k) files.
"""
import bokeh.io as bio # output_notebook, show, output_file
import bokeh.plotting as bplt
bplt.reset_output()
if title is None:
title="Name: %s"%self.name
p = bplt.figure(plot_width=750, plot_height=750,
title=title,
active_scroll='wheel_zoom')
p.match_aspect=True # aiming for analog to axis('equal')
da=self.data()
self.plot_bokeh(da,p)
if filename is None:
filename="bc_%s.html"%self.name
output_fn=os.path.join(path,filename)
bio.output_file(output_fn,
title=title,
mode=mode)
bio.save(p) # save the results
def plot_bokeh(self,da,plot,label=None):
if label is None:
label=self.name
rough=da.values
from bokeh.models import LinearColorMapper,ColorBar
color_mapper=LinearColorMapper(palette="Viridis256",
low=rough.min(), high=rough.max())
from matplotlib import cm
cmap=cm.viridis
norm_rough=(rough-rough.min())/(rough.max()-rough.min())
mapped=[cmap(v) for v in norm_rough]
colors = [
"#%02x%02x%02x" % (int(m[0]*255),
int(m[1]*255),
int(m[2]*255))
for m in mapped ]
plot.scatter(da.x.values.copy(), da.y.values.copy(), radius=3,
fill_color=colors, line_color=None,legend_label=label)
color_bar = ColorBar(color_mapper=color_mapper,
label_standoff=12, border_line_color=None, location=(0,0))
plot.add_layout(color_bar, 'right')
class StageBC(BC):
# If other than None, can compare to make sure it's the same as the model
# datum.
datum=None
dredge_depth=None # DFM doesn't need this, but SCHISM might
geom_type=['LineString']
standard_name='sea_surface_height'
water_level=None
def __init__(self,water_level=None,**kw):
"""
water_level: scalar value or xr.DataArray specifying water level BC.
used to be 'z' but that should be reserved for vertical coordinates
"""
super(StageBC,self).__init__(**kw)
self.water_level=water_level
def write_config(self):
old_bc_fn=self.model.ext_force_file()
with open(old_bc_fn,'at') as fp:
lines=["QUANTITY=waterlevelbnd",
"FILENAME=%s"%self.pli_filename(),
"FILETYPE=9",
"METHOD=3",
"OPERAND=O",
"\n"]
fp.write("\n".join(lines))
def filename_base(self):
"""
Make it clear in the filenames what is being forced
"""
return super(StageBC,self).filename_base()+"_ssh"
def src_data(self):
return self.water_level
def write_data(self):
# just write a single node
self.write_tim(self.data())
def evaluate(self,t):
"""
Return the water level at the given time (np.datetime64).
Useful for setting initial water level.
"""
water_level=self.data()
if 'time' in water_level.dims:
water_level=np.interp( utils.to_dnum(t),
utils.to_dnum(water_level.time), water_level.values )
return water_level
class HarmonicStageBC(StageBC):
msl=0.0
    # constituents: dict mapping constituent name -> (amplitude, phase in degrees)
constituents=None
dt=np.timedelta64(360,'s')
def __init__(self,**kw):
"""
Set stage based on harmonic constituents. This is not a full astronomical tides
BC -- it does not account for equilibrium phase, nodal variations, etc.
Usage:
HarmonicStageBC(name,..., msl=0.25, M2=(2.0,0.1), S2=(0.5,0.5))
msl: set mean sea level
constituents are named by upper case standard abbreviations (see
../tide_consts.txt), and values are amplitude and phase
"""
# Pull out any constituent names from keywords before super()
from .. import tide_consts
self.constituents={}
consts=[ k
for k in kw
if k in tide_consts.const_names]
for k in consts:
self.set_constituent(k,kw.pop(k))
super(StageBC,self).__init__(**kw)
def set_constituent(self,name,amp_phase):
self.constituents[name]=amp_phase
def src_data(self):
t=np.arange(self.data_start,self.data_stop,self.dt)
t_dnum=utils.to_dnum(t) # decimal days
eta=self.msl*np.ones(len(t),np.float64)
from .. import tide_consts
for k in self.constituents:
const_idx=tide_consts.const_names.index(k)
amp,phase = self.constituents[k]
# degrees per hour, converted to rads/day
speed=np.pi/180 * 24 * tide_consts.speeds[const_idx]
# This isn't a proper construction. Should invoke the
# real deal tidal code, with equilibrium arguments and
# all.
eta += amp*np.cos(speed*t_dnum+phase*np.pi/180)
ds=xr.Dataset()
ds['time']=('time',),t
ds['water_level']=('time',),eta
return ds['water_level']
class CommonFlowBC(BC):
flow=None
def src_data(self):
return self.flow
class FlowBC(CommonFlowBC):
dredge_depth=-1.0
standard_name='ocean_volume_transport_across_line'
flow=None
geom_type=['LineString']
def __init__(self,flow=None,**kw):
super(FlowBC,self).__init__(**kw)
self.flow=flow
# def filename_base(self):
# return super(FlowBC,self).filename_base()+"_flow"
# def write_config(self):
# old_bc_fn=self.model.ext_force_file()
#
# with open(old_bc_fn,'at') as fp:
# lines=["QUANTITY=dischargebnd",
# "FILENAME=%s"%self.pli_filename(),
# "FILETYPE=9",
# "METHOD=3",
# "OPERAND=O",
# "\n"]
# fp.write("\n".join(lines))
#
# def write_data(self):
# self.write_tim(self.data())
class SourceSinkBC(CommonFlowBC):
# The grid, at the entry point, will be taken down to this elevation
# to ensure that prescribed flows are not prevented due to a dry cell.
# Note that only DFM supports LineString here.
geom_type=['Point','LineString']
z='bed' # elevation of the mass source
z_src='bed' # elevation of mass sink, if two-ended
dredge_depth=-1.0
class WindBC(BC):
"""
Not yet fully updated
"""
wind=None
def __init__(self,**kw):
if 'name' not in kw:
# commonly applied globally, so may not have a geographic name
kw['name']='wind'
super(WindBC,self).__init__(**kw)
def write_pli(self):
assert self.geom is None,"Spatially limited wind not yet supported"
return # nothing to do
def default_tim_fn(self):
# different than super class because typically no nodes
return os.path.join(self.model.run_dir,self.filename_base() + ".tim")
def write_config(self):
old_bc_fn=self.model.ext_force_file()
with open(old_bc_fn,'at') as fp:
lines=["QUANTITY=windxy",
"FILENAME=%s.tim"%self.filename_base(),
"FILETYPE=2",
"METHOD=1",
"OPERAND=O",
"\n"]
fp.write("\n".join(lines))
def write_data(self):
self.write_tim(self.data())
def src_data(self):
assert self.wind is not None
return self.wind
def plot_bokeh(self,da,plot,label=None):
        # this will have to get smarter over time...
# da will almost certainly have an xy dimension for the two components.
# for now, we assume no spatial variation, and plot two time series
if label is None:
label=self.name
for xy in [0,1]:
plot.line( da.time.values.copy(),
da.isel(xy=xy).values.copy(),
legend_label=label+"-"+"xy"[xy],
color=self.get_color())
class RainfallRateBC(BC):
"""
WIP.
Adopt same naming convention as DFM. Rainfall*Rate*. Data for now
are in mm/day, again following DFM convention.
"""
rainfall_rate=None
def __init__(self,**kw):
if 'name' not in kw:
# commonly applied globally, so may not have a geographic name
kw['name']='rainfall'
super(RainfallRateBC,self).__init__(**kw)
def src_data(self):
assert self.rainfall_rate is not None
return self.rainfall_rate
def plot_bokeh(self,da,plot,label=None):
        # this will have to get smarter over time...
        # for now, we assume no spatial variation, and plot a single time series
if label is None:
label=self.name
plot.line( da.time.values.copy(),
da.values.copy(),
legend_label=label,
color=self.get_color())
class ScalarBC(BC):
scalar=None
value=None
parent=None
def __init__(self,**kw):
"""
name: feature name
model: HydroModel instance
scalar: 'salinity','temperature', other
value: floating point
parent: [optional] a BC, typ. flow but doesn't have to be
"""
if 'parent' in kw:
self.parent=kw.pop('parent')
# make a new kw dictionary with some defaults from the parent
# but they can be overridden by specified arguments
new_kw=dict(name=self.parent.name,
geom=self.parent.geom)
new_kw.update(kw)
kw=new_kw
super(ScalarBC,self).__init__(**kw)
def src_data(self):
# Base implementation does nothing
return self.value
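# Illustrative usage (names hypothetical): tag an existing flow BC with a salinity value.
#   flow_bc=model.add_FlowBC(name='river', flow=10.0)
#   model.add_bcs(ScalarBC(parent=flow_bc, scalar='salinity', value=0.0))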
class VerticalCoord(object):
"""
A placeholder for now, but potentially a place to describe the
vertical coordinate structure
"""
pass
class SigmaCoord(VerticalCoord):
sigma_growth_factor=1
class MpiModel(object):
"""
Starting point for common MPI functionality. In its
infancy.
"""
mpi_bin_dir=None
mpi_bin_exe='mpiexec'
mpi_args=() # tuple to avoid mutation
num_procs=1 # might be useful outside of MPI, but keep it here for now.
mpi_flavor='mpiexec' # 'mpiexec' or 'slurm'
# For mpi_flavor=='slurm':
# path to slurm's srun command.
    slurm_srun="srun"
    srun_args=() # extra arguments passed to srun; referenced in mpirun_slurm()
_mpiexec=None
@property
def mpiexec(self):
if self._mpiexec is None:
return os.path.join(self.mpi_bin_dir,self.mpi_bin_exe)
else:
return self._mpiexec
@mpiexec.setter
    def mpiexec(self,m): # the setter must reuse the property name for self.mpiexec=... to work
self._mpiexec=m
def mpirun(self,cmd,num_procs=None,working_dir=".",wait=True):
"""
Run a command via MPI.
cmd: list of command components (e.g. ['ls','-l','/home/stuff'])
working_dir: if specified, arrange to start the process in the given
directory.
wait: True: if possible, wait for the command to complete.
False: if possible, return immediately.
This is very limited! When directly using mpi or using
srun while already in the allocation, only wait=True
is supported, and wait=False will register a log.Warning.
When running with slurm but outside of a job, only wait=False is
supported. wait=True will start the job, but then raise an Exception
saying that it cannot wait.
"""
if num_procs is None:
num_procs=self.num_procs
if self.mpi_flavor=='mpiexec':
self.mpirun_mpiexec(cmd,num_procs,working_dir,wait=wait)
elif self.mpi_flavor=='slurm':
self.mpirun_slurm(cmd,num_procs,working_dir,wait=wait)
else:
raise Exception('Unknown MPI flavor %s'%self.mpi_flavor)
def mpirun_mpiexec(self,cmd,num_procs,working_dir,wait):
"""
Direct invocation of mpiexec.
"""
real_cmd=( [self.mpiexec,"-n","%d"%num_procs]
+list(self.mpi_args)
+cmd )
if not wait:
raise Exception( ("Request to start MPI process "
"(flavor=%s) without waiting not supported")%self.mpi_flavor)
self.log.info("Running command: %s"%(" ".join(real_cmd)))
return utils.call_with_path(real_cmd,working_dir)
# slurm helpers:
def slurm_jobid(self):
"""
slurm job id as a string, or None if not in a slurm job
"""
return os.environ.get('SLURM_JOBID',None)
def slurm_ntasks(self):
return int(os.environ.get('SLURM_NTASKS',0))
def slurm_check_mpi_ntasks(self,n):
"""
Check to make sure it's possible to run n tasks under the
current slurm environment.
"""
n_global=self.slurm_ntasks()
if n>n_global:
            print("In SLURM job, but SLURM_NTASKS(%d) < requested num_procs(%d)"%( n_global,n),
flush=True)
raise Exception("Mismatch in number of processes")
def slurm_srun_options(self,n):
"""
Return options to pass to srun to invoke an mpi task
with n cpus.
"""
n_tasks=self.slurm_ntasks()
if n_tasks==n:
print(f"Homogeneous job, and n==NTASKS")
return []
elif n_tasks<n:
raise Exception(f"MPI job size {n} > SLURM ntasks {n_tasks}")
else:
options=['-n',str(n)]
print(f"Homogeneous oversized job. Add {' '.join(options)}",
flush=True)
return options
def mpirun_slurm(self,cmd,num_procs,working_dir,wait):
"""
Start an MPI process via slurm's srun. Assumes that
script is already in an allocated job.
"""
real_cmd=( [self.slurm_srun]
+self.slurm_srun_options(num_procs)
+list(self.srun_args)
+cmd )
if not wait:
raise Exception( ("Request to start MPI process "
"(flavor=%s) without waiting not supported")%self.mpi_flavor)
self.log.info("Running command: %s"%(" ".join(real_cmd)))
return utils.call_with_path(real_cmd,working_dir)
class HydroModel(object):
run_dir="." # working directory when running dflowfm
cache_dir=None
run_start=None
run_stop=None
restart=None
restart_model=None # reference to HydroModel instance(subclass) that we are continuing
grid=None
projection=None # string like "EPSG:26910"
z_datum=None
# this is only used for setting utc_to_native, and native_to_utc
utc_offset=np.timedelta64(0,'h') # -8 for PST
def __init__(self,**kw):
self.log=log
self.bcs=[]
self.extra_files=[]
self.gazetteers=[]
self.mon_sections=[]
self.mon_points=[]
utils.set_keywords(self,kw)
def add_extra_file(self,path,copy=True):
self.extra_files.append( (path,copy) )
def write_extra_files(self):
for f in self.extra_files:
path,copy = f
if copy:
tgt=os.path.join( self.run_dir, os.path.basename(path))
if not (os.path.exists(tgt) and os.path.samefile(tgt,path)):
shutil.copyfile(path,tgt)
else:
log.info("Extra file %s points to the target. No-op"%path)
def copy(self,deep=True):
"""
Make a copy of this model instance.
"""
# Starting point is just python deepcopy, but can customize
# as needed.
return copy.deepcopy(self)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in ['log']: # shallow for some object
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
def create_with_mode(self,path,mode='create'):
"""
Create a folder, with several options for existing folder contents
path: absolute, or relative to pwd
mode: 'create' create the folder if it doesn't exist
'pristine' create, and clear anything already in there
'noclobber' create, and fail if it already exists.
'existing' assert that the path exists, but do nothing to it.
"""
if mode=='create':
if not os.path.exists(path):
os.makedirs(path)
elif mode=='pristine':
if os.path.exists(path):
# shutil.rmtree(path)
# rather than going scorched earth, removed the contents of
# the directory. this plays nicer with processes which
# may be working in that directory.
for p in os.listdir(path):
fp=os.path.join(path,p)
if os.path.isdir(fp):
shutil.rmtree(fp)
else:
os.unlink(fp)
else:
os.makedirs(path)
elif mode=='noclobber':
assert not os.path.exists(path),"Directory %s exists, but mode is noclobber"%path
os.makedirs(path)
elif mode=='askclobber':
if os.path.exists(path):
import sys
sys.stdout.write("Directory %s exists. overwrite? [y/n] "%path)
sys.stdout.flush()
resp=six.moves.input()
if resp.lower()!='y':
raise Exception("Directory %s exists -- failing out"%path)
return self.create_with_mode(path,'pristine')
else:
os.makedirs(path)
elif mode=='existing':
assert os.path.exists(path),"Directory %s does not exist"%path
else:
raise Exception("Did not understand create mode: %s"%mode)
def set_run_dir(self,path,mode='create'):
"""
Set the working directory for the simulation.
See create_with_mode for details on 'mode' parameter.
set_run_dir() supports an additional mode "clean",
which removes files known to be created during the
script process, as opposed to 'pristine' which deletes
everything.
"""
self.run_dir=path
if mode=="clean":
self.create_with_mode(path,"create")
self.clean_run_dir()
else:
self.create_with_mode(path,mode)
def clean_run_dir(self):
"""
Clean out most of the run dir, deleting files known to be
created by running the model. Overload in model-specific
subclass
"""
pass
def set_cache_dir(self,path,mode='create'):
"""
Set the cache directory, mainly for BC data.
See create_with_mode for details on 'mode' parameter.
Doesn't currently interact with much -- may be removed
in the future
"""
self.create_with_mode(path,mode)
def set_grid(self,grid):
if isinstance(grid,six.string_types):
if grid.endswith('_net.nc'):
grid=ugrid.UnstructuredGrid.read_dfm(grid)
else: # if grid.endswith('.nc'):
grid=ugrid.UnstructuredGrid.read_ugrid(grid)
self.grid=grid
# To be safe, make sure grid has edges['cells'] calculated, as
# later parts of the model setup avoid rechecking this.
self.grid.edge_to_cells()
default_grid_target_filename='grid_net.nc'
def grid_target_filename(self):
"""
The filename, relative to self.run_dir, of the grid. Not guaranteed
to exist, and if no grid has been set, or the grid has no filename information,
this will default to self.default_grid_target_filename
potentially move to dflow_model
"""
if self.grid is None or self.grid.filename is None:
return self.default_grid_target_filename
else:
return os.path.basename(self.grid.filename)
def dredge_boundary(self,linestring,dredge_depth,node_field=None,edge_field=None,cell_field=None):
"""
Lower bathymetry in the vicinity of external boundary, defined
by a linestring.
linestring: [N,2] array of coordinates
dredge_depth: positive-up bed-level for dredged areas
Modifies depth information in-place.
"""
if not (node_field or edge_field or cell_field):
raise Exception("dredge_boundary: must specify at least one depth field")
# Carve out bathymetry near sources:
cells_to_dredge=[]
linestring=np.asarray(linestring)
assert linestring.ndim==2,"dredge_boundary requires [N,2] array of points"
g=self.grid
feat_edges=g.select_edges_by_polyline(linestring,rrtol=3.0,update_e2c=False)
if len(feat_edges)==0:
raise Exception("No boundary edges matched by %s"%(str(linestring)))
cells_to_dredge=g.edges['cells'][feat_edges].max(axis=1)
nodes_to_dredge=np.concatenate( [g.cell_to_nodes(c)
for c in cells_to_dredge] )
nodes_to_dredge=np.unique(nodes_to_dredge)
if edge_field:
if edge_field in g.edges.dtype.names:
assert np.all(feat_edges>=0)
g.edges[edge_field][feat_edges] = np.minimum(g.edges[edge_field][feat_edges],
dredge_depth)
else:
log.warning('No edge bathymetry (%s) to dredge. Ignoring'%edge_field)
if node_field:
assert np.all(cells_to_dredge>=0)
assert np.all(nodes_to_dredge>=0)
g.nodes[node_field][nodes_to_dredge] = np.minimum(g.nodes[node_field][nodes_to_dredge],
dredge_depth)
if cell_field:
assert np.all(cells_to_dredge>=0)
g.cells[cell_field][cells_to_dredge] = np.minimum(g.cells[cell_field][cells_to_dredge],
dredge_depth)
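    # Illustrative call (field name from a typical grid): lower bathymetry along a BC line.
    #   model.dredge_boundary(np.array(bc.geom.coords), dredge_depth=-1.0, node_field='node_z_bed')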
def dredge_discharge(self,point,dredge_depth,
node_field=None,edge_field=None,cell_field=None):
if not (node_field or edge_field or cell_field):
raise Exception("dredge_boundary: must specify at least one depth field")
point=np.asarray(point)
if point.ndim>1:
# for DFM-style discharges, a line segment starting outside the domain
# and ending at the discharge point
point=point[-1,:]
g=self.grid
cell=g.select_cells_nearest(point,inside=True)
        assert cell is not None,"Discharge at %s failed to find a cell"%point
if cell_field:
g.cells[cell_field][cell] = min(g.cells[cell_field][cell],dredge_depth)
if edge_field:
edges=g.cell_to_edges(cell)
g.edges[edge_field][edges] = np.minimum(g.edges[edge_field][edges],
dredge_depth)
if node_field:
nodes=g.cell_to_nodes(cell)
g.nodes[node_field][nodes] = np.minimum(g.nodes[node_field][nodes],
dredge_depth)
def add_monitor_sections(self,sections):
"""
sections: list or array of features. each feature
must have a 'geom' item giving the shapely geometry as a
LineString. the feature name is pulled from a 'name'
item if it exists, otherwise 'obs_sec_NNN'
"""
self.mon_sections.extend(sections)
def add_monitor_points(self,points):
"""
points: list or array of features, must have a 'geom' item giving
the shapely geometry as a Point. if there is a 'name' item,
that will be used to name the feature, otherwise it will be given
a numeric name 'obs_pnt_NNN'
"""
self.mon_points.extend(points)
def write(self):
# Make sure instance data has been pushed to the MDUFile, this
# is used by write_forcing() and write_grid()
# This doesn't clear it out -- that should be done beforehand.
# this is just to make sure the directory exists
self.set_run_dir(self.run_dir,mode='create')
assert self.grid is not None,"Must call set_grid(...) before writing"
self.update_config()
self.write_config()
self.write_extra_files()
self.write_forcing()
# Must come after write_forcing() to allow BCs to modify grid
self.write_grid()
def write_grid(self):
raise Exception("Implement in subclass")
def write_forcing(self):
for bc in self.bcs:
self.write_bc(bc)
def write_bc(self,bc):
if isinstance(bc,MultiBC):
bc.enumerate_sub_bcs()
for sub_bc in bc.sub_bcs:
self.write_bc(sub_bc)
else:
raise Exception("BC type %s not handled by class %s"%(bc.__class__,self.__class__))
def infer_initial_water_level(self):
"""
Pull an initial water level based on the first
StageBC. If no stage BC is found, return None.
No handling of MultiBCs, and does not check whether
an initial water level has already been set.
"""
for bc in self.bcs:
            if isinstance(bc,StageBC):
wl=bc.evaluate(t=self.run_start)
return float(wl)
self.log.info("Could not find BC to get initial water level")
return None
def update_initial_water_level(self):
pass # override in subclass
def partition(self,partition_grid=None):
"""
For multidomain runs, partition the grid. Overload in subclass
partition_grid: whether the grid should be partitioned,
which should generally default to True unless the run is a restart.
"""
pass
def run_model(self, *args, **kwargs):
""" Alias for run_simulation
"""
return self.run_simulation(*args, **kwargs)
def run_simulation(self,extra_args=[]):
"""
As advertised. Overload in subclass.
"""
pass
def add_gazetteer(self,shp_fn):
"""
Register a shapefile for resolving feature locations.
shp_fn: string, to be loaded as shapefile, or a structure array with a geom field.
"""
if not isinstance(shp_fn,np.ndarray):
shp_fn=wkb2shp.shp2geom(shp_fn)
self.gazetteers.append(shp_fn)
def get_geometry(self,**kws):
"""
The gazetteer interface for BC geometry. given criteria as keyword arguments,
i.e. name='Old_River', return the matching geometry from the gazetteer as
a shapely geometry.
if no match, return None. Error if more than one match
This method requires that at most 1 feature is matched, and returns only
the geometry
"""
hits=self.match_gazetteer(**kws)
if hits:
assert len(hits)==1
return hits[0]['geom']
else:
return None
def match_gazetteer(self,**kws):
"""
search all gazetteers with criteria specified in keyword arguments,
returning a list of shapefile records (note that this is a python
list of numpy records, not a numpy array, since shapefiles may not
have the same fields).
        returns an empty list if there are no hits
see match_feature() for details on criteria
"""
hits=[]
for gaz in self.gazetteers:
for idx in range(len(gaz)):
if self.match_feature(kws,gaz[idx]):
hits.append( gaz[idx] )
return hits
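    # Illustrative queries (shapefile path hypothetical):
    #   model.add_gazetteer('gis/features.shp')
    #   model.match_gazetteer(name=re.compile('south.*'))            # regex match on 'name'
    #   model.get_geometry(name='Old_River', geom_type='LineString') # single matching geometry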
def match_feature(self,kws,feat):
"""
check the critera in dict kws against feat, a numpy record as
returned by shp2geom.
there is special handling for several values:
'geom_type' is the geom_type attribute of the geometry itself,
e.g. 'LineString' or 'Point'. feat can specify a list of geom_type
values
pattern matching will be used when a criterion has a re.Pattern
value, i.e. kws={'name':re.compile('south.*')} would match features
that start with 'south'.
"""
for k in kws:
if k=='geom_type':
feat_val=feat['geom'].geom_type
if isinstance(kws[k],list):
if feat_val in kws[k]: continue
else:
return False
else:
if feat_val==kws[k]: continue
else:
return False
else:
try:
feat_val=feat[k]
except KeyError:
return False
except ValueError: # depending on type of feat can get either
return False
if isinstance(kws[k],re.Pattern):
tst=kws[k].match(feat_val)
else:
tst=feat_val==kws[k]
if tst:
continue
else:
return False
return True
# having these classes as attributes reduces headaches in importing,
# and provides another chance for a model subclass to provide a different
# implementation.
FlowBC=FlowBC
SourceSinkBC=SourceSinkBC
StageBC=StageBC
WindBC=WindBC
RoughnessBC=RoughnessBC
ScalarBC=ScalarBC
def add_FlowBC(self,**kw):
bc=self.FlowBC(model=self,**kw)
self.add_bcs(bc)
return bc
def add_SourceSinkBC(self,*a,**kw):
bc=self.SourceSinkBC(*a,model=self,**kw)
self.add_bcs(bc)
return bc
def add_StageBC(self,**kw):
bc=self.StageBC(model=self,**kw)
self.add_bcs(bc)
return bc
def add_WindBC(self,**kw):
bc=self.WindBC(model=self,**kw)
self.add_bcs(bc)
return bc
def add_RoughnessBC(self,**kw):
bc=self.RoughnessBC(model=self,**kw)
self.add_bcs(bc)
return bc
# def add_Structure(self,**kw): # only for DFM now.
def add_bcs(self,bcs):
"""
Add BC objects to this models definition.
bcs: None (do nothing), one BC instance, or a list of BC instances
"""
if bcs is None:
return
if isinstance(bcs,BC):
bcs=[bcs]
for bc in bcs:
assert (bc.model is None) or (bc.model==self),"Not expecting to share BC objects"
bc.model=self
self.bcs.extend(bcs)
def utc_to_native(self,t):
return t+self.utc_offset
def native_to_utc(self,t):
return t-self.utc_offset
@property
@memoize.member_thunk
def ll_to_native(self):
"""
Project array of longitude/latitude [...,2] to
model-native (e.g. UTM meters)
"""
if self.projection is None:
            log.warning("projection is not set, e.g. model.projection='EPSG:26910'")
return lambda x:x
else:
return proj_utils.mapper('WGS84',self.projection)
@property
@memoize.member_thunk
def native_to_ll(self):
"""
Project array of x/y [...,2] coordinates in model-native
project (e.g. UTM meters) to longitude/latitude
"""
if self.projection is not None:
return proj_utils.mapper(self.projection,'WGS84')
else:
return lambda x: x
# Some BC methods need to know more about the domain, so DFlowModel
# provides these accessors
def edge_depth(self,j,datum=None):
"""
Return the bed elevation for edge j, in meters, positive=up.
"""
z=self.grid.nodes['node_z_bed'][ self.grid.edges['nodes'][j] ].min()
if z>0:
# this probably isn't a good warning for DFM grids, just for SUNTANS
log.warning("Edge %d has positive depth %.2f"%(j,z))
if datum is not None:
if datum=='eta0':
z+=self.initial_water_level()
return z
# Functions for manipulating DFM input/output
def extract_transect(ds,line,grid=None,dx=None,cell_dim='nFlowElem',
include=None,rename=True,add_z=True,name=None,
to_keep_dims=set(['wdim','laydim','two','three','time','sample']),
bundle_components=[['U',('ucx','ucy')],
['Uavg',('ucxz','ucyz')]],
):
"""
Extract a transect from map output.
ds: xarray Dataset
line: [N,2] polyline
grid: UnstructuredGrid instance, defaults to loading from ds, although this
is typically much slower as the spatial index cannot be reused
dx: sample spacing along line
cell_dim: name of the dimension
include: limit output to these data variables
rename: if True, follow naming conventions in xr_transect
"""
missing=np.nan
assert dx is not None,"Not ready for adaptively choosing dx"
if grid is None:
        grid=dfm_grid.DFMGrid(ds) # NB: the dfm_grid import at the top of this file is commented out; pass grid= explicitly to avoid a NameError
from stompy.spatial import linestring_utils
line_sampled=linestring_utils.resample_linearring(line,dx,closed_ring=False)
N_sample=len(line_sampled)
# Get the mapping from sample index to cell, or None if
# the point misses the grid.
cell_map=[ grid.select_cells_nearest( line_sampled[samp_i], inside=True)
for samp_i in range(N_sample)]
# to make indexing more streamlined, replace missing cells with 0, but record
# who is missing and nan out later. Note that this need to be None=>0, to avoid
# changing index of 0 to something else.
cell_mask=[ c is None for c in cell_map]
cell_map_safe=[ c or 0 for c in cell_map]
if include is not None:
exclude=[ v for v in ds.data_vars if v not in include]
ds_orig=ds
ds=ds_orig.drop(exclude)
new_ds=ds.isel(**{cell_dim:cell_map_safe})
#print("Post-ds:")
#print(new_ds)
# Record the intended sampling location:
new_ds['x_sample']=(cell_dim,),line_sampled[:,0]
new_ds['y_sample']=(cell_dim,),line_sampled[:,1]
distance=utils.dist_along(line_sampled)
new_ds['d_sample']=(cell_dim,),distance
# And some additional spatial data:
dx_sample=utils.center_to_interval(distance)
new_ds['dx_sample']=(cell_dim,),dx_sample
new_ds['d_sample_bnd']=(cell_dim,'two'), np.array( [distance-dx_sample/2,
distance+dx_sample/2]).T
new_ds=new_ds.rename({cell_dim:'sample'})
new_ds=new_ds.set_coords(['x_sample','y_sample','d_sample'])
if add_z:
new_ds.update( xr_utils.z_from_sigma(new_ds,'ucx',interfaces=True,dz=True) )
# need to drop variables with dimensions like nFlowLink
to_drop=[]
for v in new_ds.variables:
if (set(new_ds[v].dims) - to_keep_dims):
to_drop.append(v)
new_ds=new_ds.drop(to_drop)
for vec,comps in bundle_components:
xr_utils.bundle_components(new_ds,vec,comps,'xy',['N','E'])
if rename:
new_ds=new_ds.rename( {'ucx':'Ve',
'ucy':'Vn',
'ucz':'Vu',
'ucxa':'Ve_avg',
'ucya':'Vn_avg',
's1':'z_surf',
'FlowElem_bl':'z_bed',
'laydim':'layer'} )
# Add metadata if missing:
if (name is None) and ('name' not in new_ds.attrs):
new_ds.attrs['name']='DFM Transect'
elif name is not None:
new_ds.attrs['name']=name
if 'filename' not in new_ds.attrs:
new_ds.attrs['filename']=new_ds.attrs['name']
if 'source' not in new_ds.attrs:
        new_ds.attrs['source']=new_ds.attrs['name'] # default the source to the transect name
return new_ds
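# Illustrative usage (file name and coordinates hypothetical):
#   ds=xr.open_dataset('run_output_map.nc')
#   tran=extract_transect(ds, line=np.array([[x0,y0],[x1,y1]]), grid=model.grid, dx=25.0)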
class OTPSHelper(object):
# water columns shallower than this will have a velocity calculated
# based on this water column depth rather than their actual value.
min_h=5.0
otps_model=None
# slightly larger than default pad. probably unnecessary
pad=2*np.timedelta64(24,'h')
def __init__(self,otps_model,**kw):
self.otps_model=otps_model # something like OhS
def dataset(self):
"""
extract h,u,v from OTPS.
returns a xr.Dataset with time,U,V,u,v,h,Q,unorm
U,V: east/north transport in m2/s
u,v: east/north velocity in m/s, relative to model depth.
h: tidal freesurface
Q: inward-postive flux in m3/s
unorm: inward-positive velocity in m/s
"""
from stompy.model.otps import read_otps
ds=xr.Dataset()
times=np.arange( self.data_start,
self.data_stop,
15*np.timedelta64(60,'s') )
log.debug("Will generate tidal prediction for %d time steps"%len(times))
ds['time']=('time',),times
modfile=read_otps.model_path(self.otps_model)
xy=np.array(self.geom.coords)
ll=self.model.native_to_ll(xy)
# Note z=1.0 to get transport values in m2/s
pred_h,pred_U,pred_V=read_otps.tide_pred(modfile,lon=ll[:,0],lat=ll[:,1],
time=times,z=1.0)
pred_h=pred_h.mean(axis=1)
pred_U=pred_U.mean(axis=1)
pred_V=pred_V.mean(axis=1)
ds['U']=('time',),pred_U
ds['V']=('time',),pred_V
ds['water_level']=('time',),pred_h
# need a normal vector and a length. And make sure normal vector is pointing
# into the domain.
L=utils.dist(xy[0],xy[-1])
j=self.model.grid.select_edges_nearest( 0.5*(xy[0]+xy[-1]) )
grid_n=self.get_inward_normal(j)
Q=L*(grid_n[0]*pred_U + grid_n[1]*pred_V)
ds['Q']=('time',),Q
# u,v,unorm need model depth
edge_depth=max(self.get_depth(j),self.min_h)
# no adjustment for changing freesurface. maybe later.
ds['u']=ds.U/edge_depth
ds['v']=ds.V/edge_depth
ds['unorm']=ds.Q/(L*edge_depth)
ds.attrs['mode']=self.mode
return ds
class OTPSStageBC(StageBC,OTPSHelper):
def __init__(self,**kw):
super(OTPSStageBC,self).__init__(**kw)
# write_config same as superclass
# filename_base same as superclass
def src_data(self):
return self.dataset()['water_level']
def write_data(self): # DFM IMPLEMENTATION!
self.write_tim(self.data())
class OTPSFlowBC(FlowBC,OTPSHelper):
def __init__(self,**kw):
super(OTPSFlowBC,self).__init__(**kw)
# write_config same as superclass
# filename_base same as superclass
def src_data(self):
return self.dataset()['Q']
def write_data(self): # DFM IMPLEMENTATION!
self.write_tim(self.data())
class VelocityBC(BC):
"""
expects a dataset() method which provides a dataset with time, u,v, and unorm
(positive into the domain).
dflowfm notes:
BC setting edge-normal velocity (velocitybnd), uniform in the vertical.
positive is into the domain.
"""
# write a velocitybnd BC
def write_config(self):
old_bc_fn=self.model.ext_force_file()
with open(old_bc_fn,'at') as fp:
lines=["QUANTITY=velocitybnd",
"FILENAME=%s"%self.pli_filename(),
"FILETYPE=9",
"METHOD=3",
"OPERAND=O",
"\n"]
fp.write("\n".join(lines))
def filename_base(self):
"""
Make it clear in the filenames what is being forced
"""
return super(VelocityBC,self).filename_base()+"_vel"
def write_data(self):
raise Exception("Implement write_data() in subclass")
class OTPSVelocityBC(VelocityBC,OTPSHelper):
"""
Force 2D transport based on depth-integrated transport from OTPS.
"""
def __init__(self,**kw):
super(OTPSVelocityBC,self).__init__(**kw)
def src_data(self):
return self.dataset()['unorm']
def write_data(self):
da=self.data()
if 'z' in da.dims:
self.write_t3d(da,z_bed=self.model.edge_depth(self.grid_edge))
else:
self.write_tim(da)
class OTPSVelocity3DBC(OTPSVelocityBC):
"""
Force 3D transport based on depth-integrated transport from OTPS.
This is a temporary shim to test setting a 3D velocity BC.
It is definitely wrong. Don't use this yet.
"""
def velocity_ds(self):
ds=super(OTPSVelocity3DBC,self).velocity_ds()
# so there is a 'unorm'
z_bed=self.model.edge_depth(self.grid_edge)
z_surf=1.0
assert z_bed<0 # should probably use self.get_depth() instead.
# pad out a bit above/below
# and try populating more levels, in case things are getting chopped off
N=10
z_pad=10.0
ds['z']=('z',), np.linspace(z_bed-z_pad,z_surf+z_pad,N)
sig=np.linspace(-1,1,N)
new_unorm,_=xr.broadcast(ds.unorm,ds.z)
ds['unorm']=new_unorm
# Add some vertical structure to test 3D nature of the BC
delta=xr.DataArray(0.02*sig,dims=['z'])
ds['unorm'] = ds.unorm + delta
return ds
class MultiBC(BC):
"""
Break up a boundary condition spec into per-edge boundary conditions.
Hoping that this can be done in a mostly opaque way, without exposing to
the caller that one BC is being broken up into many.
"""
def __init__(self,cls,**kw):
self.saved_kw=dict(kw) # copy
# These are all passed on to the subclass, but only the
# known parameters are kept for MultiBC.
# if at some we need to pass parameters only to MultiBC, but
# not to the subclass, this would have to check both ways.
keys=list(kw.keys())
for k in keys:
try:
getattr(self,k)
except AttributeError:
del kw[k]
super(MultiBC,self).__init__(**kw)
self.cls=cls
self.sub_bcs="not yet!" # not populated until self.write()
def filename_base(self):
assert False,'This should never be called, right?'
def write(self):
# delay enumeration until now, so we have the most up-to-date
# information about the model, grid, etc.
self.enumerate_sub_bcs()
for sub_bc in self.sub_bcs:
sub_bc.write()
def enumerate_sub_bcs(self):
# dredge_grid already has some of the machinery
grid=self.model.grid
edges=grid.select_edges_by_polyline(np.array(self.geom.coords))
self.model.log.info("MultiBC will be applied over %d edges"%len(edges))
self.sub_bcs=[]
for j in edges:
seg=grid.nodes['x'][ grid.edges['nodes'][j] ]
sub_geom=geometry.LineString(seg)
# This slightly breaks the abstraction -- in theory, the caller
# can edit all of self's values up until write() is called, yet
# here we are grabbing the values at time of instantiation of self.
# hopefully it doesn't matter, especially since geom and model
# are handled explicitly.
sub_kw=dict(self.saved_kw) # copy original
sub_kw['geom']=sub_geom
sub_kw['name']="%s%04d"%(self.name,j)
# this is only guaranteed to be a representative element
sub_kw['grid_edge']=j
# this, if set, is all the elements
sub_kw['grid_edges']=[j]
j_cells=grid.edges['cells'][j]
assert j_cells.min()<0
assert j_cells.max()>=0
c=j_cells.max()
sub_kw['grid_cell']=c
sub_kw['grid_cells']=[c]
assert self.model is not None,"Why would that be?"
assert sub_geom is not None,"Huh?"
sub_bc=self.cls(model=self.model,**sub_kw)
self.sub_bcs.append(sub_bc)
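# Minimal usage sketch (hypothetical names and geometry): one call builds a
# stage BC per grid edge crossed by the polyline; the per-edge sub-BCs are
# instantiated in enumerate_sub_bcs() when write() is called.
#   multi=MultiBC(NOAAStageBC,name='ocean',geom=ocean_linestring,
#                 station=9414290,cache_dir='cache')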
# HYCOM
class HycomMultiBC(MultiBC):
"""
Common machinery for pulling spatially variable fields from hycom
"""
# according to website, hycom runs for download are non-tidal, so
# don't worry about filtering
# Data is only daily, so go a bit longer than a usual tidal filter
lp_hours=0
pad=np.timedelta64(4,'D')
cache_dir=None
def __init__(self,cls,ll_box=None,**kw):
self.ll_box=ll_box
self.data_files=None
super(HycomMultiBC,self).__init__(cls,**kw)
if self.cache_dir is None:
self.log.warning("You probably want to pass cache_dir for hycom download")
def enumerate_sub_bcs(self):
if self.ll_box is None:
# grid=self.model.grid ...
raise Exception("Not implemented: auto-calculating ll_box")
self.populate_files()
super(HycomMultiBC,self).enumerate_sub_bcs()
# adjust fluxes...
self.populate_values()
def populate_files(self):
self.data_files=hycom.fetch_range(self.ll_box[:2],self.ll_box[2:],
[self.data_start,self.data_stop],
cache_dir=self.cache_dir)
def init_bathy(self):
"""
populate self.bathy, an XYZField in native coordinates, with
values as hycom's positive down bathymetry.
"""
# TODO: download hycom bathy on demand.
# This version of the file is from experiment 93.0, and ostensibly is on the
# computational grid
# ftp://ftp.hycom.org/datasets/GLBb0.08/expt_93.0/topo/depth_GLBb0.08_09m11.nc
# This is what I've used in the past:
hy_bathy=self.hy_bathy=hycom.hycom_bathymetry(self.model.run_start,self.cache_dir)
# xr.open_dataset( os.path.join(self.cache_dir,'depth_GLBa0.08_09.nc') )
lon_min,lon_max,lat_min,lat_max=self.ll_box
sel=((hy_bathy.Latitude.values>=lat_min) &
(hy_bathy.Latitude.values<=lat_max) &
(hy_bathy.Longitude.values>=lon_min) &
(hy_bathy.Longitude.values<=lon_max))
bathy_xyz=np.c_[ hy_bathy.Longitude.values[sel],
hy_bathy.Latitude.values[sel],
hy_bathy.bathymetry.isel(MT=0).values[sel] ]
bathy_xyz[:,:2]=self.model.ll_to_native(bathy_xyz[:,:2])
from ..spatial import field
self.bathy=field.XYZField(X=bathy_xyz[:,:2],F=bathy_xyz[:,2])
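# Note on ll_box ordering used above and in populate_files(): it is
# (lon_min, lon_max, lat_min, lat_max), e.g. a hypothetical
# ll_box=(-124.7,-121.7,36.2,38.9) for a central California domain.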
class HycomMultiScalarBC(HycomMultiBC):
"""
Extract 3D salt, temp from Hycom
"""
scalar=None
def __init__(self,**kw):
super(HycomMultiScalarBC,self).__init__(self.ScalarProfileBC,**kw)
class ScalarProfileBC(ScalarBC):
cache_dir=None # unused now, but makes parameter-setting logic cleaner
_dataset=None # supplied by factory
def dataset(self):
self._dataset.attrs['mode']=self.mode
return self._dataset
def src_data(self):# was dataarray()
da=self.dataset()[self.scalar]
da.attrs['mode']=self.mode
return da
def populate_values(self):
""" Do the actual work of iterating over sub-edges and hycom files,
interpolating in the vertical.
Desperately wants some refactoring with the velocity code.
"""
sun_var=self.scalar
if sun_var=='salinity':
hy_scalar='salinity'
elif sun_var=='temperature':
hy_scalar='water_temp'
# Get spatial information about hycom files
hy_ds0=xr.open_dataset(self.data_files[0])
# make lon canonically [-180,180]
hy_ds0.lon.values[:] = (hy_ds0.lon.values+180)%360.0 - 180.0
if 'time' in hy_ds0.water_u.dims:
hy_ds0=hy_ds0.isel(time=0)
# makes sure lon,lat are compatible with water velocity
_,Lon,Lat=xr.broadcast(hy_ds0.water_u.isel(depth=0),hy_ds0.lon,hy_ds0.lat)
hy_xy=self.model.ll_to_native(Lon.values,Lat.values)
self.init_bathy()
# Initialize per-edge details
self.model.grid._edge_depth=self.model.grid.edges['edge_z_bed']
layers=self.model.layer_data(with_offset=True)
# In order to get valid data even when the hydro model has a cell
# that lines up with somewhere dry in hycom land, limit the search below
# to wet cells
hy_wet=np.isfinite(hy_ds0[hy_scalar].isel(depth=0).values)
for i,sub_bc in enumerate(self.sub_bcs):
sub_bc.edge_center=np.array(sub_bc.geom.centroid)
hyc_dists=utils.dist( sub_bc.edge_center, hy_xy )
# lazy way to skip over dry cells. Note that velocity differs
# here, since it's safe to just use 0 velocity, but a zero
# salinity can creep in and wreak havoc.
hyc_dists[~hy_wet]=np.inf
row,col=np.nonzero( hyc_dists==hyc_dists.min() )
row=row[0] ; col=col[0]
sub_bc.hy_row_col=(row,col) # tuple, so it can be used directly in []
# initialize the datasets
sub_bc._dataset=sub_ds=xr.Dataset()
# assumes that from each file we use only one timestep
sub_ds['time']=('time',), np.ones(len(self.data_files),'M8[m]')
sub_ds[sun_var]=('time','layer'), np.zeros((sub_ds.dims['time'],layers.dims['Nk']),
np.float64)
sub_bc.edge_depth=edge_depth=self.model.grid.edge_depths()[sub_bc.grid_edge] # positive up
# First, establish the geometry on the suntans side, in terms of z_interface values
# for all wet layers. below-bed layers have zero vertical span. positive up, but
# shift back to real, non-offset, vertical coordinate
sun_z_interface=(-self.model.z_offset)+layers.z_interface.values.clip(edge_depth,np.inf)
sub_bc.sun_z_interfaces=sun_z_interface
# And the pointwise data from hycom:
hy_layers=hy_ds0.depth.values.copy()
sub_bc.hy_valid=valid=np.isfinite(hy_ds0[hy_scalar].isel(lat=row,lon=col).values)
hycom_depths=hy_ds0.depth.values[valid]
# possible that hy_bed_depth is not quite correct, and hycom has data
# even deeper. in that case just pad out the depth a bit so there
# is at least a place to put the bed velocity.
if len(hycom_depths)!=0:
sub_bc.hy_bed_depth=max(hycom_depths[-1]+1.0,self.bathy(hy_xy[sub_bc.hy_row_col]))
sub_bc.hycom_depths=np.concatenate( [hycom_depths, [sub_bc.hy_bed_depth]])
else:
# edge is dry in HYCOM -- be careful to check and skip below.
sub_bc.hycom_depths=hycom_depths
# for scalars, pray this never gets used...
# don't use nan in case it participates in a summation with 0, but
# make it yuge to make it easier to spot if it is ever used
log.warning("Hmm - got a dry hycom edge, even though should be skipping those now")
sub_bc._dataset[sun_var].values[:]=100000000.
# Populate the scalar data, outer loop is over hycom files, since
# that's most expensive
for ti,fn in enumerate(self.data_files):
hy_ds=xr.open_dataset(fn)
if 'time' in hy_ds.dims:
# again, assuming that we only care about the first time step in each file
hy_ds=hy_ds.isel(time=0)
log.info(hy_ds.time.values)
scalar_val=hy_ds[hy_scalar].values
scalar_val_bottom=hy_ds[hy_scalar+'_bottom'].values
for i,sub_bc in enumerate(self.sub_bcs):
hy_depths=sub_bc.hycom_depths
sub_bc._dataset.time.values[ti]=hy_ds.time.values
if len(hy_depths)==0:
continue # already zero'd out above.
row,col=sub_bc.hy_row_col
z_sel=sub_bc.hy_valid
sub_scalar_val=np.concatenate([ scalar_val[z_sel,row,col],
scalar_val_bottom[None,row,col] ])
sun_dz=np.diff(-sub_bc.sun_z_interfaces)
if 0:
# 2019-04-17:
# This is the approach from the velocity interpolation. It aims to preserve
# flux, but for scalar we really want clean profiles, and if suntans is a bit
# deeper than hycom somewhere, just extend the profile, rather than the flux
# code which would put 0 down there.
# integrate -- there isn't a super clean way to do this that I see.
# but averaging each interval is probably good enough, just loses some vertical
# accuracy.
sun_valid=sun_dz>0
interval_mean_val=0.5*(sub_scalar_val[:-1]+sub_scalar_val[1:])
valdz=np.concatenate( ([0],np.cumsum(np.diff(hy_depths)*interval_mean_val)) )
sun_valdz=np.interp(-sub_bc.sun_z_interfaces, hy_depths, valdz)
sun_d_veldz=np.diff(sun_valdz)
sub_bc._dataset[sun_var].values[ti,sun_valid]=sun_d_veldz[sun_valid]/sun_dz[sun_valid]
else:
# more direct interpolation approach - linearly interpolate to center of z level
depth_middle=-sub_bc.sun_z_interfaces[:-1] + 0.5*sun_dz
sub_bc._dataset[sun_var].values[ti,:]=np.interp(depth_middle,hy_depths,sub_scalar_val)
hy_ds.close() # free up netcdf resources
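# Illustrative sketch (made-up profile) of the "direct interpolation" branch
# above: HYCOM scalar values are interpolated to the centers of the suntans
# z-levels, with depths positive down and z_interfaces positive up.
def _example_scalar_profile_interp():
    import numpy as np
    hy_depths=np.array([0.0,2.0,4.0,8.0,16.0])     # m, positive-down HYCOM depths
    hy_salt=np.array([28.0,29.0,30.5,32.0,33.0])   # psu at those depths
    sun_z_interfaces=np.array([0.0,-1.0,-3.0,-6.0,-12.0]) # m, positive-up
    sun_dz=np.diff(-sun_z_interfaces)              # layer thicknesses
    depth_middle=-sun_z_interfaces[:-1]+0.5*sun_dz # positive-down layer centers
    return np.interp(depth_middle,hy_depths,hy_salt)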
class HycomMultiVelocityBC(HycomMultiBC):
"""
Special handling of multiple hycom boundary segments to
enforce specific net flux requirements.
Otherwise small errors, including quantization and discretization,
lead to a net flux.
"""
# If this is set, flux calculations will assume the average freesurface
# is at this elevation as opposed to 0.0
# this is a positive-up value
z_offset=None
def __init__(self,**kw):
super(HycomMultiVelocityBC,self).__init__(self.VelocityProfileBC,**kw)
class VelocityProfileBC(VelocityBC):
cache_dir=None # unused now, but makes parameter-setting logic cleaner
z_offset=None # likewise -- just used by the MultiBC
_dataset=None # supplied by factory
def dataset(self):
self._dataset.attrs['mode']=self.mode
return self._dataset
def update_Q_in(self):
"""calculate time series flux~m3/s from self._dataset,
updating Q_in field therein.
Assumes populate_velocity has already been run, so
additional attributes are available.
"""
ds=self.dataset()
sun_dz=np.diff(-self.sun_z_interfaces)
# u ~ [time,layer]
Uint=(ds['u'].values[:,:]*sun_dz[None,:]).sum(axis=1)
Vint=(ds['v'].values[:,:]*sun_dz[None,:]).sum(axis=1)
Q_in=self.edge_length*(self.inward_normal[0]*Uint +
self.inward_normal[1]*Vint)
ds['Q_in'].values[:]=Q_in
ds['Uint'].values[:]=Uint
ds['Vint'].values[:]=Vint
def populate_values(self):
""" Do the actual work of iterating over sub-edges and hycom files,
interpolating in the vertical, projecting as needed, and adjust the overall
fluxes
"""
# The net inward flux in m3/s over the whole BC that we will adjust to.
target_Q=np.zeros(len(self.data_files)) # assumes one time step per file
# Get spatial information about hycom files
hy_ds0=xr.open_dataset(self.data_files[0])
# make lon canonically [-180,180]
hy_ds0.lon.values[:] = (hy_ds0.lon.values+180)%360.0 - 180.0
if 'time' in hy_ds0.water_u.dims:
hy_ds0=hy_ds0.isel(time=0)
# makes sure lon,lat are compatible with water velocity
_,Lon,Lat=xr.broadcast(hy_ds0.water_u.isel(depth=0),hy_ds0.lon,hy_ds0.lat)
hy_xy=self.model.ll_to_native(Lon.values,Lat.values)
self.init_bathy()
# handle "manual" z offset, i.e. the grid will not be shifted, but the
# freesurface is not 0.0 relative to the grid depths.
eta=self.z_offset or 0.0
assert self.model.z_offset==0.0,"Trying to avoid model.z_offset"
# log.info("populate_values: eta is %s"%eta)
# Initialize per-edge details
self.model.grid._edge_depth=self.model.grid.edges['edge_z_bed']
layers=self.model.layer_data(with_offset=True)
for i,sub_bc in enumerate(self.sub_bcs):
sub_bc.inward_normal=sub_bc.get_inward_normal()
sub_bc.edge_length=sub_bc.geom.length
sub_bc.edge_center=np.array(sub_bc.geom.centroid)
# skip the transforms...
hyc_dists=utils.dist( sub_bc.edge_center, hy_xy )
row,col=np.nonzero( hyc_dists==hyc_dists.min() )
row=row[0] ; col=col[0]
sub_bc.hy_row_col=(row,col) # tuple, so it can be used directly in []
# initialize the datasets
sub_bc._dataset=sub_ds=xr.Dataset()
# assumes that from each file we use only one timestep
sub_ds['time']=('time',), np.ones(len(self.data_files),'M8[m]')
# getting tricky here - do more work here rather than trying to push ad hoc interface
# into the model class
# velocity components in UTM x/y coordinate system
sub_ds['u']=('time','layer'), np.zeros((sub_ds.dims['time'],layers.dims['Nk']),
np.float64)
sub_ds['v']=('time','layer'), np.zeros((sub_ds.dims['time'],layers.dims['Nk']),
np.float64)
# depth-integrated transport on suntans layers, in m2/s
sub_ds['Uint']=('time',), np.nan*np.ones(sub_ds.dims['time'],np.float64)
sub_ds['Vint']=('time',), np.nan*np.ones(sub_ds.dims['time'],np.float64)
# project transport to edge normal * edge_length to get m3/s
sub_ds['Q_in']=('time',), np.nan*np.ones(sub_ds.dims['time'],np.float64)
sub_bc.edge_depth=edge_depth=self.model.grid.edge_depths()[sub_bc.grid_edge] # positive up
# First, establish the geometry on the suntans side, in terms of z_interface values
# for all wet layers. below-bed layers have zero vertical span. positive up, but
# shift back to real, non-offset, vertical coordinate
# NB: trying to move away from model.z_offset. self.z_offset, if set, should
# provide an estimate of the mean freesurface elevation.
# 2019-04-14: clip to eta here.
sun_z_interface=(-self.model.z_offset)+layers.z_interface.values.clip(edge_depth,eta)
sub_bc.sun_z_interfaces=sun_z_interface
# log.info("sun_z_interface set on edge: %s"%str(sun_z_interface))
# And the pointwise data from hycom:
hy_layers=hy_ds0.depth.values.copy()
sub_bc.hy_valid=valid=np.isfinite(hy_ds0.water_u.isel(lat=row,lon=col).values)
hycom_depths=hy_ds0.depth.values[valid]
# possible that hy_bed_depth is not quite correct, and hycom has data
# even deeper. in that case just pad out the depth a bit so there
# is at least a place to put the bed velocity.
if len(hycom_depths)!=0:
sub_bc.hy_bed_depth=max(hycom_depths[-1]+1.0,self.bathy(hy_xy[sub_bc.hy_row_col]))
sub_bc.hycom_depths=np.concatenate( [hycom_depths, [sub_bc.hy_bed_depth]])
else:
# edge is dry in HYCOM -- be careful to check and skip below.
sub_bc.hycom_depths=hycom_depths
sub_bc._dataset['u'].values[:]=0.0
sub_bc._dataset['v'].values[:]=0.0
sub_bc._dataset['Uint'].values[:]=0.0
sub_bc._dataset['Vint'].values[:]=0.0
# Populate the velocity data, outer loop is over hycom files, since
# that's most expensive
for ti,fn in enumerate(self.data_files):
hy_ds=xr.open_dataset(fn)
if 'time' in hy_ds.dims:
# again, assuming that we only care about the first time step in each file
hy_ds=hy_ds.isel(time=0)
log.info(hy_ds.time.values)
water_u=hy_ds.water_u.values
water_v=hy_ds.water_v.values
water_u_bottom=hy_ds.water_u_bottom.values
water_v_bottom=hy_ds.water_v_bottom.values
for i,sub_bc in enumerate(self.sub_bcs):
hy_depths=sub_bc.hycom_depths
sub_bc._dataset.time.values[ti]=hy_ds.time.values
if len(hy_depths)==0:
continue # already zero'd out above.
row,col=sub_bc.hy_row_col
z_sel=sub_bc.hy_valid
sun_dz=np.diff(-sub_bc.sun_z_interfaces)
sun_valid=sun_dz>0 # both surface and bed cells may be dry.
for water_vel,water_vel_bottom,sun_var,trans_var in [ (water_u,water_u_bottom,'u','Uint'),
(water_v,water_v_bottom,'v','Vint') ]:
sub_water_vel=np.concatenate([ water_vel[z_sel,row,col],
water_vel_bottom[None,row,col] ])
# integrate -- there isn't a super clean way to do this that I see.
# but averaging each interval is probably good enough, just loses some vertical
# accuracy.
interval_mean_vel=0.5*(sub_water_vel[:-1]+sub_water_vel[1:])
veldz=np.concatenate( ([0],np.cumsum(np.diff(hy_depths)*interval_mean_vel)) )
sun_veldz=np.interp(-sub_bc.sun_z_interfaces, hy_depths, veldz)
sun_d_veldz=np.diff(sun_veldz)
sub_bc._dataset[sun_var].values[ti,~sun_valid]=0.0 # just to be sure...
sub_bc._dataset[sun_var].values[ti,sun_valid]=sun_d_veldz[sun_valid]/sun_dz[sun_valid]
# we've already done the integration
# but do it again!
int_A=sun_veldz[-1]
int_B=(np.diff(-sub_bc.sun_z_interfaces)*sub_bc._dataset[sun_var].values[ti,:]).sum()
# log.info("two integrations: %f vs %f"%(int_A,int_B))
sub_bc._dataset[trans_var].values[ti]=int_B # sun_veldz[-1]
hy_ds.close() # free up netcdf resources
# project transport onto edges to get fluxes
total_Q=0.0
total_flux_A=0.0
for i,sub_bc in enumerate(self.sub_bcs):
Q_in=sub_bc.edge_length*(sub_bc.inward_normal[0]*sub_bc._dataset['Uint'].values +
sub_bc.inward_normal[1]*sub_bc._dataset['Vint'].values)
sub_bc._dataset['Q_in'].values[:]=Q_in
total_Q=total_Q+Q_in
# edge_depth here reflects the expected water column depth. it is the bed elevation, with
# the z_offset removed (I hope), under the assumption that a typical eta is close to 0.0,
# but may be offset as much as -10.
# edge_depth is positive up.
#total_flux_A+=sub_bc.edge_length*(eta-sub_bc.edge_depth).clip(0,np.inf)
# maybe better to keep it consistent with above code -
total_flux_A+=sub_bc.edge_length*(sub_bc.sun_z_interfaces[0]-sub_bc.sun_z_interfaces[-1])
Q_error=total_Q-target_Q
vel_error=Q_error/total_flux_A
log.info("Velocity error: %.6f -- %.6f m/s"%(vel_error.min(),vel_error.max()))
log.info("total_flux_A: %.3e"%total_flux_A)
# And apply the adjustment, and update integrated quantities
adj_total_Q=0.0
for i,sub_bc in enumerate(self.sub_bcs):
# seems like we should be subtracting vel_error, but that results in a doubling
# of the error?
# 2019-04-14: is that an outdated comment?
sub_bc._dataset['u'].values[:,:] -= vel_error[:,None]*sub_bc.inward_normal[0]
sub_bc._dataset['v'].values[:,:] -= vel_error[:,None]*sub_bc.inward_normal[1]
sub_bc.update_Q_in()
adj_total_Q=adj_total_Q+sub_bc._dataset['Q_in']
adj_Q_error=adj_total_Q-target_Q
adj_vel_error=adj_Q_error/total_flux_A
log.info("Post-adjustment velocity error: %.6f -- %.6f m/s"%(adj_vel_error.min(),adj_vel_error.max()))
class NOAAStageBC(StageBC):
station=None # integer station
product='water_level' # or 'predictions'
cache_dir=None
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
return ds['z']
def write_bokeh(self,**kw):
defaults=dict(title="Stage: %s (%s)"%(self.name,self.station))
defaults.update(kw)
super(NOAAStageBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
"""
Download or load from cache, take care of any filtering, unit conversion, etc.
Returns a dataset with a 'z' variable, and with time as UTC
"""
ds=noaa_coops.coops_dataset(station=self.station,
start_date=period_start,
end_date=period_stop,
products=[self.product],
days_per_request='M',cache_dir=self.cache_dir)
ds=ds.isel(station=0)
ds['z']=ds[self.product]
ds['z'].attrs['units']='m'
return ds
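# Minimal usage sketch (hypothetical name and geometry; 9414290 is e.g. the
# NOAA station at San Francisco):
#   stage_bc=NOAAStageBC(name='ocean',geom=ocean_linestring,
#                        station=9414290,cache_dir='cache')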
class CdecBC(object):
cache_dir=None # set this to enable caching
station=None # generally three letter string, all caps
sensor=None # integer - default values set in subclasses.
default=None # what to return for src_data() if no data can be fetched.
pad=np.timedelta64(24,'h')
class CdecFlowBC(CdecBC,FlowBC):
sensor=20 # flow at event frequency
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
if ds is not None:
return ds['Q']
else:
log.warning("CDEC station %s, sensor %d found no data"%(self.station,self.sensor))
return self.default
def write_bokeh(self,**kw):
defaults=dict(title="CDEC Flow: %s (%s)"%(self.name,self.station))
defaults.update(kw)
super(CdecFlowBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
from stompy.io.local import cdec
ds=cdec.cdec_dataset(station=self.station,
start_date=period_start-self.pad,end_date=period_stop+self.pad,
sensor=self.sensor, cache_dir=self.cache_dir)
if ds is not None:
# to m3/s
ds['Q']=ds['sensor%04d'%self.sensor] * 0.028316847
ds['Q'].attrs['units']='m3 s-1'
return ds
class CdecStageBC(CdecBC,StageBC):
sensor=1 # stage at event frequency
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
if ds is None:
return self.default
else:
            return ds['water_level']
def write_bokeh(self,**kw):
defaults=dict(title="CDEC Stage: %s (%s)"%(self.name,self.station))
defaults.update(kw)
        super(CdecStageBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
from stompy.io.local import cdec
pad=np.timedelta64(24,'h')
ds=cdec.cdec_dataset(station=self.station,
start_date=period_start-pad,end_date=period_stop+pad,
sensor=self.sensor, cache_dir=self.cache_dir)
if ds is not None:
# to m
ds['water_level']=ds['sensor%04d'%self.sensor] * 0.3048
ds['water_level'].attrs['units']='m'
return ds
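# Unit conversions used by the CDEC classes above, for reference: flow
# (sensor 20) arrives in cfs and is scaled by 0.028316847 to m3/s; stage
# (sensor 1) arrives in feet and is scaled by 0.3048 to m.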
class NwisBC(object):
cache_dir=None
product_id="set_in_subclass"
default=None # in case no data can be fetched
def __init__(self,station,**kw):
"""
station: int or string station id, e.g. 11455478
"""
self.station=str(station)
super(NwisBC,self).__init__(**kw)
class NwisStageBC(NwisBC,StageBC):
product_id=65 # gage height
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
return ds['water_level']
def write_bokeh(self,**kw):
defaults=dict(title="Stage: %s (%s)"%(self.name,self.station))
defaults.update(kw)
super(NwisStageBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
"""
Download or load from cache, take care of any filtering, unit conversion, etc.
        Returns a dataset with a 'water_level' variable, and with time as UTC
"""
from ..io.local import usgs_nwis
ds=usgs_nwis.nwis_dataset(station=self.station,start_date=period_start,
end_date=period_stop,
products=[self.product_id],
cache_dir=self.cache_dir)
if ds is not None:
ds['water_level']=('time',), 0.3048*ds['height_gage']
ds['water_level'].attrs['units']='m'
ds['water_level'].attrs['standard_name']=self.standard_name
return ds
class NwisScalarBC(NwisBC,ScalarBC):
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
# ideally wouldn't be necessary, but a bit safer to ignore metadata/coordinates
scalar_name=[n for n in ds.data_vars if n not in ['tz_cd','datenum','time']][0]
return ds[scalar_name]
def write_bokeh(self,**kw):
defaults=dict(title="Scalar: %s (%s, product %s)"%(self.name,self.station,self.product_id))
defaults.update(kw)
super(NwisScalarBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
"""
Download or load from cache, take care of any filtering, unit conversion, etc.
        Returns a dataset with the requested product as its data variable, and with time as UTC
"""
if self.scalar == 'turbidity':
self.product_id=63680 # 63680: turbidity, FNU
elif self.scalar == 'salinity':
self.product_id=480 # 00480: salinity, ppt
elif self.scalar == 'NO3+NO2':
            self.product_id=99133 # 99133: nitrate + nitrite, mg/l as nitrogen
elif self.scalar == 'temperature':
self.product_id=10 # 00010: temperature, degrees C
elif self.scalar == 'pH':
self.product_id=400 # 00400: pH
elif self.scalar == 'fDOM':
self.product_id=32295 # 32295: fDOM, ug/l QSE
from ..io.local import usgs_nwis
ds=usgs_nwis.nwis_dataset(station=self.station,start_date=period_start,
end_date=period_stop,
products=[self.product_id],
cache_dir=self.cache_dir)
return ds
class NwisFlowBC(NwisBC,FlowBC):
product_id=60 # discharge
def src_data(self):
ds=self.fetch_for_period(self.data_start,self.data_stop)
if ds is not None:
return ds['flow']
else:
return self.default
def write_bokeh(self,**kw):
defaults=dict(title="Flow: %s (%s)"%(self.name,self.station))
defaults.update(kw)
super(NwisFlowBC,self).write_bokeh(**defaults)
def fetch_for_period(self,period_start,period_stop):
"""
Download or load from cache, take care of any filtering, unit conversion, etc.
        Returns a dataset with a 'flow' variable, and with time as UTC
"""
from ..io.local import usgs_nwis
ds=usgs_nwis.nwis_dataset(station=self.station,start_date=period_start,
end_date=period_stop,
products=[self.product_id],
cache_dir=self.cache_dir)
if ds is not None:
ds['flow']=('time',), 0.028316847*ds['stream_flow_mean_daily']
ds['flow'].attrs['units']='m3 s-1'
ds['flow'].attrs['standard_name']=self.standard_name
return ds
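# Minimal usage sketch (hypothetical name and geometry; station 11455478 is
# the example id from the NwisBC docstring): daily-mean discharge is fetched
# from NWIS and converted to m3/s.
#   flow_bc=NwisFlowBC(11455478,name='river',geom=river_linestring,
#                      cache_dir='cache')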
| 38.522487
| 123
| 0.590272
|
f4302aca139802e99d80bfd4e1fc27e353abdfbb
| 7,437
|
py
|
Python
|
research/delf/delf/python/training/model/export_model_utils.py
|
haruiz/models
|
4dfcf48f7e15646dca2089a0e9f583d24661924c
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
research/delf/delf/python/training/model/export_model_utils.py
|
HirataYurina/models
|
2db2501bc9928f68e225282f3884b81680a9cccb
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
research/delf/delf/python/training/model/export_model_utils.py
|
HirataYurina/models
|
2db2501bc9928f68e225282f3884b81680a9cccb
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for DELF model exporting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from delf import feature_extractor
from delf.python.training.datasets import googlelandmarks as gld
from object_detection.core import box_list
from object_detection.core import box_list_ops
def ExtractLocalFeatures(image, image_scales, max_feature_num, abs_thres, iou,
attention_model_fn, stride_factor):
"""Extract local features for input image.
Args:
image: image tensor of type tf.uint8 with shape [h, w, channels].
image_scales: 1D float tensor which contains float scales used for image
pyramid construction.
max_feature_num: int tensor denotes the maximum selected feature points.
abs_thres: float tensor denotes the score threshold for feature selection.
iou: float scalar denotes the iou threshold for NMS.
attention_model_fn: model function. Follows the signature:
* Args:
* `images`: Image tensor which is re-scaled.
* Returns:
* `attention_prob`: attention map after the non-linearity.
* `feature_map`: feature map after ResNet convolution.
stride_factor: integer accounting for striding after block3.
Returns:
boxes: [N, 4] float tensor which denotes the selected receptive box. N is
the number of final feature points which pass through keypoint selection
and NMS steps.
features: [N, depth] float tensor.
feature_scales: [N] float tensor. It is the inverse of the input image
scales such that larger image scales correspond to larger image regions,
which is compatible with keypoints detected with other techniques, for
example Congas.
scores: [N, 1] float tensor denotes the attention score.
"""
original_image_shape_float = tf.gather(
tf.dtypes.cast(tf.shape(image), tf.float32), [0, 1])
image_tensor = gld.NormalizeImages(
image, pixel_value_offset=128.0, pixel_value_scale=128.0)
image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims')
# Hard code the feature depth and receptive field parameters for now.
rf, stride, padding = [291.0, 16.0 * stride_factor, 145.0]
feature_depth = 1024
def _ProcessSingleScale(scale_index, boxes, features, scales, scores):
"""Resizes the image and run feature extraction and keypoint selection.
This function will be passed into tf.while_loop() and be called
repeatedly. The input boxes are collected from the previous iteration
[0: scale_index -1]. We get the current scale by
image_scales[scale_index], and run resize image, feature extraction and
keypoint selection. Then we will get a new set of selected_boxes for
current scale. In the end, we concat the previous boxes with current
selected_boxes as the output.
Args:
scale_index: A valid index in the image_scales.
boxes: Box tensor with the shape of [N, 4].
features: Feature tensor with the shape of [N, depth].
scales: Scale tensor with the shape of [N].
scores: Attention score tensor with the shape of [N].
Returns:
scale_index: The next scale index for processing.
boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
features: Concatenated feature tensor with the shape of [K, depth].
scales: Concatenated scale tensor with the shape of [K].
scores: Concatenated score tensor with the shape of [K].
"""
scale = tf.gather(image_scales, scale_index)
new_image_size = tf.dtypes.cast(
tf.round(original_image_shape_float * scale), tf.int32)
resized_image = tf.image.resize(image_tensor, new_image_size)
attention_prob, feature_map = attention_model_fn(resized_image)
attention_prob = tf.squeeze(attention_prob, axis=[0])
feature_map = tf.squeeze(feature_map, axis=[0])
rf_boxes = feature_extractor.CalculateReceptiveBoxes(
tf.shape(feature_map)[0],
tf.shape(feature_map)[1], rf, stride, padding)
# Re-project back to the original image space.
rf_boxes = tf.divide(rf_boxes, scale)
attention_prob = tf.reshape(attention_prob, [-1])
feature_map = tf.reshape(feature_map, [-1, feature_depth])
# Use attention score to select feature vectors.
indices = tf.reshape(tf.where(attention_prob >= abs_thres), [-1])
selected_boxes = tf.gather(rf_boxes, indices)
selected_features = tf.gather(feature_map, indices)
selected_scores = tf.gather(attention_prob, indices)
selected_scales = tf.ones_like(selected_scores, tf.float32) / scale
# Concat with the previous result from different scales.
boxes = tf.concat([boxes, selected_boxes], 0)
features = tf.concat([features, selected_features], 0)
scales = tf.concat([scales, selected_scales], 0)
scores = tf.concat([scores, selected_scores], 0)
return scale_index + 1, boxes, features, scales, scores
output_boxes = tf.zeros([0, 4], dtype=tf.float32)
output_features = tf.zeros([0, feature_depth], dtype=tf.float32)
output_scales = tf.zeros([0], dtype=tf.float32)
output_scores = tf.zeros([0], dtype=tf.float32)
# Process the first scale separately, the following scales will reuse the
# graph variables.
(_, output_boxes, output_features, output_scales,
output_scores) = _ProcessSingleScale(0, output_boxes, output_features,
output_scales, output_scores)
i = tf.constant(1, dtype=tf.int32)
num_scales = tf.shape(image_scales)[0]
keep_going = lambda j, b, f, scales, scores: tf.less(j, num_scales)
(_, output_boxes, output_features, output_scales,
output_scores) = tf.while_loop(
cond=keep_going,
body=_ProcessSingleScale,
loop_vars=[
i, output_boxes, output_features, output_scales, output_scores
],
shape_invariants=[
i.get_shape(),
tf.TensorShape([None, 4]),
tf.TensorShape([None, feature_depth]),
tf.TensorShape([None]),
tf.TensorShape([None])
],
back_prop=False)
feature_boxes = box_list.BoxList(output_boxes)
feature_boxes.add_field('features', output_features)
feature_boxes.add_field('scales', output_scales)
feature_boxes.add_field('scores', output_scores)
nms_max_boxes = tf.minimum(max_feature_num, feature_boxes.num_boxes())
final_boxes = box_list_ops.non_max_suppression(feature_boxes, iou,
nms_max_boxes)
return final_boxes.get(), final_boxes.get_field(
'features'), final_boxes.get_field('scales'), tf.expand_dims(
final_boxes.get_field('scores'), 1)
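# Minimal usage sketch (all parameter values below are assumed placeholders,
# not DELF defaults); attention_model_fn is any callable mapping a resized
# image batch to (attention_prob, feature_map):
#   boxes, features, scales, scores = ExtractLocalFeatures(
#       image, image_scales=tf.constant([0.7071, 1.0, 1.4142]),
#       max_feature_num=1000, abs_thres=0.16, iou=1.0,
#       attention_model_fn=attention_model_fn, stride_factor=1.0)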
| 43.238372
| 80
| 0.70593
|
5147cda63a6b5a30c3c454bca87d23a018bb406a
| 192
|
py
|
Python
|
mjml/elements/head/mj_title.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 23
|
2020-10-02T14:52:21.000Z
|
2022-03-24T16:05:21.000Z
|
mjml/elements/head/mj_title.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 17
|
2020-10-07T14:48:06.000Z
|
2022-03-18T13:56:11.000Z
|
mjml/elements/head/mj_title.py
|
ESA-CCI-ODP/mjml-stub
|
ffd824923de85f3c02fca7f83ef6b540be048414
|
[
"MIT"
] | 8
|
2021-01-13T11:54:41.000Z
|
2022-03-10T15:50:55.000Z
|
from ._head_base import HeadComponent
__all__ = ['MjTitle']
class MjTitle(HeadComponent):
def handler(self):
add = self.context['add']
add('title', self.getContent())
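# For example, rendering a hypothetical <mj-title>Hello</mj-title> element
# results in add('title', 'Hello') being called on the shared head context.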
| 16
| 39
| 0.651042
|
01fdb3d4c3ccc53c2cf9cb8bfd2a974596871b73
| 71,915
|
py
|
Python
|
h2o-py/h2o/grid/grid_search.py
|
jancijen/h2o-3
|
08b11fb8987656ce9b6b598b4832fd04605f5437
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/h2o/grid/grid_search.py
|
jancijen/h2o-3
|
08b11fb8987656ce9b6b598b4832fd04605f5437
|
[
"Apache-2.0"
] | 1
|
2020-05-10T15:33:07.000Z
|
2020-05-10T15:33:07.000Z
|
h2o-py/h2o/grid/grid_search.py
|
jancijen/h2o-3
|
08b11fb8987656ce9b6b598b4832fd04605f5437
|
[
"Apache-2.0"
] | 1
|
2020-04-17T13:06:26.000Z
|
2020-04-17T13:06:26.000Z
|
# -*- encoding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import itertools
import h2o
from h2o.base import Keyed
from h2o.job import H2OJob
from h2o.frame import H2OFrame
from h2o.exceptions import H2OValueError
from h2o.estimators.estimator_base import H2OEstimator
from h2o.two_dim_table import H2OTwoDimTable
from h2o.display import H2ODisplay
from h2o.grid.metrics import * # NOQA
from h2o.utils.metaclass import Alias as alias, BackwardsCompatible, Deprecated as deprecated, h2o_meta
from h2o.utils.shared_utils import quoted
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, is_type
@BackwardsCompatible(
instance_attrs=dict(
giniCoef=lambda self, *args, **kwargs: self.gini(*args, **kwargs)
)
)
class H2OGridSearch(h2o_meta(Keyed)):
"""
Grid Search of a Hyper-Parameter Space for a Model
:param model: The type of model to be explored initialized with optional parameters that will be
unchanged across explored models.
:param hyper_params: A dictionary of string parameters (keys) and a list of values to be explored by grid
search (values).
:param str grid_id: The unique id assigned to the resulting grid object. If none is given, an id will
automatically be generated.
:param search_criteria: The optional dictionary of directives which control the search of the hyperparameter space.
The dictionary can include values for: ``strategy``, ``max_models``, ``max_runtime_secs``, ``stopping_metric``,
``stopping_tolerance``, ``stopping_rounds`` and ``seed``. The default strategy, "Cartesian", covers the entire space of
hyperparameter combinations. If you want to use cartesian grid search, you can leave the search_criteria
argument unspecified. Specify the "RandomDiscrete" strategy to get random search of all the combinations of
your hyperparameters with three ways of specifying when to stop the search: max number of models, max time, and
metric-based early stopping (e.g., stop if MSE hasn’t improved by 0.0001 over the 5 best models).
Examples below::
>>> criteria = {"strategy": "RandomDiscrete", "max_runtime_secs": 600,
... "max_models": 100, "stopping_metric": "AUTO",
... "stopping_tolerance": 0.00001, "stopping_rounds": 5,
... "seed": 123456}
>>> criteria = {"strategy": "RandomDiscrete", "max_models": 42,
... "max_runtime_secs": 28800, "seed": 1234}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_metric": "AUTO",
... "stopping_tolerance": 0.001, "stopping_rounds": 10}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 5,
... "stopping_metric": "misclassification",
... "stopping_tolerance": 0.00001}
:param parallelism: Level of parallelism during grid model building. 1 = sequential building (default).
Use the value of 0 for adaptive parallelism - decided by H2O. Any number > 1 sets the exact number of models
built in parallel.
:returns: a new H2OGridSearch instance
Examples
--------
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> hyper_parameters = {'alpha': [0.01,0.5], 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
>>> training_data = h2o.import_file("smalldata/logreg/benign.csv")
    >>> gs.train(x=list(range(3)) + list(range(4,11)), y=3, training_frame=training_data)
>>> gs.show()
"""
def __init__(self, model, hyper_params, grid_id=None, search_criteria=None, export_checkpoints_dir=None,
parallelism=1):
assert_is_type(model, None, H2OEstimator, lambda mdl: issubclass(mdl, H2OEstimator))
assert_is_type(hyper_params, dict)
assert_is_type(grid_id, None, str)
assert_is_type(search_criteria, None, dict)
if not (model is None or is_type(model, H2OEstimator)): model = model()
self._id = grid_id
self.model = model
self.hyper_params = dict(hyper_params)
self.search_criteria = None if search_criteria is None else dict(search_criteria)
self.export_checkpoints_dir = export_checkpoints_dir
self._parallelism = parallelism # Degree of parallelism during model building
self._grid_json = None
self.models = None # list of H2O Estimator instances
self._parms = {} # internal, for object recycle #
self.parms = {} # external#
self._future = False # used by __repr__/show to query job state#
self._job = None # used when _future is True#
@property
def key(self):
return self._id
@property
def grid_id(self):
"""A key that identifies this grid search object in H2O.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3))+list(range(4,11)), y=3, training_frame=training_data)
>>> gs.grid_id
"""
return self._id
@grid_id.setter
def grid_id(self, value):
oldname = self.grid_id
self._id = value
h2o.rapids('(rename "{}" "{}")'.format(oldname, value))
@property
def model_ids(self):
"""
Returns model ids.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3))+list(range(4,11)), y=3, training_frame=training_data)
>>> gs.model_ids
"""
return [i['name'] for i in self._grid_json["model_ids"]]
@property
def hyper_names(self):
"""
Return the hyperparameter names.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3))+list(range(4,11)), y=3, training_frame=training_data)
>>> gs.hyper_names
"""
return self._grid_json["hyper_names"]
@property
def failed_params(self):
"""
Return a list of failed parameters.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6],
... 'beta_epsilon': [0.05]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3))+list(range(4,11)), y=3, training_frame=training_data)
>>> gs.failed_params
"""
return self._grid_json.get("failed_params", None)
@property
def failure_details(self):
return self._grid_json.get("failure_details", None)
@property
def failure_stack_traces(self):
return self._grid_json.get("failure_stack_traces", None)
@property
def failed_raw_params(self):
return self._grid_json.get("failed_raw_params", None)
def detach(self):
self._id = None
def start(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Asynchronous model build by specifying the predictor columns, response column, and any
additional frame-specific values.
To block for results, call :meth:`join`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5), hyper_params)
>>> gs.start(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.join()
"""
self._future = True
self.train(x=x,
y=y,
training_frame=training_frame,
offset_column=offset_column,
fold_column=fold_column,
weights_column=weights_column,
validation_frame=validation_frame,
**params)
def join(self):
"""Wait until grid finishes computing.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5), hyper_params)
>>> gs.start(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.join()
"""
self._future = False
self._job.poll()
self._job = None
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
"""
algo_params = locals()
parms = self._parms.copy()
parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
# dictionaries have special handling in grid search, avoid the implicit conversion
parms["search_criteria"] = None if self.search_criteria is None else str(self.search_criteria)
parms["export_checkpoints_dir"] = self.export_checkpoints_dir
parms["parallelism"] = self._parallelism
parms["hyper_parameters"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search
parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search
parms.update(params)
        if '__class__' in parms:  # FIXME: hack for PY3
del parms['__class__']
y = algo_params["y"]
tframe = algo_params["training_frame"]
if tframe is None: raise ValueError("Missing training_frame")
if y is not None:
if is_type(y, list, tuple):
if len(y) == 1:
parms["y"] = y[0]
else:
raise ValueError('y must be a single column reference')
if x is None:
if(isinstance(y, int)):
xset = set(range(training_frame.ncols)) - {y}
else:
xset = set(training_frame.names) - {y}
else:
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-training_frame.ncols <= xi < training_frame.ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(training_frame.names[xi])
else:
if xi not in training_frame.names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
parms["x"] = x
self.build_model(parms)
def build_model(self, algo_params):
"""(internal)"""
if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
x = algo_params.pop("x")
y = algo_params.pop("y", None)
training_frame = algo_params.pop("training_frame")
validation_frame = algo_params.pop("validation_frame", None)
is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
algo = self.model._compute_algo() # unique to grid search
is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
if not is_unsupervised and y is None: raise ValueError("Missing response")
if not is_unsupervised:
y = y if y in training_frame.names else training_frame.names[y]
self.model._estimator_type = "classifier" if training_frame.types[y] == "enum" else "regressor"
self._model_build(x, y, training_frame, validation_frame, algo_params)
def _model_build(self, x, y, tframe, vframe, kwargs):
kwargs['training_frame'] = tframe
if vframe is not None: kwargs["validation_frame"] = vframe
if is_type(y, int): y = tframe.names[y]
if y is not None: kwargs['response_column'] = y
if not is_type(x, list, tuple): x = [x]
if is_type(x[0], int):
x = [tframe.names[i] for i in x]
offset = kwargs["offset_column"]
folds = kwargs["fold_column"]
weights = kwargs["weights_column"]
ignored_columns = list(set(tframe.names) - set(x + [y, offset, folds, weights]))
kwargs["ignored_columns"] = None if not ignored_columns else [quoted(col) for col in ignored_columns]
kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if
kwargs[k] is not None]) # gruesome one-liner
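        # e.g. {'training_frame': <H2OFrame 'py_1'>, 'offset_column': None}
        # becomes {'training_frame': 'py_1'}: frames are replaced by their
        # frame_id and None-valued entries are dropped before the REST call
        # (the keys shown are illustrative).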
algo = self.model._compute_algo() # unique to grid search
if self.grid_id is not None: kwargs["grid_id"] = self.grid_id
rest_ver = kwargs.pop("_rest_version") if "_rest_version" in kwargs else None
grid = H2OJob(h2o.api("POST /99/Grid/%s" % algo, data=kwargs), job_type=(algo + " Grid Build"))
if self._future:
self._job = grid
return
grid.poll()
grid_json = h2o.api("GET /99/Grids/%s" % (grid.dest_key))
failure_messages_stacks = ""
error_index = 0
if len(grid_json["failure_details"]) > 0:
print("Errors/Warnings building gridsearch model\n")
# will raise error if no grid model is returned, store error messages here
for error_message in grid_json["failure_details"]:
if isinstance(grid_json["failed_params"][error_index], dict):
for h_name in grid_json['hyper_names']:
print("Hyper-parameter: {0}, {1}".format(h_name,
grid_json['failed_params'][error_index][h_name]))
if len(grid_json["failure_stack_traces"]) > error_index:
print("failure_details: {0}\nfailure_stack_traces: "
"{1}\n".format(error_message, grid_json['failure_stack_traces'][error_index]))
failure_messages_stacks += error_message+'\n'
error_index += 1
self.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']]
for model in self.models:
model._estimator_type = self.model._estimator_type
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
# sometimes no model is returned due to bad parameter values provided by the user.
if len(grid_json['model_ids']) > 0:
first_model_json = h2o.api("GET /%d/Models/%s" %
(rest_ver or 3, grid_json['model_ids'][0]['name']))['models'][0]
self._resolve_grid(grid.dest_key, grid_json, first_model_json)
else:
if len(failure_messages_stacks)>0:
raise ValueError(failure_messages_stacks)
else:
raise ValueError("Gridsearch returns no model due to bad parameter values or other reasons....")
def _resolve_grid(self, grid_id, grid_json, first_model_json):
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = grid_id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = self._parms
self.export_checkpoints_dir = m._grid_json["export_checkpoints_dir"]
H2OEstimator.mixin(self, model_class)
self.__dict__.update(m.__dict__.copy())
def __getitem__(self, item):
return self.models[item]
def __iter__(self):
nmodels = len(self.models)
return (self[i] for i in range(nmodels))
def __len__(self):
return len(self.models)
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.predict(benign)
"""
return {model.model_id: model.predict(test_data) for model in self.models}
def is_cross_validated(self):
"""Return True if the model was cross-validated.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.is_cross_validated()
"""
return {model.model_id: model.is_cross_validated() for model in self.models}
def xval_keys(self):
"""Model keys for the cross-validated model.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.xval_keys()
"""
return {model.model_id: model.xval_keys() for model in self.models}
def get_xval_models(self, key=None):
"""
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> fr = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/prostate_train.csv")
>>> m = H2OGradientBoostingEstimator(nfolds=10,
... ntrees=10,
... keep_cross_validation_models=True)
>>> m.train(x=list(range(2,fr.ncol)), y=1, training_frame=fr)
>>> m.get_xval_models()
"""
return {model.model_id: model.get_xval_models(key) for model in self.models}
def xvals(self):
"""Return the list of cross-validated models."""
return {model.model_id: model.xvals for model in self.models}
def deepfeatures(self, test_data, layer):
"""
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
:examples:
>>> from h2o.estimators import H2OAutoEncoderEstimator
>>> resp = 784
>>> nfeatures = 20
>>> train = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> train[resp] = train[resp].asfactor()
>>> test = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> test[resp] = test[resp].asfactor()
>>> sid = train[0].runif(0)
>>> train_unsup = train[sid >= 0.5]
>>> train_unsup.pop(resp)
>>> train_sup = train[sid < 0.5]
>>> ae_model = H2OAutoEncoderEstimator(activation="Tanh",
... hidden=[nfeatures],
... model_id="ae_model",
... epochs=1,
... ignore_const_cols=False,
... reproducible=True,
... seed=1234)
>>> ae_model.train(list(range(resp)), training_frame=train_unsup)
>>> ae_model.deepfeatures(train_sup[0:resp], 0)
"""
return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
        :param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> hh = H2ODeepLearningEstimator(hidden=[],
... loss="CrossEntropy",
... export_weights_and_biases=True)
>>> hh.train(x=list(range(4)), y=4, training_frame=iris)
>>> hh.weights(0)
"""
return {model.model_id: model.weights(matrix_id) for model in self.models}
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
:param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> hh = H2ODeepLearningEstimator(hidden=[],
... loss="CrossEntropy",
... export_weights_and_biases=True)
>>> hh.train(x=list(range(4)), y=4, training_frame=iris)
>>> hh.biases(0)
"""
return {model.model_id: model.biases(vector_id) for model in self.models}
def normmul(self):
"""Normalization/Standardization multipliers for numeric predictors.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.normmul()
"""
return {model.model_id: model.normmul() for model in self.models}
def normsub(self):
"""Normalization/Standardization offsets for numeric predictors.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.normsub()
"""
return {model.model_id: model.normsub() for model in self.models}
def respmul(self):
"""Normalization/Standardization multipliers for numeric response.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.respmul()
"""
return {model.model_id: model.respmul() for model in self.models}
def respsub(self):
"""Normalization/Standardization offsets for numeric response.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.respsub()
"""
return {model.model_id: model.respsub() for model in self.models}
def catoffsets(self):
"""
Categorical offsets for one-hot encoding
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> hh = H2ODeepLearningEstimator(hidden=[],
... loss="CrossEntropy",
... export_weights_and_biases=True)
>>> hh.train(x=list(range(4)), y=4, training_frame=iris)
>>> hh.catoffsets()
"""
return {model.model_id: model.catoffsets() for model in self.models}
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
        :param test_data: Data set against which model metrics shall be computed. All three of the train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
        :param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> data = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
>>> test = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_test_5k.csv")
>>> x = data.columns
>>> y = "response"
>>> x.remove(y)
>>> data[y] = data[y].asfactor()
>>> test[y] = test[y].asfactor()
>>> ss = data.split_frame(seed = 1)
>>> train = ss[0]
>>> valid = ss[1]
>>> gbm_params1 = {'learn_rate': [0.01, 0.1],
... 'max_depth': [3, 5, 9],
... 'sample_rate': [0.8, 1.0],
... 'col_sample_rate': [0.2, 0.5, 1.0]}
>>> gbm_grid1 = H2OGridSearch(model=H2OGradientBoostingEstimator,
... grid_id='gbm_grid1',
... hyper_params=gbm_params1)
>>> gbm_grid1.train(x=x, y=y,
... training_frame=train,
... validation_frame=valid,
... ntrees=100,
... seed=1)
>>> gbm_gridperf1 = gbm_grid1.get_grid(sort_by='auc', decreasing=True)
>>> best_gbm1 = gbm_gridperf1.models[0]
>>> best_gbm1.model_performance(test)
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}
def scoring_history(self):
"""
Retrieve model scoring history.
:returns: Score history (H2OTwoDimTable)
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.scoring_history()
"""
return {model.model_id: model.scoring_history() for model in self.models}
def summary(self, header=True):
"""Print a detailed summary of the explored models.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.summary()
"""
table = []
for model in self.models:
model_summary = model._model_json["output"]["model_summary"]
r_values = list(model_summary.cell_values[0])
r_values[0] = model.model_id
table.append(r_values)
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(table,columns=self.col_header)
# return
print()
if header:
print('Grid Summary:')
print()
H2ODisplay(table, header=['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
def show(self):
"""Print models sorted by metric.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.show()
"""
hyper_combos = itertools.product(*list(self.hyper_params.values()))
if not self.models:
c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
print(H2OTwoDimTable(
col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))
else:
print(self.sorted_metric_table())
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.varimp(use_pandas=True)
"""
return {model.model_id: model.varimp(use_pandas) for model in self.models}
def residual_deviance(self, train=False, valid=False, xval=False):
"""
        Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param bool train: Get the residual deviance for the training set. If both train and valid are False,
then train is selected by default.
:param bool valid: Get the residual deviance for the validation set. If both train and valid are True,
then train is selected by default.
:param bool xval: Get the residual deviance for the cross-validated models.
:returns: the residual deviance, or None if it is not present.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.residual_deviance()
"""
return {model.model_id: model.residual_deviance(train, valid, xval) for model in self.models}
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the residual dof for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the residual dof for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the residual dof for the cross-validated models.
:returns: the residual degrees of freedom, or None if they are not present.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.residual_degrees_of_freedom()
"""
return {model.model_id: model.residual_degrees_of_freedom(train, valid, xval) for model in self.models}
def null_deviance(self, train=False, valid=False, xval=False):
"""
        Retrieve the null deviance if this model has the attribute, or None otherwise.
:param bool train: Get the null deviance for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the null deviance for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the null deviance for the cross-validated models.
:returns: the null deviance, or None if it is not present.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.null_deviance()
"""
return {model.model_id: model.null_deviance(train, valid, xval) for model in self.models}
def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the null dof for the training set. If both train and valid are False, then train is
selected by default.
:param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
selected by default.
:param bool xval: Get the null dof for the cross-validated models.
:returns: the null dof, or None if it is not present.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.null_degrees_of_freedom()
"""
return {model.model_id: model.null_degrees_of_freedom(train, valid, xval) for model in self.models}
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients).
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.pprint_coef()
"""
for i, model in enumerate(self.models):
print('Model', i)
model.pprint_coef()
print()
def coef(self):
"""Return the coefficients that can be applied to the non-standardized data.
Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3)) + list(range(4, 11)), y=3, training_frame=training_data)
>>> gs.coef()
"""
return {model.model_id: model.coef() for model in self.models}
def coef_norm(self):
"""Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> training_data = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/logreg/benign.csv")
>>> hyper_parameters = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_parameters)
        >>> gs.train(x=list(range(3)) + list(range(4, 11)), y=3, training_frame=training_data)
>>> gs.coef_norm()
"""
return {model.model_id: model.coef_norm() for model in self.models}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be ``1 - MSE/var``, where ``var`` is computed as ``sigma^2``.
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the R^2 value for the training data.
:param bool valid: If valid is True, then return the R^2 value for the validation data.
:param bool xval: If xval is True, then return the R^2 value for the cross validation data.
:returns: The R^2 for this regression model.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.r2()
"""
return {model.model_id: model.r2(train, valid, xval) for model in self.models}
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the MSE value for the training data.
:param bool valid: If valid is True, then return the MSE value for the validation data.
:param bool xval: If xval is True, then return the MSE value for the cross validation data.
:returns: The MSE for this regression model.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.mse()
"""
return {model.model_id: model.mse(train, valid, xval) for model in self.models}
def rmse(self, train=False, valid=False, xval=False):
return {model.model_id: model.rmse(train, valid, xval) for model in self.models}
def mae(self, train=False, valid=False, xval=False):
return {model.model_id: model.mae(train, valid, xval) for model in self.models}
def rmsle(self, train=False, valid=False, xval=False):
return {model.model_id: model.rmsle(train, valid, xval) for model in self.models}
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Log Loss value for the training data.
:param bool valid: If valid is True, then return the Log Loss value for the validation data.
:param bool xval: If xval is True, then return the Log Loss value for the cross validation data.
:returns: The Log Loss for this binomial model.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.logloss()
"""
return {model.model_id: model.logloss(train, valid, xval) for model in self.models}
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviances(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
:param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:returns: The Mean Residual Deviance for this regression model.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.mean_residual_deviance()
"""
return {model.model_id: model.mean_residual_deviance(train, valid, xval) for model in self.models}
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AUC value for the training data.
:param bool valid: If valid is True, then return the AUC value for the validation data.
        :param bool xval: If xval is True, then return the AUC value for the cross validation data.
:returns: The AUC.
:examples:
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> data = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
>>> test = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_test_5k.csv")
>>> x = data.columns
>>> y = "response"
>>> x.remove(y)
>>> data[y] = data[y].asfactor()
>>> test[y] = test[y].asfactor()
>>> ss = data.split_frame(seed = 1)
>>> train = ss[0]
>>> valid = ss[1]
>>> gbm_params1 = {'learn_rate': [0.01, 0.1],
... 'max_depth': [3, 5, 9],
... 'sample_rate': [0.8, 1.0],
... 'col_sample_rate': [0.2, 0.5, 1.0]}
>>> gbm_grid1 = H2OGridSearch(model=H2OGradientBoostingEstimator,
... grid_id='gbm_grid1',
... hyper_params=gbm_params1)
>>> gbm_grid1.train(x=x, y=y,
... training_frame=train,
... validation_frame=valid,
... ntrees=100,
... seed=1)
        >>> gbm_gridperf1 = gbm_grid1.get_grid(sort_by='auc', decreasing=True)
>>> best_gbm1 = gbm_gridperf1.models[0]
>>> best_gbm_perf1 = best_gbm1.model_performance(test)
>>> best_gbm_perf1.auc()
"""
return {model.model_id: model.auc(train, valid, xval) for model in self.models}
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AIC value for the training data.
:param bool valid: If valid is True, then return the AIC value for the validation data.
        :param bool xval: If xval is True, then return the AIC value for the cross validation data.
:returns: The AIC.
:examples:
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> prostate[2] = prostate[2].asfactor()
>>> prostate[4] = prostate[4].asfactor()
>>> prostate[5] = prostate[5].asfactor()
>>> prostate[8] = prostate[8].asfactor()
>>> predictors = ["AGE","RACE","DPROS","DCAPS","PSA","VOL","GLEASON"]
>>> response = "CAPSULE"
>>> hyper_params = {'alpha': [0.01,0.5],
... 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=predictors, y=response, training_frame=prostate)
>>> gs.aic()
"""
return {model.model_id: model.aic(train, valid, xval) for model in self.models}
def gini(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Gini Coefficient value for the training data.
:param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:returns: The Gini Coefficient for the models in this grid.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.gini()
"""
return {model.model_id: model.gini(train, valid, xval) for model in self.models}
# @alias('pr_auc')
def aucpr(self, train=False, valid=False, xval=False):
"""
        Get the aucPR (Area Under the Precision-Recall Curve).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the aucpr value for the training data.
:param bool valid: If valid is True, then return the aucpr value for the validation data.
        :param bool xval: If xval is True, then return the aucpr value for the cross validation data.
:returns: The AUCPR for the models in this grid.
"""
return {model.model_id: model.aucpr(train, valid, xval) for model in self.models}
@deprecated(replaced_by=aucpr)
def pr_auc(self):
pass
def get_hyperparams(self, id, display=True):
"""
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> best_model_id = gs.get_grid(sort_by='F1',
... decreasing=True).model_ids[0]
>>> gs.get_hyperparams(best_model_id)
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
        # if cross-validation is turned on, it is one of the fold models (and not the main model
        # that is returned) that actually contains the max_runtime_secs parameter.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)
else model.params[h]['actual']
for h in self.hyper_params]
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return res
def get_hyperparams_dict(self, id, display=True):
"""
        Derive and return the model parameters used to train this particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
        :returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> best_model_id = gs.get_grid(sort_by='F1',
... decreasing=True).model_ids[0]
>>> gs.get_hyperparams_dict(best_model_id)
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
model_params = dict()
        # if cross-validation is turned on, it is one of the fold models (and not the main model
        # that is returned) that actually contains the max_runtime_secs parameter.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
for param_name in self.hyper_names:
model_params[param_name] = model.params[param_name]['actual'][0] if \
isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return model_params
def sorted_metric_table(self):
"""
Retrieve summary table of an H2O Grid Search.
:returns: The summary table as an H2OTwoDimTable or a Pandas DataFrame.
:examples:
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> insurance = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> insurance["offset"] = insurance["Holders"].log()
>>> insurance["Group"] = insurance["Group"].asfactor()
>>> insurance["Age"] = insurance["Age"].asfactor()
>>> insurance["District"] = insurance["District"].asfactor()
>>> hyper_params = {'huber_alpha': [0.2,0.5],
... 'quantile_alpha': [0.2,0.6]}
>>> from h2o.estimators import H2ODeepLearningEstimator
>>> gs = H2OGridSearch(H2ODeepLearningEstimator(epochs=5),
... hyper_params)
>>> gs.train(x=list(range(3)),y="Claims", training_frame=insurance)
>>> gs.sorted_metric_table()
"""
summary = self._grid_json["summary_table"]
if summary is not None: return summary.as_data_frame()
print("No sorted metric table for this grid search")
@staticmethod
def _metrics_class(model_json):
model_type = model_json["output"]["model_category"]
if model_type == "Binomial":
model_class = H2OBinomialGridSearch
elif model_type == "Clustering":
model_class = H2OClusteringGridSearch
elif model_type == "Regression":
model_class = H2ORegressionGridSearch
elif model_type == "Multinomial":
model_class = H2OMultinomialGridSearch
elif model_type == "Ordinal":
model_class = H2OOrdinalGridSearch
elif model_type == "AutoEncoder":
model_class = H2OAutoEncoderGridSearch
elif model_type == "DimReduction":
model_class = H2ODimReductionGridSearch
else:
raise NotImplementedError(model_type)
return model_class
def get_grid(self, sort_by=None, decreasing=None):
"""
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
:examples:
>>> from h2o.estimators import H2OGeneralizedLinearEstimator
>>> from h2o.grid.grid_search import H2OGridSearch
>>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv")
>>> y = 3
>>> x = [4,5,6,7,8,9,10,11]
>>> hyper_params = {'alpha': [0.01,0.3,0.5],
... 'lambda': [1e-5, 1e-6, 1e-7]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'),
... hyper_params)
>>> gs.train(x=x,y=y, training_frame=benign)
>>> gs.get_grid(sort_by='F1', decreasing=True)
"""
if sort_by is None and decreasing is None: return self
grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
grid = H2OGridSearch(self.model, self.hyper_params, self._id)
grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered
first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = self._id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = grid._parms
H2OEstimator.mixin(grid, model_class)
grid.__dict__.update(m.__dict__.copy())
return grid
@deprecated("grid.sort_by() is deprecated; use grid.get_grid() instead")
def sort_by(self, metric, increasing=True):
"""Deprecated since 2016-12-12, use grid.get_grid() instead."""
if metric[-1] != ')': metric += '()'
c_values = [list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'), key=lambda k_v: k_v[1]))]
c_values.insert(1, [self.get_hyperparams(model_id, display=False) for model_id in c_values[0]])
if not increasing:
for col in c_values: col.reverse()
if metric[-2] == '(': metric = metric[:-2]
return H2OTwoDimTable(
col_header=['Model Id', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']', metric],
table_header='Grid Search Results for ' + self.model.__class__.__name__,
cell_values=[list(x) for x in zip(*c_values)])
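# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal, assumed
# workflow that exercises the accessors defined above -- in particular
# aucpr(), which has no docstring example. It reuses the Higgs frame from the
# docstrings; an H2O cluster must be reachable, and the grid id
# "gbm_grid_aucpr_demo" together with the variable names below is made up.
if __name__ == "__main__":
    from h2o.estimators import H2OGradientBoostingEstimator
    h2o.init()
    data = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
    y = "response"
    x = data.columns
    x.remove(y)
    data[y] = data[y].asfactor()
    # H2OGridSearch is the class defined above in this module.
    demo_grid = H2OGridSearch(model=H2OGradientBoostingEstimator(ntrees=20, seed=1),
                              grid_id="gbm_grid_aucpr_demo",
                              hyper_params={"max_depth": [3, 5]})
    demo_grid.train(x=x, y=y, training_frame=data)
    print(demo_grid.aucpr())                # per-model area under the precision-recall curve
    print(demo_grid.sorted_metric_table())  # grid summary table, sorted by the default metric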
| 46.456718
| 181
| 0.606077
|
0a6b2063cb270e279fa41cec6709ca1229abaf98
| 1,260
|
py
|
Python
|
PyFile/__init__.py
|
malcolmraine/PyFile
|
933454f22362392adac049d099dad03b5384f9e8
|
[
"MIT"
] | null | null | null |
PyFile/__init__.py
|
malcolmraine/PyFile
|
933454f22362392adac049d099dad03b5384f9e8
|
[
"MIT"
] | 1
|
2020-05-11T02:13:17.000Z
|
2020-05-11T02:13:17.000Z
|
PyFile/__init__.py
|
malcolmraine/PyFile
|
933454f22362392adac049d099dad03b5384f9e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
File: __init__.py
Description:
Author: Malcolm Hall
Version: 1
MIT License
Copyright (c) 2020 Malcolm Hall
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .config import *
from .file import File
__version_info__ = (0, 0, 1)
__version__ = "0.0.1"
| 36
| 78
| 0.788095
|
cba4d67a0ff946d74291404fdee59515897d4875
| 4,223
|
py
|
Python
|
contest/serializers.py
|
custoj/CustOJ-Backend
|
5451ebb3541a0376f50235896ac4e28124cc41d7
|
[
"MIT"
] | 1
|
2019-10-16T11:25:41.000Z
|
2019-10-16T11:25:41.000Z
|
contest/serializers.py
|
custoj/CustOJ-Backend
|
5451ebb3541a0376f50235896ac4e28124cc41d7
|
[
"MIT"
] | 4
|
2021-03-19T03:28:48.000Z
|
2022-03-11T23:58:02.000Z
|
contest/serializers.py
|
custoj/CustOJ-Backend
|
5451ebb3541a0376f50235896ac4e28124cc41d7
|
[
"MIT"
] | null | null | null |
from utils.api import UsernameSerializer, serializers
from .models import Contest, ContestAnnouncement, ContestRuleType
from .models import ACMContestRank, OIContestRank
class CreateConetestSeriaizer(serializers.Serializer):
title = serializers.CharField(max_length=128)
description = serializers.CharField()
start_time = serializers.DateTimeField()
end_time = serializers.DateTimeField()
rule_type = serializers.ChoiceField(choices=[ContestRuleType.ACM, ContestRuleType.OI])
password = serializers.CharField(allow_blank=True, max_length=32)
visible = serializers.BooleanField()
real_time_rank = serializers.BooleanField()
allowed_ip_ranges = serializers.ListField(child=serializers.CharField(max_length=32), allow_empty=True)
class EditConetestSeriaizer(serializers.Serializer):
id = serializers.IntegerField()
title = serializers.CharField(max_length=128)
description = serializers.CharField()
start_time = serializers.DateTimeField()
end_time = serializers.DateTimeField()
password = serializers.CharField(allow_blank=True, allow_null=True, max_length=32)
visible = serializers.BooleanField()
real_time_rank = serializers.BooleanField()
allowed_ip_ranges = serializers.ListField(child=serializers.CharField(max_length=32))
class ContestAdminSerializer(serializers.ModelSerializer):
created_by = UsernameSerializer()
status = serializers.CharField()
contest_type = serializers.CharField()
class Meta:
model = Contest
fields = "__all__"
class ContestSerializer(ContestAdminSerializer):
class Meta:
model = Contest
exclude = ("password", "visible", "allowed_ip_ranges", "similarity_check_result")
class ContestAnnouncementSerializer(serializers.ModelSerializer):
created_by = UsernameSerializer()
class Meta:
model = ContestAnnouncement
fields = "__all__"
class CreateContestAnnouncementSerializer(serializers.Serializer):
contest_id = serializers.IntegerField()
title = serializers.CharField(max_length=128)
content = serializers.CharField()
visible = serializers.BooleanField()
class EditContestAnnouncementSerializer(serializers.Serializer):
id = serializers.IntegerField()
title = serializers.CharField(max_length=128, required=False)
content = serializers.CharField(required=False, allow_blank=True)
visible = serializers.BooleanField(required=False)
class ContestPasswordVerifySerializer(serializers.Serializer):
contest_id = serializers.IntegerField()
password = serializers.CharField(max_length=30, required=True)
class ACMContestRankSerializer(serializers.ModelSerializer):
user = serializers.SerializerMethodField()
rank = serializers.SerializerMethodField()
class Meta:
model = ACMContestRank
fields = "__all__"
def __init__(self, *args, **kwargs):
self.is_contest_admin = kwargs.pop("is_contest_admin", False)
super().__init__(*args, **kwargs)
def get_user(self, obj):
return UsernameSerializer(obj.user, need_real_name=True or self.is_contest_admin, need_school=True or self.is_contest_admin).data
def get_rank(self, obj):
return obj.rank
class OIContestRankSerializer(serializers.ModelSerializer):
user = serializers.SerializerMethodField()
rank = serializers.SerializerMethodField()
class Meta:
model = OIContestRank
fields = "__all__"
def __init__(self, *args, **kwargs):
self.is_contest_admin = kwargs.pop("is_contest_admin", False)
super().__init__(*args, **kwargs)
def get_user(self, obj):
return UsernameSerializer(obj.user, need_real_name=True or self.is_contest_admin, need_school=True or self.is_contest_admin).data
def get_rank(self, obj):
return obj.rank
class ACMContesHelperSerializer(serializers.Serializer):
contest_id = serializers.IntegerField()
problem_id = serializers.CharField()
rank_id = serializers.IntegerField()
checked = serializers.BooleanField()
class ContestSimilarResultSerializer(serializers.ModelSerializer):
class Meta:
model = Contest
fields = ["similarity_check_result"]
| 34.333333
| 137
| 0.749941
|
120e87f710a8257d84abc06f5ce93e117dc60166
| 155
|
py
|
Python
|
189. Rotate Array.py
|
Into-Y0u/Github-Baby
|
5e4e6b02f49c2c99533289be9d49911006cad919
|
[
"MIT"
] | null | null | null |
189. Rotate Array.py
|
Into-Y0u/Github-Baby
|
5e4e6b02f49c2c99533289be9d49911006cad919
|
[
"MIT"
] | null | null | null |
189. Rotate Array.py
|
Into-Y0u/Github-Baby
|
5e4e6b02f49c2c99533289be9d49911006cad919
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
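        # Normalize k first (rotating by len(nums) is a no-op), then rebuild the list in place
        # via slice assignment: the last k elements move to the front.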
        k = k % len(nums)
nums[:] = nums[len(nums)-k:] + nums[:len(nums)-k]
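# Hedged usage sketch (not part of the original file): rotating a small list to
# the right by three positions with the method above.
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)  # [5, 6, 7, 1, 2, 3, 4]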
| 31
| 57
| 0.529032
|
83661a1d0b87ca7439fec051ed317260616b935c
| 8,121
|
py
|
Python
|
lithops/storage/backends/infinispan/infinispan.py
|
aitorarjona/lithops
|
d872177b0dbb411456af47db7e383d71c28fb257
|
[
"Apache-2.0"
] | 1
|
2021-05-21T13:27:08.000Z
|
2021-05-21T13:27:08.000Z
|
lithops/storage/backends/infinispan/infinispan.py
|
aitorarjona/lithops
|
d872177b0dbb411456af47db7e383d71c28fb257
|
[
"Apache-2.0"
] | null | null | null |
lithops/storage/backends/infinispan/infinispan.py
|
aitorarjona/lithops
|
d872177b0dbb411456af47db7e383d71c28fb257
|
[
"Apache-2.0"
] | null | null | null |
#
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import requests
import json
import base64
import io
from requests.auth import HTTPBasicAuth
from lithops.constants import STORAGE_CLI_MSG
from lithops.storage.utils import StorageNoSuchKeyError
logger = logging.getLogger(__name__)
class InfinispanBackend:
"""
Infinispan backend
"""
def __init__(self, infinispan_config):
logger.debug("Creating Infinispan storage client")
self.infinispan_config = infinispan_config
self.basicAuth = HTTPBasicAuth(infinispan_config.get('username'),
infinispan_config.get('password'))
self.endpoint = infinispan_config.get('endpoint')
self.cache_names = infinispan_config.get('cache_names', ['storage'])
self.cache_type = infinispan_config.get('cache_type', 'org.infinispan.DIST_SYNC')
self.infinispan_client = requests.session()
self.__is_server_version_supported()
        self.caches = {}
for cache_name in self.cache_names:
self.__create_cache(cache_name, self.cache_type)
self.headers = {"Content-Type": "application/octet-stream",
"Key-Content-Type": "application/octet-stream;encoding=base64"}
msg = STORAGE_CLI_MSG.format('Infinispan')
logger.info("{} - Endpoint: {}".format(msg, self.endpoint))
def __create_cache(self, cache_name, cache_type):
url = self.endpoint + '/rest/v2/caches/' + cache_name
res = self.infinispan_client.head(url, auth=self.basicAuth)
if res.status_code == 404:
logger.debug('going to create new Infinispan cache {}'.format(cache_name))
            url = self.endpoint + '/rest/v2/caches/' + cache_name + '?template=' + cache_type
            res = self.infinispan_client.post(url, auth=self.basicAuth)
logger.debug('New Infinispan cache {} created with '
'status {}'.format(cache_name, res.status_code))
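    # Keys are embedded in the REST path as URL-safe base64 so that arbitrary key strings
    # survive the URL; the "Key-Content-Type: ...;encoding=base64" header set in __init__
    # tells the server how to decode them.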
def __key_url(self, bucket_name, key):
keySafeEncodedBytes = base64.urlsafe_b64encode(key.encode("utf-8"))
keySafeEncodedStr = str(keySafeEncodedBytes, "utf-8")
url = self.endpoint + '/rest/v2/caches/' + bucket_name + '/' + keySafeEncodedStr
return url
def __is_server_version_supported(self):
url = self.endpoint + '/rest/v2/cache-managers/default'
res = self.infinispan_client.get(url, auth=self.basicAuth)
json_resp = json.loads(res.content.decode('utf-8'))
server_version = json_resp['version'].split('.')
if (int(server_version[0]) < 10 or (int(server_version[0]) == 10 and int(server_version[1]) < 1)):
raise Exception('Infinispan versions 10.1 and up supported')
def get_client(self):
"""
Get infinispan client.
:return: infinispan_client
"""
return self.infinispan_client
def put_object(self, bucket_name, key, data):
"""
        Put an object in Infinispan. Overwrite the object if the key already exists.
:param key: key of the object.
:param data: data of the object
:type data: str/bytes
:return: None
"""
url = self.__key_url(bucket_name, key)
resp = self.infinispan_client.put(url, data=data,
auth=self.basicAuth,
headers=self.headers)
logger.debug(resp)
def get_object(self, bucket_name, key, stream=False, extra_get_args={}):
"""
        Get an object from Infinispan by key. Throws StorageNoSuchKeyError if the given key does not exist.
:param key: key of the object
:return: Data of the object
:rtype: str/bytes
"""
url = self.__key_url(bucket_name, key)
res = self.infinispan_client.get(url, headers=self.headers, auth=self.basicAuth)
data = res.content
        if data is None or len(data) == 0:
raise StorageNoSuchKeyError(bucket_name, key)
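        # The REST endpoint returns the whole value, so S3-style byte ranges
        # ("bytes=first-last", inclusive) are emulated by slicing the response body.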
if 'Range' in extra_get_args:
byte_range = extra_get_args['Range'].replace('bytes=', '')
first_byte, last_byte = map(int, byte_range.split('-'))
            data = data[first_byte:last_byte + 1]
if stream:
return io.BytesIO(data)
return data
def head_object(self, bucket_name, key):
"""
        Head an object in Infinispan by key. Throws StorageNoSuchKeyError if the given key does not exist.
        :param key: key of the object
        :return: Metadata of the object (currently only its content length)
        :rtype: dict
"""
obj = self.get_object(bucket_name, key)
if obj is None:
raise StorageNoSuchKeyError(bucket=bucket_name, key=key)
return {'content-length': str(len(obj))}
def delete_object(self, bucket_name, key):
"""
Delete an object from storage.
:param bucket: bucket name
:param key: data key
"""
url = self.__key_url(bucket_name, key)
return self.infinispan_client.delete(url, headers=self.headers, auth=self.basicAuth)
def delete_objects(self, bucket_name, key_list):
"""
Delete a list of objects from storage.
:param bucket: bucket name
:param key_list: list of keys
"""
result = []
for key in key_list:
self.delete_object(bucket_name, key)
return result
def head_bucket(self, bucket_name):
"""
        Head a bucket in Infinispan by name. Throws StorageNoSuchKeyError if the given bucket does not exist.
:param bucket_name: name of the bucket
:return: Metadata of the bucket
:rtype: str/bytes
"""
raise NotImplementedError
def list_objects(self, bucket_name, prefix=None):
"""
Return a list of objects for the given bucket and prefix.
:param bucket_name: Name of the bucket.
:param prefix: Prefix to filter object names.
:return: List of objects in bucket that match the given prefix.
:rtype: list of str
"""
url = self.endpoint + '/rest/v2/caches/' + bucket_name + '?action=keys'
res = self.infinispan_client.get(url, auth=self.basicAuth)
data = res.content
if data is None:
return None
j = json.loads(data)
result = []
if prefix is None:
pref=""
else:
pref = prefix
for k in j:
if len(k) > 0:
key = k
if key.startswith(pref):
h = self.get_object(bucket_name, key)
d = {'Key': key, 'Size': len(h)}
result.append(d)
return result
def list_keys(self, bucket_name, prefix=None):
"""
Return a list of keys for the given prefix.
:param bucket_name: Name of the bucket.
:param prefix: Prefix to filter object names.
:return: List of keys in bucket that match the given prefix.
:rtype: list of str
"""
url = self.endpoint + '/rest/v2/caches/' + bucket_name + '?action=keys'
res = self.infinispan_client.get(url, auth=self.basicAuth)
data = res.content
if data is None:
return None
j = json.loads(data)
result = []
if prefix is None:
pref=""
else:
pref = prefix
for k in j:
if len(k) > 0:
key = k
if key.startswith(pref):
result.append(k)
return result
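# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): constructing the
# backend directly and exercising the basic key/value operations. The endpoint
# and credentials are placeholders; a reachable Infinispan 10.1+ server is
# required, and the "storage" cache is created on first use.
if __name__ == "__main__":
    demo_config = {
        "endpoint": "http://127.0.0.1:11222",
        "username": "admin",
        "password": "secret",
        "cache_names": ["storage"],
    }
    backend = InfinispanBackend(demo_config)
    backend.put_object("storage", "hello/key", b"some bytes")
    print(backend.get_object("storage", "hello/key"))
    print(backend.list_keys("storage", prefix="hello/"))
    backend.delete_object("storage", "hello/key")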
| 37.772093
| 106
| 0.610762
|
37fbf1bf9d3872ff1e30f9f99c9f3b961da8d4f4
| 1,377
|
py
|
Python
|
towhee/dag/variable_repr.py
|
krishnakatyal/towhee
|
c5e043aa1509cf46644ca6b53f691d6ed2647212
|
[
"Apache-2.0"
] | 365
|
2021-07-13T09:21:46.000Z
|
2022-03-31T19:54:30.000Z
|
towhee/dag/variable_repr.py
|
Nydclky/towhee
|
7cad1b64a44ccfedb18f3064dd33e44eac88ac65
|
[
"Apache-2.0"
] | 911
|
2021-07-14T05:05:31.000Z
|
2022-03-31T14:11:59.000Z
|
towhee/dag/variable_repr.py
|
jennyli-z/towhee
|
55c55fd961229575b75eae269b55090c839f8dcd
|
[
"Apache-2.0"
] | 199
|
2021-07-13T08:40:43.000Z
|
2022-03-31T19:10:23.000Z
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.dag.base_repr import BaseRepr
class VariableRepr(BaseRepr):
"""
The representation of a variable at compile-phase.
Args:
name (`str`):
The representation name.
vtype (`str`):
This can be one of many possible variable types, such as a numpy array or
PyTorch tensor.
        identifier (`str`):
            An optional identifier string used to reference this variable
            representation.
"""
def __init__(self, name: str, vtype: str, identifier: str = None):
super().__init__(name)
self._vtype = vtype
self._id = identifier
@property
def vtype(self):
return self._vtype
# @property
# def identifier(self):
# return self._id
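# Hedged usage sketch (not part of the original module): building a variable
# representation for a hypothetical image tensor; the name, vtype string and
# identifier below are invented for illustration.
if __name__ == "__main__":
    var = VariableRepr(name="img_tensor", vtype="towhee.types.Image", identifier="v0")
    print(var.vtype)  # -> "towhee.types.Image"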
| 31.295455
| 87
| 0.66594
|
e0be36a805e028888bb1eee16a4b07530d355c71
| 11,456
|
py
|
Python
|
src/cops_and_robots/map_tools/map_elements.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | 3
|
2016-01-19T17:54:51.000Z
|
2019-10-21T12:09:03.000Z
|
src/cops_and_robots/map_tools/map_elements.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | null | null | null |
src/cops_and_robots/map_tools/map_elements.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | 5
|
2015-02-19T02:53:24.000Z
|
2019-03-05T20:29:12.000Z
|
#!/usr/bin/env python
"""Defines physical and non-physical objects used in the map environment.
``map_obj`` extends Shapely's geometry objects (generally polygons) to
be used in a robotics environmnt. Map objects can be physical,
representing walls, or non-physical, representing camera viewcones.
The visibility of an object can be toggled, and each object can have
*spaces* which define areas around the object. For example, a
rectangular wall has four intrinsic exterior spaces: front, back, left and
right. These are named spaces, but arbitrary shapes can have arbitrary numbered
spaces (such as a triangle with three numbered spaces).
"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
import matplotlib.pyplot as plt
from matplotlib.colors import cnames
from shapely.geometry import box, Polygon, LineString
from shapely.affinity import rotate
from descartes.patch import PolygonPatch
from cops_and_robots.fusion.softmax import (binary_range_model,
binary_intrinsic_space_model,
range_model,
intrinsic_space_model)
# <>TODO: Add custom softmax shapes different from map shape
class MapElement(object):
"""Generate an element based on a geometric shape, plus spatial relations.
Spaces demarcate spatial relationships around elements.
.. image:: img/classes_Map_Element.png
Note
----
If only one xy pair is given as shape_pts, MapElement will assume
the user wants to create a box with those two values as length
and width, respectively.
Shapes are created such that the centroid angle (the direction
the element is facing) is 0. To change this, use ``move_shape``.
Parameters
----------
name : str
The map element's name.
shape_pts : array_like
A list of xy pairs as [(x_i,y_i)] in [m,m] in the global (map)
coordinate frame.
pose : array_like, optional
The map element's initial [x, y, theta] in [m,m,degrees] (defaults to
[0, 0, 0]).
visible : bool, optional
Whether the element will be visibile to the human.
Default True
blocks_camera : bool, optional
Whether the element will interfere with the camera.
Default True
centroid_at_origin : bool, optional
Whether the element's centroid is placed at the map origin (as opposed
to placing the element's lower-left corner at the map origin). Default
is `True`.
color_str : str, optional
The color string for the element. Default is `'darkblue'`.
"""
def __init__(self, name, shape_pts, pose=[0, 0, 0], visible=True,
has_relations=False, plot_relations=False,
blocks_camera=True, centroid_at_origin=True, map_bounds=None,
space_resolution=0.1, color_str='darkblue', alpha=0.5):
# Define basic MapElement properties
self.has_relations = has_relations
self.plot_relations = plot_relations
self.name = name
self.visible = visible
self.blocks_camera = blocks_camera
self.space_resolution = space_resolution
if color_str == 'none':
self.color = color_str
else:
self.color = cnames[color_str]
self.pose = pose
self.alpha = alpha
self.container_area = None
# If shape has only length and width, convert to point-based poly
if len(shape_pts) == 2:
shape_pts = [list(b) for b in
box(0, 0, shape_pts[0], shape_pts[1]).exterior.coords]
# Build the map element's polygon (shape)
if centroid_at_origin:
shape = Polygon(shape_pts)
x, y = shape.centroid.x, shape.centroid.y
shape_pts = [(p[0] - x, p[1] - y) for p in shape_pts]
self.shape = Polygon(shape_pts)
# Store polygon shape
self.base_shape = self.shape
# Place the shape at the correct pose
self.move_relative(pose)
def move_absolute(self, pose):
"""Moves shape to new pose"""
# Rotate about center
pts = self.base_shape.exterior.coords
center = self.base_shape.centroid
lines = []
for pt in pts:
line = LineString([center, pt])
lines.append(rotate(line, pose[2], origin=center))
pts = []
for line in lines:
pts.append(line.coords[1])
rotated_shape = Polygon(pts)
# Move shape to new pose
self.pose = pose
shape_pts = [(p[0] + pose[0], p[1] + pose[1])
for p in rotated_shape.exterior.coords]
self.shape = Polygon(shape_pts)
def move_relative(self, pose, rotation_pt=None):
"""Translate and rotate the shape.
The rotation is assumed to be about the element's centroid
unless a rotation point is specified.
Parameters
----------
pose : array_like, optional
The map element's initial [x, y, theta] in [m,m,degrees].
rotation_pt : array_like
The rotation point as [x,y] in [m,m]. Defaults to the centroid.
"""
if rotation_pt:
rotation_point = rotation_pt
else:
rotation_point = self.shape.centroid
# Rotate the polygon
self.rotate_poly(pose[2], rotation_point)
# Translate the polygon
self.pose = pose
shape_pts = [(p[0] + pose[0], p[1] + pose[1])
for p in self.shape.exterior.coords]
self.shape = Polygon(shape_pts)
def rotate_poly(self, angle, rotation_point):
"""Rotate the shape about a rotation point.
Parameters
----------
angle : float
The angle to be rotated in degrees.
rotation_pt : array_like
The rotation point as [x,y] in [m,m].
"""
pts = self.shape.exterior.coords
lines = []
for pt in pts:
line = LineString([rotation_point, pt])
lines.append(rotate(line, angle, origin=rotation_point))
pts = []
for line in lines:
pts.append(line.coords[1])
self.shape = Polygon(pts)
def get_patch(self, **kwargs):
"""Returns a polygon patch of the object for plotting purposes"""
patch = PolygonPatch(self.shape, facecolor=self.color,
alpha=self.alpha, zorder=2, **kwargs)
return patch
def plot(self, ax=None, alpha=0.5, plot_relations=False, **kwargs):
"""DO NOT USE
Plot the map_element as a polygon patch.
plot_relations : bool, optional
Plot the map element's spaces if true. Defaults to `False`.
ax : axes handle, optional
The axes to be used for plotting. Defaults to current axes.
alpha: float, optional
Transparency of all elements of the shape. Default is 0.5.
**kwargs
Arguments passed to ``PolygonPatch``.
Note
----
The spaces can be plotted without the shape if the shape's
``visible`` attribute is False, but ``plot_relations`` is True.
DO NOT USE, use get_patch and plot using the shapelayer
"""
if not ax:
ax = plt.gca()
patch = PolygonPatch(self.shape, facecolor=self.color,
alpha=alpha, zorder=2, **kwargs)
ax.add_patch(patch)
logging.warn('You should use get_patch instead of plot')
return patch
    def __str__(self):
        str_ = "{} is located at ({},{}), pointing at {}"
        return str_.format(self.name,
                           self.pose[0],
                           self.pose[1],
                           self.pose[2],
                           )
class MapObject(MapElement):
"""Physical object in the map.
long description of MapObject
"""
def __init__(self, name, shape_pts, color_str='darkseagreen', alpha=0.9,
visible=True, blocks_camera=True, has_relations=True,
allowed_relations=None, plot_relations=False, map_bounds=None,
ignoring_containers=True, **kwargs):
super(MapObject, self).__init__(name, shape_pts,
color_str=color_str,
visible=visible,
blocks_camera=blocks_camera,
has_relations=has_relations,
plot_relations=plot_relations,
alpha=alpha,
**kwargs
)
if self.has_relations:
self.plot_relations = plot_relations
else:
            self.plot_relations = False
self.container_area = None
self.ignoring_containers = ignoring_containers
def define_relations(self, map_bounds=None, pose=None):
"""Create a multimodal softmax model of spatial relationships.
Defaults to: 'Front', 'Back', 'Left', and 'Right'.
"""
if self.container_area is None or self.ignoring_containers:
container_poly = None
else:
container_poly = Polygon(self.container_area.shape)
# If not rectangular, approx. with rectangular
shape = self.shape
self.relations = binary_intrinsic_space_model(shape,
container_poly=container_poly,
bounds=map_bounds)
brm = binary_range_model(shape, bounds=map_bounds)
self.relations.binary_models['Near'] = brm.binary_models['Near']
class MapArea(MapElement):
"""short description of MapArea
long description of MapArea
"""
def __init__(self, name, shape_pts, color_str='blanchedalmond', alpha=0.2,
visible=False, blocks_camera=False, has_relations=True,
allowed_relations=None, map_bounds=None,
plot_relations=False, **kwargs):
super(MapArea, self).__init__(name, shape_pts,
color_str=color_str,
visible=visible,
blocks_camera=blocks_camera,
has_relations=has_relations,
plot_relations=plot_relations,
alpha=alpha,
**kwargs
)
if self.has_relations:
self.define_relations(map_bounds)
self.plot_relations = plot_relations
else:
self.plot_relations = False
self.contained_objects = {}
def define_relations(self, map_bounds=None):
"""Create a multimodal softmax model of spatial relationships.
Defaults to: 'Inside', 'Near', and 'Outside'.
"""
self.relations = binary_range_model(self.shape, bounds=map_bounds)
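

# A minimal sketch of the rotation performed by MapElement.rotate_poly(): instead of
# rotating a LineString from the pivot to each vertex and collecting the endpoints,
# the whole polygon can be rotated at once with shapely.affinity.rotate. This only
# assumes shapely is installed; the square and pivot below are illustrative values,
# not taken from this module.
if __name__ == '__main__':
    from shapely.geometry import Point as _Point, Polygon as _Polygon
    from shapely.affinity import rotate as _rotate

    _square = _Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    _pivot = _Point(0, 0)
    _rotated = _rotate(_square, 90, origin=_pivot)  # 90 degrees CCW about (0, 0)
    print(list(_rotated.exterior.coords))  # vertices of the rotated square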
| 36.368254
| 84
| 0.58118
|
d4d35611df9ade3fa6bbd60c04320bec05194d1d
| 2,007
|
py
|
Python
|
modules/image/object_detection/yolov3_resnet50_vd_coco2017/data_feed.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 8,360
|
2019-01-18T10:46:45.000Z
|
2022-03-31T14:50:02.000Z
|
modules/image/object_detection/yolov3_resnet50_vd_coco2017/data_feed.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 1,158
|
2019-04-11T09:22:43.000Z
|
2022-03-31T12:12:09.000Z
|
modules/image/object_detection/yolov3_resnet50_vd_coco2017/data_feed.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 1,677
|
2019-04-09T15:07:40.000Z
|
2022-03-31T06:41:10.000Z
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import cv2
import numpy as np
__all__ = ['reader']
def reader(paths=[], images=None):
"""
data generator
Args:
paths (list[str]): paths to images.
images (list(numpy.ndarray)): data of images, shape of each is [H, W, C]
Yield:
res (list): preprocessed image and the size of original image.
"""
img_list = []
if paths:
assert type(paths) is list, "type(paths) is not list."
for img_path in paths:
assert os.path.isfile(
img_path), "The {} isn't a valid file path.".format(img_path)
img = cv2.imread(img_path).astype('float32')
img_list.append(img)
if images is not None:
for img in images:
img_list.append(img)
for im in img_list:
# im_size
im_shape = im.shape
im_size = np.array([im_shape[0], im_shape[1]], dtype=np.int32)
# decode image
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# resize image
target_size = 608
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
if float(im_size_min) == 0:
raise ZeroDivisionError('min size of image is 0')
im_scale_x = float(target_size) / float(im_shape[1])
im_scale_y = float(target_size) / float(im_shape[0])
im = cv2.resize(
im, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=2)
# normalize image
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
im = im.astype(np.float32, copy=False)
mean = np.array(mean)[np.newaxis, np.newaxis, :]
std = np.array(std)[np.newaxis, np.newaxis, :]
im = im / 255.0
im -= mean
im /= std
# permute
im = np.swapaxes(im, 1, 2)
im = np.swapaxes(im, 1, 0)
yield [im, im_size]
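

# Minimal smoke test for reader() with a synthetic image. This block is illustrative
# only; the expected shapes follow from the resize to 608x608 and the HWC -> CHW
# permutation performed above.
if __name__ == '__main__':
    fake_img = (np.random.rand(480, 640, 3) * 255).astype('float32')  # H, W, C
    for processed, orig_size in reader(images=[fake_img]):
        print(processed.shape)  # (3, 608, 608)
        print(orig_size)        # [480 640]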
| 27.875
| 80
| 0.577479
|
afd5abe6d87baba519ddab795a5f3bb79ec5d3ac
| 12,433
|
py
|
Python
|
tests/kubernetes/test_pod_generator.py
|
InigoSJ/airflow
|
8b97a387dc30d8c88390d500ec99333798c20f1c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-09-06T09:55:18.000Z
|
2019-09-06T09:55:18.000Z
|
tests/kubernetes/test_pod_generator.py
|
InigoSJ/airflow
|
8b97a387dc30d8c88390d500ec99333798c20f1c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/kubernetes/test_pod_generator.py
|
InigoSJ/airflow
|
8b97a387dc30d8c88390d500ec99333798c20f1c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import unittest.mock as mock
import kubernetes.client.models as k8s
from kubernetes.client import ApiClient
from airflow.kubernetes.secret import Secret
from airflow.kubernetes.pod_generator import PodGenerator, PodDefaults
from airflow.kubernetes.pod import Resources
from airflow.kubernetes.k8s_model import append_to_pod
class TestPodGenerator(unittest.TestCase):
def setUp(self):
self.envs = {
'ENVIRONMENT': 'prod',
'LOG_LEVEL': 'warning'
}
self.secrets = [
# This should be a secretRef
Secret('env', None, 'secret_a'),
# This should be a single secret mounted in volumeMounts
Secret('volume', '/etc/foo', 'secret_b'),
# This should produce a single secret mounted in env
Secret('env', 'TARGET', 'secret_b', 'source_b'),
]
self.resources = Resources('1Gi', 1, '2Gi', 2, 1)
self.k8s_client = ApiClient()
self.expected = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': 'myapp-pod-0',
'labels': {'app': 'myapp'},
'namespace': 'default'
},
'spec': {
'containers': [{
'name': 'base',
'image': 'busybox',
'args': [],
'command': [
'sh', '-c', 'echo Hello Kubernetes!'
],
'imagePullPolicy': 'IfNotPresent',
'env': [{
'name': 'ENVIRONMENT',
'value': 'prod'
}, {
'name': 'LOG_LEVEL',
'value': 'warning'
}, {
'name': 'TARGET',
'valueFrom': {
'secretKeyRef': {
'name': 'secret_b',
'key': 'source_b'
}
}
}],
'envFrom': [{
'configMapRef': {
'name': 'configmap_a'
}
}, {
'configMapRef': {
'name': 'configmap_b'
}
}, {
'secretRef': {
'name': 'secret_a'
}
}],
'resources': {
'requests': {
'memory': '1Gi',
'cpu': 1
},
'limits': {
'memory': '2Gi',
'cpu': 2,
'nvidia.com/gpu': 1
},
},
'ports': [{'name': 'foo', 'containerPort': 1234}],
'volumeMounts': [{
'mountPath': '/etc/foo',
'name': 'secretvol0',
'readOnly': True
}]
}],
'restartPolicy': 'Never',
'volumes': [{
'name': 'secretvol0',
'secret': {
'secretName': 'secret_b'
}
}],
'hostNetwork': False,
'imagePullSecrets': [
{'name': 'pull_secret_a'},
{'name': 'pull_secret_b'}
],
'securityContext': {
'runAsUser': 1000,
'fsGroup': 2000,
},
}
}
@mock.patch('uuid.uuid4')
def test_gen_pod(self, mock_uuid):
mock_uuid.return_value = '0'
pod_generator = PodGenerator(
labels={'app': 'myapp'},
name='myapp-pod',
image_pull_secrets='pull_secret_a,pull_secret_b',
image='busybox',
envs=self.envs,
cmds=['sh', '-c', 'echo Hello Kubernetes!'],
security_context=k8s.V1PodSecurityContext(
run_as_user=1000,
fs_group=2000,
),
namespace='default',
ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
configmaps=['configmap_a', 'configmap_b']
)
result = pod_generator.gen_pod()
result = append_to_pod(result, self.secrets)
result = self.resources.attach_to_pod(result)
result_dict = self.k8s_client.sanitize_for_serialization(result)
# sort
result_dict['spec']['containers'][0]['env'].sort(key=lambda x: x['name'])
result_dict['spec']['containers'][0]['envFrom'].sort(
key=lambda x: list(x.values())[0]['name']
)
self.assertDictEqual(result_dict, self.expected)
@mock.patch('uuid.uuid4')
def test_gen_pod_extract_xcom(self, mock_uuid):
mock_uuid.return_value = '0'
pod_generator = PodGenerator(
labels={'app': 'myapp'},
name='myapp-pod',
image_pull_secrets='pull_secret_a,pull_secret_b',
image='busybox',
envs=self.envs,
cmds=['sh', '-c', 'echo Hello Kubernetes!'],
namespace='default',
security_context=k8s.V1PodSecurityContext(
run_as_user=1000,
fs_group=2000,
),
ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
configmaps=['configmap_a', 'configmap_b']
)
pod_generator.extract_xcom = True
result = pod_generator.gen_pod()
result = append_to_pod(result, self.secrets)
result = self.resources.attach_to_pod(result)
result_dict = self.k8s_client.sanitize_for_serialization(result)
container_two = {
'name': 'airflow-xcom-sidecar',
'image': 'python:3.5-alpine',
'command': ['python', '-c', PodDefaults.XCOM_CMD],
'volumeMounts': [
{
'name': 'xcom',
'mountPath': '/airflow/xcom'
}
]
}
self.expected['spec']['containers'].append(container_two)
self.expected['spec']['containers'][0]['volumeMounts'].insert(0, {
'name': 'xcom',
'mountPath': '/airflow/xcom'
})
self.expected['spec']['volumes'].insert(0, {
'name': 'xcom', 'emptyDir': {}
})
result_dict['spec']['containers'][0]['env'].sort(key=lambda x: x['name'])
self.assertEqual(result_dict, self.expected)
@mock.patch('uuid.uuid4')
def test_from_obj(self, mock_uuid):
mock_uuid.return_value = '0'
result = PodGenerator.from_obj({
"KubernetesExecutor": {
"annotations": {"test": "annotation"},
"volumes": [
{
"name": "example-kubernetes-test-volume",
"hostPath": {"path": "/tmp/"},
},
],
"volume_mounts": [
{
"mountPath": "/foo/",
"name": "example-kubernetes-test-volume",
},
],
"securityContext": {
"runAsUser": 1000
}
}
})
result = self.k8s_client.sanitize_for_serialization(result)
self.assertEqual({
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'annotations': {'test': 'annotation'},
},
'spec': {
'containers': [{
'args': [],
'command': [],
'env': [],
'envFrom': [],
'name': 'base',
'ports': [],
'volumeMounts': [{
'mountPath': '/foo/',
'name': 'example-kubernetes-test-volume'
}],
}],
'imagePullSecrets': [],
'volumes': [{
'hostPath': {'path': '/tmp/'},
'name': 'example-kubernetes-test-volume'
}],
}
}, result)
def test_reconcile_pods(self):
with mock.patch('uuid.uuid4') as mock_uuid:
mock_uuid.return_value = '0'
base_pod = PodGenerator(
image='image1',
name='name1',
envs={'key1': 'val1'},
cmds=['/bin/command1.sh', 'arg1'],
ports=k8s.V1ContainerPort(name='port', container_port=2118),
volumes=[{
'hostPath': {'path': '/tmp/'},
'name': 'example-kubernetes-test-volume1'
}],
volume_mounts=[{
'mountPath': '/foo/',
'name': 'example-kubernetes-test-volume1'
}],
).gen_pod()
mutator_pod = PodGenerator(
envs={'key2': 'val2'},
image='',
name='name2',
cmds=['/bin/command2.sh', 'arg2'],
volumes=[{
'hostPath': {'path': '/tmp/'},
'name': 'example-kubernetes-test-volume2'
}],
volume_mounts=[{
'mountPath': '/foo/',
'name': 'example-kubernetes-test-volume2'
}]
).gen_pod()
result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
result = self.k8s_client.sanitize_for_serialization(result)
self.assertEqual(result, {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'name': 'name2-0'},
'spec': {
'containers': [{
'args': [],
'command': ['/bin/command1.sh', 'arg1'],
'env': [
{'name': 'key1', 'value': 'val1'},
{'name': 'key2', 'value': 'val2'}
],
'envFrom': [],
'image': 'image1',
'imagePullPolicy': 'IfNotPresent',
'name': 'base',
'ports': {
'containerPort': 2118,
'name': 'port',
},
'volumeMounts': [{
'mountPath': '/foo/',
'name': 'example-kubernetes-test-volume1'
}, {
'mountPath': '/foo/',
'name': 'example-kubernetes-test-volume2'
}]
}],
'hostNetwork': False,
'imagePullSecrets': [],
'restartPolicy': 'Never',
'volumes': [{
'hostPath': {'path': '/tmp/'},
'name': 'example-kubernetes-test-volume1'
}, {
'hostPath': {'path': '/tmp/'},
'name': 'example-kubernetes-test-volume2'
}]
}
})
| 37.790274
| 81
| 0.419529
|
1435568d1c2fbeb3961f8818877095c0e3fbdf9c
| 8,933
|
py
|
Python
|
WeatherStationSensorsReader/main/main_class.py
|
weather-station-project/weather-station-sensors-reader
|
cda7902ee382248b41d14b9a2c0543817decbb4a
|
[
"MIT"
] | null | null | null |
WeatherStationSensorsReader/main/main_class.py
|
weather-station-project/weather-station-sensors-reader
|
cda7902ee382248b41d14b9a2c0543817decbb4a
|
[
"MIT"
] | null | null | null |
WeatherStationSensorsReader/main/main_class.py
|
weather-station-project/weather-station-sensors-reader
|
cda7902ee382248b41d14b9a2c0543817decbb4a
|
[
"MIT"
] | null | null | null |
import logging
from controllers.air_measurement_controller import AirMeasurementController
from controllers.ambient_temperature_controller import AmbientTemperatureController
from controllers.fake_controller import FakeController
from controllers.ground_temperature_controller import GroundTemperatureController
from controllers.rainfall_controller import RainfallController
from controllers.wind_measurement_controller import WindMeasurementController
from exceptions.dao_exception import DaoException
from exceptions.sensor_exception import SensorException
from health_check.health_check_file_manager import register_error_in_health_check_file, APP_KEY
class Main(object):
"""Represents the main class when the app is started"""
# Environment variables
LOGGING_LEVEL_VARIABLE = 'LOGGING_LEVEL'
MINUTES_BETWEEN_READINGS_VARIABLE = 'MINUTES_BETWEEN_READINGS'
FAKE_SENSOR_VARIABLE = 'FAKE_SENSOR_ENABLED'
BME_280_SENSOR_VARIABLE = 'BME_280_SENSOR_ENABLED'
GROUND_SENSOR_VARIABLE = 'GROUND_SENSOR_ENABLED'
RAINFALL_SENSOR_VARIABLE = 'RAINFALL_SENSOR_ENABLED'
WIND_SENSOR_VARIABLE = 'WIND_SENSOR_ENABLED'
ANEMOMETER_PORT_NUMBER_VARIABLE = 'ANEMOMETER_PORT_NUMBER'
RAIN_GAUGE_PORT_NUMBER_VARIABLE = 'RAIN_GAUGE_PORT_NUMBER'
SERVER_VARIABLE = 'SERVER'
DATABASE_VARIABLE = 'DATABASE'
USER_VARIABLE = 'USER'
PASSWORD_VARIABLE = 'PASSWORD'
# LOGGING CONSTANTS
LOGGING_LEVELS = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG}
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
# DEFAULT VALUES
DEFAULT_MINUTES_BETWEEN_READINGS = 5
DEFAULT_ANEMOMETER_PORT_NUMBER = 22
DEFAULT_RAIN_GAUGE_PORT_NUMBER = 25
def __init__(self, variables):
self.variables = variables
def validate_environment_variables(self):
self.validate_generic_variables()
self.validate_sensors_variables()
self.validate_database_variables()
def validate_generic_variables(self):
if self.LOGGING_LEVEL_VARIABLE in self.variables:
self.check_in_expected_values(variable_name=self.LOGGING_LEVEL_VARIABLE, expected_values=self.LOGGING_LEVELS.keys())
if self.MINUTES_BETWEEN_READINGS_VARIABLE in self.variables:
self.check_positive_integer_value(variable_name=self.MINUTES_BETWEEN_READINGS_VARIABLE)
def check_bool_value(self, variable_name):
value = self.variables[variable_name]
if value != 'true' and value != 'false':
raise ValueError(f'"{value}" is not a valid boolean value.')
def check_in_expected_values(self, variable_name, expected_values):
value = self.variables[variable_name]
if value not in expected_values:
raise ValueError(f'"{value}" is not in the expected values "{expected_values}".')
def check_positive_integer_value(self, variable_name):
value = self.variables[variable_name]
try:
val = int(value)
if val < 0:
raise ValueError
except ValueError:
raise ValueError(f'"{value}" is not a valid positive integer.')
def validate_sensors_variables(self):
if self.FAKE_SENSOR_VARIABLE in self.variables:
self.check_bool_value(variable_name=self.FAKE_SENSOR_VARIABLE)
if self.BME_280_SENSOR_VARIABLE in self.variables:
self.check_bool_value(variable_name=self.BME_280_SENSOR_VARIABLE)
if self.GROUND_SENSOR_VARIABLE in self.variables:
self.check_bool_value(variable_name=self.GROUND_SENSOR_VARIABLE)
if self.RAINFALL_SENSOR_VARIABLE in self.variables:
self.check_bool_value(variable_name=self.RAINFALL_SENSOR_VARIABLE)
if self.WIND_SENSOR_VARIABLE in self.variables:
self.check_bool_value(variable_name=self.WIND_SENSOR_VARIABLE)
if self.ANEMOMETER_PORT_NUMBER_VARIABLE in self.variables:
self.check_positive_integer_value(variable_name=self.ANEMOMETER_PORT_NUMBER_VARIABLE)
if self.RAIN_GAUGE_PORT_NUMBER_VARIABLE in self.variables:
self.check_positive_integer_value(variable_name=self.RAIN_GAUGE_PORT_NUMBER_VARIABLE)
def validate_database_variables(self):
if self.SERVER_VARIABLE in self.variables:
self.check_not_null_value(variable_name=self.SERVER_VARIABLE)
if self.DATABASE_VARIABLE in self.variables:
self.check_not_null_value(variable_name=self.DATABASE_VARIABLE)
if self.USER_VARIABLE in self.variables:
self.check_not_null_value(variable_name=self.USER_VARIABLE)
if self.PASSWORD_VARIABLE in self.variables:
self.check_not_null_value(variable_name=self.PASSWORD_VARIABLE)
def check_not_null_value(self, variable_name):
value = self.variables[variable_name]
if not value:
            raise ValueError(f'"{variable_name}" must have a non-empty value.')
def configure_logging(self):
if self.LOGGING_LEVEL_VARIABLE not in self.variables:
return
for handler in logging.root.handlers[:]:
logging.root.removeHandler(hdlr=handler)
level_value = self.variables[self.LOGGING_LEVEL_VARIABLE]
logging.basicConfig(level=self.LOGGING_LEVELS[level_value], format=self.LOG_FORMAT)
def get_controllers_enabled(self):
server = self.variables[self.SERVER_VARIABLE] if self.SERVER_VARIABLE in self.variables else None
database = self.variables[self.DATABASE_VARIABLE] if self.DATABASE_VARIABLE in self.variables else None
user = self.variables[self.USER_VARIABLE] if self.USER_VARIABLE in self.variables else None
password = self.variables[self.PASSWORD_VARIABLE] if self.PASSWORD_VARIABLE in self.variables else None
controllers = []
if self.is_controller_enabled(self.FAKE_SENSOR_VARIABLE):
controllers.append(FakeController(server=server, database=database, user=user, password=password))
# When the fake controller is enabled, it will be the only one working
return controllers
if self.is_controller_enabled(self.BME_280_SENSOR_VARIABLE):
controllers.append(AmbientTemperatureController(server=server, database=database, user=user, password=password))
controllers.append(AirMeasurementController(server=server, database=database, user=user, password=password))
if self.is_controller_enabled(self.GROUND_SENSOR_VARIABLE):
controllers.append(GroundTemperatureController(server=server, database=database, user=user, password=password))
if self.is_controller_enabled(self.WIND_SENSOR_VARIABLE):
controllers.append(
WindMeasurementController(anemometer_port_number=self.get_value_as_int(variable_name=self.ANEMOMETER_PORT_NUMBER_VARIABLE,
default_value=self.DEFAULT_ANEMOMETER_PORT_NUMBER),
server=server,
database=database,
user=user,
password=password))
if self.is_controller_enabled(self.RAINFALL_SENSOR_VARIABLE):
controllers.append(RainfallController(rain_gauge_port_number=self.get_value_as_int(variable_name=self.RAIN_GAUGE_PORT_NUMBER_VARIABLE,
default_value=self.DEFAULT_RAIN_GAUGE_PORT_NUMBER),
server=server,
database=database,
user=user,
password=password))
return controllers
def is_controller_enabled(self, variable_name):
return variable_name in self.variables and self.variables[variable_name] == 'true'
def get_value_as_int(self, variable_name, default_value):
if variable_name in self.variables:
return int(self.variables[variable_name])
return default_value
@staticmethod
def execute_controllers(controllers):
for controller in controllers:
try:
controller.execute()
except (DaoException, SensorException) as e:
logging.error(e, exc_info=True)
register_error_in_health_check_file(key=e.class_name, message=repr(e))
except Exception as e:
logging.exception(f'Error while executing controller "{controller.__class__.__name__}".')
register_error_in_health_check_file(key=APP_KEY, message=repr(e))
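

# A minimal usage sketch for Main. Illustrative only: real deployments presumably build
# the variables dict from os.environ; the sample values below are assumptions, not
# defaults defined in this module.
if __name__ == '__main__':
    sample_variables = {
        'LOGGING_LEVEL': 'INFO',
        'MINUTES_BETWEEN_READINGS': '5',
        'FAKE_SENSOR_ENABLED': 'true',
    }
    main = Main(variables=sample_variables)
    main.validate_environment_variables()  # raises ValueError on invalid settings
    main.configure_logging()
    logging.info('Environment variables validated and logging configured.')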
| 46.284974
| 146
| 0.692153
|
c8c24f886863835c23254414d81d1955775e62f2
| 5,213
|
py
|
Python
|
docs/conf.py
|
brentru/Adafruit_CircuitPython_hashlib
|
5210b90a8b2b51b0b0f59982e1af06773b66e960
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
brentru/Adafruit_CircuitPython_hashlib
|
5210b90a8b2b51b0b0f59982e1af06773b66e960
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
brentru/Adafruit_CircuitPython_hashlib
|
5210b90a8b2b51b0b0f59982e1af06773b66e960
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit hashlib Library'
copyright = u'2019 Brent Rubell'
author = u'Brent Rubell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitHashlibLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruithashlibLibrary.tex', u'Adafruithashlib Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Adafruithashliblibrary', u'Adafruit hashlib Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruithashlibLibrary', u'Adafruit hashlib Library Documentation',
author, 'AdafruithashlibLibrary', 'One line description of project.',
'Miscellaneous'),
]
| 32.378882
| 146
| 0.684826
|
a0ee821f78243939eec74546b03f879a4b9b02ac
| 82,442
|
py
|
Python
|
python/ccxt/base/exchange.py
|
MMeSports/ccxt
|
175229eae57e46f90ad531650abcea4d666514f7
|
[
"MIT"
] | 1
|
2021-07-07T14:56:56.000Z
|
2021-07-07T14:56:56.000Z
|
python/ccxt/base/exchange.py
|
MMeSports/ccxt
|
175229eae57e46f90ad531650abcea4d666514f7
|
[
"MIT"
] | null | null | null |
python/ccxt/base/exchange.py
|
MMeSports/ccxt
|
175229eae57e46f90ad531650abcea4d666514f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.42.16'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import RateLimitExceeded
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, NO_PADDING, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
# -----------------------------------------------------------------------------
# rsa jwt signing
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# -----------------------------------------------------------------------------
# ecdsa signing
from ccxt.static_dependencies import ecdsa
# eddsa signing
try:
import axolotl_curve25519 as eddsa
except ImportError:
eddsa = None
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
import random
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException, ConnectionError as requestsConnectionError
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
from time import mktime
from wsgiref.handlers import format_date_time
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
# web3/0x imports
try:
from web3 import Web3, HTTPProvider
except ImportError:
Web3 = HTTPProvider = None # web3/0x not supported in Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
name = None
version = None
certified = False
pro = False
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
verify = True # SSL verification
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
timeframes = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
urls = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': RateLimitExceeded,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'410': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
myTrades = None
trades = None
transactions = None
ohlcvs = None
tickers = None
base_currencies = None
quote_currencies = None
currencies = None
options = None # Python does not allow to define properties in run-time with setattr
accounts = None
status = {
'status': 'ok',
'updated': None,
'eta': None,
'url': None,
}
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'loadMarkets': True,
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'CORS': False,
'createDepositAddress': False,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchL2OrderBook': True,
'fetchLedger': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchOrderTrades': False,
'fetchStatus': 'emulated',
'fetchTicker': True,
'fetchTickers': False,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchFundingFee': False,
'fetchFundingFees': False,
'fetchTradingLimits': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'privateAPI': True,
'publicAPI': True,
'signIn': False,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
paddingMode = NO_PADDING
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresWeb3 = False
requiresEddsa = False
web3 = None
base58_encoder = None
base58_decoder = None
# no lower case l or upper case I, O
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
def __init__(self, config={}):
self.precision = dict() if self.precision is None else self.precision
self.limits = dict() if self.limits is None else self.limits
self.exceptions = dict() if self.exceptions is None else self.exceptions
self.headers = dict() if self.headers is None else self.headers
self.balance = dict() if self.balance is None else self.balance
self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
self.tickers = dict() if self.tickers is None else self.tickers
self.trades = dict() if self.trades is None else self.trades
self.transactions = dict() if self.transactions is None else self.transactions
self.ohlcvs = dict() if self.ohlcvs is None else self.ohlcvs
self.currencies = dict() if self.currencies is None else self.currencies
self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr
self.decimal_to_precision = decimal_to_precision
self.number_to_string = number_to_string
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.origin = self.uuid()
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# convert all properties from underscore notation foo_bar to camelcase notation fooBar
cls = type(self)
for name in dir(self):
if name[0] != '_' and name[-1] != '_' and '_' in name:
parts = name.split('_')
# fetch_ohlcv → fetchOHLCV (not fetchOhlcv!)
exceptions = {'ohlcv': 'OHLCV', 'le': 'LE', 'be': 'BE'}
camelcase = parts[0] + ''.join(exceptions.get(i, self.capitalize(i)) for i in parts[1:])
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
setattr(cls, camelcase, getattr(cls, name))
else:
setattr(self, camelcase, attr)
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit if self.rateLimit > 0 else float('inf'),
'delay': 0.001,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket', {}))
self.session = self.session if self.session or self.asyncio_loop else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
if self.requiresWeb3 and Web3 and not Exchange.web3:
Exchange.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def __repr__(self):
return 'ccxt.' + ('async_support.' if self.asyncio_loop else '') + self.id + '()'
def __str__(self):
return self.name
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['apiBackup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'apiBackup' in self.urls:
self.urls['api'] = self.urls['apiBackup']
del self.urls['apiBackup']
@classmethod
def define_rest_api(cls, api, method_name, paths=[]):
delimiters = re.compile('[^a-zA-Z0-9]')
entry = getattr(cls, method_name) # returns a function (instead of a bound method)
for key, value in api.items():
if isinstance(value, list):
uppercase_method = key.upper()
lowercase_method = key.lower()
camelcase_method = lowercase_method.capitalize()
for path in value:
path = path.strip()
split_path = delimiters.split(path)
lowercase_path = [x.strip().lower() for x in split_path]
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
underscore_suffix = '_'.join([x for x in lowercase_path if len(x)])
camelcase_prefix = ''
underscore_prefix = ''
if len(paths):
camelcase_prefix = paths[0]
underscore_prefix = paths[0]
if len(paths) > 1:
camelcase_prefix += ''.join([Exchange.capitalize(x) for x in paths[1:]])
underscore_prefix += '_' + '_'.join([x.strip() for p in paths[1:] for x in delimiters.split(p)])
api_argument = paths
else:
api_argument = paths[0]
camelcase = camelcase_prefix + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = underscore_prefix + '_' + lowercase_method + '_' + underscore_suffix.lower()
def partialer():
outer_kwargs = {'path': path, 'api': api_argument, 'method': uppercase_method}
@functools.wraps(entry)
def inner(_self, params=None):
"""
Inner is called when a generated method (publicGetX) is called.
_self is a reference to self created by function.__get__(exchange, type(exchange))
https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
"""
inner_kwargs = dict(outer_kwargs) # avoid mutation
if params is not None:
inner_kwargs['params'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
else:
cls.define_rest_api(value, method_name, paths + [key])
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
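    # Worked example of the throttle arithmetic above (illustrative): with the default
    # rateLimit of 2000 ms, if only 500 ms have elapsed since the last REST request,
    # throttle() sleeps for (2000 - 500) / 1000.0 = 1.5 seconds before returning.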
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""Exchange.request is the entry point for all generated methods"""
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def throw_exactly_matched_exception(self, exact, string, message):
if string in exact:
raise exact[string](message)
def throw_broadly_matched_exception(self, broad, string, message):
broad_key = self.find_broadly_matched_key(broad, string)
if broad_key is not None:
raise broad[broad_key](message)
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return self.set_headers(headers)
def print(self, *args):
print(*args)
def set_headers(self, headers):
return headers
def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
pass
def on_rest_response(self, code, reason, url, method, response_headers, response_body, request_headers, request_body):
return response_body.strip()
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
request_body = body
if body:
body = body.encode()
self.session.cookies.clear()
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies,
verify=self.verify
)
# does not try to detect encoding
response.encoding = 'utf-8'
headers = response.headers
http_status_code = response.status_code
http_status_text = response.reason
http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, response.text, request_headers, request_body)
json_response = self.parse_json(http_response)
# FIXME remove last_x_responses from subclasses
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
response.raise_for_status()
except Timeout as e:
details = ' '.join([self.id, method, url])
raise RequestTimeout(details) from e
except TooManyRedirects as e:
details = ' '.join([self.id, method, url])
raise ExchangeError(details) from e
except SSLError as e:
details = ' '.join([self.id, method, url])
raise ExchangeError(details) from e
except HTTPError as e:
details = ' '.join([self.id, method, url])
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
raise ExchangeError(details) from e
except requestsConnectionError as e:
error_string = str(e)
details = ' '.join([self.id, method, url])
if 'Read timed out' in error_string:
raise RequestTimeout(details) from e
else:
raise NetworkError(details) from e
except RequestException as e: # base exception class
error_string = str(e)
details = ' '.join([self.id, method, url])
if any(x in error_string for x in ['ECONNRESET', 'Connection aborted.', 'Connection broken:']):
raise NetworkError(details) from e
else:
raise ExchangeError(details) from e
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
if json_response is not None:
return json_response
elif self.is_text_response(headers):
return http_response
else:
return response.content
def handle_http_status_code(self, http_status_code, http_status_text, url, method, body):
string_code = str(http_status_code)
if string_code in self.httpExceptions:
Exception = self.httpExceptions[string_code]
raise Exception(' '.join([self.id, method, url, string_code, http_status_text, body]))
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
        except ValueError:  # ValueError is the superclass of json.JSONDecodeError (and what Python 2 raises)
pass
def is_text_response(self, headers):
# https://github.com/ccxt/ccxt/issues/5302
content_type = headers.get('Content-Type', '')
return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key and key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if Exchange.key_exists(dictionary, key):
value = float(dictionary[key])
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_lower(dictionary, key, default_value=None):
return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_upper(dictionary, key, default_value=None):
return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if not Exchange.key_exists(dictionary, key):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_integer_product(dictionary, key, factor, default_value=None):
if not Exchange.key_exists(dictionary, key):
return default_value
value = dictionary[key]
if isinstance(value, Number):
return int(value * factor)
elif isinstance(value, basestring):
try:
return int(float(value) * factor)
except ValueError:
pass
return default_value
@staticmethod
def safe_timestamp(dictionary, key, default_value=None):
return Exchange.safe_integer_product(dictionary, key, 1000, default_value)
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_lower_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_upper_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
value = Exchange.safe_integer_product(dictionary, key1, factor)
return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)
@staticmethod
def safe_timestamp_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)
@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid22(length=22):
return format(random.getrandbits(length * 4), 'x')
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def uuidv1():
return str(uuid.uuid1()).replace('-', '')
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def strip(string):
return string.strip()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
@staticmethod
def filter_by(array, key, value=None):
array = Exchange.to_array(array)
return list(filter(lambda x: x[key] == value, array))
@staticmethod
def filterBy(array, key, value=None):
return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
is_int_key = isinstance(key, int)
for element in array:
if ((is_int_key and (key < len(element))) or (key in element)) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def urlencode(params={}, doseq=False):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params, doseq)
@staticmethod
def urlencode_with_array_repeat(params={}):
return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params, True))
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri, safe="~()*!.'"):
return _urlencode.quote(uri, safe=safe)
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume, *_] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, (int, long)):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
    def rfc2616(timestamp=None):
if timestamp is None:
ts = datetime.datetime.now()
else:
ts = timestamp
stamp = mktime(ts.timetuple())
return format_date_time(stamp)
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
ms = (ms + '00')[0:4]
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
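    # Illustrative sketch of parse8601(): returns a millisecond UTC timestamp,
    # or None when the string cannot be parsed:
    #   Exchange.parse8601('2020-01-01T00:00:00.123Z')  # -> 1577836800123
    #   Exchange.parse8601('not a date')                # -> None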
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
if algorithm == 'keccak':
binary = bytes(Exchange.web3.sha3(request))
else:
h = hashlib.new(algorithm, request)
binary = h.digest()
if digest == 'base64':
return Exchange.binary_to_base64(binary)
elif digest == 'hex':
return Exchange.binary_to_base16(binary)
return binary
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
binary = h.digest()
if digest == 'hex':
return Exchange.binary_to_base16(binary)
elif digest == 'base64':
return Exchange.binary_to_base64(binary)
return binary
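    # Usage sketch of hmac(): both the request and the secret must be bytes; the
    # default settings return a lowercase hex HMAC-SHA256 digest (64 hex chars):
    #   signature = Exchange.hmac(Exchange.encode('payload'), Exchange.encode('secret'))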
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_concat_array(array):
result = bytes()
for element in array:
result = result + element
return result
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def binary_to_base64(s):
return Exchange.decode(base64.standard_b64encode(s))
@staticmethod
def base64_to_binary(s):
return base64.standard_b64decode(s)
@staticmethod
def string_to_base64(s):
# will return string in the future
binary = Exchange.encode(s) if isinstance(s, str) else s
return Exchange.encode(Exchange.binary_to_base64(binary))
@staticmethod
def base64_to_string(s):
return base64.b64decode(s).decode('utf-8')
@staticmethod
def jwt(request, secret, alg='HS256'):
algos = {
'HS256': hashlib.sha256,
'HS384': hashlib.sha384,
'HS512': hashlib.sha512,
}
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encoded_header = Exchange.base64urlencode(header)
encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encoded_header + '.' + encoded_data
if alg[:2] == 'RS':
signature = Exchange.rsa(token, secret, alg)
else:
algorithm = algos[alg]
signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
return token + '.' + Exchange.base64urlencode(signature)
@staticmethod
def rsa(request, secret, alg='RS256'):
algorithms = {
"RS256": hashes.SHA256(),
"RS384": hashes.SHA384(),
"RS512": hashes.SHA512(),
}
algorithm = algorithms[alg]
priv_key = load_pem_private_key(secret, None, backends.default_backend())
return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
@staticmethod
def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
        # you're welcome - frosty00
algorithms = {
'p192': [ecdsa.NIST192p, 'sha256'],
'p224': [ecdsa.NIST224p, 'sha256'],
'p256': [ecdsa.NIST256p, 'sha256'],
'p384': [ecdsa.NIST384p, 'sha384'],
'p521': [ecdsa.NIST521p, 'sha512'],
'secp256k1': [ecdsa.SECP256k1, 'sha256'],
}
if algorithm not in algorithms:
raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
curve_info = algorithms[algorithm]
hash_function = getattr(hashlib, curve_info[1])
encoded_request = Exchange.encode(request)
if hash is not None:
digest = Exchange.hash(encoded_request, hash, 'binary')
else:
digest = base64.b16decode(encoded_request, casefold=True)
key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
casefold=True), curve=curve_info[0])
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize)
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter = 0
minimum_size = (1 << (8 * 31)) - 1
half_order = key.privkey.order / 2
while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize,
extra_entropy=Exchange.number_to_le(counter, 32))
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter += 1
r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
return {
'r': r,
's': s,
'v': v,
}
@staticmethod
def eddsa(request, secret, curve='ed25519'):
random = b'\x00' * 64
request = base64.b16decode(request, casefold=True)
secret = base64.b16decode(secret, casefold=True)
signature = eddsa.calculateSignature(random, secret, request)
return Exchange.binary_to_base58(signature)
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, basestring) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode('latin-1')
@staticmethod
def decode(string):
return string.decode('latin-1')
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
raise AuthenticationError('requires `' + key + '`')
else:
return error
return True
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
raise InvalidAddress('address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
raise InvalidAddress('address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': None,
'used': None,
'total': None,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
if self.currencies:
if commonCode in self.currencies:
return self.currencies[commonCode]['id']
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
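    # Illustrative sketch of precision_from_string(): trailing zeros are stripped
    # before the decimal places are counted:
    #   self.precision_from_string('0.00010')  # -> 4
    #   self.precision_from_string('100')      # -> 0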
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], self.precisionMode, self.paddingMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode, self.paddingMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode, self.paddingMode)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode, self.paddingMode)
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode, self.paddingMode)
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(self.markets.keys())
self.ids = sorted(self.markets_by_id.keys())
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if (('baseId' in market) and (market['baseId'] is not None)) else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if (('quoteId' in market) and (market['quoteId'] is not None)) else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
base_currencies = self.sort_by(base_currencies, 'code')
quote_currencies = self.sort_by(quote_currencies, 'code')
self.base_currencies = self.index_by(base_currencies, 'code')
self.quote_currencies = self.index_by(quote_currencies, 'code')
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
return self.loaded_fees
def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
def cancel_unified_order(self, order, params={}):
return self.cancel_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
def fetch_bids_asks(self, symbols=None, params={}) -> dict:
        raise NotSupported('API does not allow fetching all prices at once with a single call to fetch_bids_asks() for now')
def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
def fetch_tickers(self, symbols=None, params={}):
        raise NotSupported('API does not allow fetching all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
def fetch_order(self, id, symbol=None, params={}):
raise NotSupported('fetch_order() is not supported yet')
def fetch_unified_order(self, order, params={}):
return self.fetch_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_orders() is not supported yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_open_orders() is not supported yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_closed_orders() is not supported yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_my_trades() is not supported yet')
def fetch_order_trades(self, id, symbol=None, params={}):
raise NotSupported('fetch_order_trades() is not supported yet')
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
def fetch_deposit_address(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposit_address() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return self.sort_by(result, 0)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
raise ExchangeError('unrecognized bidask format: ' + str(bidasks[0]))
return result
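    # Illustrative sketch of parse_bids_asks(), called on an exchange instance with
    # raw [price, amount] rows as returned by an API (string values are accepted):
    #   self.parse_bids_asks([['100.0', '1.5'], ['99.0', '2.0']])
    #   # -> [[100.0, 1.5], [99.0, 2.0]]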
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, ['info', 'free', 'used', 'total']).keys()
balance['free'] = {}
balance['used'] = {}
balance['total'] = {}
for currency in currencies:
if balance[currency].get('total') is None:
if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
balance[currency]['total'] = self.sum(balance[currency].get('free'), balance[currency].get('used'))
if balance[currency].get('free') is None:
if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
balance[currency]['free'] = self.sum(balance[currency]['total'], -balance[currency]['used'])
if balance[currency].get('used') is None:
if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
balance[currency]['used'] = self.sum(balance[currency]['total'], -balance[currency]['free'])
balance['free'][currency] = balance[currency]['free']
balance['used'][currency] = balance[currency]['used']
balance['total'][currency] = balance[currency]['total']
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
def fetch_trading_fees(self, symbol, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return self.fetch_trading_fees(params)
def fetch_funding_fees(self, params={}):
raise NotSupported('fetch_funding_fees() not supported yet')
def fetch_funding_fee(self, code, params={}):
if not self.has['fetchFundingFees']:
raise NotSupported('fetch_funding_fee() not supported yet')
return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not supported yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcvc(trades, timeframe, since, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = self.fetch_time(params)
self.status['updated'] = updated
return self.status
def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = []
for i in range(0, len(ohlcvs[t])):
result.append([
ohlcvs[t][i] if ms else (ohlcvs[t][i] * 1000),
ohlcvs[o][i],
ohlcvs[h][i],
ohlcvs[l][i],
ohlcvs[c][i],
ohlcvs[v][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = {}
result[t] = []
result[o] = []
result[h] = []
result[l] = []
result[c] = []
result[v] = []
for i in range(0, len(ohlcvs)):
result[t].append(ohlcvs[i][0] if ms else int(ohlcvs[i][0] / 1000))
result[o].append(ohlcvs[i][1])
result[h].append(ohlcvs[i][2])
result[l].append(ohlcvs[i][3])
result[c].append(ohlcvs[i][4])
result[v].append(ohlcvs[i][5])
return result
def build_ohlcvc(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(timestamp, open, high, low, close, volume, count) = (0, 1, 2, 3, 4, 5, 6)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M)
j = len(ohlcvs)
candle = j - 1
if (j == 0) or opening_time >= ohlcvs[candle][timestamp] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
1, # count
])
else:
# still processing the same timeframe -> update opening trade
ohlcvs[candle][high] = max(ohlcvs[candle][high], trade['price'])
ohlcvs[candle][low] = min(ohlcvs[candle][low], trade['price'])
ohlcvs[candle][close] = trade['price']
ohlcvs[candle][volume] += trade['amount']
ohlcvs[candle][count] += 1
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
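    # Illustrative sketch of parse_timeframe(): the result is the timeframe length
    # in seconds:
    #   Exchange.parse_timeframe('5m')  # -> 300
    #   Exchange.parse_timeframe('1d')  # -> 86400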
@staticmethod
def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
ms = Exchange.parse_timeframe(timeframe) * 1000
# Get offset based on timeframe in milliseconds
offset = timestamp % ms
return timestamp - offset + (ms if direction == ROUND_UP else 0)
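    # Illustrative sketch of round_timeframe() with millisecond timestamps:
    #   Exchange.round_timeframe('1h', 7200500)            # -> 7200000 (rounds down)
    #   Exchange.round_timeframe('1h', 7200500, ROUND_UP)  # -> 10800000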
def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
array = self.to_array(trades)
array = [self.extend(self.parse_trade(trade, market), params) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
array = self.to_array(data)
result = []
for item in array:
entry = self.parse_ledger_entry(item, currency)
if isinstance(entry, list):
result += [self.extend(i, params) for i in entry]
else:
result.append(self.extend(entry, params))
result = self.sort_by(result, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
array = []
if isinstance(orders, list):
array = [self.extend(self.parse_order(order, market), params) for order in orders]
else:
array = [self.extend(self.parse_order(self.extend({'id': id}, order), market), params) for id, order in orders.items()]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def safe_market(self, marketId, market=None, delimiter=None):
if marketId is not None:
if self.markets_by_id is not None and marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
elif delimiter is not None:
baseId, quoteId = marketId.split(delimiter)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
return {
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
if market is not None:
return market
return {
'symbol': marketId,
'base': None,
'quote': None,
'baseId': None,
'quoteId': None,
}
def safe_symbol(self, marketId, market=None, delimiter=None):
market = self.safe_market(marketId, market, delimiter)
return market['symbol']
def safe_currency(self, currency_id, currency=None):
if currency_id is None and currency is not None:
return currency
if (self.currencies_by_id is not None) and (currency_id in self.currencies_by_id):
return self.currencies_by_id[currency_id]
return {
'id': currency_id,
'code': self.common_currency_code(currency_id.upper()) if currency_id is not None else currency_id
}
def safe_currency_code(self, currency_id, currency=None):
currency = self.safe_currency(currency_id, currency)
return currency['code']
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if value is not None:
array = [entry for entry in array if entry[field] == value]
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail and (since is None) else array[:limit]
return array
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None, tail=False):
return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit, 'timestamp', tail)
def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None, tail=False):
return self.filter_by_value_since_limit(array, 'currency', code, since, limit, 'timestamp', tail)
def filter_by_since_limit(self, array, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail and (since is None) else array[:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
raise ExchangeError('Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
raise ExchangeError('Does not have currency code ' + str(code))
def market(self, symbol):
if not self.markets:
raise ExchangeError('Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
raise BadSymbol('{} does not have market symbol {}'.format(self.id, symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, side, amount, price=None, params={}) -> dict:
return self.create_order(symbol, 'limit', side, amount, price, params)
def create_market_order(self, symbol, side, amount, price=None, params={}) -> dict:
return self.create_order(symbol, 'market', side, amount, price, params)
def create_limit_buy_order(self, symbol, amount, price=None, params={}) -> dict:
return self.create_order(symbol, 'limit', 'buy', amount, price, params)
def create_limit_sell_order(self, symbol, amount, price=None, params={}) -> dict:
return self.create_order(symbol, 'limit', 'sell', amount, price, params)
def create_market_buy_order(self, symbol, amount, params={}) -> dict:
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}) -> dict:
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
def vwap(self, baseVolume, quoteVolume):
return (quoteVolume / baseVolume) if (quoteVolume is not None) and (baseVolume is not None) and (baseVolume > 0) else None
# -------------------------------------------------------------------------
# web3 / 0x methods
@staticmethod
def has_web3():
return Web3 is not None
def check_required_dependencies(self):
if self.requiresWeb3 and not Exchange.has_web3():
raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
if self.requiresEddsa and eddsa is None:
raise NotSupported('Eddsa functionality requires python-axolotl-curve25519, install with `pip install python-axolotl-curve25519==0.4.1.post2`: https://github.com/tgalal/python-axolotl-curve25519')
@staticmethod
def from_wei(amount, decimals=18):
if amount is None:
return None
amount_float = float(amount)
exponential = '{:.14e}'.format(amount_float)
n, exponent = exponential.split('e')
new_exponent = int(exponent) - decimals
return float(n + 'e' + str(new_exponent))
@staticmethod
def to_wei(amount, decimals=18):
if amount is None:
return None
amount_float = float(amount)
exponential = '{:.14e}'.format(amount_float)
n, exponent = exponential.split('e')
new_exponent = int(exponent) + decimals
return number_to_string(n + 'e' + str(new_exponent))
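    # Illustrative sketch: from_wei()/to_wei() convert between integer base units
    # and decimal amounts (18 decimals by default, as for ETH); to_wei() is the
    # inverse conversion returning a plain numeric string:
    #   Exchange.from_wei(1000000000000000000)  # -> 1.0
    #   Exchange.from_wei(500000000000000000)   # -> 0.5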
def privateKeyToAddress(self, privateKey):
private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
public_key_hash = self.web3.sha3(public_key_bytes)
return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
def soliditySha3(self, array):
values = self.solidityValues(array)
types = self.solidityTypes(values)
return self.web3.soliditySha3(types, values).hex()
def solidityTypes(self, array):
return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
def solidityValues(self, array):
return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else (int(value, 16) if str(value)[:2] == '0x' else int(value)) for value in array]
@staticmethod
def remove0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
def hashMessage(self, message):
message_bytes = base64.b16decode(Exchange.encode(Exchange.remove0x_prefix(message)), True)
hash_bytes = self.web3.sha3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
@staticmethod
def signHash(hash, privateKey):
signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
return {
'r': '0x' + signature['r'],
's': '0x' + signature['s'],
'v': 27 + signature['v'],
}
def sign_message_string(self, message, privateKey):
signature = self.signMessage(message, privateKey)
return signature['r'] + Exchange.remove0x_prefix(signature['s']) + Exchange.binary_to_base16(Exchange.number_to_be(signature['v'], 1))
def signMessage(self, message, privateKey):
#
# The following comment is related to MetaMask, we use the upper type of signature prefix:
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'ETH_SIGN',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 28,
# r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
# s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
# }
#
# --------------------------------------------------------------------
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'NONE',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 27,
# r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
# s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
# }
#
message_hash = self.hashMessage(message)
signature = self.signHash(message_hash[-64:], privateKey[-64:])
return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def decimal_to_bytes(n, endian='big'):
"""int.from_bytes and int.to_bytes don't work in python2"""
if n > 0:
next_byte = Exchange.decimal_to_bytes(n // 0x100, endian)
remainder = bytes([n % 0x100])
return next_byte + remainder if endian == 'big' else remainder + next_byte
else:
return b''
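    # Illustrative sketch of decimal_to_bytes():
    #   Exchange.decimal_to_bytes(1000, 'big')     # -> b'\x03\xe8'
    #   Exchange.decimal_to_bytes(1000, 'little')  # -> b'\xe8\x03'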
@staticmethod
def totp(key):
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded) # throws an error if the key is invalid
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(Exchange.decimal_to_bytes(epoch, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
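    # Usage sketch of totp(): produces an RFC 6238-style one-time password
    # (HMAC-SHA1, 30-second steps, 6 digits) from a base32 secret; spaces in the
    # key are ignored. The key below is a made-up example value:
    #   Exchange.totp('JBSWY3DPEHPK3PXP')  # -> a 6-digit string that changes every 30 seconds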
@staticmethod
def number_to_le(n, size):
return Exchange.decimal_to_bytes(int(n), 'little').ljust(size, b'\x00')
@staticmethod
def number_to_be(n, size):
return Exchange.decimal_to_bytes(int(n), 'big').rjust(size, b'\x00')
@staticmethod
def base16_to_binary(s):
return base64.b16decode(s, True)
@staticmethod
def binary_to_base16(s):
return Exchange.decode(base64.b16encode(s)).lower()
# python supports arbitrarily big integers
@staticmethod
def integer_divide(a, b):
return int(a) // int(b)
@staticmethod
def integer_pow(a, b):
return int(a) ** int(b)
@staticmethod
def integer_modulo(a, b):
return int(a) % int(b)
def sleep(self, milliseconds):
return time.sleep(milliseconds / 1000)
@staticmethod
def base58_to_binary(s):
"""encodes a base58 string to as a big endian integer"""
if Exchange.base58_decoder is None:
Exchange.base58_decoder = {}
Exchange.base58_encoder = {}
for i, c in enumerate(Exchange.base58_alphabet):
Exchange.base58_decoder[c] = i
Exchange.base58_encoder[i] = c
result = 0
for i in range(len(s)):
result *= 58
result += Exchange.base58_decoder[s[i]]
return Exchange.decimal_to_bytes(result)
@staticmethod
def binary_to_base58(b):
if Exchange.base58_encoder is None:
Exchange.base58_decoder = {}
Exchange.base58_encoder = {}
for i, c in enumerate(Exchange.base58_alphabet):
Exchange.base58_decoder[c] = i
Exchange.base58_encoder[i] = c
result = 0
# undo decimal_to_bytes
for byte in b:
result *= 0x100
result += byte
string = []
while result > 0:
result, next_character = divmod(result, 58)
string.append(Exchange.base58_encoder[next_character])
string.reverse()
return ''.join(string)
| 39.408222
| 208
| 0.596965
|
55c50d1cfce3e01fec764595085ace27690911b5
| 945
|
py
|
Python
|
setup.py
|
datasig-ac-uk/pysegments
|
6bbc9714f5c9dd9181f96e6994427618dc81ac06
|
[
"MIT"
] | 1
|
2021-01-18T17:45:46.000Z
|
2021-01-18T17:45:46.000Z
|
setup.py
|
datasig-ac-uk/pysegments
|
6bbc9714f5c9dd9181f96e6994427618dc81ac06
|
[
"MIT"
] | null | null | null |
setup.py
|
datasig-ac-uk/pysegments
|
6bbc9714f5c9dd9181f96e6994427618dc81ac06
|
[
"MIT"
] | 1
|
2021-01-18T17:45:48.000Z
|
2021-01-18T17:45:48.000Z
|
import os
from glob import glob as _glob
from setuptools import setup, Extension
def glob(*parts):
return _glob(os.path.join(*parts))
segments_ext = Extension(
"pysegments._segments",
language = "c++",
sources = glob("src", "*.cpp"),
depends = glob("src", "*.h"),
include_dirs = ["Include"],
extra_compile_args=["-std=c++11"]
)
with open("README.md", "rt", encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
setup(
name="pysegments",
author="Sam Morley",
author_email="Sam.Morley@maths.ox.ac.uk",
version="0.1",
description="Tools for performing dyadic segmentation of data.",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
packages=["pysegments"],
package_dir={"pysegments": "pysegments"},
ext_modules=[
segments_ext,
],
python_requires=">=3.5",
tests_require=["pytest"],
test_suite="pysegments/tests"
)
| 23.04878
| 68
| 0.65291
|
cab91610f6d7893f846ff5ffe2d28d303d85956b
| 10,364
|
py
|
Python
|
tests/test_single_document.py
|
syntheorem/LSP
|
753f8675452c1a4056c606c92a6ad8d26281e138
|
[
"MIT"
] | null | null | null |
tests/test_single_document.py
|
syntheorem/LSP
|
753f8675452c1a4056c606c92a6ad8d26281e138
|
[
"MIT"
] | null | null | null |
tests/test_single_document.py
|
syntheorem/LSP
|
753f8675452c1a4056c606c92a6ad8d26281e138
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from LSP.plugin.core.url import filename_to_uri
from LSP.plugin.hover import _test_contents
from setup import TextDocumentTestCase, TIMEOUT_TIME
import os
import sublime
try:
from typing import Generator, Optional, Iterable, Tuple, List
assert Generator and Optional and Iterable and Tuple and List
except ImportError:
pass
SELFDIR = os.path.dirname(__file__)
TEST_FILE_PATH = os.path.join(SELFDIR, 'testfile.txt')
GOTO_RESPONSE = [
{
'uri': filename_to_uri(TEST_FILE_PATH),
'range':
{
'start':
{
# Put the cursor at the capital letter "F".
'character': 5,
'line': 1
},
}
}
]
GOTO_RESPONSE_LOCATION_LINK = [
{
'originSelectionRange': {'start': {'line': 0, 'character': 0}},
'targetUri': GOTO_RESPONSE[0]['uri'],
'targetRange': GOTO_RESPONSE[0]['range'],
'targetSelectionRange': GOTO_RESPONSE[0]['range']
}
]
GOTO_CONTENT = r'''abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
0123456789
'''
class SingleDocumentTestCase(TextDocumentTestCase):
def test_did_open(self) -> 'Generator':
# Just the existence of this method checks "initialize" -> "initialized" -> "textDocument/didOpen"
# -> "shutdown" -> client shut down
pass
def test_did_close(self) -> 'Generator':
assert self.view
self.view.set_scratch(True)
self.view.close()
self.view = None
yield from self.await_message("textDocument/didClose")
def test_did_change(self) -> 'Generator':
assert self.view
self.insert_characters("A")
yield from self.await_message("textDocument/didChange")
# multiple changes are batched into one didChange notification
self.insert_characters("B")
self.insert_characters("C")
self.insert_characters("D")
yield from self.await_message(("textDocument/didChange"))
def test_sends_save_with_purge(self) -> 'Generator':
assert self.view
self.view.settings().set("lsp_format_on_save", False)
self.insert_characters("A")
self.view.run_command("save")
yield from self.await_message("textDocument/didChange")
yield from self.await_message("textDocument/didSave")
yield from self.await_clear_view_and_save()
def test_formats_on_save(self) -> 'Generator':
assert self.view
self.view.settings().set("lsp_format_on_save", True)
self.insert_characters("A")
yield from self.await_message("textDocument/didChange")
self.set_response('textDocument/formatting', [{
'newText': "BBB",
'range': {
'start': {'line': 0, 'character': 0},
'end': {'line': 0, 'character': 1}
}
}])
self.view.run_command("save")
yield from self.await_message("textDocument/formatting")
yield from self.await_message("textDocument/didChange")
yield from self.await_message("textDocument/didSave")
text = self.view.substr(sublime.Region(0, self.view.size()))
self.assertEquals("BBB", text)
yield from self.await_clear_view_and_save()
def test_hover_info(self) -> 'Generator':
assert self.view
self.set_response('textDocument/hover', {"contents": "greeting"})
self.view.run_command('insert', {"characters": "Hello Wrld"})
self.assertFalse(self.view.is_popup_visible())
self.view.run_command('lsp_hover', {'point': 3})
yield lambda: self.view.is_popup_visible()
last_content = _test_contents[-1]
self.assertTrue("greeting" in last_content)
def test_remove_line_and_then_insert_at_that_line_at_end(self) -> 'Generator':
original = (
'a\n'
'b\n'
'c'
)
file_changes = [
((2, 0), (3, 0), ''), # out-of-bounds end position, but this is fine
((3, 0), (3, 0), 'c\n') # out-of-bounds start and end, this line doesn't exist
]
expected = (
'a\n'
'b\n'
'c\n'
)
# Old behavior:
# 1) first we end up with ('a\n', 'b\n', 'cc\n')
# 2) then we end up with ('a\n', 'b\n', '')
# New behavior:
        # 1) line index 3 is "created" ('a\n', 'b\n', 'c\n', 'c\n')
# 2) deletes line index 2.
yield from self.__run_formatting_test(original, expected, file_changes)
def test_apply_formatting(self) -> 'Generator':
original = (
'<dom-module id="some-thing">\n'
'<style></style>\n'
'<template>\n'
'</template>\n'
'</dom-module>\n'
)
file_changes = [
((0, 28), (1, 0), ''), # delete first \n
((1, 0), (1, 15), ''), # delete second line (but not the \n)
((2, 10), (2, 10), '\n <style></style>'), # insert after <template>
]
expected = (
'<dom-module id="some-thing">\n'
'<template>\n'
' <style></style>\n'
'</template>\n'
'</dom-module>\n'
)
yield from self.__run_formatting_test(original, expected, file_changes)
def test_apply_formatting_and_preserve_order(self) -> 'Generator':
original = (
'abcde\n'
'fghij\n'
)
# Note that (1, 2) comes before (0, 1) in the text.
file_changes = [
((1, 2), (1, 2), '4'), # insert after the g
((1, 2), (1, 2), '5'),
((1, 2), (1, 3), '6'), # replace the h
((0, 1), (0, 1), '1'), # insert after a
((0, 1), (0, 1), '2'),
((0, 1), (0, 1), '3'),
]
expected = (
'a123bcde\n'
'fg456ij\n'
)
yield from self.__run_formatting_test(original, expected, file_changes)
def __run_formatting_test(
self,
original: 'Iterable[str]',
expected: 'Iterable[str]',
file_changes: 'List[Tuple[Tuple[int, int], Tuple[int, int], str]]'
) -> 'Generator':
assert self.view
original_change_count = self.insert_characters(''.join(original))
# self.assertEqual(original_change_count, 1)
self.set_response('textDocument/formatting', [{
'newText': new_text,
'range': {
'start': {'line': start[0], 'character': start[1]},
'end': {'line': end[0], 'character': end[1]}}} for start, end, new_text in file_changes])
self.view.run_command('lsp_format_document')
yield from self.await_message('textDocument/formatting')
yield from self.await_view_change(original_change_count + len(file_changes))
edited_content = self.view.substr(sublime.Region(0, self.view.size()))
self.assertEquals(edited_content, ''.join(expected))
def __run_goto_test(self, response: list, text_document_request: str, subl_command_suffix: str) -> 'Generator':
assert self.view
self.insert_characters(GOTO_CONTENT)
# Put the cursor back at the start of the buffer, otherwise is_at_word fails in goto.py.
self.view.sel().clear()
self.view.sel().add(sublime.Region(0, 0))
method = 'textDocument/{}'.format(text_document_request)
self.set_response(method, response)
self.view.run_command('lsp_symbol_{}'.format(subl_command_suffix))
yield from self.await_message(method)
def condition() -> bool:
nonlocal self
assert self.view
s = self.view.sel()
if len(s) != 1:
return False
return s[0].begin() > 0
yield {"condition": condition, "timeout": TIMEOUT_TIME}
first = self.view.sel()[0].begin()
self.assertEqual(self.view.substr(sublime.Region(first, first + 1)), "F")
def test_definition(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE, 'definition', 'definition')
def test_definition_location_link(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE_LOCATION_LINK, 'definition', 'definition')
def test_type_definition(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE, 'typeDefinition', 'type_definition')
def test_type_definition_location_link(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE_LOCATION_LINK, 'typeDefinition', 'type_definition')
def test_declaration(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE, 'declaration', 'declaration')
def test_declaration_location_link(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE_LOCATION_LINK, 'declaration', 'declaration')
def test_implementation(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE, 'implementation', 'implementation')
def test_implementation_location_link(self) -> 'Generator':
yield from self.__run_goto_test(GOTO_RESPONSE_LOCATION_LINK, 'implementation', 'implementation')
class WillSaveWaitUntilTestCase(TextDocumentTestCase):
def get_test_server_capabilities(self) -> dict:
capabilities = deepcopy(super().get_test_server_capabilities())
capabilities['capabilities']['textDocumentSync']['willSaveWaitUntil'] = True
return capabilities
def test_will_save_wait_until(self) -> 'Generator':
assert self.view
self.insert_characters("A")
yield from self.await_message("textDocument/didChange")
self.set_response('textDocument/willSaveWaitUntil', [{
'newText': "BBB",
'range': {
'start': {'line': 0, 'character': 0},
'end': {'line': 0, 'character': 1}
}
}])
self.view.settings().set("lsp_format_on_save", False)
self.view.run_command("save")
yield from self.await_message("textDocument/willSaveWaitUntil")
yield from self.await_message("textDocument/didChange")
yield from self.await_message("textDocument/didSave")
text = self.view.substr(sublime.Region(0, self.view.size()))
self.assertEquals("BBB", text)
yield from self.await_clear_view_and_save()
| 38.962406
| 115
| 0.605172
|
567b71348ed14443fbc1c2052446983d8b4772bf
| 874
|
py
|
Python
|
Monte-Carlo-Attacks/Monte-Carlo-MNIST_GAN/test.py
|
SAP-samples/security-research-mi-gen-nn
|
15627f73fcc497c87a67f41957f6b82881dff353
|
[
"Apache-2.0"
] | 5
|
2020-02-21T15:13:57.000Z
|
2021-08-05T15:18:40.000Z
|
Monte-Carlo-Attacks/Monte-Carlo-MNIST_GAN/test.py
|
SAP-samples/security-research-membership-inference-against-generative-networks
|
15627f73fcc497c87a67f41957f6b82881dff353
|
[
"Apache-2.0"
] | null | null | null |
Monte-Carlo-Attacks/Monte-Carlo-MNIST_GAN/test.py
|
SAP-samples/security-research-membership-inference-against-generative-networks
|
15627f73fcc497c87a67f41957f6b82881dff353
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import numpy as np
import ais
import matplotlib.pyplot as plt
from priors import NormalPrior
from kernels import ParsenDensityEstimator
from scipy.stats import norm
class Generator(object):
def __init__(self, input_dim, output_dim):
self.input_dim = input_dim
self.output_dim = output_dim
def __call__(self, z):
return z * 2 + 3
generator = Generator(1, 1)
prior = NormalPrior()
kernel = ParsenDensityEstimator()
model = ais.Model(generator, prior, kernel, 1000, 10000)
#p = norm()
#x = np.linspace(norm.ppf(0.01, loc=3, scale=2), norm.ppf(0.99, loc=3, scale=2), 100)
#p1 = norm.pdf(x, loc=3, scale=2)
#xx = np.reshape(x, [100, 1])
num_samples = 1
xx = np.reshape([2], [num_samples, 1])
schedule = ais.get_schedule(num_samples, rad=4)
p2 = model.ais(xx, schedule)
print('results')
print(p2)
print(np.exp(p2))
| 23
| 85
| 0.700229
|
3ac36aecf7fc7a0ecb7f9c205cf4c1e92ce99d3e
| 3,185
|
py
|
Python
|
symupy/runtime/logic/sorted_frozen_set.py
|
licit-lab/symupy
|
942a17ee78cd12a363a4cd7b7f8363e239ccf7fe
|
[
"MIT"
] | 2
|
2021-11-10T14:33:09.000Z
|
2022-03-03T09:23:03.000Z
|
symupy/runtime/logic/sorted_frozen_set.py
|
licit-lab/symupy
|
942a17ee78cd12a363a4cd7b7f8363e239ccf7fe
|
[
"MIT"
] | 33
|
2021-01-18T13:59:01.000Z
|
2021-11-29T13:21:10.000Z
|
symupy/runtime/logic/sorted_frozen_set.py
|
licit-lab/symupy
|
942a17ee78cd12a363a4cd7b7f8363e239ccf7fe
|
[
"MIT"
] | null | null | null |
"""
This module describes a sorted frozen set: a collection implementation for a set of ordered elements that establishes specific protocols for iteration, information access, and element identification.
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
from collections.abc import Sequence, Set
from itertools import chain
from bisect import bisect_left
# ============================================================================
# CLASS AND DEFINITIONS
# ============================================================================
class SortedFrozenSet(Sequence, Set):
"""
    This is a collection that provides a set of properties to create a sorted frozen set.
    In particular, elements are deduplicated, kept sorted by their `vehid` attribute, and the resulting collection is immutable and hashable.
Args:
Sequence (Sequence): Inherits from the `Sequence` collection object.
Set (Set): Inherits from the `Set` collection object.
"""
def __init__(self, items=None):
self._items = tuple(
sorted(
set(items) if (items is not None) else set(),
key=lambda x: x.vehid,
)
)
def __contains__(self, item):
try:
self.index(item)
return True
except ValueError:
return False
def __len__(self):
return len(self._items)
def __iter__(self):
return iter(self._items)
def __getitem__(self, index):
result = self._items[index]
return SortedFrozenSet(result) if isinstance(index, slice) else result
def __repr__(self):
return "{type}({arg})".format(
type=type(self).__name__,
arg=(
"[{}]".format(", ".join(map(repr, self._items))) if self._items else ""
),
)
def __eq__(self, rhs):
if not isinstance(rhs, type(self)):
return NotImplemented
return self._items == rhs._items
def __hash__(self):
return hash((type(self), self._items))
def __add__(self, rhs):
if not isinstance(rhs, type(self)):
return NotImplemented
return SortedFrozenSet(chain(self._items, rhs._items))
def __mul__(self, rhs):
return self if rhs > 0 else SortedFrozenSet()
def __rmul__(self, lhs):
return self * lhs
def count(self, item):
return int(item in self)
def index(self, item):
index = bisect_left(self._items, item)
if (index != len(self._items)) and self._items[index] == item:
return index
raise ValueError(f"{item!r} not found")
def issubset(self, iterable):
return self <= SortedFrozenSet(iterable)
def issuperset(self, iterable):
return self >= SortedFrozenSet(iterable)
def intersection(self, iterable):
return self & SortedFrozenSet(iterable)
def union(self, iterable):
return self | SortedFrozenSet(iterable)
def symmetric_difference(self, iterable):
return self ^ SortedFrozenSet(iterable)
def difference(self, iterable):
return self - SortedFrozenSet(iterable)
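# Usage sketch (Vehicle is a hypothetical element type; the collection only requires
# that items expose a `vehid` attribute, which is used as the sort key):
#
#   from collections import namedtuple
#   Vehicle = namedtuple("Vehicle", "vehid")
#   s = SortedFrozenSet([Vehicle(3), Vehicle(1), Vehicle(2), Vehicle(3)])
#   [v.vehid for v in s]   # -> [1, 2, 3]
#   Vehicle(2) in s        # -> True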
| 29.490741
| 210
| 0.55887
|
8939262cd968bd39b1c97e795f3703f8a1021659
| 8,669
|
py
|
Python
|
alphamap/pdflib.py
|
MarcSkovMadsen/alphamap
|
ef3774c3ef693a9d772615cc019c90943a752cdf
|
[
"Apache-2.0"
] | 28
|
2021-07-24T20:39:43.000Z
|
2022-03-21T11:52:25.000Z
|
alphamap/pdflib.py
|
MarcSkovMadsen/alphamap
|
ef3774c3ef693a9d772615cc019c90943a752cdf
|
[
"Apache-2.0"
] | 19
|
2021-07-21T12:10:15.000Z
|
2022-02-23T07:04:47.000Z
|
alphamap/pdflib.py
|
MarcSkovMadsen/alphamap
|
ef3774c3ef693a9d772615cc019c90943a752cdf
|
[
"Apache-2.0"
] | 6
|
2021-07-23T15:15:41.000Z
|
2021-12-25T07:15:30.000Z
|
#!/usr/bin/env python
# coding: utf-8
# This script has kindly been provided by Julia Schessner.
from io import BytesIO
from reportlab.pdfgen import canvas
from reportlab.lib.utils import ImageReader
from reportlab.platypus import Paragraph
from pdfrw import PdfReader, PdfDict
from pdfrw.buildxobj import pagexobj
from pdfrw.toreportlab import makerl
from reportlab.platypus import Flowable
from reportlab.lib.enums import TA_JUSTIFY,TA_LEFT,TA_CENTER,TA_RIGHT
# The following class was copied from https://stackoverflow.com/questions/3448365/pdf-image-in-pdf-document-using-reportlab-python (answer from skidzo, 2017)
class PdfImage(Flowable):
"""
PdfImage wraps the first page from a PDF file as a Flowable
    which can be included in a ReportLab Platypus document.
Based on the vectorpdf extension in rst2pdf (http://code.google.com/p/rst2pdf/)
This can be used from the place where you want to return your matplotlib image
as a Flowable:
img = BytesIO()
fig, ax = plt.subplots(figsize=(canvaswidth,canvaswidth))
ax.plot([1,2,3],[6,5,4],antialiased=True,linewidth=2,color='red',label='a curve')
fig.savefig(img,format='PDF')
return(PdfImage(img))
"""
def __init__(self, filename_or_object, width=None, height=None, kind='direct'):
        # If using StringIO buffer, set pointer to beginning
if hasattr(filename_or_object, 'read'):
filename_or_object.seek(0)
#print("read")
self.page = PdfReader(filename_or_object, decompress=False).pages[0]
self.xobj = pagexobj(self.page)
self.imageWidth = width
self.imageHeight = height
x1, y1, x2, y2 = self.xobj.BBox
self._w, self._h = x2 - x1, y2 - y1
if not self.imageWidth:
self.imageWidth = self._w
if not self.imageHeight:
self.imageHeight = self._h
self.__ratio = float(self.imageWidth)/self.imageHeight
if kind in ['direct','absolute'] or width==None or height==None:
self.drawWidth = width or self.imageWidth
self.drawHeight = height or self.imageHeight
elif kind in ['bound','proportional']:
factor = min(float(width)/self._w,float(height)/self._h)
self.drawWidth = self._w*factor
self.drawHeight = self._h*factor
def wrap(self, availableWidth, availableHeight):
"""
        Returns the draw width and height.
        Convenience function to adapt the image to the available space.
"""
return self.drawWidth, self.drawHeight
def drawOn(self, canv, x, y, _sW=0):
"""
translates Bounding Box and scales the given canvas
"""
if _sW > 0 and hasattr(self, 'hAlign'):
a = self.hAlign
if a in ('CENTER', 'CENTRE', TA_CENTER):
x += 0.5*_sW
elif a in ('RIGHT', TA_RIGHT):
x += _sW
elif a not in ('LEFT', TA_LEFT):
raise ValueError("Bad hAlign value " + str(a))
#xobj_name = makerl(canv._doc, self.xobj)
xobj_name = makerl(canv, self.xobj)
xscale = self.drawWidth/self._w
yscale = self.drawHeight/self._h
x -= self.xobj.BBox[0] * xscale
y -= self.xobj.BBox[1] * yscale
canv.saveState()
canv.translate(x, y)
canv.scale(xscale, yscale)
canv.doForm(xobj_name)
canv.restoreState()
def draw_paragraph(text, pdf, cw, ch, poi, centered_vertically=False):
P = Paragraph(text)
w,h = P.wrap(cw, ch)
if not centered_vertically:
poi[1] -= h
else:
poi[1] -= h+int((ch-h)/2)
P.drawOn(pdf, poi[0], poi[1])
poi[0] += w
return poi
def draw_plotly(fig, pdf, cw, ch, poi, rescale=False, centerv=True, centerh=True,
rasterize = False, png_scaling=4):
w = fig.layout.width
if w is None:
w = cw
if rescale:
fig.update_layout(width=w)
h = fig.layout.height
if h is None:
h = ch
if rescale:
fig.update_layout(height=h)
if centerh:
poi[0] += int((cw-w)/2)
if centerv:
poi[1] -= h+int((ch-h)/2)
else:
poi[1] -= h
if rasterize:
img = ImageReader(BytesIO(fig.to_image(format='png', scale=png_scaling)))
pdf.drawImage(img, poi[0], poi[1], width=w, height=h)
else:
img = PdfImage(BytesIO(fig.to_image(format='pdf')), width=w, height=h)
img.drawOn(pdf, poi[0], poi[1])
poi[0] += w
return poi
def draw_bytes(b, pdf, cw, ch, poi):
poi[1] -= ch
pdf.drawImage(ImageReader(BytesIO(b)), poi[0], poi[1])
poi[0] += cw
return poi
def draw_content(pdf, content, width=595, height=842, border=40, spacing=7, png_scaling=4, verbose=False):
content_width = width-(2*border)
content_height = height-(2*border)
pointer = [border, height-border]
fontsize = pdf._fontsize
if type(content) == str:
draw_paragraph(content, pdf, content_width, content_height, pointer, centered_vertically=True)
elif str(type(content)) == "<class 'plotly.graph_objs._figure.Figure'>":
draw_plotly(content, pdf, content_width, content_height, pointer, png_scaling=png_scaling)
elif type(content) == bytes:
draw_bytes(content, pdf, content_width, content_height, pointer)
elif type(content) == list:
# initialize left content-height
ch = content_height
for ri, row in enumerate(content):
if verbose:
print('row', ri, 'left space:', content_width, ch, 'pointer:', pointer)
# draw row
if type(row) == str:
pointer = draw_paragraph(row, pdf, content_width, ch, pointer)
elif str(type(row)) == "<class 'plotly.graph_objs._figure.Figure'>":
pointer = draw_plotly(row, pdf, content_width, ch, pointer,
centerv=False, png_scaling=png_scaling)
elif type(row) == bytes:
pointer = draw_bytes(row, pdf, content_width, ch, pointer)
elif type(row) == list:
# initialize left content-width and store pointer height, initialize max height
cw = content_width
poih = pointer[1]
poihmax = 0
for ii, i in enumerate(row):
if verbose:
print('item', ii, 'left space:', cw, ch, 'pointer:', pointer)
# draw item
if type(i) == str:
pointer = draw_paragraph(i, pdf, cw, ch, pointer)
elif str(type(i)) == "<class 'plotly.graph_objs._figure.Figure'>":
pointer = draw_plotly(i, pdf, cw, ch, pointer,
centerv=False, centerh=False, png_scaling=png_scaling)
elif type(i) == bytes:
pointer = draw_bytes(i, pdf, cw, ch if poihmax == 0 else poihmax, pointer)
else:
pointer = draw_paragraph("Unknown content of {} passed.".format(str(type(i))), pdf,
cw, ch, pointer)
# check max height, reset height pointer for next item, raise overflow warning,
# add spacing and recalculate leftover content width
poihmax = max(poihmax, poih-pointer[1])
pointer[1] = poih
if pointer[0] > content_width+border:
print("-------\nWarning\nContent is overflowing to the right of the page.\n-------")
pointer[0] += spacing
cw = content_width-(pointer[0]+border)
# adjust pointer to maximum height for next row to be drawn
pointer[1] = poih-poihmax
else:
pointer = draw_paragraph("Unknown content of {} passed.".format(str(type(row))), pdf,
cw, ch, pointer)
# reset width pointer for next row, raise overflow warning,
# add spacing and recalculate leftover content height
pointer[0] = border
if pointer[1] < border:
print("-------\nWarning\nContent is overflowing at the bottom of the page.\n-------")
pointer[1] -= spacing
ch = pointer[1]-border
else:
draw_paragraph("Unknown content of {} passed.".format(str(type(content))), pdf,
content_width, content_height, pointer, centered_vertically=True)
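# Usage sketch (not part of the original module): illustrates the nesting
# convention draw_content() expects, where a top-level list is a sequence of
# rows and a nested list is one row of side-by-side items. 'report.pdf' is a
# placeholder output path.
def _draw_content_usage_sketch():
    from reportlab.pdfgen import canvas as rl_canvas
    pdf = rl_canvas.Canvas("report.pdf", pagesize=(595, 842))
    content = [
        "Monthly report",                       # row 1: a single paragraph
        ["Left column text", "Right column"],   # row 2: two items side by side
        "Footer note",                          # row 3: a single paragraph
    ]
    draw_content(pdf, content, verbose=True)
    pdf.save()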
| 37.855895
| 157
| 0.581382
|
db14b2a68ad23bb6248af2d97383c7f67a065446
| 5,005
|
py
|
Python
|
dnc/dnc.py
|
Munyola/dnc
|
d3d94b3b1f1efc282481910054f82047caf37f65
|
[
"Apache-2.0"
] | 2,697
|
2017-04-19T14:05:38.000Z
|
2022-03-28T02:56:29.000Z
|
dnc/dnc.py
|
Munyola/dnc
|
d3d94b3b1f1efc282481910054f82047caf37f65
|
[
"Apache-2.0"
] | 41
|
2017-04-21T12:37:29.000Z
|
2022-03-06T11:48:40.000Z
|
dnc/dnc.py
|
Munyola/dnc
|
d3d94b3b1f1efc282481910054f82047caf37f65
|
[
"Apache-2.0"
] | 539
|
2017-04-19T14:08:22.000Z
|
2022-03-14T01:30:00.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC Cores.
These modules create a DNC core. They take input, pass parameters to the memory
access module, and integrate the output of memory to form an output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
from dnc import access
DNCState = collections.namedtuple('DNCState', ('access_output', 'access_state',
'controller_state'))
class DNC(snt.RNNCore):
"""DNC core module.
Contains controller and memory access module.
"""
def __init__(self,
access_config,
controller_config,
output_size,
clip_value=None,
name='dnc'):
"""Initializes the DNC core.
Args:
access_config: dictionary of access module configurations.
controller_config: dictionary of controller (LSTM) module configurations.
output_size: output dimension size of core.
clip_value: clips controller and core output values to between
`[-clip_value, clip_value]` if specified.
name: module name (default 'dnc').
Raises:
TypeError: if direct_input_size is not None for any access module other
than KeyValueMemory.
"""
super(DNC, self).__init__(name=name)
with self._enter_variable_scope():
self._controller = snt.LSTM(**controller_config)
self._access = access.MemoryAccess(**access_config)
self._access_output_size = np.prod(self._access.output_size.as_list())
self._output_size = output_size
self._clip_value = clip_value or 0
self._output_size = tf.TensorShape([output_size])
self._state_size = DNCState(
access_output=self._access_output_size,
access_state=self._access.state_size,
controller_state=self._controller.state_size)
def _clip_if_enabled(self, x):
if self._clip_value > 0:
return tf.clip_by_value(x, -self._clip_value, self._clip_value)
else:
return x
def _build(self, inputs, prev_state):
"""Connects the DNC core into the graph.
Args:
inputs: Tensor input.
prev_state: A `DNCState` tuple containing the fields `access_output`,
          `access_state` and `controller_state`. `access_output` is a 3-D Tensor
of shape `[batch_size, num_reads, word_size]` containing read words.
`access_state` is a tuple of the access module's state, and
`controller_state` is a tuple of controller module's state.
Returns:
A tuple `(output, next_state)` where `output` is a tensor and `next_state`
is a `DNCState` tuple containing the fields `access_output`,
`access_state`, and `controller_state`.
"""
prev_access_output = prev_state.access_output
prev_access_state = prev_state.access_state
prev_controller_state = prev_state.controller_state
batch_flatten = snt.BatchFlatten()
controller_input = tf.concat(
[batch_flatten(inputs), batch_flatten(prev_access_output)], 1)
controller_output, controller_state = self._controller(
controller_input, prev_controller_state)
controller_output = self._clip_if_enabled(controller_output)
controller_state = tf.contrib.framework.nest.map_structure(self._clip_if_enabled, controller_state)
access_output, access_state = self._access(controller_output,
prev_access_state)
output = tf.concat([controller_output, batch_flatten(access_output)], 1)
output = snt.Linear(
output_size=self._output_size.as_list()[0],
name='output_linear')(output)
output = self._clip_if_enabled(output)
return output, DNCState(
access_output=access_output,
access_state=access_state,
controller_state=controller_state)
def initial_state(self, batch_size, dtype=tf.float32):
return DNCState(
controller_state=self._controller.initial_state(batch_size, dtype),
access_state=self._access.initial_state(batch_size, dtype),
access_output=tf.zeros(
[batch_size] + self._access.output_size.as_list(), dtype))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
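# Usage sketch (not part of this file): one way to unroll the DNC core over a
# sequence with tf.nn.dynamic_rnn (TensorFlow 1.x). The configuration values
# below are illustrative assumptions, not values prescribed by this module.
def _dnc_usage_sketch(input_sequence, batch_size, output_size=10):
  access_config = {
      "memory_size": 16, "word_size": 16, "num_reads": 4, "num_writes": 1}
  controller_config = {"hidden_size": 64}
  dnc_core = DNC(access_config, controller_config, output_size, clip_value=20)
  initial_state = dnc_core.initial_state(batch_size)
  # input_sequence has shape [time, batch, features] because time_major=True.
  output_sequence, _ = tf.nn.dynamic_rnn(
      cell=dnc_core, inputs=input_sequence, time_major=True,
      initial_state=initial_state)
  return output_sequence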
| 35
| 103
| 0.692707
|
d7426969268a998302bebd29d8f0e6f93cdc2f93
| 2,775
|
py
|
Python
|
examples/yara_matches.py
|
kwBrandenWagner/zat
|
b6e76af2d59313db4d3818986183689ebf559ad8
|
[
"Apache-2.0"
] | null | null | null |
examples/yara_matches.py
|
kwBrandenWagner/zat
|
b6e76af2d59313db4d3818986183689ebf559ad8
|
[
"Apache-2.0"
] | null | null | null |
examples/yara_matches.py
|
kwBrandenWagner/zat
|
b6e76af2d59313db4d3818986183689ebf559ad8
|
[
"Apache-2.0"
] | null | null | null |
"""Run a set of Yara Rule matches on Extracted Files
Note: Download yara rules from their repo and give index file as arg
$ git clone https://github.com/Yara-Rules/rules rules
$ python yara_matches -r /path/to/rules/index.yar -e /path/to/zeek/extract_files
"""
from __future__ import print_function
import os
import sys
import time
import argparse
from pprint import pprint
# Third Party Imports
try:
import yara
except ImportError:
print('\nThis example needs yara. Please do a $pip install yara-python')
sys.exit(1)
# Local imports
from zat.utils import dir_watcher, signal_utils
def yara_match(file_path, rules):
"""Callback for a newly extracted file"""
print('New Extracted File: {:s}'.format(file_path))
    print('Matches:')
pprint(rules.match(file_path))
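# Usage sketch (not part of the original script): compile the rules and match
# a single file directly, without the directory watcher. Both paths below are
# placeholder assumptions.
def _yara_single_file_sketch():
    rules = yara.compile('/path/to/rules/index.yar')
    return rules.match('/path/to/zeek/extract_files/extracted_file')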
def my_exit():
"""Exit on Signal"""
print('Goodbye...')
sys.exit()
if __name__ == '__main__':
# Run a set of Yara Rule matches on Extracted Files
# Collect args from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--rule-index', type=str, required=True, help='Specify the yara rule index file (e.g. /full/path/to/yara/rules/index.yar)')
parser.add_argument('-e', '--extract-dir', type=str, required=True, help='Specify the Zeek extract_files directory (e.g. /full/path/to/zeek/extract_files)')
args, commands = parser.parse_known_args()
# Check for unknown args
if commands:
print('Unrecognized args: %s' % commands)
sys.exit(1)
# If no args just call help
if len(sys.argv) == 1:
parser.print_help()
print('\nNote: Download the yara repo and give the index file as an arg')
print('$ git clone https://github.com/Yara-Rules/rules')
print('$ python yara_matches -r /path/to/rules/index.yar -e /path/to/zeek/extract_files')
sys.exit(1)
# Sanity check that the args exist and are what we expect
if not os.path.isfile(args.rule_index):
print('--rule-index file not found.. should be /full/path/to/yara/rules/index.yar')
sys.exit(1)
if not os.path.isdir(args.extract_dir):
print('--extract-dir directory not found.. should be /full/path/to/zeek/extract_files')
sys.exit(1)
# Load/compile the yara rules
my_rules = yara.compile(args.rule_index)
# Create DirWatcher and start watching the Zeek extract_files directory
print('Watching Extract Files Directory: {:s}'.format(args.extract_dir))
dir_watcher.DirWatcher(args.extract_dir, callback=yara_match, rules=my_rules)
# Okay so just wait around for files to be dropped by Zeek or someone hits Ctrl-C
with signal_utils.signal_catcher(my_exit):
while True:
time.sleep(.5)
| 37
| 160
| 0.684324
|
a41f07c8e0f558597dfd6ed1c580c0393144a591
| 8,755
|
py
|
Python
|
setup.py
|
PlaidCloud/sqlalchemy-views
|
669913848a36a6795cd4a32c8bdf1d23737fa959
|
[
"MIT"
] | null | null | null |
setup.py
|
PlaidCloud/sqlalchemy-views
|
669913848a36a6795cd4a32c8bdf1d23737fa959
|
[
"MIT"
] | null | null | null |
setup.py
|
PlaidCloud/sqlalchemy-views
|
669913848a36a6795cd4a32c8bdf1d23737fa959
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
## Constants
CODE_DIRECTORY = 'sqlalchemy_views'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from sqlalchemy_views import metadata
#
# However, when we do this, we also import `sqlalchemy_views/__init__.py'. If
# this imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project() and has_git():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk does not descend into them.
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def has_git():
return bool(spawn.find_executable("git"))
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
# Make sure to import pytest in this function. For the reason, see here:
# <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
import pytest
# This runs the unit tests and doctest.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY, 'README.rst'])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# These are fake, and just set to appease distutils and setuptools.
self.test_suite = True
self.test_args = []
def run_tests(self):
raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.rst'),
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(exclude=(TESTS_DIRECTORY,)),
install_requires=[
'sqlalchemy>=1.0.0',
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest==2.5.1',
'mock==1.0.1',
'flake8==2.1.0',
],
cmdclass={'test': TestAllCommand},
zip_safe=False, # don't use eggs
)
def main():
setup(**setup_dict)
if __name__ == '__main__':
main()
| 31.606498
| 113
| 0.672073
|
adf5cc0a9a25f52c488f97fb0533d348478dad89
| 2,508
|
py
|
Python
|
test.py
|
VishnuBhaarath/Tabular-AutoML
|
9f6259c8764bbc409244af49661757700b1c5b0c
|
[
"MIT"
] | null | null | null |
test.py
|
VishnuBhaarath/Tabular-AutoML
|
9f6259c8764bbc409244af49661757700b1c5b0c
|
[
"MIT"
] | null | null | null |
test.py
|
VishnuBhaarath/Tabular-AutoML
|
9f6259c8764bbc409244af49661757700b1c5b0c
|
[
"MIT"
] | null | null | null |
"""
This file is for testing the integration of the library classes and functions.
"""
from tab_automl.automl.datasets import Iris, Wine
from tab_automl.automl.training import Trainer
from tab_automl.automl.processing import PreProcessing
from tab_automl.automl.fet_engineering import FeatureEngineering
from tab_automl.utils.training import train_validation_split
def classification_test():
print(f"Testing through Classification AutoML ...")
# Loading the dataset
dataset = Iris()
# X feature set and target feature split
x, y = dataset.prepare_x_and_y()
# Defining processor
processor = PreProcessing(x=x, y=y)
# Executing processor
processor.run()
# Saving processed data
processor.save_data()
# Defining feature engineer
feature_engineer = FeatureEngineering(x=x, y=y)
# Executing feature engineer
feature_engineer.run()
# Saving engineered data
feature_engineer.save_data()
# Defining model trainer
trainer = Trainer(problem_type="classification")
# Preparing train and validation split
x_train, y_train, x_val, y_val = train_validation_split(x=x, y=y)
# Training AutoML and saving the best model
trainer.single_model_trainer(x_train=x_train, y_train=y_train, x_val=x_val, y_val=y_val, save_model=True)
print(f"Classification test completed successfully...\n")
def regression_test():
print(f"Testing through Regression AutoML ...")
# Loading the dataset
dataset = Wine()
# X feature set and target feature split
x, y = dataset.prepare_x_and_y()
# Defining processor
processor = PreProcessing(x=x, y=y)
# Executing processor
processor.run()
# Saving processed data
processor.save_data()
# Defining feature engineer
feature_engineer = FeatureEngineering(x=x, y=y)
# Executing feature engineer
feature_engineer.run()
# Saving engineered data
feature_engineer.save_data()
# Defining model trainer
trainer = Trainer(problem_type="regression")
# Preparing train and validation split
x_train, y_train, x_val, y_val = train_validation_split(x=x, y=y)
# Training AutoML and saving the best model
trainer.single_model_trainer(x_train=x_train, y_train=y_train, x_val=x_val, y_val=y_val, save_model=True)
print(f"Regression test completed successfully...\n")
def test():
# Testing classification
classification_test()
# Testing Regression
regression_test()
if __name__ == "__main__":
test()
| 33.891892
| 109
| 0.729665
|
09ab82085479422612d157dfa3338679bb88fe8f
| 266
|
py
|
Python
|
etc/untitled5.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | null | null | null |
etc/untitled5.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | 7
|
2019-12-16T22:10:01.000Z
|
2022-02-10T00:27:35.000Z
|
etc/untitled5.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | null | null | null |
a=type([1,2])
print(a)
print(type(a))
b={}.get(a)
print(b)
import numpy as np
x = np.arange(16).reshape(-1,4)
print(np.where(x>5))
#(array([1, 1, 2, 2, 2, 2, 3, 3, 3, 3], dtype=int64), array([2, 3, 0, 1, 2, 3, 0, 1, 2, 3], dtype=int64))
# Note: the first array holds the first-dimension (row) indices and the second array holds the second-dimension (column) indices.
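# Added illustration (not in the original script): pair the two index arrays
# returned by np.where into explicit (row, column) coordinates.
print([(int(r), int(c)) for r, c in zip(*np.where(x > 5))])
# [(1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3), (3, 0), (3, 1), (3, 2), (3, 3)]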
| 20.461538
| 105
| 0.590226
|
dcf34530e5e702b480e9bd571c1b55eeebf81df0
| 634
|
py
|
Python
|
examples/run_bea.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | 1
|
2020-03-16T11:15:43.000Z
|
2020-03-16T11:15:43.000Z
|
examples/run_bea.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | null | null | null |
examples/run_bea.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | null | null | null |
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
from NiaPy.benchmarks import Sphere
from NiaPy.task.task import StoppingTask, OptimizationType
from NiaPy.algorithms.basic import BeesAlgorithm
import sys
sys.path.append('../')
# End of fix
# we will run Bees Algorithm for 5 independent runs
for i in range(5):
task = StoppingTask(D=20, nGEN=2, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
algo = BeesAlgorithm(NP=50, m=20, e=10, nep=20, nsp=15, ngh=7)
best = algo.run(task=task)
print('%s -> %s' % (best[0], best[1]))
| 33.368421
| 96
| 0.728707
|
ab69d097da84116e2fc1ac74f21204c37d2a6109
| 712
|
py
|
Python
|
python/478.generate-random-point-in-a-circle.py
|
fengbaoheng/leetcode
|
2b6ec9adea383503acc23622ca5623161f7ca520
|
[
"MIT"
] | 1
|
2019-04-11T12:34:55.000Z
|
2019-04-11T12:34:55.000Z
|
python/478.generate-random-point-in-a-circle.py
|
fengbaoheng/leetcode
|
2b6ec9adea383503acc23622ca5623161f7ca520
|
[
"MIT"
] | null | null | null |
python/478.generate-random-point-in-a-circle.py
|
fengbaoheng/leetcode
|
2b6ec9adea383503acc23622ca5623161f7ca520
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=478 lang=python3
#
# [478] 在圆内随机生成点
#
from typing import List
from random import uniform
import math
class Solution:
def __init__(self, radius: float, x_center: float, y_center: float):
self.r = radius
self.x = x_center
self.y = y_center
self.r2 = radius * radius
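    # randPoint uses rejection sampling: each pass accepts with probability
    # pi/4 (circle area over bounding-square area), about 78.5%, so fewer than
    # 1.3 draws are needed per point on average.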
def randPoint(self) -> List[float]:
while True:
dx = uniform(-self.r, self.r)
dy = uniform(-self.r, self.r)
if (dx * dx + dy * dy) <= self.r2:
return [self.x + dx, self.y + dy]
# Your Solution object will be instantiated and called as such:
# obj = Solution(radius, x_center, y_center)
# param_1 = obj.randPoint()
| 22.967742
| 72
| 0.598315
|
49030fdd0e518c7d9ccba1a27e472fe5405f767b
| 5,658
|
py
|
Python
|
scripts/add_metadata.py
|
hsandt/pico-boots
|
1e0e7e48f42e27496a1af5535e3782ae4830a323
|
[
"MIT"
] | 6
|
2020-06-30T21:07:01.000Z
|
2021-02-14T07:41:19.000Z
|
scripts/add_metadata.py
|
hsandt/pico-boots
|
1e0e7e48f42e27496a1af5535e3782ae4830a323
|
[
"MIT"
] | null | null | null |
scripts/add_metadata.py
|
hsandt/pico-boots
|
1e0e7e48f42e27496a1af5535e3782ae4830a323
|
[
"MIT"
] | 1
|
2020-09-14T02:22:38.000Z
|
2020-09-14T02:22:38.000Z
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import argparse
import os
import shutil, tempfile
# This script does 3 things:
# 1. Add game title and author a.k.a. "header" at the top of source code for .p8.png
# 2. Add __label__ section from separate file for .p8.png if label_filepath is not '-' (to make up for the lack of --label option in p8tool)
# 3. Fix pico-8 version to 27 (instead of 8 with current p8tool behavior)
# Use it from your PICO-8 game project as a post-build step to complete your cartridge information
# Usage:
# add_metadata.py filepath label_filepath
# filepath: built game path
# label_filepath: path of file containing label data (pass '-' to preserve label from any existing file at output path overwritten during the build)
def add_title_author_info(filepath, title, author):
"""
Add game title and author at the top of source code
Additionally it fixes the version set by picotool to 27 (not required to save the game with correct metadata)
test.p8:
pico-8 cartridge // http://www.pico-8.com
version 16
__lua__
package={loaded={},_c={}}
package._c["module"]=function()
>>> add_title_author_info('test.p8', 'test game', 'tas')
test.p8:
pico-8 cartridge // http://www.pico-8.com
version 27
__lua__
-- test game
-- by tas
package={loaded={},_c={}}
package._c["module"]=function()
"""
with open(filepath, 'r') as f:
# create a temporary file with the modified content before it replaces the original file
temp_dir = tempfile.mkdtemp()
try:
temp_filepath = os.path.join(temp_dir, 'temp.p8')
with open(temp_filepath, 'w') as temp_f:
# flag meta data version replacement to avoid replacing an actual code line
# starting with "version " later
has_replaced_version = False
for line in f:
if not has_replaced_version and line.startswith('version '):
temp_f.write('version 27\n')
has_replaced_version = True
continue
temp_f.write(line)
if line.strip() == '__lua__':
# lua block detected, add title and author after the tag line, if any was passed
                        # if none was passed, we still call this method just so the version header gets updated above
if title:
temp_f.write(f'-- {title}\n')
if author:
temp_f.write(f'-- by {author}\n')
shutil.copy(temp_filepath, filepath)
finally:
shutil.rmtree(temp_dir)
# This function is currently unused because preserving label from metadata template is easier
def add_label_info(filepath, label_filepath):
"""
Replace label content inside the file with content from another line
test.p8:
__label__
0000
label.p8:
__label__
1234
>>> add_label_info('test.p8', 'label.p8')
test.p8:
__label__
1234
"""
label_lines = []
with open(label_filepath, 'r') as f:
inside_label = False
for line in f:
stripped_line = line.strip()
if not inside_label and stripped_line == '__label__':
inside_label = True
elif inside_label:
# stop if blank line or next section starts
if not stripped_line or line.startswith('__'):
break
# save label content (in case it's the last line, force newline)
label_lines.append(f'{stripped_line}\n')
with open(filepath, 'r') as f:
# create a temporary file with the modified content before it replaces the original file
temp_dir = tempfile.mkdtemp()
try:
temp_filepath = os.path.join(temp_dir, 'test.p8')
with open(temp_filepath, 'w') as temp_f:
inside_label = False
for line in f:
stripped_line = line.strip()
if inside_label:
# reset inside_label if blank line or next section starts
if not stripped_line or line.startswith('__'):
inside_label = False
else:
temp_f.write(line)
if stripped_line == '__label__':
inside_label = True
# immediately print all label lines
for label_line in label_lines:
temp_f.write(label_line)
shutil.copy(temp_filepath, filepath)
finally:
shutil.rmtree(temp_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add metadata on a p8tool output file.')
parser.add_argument('filepath', type=str, help='path of the file to process (.p8)')
parser.add_argument('label_filepath', type=str, help='path of the file containing the label content to copy')
parser.add_argument('title', type=str, help='game title')
parser.add_argument('author', type=str, help='author')
args = parser.parse_args()
add_title_author_info(args.filepath, args.title, args.author)
if args.label_filepath != '-':
add_label_info(args.filepath, args.label_filepath)
print(f"Added metadata (title: {args.title}, author: {args.author}) to {args.filepath} based on label {args.label_filepath}.")
| 39.84507
| 150
| 0.587487
|
cc593f43a9d002793063379abd3215c72aca2b7c
| 2,784
|
py
|
Python
|
app/models.py
|
lozog95/scrapper-graphic-cards
|
c1dbf2257764b4593910c791b67a5e024c9ef1da
|
[
"MIT"
] | null | null | null |
app/models.py
|
lozog95/scrapper-graphic-cards
|
c1dbf2257764b4593910c791b67a5e024c9ef1da
|
[
"MIT"
] | null | null | null |
app/models.py
|
lozog95/scrapper-graphic-cards
|
c1dbf2257764b4593910c791b67a5e024c9ef1da
|
[
"MIT"
] | null | null | null |
"""
Definition of models.
"""
from django.db import models
import json
import os
import app.utils as utils
from datetime import datetime
# JSON reader helper to read output JSON files
def json_reader(json_path):
with open(f"app/{json_path}") as w:
try:
output = json.loads(w.read())
except Exception as e:
output = None
return output
def clean_json_files(model):
td=datetime.now()
td=td.strftime("%Y%m%d")
if os.path.exists(f"app/{td}_{model}.json"):
print("Main file exists")
os.remove(f"app/{td}_{model}.json")
if os.path.exists(f"app/me_{td}_{model}.json"):
print("Me file exists")
os.remove(f"app/me_{td}_{model}.json")
if os.path.exists(f"app/euro_{td}_{model}.json"):
print("euro file exists")
os.remove(f"app/euro_{td}_{model}.json")
if os.path.exists(f"app/mm_{td}_{model}.json"):
print("Mm file exists")
os.remove(f"app/mm_{td}_{model}.json")
def output_lists(model):
output_euro = json_reader(utils.lookup_file(model=model, shop="euro"))
output_me = json_reader(utils.lookup_file(model=model, shop="me"))
output_mm = json_reader(utils.lookup_file(model=model, shop="mm"))
output = []
if output_me:
output= output + output_me
if output_euro:
output= output + output_euro
if output_mm:
output=output+output_mm
output.sort(key=lambda x: int(x['price']), reverse=False)
print(output)
output_all = utils.parse_json(output)
td=datetime.now()
td=td.strftime("%Y%m%d")
with open(f"app/{td}_{model}.json", "w") as f:
f.write(json.dumps(output_all))
f.close()
return output_all
def advise_checker(profitability):
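    # The thresholds below treat 'profitability' as inversely related to the
    # current price: values under 1 mean the card costs more than usual, values
    # over 1 mean it is cheaper. The Polish messages range from "the price is at
    # least half higher than usual, do not buy the card now" (< 0.5) up to
    # "the price is very low, buy now" (> 1.3).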
advise = None
if float(profitability) < 0.5:
advise = "Cena jest przynajmniej połowę wyższa niż zazwyczaj, nie kupuj obecnie karty"
elif float(profitability) < 0.7:
advise = "Cena jest znacznie wyższa niż wcześniej, wstrzymaj się z zakupem"
elif float(profitability) < 0.9:
advise = "Cena jest wyższa niż wcześniej, wstrzymaj się z zakupem"
print(advise)
elif float(profitability) < 1:
advise = "Cena jest nieznacznie wyższa niż wcześniej, jeśli możesz wstrzymaj się z zakupem aż spadnie"
print(advise)
elif float(profitability) < 1.1:
advise = "Cena jest niższa niż zazwyczaj, to może być dobra okazja"
elif float(profitability) < 1.2:
advise = "Cena jest znacząco niższa niż zazwyczaj, to może być dobra okazja"
elif float(profitability) < 1.3:
advise = "Cena jest niska, to dobry czas na kupno karty"
    else:  # profitability >= 1.3
advise = "Cena jest bardzo mała, kupuj teraz"
return advise
| 34.37037
| 110
| 0.647629
|
63f746951eb9b02c84e752303153534ec6638183
| 381
|
py
|
Python
|
src/test/data/pa3/AdditionalTestCase/UnitTest/Object_Method.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | null | null | null |
src/test/data/pa3/AdditionalTestCase/UnitTest/Object_Method.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | null | null | null |
src/test/data/pa3/AdditionalTestCase/UnitTest/Object_Method.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | null | null | null |
class A(object):
a:int = 1
x:str = "x in A"
def set_a(self:"A",b:int):
self.a = b
def get_a(self:"A") -> int:
return self.a
def set_x(self:"A",b:str):
self.x = b
def get_x(self:"A") -> str:
return self.x
a:A = None
a = A()
print(a.get_a())
print(a.get_x())
a.set_a(5)
a.set_x("hello word")
print(a.get_a())
print(a.get_x())
| 18.142857
| 31
| 0.52231
|
c82278d93b475dc02da31031a55404305fff9fde
| 838
|
py
|
Python
|
config.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | null | null | null |
config.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | 2
|
2020-04-22T11:26:13.000Z
|
2020-04-22T11:26:20.000Z
|
config.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
BASEDIR = basedir
DEBUG = False
SECRET_KEY = 'This is a secret key forexample'
# Note: do not add a trailing comma to this assignment, otherwise the value
# becomes a tuple and SQLAlchemy raises AttributeError: 'tuple' object has no attribute 'drivername'
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:rootpassword@127.0.0.1/test?charset=utf8"  # base database
SQLALCHEMY_BINDS = {
    'base': "mysql+pymysql://root:rootpassword@127.0.0.1/test?charset=utf8",  # base database
    'website': "mysql+pymysql://root:rootpassword@127.0.0.1/website?charset=utf8",  # website database
    'otherdb': "mysql+pymysql://root:rootpassword@127.0.0.1/otherdb?charset=utf8",  # other database
}
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_AUTOFLUSH = False
SQLALCHEMY_ECHO = True
REDIS_URL = 'redis://:@127.0.0.1:6379'
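# Usage sketch (not part of this module): a Flask application would typically
# load these settings with app.config.from_object(). The use of Flask-SQLAlchemy
# below is an illustrative assumption.
def _config_usage_sketch():
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    app = Flask(__name__)
    app.config.from_object('config')  # picks up the upper-case names above
    db = SQLAlchemy(app)
    return app, db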
| 34.916667
| 98
| 0.745823
|
97bb15bdb693d4dacf6e0fda13f6f281263718e3
| 11,189
|
py
|
Python
|
tact/classifiers.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | null | null | null |
tact/classifiers.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | null | null | null |
tact/classifiers.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | 2
|
2020-05-18T19:52:32.000Z
|
2022-01-24T10:07:35.000Z
|
# -*- coding: utf-8 -*-
"""
This module contains functions which create and train classifiers, as well as
saving them to and reading them from disk.
A classifier function takes a DataFrame containing training data, a list
describing preprocessing steps, and a list of features. It will return a
trained scikit-learn Pipeline containing the preprocessing steps and
classifier.
Classifiers are saved to disk using dill, as Python's pickle module does not
correctly serialise Keras classifiers. It should be noted that Keras does not
recommend pickling for neural network serialisation, but no issues have been
observed so far using the dill library.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
from collections import namedtuple
from sklearn.pipeline import make_pipeline
def get_preprocessor_flags(pre, sample_weight):
    """Build fit kwargs that pass sample_weight to preprocessors accepting it."""
    from inspect import getargspec
    return {(type(p).__name__.lower() + "__sample_weight"): sample_weight
            for p in pre if "sample_weight" in getargspec(p.fit)[0]}
def evaluate_mva(df, mva):
"""
Evaluate the response of a trained classifier.
Parameters
----------
df : DataFrame, shape= [n_training_samples, n_features]
DataFrame containing features.
mva
Trained classifier.
Returns
-------
Series or array
Classifier response values corresponding to each entry in df.
Notes
-----
The classifier response values are taken from the mva object's
predict_proba method. By default this is passed the df DataFrame directly
but in some cases this is not supported and df is passed as a numpy array.
In the former case this function returns a Pandas Series and in the latter
a 1D array. This fallback has only been tested for Keras classifiers.
"""
# Keras doesn't like DataFrames, error thrown depends on Keras version
try:
return mva.predict_proba(df)[:, 1]
except (KeyError, UnboundLocalError):
return mva.predict_proba(df.as_matrix())[:, 1]
def mlp(df_train, pre, y, serialized_model, sample_weight=None,
model_params={}, early_stopping_params=None, compile_params={},
lr_reduction_params=None):
"""
Train using a multi-layer perceptron (MLP).
Parameters
----------
df_train : array-like, shape = [n_training_samples, n_features]
DataFrame containing training features.
pre : list
List containing preprocessing steps.
y : array-like, shape = [n_training_samples]
Target values (integers in classification, real numbers in regression).
For classification, labels must correspond to classes.
serialized_model : dict
Keras model serialized as a dict.
sample_weight : array-like, shape = [n_training_samples]
Sample weights. If None, then samples are equally weighted.
model_params : dict
Keyword arguments passed to
keras.wrappers.scikit_learn.KerasClassifier.
early_stopping_params : dict
Keyword arguments passed to keras.callbacks.EarlyStopping. If None, no
early stopping mechanism is used.
compile_params : dict
Keyword arguments passed to keras.models.Sequential.compile.
Returns
-------
Pipeline
Scikit-learn pipeline containing the trained classifier and
preprocessing steps.
Notes
-----
This function requires Keras to be available. Additional configuration can
be configured using Keras' configuration file. See the Keras documentation
for more information.
Keras should outperform scikit-learn's internal MLP implementation in most
cases, and supports sample weights while training.
"""
def build_model():
from keras.layers import deserialize
# Set input layer shape
serialized_model["config"][0]["config"]["batch_input_shape"] \
= (None, df_train.shape[1])
model = deserialize(serialized_model)
model.compile(**compile_params)
return model
from keras.wrappers.scikit_learn import KerasClassifier
callbacks = []
if lr_reduction_params is not None:
from keras.callbacks import ReduceLROnPlateau
callbacks.append(ReduceLROnPlateau(**lr_reduction_params))
if early_stopping_params is not None:
from keras.callbacks import EarlyStopping
callbacks.append(EarlyStopping(**early_stopping_params))
ann = KerasClassifier(build_fn=build_model, **model_params)
mva = make_pipeline(*(pre + [ann]))
# Keras does not like pandas
try:
df_train = df_train.as_matrix()
except AttributeError:
pass
try:
y = y.as_matrix()
except AttributeError:
pass
try:
sample_weight = sample_weight.as_matrix()
except AttributeError:
pass
mva.fit(df_train, y,
kerasclassifier__sample_weight=sample_weight,
kerasclassifier__callbacks=callbacks,
kerasclassifier__validation_split=0.25,
**get_preprocessor_flags(pre, sample_weight))
return mva
def bdt_grad(df_train, pre, y, sample_weight=None, **kwargs):
"""
Train using a gradient boosted decision tree using scikit-learn's
internal implementation.
Parameters
----------
df_train : array-like, shape = [n_training_samples, n_features]
DataFrame containing training features.
pre : list
List containing preprocessing steps.
y : array-like, shape = [n_training_samples]
Target values (integers in classification, real numbers in regression).
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_training_samples]
Sample weights. If None, then samples are equally weighted.
kwargs : dict
Additional keyword arguments passed to
sklearn.ensemble.GradientBoostingClassifier.
Returns
-------
Pipeline
Scikit-learn pipeline containing the trained classifier and
preprocessing steps.
"""
from sklearn.ensemble import GradientBoostingClassifier
bdt = GradientBoostingClassifier(**kwargs)
mva = make_pipeline(*(pre + [bdt]))
mva.fit(df_train, y,
gradientboostingclassifier__sample_weight=sample_weight)
return mva
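# Usage sketch (not part of the original module): train the gradient-boosted
# classifier above on a small synthetic dataset and evaluate its response with
# evaluate_mva(). The feature names and the single StandardScaler preprocessing
# step are illustrative assumptions.
def _bdt_grad_usage_sketch():
    import numpy as np
    import pandas as pd
    from sklearn.preprocessing import StandardScaler

    rng = np.random.RandomState(0)
    df_train = pd.DataFrame(rng.normal(size=(100, 2)), columns=["f1", "f2"])
    y = (df_train["f1"] + df_train["f2"] > 0).astype(int)

    mva = bdt_grad(df_train, [StandardScaler()], y, n_estimators=50)
    return evaluate_mva(df_train, mva)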
def bdt_xgb(df_train, pre, y, sample_weight=None, **kwargs):
"""
Train using a gradient boosted decision tree with the XGBoost library.
Parameters
----------
df_train : array-like, shape = [n_training_samples, n_features]
DataFrame containing training features.
pre : list
List containing preprocessing steps.
y : array-like, shape = [n_training_samples]
Target values (integers in classification, real numbers in regression).
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_training_samples]
Sample weights. If None, then samples are equally weighted.
kwargs : dict
Additional keyword arguments passed to xgboost.XGBClassifier.
Returns
-------
Pipeline
Scikit-learn pipeline containing the trained classifier and
preprocessing steps.
Notes
-----
Requires xgboost.
"""
from xgboost import XGBClassifier
bdt = XGBClassifier(**kwargs)
mva = make_pipeline(*(pre + [bdt]))
mva.fit(df_train, y, xgbclassifier__sample_weight=sample_weight)
# eval_metric="auc",
# early_stopping_rounds=50,
# eval_set=[(df_test, sample_weight)])
return mva
def bdt_lgbm(df_train, pre, y, sample_weight=None, **kwargs):
"""
Train using a gradient boosted decision tree with the LightGBM library.
Parameters
----------
df_train : array-like, shape = [n_training_samples, n_features]
DataFrame containing training features.
pre : list
List containing preprocessing steps.
y : array-like, shape = [n_training_samples]
Target values (integers in classification, real numbers in regression).
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_training_samples]
Sample weights. If None, then samples are equally weighted.
kwargs : dict
Additional keyword arguments passed to lightgbm.LGBMClassifier()
Returns
-------
Pipeline
Scikit-learn pipeline containing the trained classifier and
preprocessing steps.
Notes
-----
    Requires lightgbm.
"""
from lightgbm import LGBMClassifier
bdt = LGBMClassifier(**kwargs)
mva = make_pipeline(*(pre + [bdt]))
mva.fit(df_train, y, lgbmclassifier__sample_weight=sample_weight)
return mva
def random_forest(df_train, pre, y, sample_weight=None, **kwargs):
"""
Train using a random forest.
Parameters
----------
df_train : array-like, shape = [n_training_samples, n_features]
DataFrame containing training features.
pre : list
List containing preprocessing steps.
y : array-like, shape = [n_training_samples]
Target values (integers in classification, real numbers in regression).
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_training_samples]
Sample weights. If None, then samples are equally weighted.
kwargs : dict
        Additional keyword arguments passed to sklearn.ensemble.RandomForestClassifier.
Returns
-------
Pipeline
Scikit-learn pipeline containing the trained classifier and
preprocessing steps.
"""
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(**kwargs)
mva = make_pipeline(*(pre + [rf]))
mva.fit(df_train, y, randomforestclassifier__sample_weight=sample_weight)
return mva
def save_classifier(mva, cfg=None, filename="mva"):
"""
Write a trained classifier pipeline and global configuration to an external
file.
Parameters
----------
mva : trained classifier
        Classifier to be saved.
cfg : dict, optional
Classifier configuration.
filename : string, optional
Name of output file (including directory). Extension will be set
automatically.
Returns
-------
None
Notes
-----
Requires dill.
"""
import dill
SavedClassifier = namedtuple("SavedClassifier", "cfg mva")
# Temporarily boost the recursion limit
tmp = sys.getrecursionlimit()
sys.setrecursionlimit(9999)
dill.dump(SavedClassifier(cfg, mva), open("{}.pkl".format(filename), "wb"))
sys.setrecursionlimit(tmp)
def load_classifier(f):
"""
Load a trained classifier from a pickle file.
Parameters
----------
f : file
File classifier is to be loaded from.
Returns
-------
mva: Pipeline
Scikit-learn Pipeline containing full classifier stack.
cfg:
Configuration associated with mva. None if no configuration was
stored.
Notes
-----
Requires dill.
"""
import dill
sc = dill.load(f)
return sc.mva, sc.cfg
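# Usage sketch (not part of the original module): round-trip a trained
# classifier through save_classifier() and load_classifier(). The file name
# "my_mva" is an arbitrary example.
def _persistence_usage_sketch(mva, cfg=None):
    save_classifier(mva, cfg, filename="my_mva")
    with open("my_mva.pkl", "rb") as f:
        loaded_mva, loaded_cfg = load_classifier(f)
    return loaded_mva, loaded_cfg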
| 29.062338
| 79
| 0.682813
|
51b69e2eba017658a7a40b090527245fc074cbbd
| 12,572
|
py
|
Python
|
python/ccxt/async_support/base/exchange.py
|
amalcaraz/ccxt
|
54857b3514b857ba3b26542f7f8ca49bad7b7bb6
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/base/exchange.py
|
amalcaraz/ccxt
|
54857b3514b857ba3b26542f7f8ca49bad7b7bb6
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/base/exchange.py
|
amalcaraz/ccxt
|
54857b3514b857ba3b26542f7f8ca49bad7b7bb6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.28.77'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle(self.rateLimit)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
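# Usage sketch (not part of the original module): the intended lifecycle for
# this async base class is to load markets, make requests, then release the
# aiohttp session with close(). 'binance' is only an example exchange id.
async def _exchange_usage_sketch():
    import ccxt.async_support as ccxt_async
    exchange = ccxt_async.binance({'enableRateLimit': True})
    try:
        markets = await exchange.load_markets()
        return len(markets)
    finally:
        await exchange.close()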
| 41.906667
| 355
| 0.61287
|
bc02bbc38e04d660e3f2636f026843811357cd19
| 2,405
|
py
|
Python
|
APIScripts/News API/News_API.py
|
tanvi355/Awesome_Python_Scripts
|
dd906dff3c311e260851f8720cbce77d7398be43
|
[
"MIT"
] | 3
|
2021-06-22T07:12:31.000Z
|
2022-02-27T18:40:16.000Z
|
APIScripts/News API/News_API.py
|
tanvi355/Awesome_Python_Scripts
|
dd906dff3c311e260851f8720cbce77d7398be43
|
[
"MIT"
] | null | null | null |
APIScripts/News API/News_API.py
|
tanvi355/Awesome_Python_Scripts
|
dd906dff3c311e260851f8720cbce77d7398be43
|
[
"MIT"
] | 2
|
2021-10-03T16:22:08.000Z
|
2021-10-03T17:35:14.000Z
|
from newsapi import NewsApiClient # pip install newsapi-python
import pycountry # pip install pycountry
# Function to check whether the given country exists or not.
def Check(input_countries):
input_countries = [input_countries.strip()]
countries = {}
for country in pycountry.countries:
# store all the alpha_2 values of all the countries
countries[country.name] = country.alpha_2
codes = [countries.get(country.title(), 'NO')
             for country in input_countries]  # Check whether the input country has an alpha_2 code.
if codes[0] == "NO":
return "Country not found, Try searching..."
return None
if __name__ == "__main__":
while 1:
        # Get your API key from News API
newsapi = NewsApiClient(api_key='Your API Key')
input_country = input("Country: ") # Taking country name input
input_countries = [f'{input_country.strip()}']
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_2
codes = [countries.get(country.title(), 'Unknown code')
                 for country in input_countries]  # If the code is not found, 'Unknown code' is used
# Choose Category of the news.
option = input(
"Which category are you interested in?\n1.Business\n2.Entertainment\n3.General\n4.Health\n5.Science\n6.Technology\n\nEnter here: ")
top_headlines = newsapi.get_top_headlines(
category=f'{option.lower()}', language='en', country=f'{codes[0].lower()}') # getting top headlines from all the news channels
Headlines = top_headlines['articles'] # fetch the top articles
if Headlines:
            # Titles arrive as "<headline> - <source>"; split on the last '-' to print them neatly
for articles in Headlines:
b = articles['title'][::-1].index("-")
if "news" in (articles['title'][-b+1:]).lower():
print(
f"{articles['title'][-b+1:]}: {articles['title'][:-b-2]}.")
else:
print(
f"{articles['title'][-b+1:]} News: {articles['title'][:-b-2]}.")
else:
print(f"No articles found for {input_country}, Try for others...")
option = input("Do you want to search again[Yes/No]?")
if option.lower() == 'yes':
continue
else:
exit()
| 40.083333
| 143
| 0.592931
|
9f2cd558c4cc0f49037bb8ec1e2064afab8bd9cc
| 119
|
py
|
Python
|
nachbarstrom/inference/roof_provider/__init__.py
|
tomasruizt/python-nachbarstrom-dl-segmentation
|
7b6f6fa28ab078e372def5549c7e0a5826b208dd
|
[
"Apache-2.0"
] | null | null | null |
nachbarstrom/inference/roof_provider/__init__.py
|
tomasruizt/python-nachbarstrom-dl-segmentation
|
7b6f6fa28ab078e372def5549c7e0a5826b208dd
|
[
"Apache-2.0"
] | null | null | null |
nachbarstrom/inference/roof_provider/__init__.py
|
tomasruizt/python-nachbarstrom-dl-segmentation
|
7b6f6fa28ab078e372def5549c7e0a5826b208dd
|
[
"Apache-2.0"
] | null | null | null |
from .roof_provider import RoofProvider, MockRoofProvider
from .tensorflow_roof_provider import TensorFlowRoofProvider
| 39.666667
| 60
| 0.89916
|
a38ae5552cb01810fdf8ad98b764d3097c6f1a49
| 18,989
|
py
|
Python
|
astropy/io/fits/tests/test_fitstime.py
|
Chelwin/astropy
|
8e5bf8d93da935b4414ecc94384088171c4fb00e
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/io/fits/tests/test_fitstime.py
|
Chelwin/astropy
|
8e5bf8d93da935b4414ecc94384088171c4fb00e
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/io/fits/tests/test_fitstime.py
|
Chelwin/astropy
|
8e5bf8d93da935b4414ecc94384088171c4fb00e
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from . import FitsTestCase
from astropy.io.fits.fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword
from astropy.coordinates import EarthLocation
from astropy.io import fits
from astropy.table import Table, QTable
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
class TestFitsTime(FitsTestCase):
def setup_class(self):
self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'])
self.time_3d = np.array([[[1, 2], [1, 3], [3, 4]]])
def test_is_time_column_keyword(self):
# Time column keyword without column number
assert is_time_column_keyword('TRPOS') is False
# Global time column keyword
assert is_time_column_keyword('TIMESYS') is False
# Valid time column keyword
assert is_time_column_keyword('TRPOS12') is True
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_loc(self, table_types):
"""
Test all the unusual conditions for locations of ``Time``
columns in a ``Table``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t['b'] = Time(self.time, format='isot', scale='tt')
# Check that vectorized location is stored using Green Bank convention
t['a'].location = EarthLocation([1., 2.], [2., 3.], [3., 4.],
unit='Mm')
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
t.write(self.temp('time.fits'), format='fits', overwrite=True)
# Check that a blank value for the "TRPOSn" keyword is not generated
hdr = fits.getheader(self.temp('time.fits'), 1)
assert hdr.get('TRPOS2', None) is None
with pytest.warns(AstropyUserWarning, match='Time column reference position '
'"TRPOSn" is not specified. The default value for it is '
'"TOPOCENTER", and the observatory position has been specified.'):
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
assert tm['b'].location == t['b'].location
# Check that multiple Time columns with different locations raise an exception
t['a'].location = EarthLocation(1, 2, 3)
t['b'].location = EarthLocation(2, 3, 4)
with pytest.raises(ValueError) as err:
table, hdr = time_to_fits(t)
assert 'Multiple Time Columns with different geocentric' in str(err.value)
# Check that Time column with no location specified will assume global location
t['b'].location = None
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Time Column "b" has no specified '
'location, but global Time Position '
'is present')
# Check that multiple Time columns with same location can be written
t['b'].location = EarthLocation(1, 2, 3)
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 0
# Check compatibility of Time Scales and Reference Positions
for scale in BARYCENTRIC_SCALES:
t.replace_column('a', getattr(t['a'], scale))
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Earth Location "TOPOCENTER" '
'for Time Column')
# Check that multidimensional vectorized location (ndim=3) is stored
# using Green Bank convention.
t = table_types()
location = EarthLocation([[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]], unit='Mm')
t['a'] = Time(self.time_3d, format='jd', location=location)
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
# Check that singular location with ndim>1 can be written
t['a'] = Time(self.time, location=EarthLocation([[[1.]]], [[[2.]]],
[[[3.]]], unit='Mm'))
table, hdr = time_to_fits(t)
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert tm['a'].location == t['a'].location
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_header(self, table_types):
"""
Test the header and metadata returned by ``time_to_fits``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(-2446354,
4237210, 4077985, unit='m'))
t['b'] = Time([1,2], format='cxcsec', scale='tt')
ideal_col_hdr = {'OBSGEO-X' : t['a'].location.x.value,
'OBSGEO-Y' : t['a'].location.y.value,
'OBSGEO-Z' : t['a'].location.z.value}
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
# Check the global time keywords in hdr
for key, value in GLOBAL_TIME_INFO.items():
assert hdr[key] == value[0]
assert hdr.comments[key] == value[1]
hdr.remove(key)
for key, value in ideal_col_hdr.items():
assert hdr[key] == value
hdr.remove(key)
# Check the column-specific time metadata
coord_info = table.meta['__coordinate_columns__']
for colname in coord_info:
assert coord_info[colname]['coord_type'] == t[colname].scale.upper()
assert coord_info[colname]['coord_unit'] == 'd'
assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'
assert coord_info['b']['time_ref_pos'] == None
assert len(hdr) == 0
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_fits_to_time_meta(self, table_types):
"""
Test that the relevant global time metadata is read into
``Table.meta`` as ``Time``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t.meta['DATE'] = '1999-01-01T00:00:00'
t.meta['MJD-OBS'] = 56670
# Test for default write behavior (full precision) and read it
# back using native astropy objects; thus, ensure its round-trip
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].format == 'fits'
# Default time scale according to the FITS standard is UTC
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].format == 'mjd'
assert tm.meta['MJD-OBS'].scale == 'utc'
# Explicitly specified Time Scale
t.meta['TIMESYS'] = 'ET'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]
# Test for conversion of time data to its value, as defined by its format
t['a'].info.serialize_method['fits'] = 'formatted_value'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits')
# Test DATE
assert not isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'] == t.meta['DATE']
# Test MJD-xxx
assert not isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']
assert (tm['a'] == t['a'].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_loc_unit(self, table_types):
"""
Test that ``location`` specified by using any valid unit
(length/angle) in ``Time`` columns gets stored in FITS
as ITRS Cartesian coordinates (X, Y, Z), each in m.
Test that it round-trips through FITS.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(1,2,3, unit='km'))
table, hdr = time_to_fits(t)
# Check the header
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Check the round-trip of location
assert (tm['a'].location == t['a'].location).all()
assert tm['a'].location.x.value == t['a'].location.x.to_value(unit='m')
assert tm['a'].location.y.value == t['a'].location.y.to_value(unit='m')
assert tm['a'].location.z.value == t['a'].location.z.to_value(unit='m')
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits(self, table_types):
"""
Test that FITS table with time columns (standard compliant)
can be read by io.fits as a table with Time columns.
This tests the following:
1. The special-case where a column has the name 'TIME' and a
time unit
2. Time from Epoch (Reference time) is appropriately converted.
3. Coordinate columns (corresponding to coordinate keywords in the header)
other than time, that is, spatial coordinates, are not mistaken
to be time.
"""
filename = self.data('chandra_time.fits')
with pytest.warns(AstropyUserWarning, match='Time column "time" reference '
'position will be ignored'):
tm = table_types.read(filename, astropy_native=True)
# Test case 1
assert isinstance(tm['time'], Time)
assert tm['time'].scale == 'tt'
assert tm['time'].format == 'mjd'
non_native = table_types.read(filename)
# Test case 2
ref_time = Time(non_native.meta['MJDREF'], format='mjd',
scale=non_native.meta['TIMESYS'].lower())
delta_time = TimeDelta(non_native['time'])
assert (ref_time + delta_time == tm['time']).all()
# Test case 3
for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']:
assert not isinstance(tm[colname], Time)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_datetime(self, table_types):
"""
Test that ISO-8601 Datetime String Columns are read correctly.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TCG',
time_ref_pos='GEOCENTER', array=self.time)
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].scale == 'tcg'
assert tm['datetime'].format == 'fits'
assert (tm['datetime'] == self.time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location(self, table_types):
"""
Test that geocentric/geodetic observatory position is read
properly, as and when it is specified.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
# Observatory position in ITRS Cartesian coordinates (geocentric)
cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210),
('OBSGEO-Z', 4077985)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.x.value == -2446354
assert tm['datetime'].location.y.value == 4237210
assert tm['datetime'].location.z.value == 4077985
# Observatory position in geodetic coordinates
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.lon.value == 0
assert tm['datetime'].location.lat.value == 0
assert np.isclose(tm['datetime'].location.height.value, 0,
rtol=0, atol=1e-9)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_scale(self, table_types):
"""
Test handling of 'GPS' and 'LOCAL' time scales which are
recognized by the FITS standard but are not native to astropy.
"""
# GPS scale column
gps_time = np.array([630720013, 630720014])
c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS',
coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time)
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert 'FITS recognized time scale value "GPS"' in str(w[0].message)
assert isinstance(tm['gps_time'], Time)
assert tm['gps_time'].format == 'gps'
assert tm['gps_time'].scale == 'tai'
assert (tm['gps_time'].value == gps_time).all()
# LOCAL scale column
local_time = np.array([1, 2])
c = fits.Column(name='local_time', format='D', unit='d',
coord_type='LOCAL', coord_unit='d',
time_ref_pos='RELOCATABLE', array=local_time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['local_time'], Time)
assert tm['local_time'].format == 'mjd'
assert tm['local_time'].scale == 'local'
assert (tm['local_time'].value == local_time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location_warnings(self, table_types):
"""
Test warnings for time column reference position.
"""
# Time reference position "TOPOCENTER" without corresponding
# observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert ('observatory position is not properly specified' in
str(w[0].message))
# Warning for default value of time reference position "TOPOCENTER"
# not generated when there is no specified observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 0
| 42.961538
| 92
| 0.586339
|
70f5fbdfad920909d244fc8df3bcff6048a44c68
| 2,918
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/estimator/estimator_lib.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/python/estimator/estimator_lib.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/python/estimator/estimator_lib.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator: High level tools for working with models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.python.estimator.canned.baseline import BaselineClassifier
from tensorflow.python.estimator.canned.baseline import BaselineRegressor
from tensorflow.python.estimator.canned.boosted_trees import BoostedTreesClassifier
from tensorflow.python.estimator.canned.boosted_trees import BoostedTreesRegressor
from tensorflow.python.estimator.canned.dnn import DNNClassifier
from tensorflow.python.estimator.canned.dnn import DNNRegressor
from tensorflow.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.python.estimator.canned.linear import LinearClassifier
from tensorflow.python.estimator.canned.linear import LinearRegressor
from tensorflow.python.estimator.canned.parsing_utils import classifier_parse_example_spec
from tensorflow.python.estimator.canned.parsing_utils import regressor_parse_example_spec
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.estimator import VocabInfo
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.estimator.export import export_lib as export
from tensorflow.python.estimator.exporter import Exporter
from tensorflow.python.estimator.exporter import FinalExporter
from tensorflow.python.estimator.exporter import LatestExporter
from tensorflow.python.estimator.inputs import inputs
from tensorflow.python.estimator.keras import model_to_estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.training import EvalSpec
from tensorflow.python.estimator.training import train_and_evaluate
from tensorflow.python.estimator.training import TrainSpec
# pylint: enable=unused-import,line-too-long,wildcard-import
| 56.115385
| 95
| 0.817341
|
d0c80e93f8774f0b9ff6d74d345a08edde422a0a
| 7,747
|
py
|
Python
|
cirq/google/devices/known_devices_test.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | null | null | null |
cirq/google/devices/known_devices_test.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | null | null | null |
cirq/google/devices/known_devices_test.py
|
jeffreygrover/Cirq
|
17d94cf45f6b09ddf40048ddbb173e50fa293995
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cirq
import cirq.google as cg
import cirq.google.common_serializers as cgc
def test_foxtail_qubits():
expected_qubits = []
for i in range(0, 2):
for j in range(0, 11):
expected_qubits.append(cirq.GridQubit(i, j))
assert set(expected_qubits) == cirq.google.Foxtail.qubits
def test_foxtail_device_proto():
assert str(cirq.google.devices.known_devices.FOXTAIL_PROTO) == """\
valid_gate_sets {
name: "xmon"
valid_gates {
id: "xy"
number_of_qubits: 1
valid_args {
name: "axis_half_turns"
type: FLOAT
}
valid_args {
name: "half_turns"
type: FLOAT
}
gate_duration_picos: 20000
}
valid_gates {
id: "z"
number_of_qubits: 1
valid_args {
name: "half_turns"
type: FLOAT
}
valid_args {
name: "type"
type: STRING
}
}
valid_gates {
id: "cz"
number_of_qubits: 2
valid_args {
name: "half_turns"
type: FLOAT
}
gate_duration_picos: 50000
valid_targets: "2_qubit_targets"
}
valid_gates {
id: "meas"
valid_args {
name: "key"
type: STRING
}
valid_args {
name: "invert_mask"
type: REPEATED_BOOLEAN
}
gate_duration_picos: 1000000
valid_targets: "meas_targets"
}
}
valid_qubits: "0_0"
valid_qubits: "0_1"
valid_qubits: "0_2"
valid_qubits: "0_3"
valid_qubits: "0_4"
valid_qubits: "0_5"
valid_qubits: "0_6"
valid_qubits: "0_7"
valid_qubits: "0_8"
valid_qubits: "0_9"
valid_qubits: "0_10"
valid_qubits: "1_0"
valid_qubits: "1_1"
valid_qubits: "1_2"
valid_qubits: "1_3"
valid_qubits: "1_4"
valid_qubits: "1_5"
valid_qubits: "1_6"
valid_qubits: "1_7"
valid_qubits: "1_8"
valid_qubits: "1_9"
valid_qubits: "1_10"
valid_targets {
name: "meas_targets"
target_ordering: SUBSET_PERMUTATION
}
valid_targets {
name: "2_qubit_targets"
target_ordering: SYMMETRIC
targets {
ids: "0_0"
ids: "0_1"
}
targets {
ids: "0_0"
ids: "1_0"
}
targets {
ids: "0_1"
ids: "0_2"
}
targets {
ids: "0_1"
ids: "1_1"
}
targets {
ids: "0_2"
ids: "0_3"
}
targets {
ids: "0_2"
ids: "1_2"
}
targets {
ids: "0_3"
ids: "0_4"
}
targets {
ids: "0_3"
ids: "1_3"
}
targets {
ids: "0_4"
ids: "0_5"
}
targets {
ids: "0_4"
ids: "1_4"
}
targets {
ids: "0_5"
ids: "0_6"
}
targets {
ids: "0_5"
ids: "1_5"
}
targets {
ids: "0_6"
ids: "0_7"
}
targets {
ids: "0_6"
ids: "1_6"
}
targets {
ids: "0_7"
ids: "0_8"
}
targets {
ids: "0_7"
ids: "1_7"
}
targets {
ids: "0_8"
ids: "0_9"
}
targets {
ids: "0_8"
ids: "1_8"
}
targets {
ids: "0_9"
ids: "0_10"
}
targets {
ids: "0_9"
ids: "1_9"
}
targets {
ids: "0_10"
ids: "1_10"
}
targets {
ids: "1_0"
ids: "1_1"
}
targets {
ids: "1_1"
ids: "1_2"
}
targets {
ids: "1_2"
ids: "1_3"
}
targets {
ids: "1_3"
ids: "1_4"
}
targets {
ids: "1_4"
ids: "1_5"
}
targets {
ids: "1_5"
ids: "1_6"
}
targets {
ids: "1_6"
ids: "1_7"
}
targets {
ids: "1_7"
ids: "1_8"
}
targets {
ids: "1_8"
ids: "1_9"
}
targets {
ids: "1_9"
ids: "1_10"
}
}
"""
def test_multiple_gate_sets():
halfPiGateSet = cg.serializable_gate_set.SerializableGateSet(
gate_set_name='half_pi_gateset',
serializers=[
*cgc.SINGLE_QUBIT_HALF_PI_SERIALIZERS, cgc.MEASUREMENT_SERIALIZER
],
deserializers=[
*cgc.SINGLE_QUBIT_HALF_PI_DESERIALIZERS,
cgc.MEASUREMENT_DESERIALIZER
],
)
durations_dict = {
'xy_pi': 20_000,
'xy_half_pi': 10_000,
'xy': 53_000,
'cz': 11_000,
'meas': 14_141
}
test_proto = cg.devices.known_devices.create_device_proto_from_diagram(
"aa\naa", [cg.gate_sets.XMON, halfPiGateSet], durations_dict)
assert str(test_proto) == """\
valid_gate_sets {
name: "xmon"
valid_gates {
id: "xy"
number_of_qubits: 1
valid_args {
name: "axis_half_turns"
type: FLOAT
}
valid_args {
name: "half_turns"
type: FLOAT
}
gate_duration_picos: 53000
}
valid_gates {
id: "z"
number_of_qubits: 1
valid_args {
name: "half_turns"
type: FLOAT
}
valid_args {
name: "type"
type: STRING
}
}
valid_gates {
id: "cz"
number_of_qubits: 2
valid_args {
name: "half_turns"
type: FLOAT
}
gate_duration_picos: 11000
valid_targets: "2_qubit_targets"
}
valid_gates {
id: "meas"
valid_args {
name: "key"
type: STRING
}
valid_args {
name: "invert_mask"
type: REPEATED_BOOLEAN
}
gate_duration_picos: 14141
valid_targets: "meas_targets"
}
}
valid_gate_sets {
name: "half_pi_gateset"
valid_gates {
id: "xy_pi"
number_of_qubits: 1
valid_args {
name: "axis_half_turns"
type: FLOAT
}
gate_duration_picos: 20000
}
valid_gates {
id: "xy_half_pi"
number_of_qubits: 1
valid_args {
name: "axis_half_turns"
type: FLOAT
}
gate_duration_picos: 10000
}
valid_gates {
id: "meas"
valid_args {
name: "key"
type: STRING
}
valid_args {
name: "invert_mask"
type: REPEATED_BOOLEAN
}
gate_duration_picos: 14141
valid_targets: "meas_targets"
}
}
valid_qubits: "0_0"
valid_qubits: "0_1"
valid_qubits: "1_0"
valid_qubits: "1_1"
valid_targets {
name: "meas_targets"
target_ordering: SUBSET_PERMUTATION
}
valid_targets {
name: "2_qubit_targets"
target_ordering: SYMMETRIC
targets {
ids: "0_0"
ids: "0_1"
}
targets {
ids: "0_0"
ids: "1_0"
}
targets {
ids: "0_1"
ids: "1_1"
}
targets {
ids: "1_0"
ids: "1_1"
}
}
"""
def test_json_dict():
assert cg.Foxtail._json_dict_() == {
'cirq_type': '_NamedConstantXmonDevice',
'constant': 'cirq.google.Foxtail',
'measurement_duration': cirq.Duration(nanos=1000),
'exp_w_duration': cirq.Duration(nanos=20),
'exp_11_duration': cirq.Duration(nanos=50),
'qubits': sorted(cirq.google.Foxtail.qubits)
}
assert cirq.google.Bristlecone._json_dict_() == {
'cirq_type': '_NamedConstantXmonDevice',
'constant': 'cirq.google.Bristlecone',
'measurement_duration': cirq.Duration(nanos=1000),
'exp_w_duration': cirq.Duration(nanos=20),
'exp_11_duration': cirq.Duration(nanos=50),
'qubits': sorted(cirq.google.Bristlecone.qubits)
}
def test_sycamore_device():
q0 = cirq.GridQubit(5, 4)
q1 = cirq.GridQubit(5, 5)
syc = cirq.FSimGate(theta=np.pi / 2, phi=np.pi / 6)(q0, q1)
sqrt_iswap = cirq.FSimGate(theta=np.pi / 4, phi=0)(q0, q1)
cg.Sycamore.validate_operation(syc)
cg.Sycamore.validate_operation(sqrt_iswap)
assert cg.Sycamore.duration_of(syc) == cirq.Duration(nanos=12)
assert cg.Sycamore.duration_of(sqrt_iswap) == cirq.Duration(nanos=32)
| 19.128395
| 77
| 0.603459
|
e95933b4111f5549213f8cd9703f976ed05c71d6
| 9,652
|
py
|
Python
|
reconcile/ocp_release_ecr_mirror.py
|
aproddut/qontract-reconcile
|
3516426d9aa0151e32c560090afa9e04e7d31871
|
[
"Apache-2.0"
] | null | null | null |
reconcile/ocp_release_ecr_mirror.py
|
aproddut/qontract-reconcile
|
3516426d9aa0151e32c560090afa9e04e7d31871
|
[
"Apache-2.0"
] | null | null | null |
reconcile/ocp_release_ecr_mirror.py
|
aproddut/qontract-reconcile
|
3516426d9aa0151e32c560090afa9e04e7d31871
|
[
"Apache-2.0"
] | null | null | null |
import base64
import logging
import sys
from urllib.parse import urlparse
from sretoolbox.container import Image
from utils.oc import OC
from utils.oc import OC_Map
from utils.ocm import OCMMap
from reconcile import queries
from utils.aws_api import AWSApi
from reconcile.status import ExitCodes
QONTRACT_INTEGRATION = 'ocp-release-ecr-mirror'
LOG = logging.getLogger(__name__)
class OcpReleaseEcrMirrorError(Exception):
"""
Used by the OcpReleaseEcrMirror.
"""
class OcpReleaseEcrMirror:
def __init__(self, dry_run, instance):
self.dry_run = dry_run
self.settings = queries.get_app_interface_settings()
cluster_info = instance['hiveCluster']
hive_cluster = instance['hiveCluster']['name']
# Getting the OCM Client for the hive cluster
ocm_map = OCMMap(clusters=[cluster_info],
integration=QONTRACT_INTEGRATION,
settings=self.settings)
self.ocm_cli = ocm_map.get(hive_cluster)
if not self.ocm_cli:
raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
f"cluster {hive_cluster}")
# Getting the OC Client for the hive cluster
oc_map = OC_Map(clusters=[cluster_info],
integration=QONTRACT_INTEGRATION,
settings=self.settings)
self.oc_cli = oc_map.get(hive_cluster)
if not self.oc_cli:
raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
f"cluster {hive_cluster}")
namespace = instance['ecrResourcesNamespace']
ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']
ocp_release_info = self._get_tf_resource_info(namespace,
ocp_release_identifier)
if ocp_release_info is None:
raise OcpReleaseEcrMirrorError(f"Could not find rds "
f"identifier "
f"{ocp_release_identifier} in "
f"namespace {namespace['name']}")
ocp_art_dev_info = self._get_tf_resource_info(namespace,
ocp_art_dev_identifier)
if ocp_art_dev_info is None:
raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
f" {ocp_art_dev_identifier} in"
f"namespace {namespace['name']}")
# Getting the AWS Client for the accounts
aws_accounts = [
self._get_aws_account_info(account=ocp_release_info['account']),
self._get_aws_account_info(account=ocp_art_dev_info['account'])
]
self.aws_cli = AWSApi(thread_pool_size=1,
accounts=aws_accounts,
settings=self.settings,
init_ecr_auth_tokens=True)
self.aws_cli.map_ecr_resources()
self.ocp_release_ecr_uri = self._get_image_uri(
account=ocp_release_info['account'],
repository=ocp_release_identifier
)
if self.ocp_release_ecr_uri is None:
raise OcpReleaseEcrMirrorError(f"Could not find the "
f"ECR repository "
f"{ocp_release_identifier}")
self.ocp_art_dev_ecr_uri = self._get_image_uri(
account=ocp_art_dev_info['account'],
repository=ocp_art_dev_identifier
)
if self.ocp_art_dev_ecr_uri is None:
raise OcpReleaseEcrMirrorError(f"Could not find the "
f"ECR repository "
f"{ocp_art_dev_identifier}")
# Getting all the credentials
quay_creds = self._get_quay_creds()
ocp_release_creds = self._get_ecr_creds(
account=ocp_release_info['account'],
region=ocp_release_info['region']
)
ocp_art_dev_creds = self._get_ecr_creds(
account=ocp_art_dev_info['account'],
region=ocp_art_dev_info['region']
)
# Creating a single dictionary with all credentials to be used by the
# "oc adm release mirror" command
self.registry_creds = {
'auths':
{
**quay_creds['auths'],
**ocp_release_creds['auths'],
**ocp_art_dev_creds['auths'],
}
}
def run(self):
ocp_releases = self._get_ocp_releases()
if not ocp_releases:
raise RuntimeError('No OCP Releases found')
for ocp_release in ocp_releases:
tag = ocp_release.split(':')[-1]
dest_ocp_release = f'{self.ocp_release_ecr_uri}:{tag}'
self._run_mirror(ocp_release=ocp_release,
dest_ocp_release=dest_ocp_release,
dest_ocp_art_dev=self.ocp_art_dev_ecr_uri)
def _run_mirror(self, ocp_release, dest_ocp_release, dest_ocp_art_dev):
# Checking if the image is already there
if self._is_image_there(dest_ocp_release):
LOG.info(f'Image {ocp_release} already in '
f'the mirror. Skipping.')
return
LOG.info(f'Mirroring {ocp_release} to {dest_ocp_art_dev} '
f'to_release {dest_ocp_release}')
if self.dry_run:
return
# Creating a new, bare, OC client since we don't
# want to run this against any cluster or via
# a jump host
oc_cli = OC(server='', token='', jh=None, settings=None,
init_projects=False, init_api_resources=False)
oc_cli.release_mirror(from_release=ocp_release,
to=dest_ocp_art_dev,
to_release=dest_ocp_release,
dockerconfig=self.registry_creds)
def _is_image_there(self, image):
image_obj = Image(image)
for registry, creds in self.registry_creds['auths'].items():
# Getting the credentials for the image_obj
registry_obj = urlparse(registry)
if registry_obj.netloc != image_obj.registry:
continue
image_obj.auth = (creds['username'], creds['password'])
# Checking if the image is already
# in the registry
if image_obj:
return True
return False
@staticmethod
def _get_aws_account_info(account):
for account_info in queries.get_aws_accounts():
if 'name' not in account_info:
continue
if account_info['name'] != account:
continue
return account_info
def _get_ocp_releases(self):
ocp_releases = list()
clusterimagesets = self.oc_cli.get_all('clusterimageset')
for clusterimageset in clusterimagesets['items']:
release_image = clusterimageset['spec']['releaseImage']
# There are images in some ClusterImagesSets not coming
# from quay.io, e.g.:
# registry.svc.ci.openshift.org/ocp/release:4.2.0-0.nightly-2020-11-04-053758
# Let's filter out everything not from quay.io
if not release_image.startswith('quay.io'):
continue
ocp_releases.append(release_image)
return ocp_releases
def _get_quay_creds(self):
return self.ocm_cli.get_pull_secrets()
def _get_ecr_creds(self, account, region):
if region is None:
region = self.aws_cli.accounts[account]['resourcesDefaultRegion']
auth_token = f'{account}/{region}'
data = self.aws_cli.auth_tokens[auth_token]
auth_data = data['authorizationData'][0]
server = auth_data['proxyEndpoint']
token = auth_data['authorizationToken']
password = base64.b64decode(token).decode('utf-8').split(':')[1]
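        # Added illustration: ECR authorization tokens are base64("AWS:<password>"),
        # e.g. base64.b64decode("QVdTOnNlY3JldA==").decode("utf-8") == "AWS:secret",
        # so the split(':')[1] above extracts the registry password for user "AWS".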
return {
'auths': {
server: {
'username': 'AWS',
'password': password,
'email': 'sd-app-sre@redhat.com',
'auth': token
}
}
}
@staticmethod
def _get_tf_resource_info(namespace, identifier):
tf_resources = namespace['terraformResources']
for tf_resource in tf_resources:
if 'identifier' not in tf_resource:
continue
if tf_resource['identifier'] != identifier:
continue
if tf_resource['provider'] != 'ecr':
continue
return {
'account': tf_resource['account'],
'region': tf_resource.get('region'),
}
def _get_image_uri(self, account, repository):
for repo in self.aws_cli.resources[account]['ecr']:
if repo['repositoryName'] == repository:
return repo['repositoryUri']
def run(dry_run):
instances = queries.get_ocp_release_ecr_mirror()
for instance in instances:
try:
quay_mirror = OcpReleaseEcrMirror(dry_run,
instance=instance)
quay_mirror.run()
except OcpReleaseEcrMirrorError as details:
LOG.error(str(details))
sys.exit(ExitCodes.ERROR)
| 37.266409
| 89
| 0.57097
|
b1b2d66ea4a608303ccb74d3fcca66bbe4215c6e
| 19,527
|
py
|
Python
|
src/transformers/tokenization_dpr.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | 4
|
2021-02-28T11:58:18.000Z
|
2022-02-03T03:26:45.000Z
|
src/transformers/tokenization_dpr.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | 2
|
2022-03-16T13:57:37.000Z
|
2022-03-16T14:00:51.000Z
|
src/transformers/tokenization_dpr.py
|
abufadl/transformers
|
c84bb6eb92b654e04a82fada26417fcdab45f3af
|
[
"Apache-2.0"
] | 1
|
2021-11-08T06:57:35.000Z
|
2021-11-08T06:57:35.000Z
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DPR."""
import collections
from typing import List, Optional, Union
from .file_utils import add_end_docstrings, add_start_docstrings
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_utils_base import BatchEncoding, TensorType
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
}
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
}
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
}
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
r"""
Constructs a DPRContextEncoderTokenizer.
:class:`~transformers.DPRContextEncoderTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
r"""
Constructs a "Fast" DPRContextEncoderTokenizer (backed by HuggingFace's `tokenizers` library).
:class:`~transformers.DPRContextEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
r"""
Constructs a DPRQuestionEncoderTokenizer.
:class:`~transformers.DPRQuestionEncoderTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
r"""
Constructs a "Fast" DPRQuestionEncoderTokenizer (backed by HuggingFace's `tokenizers` library).
:class:`~transformers.DPRQuestionEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to :obj:`.decode_best_spans`.
    It converts the strings of a question and of the different passages (title + text) into sequences of ids (integers), using the tokenizer and vocabulary.
The resulting `input_ids` is a matrix of size :obj:`(n_passages, sequence_length)` with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Inputs:
questions (:obj:`str`, :obj:`List[str]`):
The questions to be encoded.
You can specify one question for many passages. In this case, the question will be duplicated like :obj:`[questions] * n_passages`.
Otherwise you have to specify as many questions as in :obj:`titles` or :obj:`texts`.
titles (:obj:`str`, :obj:`List[str]`):
            The passages' titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (:obj:`str`, :obj:`List[str]`):
            The passages' texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (:obj:`Union[bool, str]`, `optional`, defaults to :obj:`False`):
Activate and control padding. Accepts the following values:
                * `True` or `'longest'`: pad to the longest sequence in the batch (or no padding if only a single sequence is provided),
* `'max_length'`: pad to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`)
* `False` or `'do_not_pad'` (default): No padding (i.e. can output batch with sequences of uneven lengths)
truncation (:obj:`Union[bool, str]`, `optional`, defaults to :obj:`False`):
Activate and control truncation. Accepts the following values:
* `True` or `'only_first'`: truncate to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`).
* `False` or `'do_not_truncate'` (default): No truncation (i.e. can output batch with sequences length greater than the model max admissible input size)
max_length (:obj:`Union[int, None]`, `optional`, defaults to :obj:`None`):
Control the length for padding/truncation. Accepts the following values
* `None` (default): This will use the predefined model max length if required by one of the truncation/padding parameters. If the model has no specific max input length (e.g. XLNet) truncation/padding to max length is deactivated.
* `any integer value` (e.g. `42`): Use this specific maximum length value if required by one of the truncation/padding parameters.
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf', 'pt' or 'np' to return respectively TensorFlow :obj:`tf.constant`,
            PyTorch :obj:`torch.Tensor` or Numpy :obj:`np.ndarray` instead of a list of python integers.
        return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
Return:
A Dictionary of shape::
{
input_ids: list[list[int]],
attention_mask: list[int] if return_attention_mask is True (default)
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
def __call__(
self,
questions,
titles: Optional[str] = None,
texts: Optional[str] = None,
padding: Union[bool, str] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_attention_mask: Optional[bool] = None,
**kwargs
) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
questions,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
**kwargs,
)
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(
questions,
text_pair,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
**kwargs,
)
titles = titles if not isinstance(titles, str) else [titles]
texts = texts if not isinstance(texts, str) else [texts]
n_passages = len(titles)
questions = questions if not isinstance(questions, str) else [questions] * n_passages
assert len(titles) == len(
texts
), "There should be as many titles than texts but got {} titles and {} texts.".format(len(titles), len(texts))
encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
encoded_inputs = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
]
}
if return_attention_mask is not False:
attention_mask = [input_ids != self.pad_token_id for input_ids in encoded_inputs["input_ids"]]
encoded_inputs["attention_mask"] = attention_mask
return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def decode_best_spans(
self,
reader_input: BatchEncoding,
reader_output: DPRReaderOutput,
num_spans: int = 16,
max_answer_length: int = 64,
num_spans_per_passage: int = 4,
) -> List[DPRSpanPrediction]:
"""
Get the span predictions for the extractive Q&A model.
Outputs: `List` of `DPRReaderOutput` sorted by descending `(relevance_score, span_score)`.
Each `DPRReaderOutput` is a `Tuple` with:
**span_score**: ``float`` that corresponds to the score given by the reader for this span compared to other spans
in the same passage. It corresponds to the sum of the start and end logits of the span.
            **relevance_score**: ``float`` that corresponds to the score of each passage to answer the question,
                compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
            **doc_id**: ``int`` the id of the passage.
**start_index**: ``int`` the start index of the span (inclusive).
**end_index**: ``int`` the end index of the span (inclusive).
Examples::
from transformers import DPRReader, DPRReaderTokenizer
tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded_inputs = tokenizer(
questions=["What is love ?"],
titles=["Haddaway"],
texts=["'What Is Love' is a song recorded by the artist Haddaway"],
return_tensors='pt'
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text) # best span
"""
input_ids = reader_input["input_ids"]
start_logits, end_logits, relevance_logits = reader_output[:3]
n_passages = len(relevance_logits)
sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
nbest_spans_predictions: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id)
else:
sequence_len = len(sequence_ids)
best_spans = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len],
end_logits=end_logits[doc_id][passage_offset:sequence_len],
max_answer_length=max_answer_length,
top_spans=num_spans_per_passage,
)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
relevance_score=relevance_logits[doc_id],
doc_id=doc_id,
start_index=start_index,
end_index=end_index,
text=self.decode(sequence_ids[start_index : end_index + 1]),
)
)
if len(nbest_spans_predictions) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans(
self,
start_logits: List[int],
end_logits: List[int],
max_answer_length: int,
top_spans: int,
) -> List[DPRSpanPrediction]:
"""
Finds the best answer span for the extractive Q&A model for one passage.
It returns the best span by descending `span_score` order and keeping max `top_spans` spans.
        Spans longer than `max_answer_length` are ignored.
"""
scores = []
for (start_index, start_score) in enumerate(start_logits):
for (answer_length, end_score) in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
scores = sorted(scores, key=lambda x: x[1], reverse=True)
chosen_span_intervals = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, "Wrong span indices: [{}:{}]".format(start_index, end_index)
length = end_index - start_index + 1
assert length <= max_answer_length, "Span is too long: {} > {}".format(length, max_answer_length)
if any(
[
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals
]
):
continue
chosen_span_intervals.append((start_index, end_index))
if len(chosen_span_intervals) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
r"""
Constructs a DPRReaderTokenizer.
    :class:`~transformers.DPRReaderTokenizer` is almost identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
    What is different is that it has three input strings: question, titles and texts that are combined to be fed into the DPRReader model.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
r"""
Constructs a DPRReaderTokenizerFast.
:class:`~transformers.DPRReaderTokenizerFast` is almost identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
    What is different is that it has three input strings: question, titles and texts that are combined to be fed into the DPRReader model.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
| 47.510949
| 242
| 0.690275
|
a6e85c505ba111e4358880b01ba8b709363a48a7
| 1,212
|
py
|
Python
|
location/International/ISO-CID/iso_alpha_3.py
|
vishalbelsare/classifications
|
e16dbc9b625ff7e233be30bfb3d432f7b026facd
|
[
"BSD-3-Clause"
] | 16
|
2016-02-05T22:05:55.000Z
|
2022-02-18T12:23:22.000Z
|
location/International/ISO-CID/iso_alpha_3.py
|
vishalbelsare/classifications
|
e16dbc9b625ff7e233be30bfb3d432f7b026facd
|
[
"BSD-3-Clause"
] | 2
|
2018-04-06T18:07:01.000Z
|
2018-11-01T21:19:42.000Z
|
location/International/ISO-CID/iso_alpha_3.py
|
vishalbelsare/classifications
|
e16dbc9b625ff7e233be30bfb3d432f7b026facd
|
[
"BSD-3-Clause"
] | 5
|
2015-04-09T01:39:00.000Z
|
2020-12-27T03:02:25.000Z
|
# Wikipedia parser, from:
# http://gis.stackexchange.com/questions/1047/full-list-of-iso-alpha-2-and-iso-alpha-3-country-codes/151571#151571
import csv
import urllib2
from BeautifulSoup import BeautifulSoup
opener = urllib2.build_opener()
opener.addheaders = [("User-agent", "Mozilla/5.0")]
url = "http://en.wikipedia.org/wiki/ISO_3166-1"
page = opener.open(url)
soup = BeautifulSoup(page.read())
t = soup.find("table", {"class": "wikitable sortable"})
# create a new CSV for the output
iso_csv = csv.writer(open("wikipedia-iso-country-codes.csv", "w"))
# get the header rows, write to the CSV
iso_csv.writerow([th.findAll(text=True)[0] for th in t.findAll("th")])
# Iterate over the table pulling out the country table results. Skip the first
# row as it contains the already-parsed header information.
for row in t.findAll("tr")[1:]:
tds = row.findAll("td")
raw_cols = [td.findAll(text=True) for td in tds]
cols = []
# country field contains differing numbers of elements, due to the flag --
# only take the name
cols.append(raw_cols[0][-1:][0])
# for all other columns, use the first result text
cols.extend([col[0] for col in raw_cols[1:]])
iso_csv.writerow(cols)
| 33.666667
| 114
| 0.707921
|
bc94ba777ef1705443dc51220cd1597fd1077ef2
| 1,020
|
py
|
Python
|
cvat/apps/dataset_repo/migrations/0001_initial.py
|
adasdevops/ADAS_UPDATE_STABLE
|
306202b4e291b5876e3dd4fdd201c761e9d182f0
|
[
"Intel",
"MIT"
] | null | null | null |
cvat/apps/dataset_repo/migrations/0001_initial.py
|
adasdevops/ADAS_UPDATE_STABLE
|
306202b4e291b5876e3dd4fdd201c761e9d182f0
|
[
"Intel",
"MIT"
] | null | null | null |
cvat/apps/dataset_repo/migrations/0001_initial.py
|
adasdevops/ADAS_UPDATE_STABLE
|
306202b4e291b5876e3dd4fdd201c761e9d182f0
|
[
"Intel",
"MIT"
] | 1
|
2022-03-04T09:18:30.000Z
|
2022-03-04T09:18:30.000Z
|
# Generated by Django 3.2.12 on 2022-02-14 16:39
import cvat.apps.dataset_repo.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('engine', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GitData',
fields=[
('task', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='engine.task')),
('url', models.URLField(max_length=2000)),
('path', models.CharField(max_length=256)),
('format', models.CharField(blank=True, max_length=256)),
('sync_date', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(default=cvat.apps.dataset_repo.models.GitStatusChoice['NON_SYNCED'], max_length=20)),
('lfs', models.BooleanField(default=True)),
],
),
]
| 34
| 145
| 0.613725
|
91327d401b67be05aee292dc5ac483b660c054eb
| 550
|
py
|
Python
|
python/ray/serve/__init__.py
|
janblumenkamp/ray
|
304e31b7e56ebd33f7099d97233e3feb37f495c9
|
[
"Apache-2.0"
] | 1
|
2021-01-22T20:30:15.000Z
|
2021-01-22T20:30:15.000Z
|
python/ray/serve/__init__.py
|
janblumenkamp/ray
|
304e31b7e56ebd33f7099d97233e3feb37f495c9
|
[
"Apache-2.0"
] | 3
|
2021-06-08T21:46:35.000Z
|
2022-03-12T00:35:21.000Z
|
python/ray/serve/__init__.py
|
lzt-pro/ray
|
cc93fee4a47dc9b9f754d0b53ae2f1e4f598aeb1
|
[
"Apache-2.0"
] | null | null | null |
from ray.serve.policy import RoutePolicy
from ray.serve.api import (init, create_backend, delete_backend,
create_endpoint, delete_endpoint, set_traffic,
get_handle, stat, update_backend_config,
get_backend_config, accept_batch) # noqa: E402
__all__ = [
"init", "create_backend", "delete_backend", "create_endpoint",
"delete_endpoint", "set_traffic", "get_handle", "stat",
"update_backend_config", "get_backend_config", "RoutePolicy",
"accept_batch"
]
| 42.307692
| 74
| 0.650909
|
0eb99df42485bf76c1abd1d0d45d2dfb823ef226
| 7,457
|
py
|
Python
|
src/simple_template_copy/core.py
|
ZX1209/simple_template_copy
|
8fbe783fc3a2f1ddec53da92a742b3312b1842d3
|
[
"MIT"
] | null | null | null |
src/simple_template_copy/core.py
|
ZX1209/simple_template_copy
|
8fbe783fc3a2f1ddec53da92a742b3312b1842d3
|
[
"MIT"
] | null | null | null |
src/simple_template_copy/core.py
|
ZX1209/simple_template_copy
|
8fbe783fc3a2f1ddec53da92a742b3312b1842d3
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import os
from typing import Dict, List
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import shutil
import logging
def order_match(str1, str2):
"""order_match"""
l1 = len(str1)
l2 = len(str2)
i = 0
j = 0
if l1 > l2:
return 0
while i < l1 and j < l2:
if str1[i] == str2[j]:
i += 1
j += 1
else:
j += 1
return 100 * (i / l1)
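# Hedged illustration (not part of the original module): order_match scores how
# much of str1 appears, in order, inside str2, scaled to 0-100.
#
#   order_match("abc", "a1b2c3")  # -> 100.0, every character found in order
#   order_match("acb", "abc")     # -> ~66.7, only "a" and "c" match in order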
class CopyTemplate:
def __init__(
self,
template_str: str,
target_strs: List[str],
no_exec=False,
help_message="",
log_level=logging.INFO,
):
"""CopyTemplate"""
# print("here")
# variable define
self.log_level = log_level
self.template_str = template_str
self.target_strs = target_strs
self.template_path = None
self.target_paths = None
self.help_message = help_message
self.feedback_messages = []
self.has_template_str = True
self.has_target_str = True
self.possible_templates = []
self.find_template_num = 0
        self.is_path_ts = False  # True if template_str is an existing path
# this may not be good
self.target_str_exists = False
# tag: template_dir
self.template_dir = "~/tempy_templates_dir" # default path
self.template_names = []
self.template_name_path_map = dict()
self.simple_matched = False
self.possible_matched = False
self.pre_treatment()
self.info()
self.main()
self.lastline_summary()
# if help:
# self.help()
def log_config(self):
"""log_config"""
self.ch = logging.StreamHandler()
self.ch.setLevel(self.log_level)
self.formatter = logging.Formatter(
"|%(asctime)s|%(name)s|%(levelname)s|\n%(message)s\n"
)
self.ch.setFormatter(self.formatter)
self.logger = logging.Logger("CopyTemplate")
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(self.ch)
def load_config(self):
"""load_config"""
# tag: load env config
osvar = os.getenv("tempy_templates_dir")
if osvar:
self.template_dir = osvar
def pre_treatment(self):
"""pre_treatment"""
self.log_config()
self.load_config()
self.template_dir_path = self.path_solve(self.template_dir)
if not self.template_dir_path.exists():
self.template_dir_path.mkdir()
if self.template_str is None:
self.template_str = ""
self.has_template_str = False
# check variable type?
        if self.target_strs is None or self.target_strs == []:
            self.target_strs = []
            self.has_target_str = False
        self.template_str = self.template_str.strip()
        self.target_strs = list(map(str.strip, self.target_strs))
if self.has_template_str and self.path_solve(self.template_str).exists():
self.is_path_ts = True
self.template_path = self.path_solve(self.template_str)
# self.template_path = self.path_solve(self.template_str)
# template_str_process
if self.has_template_str and not self.is_path_ts:
            # prepare possible template paths
self.data_prepare()
self.find_possible_template()
self.simple_match()
# target str process
if self.has_target_str:
self.target_paths = list(map(self.path_solve, self.target_strs))
def info(self):
"""info"""
self.logger.debug("hello")
def main(self):
"""main"""
if self.has_template_str:
self.print_possible_templates()
self.print_simple_match()
#
if self.has_target_str and (
self.simple_matched or self.possible_matched or self.is_path_ts
):
self.copy_template()
self.print_copy_info()
# else:
# # only show possible template path
# self.print_possible_templates()
else:
print(self.help_message)
return
# self.feedback_messages.append("help message")
def help(self):
"""help"""
pass
def path_solve(self, path_str):
"""path_solve"""
return Path(path_str).expanduser().resolve()
def simple_match(self):
"""simple_match"""
if self.template_str in self.template_names:
self.simple_matched = True
self.template_path = self.template_name_path_map[self.template_str]
def data_prepare(self):
"""data_prepare"""
for path in self.template_dir_path.iterdir(): # tag: bug?
self.template_names.append(path.name)
self.template_name_path_map[path.name] = path
def find_possible_template(self):
"""find_possible_template"""
# data prepare
names_scores = process.extract(
self.template_str, self.template_names, scorer=order_match
)
        matches = list(filter(lambda x: x[1] >= 90, names_scores))
        self.find_template_num = len(matches)
        for match in matches:
self.possible_templates.append(
(match[0], self.template_name_path_map[match[0]])
)
if self.find_template_num == 1:
self.template_path = self.possible_templates[0][1]
self.possible_matched = True
def print_possible_templates(self):
"""print_possible_templates"""
        print(f"found {len(self.possible_templates)} possible templates, listed below")
for template in self.possible_templates:
print(template[0], " : ")
print(" ", template[1])
print()
    def print_simple_match(self):
        """print_simple_match"""
if self.simple_matched:
print("simple matched")
print(self.template_str, " : ")
print(" ", self.template_path)
else:
            print("no simple match")
print()
def copy_template(self):
"""copy_template"""
if self.template_path.is_dir():
for target_path in self.target_paths:
shutil.copytree(
str(self.template_path), str(target_path), dirs_exist_ok=True
)
elif self.template_path.is_file():
for target_path in self.target_paths:
shutil.copy(str(self.template_path), str(target_path))
def print_copy_info(self):
"""print_copy_info"""
print("copy " + str(self.template_path))
print("to")
print(" , ".join(self.target_strs))
print()
def lastline_summary(self):
"""lastline_summary"""
lastline = ""
if self.has_template_str:
lastline += "get template str, "
if self.simple_matched:
lastline += "simple matched, "
if self.has_target_str and (
self.simple_matched or self.possible_matched or self.is_path_ts
):
            lastline += "files copied, "
else:
            lastline += "no input given, see help message above"
print(lastline)
def custom_copy(self, argvs):
"""custom_copy"""
pass
# if __name__ == "__main__":
# t = CopyTemplate()
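# Hedged usage sketch (not part of the original module): CopyTemplate does all of
# its work in __init__, so constructing it is enough. The template name and the
# target directory below are hypothetical; templates are looked up under
# ~/tempy_templates_dir (or the $tempy_templates_dir environment variable).
#
#   CopyTemplate(
#       template_str="flask-app",
#       target_strs=["./new_project"],
#       help_message="usage: tempy <template> <target>...",
#   )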
| 28.033835
| 81
| 0.577444
|
9a5fccb83c2997481811e79b5be0c2adc39a6e91
| 94
|
py
|
Python
|
mach_eval/analyzers/spm/__init__.py
|
Severson-Group/MachEval
|
dbb7999188133f8744636da53cab475ae538ce80
|
[
"BSD-3-Clause"
] | 6
|
2021-11-02T20:12:32.000Z
|
2021-11-13T10:50:35.000Z
|
mach_eval/analyzers/spm/__init__.py
|
Severson-Group/MachEval
|
dbb7999188133f8744636da53cab475ae538ce80
|
[
"BSD-3-Clause"
] | 18
|
2021-11-29T20:14:55.000Z
|
2022-03-02T07:17:37.000Z
|
mach_eval/analyzers/spm/__init__.py
|
Severson-Group/MachEval
|
dbb7999188133f8744636da53cab475ae538ce80
|
[
"BSD-3-Clause"
] | 1
|
2022-01-29T00:52:38.000Z
|
2022-01-29T00:52:38.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 11:02:22 2022
@author: Martin Johnson
"""
| 11.75
| 35
| 0.595745
|
bb4723f5aa13f2675f9441b54118a8c5ec9120f2
| 4,069
|
py
|
Python
|
agegendernet.py
|
duongntbk/AgeGenderNet
|
f5d05e54a06a76697ef710cf7f6ccc30dc574ca5
|
[
"MIT"
] | 1
|
2021-03-17T07:39:27.000Z
|
2021-03-17T07:39:27.000Z
|
agegendernet.py
|
duongntbk/AgeGenderNet
|
f5d05e54a06a76697ef710cf7f6ccc30dc574ca5
|
[
"MIT"
] | 8
|
2020-09-25T22:34:53.000Z
|
2022-02-10T01:37:29.000Z
|
agegendernet.py
|
duongntbk/AgeGenderNet
|
f5d05e54a06a76697ef710cf7f6ccc30dc574ca5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
from keras.layers import Concatenate, Dense, Dropout, Flatten, Input
from keras.models import Model
class AgeGenderNet:
'''
    This class is used to build the AgeGenderNet model.
    It does not perform any training or evaluation.
    AgeGenderNet is based on the VGG19 architecture,
    with separate branches for age guessing and gender prediction.
    The result of gender prediction is integrated into the age guessing branch
    to increase accuracy.
'''
@staticmethod
def build_root(conv_base, inputs, split_from_top=4):
'''
Build the common part of both age guessing branch and
gender prediction branch.
        The weights in the common part are loaded directly from a model
        pre-trained on the ImageNet dataset and will not be modified.
'''
base = copy.deepcopy(conv_base)
base_depth = len(base.layers)
x = inputs
for layer in base.layers[1:base_depth-split_from_top]:
layer.trainable = False
x = layer(x)
return x
@staticmethod
def build_gender_branch(conv_base, root, split_from_top=4, dropout=0.4):
'''
Build the gender prediction branch.
While performing fine-tuning, we will update the weights in this branch
but at first we set all parameters to trainable==False so that
        we can warm up the fully connected layers first.
'''
base = copy.deepcopy(conv_base)
base_depth = len(base.layers)
x = root
for layer in base.layers[base_depth-split_from_top:base_depth]:
name = layer.name
layer.name = "gender_branch_" + name
layer.trainable = False
x = layer(x)
x = Dropout(dropout)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dense(1, activation='sigmoid', name='gender_output')(x)
return x
@staticmethod
def build_age_branch(conv_base, root, gender_branch, split_from_top=4, dropout=0.4):
'''
The age guessing branch.
Because the features to guess age for female and male are different,
we concatenate gender information into the fully connected layers of age guessing.
If gender prediction is accurate enough,
this should increase age guessing accuracy.
While performing fine-tuning, we will update the weights in this branch
but at first we set all parameters to trainable==False so that
        we can warm up the fully connected layers first.
'''
base = copy.deepcopy(conv_base)
base_depth = len(base.layers)
x = root
for layer in base.layers[base_depth-split_from_top:base_depth]:
name = layer.name
layer.name = "age_branch_" + name
layer.trainable = False
x = layer(x)
x = Dropout(dropout)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Concatenate()([x, gender_branch])
x = Dense(1, name='age_output')(x)
return x
@staticmethod
def build(conv_base, split_from_top=4, age_dropout=0.4, gender_dropout=0.2):
'''
Build AgeGenderNet model.
All layers in conv_base will be set to trainable=False at first.
        When performing fine-tuning, we need to change the trainable property.
'''
input_shape = conv_base.layers[0].input_shape[1:]
inputs = Input(shape=input_shape, name='root_input')
root = AgeGenderNet.build_root(conv_base, inputs, split_from_top=split_from_top)
        gender_branch = AgeGenderNet.build_gender_branch(conv_base, root,
            split_from_top=split_from_top, dropout=gender_dropout)
        age_branch = AgeGenderNet.build_age_branch(conv_base, root, gender_branch,
            split_from_top=split_from_top, dropout=age_dropout)
return Model(inputs, [age_branch, gender_branch])
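# Hedged usage sketch (not part of the original file): build() expects a Keras
# convolutional base whose last `split_from_top` layers are duplicated into the
# two branches. With a VGG19 base from keras.applications this might look like:
#
#   from keras.applications import VGG19
#
#   conv_base = VGG19(weights="imagenet", include_top=False,
#                     input_shape=(224, 224, 3))
#   model = AgeGenderNet.build(conv_base, split_from_top=4)
#   model.compile(optimizer="adam",
#                 loss={"age_output": "mse",
#                       "gender_output": "binary_crossentropy"})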
| 36.990909
| 90
| 0.640452
|
5cdc5bb1ff1b57a1c168a9f240b2928e063bea64
| 228
|
py
|
Python
|
0x0B-python-input_output/10-main_2.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | 1
|
2019-05-21T09:34:41.000Z
|
2019-05-21T09:34:41.000Z
|
0x0B-python-input_output/10-main_2.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
0x0B-python-input_output/10-main_2.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | 1
|
2020-09-25T17:54:36.000Z
|
2020-09-25T17:54:36.000Z
|
#!/usr/bin/python3
MyClass = __import__('10-my_class_2').MyClass
class_to_json = __import__('10-class_to_json').class_to_json
m = MyClass("John")
m.win()
print(type(m))
print(m)
mj = class_to_json(m)
print(type(mj))
print(mj)
| 17.538462
| 60
| 0.72807
|
5f0cb49bfbde079e078d775cee2c7bc3cf8948c6
| 8,236
|
py
|
Python
|
tensorflow_datasets/object_detection/lvis/lvis.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 2
|
2022-02-14T09:51:39.000Z
|
2022-02-14T13:27:49.000Z
|
tensorflow_datasets/object_detection/lvis/lvis.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/object_detection/lvis/lvis.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:11:33.000Z
|
2020-12-13T22:11:33.000Z
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LVIS dataset."""
import collections
import json
import pathlib
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
LVIS: A dataset for large vocabulary instance segmentation.
"""
_CITATION = """
@inproceedings{gupta2019lvis,
title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation},
author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},
booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
year={2019}
}
"""
_URLS = {
'train_annotation':
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',
'train_images':
'http://images.cocodataset.org/zips/train2017.zip',
'validation_annotation':
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_val.json.zip',
'validation_images':
'http://images.cocodataset.org/zips/val2017.zip',
'test_annotation':
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_image_info_test_dev.json.zip',
'test_images':
'http://images.cocodataset.org/zips/test2017.zip'
}
# Annotations with invalid bounding boxes. Will not be used.
_INVALID_ANNOTATIONS = [
# Train split.
662101,
81217,
462924,
227817,
29381,
601484,
412185,
504667,
572573,
91937,
239022,
181534,
101685,
# Validation split.
36668,
57541,
33126,
10932
]
_NUM_CLASSES = 1203
class Lvis(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for lvis dataset."""
VERSION = tfds.core.Version('1.2.0')
RELEASE_NOTES = {
'1.1.0':
'Added fields `neg_category_ids` and `not_exhaustive_category_ids`.',
'1.2.0':
'Added class names.',
}
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
class_label = tfds.features.ClassLabel(
names_file=tfds.core.tfds_path(
'object_detection/lvis/lvis_classes.txt'))
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image':
tfds.features.Image(encoding_format='jpeg'),
'image/id':
tf.int64,
'neg_category_ids':
tfds.features.Sequence(class_label),
'not_exhaustive_category_ids':
tfds.features.Sequence(class_label),
'objects':
tfds.features.Sequence({
# LVIS has unique id for each annotation.
'id': tf.int64,
'area': tf.int64,
'bbox': tfds.features.BBoxFeature(),
'label': class_label,
'segmentation': tfds.features.Image(shape=(None, None, 1)),
}),
}),
# If there's a common (input, target) tuple from the
# features, specify them here. They'll be used if
# `as_supervised=True` in `builder.as_dataset`.
supervised_keys=None,
homepage='https://www.lvisdataset.org/',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
paths = dl_manager.download_and_extract(_URLS)
image_dirs = [
paths['train_images'] / 'train2017',
paths['validation_images'] / 'val2017',
paths['test_images'] / 'test2017',
]
return {
tfds.Split.TRAIN:
self._generate_examples(
image_dirs, paths['train_annotation'] / 'lvis_v1_train.json'),
tfds.Split.VALIDATION:
self._generate_examples(
image_dirs,
paths['validation_annotation'] / 'lvis_v1_val.json'),
tfds.Split.TEST:
self._generate_examples(
image_dirs,
paths['test_annotation'] / 'lvis_v1_image_info_test_dev.json'),
}
def _generate_examples(self, image_dirs, annotation_file):
"""Yields examples."""
lvis_annotation = LvisAnnotation(annotation_file)
def _process_example(image_info):
# Search image dirs.
filename = pathlib.Path(image_info['coco_url']).name
image = _find_image_in_dirs(image_dirs, filename)
instances = lvis_annotation.get_annotations(img_id=image_info['id'])
instances = [x for x in instances if x['id'] not in _INVALID_ANNOTATIONS]
neg_category_ids = image_info.get('neg_category_ids', [])
not_exhaustive_category_ids = image_info.get(
'not_exhaustive_category_ids', [])
example = {
'image': image,
'image/id': image_info['id'],
'neg_category_ids': [i - 1 for i in neg_category_ids],
'not_exhaustive_category_ids': [
i - 1 for i in not_exhaustive_category_ids
],
'objects': [],
}
for inst in instances:
example['objects'].append({
'id':
inst['id'],
'area':
inst['area'],
'bbox':
_build_bbox(image_info, *inst['bbox']),
'label':
inst['category_id'] - 1,
'segmentation':
_build_segmentation_mask(image_info, inst['segmentation'])
})
return image_info['id'], example
beam = tfds.core.lazy_imports.apache_beam
return beam.Create(lvis_annotation.images) | beam.Map(_process_example)
def _find_image_in_dirs(image_dirs, filename):
"""Finds `filename` in one of the `image_dir` folders."""
images = [d / filename for d in image_dirs if (d / filename).exists()]
assert len(images) == 1, (images, image_dirs, filename)
return images[0]
def _build_bbox(image_info, x, y, width, height):
  # Normalize pixel-space (x, y, width, height) into relative BBox coordinates.
return tfds.features.BBox(
ymin=y / image_info['height'],
xmin=x / image_info['width'],
ymax=(y + height) / image_info['height'],
xmax=(x + width) / image_info['width'],
)
def _build_segmentation_mask(image_info, seg):
cv2 = tfds.core.lazy_imports.cv2
mask = np.zeros((image_info['height'], image_info['width']), np.uint8)
error_msg = f'Annotation contains an invalid polygon with < 3 points: {seg}'
assert all(len(poly) % 2 == 0 and len(poly) >= 6 for poly in seg), error_msg
for poly in seg:
poly = np.asarray(poly, np.int32).reshape((1, -1, 2))
cv2.fillPoly(mask, poly, 255)
return mask[:, :, np.newaxis]
class LvisAnnotation:
"""LVIS annotation helper class.
  The format of the annotations is explained on
https://www.lvisdataset.org/dataset.
"""
def __init__(self, annotation_path):
with tf.io.gfile.GFile(annotation_path) as f:
data = json.load(f)
self._data = data
img_id2annotations = collections.defaultdict(list)
for a in self._data.get('annotations', []):
img_id2annotations[a['image_id']].append(a)
self._img_id2annotations = {
k: list(sorted(v, key=lambda a: a['id']))
for k, v in img_id2annotations.items()
}
@property
def categories(self):
"""Return the category dicts, as sorted in the file."""
return self._data['categories']
@property
def images(self):
"""Return the image dicts, as sorted in the file."""
return self._data['images']
  def get_annotations(self, img_id):
    """Return all annotations associated with the given image id."""
# Some images don't have any annotations. Return empty list instead.
return self._img_id2annotations.get(img_id, [])
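# Hedged usage sketch (not part of the original module): outside the TFDS build
# pipeline, LvisAnnotation can be pointed at a locally downloaded annotation
# file. The path below is hypothetical.
#
#   ann = LvisAnnotation('/tmp/lvis_v1_val.json')
#   first_image = ann.images[0]
#   boxes = [a['bbox'] for a in ann.get_annotations(first_image['id'])]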
| 32.68254
| 110
| 0.637445
|
6a239919b5def9a7a78127b46aa0676c481febe2
| 1,927
|
py
|
Python
|
build/lib/pyostie/utils.py
|
anirudhpnbb/Pyostie
|
c9ef56529ff7938262ee7df2b88bad8016dec857
|
[
"MIT"
] | 3
|
2021-04-19T13:48:40.000Z
|
2021-08-05T14:53:29.000Z
|
build/lib/pyostie/utils.py
|
anirudhpnbb/Pyostie
|
c9ef56529ff7938262ee7df2b88bad8016dec857
|
[
"MIT"
] | 33
|
2021-04-20T09:58:47.000Z
|
2021-09-30T12:34:10.000Z
|
pyostie/utils.py
|
anirudhpnbb/Pyostie
|
c9ef56529ff7938262ee7df2b88bad8016dec857
|
[
"MIT"
] | 6
|
2021-04-29T21:50:42.000Z
|
2022-02-10T22:03:53.000Z
|
import os
import shutil
import datetime
import tempfile
import pydub
extensions = {"jpeg": "jpg", "tiff": "jpg", "tif": "jpg", "png": "jpg", "": "txt", "log": "txt", "xls": "xlsx", "mp3": "wav"}
def process_files(file_list, output_path, folder_name):
    try:
        if not os.path.isdir(folder_name):
            os.mkdir(folder_name)
        x = datetime.datetime.today().strftime('%d%m%Y_%H%M') + '_azure_json_processed_files'
        os.mkdir(output_path + x)
        for i in file_list:
            shutil.move(i, output_path + x)
        shutil.make_archive(output_path + x, 'zip', output_path, x)
        shutil.rmtree(output_path + x)
    except Exception as ex:
        raise ex
def remove_files(filename_with_path):
    """Delete the given file if it exists."""
if os.path.isfile(filename_with_path):
os.remove(filename_with_path)
def remove_folder(foldername_with_path):
    """Delete the given folder tree if it exists."""
if os.path.isdir(foldername_with_path):
shutil.rmtree(foldername_with_path)
def extension_type_check(extension, input_type):
def decorator(function):
def wrapper(args):
if isinstance(args, input_type):
extnsn = extensions.get(extension, extension)
return extnsn.upper()
else:
print("Bad input type.")
return wrapper
return decorator
def mp3_to_wav(source, dst, format):
sound = pydub.AudioSegment.from_mp3(source)
sound.export(dst, format=format)
return dst
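# Hedged usage sketch (not part of the original module): mp3_to_wav is a thin
# wrapper over pydub's export, so ffmpeg must be available to decode the mp3.
# File names below are hypothetical.
#
#   mp3_to_wav("recording.mp3", "recording.wav", format="wav")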
| 29.646154
| 121
| 0.606642
|
27a9a22676d49d6133170ce2ac3f1649a846e2ae
| 896
|
py
|
Python
|
profile/forms.py
|
all-out/all-out-website
|
99e1faee7aaceb311a784ec874e02d0338ae1c37
|
[
"Apache-2.0"
] | null | null | null |
profile/forms.py
|
all-out/all-out-website
|
99e1faee7aaceb311a784ec874e02d0338ae1c37
|
[
"Apache-2.0"
] | 1
|
2015-07-28T17:56:53.000Z
|
2015-07-31T04:29:32.000Z
|
profile/forms.py
|
all-out/all-out-website
|
99e1faee7aaceb311a784ec874e02d0338ae1c37
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth import get_user_model
from profile.models import Character
class UserModelForm(forms.ModelForm):
main_character = forms.ModelChoiceField(
queryset=Character.objects.filter(main_user=None),
required=False)
characters = forms.ModelMultipleChoiceField(
queryset=Character.objects.filter(owner=None),
required=False)
    def save(self, commit=True):
        # TODO: save character data to user.characters
        characters = self.cleaned_data.get('characters', None)
        main_character = self.cleaned_data.get('main_character', None)
        return super(UserModelForm, self).save(commit=commit)
class Meta:
model = get_user_model()
fields = '__all__'
| 29.866667
| 70
| 0.683036
|
62abdb4b9402c2644cfdd585f974fa4a1795625e
| 13,787
|
py
|
Python
|
members/meeting_invites.py
|
louking/members
|
ee204211812e00945f9e2b09cfa130cc9d3e6558
|
[
"Apache-2.0"
] | 1
|
2020-12-07T02:52:01.000Z
|
2020-12-07T02:52:01.000Z
|
members/meeting_invites.py
|
louking/members
|
ee204211812e00945f9e2b09cfa130cc9d3e6558
|
[
"Apache-2.0"
] | 496
|
2020-02-12T15:48:26.000Z
|
2022-03-23T11:17:27.000Z
|
members/meeting_invites.py
|
louking/members
|
ee204211812e00945f9e2b09cfa130cc9d3e6558
|
[
"Apache-2.0"
] | null | null | null |
"""
meeting_invites - support for meeting invitation management
====================================================================================
"""
# standard
from uuid import uuid4
from datetime import datetime
# pypi
from flask import g, render_template
import inflect
# homegrown
from .model import db
from .model import Meeting, Invite, AgendaItem, ActionItem, Email
from .model import INVITE_RESPONSE_ATTENDING, INVITE_KEY_URLARG, ACTION_STATUS_CLOSED
from .model import MEETING_OPTIONS, MEETING_OPTION_RSVP
from .views.admin.viewhelpers import localuser2user, localinterest
from .views.admin.meetings_common import custom_invitation, meeting_has_option
from .helpers import members_active, positions_active
from loutilities.flask_helpers.mailer import sendmail
from loutilities.tables import page_url_for
class ParameterError(Exception): pass
inflect_engine = inflect.engine()
MEETING_INVITE_EMAIL = 'meeting-invite-email'
MEETING_REMINDER_EMAIL = 'meeting-reminder-email'
MEETING_EMAIL = 'meeting-email'
def get_invites(meetingid):
"""
get the invites for a specified meeting
:param meetingid: Meeting.id
:return: list(invitestates.values()), list(invites.values())
"""
meeting = Meeting.query.filter_by(id=meetingid, interest_id=localinterest().id).one_or_none()
if not meeting:
raise ParameterError('meeting with id "{}" not found'.format(meetingid))
def get_invite(meeting, localuser):
"""
get invite for a specific meeting/user combination
:param meeting: Meeting instance
:param localuser: LocalUser instance
:return: localuser.email, invitestate('attending, 'invited', 'send invitation'), Invite instance
"""
user = localuser2user(localuser)
email = user.email
invitestate = {'name': user.name, 'email': email}
invite = Invite.query.filter_by(interest=localinterest(), meeting=meeting, user=localuser).one_or_none()
if invite:
invitestate['state'] = 'attending' if invite.response == INVITE_RESPONSE_ATTENDING else '{} sent'.format(custom_invitation())
else:
invitestate['state'] = 'send {}'.format(custom_invitation())
return email, invitestate, invite
# send invitations to all those who are tagged like the meeting
invitestates = {}
invites = {}
for tag in meeting.tags:
for user in tag.users:
email, invitestate, invite = get_invite(meeting, user)
invitestates[email] = invitestate
invites[email] = invite
for position in tag.positions:
for member in members_active(position, meeting.date):
email, invitestate, invite = get_invite(meeting, member)
# may be overwriting but that's ok
invitestates[email] = invitestate
invites[email] = invite
# return the state values to simplify client work, also return the database records
return list(invitestates.values()), list(invites.values())
def check_add_invite(meeting, localuser, agendaitem, sendemail=True):
"""
check if user invite needs to be added
:param meeting: Meeting instance
:param localuser: LocalUser instance
:param agendaitem: AgendaItem instance for invite to be attached to
:param sendemail: True means send email to localuser
:return: invite (may have been created)
"""
invite = Invite.query.filter_by(interest=localinterest(), meeting=meeting, user=localuser).one_or_none()
if not invite:
# create unique key for invite - uuid4 gives unique key
invitekey = uuid4().hex
invite = Invite(
interest=localinterest(),
meeting=meeting,
user=localuser,
agendaitem=agendaitem,
invitekey=invitekey,
activeinvite=True,
lastreminder=datetime.now(),
)
db.session.add(invite)
db.session.flush()
# optionally send email to user
if sendemail:
# get user's outstanding action items
actionitems = ActionItem.query.filter_by(interest=localinterest(), assignee=localuser). \
filter(ActionItem.status != ACTION_STATUS_CLOSED).all()
email = Email.query.filter_by(meeting=meeting, type=MEETING_INVITE_EMAIL).one()
subject = email.subject
fromlist = email.from_email
rsvpurl = page_url_for('admin.memberstatusreport', interest=g.interest,
urlargs={INVITE_KEY_URLARG: invitekey},
_external=True)
actionitemurl = page_url_for('admin.myactionitems', interest=g.interest, _external=True)
context = {
'meeting': meeting,
'actionitems': actionitems,
'rsvpurl': rsvpurl,
'actionitemurl': actionitemurl,
'message': email.message,
'meeting_text': meeting.meetingtype.meetingwording,
'statusreport_text': meeting.meetingtype.statusreportwording,
'invitation_text': meeting.meetingtype.invitewording,
'aninvitation_text': inflect_engine.a(meeting.meetingtype.invitewording)
}
for meetingoption in MEETING_OPTIONS:
context[meetingoption] = meeting_has_option(meeting, meetingoption)
html = render_template('meeting-invite-email.jinja2', **context)
tolist = localuser.email
cclist = None
sendmail(subject, fromlist, tolist, html, ccaddr=cclist)
invite.activeinvite = True
return invite
def generateinvites(meetingid, sendemail=True, agendatitle='Attendees'):
"""
generate the invitations for a specified meeting; return the agendaitem if created
:param meetingid: Meeting.id
:param sendemail: True means email should be sent to user
:param agendatitle: title for agendaitem, if None or empty string, don't create this
:return: AgendaItem
"""
meeting = Meeting.query.filter_by(id=meetingid, interest_id=localinterest().id).one_or_none()
if not meeting:
raise ParameterError('meeting with id "{}" not found'.format(meetingid))
# only generate Attendees agendaitem if collecting RSVPs
agendaitem = None
if meeting_has_option(meeting, MEETING_OPTION_RSVP) and agendatitle:
# check if agendaitem already exists.
agendaitem = AgendaItem.query.filter_by(interest=localinterest(), meeting=meeting, is_attendee_only=True).one_or_none()
if not agendaitem:
agendaitem = AgendaItem(interest=localinterest(), meeting=meeting, order=1, title=agendatitle, agendaitem='',
is_attendee_only=True)
db.session.add(agendaitem)
# have there been any invites previous to this? used later to deactivate any invites which are not still needed
# need to check now because check_add_invite may add additional invites
previnvites = Invite.query.filter_by(interest=localinterest(), meeting=meeting).all()
# send invitations to all those who are tagged like the meeting [invite] tags
# track current invitations; make current invitations active
currinvites = set()
for tag in meeting.tags:
for user in tag.users:
thisinvite = check_add_invite(meeting, user, agendaitem, sendemail=sendemail)
currinvites |= {thisinvite.id}
for position in tag.positions:
for member in members_active(position, meeting.date):
thisinvite = check_add_invite(meeting, member, agendaitem, sendemail=sendemail)
currinvites |= {thisinvite.id}
# make invite inactive for anyone who was previously invited, but should not currently be invited
for invite in previnvites:
if invite.id not in currinvites:
invite.activeinvite = False
# this agendaitem will be added to the displayed table
db.session.flush()
return agendaitem
def generatereminder(meetingid, member, positions):
"""
generate a meeting reminder email to the user
:param meetingid: id of meeting
:param member: member to remind
:param positions: positions for which this reminder is about
:return: False if new invite sent, True if reminder sent
"""
# find member's invitation, if it exists
invite = Invite.query.filter_by(meeting_id=meetingid, user=member).one_or_none()
meeting = Meeting.query.filter_by(id=meetingid).one()
# invite already exists, send reminder
if invite:
# email record should exist, else software error, so it's ok to use one()
email = Email.query.filter_by(interest=localinterest(), meeting_id=meetingid, type=MEETING_REMINDER_EMAIL).one()
# send reminder email to user
subject = email.subject
fromlist = email.from_email
message = email.message
tolist = member.email
cclist = None
# options = email.options
# get user's outstanding action items
actionitems = ActionItem.query.filter_by(interest=localinterest(), assignee=member). \
filter(ActionItem.status != ACTION_STATUS_CLOSED).all()
# set up urls for email
rsvpurl = page_url_for('admin.memberstatusreport', interest=g.interest,
urlargs={INVITE_KEY_URLARG: invite.invitekey},
_external=True)
actionitemurl = page_url_for('admin.myactionitems', interest=g.interest, _external=True)
# filter positions to those which affect this member
active_positions = positions_active(member, invite.meeting.date)
memberpositions = [p for p in positions if p in active_positions]
# create and send email
context = {
'meeting': invite.meeting,
'message': message,
'actionitems': actionitems,
'rsvpurl': rsvpurl,
'actionitemurl': actionitemurl,
'meeting_text': invite.meeting.meetingtype.meetingwording,
'statusreport_text': invite.meeting.meetingtype.statusreportwording,
'invitation_text': invite.meeting.meetingtype.invitewording,
'aninvitation_text': inflect_engine.a(invite.meeting.meetingtype.invitewording),
'positions': memberpositions,
}
for meetingoption in MEETING_OPTIONS:
context[meetingoption] = meeting_has_option(invite.meeting, meetingoption)
html = render_template('meeting-reminder-email.jinja2', **context)
sendmail(subject, fromlist, tolist, html, ccaddr=cclist)
invite.lastreminder = datetime.now()
reminder = True
# invite doesn't exist yet, create and send invite
else:
meeting = Meeting.query.filter_by(id=meetingid).one()
anyinvite = Invite.query.filter_by(interest=localinterest(), meeting=meeting).first()
check_add_invite(meeting, member, anyinvite.agendaitem)
reminder = False
return reminder
def send_meeting_email(meeting_id, subject, message):
"""
send email to meeting invitees
:param meeting_id: id of meeting
:param subject: subject for message
:param message: message in html format
:return: list of addresses email was sent to
"""
meeting = Meeting.query.filter_by(id=meeting_id).one()
invites = Invite.query.filter_by(meeting_id=meeting_id).all()
tolist = ['{} <{}>'.format(i.user.name, i.user.email) for i in invites]
# use from address configured for email
email = Email.query.filter_by(meeting_id=meeting_id, type=MEETING_EMAIL, interest=localinterest()).one()
fromaddr = email.from_email
context = {
'meeting': meeting,
'message': message,
'meeting_text': meeting.meetingtype.meetingwording,
'statusreport_text': meeting.meetingtype.statusreportwording,
'invitation_text': meeting.meetingtype.invitewording,
'aninvitation_text': inflect_engine.a(meeting.meetingtype.invitewording),
'future_meeting': meeting.date >= datetime.today().date(),
}
for meetingoption in MEETING_OPTIONS:
context[meetingoption] = meeting_has_option(meeting, meetingoption)
html = render_template('meeting-send-email.jinja2', **context)
    sendmail(subject, fromaddr, tolist, html)
return tolist
def send_discuss_email(meeting_id):
"""
send email to meeting invitees
:param meeting_id: id of meeting
:param subject: subject for message
:param message: message in html format
:return: list of addresses email was sent to
"""
invites = Invite.query.filter_by(meeting_id=meeting_id).all()
meeting = Meeting.query.filter_by(id=meeting_id).one()
tolist = ['{} <{}>'.format(i.user.name, i.user.email) for i in invites]
# use from address configured for email
email = Email.query.filter_by(meeting_id=meeting_id, type=MEETING_INVITE_EMAIL, interest=localinterest()).one()
fromaddr = email.from_email
subject = email.subject
message = email.message
# create and send email
context = {
'meeting': meeting,
'message': message,
'meeting_text': meeting.meetingtype.meetingwording,
'statusreport_text': meeting.meetingtype.statusreportwording,
'invitation_text': meeting.meetingtype.invitewording,
'aninvitation_text': inflect_engine.a(meeting.meetingtype.invitewording),
}
for meetingoption in MEETING_OPTIONS:
context[meetingoption] = meeting_has_option(meeting, meetingoption)
html = render_template('meeting-discuss-email.jinja2', **context)
sendmail(subject, fromaddr, tolist, html)
return tolist
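# Hedged usage sketch (not part of the original module): these helpers assume an
# active Flask request/application context with g.interest set, since they
# resolve the local interest and build RSVP urls from it. An admin view that
# (re)invites members and then inspects their RSVP state might call:
#
#   agendaitem = generateinvites(meeting.id, sendemail=True)
#   invitestates, invites = get_invites(meeting.id)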
| 41.032738
| 137
| 0.674403
|
6b2acff5eb83631fc165d30afc0961389a22f340
| 1,606
|
py
|
Python
|
tapdance/install_helper.py
|
dataops-tk/tapdance
|
9ba09ab1625bb3bb49ca7cc4fe659402280b038f
|
[
"MIT"
] | 8
|
2020-04-23T05:45:38.000Z
|
2020-08-29T23:26:58.000Z
|
tapdance/install_helper.py
|
aaronsteers/tapdance
|
9ba09ab1625bb3bb49ca7cc4fe659402280b038f
|
[
"MIT"
] | 7
|
2020-05-11T17:36:59.000Z
|
2021-02-10T20:48:30.000Z
|
tapdance/install_helper.py
|
dataops-tk/tapdance
|
9ba09ab1625bb3bb49ca7cc4fe659402280b038f
|
[
"MIT"
] | null | null | null |
"""tapdance.install_helper - functions to help install plugins."""
# TODO: Deprecate this module in favor of using pipx.
import os
import uio
import runnow
from logless import logged, get_logger
from tapdance import config
logging = get_logger("tapdance")
@logged(
"installing '{plugin_name}' as '{alias or plugin_name}' "
"using 'pip3 install {source or plugin_name}'"
)
def install(plugin_name: str, source: str = None, alias: str = None):
"""
Install the requested plugin to the local machine.
Arguments:
plugin_name {str} -- The name of the plugin to install, including the tap- or
target- prefix.
Keyword Arguments:
source {str} -- Optional. Overrides the pip installation source.
alias {str} -- Optional. Overrides the name (alias) of the plugin.
Raises:
        RuntimeError: If the install path already exists and the user declines to replace it.
"""
source = source or plugin_name
alias = alias or plugin_name
venv_dir = os.path.join(config.VENV_ROOT, alias)
install_path = os.path.join(config.INSTALL_ROOT, alias)
if uio.file_exists(install_path):
response = input(
f"The file '{install_path}' already exists. "
f"Are you sure you want to replace this file? [y/n]"
)
        if response.lower() not in ("y", "yes"):
raise RuntimeError(f"File already exists '{install_path}'.")
uio.delete_file(install_path)
runnow.run(f"python3 -m venv {venv_dir}")
    runnow.run(f"{os.path.join(venv_dir, 'bin', 'pip3')} install {source}")
runnow.run(f"ln -s {venv_dir}/bin/{plugin_name} {install_path}")
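# Hedged usage sketch (not part of the original module): plugin names below are
# hypothetical. install() creates a dedicated venv under config.VENV_ROOT and
# symlinks the plugin executable into config.INSTALL_ROOT.
#
#   install("tap-covid-19")
#   install("tap-mysql", alias="tap-mysql-custom", source="pipelinewise-tap-mysql")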
| 30.884615
| 85
| 0.66127
|
f66b70ef55a441386636b18a215704a411c6b353
| 98,950
|
py
|
Python
|
superset/views/core.py
|
gauravbansal2000/incubator-superset
|
fdc51fbd01651245033eb1cbc3a8c7f9750313b4
|
[
"Apache-2.0"
] | 1
|
2019-08-13T07:46:58.000Z
|
2019-08-13T07:46:58.000Z
|
superset/views/core.py
|
gauravbansal2000/incubator-superset
|
fdc51fbd01651245033eb1cbc3a8c7f9750313b4
|
[
"Apache-2.0"
] | 5
|
2022-02-01T00:55:15.000Z
|
2022-03-29T22:29:10.000Z
|
superset/views/core.py
|
gauravbansal2000/incubator-superset
|
fdc51fbd01651245033eb1cbc3a8c7f9750313b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime, timedelta
import json
import logging
import os
import re
import time
import traceback
from urllib import parse
from flask import (
flash, g, Markup, redirect, render_template, request, Response, url_for,
)
from flask_appbuilder import expose, SimpleFormView
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
import pandas as pd
from six import text_type
import sqlalchemy as sqla
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import IntegrityError
from unidecode import unidecode
from werkzeug.routing import BaseConverter
from werkzeug.utils import secure_filename
from superset import (
app, appbuilder, cache, db, results_backend, security_manager, sql_lab, utils,
viz,
)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource, SqlaTable
from superset.exceptions import SupersetException, SupersetSecurityException
from superset.forms import CsvToDatabaseForm
from superset.jinja_context import get_template_processor
from superset.legacy import cast_form_data
import superset.models.core as models
from superset.models.sql_lab import Query
from superset.sql_parse import SupersetQuery
from superset.utils import (
merge_extra_filters, merge_request_params, QueryStatus,
)
from .base import (
api, BaseSupersetView, CsvResponse, DeleteMixin,
generate_download_headers, get_error_msg, get_user_roles,
json_error_response, SupersetFilter, SupersetModelView, YamlExportMixin,
)
from .utils import bootstrap_user_data
config = app.config
stats_logger = config.get('STATS_LOGGER')
log_this = models.Log.log_this
DAR = models.DatasourceAccessRequest
ALL_DATASOURCE_ACCESS_ERR = __(
'This endpoint requires the `all_datasource_access` permission')
DATASOURCE_MISSING_ERR = __('The datasource seems to have been deleted')
ACCESS_REQUEST_MISSING_ERR = __(
'The access requests seem to have been deleted')
USER_MISSING_ERR = __('The user seems to have been deleted')
perms_instruction_link = config.get('PERMISSION_INSTRUCTIONS_LINK')
if perms_instruction_link:
DATASOURCE_ACCESS_ERR = __(
"You don't have access to this datasource. <a href='{}'>(Gain access)</a>"
.format(perms_instruction_link),
)
else:
DATASOURCE_ACCESS_ERR = __("You don't have access to this datasource")
FORM_DATA_KEY_BLACKLIST = []
if not config.get('ENABLE_JAVASCRIPT_CONTROLS'):
FORM_DATA_KEY_BLACKLIST = [
'js_tooltip',
'js_onclick_href',
'js_data_mutator',
]
def get_database_access_error_msg(database_name):
return __('This view requires the database %(name)s or '
'`all_datasource_access` permission', name=database_name)
def get_datasource_access_error_msg(datasource_name):
return __('This endpoint requires the datasource %(name)s, database or '
'`all_datasource_access` permission', name=datasource_name)
def json_success(json_msg, status=200):
return Response(json_msg, status=status, mimetype='application/json')
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners
def check_ownership(obj, raise_if_false=True):
"""Meant to be used in `pre_update` hooks on models to enforce ownership
    Admins have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
"""
if not obj:
return False
security_exception = SupersetSecurityException(
"You don't have the rights to alter [{}]".format(obj))
if g.user.is_anonymous():
if raise_if_false:
raise security_exception
return False
roles = (r.name for r in get_user_roles())
if 'Admin' in roles:
return True
session = db.create_scoped_session()
orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()
owner_names = (user.username for user in orig_obj.owners)
if (
hasattr(orig_obj, 'created_by') and
orig_obj.created_by and
orig_obj.created_by.username == g.user.username):
return True
if (
hasattr(orig_obj, 'owners') and
g.user and
hasattr(g.user, 'username') and
g.user.username in owner_names):
return True
if raise_if_false:
raise security_exception
else:
return False
class SliceFilter(SupersetFilter):
def apply(self, query, func): # noqa
if self.has_all_datasource_access():
return query
perms = self.get_view_menus('datasource_access')
# TODO(bogdan): add `schema_access` support here
return query.filter(self.model.perm.in_(perms))
class DashboardFilter(SupersetFilter):
"""List dashboards for which users have access to at least one slice"""
def apply(self, query, func): # noqa
if self.has_all_datasource_access():
return query
Slice = models.Slice # noqa
Dash = models.Dashboard # noqa
# TODO(bogdan): add `schema_access` support here
datasource_perms = self.get_view_menus('datasource_access')
slice_ids_qry = (
db.session
.query(Slice.id)
.filter(Slice.perm.in_(datasource_perms))
)
query = query.filter(
Dash.id.in_(
db.session.query(Dash.id)
.distinct()
.join(Dash.slices)
.filter(Slice.id.in_(slice_ids_qry)),
),
)
return query
class DatabaseView(SupersetModelView, DeleteMixin, YamlExportMixin): # noqa
datamodel = SQLAInterface(models.Database)
list_title = _('List Databases')
show_title = _('Show Database')
add_title = _('Add Database')
edit_title = _('Edit Database')
list_columns = [
'database_name', 'backend', 'allow_run_sync', 'allow_run_async',
'allow_dml', 'creator', 'modified']
order_columns = [
'database_name', 'allow_run_sync', 'allow_run_async', 'allow_dml',
'modified',
]
add_columns = [
'database_name', 'sqlalchemy_uri', 'cache_timeout', 'extra',
'expose_in_sqllab', 'allow_run_sync', 'allow_run_async',
'allow_ctas', 'allow_dml', 'force_ctas_schema', 'impersonate_user',
'allow_multi_schema_metadata_fetch',
]
search_exclude_columns = (
'password', 'tables', 'created_by', 'changed_by', 'queries',
'saved_queries')
edit_columns = add_columns
show_columns = [
'tables',
'cache_timeout',
'extra',
'database_name',
'sqlalchemy_uri',
'perm',
'created_by',
'created_on',
'changed_by',
'changed_on',
]
add_template = 'superset/models/database/add.html'
edit_template = 'superset/models/database/edit.html'
base_order = ('changed_on', 'desc')
description_columns = {
'sqlalchemy_uri': utils.markdown(
'Refer to the '
'[SqlAlchemy docs]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#'
'database-urls) '
'for more information on how to structure your URI.', True),
'expose_in_sqllab': _('Expose this DB in SQL Lab'),
'allow_run_sync': _(
'Allow users to run synchronous queries, this is the default '
'and should work well for queries that can be executed '
'within a web request scope (<~1 minute)'),
'allow_run_async': _(
'Allow users to run queries, against an async backend. '
'This assumes that you have a Celery worker setup as well '
'as a results backend.'),
'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),
'allow_dml': _(
'Allow users to run non-SELECT statements '
'(UPDATE, DELETE, CREATE, ...) '
'in SQL Lab'),
'force_ctas_schema': _(
'When allowing CREATE TABLE AS option in SQL Lab, '
'this option forces the table to be created in this schema'),
'extra': utils.markdown(
'JSON string containing extra configuration elements. '
'The ``engine_params`` object gets unpacked into the '
'[sqlalchemy.create_engine]'
'(http://docs.sqlalchemy.org/en/latest/core/engines.html#'
'sqlalchemy.create_engine) call, while the ``metadata_params`` '
'gets unpacked into the [sqlalchemy.MetaData]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'
'#sqlalchemy.schema.MetaData) call. ', True),
'impersonate_user': _(
'If Presto, all the queries in SQL Lab are going to be executed as the '
'currently logged on user who must have permission to run them.<br/>'
'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '
'service account, but impersonate the currently logged on user '
'via hive.server2.proxy.user property.'),
'allow_multi_schema_metadata_fetch': _(
'Allow SQL Lab to fetch a list of all tables and all views across '
'all database schemas. For large data warehouse with thousands of '
'tables, this can be expensive and put strain on the system.'),
}
label_columns = {
'expose_in_sqllab': _('Expose in SQL Lab'),
'allow_ctas': _('Allow CREATE TABLE AS'),
'allow_dml': _('Allow DML'),
'force_ctas_schema': _('CTAS Schema'),
'database_name': _('Database'),
'creator': _('Creator'),
'changed_on_': _('Last Changed'),
'sqlalchemy_uri': _('SQLAlchemy URI'),
'cache_timeout': _('Cache Timeout'),
'extra': _('Extra'),
'allow_run_sync': _('Allow Run Sync'),
'allow_run_async': _('Allow Run Async'),
'impersonate_user': _('Impersonate the logged on user'),
}
def pre_add(self, db):
db.set_sqlalchemy_uri(db.sqlalchemy_uri)
security_manager.merge_perm('database_access', db.perm)
for schema in db.all_schema_names():
security_manager.merge_perm(
'schema_access', security_manager.get_schema_perm(db, schema))
def pre_update(self, db):
self.pre_add(db)
def _delete(self, pk):
DeleteMixin._delete(self, pk)
appbuilder.add_link(
'Import Dashboards',
label=__('Import Dashboards'),
href='/superset/import_dashboards',
icon='fa-cloud-upload',
category='Manage',
category_label=__('Manage'),
category_icon='fa-wrench')
appbuilder.add_view(
DatabaseView,
'Databases',
label=__('Databases'),
icon='fa-database',
category='Sources',
category_label=__('Sources'),
category_icon='fa-database')
class DatabaseAsync(DatabaseView):
list_columns = [
'id', 'database_name',
'expose_in_sqllab', 'allow_ctas', 'force_ctas_schema',
'allow_run_async', 'allow_run_sync', 'allow_dml',
'allow_multi_schema_metadata_fetch',
]
appbuilder.add_view_no_menu(DatabaseAsync)
class CsvToDatabaseView(SimpleFormView):
form = CsvToDatabaseForm
form_title = _('CSV to Database configuration')
add_columns = ['database', 'schema', 'table_name']
def form_get(self, form):
form.sep.data = ','
form.header.data = 0
form.mangle_dupe_cols.data = True
form.skipinitialspace.data = False
form.skip_blank_lines.data = True
form.infer_datetime_format.data = True
form.decimal.data = '.'
form.if_exists.data = 'append'
def form_post(self, form):
csv_file = form.csv_file.data
form.csv_file.data.filename = secure_filename(form.csv_file.data.filename)
csv_filename = form.csv_file.data.filename
path = os.path.join(config['UPLOAD_FOLDER'], csv_filename)
try:
utils.ensure_path_exists(config['UPLOAD_FOLDER'])
csv_file.save(path)
table = SqlaTable(table_name=form.name.data)
table.database = form.data.get('con')
table.database_id = table.database.id
table.database.db_engine_spec.create_table_from_csv(form, table)
except Exception as e:
try:
os.remove(path)
except OSError:
pass
message = 'Table name {} already exists. Please pick another'.format(
form.name.data) if isinstance(e, IntegrityError) else text_type(e)
flash(
message,
'danger')
return redirect('/csvtodatabaseview/form')
os.remove(path)
# Go back to welcome page / splash screen
db_name = table.database.database_name
message = _('CSV file "{0}" uploaded to table "{1}" in '
'database "{2}"'.format(csv_filename,
form.name.data,
db_name))
flash(message, 'info')
return redirect('/tablemodelview/list/')
appbuilder.add_view_no_menu(CsvToDatabaseView)
class DatabaseTablesAsync(DatabaseView):
list_columns = ['id', 'all_table_names', 'all_schema_names']
appbuilder.add_view_no_menu(DatabaseTablesAsync)
if config.get('ENABLE_ACCESS_REQUEST'):
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(DAR)
list_columns = [
'username', 'user_roles', 'datasource_link',
'roles_with_datasource', 'created_on']
order_columns = ['created_on']
base_order = ('changed_on', 'desc')
label_columns = {
'username': _('User'),
'user_roles': _('User Roles'),
'database': _('Database URL'),
'datasource_link': _('Datasource'),
'roles_with_datasource': _('Roles to grant'),
'created_on': _('Created On'),
}
appbuilder.add_view(
AccessRequestsModelView,
'Access requests',
label=__('Access requests'),
category='Security',
category_label=__('Security'),
icon='fa-table')
class SliceModelView(SupersetModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.Slice)
list_title = _('List Charts')
show_title = _('Show Chart')
add_title = _('Add Chart')
edit_title = _('Edit Chart')
can_add = False
label_columns = {
'datasource_link': _('Datasource'),
}
search_columns = (
'slice_name', 'description', 'viz_type', 'datasource_name', 'owners',
)
list_columns = [
'slice_link', 'viz_type', 'datasource_link', 'creator', 'modified']
order_columns = ['viz_type', 'datasource_link', 'modified']
edit_columns = [
'slice_name', 'description', 'viz_type', 'owners', 'dashboards',
'params', 'cache_timeout']
base_order = ('changed_on', 'desc')
description_columns = {
'description': Markup(
'The content here can be displayed as widget headers in the '
'dashboard view. Supports '
            '<a href="https://daringfireball.net/projects/markdown/">'
'markdown</a>'),
'params': _(
'These parameters are generated dynamically when clicking '
'the save or overwrite button in the explore view. This JSON '
'object is exposed here for reference and for power users who may '
'want to alter specific parameters.',
),
'cache_timeout': _(
'Duration (in seconds) of the caching timeout for this slice.'),
}
base_filters = [['id', SliceFilter, lambda: []]]
label_columns = {
'cache_timeout': _('Cache Timeout'),
'creator': _('Creator'),
'dashboards': _('Dashboards'),
'datasource_link': _('Datasource'),
'description': _('Description'),
'modified': _('Last Modified'),
'owners': _('Owners'),
'params': _('Parameters'),
'slice_link': _('Chart'),
'slice_name': _('Name'),
'table': _('Table'),
'viz_type': _('Visualization Type'),
}
def pre_add(self, obj):
utils.validate_json(obj.params)
def pre_update(self, obj):
utils.validate_json(obj.params)
check_ownership(obj)
def pre_delete(self, obj):
check_ownership(obj)
@expose('/add', methods=['GET', 'POST'])
@has_access
def add(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
{'value': str(d.id) + '__' + d.type, 'label': repr(d)}
for d in datasources
]
return self.render_template(
'superset/add_slice.html',
bootstrap_data=json.dumps({
'datasources': sorted(datasources, key=lambda d: d['label']),
}),
)
appbuilder.add_view(
SliceModelView,
'Charts',
label=__('Charts'),
icon='fa-bar-chart',
category='',
category_icon='')
class SliceAsync(SliceModelView): # noqa
list_columns = [
'id', 'slice_link', 'viz_type', 'slice_name',
'creator', 'modified', 'icons']
label_columns = {
'icons': ' ',
'slice_link': _('Chart'),
}
appbuilder.add_view_no_menu(SliceAsync)
class SliceAddView(SliceModelView): # noqa
list_columns = [
'id', 'slice_name', 'slice_link', 'viz_type',
'datasource_link', 'owners', 'modified', 'changed_on']
show_columns = list(set(SliceModelView.edit_columns + list_columns))
appbuilder.add_view_no_menu(SliceAddView)
class DashboardModelView(SupersetModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.Dashboard)
list_title = _('List Dashboards')
show_title = _('Show Dashboard')
add_title = _('Add Dashboard')
edit_title = _('Edit Dashboard')
list_columns = ['dashboard_link', 'creator', 'modified']
order_columns = ['modified']
edit_columns = [
'dashboard_title', 'slug', 'slices', 'owners', 'position_json', 'css',
'json_metadata']
show_columns = edit_columns + ['table_names']
search_columns = ('dashboard_title', 'slug', 'owners')
add_columns = edit_columns
base_order = ('changed_on', 'desc')
description_columns = {
'position_json': _(
'This json object describes the positioning of the widgets in '
'the dashboard. It is dynamically generated when adjusting '
'the widgets size and positions by using drag & drop in '
'the dashboard view'),
'css': _(
'The css for individual dashboards can be altered here, or '
'in the dashboard view where changes are immediately '
'visible'),
'slug': _('To get a readable URL for your dashboard'),
'json_metadata': _(
'This JSON object is generated dynamically when clicking '
'the save or overwrite button in the dashboard view. It '
'is exposed here for reference and for power users who may '
'want to alter specific parameters.'),
'owners': _('Owners is a list of users who can alter the dashboard.'),
}
base_filters = [['slice', DashboardFilter, lambda: []]]
add_form_query_rel_fields = {
'slices': [['slices', SliceFilter, None]],
}
edit_form_query_rel_fields = add_form_query_rel_fields
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'slug': _('Slug'),
'slices': _('Charts'),
'owners': _('Owners'),
'creator': _('Creator'),
'modified': _('Modified'),
'position_json': _('Position JSON'),
'css': _('CSS'),
'json_metadata': _('JSON Metadata'),
'table_names': _('Underlying Tables'),
}
def pre_add(self, obj):
obj.slug = obj.slug.strip() or None
if obj.slug:
obj.slug = obj.slug.replace(' ', '-')
obj.slug = re.sub(r'[^\w\-]+', '', obj.slug)
if g.user not in obj.owners:
obj.owners.append(g.user)
utils.validate_json(obj.json_metadata)
utils.validate_json(obj.position_json)
owners = [o for o in obj.owners]
for slc in obj.slices:
slc.owners = list(set(owners) | set(slc.owners))
def pre_update(self, obj):
check_ownership(obj)
self.pre_add(obj)
def pre_delete(self, obj):
check_ownership(obj)
@action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')
def mulexport(self, items):
if not isinstance(items, list):
items = [items]
ids = ''.join('&id={}'.format(d.id) for d in items)
return redirect(
'/dashboardmodelview/export_dashboards_form?{}'.format(ids[1:]))
@expose('/export_dashboards_form')
def download_dashboards(self):
if request.args.get('action') == 'go':
ids = request.args.getlist('id')
return Response(
models.Dashboard.export_dashboards(ids),
headers=generate_download_headers('json'),
mimetype='application/text')
return self.render_template(
'superset/export_dashboards.html',
dashboards_url='/dashboardmodelview/list',
)
appbuilder.add_view(
DashboardModelView,
'Dashboards',
label=__('Dashboards'),
icon='fa-dashboard',
category='',
category_icon='')
class DashboardModelViewAsync(DashboardModelView): # noqa
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'creator': _('Creator'),
'modified': _('Modified'),
}
appbuilder.add_view_no_menu(DashboardModelViewAsync)
class DashboardAddView(DashboardModelView): # noqa
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
show_columns = list(set(DashboardModelView.edit_columns + list_columns))
appbuilder.add_view_no_menu(DashboardAddView)
class LogModelView(SupersetModelView):
datamodel = SQLAInterface(models.Log)
list_columns = ('user', 'action', 'dttm')
edit_columns = ('user', 'action', 'dttm', 'json')
base_order = ('dttm', 'desc')
label_columns = {
'user': _('User'),
'action': _('Action'),
'dttm': _('dttm'),
'json': _('JSON'),
}
appbuilder.add_view(
LogModelView,
'Action Log',
label=__('Action Log'),
category='Security',
category_label=__('Security'),
icon='fa-list-ol')
@app.route('/health')
def health():
return 'OK'
@app.route('/healthcheck')
def healthcheck():
return 'OK'
@app.route('/ping')
def ping():
return 'OK'
class KV(BaseSupersetView):
"""Used for storing and retrieving key value pairs"""
@log_this
@expose('/store/', methods=['POST'])
def store(self):
try:
value = request.form.get('data')
obj = models.KeyValue(value=value)
db.session.add(obj)
db.session.commit()
except Exception as e:
return json_error_response(e)
return Response(
json.dumps({'id': obj.id}),
status=200)
@log_this
@expose('/<key_id>/', methods=['GET'])
def get_value(self, key_id):
kv = None
try:
kv = db.session.query(models.KeyValue).filter_by(id=key_id).one()
except Exception as e:
return json_error_response(e)
return Response(kv.value, status=200)
appbuilder.add_view_no_menu(KV)
class R(BaseSupersetView):
"""used for short urls"""
@log_this
@expose('/<url_id>')
def index(self, url_id):
url = db.session.query(models.Url).filter_by(id=url_id).first()
if url:
return redirect('/' + url.url)
else:
flash('URL to nowhere...', 'danger')
return redirect('/')
@log_this
@expose('/shortner/', methods=['POST', 'GET'])
def shortner(self):
url = request.form.get('data')
directory = url.split('?')[0][2:]
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return Response(
'http://{request.headers[Host]}/{directory}?r={obj.id}'.format(
request=request, directory=directory, obj=obj),
mimetype='text/plain')
@expose('/msg/')
def msg(self):
"""Redirects to specified url while flash a message"""
flash(Markup(request.args.get('msg')), 'info')
return redirect(request.args.get('url'))
appbuilder.add_view_no_menu(R)
class Superset(BaseSupersetView):
"""The base views for Superset!"""
def json_response(self, obj, status=200):
return Response(
json.dumps(obj, default=utils.json_int_dttm_ser),
status=status,
mimetype='application/json')
@has_access_api
@expose('/datasources/')
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources]
datasources = sorted(datasources, key=lambda o: o['name'])
return self.json_response(datasources)
@has_access_api
@expose('/override_role_permissions/', methods=['POST'])
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data['role_name']
databases = data['database']
db_ds_names = set()
for dbs in databases:
for schema in dbs['schema']:
for ds_name in schema['datasources']:
fullname = utils.get_datasource_full_name(
dbs['name'], ds_name, schema=schema['name'])
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm,
permission_name='datasource_access')
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response({
'granted': granted_perms,
'requested': list(db_ds_names),
}, status=201)
@log_this
@has_access
@expose('/request_access/')
def request_access(self):
datasources = set()
dashboard_id = request.args.get('dashboard_id')
if dashboard_id:
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(dashboard_id))
.one()
)
datasources |= dash.datasources
datasource_id = request.args.get('datasource_id')
datasource_type = request.args.get('datasource_type')
if datasource_id:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class)
.filter_by(id=int(datasource_id))
.one()
)
datasources.add(datasource)
has_access = all(
(
datasource and security_manager.datasource_access(datasource)
for datasource in datasources
))
if has_access:
return redirect('/superset/dashboard/{}'.format(dashboard_id))
if request.args.get('action') == 'go':
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id,
datasource_type=datasource.type)
db.session.add(access_request)
db.session.commit()
flash(__('Access was requested'), 'info')
return redirect('/')
return self.render_template(
'superset/request_access.html',
datasources=datasources,
datasource_names=', '.join([o.name for o in datasources]),
)
@log_this
@has_access
@expose('/approve')
def approve(self):
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
r.datasource_type, r.datasource_id, session)
user = security_manager.get_user_by_id(r.created_by_fk)
if not datasource or \
security_manager.datasource_access(datasource, user):
                    # the datasource no longer exists, or access has since been granted
session.delete(r)
session.commit()
datasource_type = request.args.get('datasource_type')
datasource_id = request.args.get('datasource_id')
created_by_username = request.args.get('created_by')
role_to_grant = request.args.get('role_to_grant')
role_to_extend = request.args.get('role_to_extend')
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'alert')
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, 'alert')
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id)
.all()
)
if not requests:
flash(ACCESS_REQUEST_MISSING_ERR, 'alert')
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
# check if you can approve
if security_manager.all_datasource_access() or g.user.id == datasource.owner_id:
            # can be done by admin only
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
'%(user)s was granted the role %(role)s that gives access '
'to the %(datasource)s',
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_granted.txt', app.config)
flash(msg, 'info')
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
'email/datasource_access', datasource.perm)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __('Role %(r)s was extended to provide the access to '
'the datasource %(ds)s', r=role_to_extend,
ds=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_extended.txt', app.config)
flash(msg, 'info')
clean_fulfilled_requests(session)
else:
flash(__('You have no permission to approve this request'),
'danger')
return redirect('/accessrequestsmodelview/list/')
for r in requests:
session.delete(r)
session.commit()
return redirect('/accessrequestsmodelview/list/')
def get_form_data(self, slice_id=None):
form_data = {}
post_data = request.form.get('form_data')
request_args_data = request.args.get('form_data')
# Supporting POST
if post_data:
form_data.update(json.loads(post_data))
# request params can overwrite post body
if request_args_data:
form_data.update(json.loads(request_args_data))
url_id = request.args.get('r')
if url_id:
saved_url = db.session.query(models.Url).filter_by(id=url_id).first()
if saved_url:
url_str = parse.unquote_plus(
saved_url.url.split('?')[1][10:], encoding='utf-8', errors=None)
url_form_data = json.loads(url_str)
                # allow form_data in the request to override the saved URL
url_form_data.update(form_data)
form_data = url_form_data
if request.args.get('viz_type'):
# Converting old URLs
form_data = cast_form_data(form_data)
form_data = {
k: v
for k, v in form_data.items()
if k not in FORM_DATA_KEY_BLACKLIST
}
# When a slice_id is present, load from DB and override
# the form_data from the DB with the other form_data provided
slice_id = form_data.get('slice_id') or slice_id
slc = None
if slice_id:
slc = db.session.query(models.Slice).filter_by(id=slice_id).first()
slice_form_data = slc.form_data.copy()
            # allow form_data in the request to override the slice's form_data
slice_form_data.update(form_data)
form_data = slice_form_data
return form_data, slc
def get_viz(
self,
slice_id=None,
form_data=None,
datasource_type=None,
datasource_id=None,
force=False,
):
if slice_id:
slc = (
db.session.query(models.Slice)
.filter_by(id=slice_id)
.one()
)
return slc.get_viz()
else:
viz_type = form_data.get('viz_type', 'table')
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
viz_obj = viz.viz_types[viz_type](
datasource,
form_data=form_data,
force=force,
)
return viz_obj
@has_access
@expose('/slice/<slice_id>/')
def slice(self, slice_id):
form_data, slc = self.get_form_data(slice_id)
endpoint = '/superset/explore/?form_data={}'.format(
parse.quote(json.dumps(form_data)),
)
if request.args.get('standalone') == 'true':
endpoint += '&standalone=true'
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logging.exception(e)
return json_error_response(e)
if query_obj and query_obj['prequeries']:
query_obj['prequeries'].append(query)
query = ';\n\n'.join(query_obj['prequeries'])
if query:
query += ';'
else:
query = 'No query.'
return Response(
json.dumps({
'query': query,
'language': viz_obj.datasource.query_language,
}),
status=200,
mimetype='application/json')
def generate_json(self, datasource_type, datasource_id, form_data,
csv=False, query=False, force=False):
try:
viz_obj = self.get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
if not security_manager.datasource_access(viz_obj.datasource, g.user):
return json_error_response(DATASOURCE_ACCESS_ERR, status=404)
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers('csv'),
mimetype='application/csv')
if query:
return self.get_query_string_response(viz_obj)
try:
payload = viz_obj.get_payload()
except SupersetException as se:
logging.exception(se)
return json_error_response(utils.error_msg_from_exception(se),
status=se.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if (
payload.get('status') == QueryStatus.FAILED or
payload.get('error') is not None
):
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/slice_json/<slice_id>')
def slice_json(self, slice_id):
try:
form_data, slc = self.get_form_data(slice_id)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
except Exception as e:
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data)
@log_this
@has_access_api
@expose('/annotation_json/<layer_id>')
def annotation_json(self, layer_id):
form_data = self.get_form_data()[0]
form_data['layer_id'] = layer_id
form_data['filters'] = [{'col': 'layer_id',
'op': '==',
'val': layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types['table'](
datasource,
form_data=form_data,
force=False,
)
try:
payload = viz_obj.get_payload()
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if payload.get('status') == QueryStatus.FAILED:
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/explore_json/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore_json/', methods=['GET', 'POST'])
def explore_json(self, datasource_type=None, datasource_id=None):
try:
csv = request.args.get('csv') == 'true'
query = request.args.get('query') == 'true'
force = request.args.get('force') == 'true'
form_data = self.get_form_data()[0]
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
csv=csv,
query=query,
force=force)
@log_this
@has_access
@expose('/import_dashboards', methods=['GET', 'POST'])
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get('file')
if request.method == 'POST' and f:
current_tt = int(time.time())
data = json.loads(f.stream.read(), object_hook=utils.decode_dashboards)
# TODO: import DRUID datasources
for table in data['datasources']:
type(table).import_obj(table, import_time=current_tt)
db.session.commit()
for dashboard in data['dashboards']:
models.Dashboard.import_obj(
dashboard, import_time=current_tt)
db.session.commit()
return redirect('/dashboardmodelview/list/')
return self.render_template('superset/import_dashboards.html')
@log_this
@has_access
@expose('/explorev2/<datasource_type>/<datasource_id>/')
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args))
@staticmethod
def datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
        datasource_id & datasource_type used to be passed in the URL
        directly; now they should come as part of the form_data.
        This function allows supporting both without duplicating code."""
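        # Illustrative example (hypothetical values): form_data containing
        # {'datasource': '3__table'} yields (3, 'table'); if no 'datasource'
        # key is present, the values passed in the URL are returned unchanged.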
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
@log_this
@has_access
@expose('/explore/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore/', methods=['GET', 'POST'])
def explore(self, datasource_type=None, datasource_id=None):
user_id = g.user.get_id() if g.user else None
form_data, slc = self.get_form_data()
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
error_redirect = '/slicemodelview/list/'
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'danger')
return redirect(error_redirect)
if not security_manager.datasource_access(datasource):
flash(
__(get_datasource_access_error_msg(datasource.name)),
'danger')
return redirect(
'superset/request_access/?'
'datasource_type={datasource_type}&'
'datasource_id={datasource_id}&'
''.format(**locals()))
viz_type = form_data.get('viz_type')
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# slc perms
slice_add_perm = security_manager.can_access('can_add', 'SliceModelView')
slice_overwrite_perm = is_owner(slc, g.user)
slice_download_perm = security_manager.can_access(
'can_download', 'SliceModelView')
form_data['datasource'] = str(datasource_id) + '__' + datasource_type
# On explore, merge extra filters into the form data
merge_extra_filters(form_data)
# merge request url params
if request.method == 'GET':
merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get('action')
if action == 'overwrite' and not slice_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') + _('chart'),
status=400)
if action == 'saveas' and not slice_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('chart'),
status=400)
if action in ('saveas', 'overwrite'):
return self.save_or_overwrite_slice(
request.args,
slc, slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource.name)
standalone = request.args.get('standalone') == 'true'
bootstrap_data = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': slice_overwrite_perm,
'datasource': datasource.data,
'form_data': form_data,
'datasource_id': datasource_id,
'datasource_type': datasource_type,
'slice': slc.data if slc else None,
'standalone': standalone,
'user_id': user_id,
'forced_height': request.args.get('height'),
'common': self.common_bootsrap_payload(),
}
table_name = datasource.table_name \
if datasource_type == 'table' \
else datasource.datasource_name
if slc:
title = slc.slice_name
else:
title = 'Explore - ' + table_name
return self.render_template(
'superset/basic.html',
bootstrap_data=json.dumps(bootstrap_data),
entry='explore',
title=title,
standalone_mode=standalone)
@api
@has_access_api
@expose('/filter/<datasource_type>/<datasource_id>/<column>/')
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
if not security_manager.datasource_access(datasource):
return json_error_response(DATASOURCE_ACCESS_ERR)
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload)
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data, _ = self.get_form_data()
        if action == 'saveas':
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
        if action == 'saveas' and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
'Slice [{}] was added to dashboard [{}]'.format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
'Dashboard [{}] just got created and slice [{}] was added '
'to it'.format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = 'Slice [{}] has been saved'.format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, 'info')
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = 'Slice [{}] has been overwritten'.format(slc.slice_name)
flash(msg, 'info')
@api
@has_access_api
@expose('/checkbox/<model_view>/<id_>/<attr>/<value>', methods=['GET'])
def checkbox(self, model_view, id_, attr, value):
"""endpoint for checking/unchecking any boolean in a sqla model"""
modelview_to_model = {
'TableColumnInlineView':
ConnectorRegistry.sources['table'].column_class,
'DruidColumnInlineView':
ConnectorRegistry.sources['druid'].column_class,
}
model = modelview_to_model[model_view]
col = db.session.query(model).filter_by(id=id_).first()
checked = value == 'true'
if col:
setattr(col, attr, checked)
if checked:
metrics = col.get_metrics().values()
col.datasource.add_missing_metrics(metrics)
db.session.commit()
return json_success('OK')
@api
@has_access_api
@expose('/schemas/<db_id>/')
def schemas(self, db_id):
db_id = int(db_id)
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
schemas = database.all_schema_names()
schemas = security_manager.schemas_accessible_by_user(database, schemas)
return Response(
json.dumps({'schemas': schemas}),
mimetype='application/json')
@api
@has_access_api
@expose('/tables/<db_id>/<schema>/<substr>/')
def tables(self, db_id, schema, substr):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
schema = utils.js_string_to_python(schema)
substr = utils.js_string_to_python(substr)
database = db.session.query(models.Database).filter_by(id=db_id).one()
table_names = security_manager.accessible_by_user(
database, database.all_table_names(schema), schema)
view_names = security_manager.accessible_by_user(
database, database.all_view_names(schema), schema)
if substr:
table_names = [tn for tn in table_names if substr in tn]
view_names = [vn for vn in view_names if substr in vn]
max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
total_items = len(table_names) + len(view_names)
max_tables = len(table_names)
max_views = len(view_names)
if total_items and substr:
max_tables = max_items * len(table_names) // total_items
max_views = max_items * len(view_names) // total_items
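            # e.g. (hypothetical numbers) with max_items=50, 60 tables and
            # 40 views, tables get 50 * 60 // 100 = 30 slots and views get
            # 50 * 40 // 100 = 20 slots.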
table_options = [{'value': tn, 'label': tn}
for tn in table_names[:max_tables]]
table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
for vn in view_names[:max_views]])
payload = {
'tableLength': len(table_names) + len(view_names),
'options': table_options,
}
return json_success(json.dumps(payload))
@api
@has_access_api
@expose('/copy_dash/<dashboard_id>/', methods=['GET', 'POST'])
def copy_dash(self, dashboard_id):
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form.get('data'))
dash = models.Dashboard()
original_dash = (
session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data['dashboard_title']
if data['duplicate_slices']:
# Duplicating slices as well, mapping old ids to new ones
old_to_new_sliceids = {}
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_sliceids['{}'.format(slc.id)] =\
'{}'.format(new_slice.id)
for d in data['positions']:
d['slice_id'] = old_to_new_sliceids[d['slice_id']]
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
self._set_dash_metadata(dash, data)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@expose('/save_dash/<dashboard_id>/', methods=['GET', 'POST'])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return 'SUCCESS'
@staticmethod
def _set_dash_metadata(dashboard, data):
positions = data['positions']
slice_ids = [int(d['slice_id']) for d in positions]
dashboard.slices = [o for o in dashboard.slices if o.id in slice_ids]
positions = sorted(data['positions'], key=lambda x: int(x['slice_id']))
dashboard.position_json = json.dumps(positions, indent=4, sort_keys=True)
md = dashboard.params_dict
dashboard.css = data['css']
dashboard.dashboard_title = data['dashboard_title']
if 'filter_immune_slices' not in md:
md['filter_immune_slices'] = []
if 'timed_refresh_immune_slices' not in md:
md['timed_refresh_immune_slices'] = []
if 'filter_immune_slice_fields' not in md:
md['filter_immune_slice_fields'] = {}
md['expanded_slices'] = data['expanded_slices']
md['default_filters'] = data.get('default_filters', '')
dashboard.json_metadata = json.dumps(md, indent=4)
@api
@has_access_api
@expose('/add_slices/<dashboard_id>/', methods=['POST'])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
session = db.session()
Slice = models.Slice # noqa
dash = (
session.query(models.Dashboard).filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(
Slice.id.in_(data['slice_ids']))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return 'SLICES ADDED'
@api
@has_access_api
@expose('/testconn', methods=['POST', 'GET'])
def testconn(self):
"""Tests a sqla connection"""
try:
username = g.user.username if g.user is not None else None
uri = request.json.get('uri')
db_name = request.json.get('name')
impersonate_user = request.json.get('impersonate_user')
database = None
if db_name:
database = (
db.session
.query(models.Database)
.filter_by(database_name=db_name)
.first()
)
if database and uri == database.safe_sqlalchemy_uri():
# the password-masked uri was passed
# use the URI associated with this database
uri = database.sqlalchemy_uri_decrypted
configuration = {}
if database and uri:
url = make_url(uri)
db_engine = models.Database.get_db_engine_spec_for_backend(
url.get_backend_name())
db_engine.patch()
masked_url = database.get_password_masked_url_from_uri(uri)
logging.info('Superset.testconn(). Masked URL: {0}'.format(masked_url))
configuration.update(
db_engine.get_configuration_for_impersonation(uri,
impersonate_user,
username),
)
connect_args = (
request.json
.get('extras', {})
.get('engine_params', {})
.get('connect_args', {}))
if configuration:
connect_args['configuration'] = configuration
engine = create_engine(uri, connect_args=connect_args)
engine.connect()
return json_success(json.dumps(engine.table_names(), indent=4))
except Exception as e:
logging.exception(e)
return json_error_response((
'Connection failed!\n\n'
'The error message returned was:\n{}').format(e))
@api
@has_access_api
@expose('/recent_activity/<user_id>/', methods=['GET'])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models # noqa
if request.args.get('limit'):
limit = int(request.args.get('limit'))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, M.Slice)
.outerjoin(
M.Dashboard,
M.Dashboard.id == M.Log.dashboard_id,
)
.outerjoin(
M.Slice,
M.Slice.id == M.Log.slice_id,
)
.filter(
sqla.and_(
~M.Log.action.in_(('queries', 'shortner', 'sql_json')),
M.Log.user_id == user_id,
),
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append({
'action': log.Log.action,
'item_url': item_url,
'item_title': item_title,
'time': log.Log.dttm,
})
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/csrf_token/', methods=['GET'])
def csrf_token(self):
return Response(
self.render_template('superset/csrf_token.json'),
mimetype='text/json',
)
@api
@has_access_api
@expose('/fave_dashboards_by_username/<username>/', methods=['GET'])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose('/fave_dashboards/<user_id>/', methods=['GET'])
def fave_dashboards(self, user_id):
qry = (
db.session.query(
models.Dashboard,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'Dashboard',
models.Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Dashboard.id,
'dashboard': o.Dashboard.dashboard_link(),
'title': o.Dashboard.dashboard_title,
'url': o.Dashboard.url,
'dttm': o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_dashboards/<user_id>/', methods=['GET'])
def created_dashboards(self, user_id):
Dash = models.Dashboard # noqa
qry = (
db.session.query(
Dash,
)
.filter(
sqla.or_(
Dash.created_by_fk == user_id,
Dash.changed_by_fk == user_id,
),
)
.order_by(
Dash.changed_on.desc(),
)
)
payload = [{
'id': o.id,
'dashboard': o.dashboard_link(),
'title': o.dashboard_title,
'url': o.url,
'dttm': o.changed_on,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/user_slices', methods=['GET'])
@expose('/user_slices/<user_id>/', methods=['GET'])
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
FavStar = models.FavStar # noqa
qry = (
db.session.query(Slice,
FavStar.dttm).join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
isouter=True).filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
),
)
.order_by(Slice.slice_name.asc())
)
payload = [{
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'data': o.Slice.form_data,
'dttm': o.dttm if o.dttm else o.Slice.changed_on,
'viz_type': o.Slice.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_slices', methods=['GET'])
@expose('/created_slices/<user_id>/', methods=['GET'])
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/fave_slices', methods=['GET'])
@expose('/fave_slices/<user_id>/', methods=['GET'])
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(
models.Slice,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'dttm': o.dttm,
'viz_type': o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/warm_up_cache/', methods=['GET'])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
        Note that for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')
if not slice_id and not (table_name and db_name):
return json_error_response(__(
'Malformed request. slice_id or table_name and db_name '
'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
'Slice %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
                    models.Database.database_name == db_name,
                    SqlaTable.table_name == table_name)
).first()
if not table:
return json_error_response(__(
"Table %(t)s wasn't found in the database %(d)s",
                    t=table_name, d=db_name), status=404)
slices = session.query(models.Slice).filter_by(
datasource_id=table.id,
datasource_type=table.type).all()
for slc in slices:
try:
obj = slc.get_viz(force=True)
obj.get_json()
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
[{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
@expose('/favstar/<class_name>/<obj_id>/<action>/')
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count}))
@has_access
@expose('/dashboard/<dashboard_id>/')
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one()
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(get_datasource_access_error_msg(datasource.name)),
'danger')
return redirect(
'superset/request_access/?'
'dashboard_id={dash.id}&'.format(**locals()))
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(dashboard_id=dash.id)
dash_edit_perm = check_ownership(dash, raise_if_false=False)
dash_save_perm = \
dash_edit_perm and security_manager.can_access('can_save_dash', 'Superset')
standalone_mode = request.args.get('standalone') == 'true'
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': request.args.get('edit') == 'true',
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
)
@api
@log_this
@expose('/log/', methods=['POST'])
def log(self):
return Response(status=200)
@has_access
@expose('/sync_druid/', methods=['POST'])
@log_this
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
                    etc. `count` type is stored internally as longSum.
                    Other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
@has_access
@expose('/sqllab_viz/', methods=['POST'])
@log_this
def sqllab_viz(self):
SqlaTable = ConnectorRegistry.sources['table']
data = json.loads(request.form.get('data'))
table_name = data.get('datasourceName')
table = (
db.session.query(SqlaTable)
.filter_by(table_name=table_name)
.first()
)
if not table:
table = SqlaTable(table_name=table_name)
table.database_id = data.get('dbId')
table.schema = data.get('schema')
table.is_sqllab_view = True
q = SupersetQuery(data.get('sql'))
table.sql = q.stripped()
db.session.add(table)
cols = []
dims = []
metrics = []
for column_name, config in data.get('columns').items():
is_dim = config.get('is_dim', False)
SqlaTable = ConnectorRegistry.sources['table']
TableColumn = SqlaTable.column_class
SqlMetric = SqlaTable.metric_class
col = TableColumn(
column_name=column_name,
filterable=is_dim,
groupby=is_dim,
is_dttm=config.get('is_date', False),
type=config.get('type', False),
)
cols.append(col)
if is_dim:
dims.append(col)
agg = config.get('agg')
if agg:
if agg == 'count_distinct':
metrics.append(SqlMetric(
metric_name='{agg}__{column_name}'.format(**locals()),
expression='COUNT(DISTINCT {column_name})'
.format(**locals()),
))
else:
metrics.append(SqlMetric(
metric_name='{agg}__{column_name}'.format(**locals()),
expression='{agg}({column_name})'.format(**locals()),
))
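            # Example (hypothetical column config): {'num': {'agg': 'sum'}}
            # yields metric_name='sum__num' with expression='sum(num)';
            # agg='count_distinct' instead yields 'COUNT(DISTINCT num)'.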
if not metrics:
metrics.append(SqlMetric(
                metric_name='count',
                expression='count(*)',
))
table.columns = cols
table.metrics = metrics
db.session.commit()
return self.json_response(json.dumps({
'table_id': table.id,
}))
@has_access
@expose('/table/<database_id>/<table_name>/<schema>/')
@log_this
def table(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload_columns = []
indexes = []
primary_key = []
foreign_keys = []
try:
columns = mydb.get_columns(table_name, schema)
indexes = mydb.get_indexes(table_name, schema)
primary_key = mydb.get_pk_constraint(table_name, schema)
foreign_keys = mydb.get_foreign_keys(table_name, schema)
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
keys = []
if primary_key and primary_key.get('constrained_columns'):
primary_key['column_names'] = primary_key.pop('constrained_columns')
primary_key['type'] = 'pk'
keys += [primary_key]
for fk in foreign_keys:
fk['column_names'] = fk.pop('constrained_columns')
fk['type'] = 'fk'
keys += foreign_keys
for idx in indexes:
idx['type'] = 'index'
keys += indexes
for col in columns:
dtype = ''
try:
dtype = '{}'.format(col['type'])
except Exception:
pass
payload_columns.append({
'name': col['name'],
'type': dtype.split('(')[0] if '(' in dtype else dtype,
'longType': dtype,
'keys': [
k for k in keys
if col['name'] in k.get('column_names')
],
})
tbl = {
'name': table_name,
'columns': payload_columns,
'selectStar': mydb.select_star(
table_name, schema=schema, show_cols=True, indent=True,
cols=columns, latest_partition=False),
'primaryKey': primary_key,
'foreignKeys': foreign_keys,
'indexes': keys,
}
return json_success(json.dumps(tbl))
@has_access
@expose('/extra_table_metadata/<database_id>/<table_name>/<schema>/')
@log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(
mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose('/select_star/<database_id>/<table_name>/')
@log_this
def select_star(self, database_id, table_name):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
return self.render_template(
'superset/ajah.html',
content=mydb.select_star(table_name, show_cols=True),
)
@expose('/theme/')
def theme(self):
return self.render_template('superset/theme.html')
@has_access_api
@expose('/cached_key/<key>/')
@log_this
def cached_key(self, key):
"""Returns a key from the cache"""
resp = cache.get(key)
if resp:
return resp
return 'nope'
@has_access_api
@expose('/cache_key_exist/<key>/')
@log_this
def cache_key_exist(self, key):
"""Returns if a key from cache exist"""
        key_exist = bool(cache.get(key))
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status)
@has_access_api
@expose('/results/<key>/')
@log_this
def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
blob = results_backend.get(key)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(get_datasource_access_error_msg(
'{}'.format(rejected_tables)))
payload = utils.zlib_decompress_to_string(blob)
display_limit = app.config.get('DISPLAY_SQL_MAX_ROW', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(payload_json, default=utils.json_iso_dttm_ser))
@has_access_api
@expose('/stop_query/', methods=['POST'])
@log_this
def stop_query(self):
client_id = request.form.get('client_id')
try:
query = (
db.session.query(Query)
.filter_by(client_id=client_id).one()
)
query.status = utils.QueryStatus.STOPPED
db.session.commit()
except Exception:
pass
return self.json_response('OK')
@has_access_api
@expose('/sql_json/', methods=['POST', 'GET'])
@log_this
def sql_json(self):
"""Runs arbitrary sql and returns and json"""
        async_ = request.form.get('runAsync') == 'true'  # 'async' is reserved in Python 3.7+
sql = request.form.get('sql')
database_id = request.form.get('database_id')
schema = request.form.get('schema') or None
template_params = json.loads(
request.form.get('templateParams') or '{}')
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).first()
        if not mydb:
            return json_error_response(
                'Database with id {} is missing.'.format(database_id))
rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)
if rejected_tables:
return json_error_response(get_datasource_access_error_msg(
'{}'.format(rejected_tables)))
session.commit()
select_as_cta = request.form.get('select_as_cta') == 'true'
tmp_table_name = request.form.get('tmp_table_name')
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = '{}.{}'.format(
mydb.force_ctas_schema,
tmp_table_name,
)
query = Query(
database_id=int(database_id),
limit=int(app.config.get('SQL_MAX_ROW', None)),
sql=sql,
schema=schema,
select_as_cta=request.form.get('select_as_cta') == 'true',
start_time=utils.now_as_float(),
tab_name=request.form.get('tab'),
            status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,
sql_editor_id=request.form.get('sql_editor_id'),
tmp_table_name=tmp_table_name,
user_id=int(g.user.get_id()),
client_id=request.form.get('client_id'),
)
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
if not query_id:
raise Exception(_('Query record was not created as expected.'))
logging.info('Triggering query_id: {}'.format(query_id))
try:
template_processor = get_template_processor(
database=query.database, query=query)
rendered_query = template_processor.process_template(
query.sql,
**template_params)
except Exception as e:
return json_error_response(
'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))
# Async request.
        if async_:
logging.info('Running query on a Celery worker')
            # Fire and forget: ignore the celery future object so the web
            # request does not block (and time out) waiting for the result.
try:
sql_lab.get_sql_results.delay(
query_id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username)
except Exception as e:
logging.exception(e)
msg = (
'Failed to start remote query on a worker. '
'Tell your administrator to verify the availability of '
'the message queue.'
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response('{}'.format(msg))
resp = json_success(json.dumps(
{'query': query.to_dict()}, default=utils.json_int_dttm_ser,
allow_nan=False), status=202)
session.commit()
return resp
# Sync request.
try:
timeout = config.get('SQLLAB_TIMEOUT')
timeout_msg = (
'The query exceeded the {timeout} seconds '
'timeout.').format(**locals())
with utils.timeout(seconds=timeout,
error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query_id,
rendered_query,
return_results=True)
payload = json.dumps(
data, default=utils.pessimistic_json_iso_dttm_ser)
except Exception as e:
logging.exception(e)
return json_error_response('{}'.format(e))
if data.get('status') == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access
@expose('/csv/<client_id>')
@log_this
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(get_datasource_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = (
'attachment; filename={}.csv'.format(unidecode(query.name)))
logging.info('Ready to return response')
return response
@has_access
@expose('/fetch_datasource_metadata')
@log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = (
request.args.get('datasourceKey').split('__'))
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
# Check permission for datasource
if not security_manager.datasource_access(datasource):
return json_error_response(DATASOURCE_ACCESS_ERR)
return json_success(json.dumps(datasource.data))
@expose('/queries/<last_updated_ms>')
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
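        # e.g. (hypothetical value) last_updated_ms='1514764800000' maps to
        # 2018-01-01 00:00:00 UTC.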
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@expose('/search_queries')
@log_this
def search_queries(self):
"""Search for queries."""
query = db.session.query(Query)
search_user_id = request.args.get('user_id')
database_id = request.args.get('database_id')
search_text = request.args.get('search_text')
status = request.args.get('status')
# From and To time stamp should be Epoch timestamp in seconds
from_time = request.args.get('from')
to_time = request.args.get('to')
if search_user_id:
# Filter on db Id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query \
.filter(Query.sql.like('%{}%'.format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)
sql_queries = (
query.order_by(Query.start_time.asc())
.limit(query_limit)
.all()
)
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype='application/json')
@app.errorhandler(500)
def show_traceback(self):
return render_template(
'superset/traceback.html',
error_msg=get_error_msg(),
), 500
@expose('/welcome')
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/profile/<username>/')
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=username + "'s profile",
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/sqllab')
def sqllab(self):
"""SQL Editor"""
d = {
'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='sqllab',
bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),
)
@api
@has_access_api
@expose('/slice_query/<slice_id>/')
def sliceQuery(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = self.get_viz(slice_id)
if not security_manager.datasource_access(viz_obj.datasource):
return json_error_response(DATASOURCE_ACCESS_ERR, status=401)
return self.get_query_string_response(viz_obj)
appbuilder.add_view_no_menu(Superset)
class CssTemplateModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
list_columns = ['template_name']
edit_columns = ['template_name', 'css']
add_columns = edit_columns
label_columns = {
'template_name': _('Template Name'),
}
class CssTemplateAsyncModelView(CssTemplateModelView):
list_columns = ['template_name', 'css']
appbuilder.add_separator('Sources')
appbuilder.add_view(
CssTemplateModelView,
'CSS Templates',
label=__('CSS Templates'),
icon='fa-css3',
category='Manage',
category_label=__('Manage'),
category_icon='')
appbuilder.add_view_no_menu(CssTemplateAsyncModelView)
appbuilder.add_link(
'SQL Editor',
label=_('SQL Editor'),
href='/superset/sqllab',
category_icon='fa-flask',
icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Query Search',
label=_('Query Search'),
href='/superset/sqllab#search',
icon='fa-search',
category_icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Upload a CSV',
label=__('Upload a CSV'),
href='/csvtodatabaseview/form',
icon='fa-upload',
category='Sources',
category_label=__('Sources'),
category_icon='fa-wrench')
appbuilder.add_separator('Sources')
@app.after_request
def apply_caching(response):
"""Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items():
response.headers[k] = v
return response
# ---------------------------------------------------------------------
# Redirecting URL from previous names
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
@app.route('/<regex("panoramix\/.*"):url>')
def panoramix(url): # noqa
return redirect(request.full_path.replace('panoramix', 'superset'))
@app.route('/<regex("caravel\/.*"):url>')
def caravel(url): # noqa
return redirect(request.full_path.replace('caravel', 'superset'))
# ---------------------------------------------------------------------
| 35.812523
| 90
| 0.579101
|
979233a8b18b5256d8a642c9e871d5c1c049ddf3
| 69,148
|
py
|
Python
|
s2s-ft/s2s_ft/modeling_decoding.py
|
gregbugaj/unilm
|
75c4a158ac7b0ea2dccc949e7fbd1313ae8b30f4
|
[
"MIT"
] | 1
|
2021-11-07T00:30:05.000Z
|
2021-11-07T00:30:05.000Z
|
s2s-ft/s2s_ft/modeling_decoding.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
s2s-ft/s2s_ft/modeling_decoding.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.file_utils import cached_path
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
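# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. Sizes and values are made up.
def _example_label_smoothing_loss():
    """Shows the expected shapes for LabelSmoothingLoss above. Because forward()
    applies F.kl_div, `output` must already contain log-probabilities."""
    crit = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=100, ignore_index=0)
    logits = torch.randn(2, 5, 100)                    # (batch, num_pos, vocab)
    log_probs = F.log_softmax(logits, dim=-1)
    target = torch.randint(1, 100, (2, 5))             # (batch, num_pos)
    loss = crit(log_probs, target)                     # per-token loss, shape (2, 5)
    return loss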
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased.bin"
}
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
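# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called.
def _example_activation_lookup():
    """Resolves an activation by name via ACT2FN and compares the exact erf-based
    gelu above with the tanh approximation quoted in its docstring."""
    act = ACT2FN["gelu"]
    x = torch.linspace(-3.0, 3.0, steps=7)
    exact = act(x)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    # the two curves agree to within roughly 1e-3 over this range
    return exact, approx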
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
relax_projection=0,
new_pos_ids=False,
initializer_range=0.02,
task_idx=None,
fp32_embedding=False,
ffn_type=0,
label_smoothing=None,
num_qkv=0,
seg_emb=False,
source_type_id=0,
target_type_id=1,
rel_pos_bins=0,
max_rel_pos=0, **kwargs):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.relax_projection = relax_projection
self.new_pos_ids = new_pos_ids
self.initializer_range = initializer_range
self.task_idx = task_idx
self.fp32_embedding = fp32_embedding
self.ffn_type = ffn_type
self.label_smoothing = label_smoothing
self.num_qkv = num_qkv
self.seg_emb = seg_emb
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
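# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. The hyperparameters are the
# usual BERT-base values, used here only to show the dict/JSON helpers.
def _example_bert_config_roundtrip():
    config = BertConfig(vocab_size_or_config_json_file=32000,
                        hidden_size=768,
                        num_hidden_layers=12,
                        num_attention_heads=12,
                        intermediate_size=3072)
    as_dict = config.to_dict()                # plain dict of every field
    clone = BertConfig.from_dict(as_dict)     # reconstruct an equivalent config
    assert clone.hidden_size == config.hidden_size
    return config.to_json_string()            # pretty-printed JSON, ends with a newline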
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size)
if config.type_vocab_size == 0:
self.token_type_embeddings = None
else:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
if hasattr(config, 'fp32_embedding'):
self.fp32_embedding = config.fp32_embedding
else:
self.fp32_embedding = False
if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
self.num_pos_emb = 4
else:
self.num_pos_emb = 1
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.num_pos_emb > 1:
num_batch = position_embeddings.size(0)
num_pos = position_embeddings.size(1)
position_embeddings = position_embeddings.view(
num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
embeddings = words_embeddings + position_embeddings
if self.token_type_embeddings is not None:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
if self.fp32_embedding:
embeddings = embeddings.half()
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
self.num_qkv = config.num_qkv
else:
self.num_qkv = 1
self.query = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.key = nn.Linear(config.hidden_size,
self.all_head_size * self.num_qkv)
self.value = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.uni_debug_flag = True if os.getenv(
'UNI_DEBUG_FLAG', '') else False
if self.uni_debug_flag:
self.register_buffer('debug_attention_probs',
torch.zeros((512, 512)))
if hasattr(config, 'seg_emb') and config.seg_emb:
self.b_q_s = nn.Parameter(torch.zeros(
1, self.num_attention_heads, 1, self.attention_head_size))
self.seg_emb = nn.Embedding(
config.type_vocab_size, self.all_head_size)
else:
self.b_q_s = None
self.seg_emb = None
def transpose_for_scores(self, x, mask_qkv=None):
if self.num_qkv > 1:
sz = x.size()[:-1] + (self.num_qkv,
self.num_attention_heads, self.all_head_size)
# (batch, pos, num_qkv, head, head_hid)
x = x.view(*sz)
if mask_qkv is None:
x = x[:, :, 0, :, :]
elif isinstance(mask_qkv, int):
x = x[:, :, mask_qkv, :, :]
else:
# mask_qkv: (batch, pos)
if mask_qkv.size(1) > sz[1]:
mask_qkv = mask_qkv[:, :sz[1]]
# -> x: (batch, pos, head, head_hid)
x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
else:
sz = x.size()[:-1] + (self.num_attention_heads,
self.attention_head_size)
# (batch, pos, head, head_hid)
x = x.view(*sz)
# (batch, head, pos, head_hid)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
key_cache=None, value_cache=None, rel_pos=None,
):
if history_states is None:
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(hidden_states, self.key.weight)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(x_states, self.key.weight)
mixed_value_layer = self.value(x_states)
if key_cache is not None and isinstance(key_cache, list):
key_cache.append(mixed_key_layer)
mixed_key_layer = torch.cat(key_cache, dim=1)
if value_cache is not None and isinstance(value_cache, list):
value_cache.append(mixed_value_layer)
mixed_value_layer = torch.cat(value_cache, dim=1)
query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
if key_history is not None and not isinstance(key_history, list):
key_layer = torch.cat((key_history, key_layer), dim=-2)
value_layer = torch.cat((value_history, value_layer), dim=-2)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch, head, pos, pos)
attention_scores = torch.matmul(
query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
if rel_pos is not None:
attention_scores = attention_scores + rel_pos
if self.seg_emb is not None:
seg_rep = self.seg_emb(seg_ids)
# (batch, pos, head, head_hid)
seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
1), self.num_attention_heads, self.attention_head_size)
qs = torch.einsum('bnih,bjnh->bnij',
query_layer + self.b_q_s, seg_rep)
attention_scores = attention_scores + qs
# attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.uni_debug_flag:
_pos = attention_probs.size(-1)
self.debug_attention_probs[:_pos, :_pos].copy_(
attention_probs[0].mean(0).view(_pos, _pos))
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if isinstance(key_history, list):
key_history.append(key_layer)
if isinstance(value_history, list):
value_history.append(value_layer)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
self_output = self.self(
input_tensor, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history, rel_pos=rel_pos)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TransformerFFN(nn.Module):
def __init__(self, config):
super(TransformerFFN, self).__init__()
self.ffn_type = config.ffn_type
assert self.ffn_type in (1, 2)
if self.ffn_type in (1, 2):
self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (2,):
self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (1, 2):
self.output = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, x):
if self.ffn_type in (1, 2):
x0 = self.wx0(x)
if self.ffn_type == 1:
x1 = x
elif self.ffn_type == 2:
x1 = self.wx1(x)
out = self.output(x0 * x1)
out = self.dropout(out)
out = self.LayerNorm(out + x)
return out
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.ffn_type = config.ffn_type
if self.ffn_type:
self.ffn = TransformerFFN(config)
else:
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
attention_output = self.attention(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history, rel_pos=rel_pos)
if self.ffn_type:
layer_output = self.ffn(attention_output)
else:
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        # history embedding and encoded layer must be simultaneously given
assert (prev_embedding is None) == (prev_encoded_layers is None)
all_encoder_layers = []
if (prev_embedding is not None) and (prev_encoded_layers is not None):
history_states = prev_embedding
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if prev_encoded_layers is not None:
history_states = prev_encoded_layers[i]
else:
for i, layer_module in enumerate(self.layer):
set_key = None
if isinstance(key_history, list):
set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
set_value = None
if isinstance(value_history, list):
                    set_value = value_history if len(value_history) < len(self.layer) else value_history[i]
hidden_states = layer_module(
hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
key_history=set_key, value_history=set_value, rel_pos=rel_pos)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
hid_size = config.hidden_size
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
hid_size *= config.relax_projection
self.dense = nn.Linear(config.hidden_size, hid_size)
self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(
bert_model_embedding_weights.size(0)))
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
self.relax_projection = config.relax_projection
else:
self.relax_projection = 0
self.fp32_embedding = config.fp32_embedding
def convert_to_type(tensor):
if self.fp32_embedding:
return tensor.half()
else:
return tensor
self.type_converter = convert_to_type
self.converted = False
def forward(self, hidden_states, task_idx=None):
if not self.converted:
self.converted = True
if self.fp32_embedding:
self.transform.half()
hidden_states = self.transform(self.type_converter(hidden_states))
if self.relax_projection > 1:
num_batch = hidden_states.size(0)
num_pos = hidden_states.size(1)
# (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
hidden_states = hidden_states.view(
num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
if self.fp32_embedding:
hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
self.decoder.weight), self.type_converter(self.bias))
else:
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights, num_labels=2):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, num_labels)
def forward(self, sequence_output, pooled_output, task_idx=None):
prediction_scores = self.predictions(sequence_output, task_idx)
if pooled_output is None:
seq_relationship_score = None
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
# module.weight.data.copy_(torch.Tensor(
# truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
logger.info("Model config {}".format(config))
# clean the arguments in kwargs
for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
if arg_clean in kwargs:
del kwargs[arg_clean]
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
model.missing_keys = missing_keys
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
logger.info('\n'.join(error_msgs))
return model
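# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. The paths are hypothetical:
# the directory must contain a pytorch_model.bin, as from_pretrained() above expects
# when no state_dict is supplied. BertModel is defined further down in this module.
def _example_from_pretrained():
    config = BertConfig.from_json_file("/path/to/unilm/config.json")   # hypothetical path
    model = BertModel.from_pretrained("/path/to/unilm", config)        # hypothetical path
    return model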
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.config = config
self.apply(self.init_bert_weights)
def rescale_some_parameters(self):
for layer_id, layer in enumerate(self.encoder.layer):
layer.attention.output.dense.weight.data.div_(
math.sqrt(2.0 * (layer_id + 1)))
layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
mask_qkv=mask_qkv, seg_ids=token_type_ids,
key_history=key_history, value_history=value_history)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
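# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. It mirrors the docstring above
# with a tiny randomly initialised model; the token ids are made up.
def _example_bert_model_forward():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertModel(config)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])       # WordPiece ids
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])          # 0 marks padding
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    all_layers, pooled = model(input_ids, token_type_ids, input_mask)
    # all_layers: list of 12 tensors of shape (2, 3, 768); pooled: (2, 768)
    return all_layers[-1].shape, pooled.shape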
class BertModelIncr(BertModel):
def __init__(self, config):
super(BertModelIncr, self).__init__(config)
if self.config.rel_pos_bins > 0:
self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
else:
self.rel_pos_bias = None
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, position_ids, task_idx=task_idx)
if self.rel_pos_bias is not None:
# print("Rel pos size = %s" % str(rel_pos.size()))
rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
# print("Rel pos size = %s" % str(rel_pos.size()))
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
# print("Rel pos size = %s" % str(rel_pos.size()))
else:
rel_pos = None
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
seg_ids=token_type_ids, rel_pos=rel_pos)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, mask_qkv=None, task_idx=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, mask_qkv=mask_qkv,
task_idx=task_idx)
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertPreTrainingPairTransform(nn.Module):
def __init__(self, config):
super(BertPreTrainingPairTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
# self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
def forward(self, pair_x, pair_y):
hidden_states = torch.cat([pair_x, pair_y], dim=-1)
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
# hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
"""
ret = 0
if bidirectional:
num_buckets //= 2
# mtf.to_int32(mtf.less(n, 0)) * num_buckets
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance /
max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
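# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. It buckets a small
# relative-position matrix the same way BertModelIncr does before rel_pos_bias.
def _example_relative_position_bucket():
    position_ids = torch.arange(6).unsqueeze(0)                          # (1, 6)
    rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
    buckets = relative_position_bucket(rel_pos_mat, num_buckets=32, max_distance=128)
    # buckets is (1, 6, 6): small offsets map to exact buckets, large ones to log-spaced buckets
    return buckets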
class BertForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
pos_shift=False):
super(BertForSeq2SeqDecoder, self).__init__(config)
self.bert = BertModelIncr(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.sos_id = sos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
assert mode in ("s2s", "l2r")
self.mode = mode
self.pos_shift = pos_shift
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
if self.search_beam_size > 1:
return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
task_idx=task_idx, mask_qkv=mask_qkv)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
_, max_ids = torch.max(prediction_scores, dim=-1)
output_ids.append(max_ids)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = new_embedding
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
else:
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1)
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
# print("Rel pos size = %s" % str(rel_pos.size()))
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos - input_length + 1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
                back_ptrs = k_ids // K  # floor division: index of the parent beam for each kept candidate
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
if rel_pos is not None:
rel_pos = first_expand(rel_pos)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[i, :length, ...] = tensor
return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
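# Editor's note: the sketch below is an illustrative example added for clarity; it is
# not part of the original module and is never called. Every size, id and mask below
# is made up; real usage builds these tensors with the s2s_ft preprocessing pipeline.
def _example_greedy_decode():
    config = BertConfig(vocab_size_or_config_json_file=1000, hidden_size=64,
                        num_hidden_layers=2, num_attention_heads=4, intermediate_size=128,
                        max_position_embeddings=32, type_vocab_size=2)
    decoder = BertForSeq2SeqDecoder(config, mask_word_id=103, eos_id=102, sos_id=101,
                                    search_beam_size=1)
    batch, src_len, total_len = 1, 4, 8
    input_ids = torch.randint(1, 1000, (batch, src_len))
    token_type_ids = torch.cat([torch.zeros(batch, src_len, dtype=torch.long),
                                torch.ones(batch, total_len - src_len, dtype=torch.long)], dim=1)
    position_ids = torch.arange(total_len).unsqueeze(0)
    # UniLM seq-to-seq mask: every position sees the whole source; target positions
    # additionally see the already-generated targets (lower triangle of the target block).
    attention_mask = torch.zeros(batch, total_len, total_len, dtype=torch.long)
    attention_mask[:, :, :src_len] = 1
    attention_mask[:, src_len:, src_len:] = torch.tril(
        torch.ones(total_len - src_len, total_len - src_len, dtype=torch.long))
    out_ids = decoder(input_ids, token_type_ids, position_ids, attention_mask)
    return out_ids                           # (batch, total_len - src_len) generated ids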
| 46.376928
| 139
| 0.613525
|
20437982e5914558b0460ae58e3923fe5f1d8e8c
| 638
|
py
|
Python
|
mfr_pdb/render.py
|
erinspace/modular-file-renderer
|
acbc1ea188173832dd9d0e037b55653557a04704
|
[
"Apache-2.0"
] | null | null | null |
mfr_pdb/render.py
|
erinspace/modular-file-renderer
|
acbc1ea188173832dd9d0e037b55653557a04704
|
[
"Apache-2.0"
] | null | null | null |
mfr_pdb/render.py
|
erinspace/modular-file-renderer
|
acbc1ea188173832dd9d0e037b55653557a04704
|
[
"Apache-2.0"
] | null | null | null |
import os
import mfr
from mfr.core import RenderResult, get_assets_from_list
# assets must be loaded in this order
JS_ASSETS = [
"jquery-1.7.min.js",
"Three49custom.js",
"GLmol.js",
]
HERE = os.path.dirname(os.path.abspath(__file__))
TEMPLATE = os.path.join(HERE, 'templates', 'pdb.html')
def render_html(fp, **kwargs):
with open(TEMPLATE) as template:
content = template.read().format(pdb_file=fp.read())
assets_uri_base = '{0}/mfr_pdb'.format(mfr.config['STATIC_URL'])
assets = {
'js': get_assets_from_list(assets_uri_base, 'js', JS_ASSETS)
}
return RenderResult(content, assets)
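# Editor's note: a hedged usage sketch added for clarity; it is not part of the
# original module and is never called. The file path is hypothetical, and
# mfr.config['STATIC_URL'] must already be configured for the asset URIs.
def _example_render(file_path='molecule.pdb'):
    with open(file_path) as fp:
        return render_html(fp)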
| 22.785714
| 68
| 0.681818
|
3ea7236e8206695d40fcff0e9369235e7095da9f
| 660
|
py
|
Python
|
contacts/migrations/0003_alter_organisation_type.py
|
nogalliance/aid_coordinator
|
9af6413982e6f56914440c227949ce2f1cb4ebcf
|
[
"BSD-3-Clause"
] | null | null | null |
contacts/migrations/0003_alter_organisation_type.py
|
nogalliance/aid_coordinator
|
9af6413982e6f56914440c227949ce2f1cb4ebcf
|
[
"BSD-3-Clause"
] | null | null | null |
contacts/migrations/0003_alter_organisation_type.py
|
nogalliance/aid_coordinator
|
9af6413982e6f56914440c227949ce2f1cb4ebcf
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-18 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0002_alter_organisation_type'),
]
operations = [
migrations.AlterField(
model_name='organisation',
name='type',
field=models.PositiveIntegerField(choices=[(0, 'Other'), (100, 'Commercial (generic)'), (101, 'Internet Provider'), (102, 'Internet Exchange'), (200, 'Non-Profit (generic)'), (201, 'Association'), (202, 'Foundation'), (900, 'Government (generic)'), (901, 'Regulator')], default=0, verbose_name='type'),
),
]
| 34.736842
| 314
| 0.625758
|
a47c6a3759ea77fb09cc54f4a423d25fe069b4bf
| 2,021
|
py
|
Python
|
_sadm/plugin/os/user/debian/check.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | 1
|
2019-10-15T08:37:56.000Z
|
2019-10-15T08:37:56.000Z
|
_sadm/plugin/os/user/debian/check.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | null | null | null |
_sadm/plugin/os/user/debian/check.py
|
jrmsdev/pysadm
|
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from collections import deque
from os import devnull
# TODO: move pwd package funcs to utils so we can mock it for tests
from pwd import getpwnam
from _sadm.utils.cmd import call, callOutput
__all__ = ['check']
def check(env):
diff = deque()
_checkGroups(diff, env)
_checkUsers(diff, env)
return diff
def _checkGroups(diff, env):
if not env.settings.has_section('os.group'):
return
for group in env.settings['os.group']:
gid = env.settings.getint('os.group', group)
rc = call("getent group %s >%s" % (group, devnull))
if rc == 2:
diff.append(('group', group, gid))
env.warn("%d %s not found" % (gid, group))
elif rc == 0:
env.log("%d %s OK" % (gid, group))
else:
raise env.error("getent group command failed: %d" % rc)
def _checkUsers(diff, env):
for user in env.settings['os.user']:
uid = env.settings.getint('os.user', user)
try:
info = getpwnam(user)
except KeyError:
diff.append(('user', user, uid))
env.warn("%d %s not found" % (uid, user))
groups = env.settings.getlist("os.user.%s" % user, 'groups', fallback = [])
for g in groups:
diff.append(('user.group', user, g))
else:
if info.pw_uid != uid:
env.warn("%d %s uid %d does not match" % (uid, user, info.pw_uid))
else:
env.log("%d %s OK" % (uid, user))
groups = env.settings.getlist("os.user.%s" % user, 'groups', fallback = [])
_checkUserGroups(diff, env, user, groups)
def _checkUserGroups(diff, env, user, groups):
env.debug("check %s user groups %s" % (user, groups))
okAll = True
for g in groups:
cmd = "getent group %s | cut -d ':' -f 4 | tr ',' ' '" % g
rc, output = callOutput(cmd)
if rc == 0:
ok = False
for n in output.split(' '):
n = n.strip()
if n == user:
ok = True
break
if not ok:
diff.append(('user.group', user, g))
okAll = False
elif rc == 2:
env.warn("os group %s not found" % g)
if okAll:
env.log("user %s groups OK" % user)
| 27.684932
| 79
| 0.624938
|
daaabf6d88b7faff27788734ce320b208447d3c0
| 3,713
|
py
|
Python
|
test/functional/rpc_createmultisig.py
|
winnie-chaintope/tapyrus-core
|
9924fd6fbb832ea6ae71f33cad19efeea3cc126d
|
[
"MIT"
] | null | null | null |
test/functional/rpc_createmultisig.py
|
winnie-chaintope/tapyrus-core
|
9924fd6fbb832ea6ae71f33cad19efeea3cc126d
|
[
"MIT"
] | null | null | null |
test/functional/rpc_createmultisig.py
|
winnie-chaintope/tapyrus-core
|
9924fd6fbb832ea6ae71f33cad19efeea3cc126d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Copyright (c) 2019 Chaintope Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import BitcoinTestFramework
import decimal
class RpcCreateMultiSigTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def get_keys(self):
node0,node1,node2 = self.nodes
self.add = [node1.getnewaddress() for _ in range(self.nkeys)]
self.pub = [node1.getaddressinfo(a)["pubkey"] for a in self.add]
self.priv = [node1.dumpprivkey(a) for a in self.add]
self.final = node2.getnewaddress()
def run_test(self):
node0,node1,node2 = self.nodes
# 50 TPC starting balance
node0.generate(1, self.signblockprivkey)
self.sync_all()
self.moved = 0
self.output_type = "legacy"
for self.nkeys in [3,5]:
for self.nsigs in [2,3]:
self.get_keys()
self.do_multisig()
self.checkbalances()
def checkbalances(self):
node0,node1,node2 = self.nodes
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert height == 9 # initial 1 + 8 blocks mined
assert bal0+bal1+bal2 == 9*50
# bal0 is initial 50 + total_block_rewards - self.moved - fee paid (total_block_rewards - 400)
assert bal0 == 450 - self.moved
assert bal1 == 0
assert bal2 == self.moved
def do_multisig(self):
node0,node1,node2 = self.nodes
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
# compare against addmultisigaddress
msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
# addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses",[])]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1, self.signblockprivkey)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], prevtxs, "ALL", self.options.scheme)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs, "ALL", self.options.scheme)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], True)
blk = node0.generate(1, self.signblockprivkey)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
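# --- Hedged note (added; not part of the original test). ---
# The partial-then-final signing pattern exercised in do_multisig() is roughly:
#     partial = node.signrawtransactionwithkey(rawtx, keys[:nsigs - 1], prevtxs, "ALL", scheme)
#     final = node.signrawtransactionwithkey(partial["hex"], [keys[-1]], prevtxs, "ALL", scheme)
# i.e. nsigs - 1 keys sign first, then the last key completes the multisig input.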
| 37.505051
| 160
| 0.635874
|
32b21dcea712fb98bdb48290fecf2df848183bab
| 1,995
|
py
|
Python
|
support/oauth/login.py
|
VanMenoz92/msh
|
b2a1b71083065ec3ebb097c05f58e79073578ecf
|
[
"Apache-2.0"
] | 1
|
2021-11-29T07:46:29.000Z
|
2021-11-29T07:46:29.000Z
|
support/oauth/login.py
|
VanMenoz92/msh
|
b2a1b71083065ec3ebb097c05f58e79073578ecf
|
[
"Apache-2.0"
] | null | null | null |
support/oauth/login.py
|
VanMenoz92/msh
|
b2a1b71083065ec3ebb097c05f58e79073578ecf
|
[
"Apache-2.0"
] | 1
|
2019-05-07T22:37:26.000Z
|
2019-05-07T22:37:26.000Z
|
from webapp3 import RequestHandler
from logging import info, exception
from json import loads
from oauth2client.client import OAuth2WebServerFlow
from module.xml_reader import XmlReader
from urllib import request
from module.dbmanager import DbManager
class Login(RequestHandler):
def get(self):
info("%s %s", self.request.method, self.request.url)
flow = OAuth2WebServerFlow(client_id=XmlReader.settings['oauth_google']['client_id'],
client_secret=XmlReader.settings['oauth_google']['client_secret'],
scope="https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email",
redirect_uri=XmlReader.settings['oauth_google']['url_callback'])
code = self.request.get("code")
try:
if code != "":
credentials = flow.step2_exchange(code)
url = "https://www.googleapis.com/oauth2/v1/userinfo?access_token=" + credentials.access_token
response = request.urlopen(url)
res = str(response.read())[2:-3].replace("\\n", "")
info(res)
data = loads(res)
nome = data['given_name']
cognome = data['family_name']
mail = data['email']
info("Tentaivo di login da %s %s %s", nome, cognome, mail)
DbManager()
user = DbManager.select_tb_user_from_mail(mail)
if len(user) == 1:
info("Utente presente sul database")
DbManager.update_tb_user(mail, token_google=credentials.access_token)
self.redirect("/")
else:
info("Utente non presente sul database")
else:
auth_url = flow.step1_get_authorize_url()
self.redirect(auth_url)
except Exception:
exception("Exception")
| 46.395349
| 139
| 0.573935
|
dcb5ec6ca3599ad79337d149db586d25d304a5fa
| 3,851
|
py
|
Python
|
src/model/ImpulsoNet.py
|
pystokes/telop_detection
|
a94ad87db57a0944c5060162a51f47ded784d23e
|
[
"Xnet",
"X11"
] | 2
|
2019-07-16T05:04:47.000Z
|
2019-08-24T13:43:41.000Z
|
src/model/ImpulsoNet.py
|
pystokes/telop_detection
|
a94ad87db57a0944c5060162a51f47ded784d23e
|
[
"Xnet",
"X11"
] | null | null | null |
src/model/ImpulsoNet.py
|
pystokes/telop_detection
|
a94ad87db57a0944c5060162a51f47ded784d23e
|
[
"Xnet",
"X11"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
from ..lib import optimizer
import keras
from keras.layers import Input, Dense, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model
from keras.layers.core import Dropout
from keras import optimizers
import tensorflow as tf
from logging import DEBUG, INFO
from logging import getLogger
# Set logger
logger = getLogger('impulso')
logger.info(tf.__version__)
logger.info(keras.__version__)
# Set HOME directory.
IMPULSO_HOME = os.environ['IMPULSO_HOME']
class ImpulsoNet(object):
def __init__(self, exec_type, hparams):
logger.info('Begin init of ImpulsoNet')
self.exec_type = exec_type
self.hparams = hparams
def create_model(self):
logger.info('Begin to create ImpulsoNet model')
logger.info('Input layer')
input_h = self.hparams['common']['resize']['height']
input_w = self.hparams['common']['resize']['width']
inputs = Input(shape=(input_h, input_w, 3))
logger.info('Block1')
x = Conv2D(16, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
x = BatchNormalization()(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
logger.info('Block2')
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = BatchNormalization()(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
logger.info('Block3')
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
logger.info('Block4')
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
logger.info('Full Connection')
flattened = Flatten(name='flatten')(x)
x = Dense(256, activation='relu', name='fc1')(flattened)
x = Dropout(0.5, name='dropout1')(x)
x = Dense(256, activation='relu', name='fc2')(x)
x = Dropout(0.5, name='dropout2')(x)
logger.info('Output layer')
predictions = Dense(input_h * input_w, activation='sigmoid', name='predictions')(x)
logger.info('Create model')
self.model = Model(inputs=inputs, outputs=predictions)
logger.info('Finish creating ImpulsoNet model')
def select_optimizer(self):
logger.info('Select optimizer')
self.selected_optimizer = optimizer.select_optimizer(self.hparams[self.exec_type]['optimizer'])
def compile(self):
logger.info('Compile model')
self.model.compile(optimizer=self.selected_optimizer,
loss='mean_squared_error',
metrics=['accuracy'])
if __name__ == '__main__':
pass
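# --- Hedged usage sketch (added; not part of the original module). ---
# The hparams keys mirror the lookups in create_model()/select_optimizer();
# the 64x64 resize and the 'adam' optimizer name are assumptions.
#
#     net = ImpulsoNet('train', {'common': {'resize': {'height': 64, 'width': 64}},
#                                'train': {'optimizer': 'adam'}})
#     net.create_model()
#     net.select_optimizer()
#     net.compile()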
| 35.657407
| 103
| 0.618021
|
552126ccafd77e6c5f9a2adaf0ac5fac0939c239
| 2,882
|
py
|
Python
|
pygcn/utils.py
|
darnbi/pygcn
|
0201fba38c4157b3748771b6c1cea30a181d9a73
|
[
"MIT"
] | null | null | null |
pygcn/utils.py
|
darnbi/pygcn
|
0201fba38c4157b3748771b6c1cea30a181d9a73
|
[
"MIT"
] | null | null | null |
pygcn/utils.py
|
darnbi/pygcn
|
0201fba38c4157b3748771b6c1cea30a181d9a73
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
classes = np.unique(labels)
# classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def load_data(path="../data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
print('Done loading {} dataset'.format(dataset))
return adj, features, labels, idx_train, idx_val, idx_test
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
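# --- Hedged example (added for illustration; not part of the original module). ---
# Row-normalisation and sparse-tensor conversion on a toy 2x2 matrix.
if __name__ == '__main__':
    toy = sp.coo_matrix(np.array([[1., 1.], [0., 2.]]))
    print(normalize(toy).toarray())               # each non-empty row now sums to 1
    print(sparse_mx_to_torch_sparse_tensor(toy))  # torch sparse COO tensor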
| 34.309524
| 98
| 0.649549
|
e56bdb8500008a6a9c4720584bd592b8d38d1c9d
| 6,142
|
py
|
Python
|
kairon/api/app/routers/history.py
|
ash-pramila/chiron
|
ed207d52766fcce48ebc884ac97185b2901161d4
|
[
"Apache-2.0"
] | null | null | null |
kairon/api/app/routers/history.py
|
ash-pramila/chiron
|
ed207d52766fcce48ebc884ac97185b2901161d4
|
[
"Apache-2.0"
] | 1
|
2021-01-29T22:20:59.000Z
|
2021-01-29T22:20:59.000Z
|
kairon/api/app/routers/history.py
|
ash-pramila/chiron
|
ed207d52766fcce48ebc884ac97185b2901161d4
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import APIRouter
from kairon.api.auth import Authentication
from kairon.data_processor.history import ChatHistory
from kairon.api.models import Response, User, HistoryMonth
from fastapi import Depends
from typing import Text
router = APIRouter()
auth = Authentication()
@router.get("/users", response_model=Response)
async def chat_history_users(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the list of users who have had conversations with the agent
"""
users, message = ChatHistory.fetch_chat_users(current_user.get_bot(), month)
return {"data": {"users": users}, "message": message}
@router.get("/users/{sender}", response_model=Response)
async def chat_history(
sender: Text, month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)
):
"""
Fetches the list of conversations with the agent for a particular user
"""
history, message = ChatHistory.fetch_chat_history(current_user.get_bot(), sender, month)
return {"data": {"history": list(history)}, "message": message}
@router.get("/metrics/users", response_model=Response)
async def user_with_metrics(
month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the list of users who have conversations with the agent, along with conversation steps and time
"""
users, message = ChatHistory.user_with_metrics(
current_user.get_bot(), month
)
return {"data": {"users": users}, "message": message}
@router.get("/metrics/fallback", response_model=Response)
async def visitor_hit_fallback(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the number of times the agent hit a fallback (i.e. was not able to answer) in response to user queries
"""
visitor_hit_fallback, message = ChatHistory.visitor_hit_fallback(
current_user.get_bot(), month
)
return {"data": visitor_hit_fallback, "message": message}
@router.get("/metrics/conversation/steps", response_model=Response)
async def conversation_steps(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the number of conversation steps that took place in the chat between the users and the agent
"""
conversation_steps, message = ChatHistory.conversation_steps(current_user.get_bot(), month)
return {"data": conversation_steps, "message": message}
@router.get("/metrics/conversation/time", response_model=Response)
async def conversation_time(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the duration of the chat that took place between the users and the agent
"""
conversation_time, message = ChatHistory.conversation_time(current_user.get_bot(), month)
return {"data": conversation_time, "message": message}
@router.get("/metrics/engaged_user", response_model=Response)
async def count_engaged_users(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the number of engaged users of the bot
"""
engaged_user_count, message = ChatHistory.engaged_users(
current_user.get_bot(), month
)
return {"data": engaged_user_count, "message": message}
@router.get("/metrics/new_user", response_model=Response)
async def count_new_users(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the number of new users of the bot
"""
user_count, message = ChatHistory.new_users(
current_user.get_bot(), month
)
return {"data": user_count, "message": message}
@router.get("/metrics/successful_conversation", response_model=Response)
async def complete_conversations(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the number of successful conversations of the bot, which had no fallback
"""
conversation_count, message = ChatHistory.successful_conversations(
current_user.get_bot(), month
)
return {"data": conversation_count, "message": message}
@router.get("/metrics/user_retentions", response_model=Response)
async def calculate_retention(month: HistoryMonth = 1, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the user retention percentage of the bot
"""
retention_count, message = ChatHistory.user_retention(
current_user.get_bot(), month
)
return {"data": retention_count, "message": message}
@router.get("/metrics/engaged_user_range", response_model=Response)
async def count_engaged_users_range(month: HistoryMonth = 6, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the counts of engaged users of the bot for previous months
"""
range_value = ChatHistory.engaged_users_range(
current_user.get_bot(), month
)
return {"data": range_value}
@router.get("/metrics/new_user_range", response_model=Response)
async def count_new_users_range(month: HistoryMonth = 6, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the counts of new users of the bot for previous months
"""
range_value = ChatHistory.new_users_range(
current_user.get_bot(), month
)
return {"data": range_value}
@router.get("/metrics/successful_conversation_range", response_model=Response)
async def complete_conversation_range(month: HistoryMonth = 6, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the counts of successful conversations of the bot for previous months
"""
range_value = ChatHistory.successful_conversation_range(
current_user.get_bot(), month
)
return {"data": range_value}
@router.get("/metrics/user_retention_range", response_model=Response)
async def calculate_retention_range(month: HistoryMonth = 6, current_user: User = Depends(auth.get_current_user)):
"""
Fetches the counts of user retention percentages of the bot for previous months
"""
range_value = ChatHistory.user_retention_range(
current_user.get_bot(), month
)
return {"data": range_value}
| 37.91358
| 116
| 0.731032
|
80cc7797863d711b67a9a712ac04024ce17b4c83
| 4,010
|
py
|
Python
|
inference_mp4_4x_parallel.py
|
a1600012888/arXiv2020-RIFE
|
b17c6ba426da90129941ac7d6ba3287eef10b6bf
|
[
"MIT"
] | null | null | null |
inference_mp4_4x_parallel.py
|
a1600012888/arXiv2020-RIFE
|
b17c6ba426da90129941ac7d6ba3287eef10b6bf
|
[
"MIT"
] | null | null | null |
inference_mp4_4x_parallel.py
|
a1600012888/arXiv2020-RIFE
|
b17c6ba426da90129941ac7d6ba3287eef10b6bf
|
[
"MIT"
] | null | null | null |
import os
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--video', dest='video', required=True)
parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
parser.add_argument('--fps', dest='fps', type=int, default=60)
parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs')
args = parser.parse_args()
from model.RIFE import Model
model = Model()
model.load_model('./train_log')
model.eval()
model.device()
videoCapture = cv2.VideoCapture(args.video)
fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS))
success, frame = videoCapture.read()
h, w, _ = frame.shape
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
if args.png:
if not os.path.exists('output'):
os.mkdir('output')
else:
output = cv2.VideoWriter('{}_4x.mp4'.format(args.video[:-4]), fourcc, args.fps, (w, h))
cnt = 0
skip_frame = 1
def writeframe(I0, mid0, mid1, mid2, I1, p):
global cnt, skip_frame
for i in range(I0.shape[0]):
if p[i] > 0.2:
mid0[i] = I0[i]
mid1[i] = I0[i]
mid2[i] = I1[i]
if p[i] < 1e-3 and args.skip:
if skip_frame % 100 == 0:
print("Warning: Your video has {} static frames, skipping them may change the duration of the generated video.".format(skip_frame))
skip_frame += 1
continue
if args.png:
cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid0[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid1[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid2[i])
cnt += 1
else:
output.write(I0[i])
output.write(mid0[i])
output.write(mid1[i])
output.write(mid2[i])
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
print('{}.mp4, {} frames in total, {}FPS to {}FPS'.format(args.video[:-4], tot_frame, fps, args.fps))
pbar = tqdm(total=tot_frame)
img_list = []
img_list.append(frame)
while success:
success, frame = videoCapture.read()
if success:
img_list.append(frame)
if len(img_list) == 5 or (not success and len(img_list) > 1):
I0 = torch.from_numpy(np.transpose(img_list[:-1], (0, 3, 1, 2)).astype("float32") / 255.).to(device)
I1 = torch.from_numpy(np.transpose(img_list[1:], (0, 3, 1, 2)).astype("float32") / 255.).to(device)
p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
- F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs()
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
mid1 = model.inference(I0, I1)
mid0 = model.inference(I0, mid1)
mid2 = model.inference(mid1, I1)
I0 = (((I0 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
I1 = (((I1 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid0 = (((mid0 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid1 = (((mid1 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid2 = (((mid2 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
writeframe(I0, mid0, mid1, mid2, I1, p.mean(3).mean(2).mean(1))
pbar.update(4)
img_list = img_list[-1:]
pbar.close()
if not args.png:
    # 'output' (the VideoWriter) only exists when not writing PNG frames
    output.release()
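# --- Hedged usage note (added; not part of the original script). ---
# Typical invocations (the input file name is hypothetical):
#     python inference_mp4_4x_parallel.py --video input.mp4 --fps 60
#     python inference_mp4_4x_parallel.py --video input.mp4 --png --skip
# Each consecutive frame pair (I0, I1) yields three interpolated frames, i.e. 4x the frame count.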
| 40.505051
| 147
| 0.600499
|
92bfd946b4c4ab5933d21f134f2c4ae4de43cd6c
| 9,735
|
py
|
Python
|
evaluator_graph_case.py
|
SparkJiao/LARCH
|
93e2e103ff5e134f5a7d3501b46f510167fb99d5
|
[
"Apache-2.0"
] | 5
|
2021-05-08T04:59:41.000Z
|
2022-03-12T14:22:57.000Z
|
evaluator_graph_case.py
|
SparkJiao/LARCH
|
93e2e103ff5e134f5a7d3501b46f510167fb99d5
|
[
"Apache-2.0"
] | 1
|
2022-02-22T07:04:53.000Z
|
2022-03-04T08:33:16.000Z
|
evaluator_graph_case.py
|
SparkJiao/LARCH
|
93e2e103ff5e134f5a7d3501b46f510167fb99d5
|
[
"Apache-2.0"
] | null | null | null |
import gc
import random
import torch
import torch.backends.cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import constants
from constants import DUMP_DIR, DEVICE, TEST_BATCH_SIZE, TOT_IMG_NUM, TEST_SUB_BATCH_SIZE, TEST_DATA_LOAD_WORKERS
from datasets.eval_dataset_case_study import EvalDatasetGraphDGLCase
from eval import eval
from knowledge_embed import KnowledgeData
import models
from raw_data_fix import RawData, dialog_to_list
from utils import collate_fn_eval_case
QueryEncoder = {
'simple': models.QueryEncoder,
'expand': models.QueryEncoderExpand
}[constants.QUERY_TYPE]
KnowledgeEncoder = {
'simple': models.KnowledgeEncoder,
'reverse': models.KnowledgeEncoderReverse,
'fuse': models.KnowledgeEncoderReverseFuse,
'sep': models.KnowledgeEncoderReverseSeparate,
'act': models.KnowledgeEncoderReverseActivate,
'bi': models.KnowledgeEncoderBidirectional,
'bi_g': models.KnowledgeEncoderBidirectionalGate,
'bi_g_wo_img': models.KnowledgeEncoderBidirectionalGateWoImg,
'bi_g_wo_que': models.KnowledgeEncoderBidirectionalGateWoQuery
}[constants.KNOWLEDGE_TYPE]
print(f'Query encoder type: {QueryEncoder}')
print(f'Knowledge encoder type: {KnowledgeEncoder}')
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
_top_k = 20
class Evaluator:
def __init__(self):
random.seed(constants.SEED)
np.random.seed(constants.SEED)
torch.manual_seed(constants.SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.raw_data = RawData()
self.knowledge_data = KnowledgeData(self.raw_data)
self.query_encoder = QueryEncoder(self.raw_data).to(DEVICE)
self.knowledge_encoder = KnowledgeEncoder(self.knowledge_data).to(DEVICE)
self.test_dataset = EvalDatasetGraphDGLCase(self.raw_data, self.knowledge_data, 'small_test')
self.test_data_loader = DataLoader(self.test_dataset, batch_size=TEST_BATCH_SIZE,
shuffle=False, collate_fn=collate_fn_eval_case,
num_workers=TEST_DATA_LOAD_WORKERS)
@staticmethod
def fold_tensor(x):
return x.reshape(x.size(0) * x.size(1), *x.size()[2:])
def eval(self):
# model_file = DUMP_DIR / 'check_points.tar_dgl_bid_gate1.2'
model_file = DUMP_DIR / 'check_points.tar_dgl_bid_gate1.2_img_only'
print(f'Load model from {model_file}')
state = torch.load(model_file)
self.query_encoder.load_state_dict(state['query_encoder'])
self.knowledge_encoder.load_state_dict(state['knowledge_encoder'])
self.query_encoder.eval()
self.knowledge_encoder.eval()
self.query_encoder.apply(set_bn_eval)
self.knowledge_encoder.apply(set_bn_eval)
# log_writer = open(DUMP_DIR / 'check_points_dgl_bid_gate1.2_full_neg_img1000.log', 'w')
log_writer = open(DUMP_DIR / 'case_study_dgl_bid_gate1.2_img_only_full_neg_img1000.log', 'w')
print(f'Load model from {model_file}', file=log_writer, flush=True)
predictions = []
with torch.no_grad():
p_5, p_10, p_20, recall_5, recall_10, recall_20, ndcg_5, ndcg_10, ndcg_20 = 0, 0, 0, 0, 0, 0, 0, 0, 0
print('start')
for batch_id, data in enumerate(tqdm(self.test_data_loader)):
# print('batch data loaded')
graph_inputs, num_pos_products, products, image_files, dialogs = data
session_embeddings = self.query_encoder(*graph_inputs)
all_scores = []
_images, _style_tips, _celebrities, _attributes = products
batch_size = _images.size(0)
for index in range(0, TOT_IMG_NUM, TEST_SUB_BATCH_SIZE):
# print('start sub batch data copy')
images = _images[:, index:(index + TEST_SUB_BATCH_SIZE)].to(DEVICE)
style_tips = _style_tips[:, index:(index + TEST_SUB_BATCH_SIZE)].to(DEVICE)
celebrities = _celebrities[:, index:(index + TEST_SUB_BATCH_SIZE)].to(DEVICE)
attributes = _attributes[:, index:(index + TEST_SUB_BATCH_SIZE)].to(DEVICE)
_image_num = images.size(1)
images = self.fold_tensor(images)
style_tips = self.fold_tensor(style_tips)
celebrities = self.fold_tensor(celebrities)
attributes = self.fold_tensor(attributes)
_session_embeddings = self.fold_tensor(session_embeddings.unsqueeze(1).expand(-1, _image_num, -1))
desired_images = self.knowledge_encoder(_session_embeddings,
images, style_tips, celebrities, attributes)
if isinstance(desired_images, tuple):
images_scores = torch.cosine_similarity(_session_embeddings, desired_images[0])
knowledge_scores = torch.cosine_similarity(_session_embeddings, desired_images[1])
scores = constants.MIX_SCALAR * images_scores + (1 - constants.MIX_SCALAR) * knowledge_scores
else:
scores = torch.cosine_similarity(_session_embeddings, desired_images)
all_scores.append(scores.detach().reshape(batch_size, _image_num))
del images
del style_tips
del celebrities
del attributes
del desired_images
del scores
all_scores = torch.cat(all_scores, dim=1).cpu()
del _images
del _style_tips
del _celebrities
del _attributes
del products
# Remove pad
num_pos_products, num_padding = zip(*num_pos_products)
assert len(num_pos_products) == len(num_padding) == batch_size
for _b, _pad_len in enumerate(num_padding):
all_scores[_b, -_pad_len:] = torch.zeros(_pad_len).fill_(-10000.0)
_p_5, _p_10, _p_20, _recall_5, _recall_10, _recall_20, _ndcg_5, _ndcg_10, _ndcg_20 = eval(all_scores, [
list(range(num)) for num in num_pos_products])
_, _top_image_ids = all_scores.topk(_top_k, dim=-1, largest=True, sorted=True)
for _b, _batch_ids in enumerate(_top_image_ids):
dialog_pred = []
for _prod in _batch_ids:
if _prod < num_pos_products[_b]:
positive = 1
else:
positive = 0
dialog_pred.append({
'positive': positive,
'image_file_name': str(image_files[_b][_prod.item()])
})
predictions.append({
"dialog_predictions": dialog_pred,
"dialog": dialog_to_list(dialogs[_b]),
"positive_num": num_pos_products[_b]
})
del all_scores
N = batch_id + 1
p_5 += _p_5
p_10 += _p_10
p_20 += _p_20
recall_5 += _recall_5
recall_10 += _recall_10
recall_20 += _recall_20
ndcg_5 += _ndcg_5
ndcg_10 += _ndcg_10
ndcg_20 += _ndcg_20
if batch_id % 10 == 0:
gc.collect()
print('--p_5:', str(p_5 / N), '--p_10:', str(p_10 / N), '--p_20:', str(p_20 / N),
'--recall_5:', str(recall_5 / N), '--recall_10:', str(recall_10 / N), '--recall_20:',
str(recall_20 / N),
'--ndcg_5:', str(ndcg_5 / N), '--ndcg_10:', str(ndcg_10 / N), '--ndcg_20:', str(ndcg_20 / N))
print(f'N = {N}', file=log_writer, flush=True)
print('--p_5:', str(p_5 / N), '--p_10:', str(p_10 / N), '--p_20:', str(p_20 / N),
'--recall_5:', str(recall_5 / N), '--recall_10:', str(recall_10 / N), '--recall_20:',
str(recall_20 / N),
'--ndcg_5:', str(ndcg_5 / N), '--ndcg_10:', str(ndcg_10 / N), '--ndcg_20:', str(ndcg_20 / N),
file=log_writer, flush=True)
print('================== End ===========================')
print('--p_5:', str(p_5 / N), '--p_10:', str(p_10 / N), '--p_20:', str(p_20 / N),
'--recall_5:', str(recall_5 / N), '--recall_10:', str(recall_10 / N), '--recall_20:',
str(recall_20 / N),
'--ndcg_5:', str(ndcg_5 / N), '--ndcg_10:', str(ndcg_10 / N), '--ndcg_20:', str(ndcg_20 / N))
print('================== End ===========================', file=log_writer, flush=True)
print('--p_5:', str(p_5 / N), '--p_10:', str(p_10 / N), '--p_20:', str(p_20 / N),
'--recall_5:', str(recall_5 / N), '--recall_10:', str(recall_10 / N), '--recall_20:',
str(recall_20 / N),
'--ndcg_5:', str(ndcg_5 / N), '--ndcg_10:', str(ndcg_10 / N), '--ndcg_20:', str(ndcg_20 / N),
file=log_writer, flush=True)
import json
with open(DUMP_DIR / f'{model_file}_predictions.json', 'w') as f:
json.dump(predictions, f, indent=2)
| 45.490654
| 119
| 0.564355
|
365bdbcb4f557ee63f155d5e2a6608327d73e711
| 8,239
|
py
|
Python
|
homeassistant/components/apns/notify.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 2
|
2019-07-31T16:09:15.000Z
|
2019-09-05T08:07:12.000Z
|
homeassistant/components/apns/notify.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 4
|
2021-02-08T20:31:45.000Z
|
2022-03-11T23:48:46.000Z
|
homeassistant/components/apns/notify.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""APNS Notification platform."""
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import ATTR_NAME, CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import template as template_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_state_change
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, DOMAIN, PLATFORM_SCHEMA, BaseNotificationService)
APNS_DEVICES = 'apns.yaml'
CONF_CERTFILE = 'cert_file'
CONF_TOPIC = 'topic'
CONF_SANDBOX = 'sandbox'
DEVICE_TRACKER_DOMAIN = 'device_tracker'
SERVICE_REGISTER = 'apns_register'
ATTR_PUSH_ID = 'push_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): 'apns',
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_CERTFILE): cv.isfile,
vol.Required(CONF_TOPIC): cv.string,
vol.Optional(CONF_SANDBOX, default=False): cv.boolean,
})
REGISTER_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_PUSH_ID): cv.string,
vol.Optional(ATTR_NAME): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Return push service."""
name = config.get(CONF_NAME)
cert_file = config.get(CONF_CERTFILE)
topic = config.get(CONF_TOPIC)
sandbox = config.get(CONF_SANDBOX)
service = ApnsNotificationService(hass, name, topic, sandbox, cert_file)
hass.services.register(
DOMAIN, 'apns_{}'.format(name), service.register,
schema=REGISTER_SERVICE_SCHEMA)
return service
class ApnsDevice:
"""
The APNS Device class.
Stores information about a device that is registered for push
notifications.
"""
def __init__(self, push_id, name, tracking_device_id=None, disabled=False):
"""Initialize APNS Device."""
self.device_push_id = push_id
self.device_name = name
self.tracking_id = tracking_device_id
self.device_disabled = disabled
@property
def push_id(self):
"""Return the APNS id for the device."""
return self.device_push_id
@property
def name(self):
"""Return the friendly name for the device."""
return self.device_name
@property
def tracking_device_id(self):
"""
Return the device Id.
The id of a device that is tracked by the device
tracking component.
"""
return self.tracking_id
@property
def full_tracking_device_id(self):
"""
Return the fully qualified device id.
The full id of a device that is tracked by the device
tracking component.
"""
return '{}.{}'.format(DEVICE_TRACKER_DOMAIN, self.tracking_id)
@property
def disabled(self):
"""Return the state of the service."""
return self.device_disabled
def disable(self):
"""Disable the device from receiving notifications."""
self.device_disabled = True
def __eq__(self, other):
"""Return the comparison."""
if isinstance(other, self.__class__):
return self.push_id == other.push_id and self.name == other.name
return NotImplemented
def __ne__(self, other):
"""Return the comparison."""
return not self.__eq__(other)
def _write_device(out, device):
"""Write a single device to file."""
attributes = []
if device.name is not None:
attributes.append(
'name: {}'.format(device.name))
if device.tracking_device_id is not None:
attributes.append(
'tracking_device_id: {}'.format(device.tracking_device_id))
if device.disabled:
attributes.append('disabled: True')
out.write(device.push_id)
out.write(": {")
if attributes:
separator = ", "
out.write(separator.join(attributes))
out.write("}\n")
class ApnsNotificationService(BaseNotificationService):
"""Implement the notification service for the APNS service."""
def __init__(self, hass, app_name, topic, sandbox, cert_file):
"""Initialize APNS application."""
self.hass = hass
self.app_name = app_name
self.sandbox = sandbox
self.certificate = cert_file
self.yaml_path = hass.config.path(app_name + '_' + APNS_DEVICES)
self.devices = {}
self.device_states = {}
self.topic = topic
if os.path.isfile(self.yaml_path):
self.devices = {
str(key): ApnsDevice(
str(key),
value.get('name'),
value.get('tracking_device_id'),
value.get('disabled', False)
)
for (key, value) in
load_yaml_config_file(self.yaml_path).items()
}
tracking_ids = [
device.full_tracking_device_id
for (key, device) in self.devices.items()
if device.tracking_device_id is not None
]
track_state_change(
hass, tracking_ids, self.device_state_changed_listener)
def device_state_changed_listener(self, entity_id, from_s, to_s):
"""
Listen for state change.
Track device state change if a device has a tracking id specified.
"""
self.device_states[entity_id] = str(to_s.state)
def write_devices(self):
"""Write all known devices to file."""
with open(self.yaml_path, 'w+') as out:
for _, device in self.devices.items():
_write_device(out, device)
def register(self, call):
"""Register a device to receive push messages."""
push_id = call.data.get(ATTR_PUSH_ID)
device_name = call.data.get(ATTR_NAME)
current_device = self.devices.get(push_id)
current_tracking_id = None if current_device is None \
else current_device.tracking_device_id
device = ApnsDevice(push_id, device_name, current_tracking_id)
if current_device is None:
self.devices[push_id] = device
with open(self.yaml_path, 'a') as out:
_write_device(out, device)
return True
if device != current_device:
self.devices[push_id] = device
self.write_devices()
return True
def send_message(self, message=None, **kwargs):
"""Send push message to registered devices."""
from apns2.client import APNsClient
from apns2.payload import Payload
from apns2.errors import Unregistered
apns = APNsClient(
self.certificate,
use_sandbox=self.sandbox,
use_alternative_port=False)
device_state = kwargs.get(ATTR_TARGET)
message_data = kwargs.get(ATTR_DATA)
if message_data is None:
message_data = {}
if isinstance(message, str):
rendered_message = message
elif isinstance(message, template_helper.Template):
rendered_message = message.render()
else:
rendered_message = ''
payload = Payload(
alert=rendered_message,
badge=message_data.get('badge'),
sound=message_data.get('sound'),
category=message_data.get('category'),
custom=message_data.get('custom', {}),
content_available=message_data.get('content_available', False))
device_update = False
for push_id, device in self.devices.items():
if not device.disabled:
state = None
if device.tracking_device_id is not None:
state = self.device_states.get(
device.full_tracking_device_id)
if device_state is None or state == str(device_state):
try:
apns.send_notification(
push_id, payload, topic=self.topic)
except Unregistered:
logging.error("Device %s has unregistered", push_id)
device_update = True
device.disable()
if device_update:
self.write_devices()
return True
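# --- Hedged illustration (added; not part of the original platform). ---
# write_devices()/_write_device() persist one device per line in <app_name>_apns.yaml, roughly:
#     d215c88f7f1c: {name: annas_iphone, tracking_device_id: annas_iphone, disabled: True}
# The push id and names above are hypothetical; 'disabled' is only written once set.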
| 31.326996
| 79
| 0.622042
|
b71c209d1375acf8f5f755d756db0cb1da2c9aed
| 64,006
|
py
|
Python
|
nova/scheduler/utils.py
|
jiangbiaoah/nova
|
16074287c0c60aa5b0a02ba8d03bf78a184a95fb
|
[
"Apache-2.0"
] | null | null | null |
nova/scheduler/utils.py
|
jiangbiaoah/nova
|
16074287c0c60aa5b0a02ba8d03bf78a184a95fb
|
[
"Apache-2.0"
] | null | null | null |
nova/scheduler/utils.py
|
jiangbiaoah/nova
|
16074287c0c60aa5b0a02ba8d03bf78a184a95fb
|
[
"Apache-2.0"
] | null | null | null |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for scheduling."""
import collections
import re
import sys
import typing as ty
from urllib import parse
import os_resource_classes as orc
import os_traits
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields as obj_fields
from nova.objects import instance as obj_instance
from nova import rpc
from nova.scheduler.filters import utils as filters_utils
from nova.virt import hardware
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
GroupDetails = collections.namedtuple('GroupDetails', ['hosts', 'policy',
'members'])
class ResourceRequest(object):
"""Presents a granular resource request via RequestGroup instances."""
# extra_specs-specific consts
XS_RES_PREFIX = 'resources'
XS_TRAIT_PREFIX = 'trait'
# Regex patterns for suffixed or unsuffixed resources/trait keys
XS_KEYPAT = re.compile(r"^(%s)([a-zA-Z0-9_-]{1,64})?:(.*)$" %
'|'.join((XS_RES_PREFIX, XS_TRAIT_PREFIX)))
def __init__(self):
"""Create an empty ResourceRequest
Do not call this directly, use the existing static factory methods
from_*()
"""
self._rg_by_id: ty.Dict[str, objects.RequestGroup] = {}
self._group_policy: ty.Optional[str] = None
# Default to the configured limit but _limit can be
# set to None to indicate "no limit".
self._limit = CONF.scheduler.max_placement_results
self._root_required: ty.Set[str] = set()
self._root_forbidden: ty.Set[str] = set()
self.suffixed_groups_from_flavor = 0
# TODO(stephenfin): Remove this parameter once we drop support for
# 'vcpu_pin_set'
self.cpu_pinning_requested = False
@classmethod
def from_request_spec(
cls,
request_spec: 'objects.RequestSpec',
enable_pinning_translate: bool = True
) -> 'ResourceRequest':
"""Create a new instance of ResourceRequest from a RequestSpec.
Examines the flavor, flavor extra specs, (optional) image metadata,
and (optional) requested_resources and request_level_params of the
provided ``request_spec``.
For extra specs, items of the following form are examined:
- ``resources:$RESOURCE_CLASS``: $AMOUNT
- ``resources$S:$RESOURCE_CLASS``: $AMOUNT
- ``trait:$TRAIT_NAME``: "required"
- ``trait$S:$TRAIT_NAME``: "required"
...where ``$S`` is a string suffix as supported via Placement
microversion 1.33
https://docs.openstack.org/placement/train/specs/train/implemented/2005575-nested-magic-1.html#arbitrary-group-suffixes
.. note::
This does *not* yet handle ``member_of[$S]``.
The string suffix is used as the RequestGroup.requester_id to
facilitate mapping of requests to allocation candidates using the
``mappings`` piece of the response added in Placement microversion 1.34
https://docs.openstack.org/placement/train/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html
For image metadata, traits are extracted from the ``traits_required``
property, if present.
For the flavor, ``VCPU``, ``MEMORY_MB`` and ``DISK_GB`` are calculated
from Flavor properties, though these are only used if they aren't
overridden by flavor extra specs.
requested_resources, which are existing RequestGroup instances created
on the RequestSpec based on resources specified outside of the flavor/
image (e.g. from ports) are incorporated as is, but ensuring that they
get unique group suffixes.
request_level_params - settings associated with the request as a whole
rather than with a specific RequestGroup - are incorporated as is.
:param request_spec: An instance of ``objects.RequestSpec``.
:param enable_pinning_translate: True if the CPU policy extra specs
should be translated to placement resources and traits.
:return: a ResourceRequest instance
"""
res_req = cls()
# root_required+=these
res_req._root_required = request_spec.root_required
# root_required+=!these
res_req._root_forbidden = request_spec.root_forbidden
# TODO(efried): Handle member_of[$S], which will need to be reconciled
# with destination.aggregates handling in resources_from_request_spec
# request_spec.image is nullable
if 'image' in request_spec and request_spec.image:
image = request_spec.image
else:
image = objects.ImageMeta(properties=objects.ImageMetaProps())
# Parse the flavor extra specs
res_req._process_extra_specs(request_spec.flavor)
# NOTE(gibi): this assumes that _process_extra_specs() was already
# called but _process_requested_resources() hasn't called it yet.
res_req.suffixed_groups_from_flavor = (
res_req.get_num_of_suffixed_groups())
# Now parse the (optional) image metadata
res_req._process_image_meta(image)
if enable_pinning_translate:
# Next up, let's handle those pesky CPU pinning policies
res_req._translate_pinning_policies(request_spec.flavor, image)
# Add on any request groups that came from outside of the flavor/image,
# e.g. from ports or device profiles.
res_req._process_requested_resources(request_spec)
# Parse the flavor itself, though we'll only use these fields if they
# don't conflict with something already provided by the flavor extra
# specs. These are all added to the unsuffixed request group.
merged_resources = res_req.merged_resources()
if (orc.VCPU not in merged_resources and
orc.PCPU not in merged_resources):
res_req._add_resource(orc.VCPU, request_spec.vcpus)
if orc.MEMORY_MB not in merged_resources:
res_req._add_resource(orc.MEMORY_MB, request_spec.memory_mb)
if orc.DISK_GB not in merged_resources:
disk = request_spec.ephemeral_gb
disk += compute_utils.convert_mb_to_ceil_gb(request_spec.swap)
if 'is_bfv' not in request_spec or not request_spec.is_bfv:
disk += request_spec.root_gb
if disk:
res_req._add_resource(orc.DISK_GB, disk)
res_req._translate_memory_encryption(request_spec.flavor, image)
res_req._translate_vpmems_request(request_spec.flavor)
res_req._translate_vtpm_request(request_spec.flavor, image)
res_req._translate_pci_numa_affinity_policy(request_spec.flavor, image)
res_req._translate_secure_boot_request(request_spec.flavor, image)
res_req.strip_zeros()
return res_req
@classmethod
def from_request_group(
cls,
request_group: 'objects.RequestGroup',
) -> 'ResourceRequest':
"""Create a new instance of ResourceRequest from a RequestGroup."""
res_req = cls()
res_req._add_request_group(request_group)
res_req.strip_zeros()
return res_req
def _process_requested_resources(self, request_spec):
requested_resources = (request_spec.requested_resources
if 'requested_resources' in request_spec and
request_spec.requested_resources
else [])
for group in requested_resources:
self._add_request_group(group)
def _process_extra_specs(self, flavor):
if 'extra_specs' not in flavor:
return
for key, val in flavor.extra_specs.items():
if key == 'group_policy':
self._add_group_policy(val)
continue
match = self.XS_KEYPAT.match(key)
if not match:
continue
# 'prefix' is 'resources' or 'trait'
# 'suffix' is $S or None
# 'name' is either the resource class name or the trait name.
prefix, suffix, name = match.groups()
# Process "resources[$S]"
if prefix == self.XS_RES_PREFIX:
self._add_resource(name, val, group=suffix)
# Process "trait[$S]"
elif prefix == self.XS_TRAIT_PREFIX:
self._add_trait(name, val, group=suffix)
def _process_image_meta(self, image):
if not image or 'properties' not in image:
return
for trait in image.properties.get('traits_required', []):
# required traits from the image are always added to the
# unsuffixed request group, granular request groups are not
# supported in image traits
self._add_trait(trait, 'required')
def _translate_secure_boot_request(self, flavor, image):
sb_policy = hardware.get_secure_boot_constraint(flavor, image)
if sb_policy != obj_fields.SecureBoot.REQUIRED:
return
trait = os_traits.COMPUTE_SECURITY_UEFI_SECURE_BOOT
self._add_trait(trait, 'required')
LOG.debug("Requiring secure boot support via trait %s.", trait)
def _translate_vtpm_request(self, flavor, image):
vtpm_config = hardware.get_vtpm_constraint(flavor, image)
if not vtpm_config:
return
# Require the appropriate vTPM version support trait on a host.
if vtpm_config.version == obj_fields.TPMVersion.v1_2:
trait = os_traits.COMPUTE_SECURITY_TPM_1_2
else:
trait = os_traits.COMPUTE_SECURITY_TPM_2_0
self._add_trait(trait, 'required')
LOG.debug("Requiring emulated TPM support via trait %s.", trait)
def _translate_memory_encryption(self, flavor, image):
"""When the hw:mem_encryption extra spec or the hw_mem_encryption
image property are requested, translate into a request for
resources:MEM_ENCRYPTION_CONTEXT=1 which requires a slot on a
host which can support encryption of the guest memory.
"""
# NOTE(aspiers): In theory this could raise FlavorImageConflict,
# but we already check it in the API layer, so that should never
# happen.
if not hardware.get_mem_encryption_constraint(flavor, image):
# No memory encryption required, so no further action required.
return
self._add_resource(orc.MEM_ENCRYPTION_CONTEXT, 1)
LOG.debug("Added %s=1 to requested resources",
orc.MEM_ENCRYPTION_CONTEXT)
def _translate_vpmems_request(self, flavor):
"""When the hw:pmem extra spec is present, require hosts which can
provide enough vpmem resources.
"""
vpmem_labels = hardware.get_vpmems(flavor)
if not vpmem_labels:
# No vpmems required
return
amount_by_rc: ty.DefaultDict[str, int] = collections.defaultdict(int)
for vpmem_label in vpmem_labels:
resource_class = orc.normalize_name(
"PMEM_NAMESPACE_" + vpmem_label)
amount_by_rc[resource_class] += 1
for resource_class, amount in amount_by_rc.items():
self._add_resource(resource_class, amount)
LOG.debug("Added resource %s=%d to requested resources",
resource_class, amount)
def _translate_pinning_policies(self, flavor, image):
"""Translate the legacy pinning policies to resource requests."""
# NOTE(stephenfin): These can raise exceptions but these have already
# been validated by 'nova.virt.hardware.numa_get_constraints' in the
# API layer (see change I06fad233006c7bab14749a51ffa226c3801f951b).
# This call also handles conflicts between explicit VCPU/PCPU
# requests and implicit 'hw:cpu_policy'-based requests, mismatches
# between the number of CPUs in the flavor and explicit VCPU/PCPU
# requests, etc.
cpu_policy = hardware.get_cpu_policy_constraint(
flavor, image)
cpu_thread_policy = hardware.get_cpu_thread_policy_constraint(
flavor, image)
emul_thread_policy = hardware.get_emulator_thread_policy_constraint(
flavor)
# We don't need to worry about handling 'SHARED' - that will result in
# VCPUs which we include by default
if cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED:
# TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set'
self.cpu_pinning_requested = True
# Switch VCPU -> PCPU
pcpus = flavor.vcpus
LOG.debug('Translating request for %(vcpu_rc)s=%(pcpus)d to '
'%(vcpu_rc)s=0,%(pcpu_rc)s=%(pcpus)d',
{'vcpu_rc': orc.VCPU, 'pcpu_rc': orc.PCPU,
'pcpus': pcpus})
if cpu_policy == obj_fields.CPUAllocationPolicy.MIXED:
# Get dedicated CPU list from flavor extra spec. For a mixed
# instance a non-empty 'hw:cpu_dedicated_mask' or realtime CPU
# mask configuration must exist, which is already ensured in
# the API layer.
dedicated_cpus = hardware.get_dedicated_cpu_constraint(flavor)
realtime_cpus = hardware.get_realtime_cpu_constraint(flavor, image)
pcpus = len(dedicated_cpus or realtime_cpus or [])
vcpus = flavor.vcpus - pcpus
# apply for the VCPU resource of a 'mixed' instance
self._add_resource(orc.VCPU, vcpus)
if cpu_policy in (
obj_fields.CPUAllocationPolicy.DEDICATED,
obj_fields.CPUAllocationPolicy.MIXED,
):
if emul_thread_policy == 'isolate':
pcpus += 1
LOG.debug('Adding additional %(pcpu_rc)s to account for '
'emulator threads', {'pcpu_rc': orc.PCPU})
self._add_resource(orc.PCPU, pcpus)
trait = {
obj_fields.CPUThreadAllocationPolicy.ISOLATE: 'forbidden',
obj_fields.CPUThreadAllocationPolicy.REQUIRE: 'required',
}.get(cpu_thread_policy)
if trait:
LOG.debug('Adding %(trait)s=%(value)s trait',
{'trait': os_traits.HW_CPU_HYPERTHREADING,
'value': trait})
self._add_trait(os_traits.HW_CPU_HYPERTHREADING, trait)
def _translate_pci_numa_affinity_policy(self, flavor, image):
policy = hardware.get_pci_numa_policy_constraint(flavor, image)
# only the socket policy supports a trait
if policy == objects.fields.PCINUMAAffinityPolicy.SOCKET:
trait = os_traits.COMPUTE_SOCKET_PCI_NUMA_AFFINITY
self._add_trait(trait, 'required')
LOG.debug(
"Requiring 'socket' PCI NUMA affinity support via trait %s.",
trait)
@property
def group_policy(self):
return self._group_policy
@group_policy.setter
def group_policy(self, value):
self._group_policy = value
def get_request_group(self, ident):
if ident not in self._rg_by_id:
rq_grp = objects.RequestGroup(
use_same_provider=bool(ident),
requester_id=ident)
self._rg_by_id[ident] = rq_grp
return self._rg_by_id[ident]
def _add_request_group(self, request_group):
"""Inserts the existing group with a unique suffix.
The groups coming from the flavor can have arbitrary suffixes; those
are guaranteed to be unique within the flavor.
A group coming from "outside" (ports, device profiles) must be
associated with a requester_id, such as a port UUID. We use this
requester_id as the group suffix (but ensure that it is unique in
combination with suffixes from the flavor).
Groups coming from "outside" are not allowed to be no-ops. That is,
they must provide resources and/or required/forbidden traits/aggregates
:param request_group: the RequestGroup to be added.
:raise: ValueError if request_group has no requester_id, or if it
provides no resources or (required/forbidden) traits or aggregates.
:raise: RequestGroupSuffixConflict if request_group.requester_id
already exists in this ResourceRequest.
"""
# NOTE(efried): Deliberately check False-ness rather than None-ness
# here, since both would result in the unsuffixed request group being
# used, and that's bad.
if not request_group.requester_id:
# NOTE(efried): An "outside" RequestGroup is created by a
# programmatic agent and that agent is responsible for guaranteeing
# the presence of a unique requester_id. This is in contrast to
# flavor extra_specs where a human is responsible for the group
# suffix.
raise ValueError(
_('Missing requester_id in RequestGroup! This is probably a '
'programmer error. %s') % request_group)
if request_group.is_empty():
# NOTE(efried): It is up to the calling code to enforce a nonempty
# RequestGroup with suitable logic and exceptions.
raise ValueError(
_('Refusing to add no-op RequestGroup with requester_id=%s. '
'This is a probably a programmer error.') %
request_group.requester_id)
if request_group.requester_id in self._rg_by_id:
raise exception.RequestGroupSuffixConflict(
suffix=request_group.requester_id)
self._rg_by_id[request_group.requester_id] = request_group
def _add_resource(self, rclass, amount, group=None):
"""Add resource request to specified request group.
Defaults to the unsuffixed request group if no group is provided.
"""
self.get_request_group(group).add_resource(rclass, amount)
def _add_trait(self, trait_name, trait_type, group=None):
"""Add trait request to specified group.
Defaults to the unsuffixed request group if no group is provided.
"""
self.get_request_group(group).add_trait(trait_name, trait_type)
def _add_group_policy(self, policy):
# The only valid values for group_policy are 'none' and 'isolate'.
if policy not in ('none', 'isolate'):
LOG.warning(
"Invalid group_policy '%s'. Valid values are 'none' and "
"'isolate'.", policy)
return
self._group_policy = policy
def get_num_of_suffixed_groups(self):
return len([ident for ident in self._rg_by_id.keys()
if ident is not None])
def merged_resources(self):
"""Returns a merge of {resource_class: amount} for all resource groups.
Amounts of the same resource class from different groups are added
together.
:return: A dict of the form {resource_class: amount}
"""
ret: ty.DefaultDict[str, int] = collections.defaultdict(lambda: 0)
for rg in self._rg_by_id.values():
for resource_class, amount in rg.resources.items():
ret[resource_class] += amount
return dict(ret)
def strip_zeros(self):
"""Remove any resources whose amounts are zero."""
for rg in self._rg_by_id.values():
rg.strip_zeros()
# Get rid of any empty RequestGroup instances.
for ident, rg in list(self._rg_by_id.items()):
if rg.is_empty():
self._rg_by_id.pop(ident)
def to_querystring(self):
"""Produce a querystring of the form expected by
GET /allocation_candidates.
"""
if self._limit is not None:
qparams = [('limit', self._limit)]
else:
qparams = []
if self._group_policy is not None:
qparams.append(('group_policy', self._group_policy))
if self._root_required or self._root_forbidden:
vals = sorted(self._root_required) + ['!' + t for t in
sorted(self._root_forbidden)]
qparams.append(('root_required', ','.join(vals)))
for rg in self._rg_by_id.values():
# [('resources[$S]', 'rclass:amount,rclass:amount,...'),
# ('required[$S]', 'trait_name,!trait_name,...'),
# ('member_of[$S]', 'in:uuid,uuid,...'),
# ('member_of[$S]', 'in:uuid,uuid,...')]
qparams.extend(rg.to_queryparams())
return parse.urlencode(sorted(qparams))
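    # NOTE(editor): illustrative sketch only. For a request with an
    # unsuffixed group and one suffixed group "1", the querystring built
    # above would look roughly like the following (values are hypothetical,
    # shown unencoded and wrapped for readability):
    #
    #   group_policy=isolate&limit=1000
    #   &resources=MEMORY_MB:512,VCPU:2
    #   &required1=CUSTOM_PHYSNET_PUBLIC
    #   &resources1=NET_BW_EGR_KILOBIT_PER_SEC:1000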
@property
def all_required_traits(self):
traits: ty.Set[str] = set()
for rr in self._rg_by_id.values():
traits = traits.union(rr.required_traits)
return traits
def __str__(self):
return ', '.join(sorted(
list(str(rg) for rg in list(self._rg_by_id.values()))))
def build_request_spec(image, instances, instance_type=None):
"""Build a request_spec (ahem, not a RequestSpec) for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
type.
:param image: optional primitive image meta dict
:param instances: list of instances; objects will be converted to
primitives
:param instance_type: optional flavor; objects will be converted to
primitives
:return: dict with the following keys::
'image': the image dict passed in or {}
'instance_properties': primitive version of the first instance passed
'instance_type': primitive version of the instance_type or None
'num_instances': the number of instances passed in
"""
instance = instances[0]
if instance_type is None:
if isinstance(instance, obj_instance.Instance):
instance_type = instance.get_flavor()
else:
instance_type = flavors.extract_flavor(instance)
if isinstance(instance, obj_instance.Instance):
instance = obj_base.obj_to_primitive(instance)
# obj_to_primitive doesn't copy this enough, so be sure
# to detach our metadata blob because we modify it below.
instance['system_metadata'] = dict(instance.get('system_metadata', {}))
if isinstance(instance_type, objects.Flavor):
instance_type = obj_base.obj_to_primitive(instance_type)
# NOTE(danms): Replicate this old behavior because the
# scheduler RPC interface technically expects it to be
# there. Remove this when we bump the scheduler RPC API to
# v5.0
try:
flavors.save_flavor_info(instance.get('system_metadata', {}),
instance_type)
except KeyError:
# If the flavor isn't complete (which is legit with a
            # flavor object), just don't put it in the request spec
pass
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances)}
# NOTE(mriedem): obj_to_primitive above does not serialize everything
# in an object, like datetime fields, so we need to still call to_primitive
# to recursively serialize the items in the request_spec dict.
return jsonutils.to_primitive(request_spec)
def resources_from_flavor(instance, flavor):
"""Convert a flavor into a set of resources for placement, taking into
account boot-from-volume instances.
This takes an instance and a flavor and returns a dict of
resource_class:amount based on the attributes of the flavor, accounting for
any overrides that are made in extra_specs.
"""
is_bfv = compute_utils.is_volume_backed_instance(instance._context,
instance)
# create a fake RequestSpec as a wrapper to the caller
req_spec = objects.RequestSpec(flavor=flavor, is_bfv=is_bfv)
# TODO(efried): This method is currently only used from places that
# assume the compute node is the only resource provider. So for now, we
# just merge together all the resources specified in the flavor and pass
# them along. This will need to be adjusted when nested and/or shared RPs
# are in play.
res_req = ResourceRequest.from_request_spec(req_spec)
return res_req.merged_resources()
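# NOTE(editor): illustrative sketch only. For a hypothetical non-BFV flavor
# with vcpus=2, memory_mb=512 and root_gb=10 (no swap/ephemeral and no
# resource overrides in extra_specs), resources_from_flavor() would return
# something like {'VCPU': 2, 'MEMORY_MB': 512, 'DISK_GB': 10}.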
def resources_from_request_spec(ctxt, spec_obj, host_manager,
enable_pinning_translate=True):
"""Given a RequestSpec object, returns a ResourceRequest of the resources,
traits, and aggregates it represents.
    :param ctxt: The request context.
:param spec_obj: A RequestSpec object.
:param host_manager: A HostManager object.
:param enable_pinning_translate: True if the CPU policy extra specs should
be translated to placement resources and traits.
:return: A ResourceRequest object.
:raises NoValidHost: If the specified host/node is not found in the DB.
"""
res_req = ResourceRequest.from_request_spec(
spec_obj, enable_pinning_translate)
# values to get the destination target compute uuid
target_host = None
target_node = None
target_cell = None
if 'requested_destination' in spec_obj:
destination = spec_obj.requested_destination
if destination:
if 'host' in destination:
target_host = destination.host
if 'node' in destination:
target_node = destination.node
if 'cell' in destination:
target_cell = destination.cell
if destination.aggregates:
grp = res_req.get_request_group(None)
# If the target must be either in aggA *or* in aggB and must
# definitely be in aggC, the destination.aggregates would be
# ['aggA,aggB', 'aggC']
# Here we are converting it to
# [['aggA', 'aggB'], ['aggC']]
grp.aggregates = [ored.split(',')
for ored in destination.aggregates]
if destination.forbidden_aggregates:
grp = res_req.get_request_group(None)
grp.forbidden_aggregates |= destination.forbidden_aggregates
if 'force_hosts' in spec_obj and spec_obj.force_hosts:
# Prioritize the value from requested_destination just in case
# so that we don't inadvertently overwrite it to the old value
# of force_hosts persisted in the DB
target_host = target_host or spec_obj.force_hosts[0]
if 'force_nodes' in spec_obj and spec_obj.force_nodes:
# Prioritize the value from requested_destination just in case
# so that we don't inadvertently overwrite it to the old value
# of force_nodes persisted in the DB
target_node = target_node or spec_obj.force_nodes[0]
if target_host or target_node:
nodes = host_manager.get_compute_nodes_by_host_or_node(
ctxt, target_host, target_node, cell=target_cell)
if not nodes:
reason = (_('No such host - host: %(host)s node: %(node)s ') %
{'host': target_host, 'node': target_node})
raise exception.NoValidHost(reason=reason)
if len(nodes) == 1:
if 'requested_destination' in spec_obj and destination:
# When we only supply hypervisor_hostname in api to create a
# server, the destination object will only include the node.
# Here when we get one node, we set both host and node to
# destination object. So we can reduce the number of HostState
# objects to run through the filters.
destination.host = nodes[0].host
destination.node = nodes[0].hypervisor_hostname
grp = res_req.get_request_group(None)
grp.in_tree = nodes[0].uuid
else:
# Multiple nodes are found when a target host is specified
# without a specific node. Since placement doesn't support
# multiple uuids in the `in_tree` queryparam, what we can do here
# is to remove the limit from the `GET /a_c` query to prevent
# the found nodes from being filtered out in placement.
res_req._limit = None
# Don't limit allocation candidates when using affinity/anti-affinity.
if ('scheduler_hints' in spec_obj and any(
key in ['group', 'same_host', 'different_host']
for key in spec_obj.scheduler_hints)):
res_req._limit = None
if res_req.get_num_of_suffixed_groups() >= 2 and not res_req.group_policy:
LOG.warning(
"There is more than one numbered request group in the "
"allocation candidate query but the flavor did not specify "
"any group policy. This query would fail in placement due to "
"the missing group policy. If you specified more than one "
"numbered request group in the flavor extra_spec then you need to "
"specify the group policy in the flavor extra_spec. If it is OK "
"to let these groups be satisfied by overlapping resource "
"providers then use 'group_policy': 'none'. If you want each "
"group to be satisfied from a separate resource provider then "
"use 'group_policy': 'isolate'.")
if res_req.suffixed_groups_from_flavor <= 1:
LOG.info(
"At least one numbered request group is defined outside of "
"the flavor (e.g. in a port that has a QoS minimum bandwidth "
"policy rule attached) but the flavor did not specify any "
"group policy. To avoid the placement failure nova defaults "
"the group policy to 'none'.")
res_req.group_policy = 'none'
return res_req
def claim_resources_on_destination(
context, reportclient, instance, source_node, dest_node,
source_allocations=None, consumer_generation=None):
"""Copies allocations from source node to dest node in Placement
Normally the scheduler will allocate resources on a chosen destination
node during a move operation like evacuate and live migration. However,
because of the ability to force a host and bypass the scheduler, this
method can be used to manually copy allocations from the source node to
the forced destination node.
This is only appropriate when the instance flavor on the source node
is the same on the destination node, i.e. don't use this for resize.
:param context: The request context.
:param reportclient: An instance of the SchedulerReportClient.
:param instance: The instance being moved.
:param source_node: source ComputeNode where the instance currently
lives
:param dest_node: destination ComputeNode where the instance is being
moved
:param source_allocations: The consumer's current allocations on the
source compute
:param consumer_generation: The expected generation of the consumer.
None if a new consumer is expected
:raises NoValidHost: If the allocation claim on the destination
node fails.
:raises: keystoneauth1.exceptions.base.ClientException on failure to
communicate with the placement API
:raises: ConsumerAllocationRetrievalFailed if the placement API call fails
:raises: AllocationUpdateFailed: If a parallel consumer update changed the
consumer
"""
# Get the current allocations for the source node and the instance.
# NOTE(gibi) For the live migrate case, the caller provided the
# allocation that needs to be used on the dest_node along with the
# expected consumer_generation of the consumer (which is the instance).
if not source_allocations:
# NOTE(gibi): This is the forced evacuate case where the caller did not
# provide any allocation request. So we ask placement here for the
# current allocation and consumer generation and use that for the new
# allocation on the dest_node. If the allocation fails due to consumer
# generation conflict then the claim will raise and the operation will
# be aborted.
        # NOTE(gibi): This only detects a small portion of the possible
        # cases when the allocation is modified outside of this
# code path. The rest can only be detected if nova would
# cache at least the consumer generation of the instance.
allocations = reportclient.get_allocs_for_consumer(
context, instance.uuid)
source_allocations = allocations.get('allocations', {})
consumer_generation = allocations.get('consumer_generation')
if not source_allocations:
# This shouldn't happen, so just raise an error since we cannot
# proceed.
raise exception.ConsumerAllocationRetrievalFailed(
consumer_uuid=instance.uuid,
error=_(
'Expected to find allocations for source node resource '
'provider %s. Retry the operation without forcing a '
'destination host.') % source_node.uuid)
# Generate an allocation request for the destination node.
# NOTE(gibi): if the source allocation allocates from more than one RP
# then we need to fail as the dest allocation might also need to be
# complex (e.g. nested) and we cannot calculate that allocation request
# properly without a placement allocation candidate call.
# Alternatively we could sum up the source allocation and try to
# allocate that from the root RP of the dest host. It would only work
# if the dest host would not require nested allocation for this server
# which is really a rare case.
if len(source_allocations) > 1:
reason = (_('Unable to move instance %(instance_uuid)s to '
'host %(host)s. The instance has complex allocations '
'on the source host so move cannot be forced.') %
{'instance_uuid': instance.uuid,
'host': dest_node.host})
raise exception.NoValidHost(reason=reason)
alloc_request = {
'allocations': {
dest_node.uuid: {
'resources':
source_allocations[source_node.uuid]['resources']}
},
}
# import locally to avoid cyclic import
from nova.scheduler.client import report
# The claim_resources method will check for existing allocations
# for the instance and effectively "double up" the allocations for
# both the source and destination node. That's why when requesting
# allocations for resources on the destination node before we move,
# we use the existing resource allocations from the source node.
if reportclient.claim_resources(
context, instance.uuid, alloc_request,
instance.project_id, instance.user_id,
allocation_request_version=report.CONSUMER_GENERATION_VERSION,
consumer_generation=consumer_generation):
LOG.debug('Instance allocations successfully created on '
'destination node %(dest)s: %(alloc_request)s',
{'dest': dest_node.uuid,
'alloc_request': alloc_request},
instance=instance)
else:
# We have to fail even though the user requested that we force
# the host. This is because we need Placement to have an
# accurate reflection of what's allocated on all nodes so the
# scheduler can make accurate decisions about which nodes have
# capacity for building an instance.
reason = (_('Unable to move instance %(instance_uuid)s to '
'host %(host)s. There is not enough capacity on '
'the host for the instance.') %
{'instance_uuid': instance.uuid,
'host': dest_node.host})
raise exception.NoValidHost(reason=reason)
def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
ex, request_spec):
"""Updates the instance, sets the fault and sends an error notification.
:param context: The request context.
:param instance_uuid: The UUID of the instance to update.
:param service: The name of the originating service, e.g. 'compute_task'.
This becomes part of the publisher_id for the notification payload.
:param method: The method that failed, e.g. 'migrate_server'.
:param updates: dict of updates for the instance object, typically a
vm_state and task_state value.
:param ex: An exception which occurred during the given method.
:param request_spec: Optional request spec.
"""
# e.g. "Failed to compute_task_migrate_server: No valid host was found"
LOG.warning("Failed to %(service)s_%(method)s: %(ex)s",
{'service': service, 'method': method, 'ex': ex})
# Convert the request spec to a dict if needed.
if request_spec is not None:
if isinstance(request_spec, objects.RequestSpec):
request_spec = request_spec.to_legacy_request_spec_dict()
else:
request_spec = {}
# TODO(mriedem): We should make vm_state optional since not all callers
# of this method want to change the vm_state, e.g. the Exception block
# in ComputeTaskManager._cold_migrate.
vm_state = updates['vm_state']
properties = request_spec.get('instance_properties', {})
notifier = rpc.get_notifier(service)
state = vm_state.upper()
LOG.warning('Setting instance to %s state.', state,
instance_uuid=instance_uuid)
instance = objects.Instance(context=context, uuid=instance_uuid,
**updates)
instance.obj_reset_changes(['uuid'])
instance.save()
compute_utils.add_instance_fault_from_exc(
context, instance, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
instance_properties=properties,
instance_id=instance_uuid,
state=vm_state,
method=method,
reason=ex)
event_type = '%s.%s' % (service, method)
notifier.error(context, event_type, payload)
compute_utils.notify_about_compute_task_error(
context, method, instance_uuid, request_spec, vm_state, ex)
def build_filter_properties(scheduler_hints, forced_host,
forced_node, instance_type):
"""Build the filter_properties dict from data in the boot request."""
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
# TODO(alaski): It doesn't seem necessary that these are conditionally
# added. Let's just add empty lists if not forced_host/node.
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
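# NOTE(editor): the helper below is an illustrative sketch only and is not
# called by nova; the hint and flavor values are hypothetical. It shows the
# shape of the dict returned by build_filter_properties().
def _example_build_filter_properties():
    props = build_filter_properties(
        scheduler_hints={'group': 'a-server-group-uuid'},
        forced_host='compute1',
        forced_node=None,
        instance_type={'vcpus': 2, 'memory_mb': 512})
    # props == {'scheduler_hints': {'group': 'a-server-group-uuid'},
    #           'instance_type': {'vcpus': 2, 'memory_mb': 512},
    #           'force_hosts': ['compute1']}
    return props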
def populate_filter_properties(filter_properties, selection):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
:param filter_properties: dict of filter properties (the legacy form of
the RequestSpec)
:param selection: Selection object
"""
host = selection.service_host
nodename = selection.nodename
# Need to convert SchedulerLimits object to older dict format.
if "limits" in selection and selection.limits is not None:
limits = selection.limits.to_dict()
else:
limits = {}
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
# Adds oversubscription policy
if not filter_properties.get('force_hosts'):
filter_properties['limits'] = limits
def populate_retry(filter_properties, instance_uuid):
max_attempts = CONF.scheduler.max_attempts
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
    # In the case of multiple force hosts/nodes, the scheduler should not
    # disable the retry filter but traverse all force hosts/nodes one by
    # one until it gets a valid target host.
if (max_attempts == 1 or len(force_hosts) == 1 or len(force_nodes) == 1):
# re-scheduling is disabled, log why
if max_attempts == 1:
LOG.debug('Re-scheduling is disabled due to "max_attempts" config')
else:
LOG.debug("Re-scheduling is disabled due to forcing a host (%s) "
"and/or node (%s)", force_hosts, force_nodes)
return
# retry is enabled, update attempt count:
retry = filter_properties.setdefault(
'retry', {
'num_attempts': 0,
'hosts': [] # list of compute hosts tried
})
retry['num_attempts'] += 1
_log_compute_error(instance_uuid, retry)
exc_reason = retry.pop('exc_reason', None)
if retry['num_attempts'] > max_attempts:
msg = (_('Exceeded max scheduling attempts %(max_attempts)d '
'for instance %(instance_uuid)s. '
'Last exception: %(exc_reason)s')
% {'max_attempts': max_attempts,
'instance_uuid': instance_uuid,
'exc_reason': exc_reason})
raise exception.MaxRetriesExceeded(reason=msg)
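# NOTE(editor): illustrative sketch only; the host/node names are
# hypothetical. After two scheduling attempts the filter_properties carry a
# retry entry shaped like:
#
#   {'retry': {'num_attempts': 2,
#              'hosts': [['compute1', 'node1'], ['compute2', 'node2']]}}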
def _log_compute_error(instance_uuid, retry):
"""If the request contained an exception from a previous compute
build/resize operation, log it to aid debugging
"""
exc = retry.get('exc') # string-ified exception from compute
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host, last_node = hosts[-1]
LOG.error(
'Error from last host: %(last_host)s (node %(last_node)s): %(exc)s',
{'last_host': last_host, 'last_node': last_node, 'exc': exc},
instance_uuid=instance_uuid)
def _add_retry_host(filter_properties, host, node):
"""Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
node has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append([host, node])
def parse_options(opts, sep='=', converter=str, name=""):
"""Parse a list of options, each in the format of <key><sep><value>. Also
use the converter to convert the value into desired type.
:params opts: list of options, e.g. from oslo_config.cfg.ListOpt
:params sep: the separator
:params converter: callable object to convert the value, should raise
ValueError for conversion failure
:params name: name of the option
:returns: a lists of tuple of values (key, converted_value)
"""
good = []
bad = []
for opt in opts:
try:
key, seen_sep, value = opt.partition(sep)
value = converter(value)
except ValueError:
key = None
value = None
if key and seen_sep and value is not None:
good.append((key, value))
else:
bad.append(opt)
if bad:
LOG.warning("Ignoring the invalid elements of the option "
"%(name)s: %(options)s",
{'name': name, 'options': ", ".join(bad)})
return good
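# NOTE(editor): the helper below is an illustrative sketch only; the option
# strings are hypothetical. It demonstrates that well-formed
# "<key><sep><value>" items are converted while malformed ones are logged
# and dropped.
def _example_parse_options_usage():
    good = parse_options(
        ['cpu=1.5', 'not-an-option', 'ram=2'],
        sep='=', converter=float, name='weight_setting')
    # good == [('cpu', 1.5), ('ram', 2.0)]; 'not-an-option' is warned
    # about and ignored.
    return good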
def validate_filter(filter):
"""Validates that the filter is configured in the default filters."""
return filter in CONF.filter_scheduler.enabled_filters
def validate_weigher(weigher):
"""Validates that the weigher is configured in the default weighers."""
weight_classes = CONF.filter_scheduler.weight_classes
if 'nova.scheduler.weights.all_weighers' in weight_classes:
return True
return weigher in weight_classes
_SUPPORTS_AFFINITY = None
_SUPPORTS_ANTI_AFFINITY = None
_SUPPORTS_SOFT_AFFINITY = None
_SUPPORTS_SOFT_ANTI_AFFINITY = None
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
those instances are belonging to a group and if corresponding filters are
enabled.
:param instance_uuid: UUID of the instance to check
:param user_group_hosts: Hosts from the group or empty set
:returns: None or namedtuple GroupDetails
"""
global _SUPPORTS_AFFINITY
if _SUPPORTS_AFFINITY is None:
_SUPPORTS_AFFINITY = validate_filter(
'ServerGroupAffinityFilter')
global _SUPPORTS_ANTI_AFFINITY
if _SUPPORTS_ANTI_AFFINITY is None:
_SUPPORTS_ANTI_AFFINITY = validate_filter(
'ServerGroupAntiAffinityFilter')
global _SUPPORTS_SOFT_AFFINITY
if _SUPPORTS_SOFT_AFFINITY is None:
_SUPPORTS_SOFT_AFFINITY = validate_weigher(
'nova.scheduler.weights.affinity.ServerGroupSoftAffinityWeigher')
global _SUPPORTS_SOFT_ANTI_AFFINITY
if _SUPPORTS_SOFT_ANTI_AFFINITY is None:
_SUPPORTS_SOFT_ANTI_AFFINITY = validate_weigher(
'nova.scheduler.weights.affinity.'
'ServerGroupSoftAntiAffinityWeigher')
if not instance_uuid:
return
try:
group = objects.InstanceGroup.get_by_instance_uuid(context,
instance_uuid)
except exception.InstanceGroupNotFound:
return
policies = set(('anti-affinity', 'affinity', 'soft-affinity',
'soft-anti-affinity'))
if group.policy in policies:
if not _SUPPORTS_AFFINITY and 'affinity' == group.policy:
msg = _("ServerGroupAffinityFilter not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
if not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' == group.policy:
msg = _("ServerGroupAntiAffinityFilter not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
if (not _SUPPORTS_SOFT_AFFINITY and 'soft-affinity' == group.policy):
msg = _("ServerGroupSoftAffinityWeigher not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
if (not _SUPPORTS_SOFT_ANTI_AFFINITY and
'soft-anti-affinity' == group.policy):
msg = _("ServerGroupSoftAntiAffinityWeigher not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
group_hosts = set(group.get_hosts())
user_hosts = set(user_group_hosts) if user_group_hosts else set()
return GroupDetails(hosts=user_hosts | group_hosts,
policy=group.policy, members=group.members)
def _get_instance_group_hosts_all_cells(context, instance_group):
def get_hosts_in_cell(cell_context):
# NOTE(melwitt): The obj_alternate_context is going to mutate the
# cell_instance_group._context and to do this in a scatter-gather
# with multiple parallel greenthreads, we need the instance groups
# to be separate object copies.
cell_instance_group = instance_group.obj_clone()
with cell_instance_group.obj_alternate_context(cell_context):
return cell_instance_group.get_hosts()
results = nova_context.scatter_gather_skip_cell0(context,
get_hosts_in_cell)
hosts = []
for result in results.values():
# TODO(melwitt): We will need to handle scenarios where an exception
# is raised while targeting a cell and when a cell does not respond
# as part of the "handling of a down cell" spec:
# https://blueprints.launchpad.net/nova/+spec/handling-down-cell
if not nova_context.is_cell_failure_sentinel(result):
hosts.extend(result)
return hosts
def setup_instance_group(context, request_spec):
"""Add group_hosts and group_policies fields to filter_properties dict
    based on instance uuids provided in request_spec, if those instances
    belong to a group.
:param request_spec: Request spec
"""
# NOTE(melwitt): Proactively query for the instance group hosts instead of
# relying on a lazy-load via the 'hosts' field of the InstanceGroup object.
if (request_spec.instance_group and
'hosts' not in request_spec.instance_group):
group = request_spec.instance_group
# If the context is already targeted to a cell (during a move
# operation), we don't need to scatter-gather. We do need to use
# obj_alternate_context here because the RequestSpec is queried at the
# start of a move operation in compute/api, before the context has been
# targeted.
# NOTE(mriedem): If doing a cross-cell move and the group policy
# is anti-affinity, this could be wrong since there could be
# instances in the group on other hosts in other cells. However,
# ServerGroupAntiAffinityFilter does not look at group.hosts.
if context.db_connection:
with group.obj_alternate_context(context):
group.hosts = group.get_hosts()
else:
group.hosts = _get_instance_group_hosts_all_cells(context, group)
if request_spec.instance_group and request_spec.instance_group.hosts:
group_hosts = request_spec.instance_group.hosts
else:
group_hosts = None
instance_uuid = request_spec.instance_uuid
# This queries the group details for the group where the instance is a
# member. The group_hosts passed in are the hosts that contain members of
# the requested instance group.
group_info = _get_group_details(context, instance_uuid, group_hosts)
if group_info is not None:
request_spec.instance_group.hosts = list(group_info.hosts)
request_spec.instance_group.policy = group_info.policy
request_spec.instance_group.members = group_info.members
def request_is_rebuild(spec_obj):
"""Returns True if request is for a rebuild.
:param spec_obj: An objects.RequestSpec to examine (or None).
"""
if not spec_obj:
return False
if 'scheduler_hints' not in spec_obj:
return False
check_type = spec_obj.scheduler_hints.get('_nova_check_type')
return check_type == ['rebuild']
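# NOTE(editor): illustrative sketch only. A rebuild-driven RequestSpec
# carries the hint set by the compute API, e.g.
#   spec_obj.scheduler_hints == {'_nova_check_type': ['rebuild']}
# for which request_is_rebuild() returns True; any other value, or missing
# scheduler_hints, returns False.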
def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req,
allocation_request_version=None):
"""Given an instance UUID (representing the consumer of resources) and the
allocation_request JSON object returned from Placement, attempt to claim
resources for the instance in the placement API. Returns True if the claim
process was successful, False otherwise.
:param ctx: The RequestContext object
:param client: The scheduler client to use for making the claim call
:param spec_obj: The RequestSpec object - needed to get the project_id
:param instance_uuid: The UUID of the consuming instance
:param alloc_req: The allocation_request received from placement for the
resources we want to claim against the chosen host. The
allocation_request satisfies the original request for
                      resources and can be supplied as-is (along with the
                      project and user ID) to the placement API's PUT
                      /allocations/{consumer_uuid} call to claim resources for
                      the instance.
:param allocation_request_version: The microversion used to request the
allocations.
"""
if request_is_rebuild(spec_obj):
# NOTE(danms): This is a rebuild-only scheduling request, so we should
# not be doing any extra claiming
LOG.debug('Not claiming resources in the placement API for '
'rebuild-only scheduling of instance %(uuid)s',
{'uuid': instance_uuid})
return True
LOG.debug("Attempting to claim resources in the placement API for "
"instance %s", instance_uuid)
project_id = spec_obj.project_id
# We didn't start storing the user_id in the RequestSpec until Rocky so
# if it's not set on an old RequestSpec, use the user_id from the context.
if 'user_id' in spec_obj and spec_obj.user_id:
user_id = spec_obj.user_id
else:
# FIXME(mriedem): This would actually break accounting if we relied on
# the allocations for something like counting quota usage because in
# the case of migrating or evacuating an instance, the user here is
# likely the admin, not the owner of the instance, so the allocation
# would be tracked against the wrong user.
user_id = ctx.user_id
# NOTE(gibi): this could raise AllocationUpdateFailed which means there is
# a serious issue with the instance_uuid as a consumer. Every caller of
# utils.claim_resources() assumes that instance_uuid will be a new consumer
    # and therefore we pass None as the expected consumer_generation to
# reportclient.claim_resources() here. If the claim fails
# due to consumer generation conflict, which in this case means the
# consumer is not new, then we let the AllocationUpdateFailed propagate and
# fail the build / migrate as the instance is in inconsistent state.
return client.claim_resources(ctx, instance_uuid, alloc_req, project_id,
user_id, allocation_request_version=allocation_request_version,
consumer_generation=None)
def get_weight_multiplier(host_state, multiplier_name, multiplier_config):
"""Given a HostState object, multplier_type name and multiplier_config,
returns the weight multiplier.
It reads the "multiplier_name" from "aggregate metadata" in host_state
to override the multiplier_config. If the aggregate metadata doesn't
contain the multiplier_name, the multiplier_config will be returned
directly.
:param host_state: The HostState object, which contains aggregate metadata
:param multiplier_name: The weight multiplier name, like
"cpu_weight_multiplier".
:param multiplier_config: The weight multiplier configuration value
"""
aggregate_vals = filters_utils.aggregate_values_from_key(host_state,
multiplier_name)
try:
value = filters_utils.validate_num_values(
aggregate_vals, multiplier_config, cast_to=float)
except ValueError as e:
LOG.warning("Could not decode '%(name)s' weight multiplier: %(exce)s",
{'exce': e, 'name': multiplier_name})
value = multiplier_config
return value
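# NOTE(editor): illustrative sketch only; the metadata value is hypothetical.
# If the host's aggregate metadata contains 'cpu_weight_multiplier': '2.0',
# then get_weight_multiplier(host_state, 'cpu_weight_multiplier', 1.0)
# returns 2.0; without such metadata (or with a value that cannot be cast
# to float) the configured 1.0 is returned.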
def fill_provider_mapping(request_spec, host_selection):
"""Fills out the request group - resource provider mapping in the
request spec.
:param request_spec: The RequestSpec object associated with the
operation
:param host_selection: The Selection object returned by the scheduler
for this operation
"""
# Exit early if this request spec does not require mappings.
if not request_spec.maps_requested_resources:
return
    # Technically out-of-tree scheduler drivers may still not create
    # allocations in placement, but if request_spec.maps_requested_resources
    # is not empty and the scheduling succeeded then placement has to be
    # involved.
mappings = jsonutils.loads(host_selection.allocation_request)['mappings']
for request_group in request_spec.requested_resources:
# NOTE(efried): We can count on request_group.requester_id being set:
# - For groups from flavors, ResourceRequest.get_request_group sets it
# to the group suffix.
# - For groups from other sources (e.g. ports, accelerators), it is
# required to be set by ResourceRequest._add_request_group, and that
# method uses it as the suffix.
        # And we can count on mappings[requester_id] existing because each
        # RequestGroup was translated into a (replete; empties are disallowed
        # by ResourceRequest._add_request_group) group fed to Placement.
request_group.provider_uuids = mappings[request_group.requester_id]
def fill_provider_mapping_based_on_allocation(
context, report_client, request_spec, allocation):
"""Fills out the request group - resource provider mapping in the
request spec based on the current allocation of the instance.
The fill_provider_mapping() variant is expected to be called in every
scenario when a Selection object is available from the scheduler. However
    in case of revert operations no such Selection exists. In this case
the mapping is calculated based on the allocation of the source host the
move operation is reverting to.
This is a workaround as placement does not return which RP fulfills which
granular request group except in the allocation candidate request (because
request groups are ephemeral, only existing in the scope of that request).
.. todo:: Figure out a better way to preserve the mappings so we can get
rid of this workaround.
:param context: The security context
:param report_client: SchedulerReportClient instance to be used to
communicate with placement
:param request_spec: The RequestSpec object associated with the
operation
:param allocation: allocation dict of the instance, keyed by RP UUID.
"""
# Exit early if this request spec does not require mappings.
if not request_spec.maps_requested_resources:
return
    # NOTE(gibi): Getting traits from placement for each instance in an
# instance multi-create scenario is unnecessarily expensive. But
# instance multi-create cannot be used with pre-created neutron ports
# and this code can only be triggered with such pre-created ports so
    # instance multi-create is not an issue. If this ever becomes an issue
# in the future then we could stash the RP->traits mapping on the
# Selection object since we can pull the traits for each provider from
# the GET /allocation_candidates response in the scheduler (or leverage
# the change from the spec mentioned in the docstring above).
provider_traits = {
rp_uuid: report_client.get_provider_traits(
context, rp_uuid).traits
for rp_uuid in allocation}
# NOTE(gibi): The allocation dict is in the format of the PUT /allocations
# and that format can change. The current format can be detected from
# allocation_request_version key of the Selection object.
request_spec.map_requested_resources_to_providers(
allocation, provider_traits)
# FIXME(sbauza) : Move this method closer to the prefilter once split.
def get_aggregates_for_routed_network(
context, network_api, report_client, network_uuid):
"""Collects the aggregate UUIDs describing the segmentation of a routed
    network from Nova's perspective.
A routed network consists of multiple network segments. Each segment is
available on a given set of compute hosts. Such segmentation is modelled as
    host aggregates from Nova's perspective.
:param context: The security context
:param network_api: nova.network.neutron.API instance to be used to
communicate with Neutron
:param report_client: SchedulerReportClient instance to be used to
communicate with Placement
:param network_uuid: The UUID of the Neutron network to be translated to
aggregates
:returns: A list of aggregate UUIDs
:raises InvalidRoutedNetworkConfiguration: if something goes wrong when
        trying to find related aggregates
"""
aggregates = []
segment_ids = network_api.get_segment_ids_for_network(
context, network_uuid)
# Each segment is a resource provider in placement and is in an
# aggregate for the routed network, so we have to get the
# aggregates for each segment provider - and those aggregates are
# mirrored as nova host aggregates.
# NOTE(sbauza): In case of a network with non-configured routed segments,
# we will get an empty list of segment UUIDs, so we won't enter the loop.
for segment_id in segment_ids:
# TODO(sbauza): Don't use a private method.
agg_info = report_client._get_provider_aggregates(context, segment_id)
# @safe_connect can return None but we also want to hard-stop here if
# we can't find the aggregate that Neutron created for the segment.
if agg_info is None or not agg_info.aggregates:
raise exception.InvalidRoutedNetworkConfiguration(
'Failed to find aggregate related to segment %s' % segment_id)
aggregates.extend(agg_info.aggregates)
return aggregates
# FIXME(sbauza) : Move this method closer to the prefilter once split.
def get_aggregates_for_routed_subnet(
context, network_api, report_client, subnet_id):
"""Collects the aggregate UUIDs matching the segment that relates to a
particular subnet from a routed network.
A routed network consists of multiple network segments. Each segment is
available on a given set of compute hosts. Such segmentation is modelled as
    host aggregates from Nova's perspective.
:param context: The security context
:param network_api: nova.network.neutron.API instance to be used to
communicate with Neutron
:param report_client: SchedulerReportClient instance to be used to
communicate with Placement
:param subnet_id: The UUID of the Neutron subnet to be translated to
aggregate
:returns: A list of aggregate UUIDs
:raises InvalidRoutedNetworkConfiguration: if something goes wrong when
        trying to find related aggregates
"""
segment_id = network_api.get_segment_id_for_subnet(
context, subnet_id)
if segment_id:
# TODO(sbauza): Don't use a private method.
agg_info = report_client._get_provider_aggregates(context, segment_id)
# @safe_connect can return None but we also want to hard-stop here if
# we can't find the aggregate that Neutron created for the segment.
if agg_info is None or not agg_info.aggregates:
raise exception.InvalidRoutedNetworkConfiguration(
'Failed to find aggregate related to segment %s' % segment_id)
return agg_info.aggregates
return []
| 44.325485
| 154
| 0.669656
|
755ed878615f3116b872583908680ca36d6a33d3
| 1,298
|
py
|
Python
|
xanthus/utils/benchmarking/core.py
|
markdouthwaite/xanthus
|
8d4e64bd49e4bdec1e640d72ecffbc0a9d0f0c01
|
[
"MIT"
] | 4
|
2020-07-15T21:02:46.000Z
|
2020-07-17T16:35:03.000Z
|
xanthus/utils/benchmarking/core.py
|
markdouthwaite/xanthus
|
8d4e64bd49e4bdec1e640d72ecffbc0a9d0f0c01
|
[
"MIT"
] | 2
|
2021-11-10T19:52:54.000Z
|
2022-02-10T02:11:33.000Z
|
xanthus/utils/benchmarking/core.py
|
markdouthwaite/xanthus
|
8d4e64bd49e4bdec1e640d72ecffbc0a9d0f0c01
|
[
"MIT"
] | null | null | null |
import uuid
import json
import logging
from pathlib import Path
from datetime import datetime
import pandas as pd
TIMESTAMP_FORMAT = "%H:%M:%S.%f %y-%m-%d"
def benchmark(manager, epochs, **kwargs):
logger = logging.getLogger(f"Benchmark ({manager.name}|{kwargs})")
start = datetime.now()
records = []
for epoch in range(epochs):
logger.info(f"Running epoch {epoch + 1} of {epochs}...")
manager.update(1)
metrics = manager.metrics(**kwargs)
metrics["epoch"] = epoch + 1
records.append(metrics)
end = datetime.now()
info = dict(
start=start.strftime(TIMESTAMP_FORMAT),
end=end.strftime(TIMESTAMP_FORMAT),
elapsed=(end - start).seconds,
params=manager.params(),
)
return records, info
def save(experiment, manager, records, info=None, root=None, identifier=None):
identifier = identifier or uuid.uuid4().hex[:6]
if root is not None:
path = Path(root) / experiment / manager.name / identifier
else:
path = Path(experiment) / manager.name / identifier
path.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame.from_records(records)
df.to_csv(path / "results.csv")
if info is not None:
json.dump(info, (path / "info.json").open("w"))
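# Editor's note: the block below is an illustrative usage sketch only, not
# part of the original module. `_DummyManager` is a hypothetical stand-in for
# whatever manager object `benchmark` expects: something exposing `name`,
# `update(n)`, `metrics(**kwargs)` and `params()`.
if __name__ == "__main__":
    class _DummyManager:
        name = "dummy"
        def update(self, n):
            pass
        def metrics(self, **kwargs):
            return {"score": 0.0}
        def params(self):
            return {}
    manager = _DummyManager()
    records, info = benchmark(manager, epochs=3)
    save("example-experiment", manager, records, info, root="results")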
| 25.45098
| 78
| 0.639445
|
2222861312de87ea47c85dee70147889a8f0f705
| 5,050
|
py
|
Python
|
scripts/tweet_scraper.py
|
ubclaunchpad/mimic
|
a5cee4e96d726d8d91f344ad86428501b63b1320
|
[
"MIT"
] | 4
|
2019-02-08T06:25:29.000Z
|
2020-02-12T04:29:40.000Z
|
scripts/tweet_scraper.py
|
ubclaunchpad/mimic
|
a5cee4e96d726d8d91f344ad86428501b63b1320
|
[
"MIT"
] | 62
|
2019-02-02T22:35:38.000Z
|
2022-02-26T10:17:19.000Z
|
scripts/tweet_scraper.py
|
ubclaunchpad/mimic
|
a5cee4e96d726d8d91f344ad86428501b63b1320
|
[
"MIT"
] | 1
|
2019-07-11T22:33:49.000Z
|
2019-07-11T22:33:49.000Z
|
"""Scrapes tweets from the Twitter advanced search page."""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup as bs
from datetime import date, timedelta
import time
import sys
import io
import re
import zipfile
def main():
"""Run the browser driver."""
# Trump's Campaign 2016-06-15 - 2019-02-24
# The timeline can be toggled below
from_date = date(2016, 6, 15)
to_date = date(2019, 2, 24)
# Scraping tweets every "gap" days
gap = 1
# Twitter handle
user_input = "realDonaldTrump"
days = (to_date-from_date).days
# HTML contents will be appended here
all_browser = ""
# Chrome is used
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 2)
# Launches Twitter advanced search page
browser.get("https://twitter.com/search-advanced?"
"lang=en&lang=en&lang=en&lang=en&lang=en")
# Iterates through desired dates to obtain tweets
for day in range(0, days, gap):
from_ = from_date + timedelta(days=day)
to_ = from_date + timedelta(days=day+gap)
from_input = "{dt.year}-{dt.month}-{dt.day}".format(dt=from_)
to_input = "{dt.year}-{dt.month}-{dt.day}".format(dt=to_)
time.sleep(2)
try:
user_field = browser.find_element_by_xpath("//input[@type='text'"
"and @name='from']")
user_field.send_keys(user_input)
from_field = browser.find_element_by_xpath("//input\
[contains(@class, 'input-sm') and @name='since']")
from_field.send_keys(from_input)
to_field = browser.find_element_by_xpath("//input\
[contains(@class, 'input-sm') and @name='until']")
to_field.send_keys(to_input)
search_button = browser.find_element_by_xpath("//button\
[@type='submit' and contains(@class, 'EdgeButton')]")
search_button.click()
try:
wait.until(EC.presence_of_element_located((
By.CLASS_NAME, "tweet-text")))
scroller(browser, wait)
except TimeoutException:
pass
all_browser += browser.page_source
browser.execute_script("window.history.go(-1)")
except Exception:
# Returns to original search page
browser.get("https://twitter.com/search-advanced?"
"lang=en&lang=en&lang=en&lang=en&lang=en")
# with open("all_html.txt", "w", encoding="utf8") as f:
# f.write(all_browser)
# Parses out the individual tweets from HTML
tweets = ""
for page in all_browser.split("<!DOCTYPE html>"):
soup = bs(page, "lxml")
for tweet in soup.find_all(class_="tweet-text", text=True):
tweets += tweet.text + "\n\n"
tweets = re.sub("\\npic.twitter.*\\n", "", tweets)
# Size of HTML scraped
print("HTML size: {} MB".format(sys.getsizeof(all_browser)/1e6))
    # Approximate number of words and size of tweets
print("Words: {}\nTweets Size: {} MB".format(sys.getsizeof(tweets)/5,
sys.getsizeof(tweets)/1e6))
# Saves tweets as zip
mf = io.BytesIO()
with zipfile.ZipFile(mf, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr("trump_tweets.txt", str.encode(tweets, 'utf-8'))
with open("../data/trump_tweets.zip", "wb") as f: # use `wb` mode
f.write(mf.getvalue())
class last_element_is_the_same():
"""
Class used to detect when end of page is reached.
Take in a tuple of (HTML attribute, name) and text of previous tweet.
"""
def __init__(self, locator, previous):
"""
Constructor.
Takes in a tuple of (HTML attribute, name) and text of previous tweet.
"""
self.locator = locator
self.previous = previous
def __call__(self, browser):
"""Verify whether the last and current tweets are the same."""
new_tweets = browser.find_elements(*self.locator)
if new_tweets[-1].text != self.previous:
return True
else:
return False
def scroller(browser, wait):
"""
Scrolls to end of page.
Takes in the browser driver and the 'WebDriverWait' object.
"""
while True:
tweets = browser.find_elements_by_class_name("tweet-text")
browser.execute_script("arguments[0].scrollIntoView();", tweets[-1])
try:
wait.until(last_element_is_the_same((By.CLASS_NAME, "tweet-text"),
tweets[-1].text))
except TimeoutException:
break
if __name__ == "__main__":
"""Runs main body."""
main()
| 32.792208
| 79
| 0.606535
|
3e84f8db3182cd4e954b32d6863ebf64d90bbf46
| 82
|
py
|
Python
|
servidor/machine_learning/turn_into_matrix.py
|
FelipeLimaM/ItsMyLife-Framework
|
c1d1ce89db1882a2594b126ac6407fca6d9255aa
|
[
"MIT"
] | null | null | null |
servidor/machine_learning/turn_into_matrix.py
|
FelipeLimaM/ItsMyLife-Framework
|
c1d1ce89db1882a2594b126ac6407fca6d9255aa
|
[
"MIT"
] | null | null | null |
servidor/machine_learning/turn_into_matrix.py
|
FelipeLimaM/ItsMyLife-Framework
|
c1d1ce89db1882a2594b126ac6407fca6d9255aa
|
[
"MIT"
] | null | null | null |
# legacy code that turns the original csv into a binary matrix and an answer-key pdf
| 41
| 81
| 0.792683
|
784ae26036f8e69c5fe080f675f5e4a838690951
| 86
|
py
|
Python
|
tests/periodicities/Business_Day/Cycle_Business_Day_400_B_5.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/periodicities/Business_Day/Cycle_Business_Day_400_B_5.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/periodicities/Business_Day/Cycle_Business_Day_400_B_5.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.periodicities.period_test as per
per.buildModel((5 , 'B' , 400));
| 17.2
| 50
| 0.72093
|
193345160933aedc451907d204cdfc27e0a58274
| 724
|
py
|
Python
|
tests/python/rotate_pil.py
|
fffy2366/image-processing
|
e1170b3f670e23e5728caa2716c96b90216ce4b3
|
[
"MIT"
] | 38
|
2017-03-08T14:21:51.000Z
|
2022-02-23T07:14:24.000Z
|
tests/python/rotate_pil.py
|
haigemsa/image-processing
|
e1170b3f670e23e5728caa2716c96b90216ce4b3
|
[
"MIT"
] | null | null | null |
tests/python/rotate_pil.py
|
haigemsa/image-processing
|
e1170b3f670e23e5728caa2716c96b90216ce4b3
|
[
"MIT"
] | 16
|
2016-07-20T01:40:21.000Z
|
2022-02-23T07:14:26.000Z
|
#!/usr/bin/env python
# -*-coding:utf8-*-
from PIL import Image
'''
Python: using the PIL library for image processing
http://www.cnblogs.com/way_testlife/archive/2011/04/17/2019013.html
'''
im = Image.open("/Users/fengxuting/Downloads/1463815812385A98C108.jpg")
print im.format, im.size, im.mode
out = im.rotate(45)  # rotate 45 degrees counter-clockwise.
#out = out.resize((1000,1000),Image.BILINEAR)
#out = out.rotate(-45)  # rotate by -45 degrees.
# out = im.transpose(Image.FLIP_LEFT_RIGHT)  # flip left-right.
# out = im.transpose(Image.FLIP_TOP_BOTTOM)  # flip top-bottom.
# out = im.transpose(Image.ROTATE_90)  # rotate 90 degrees.
# out = im.transpose(Image.ROTATE_180)  # rotate 180 degrees.
# out = im.transpose(Image.ROTATE_270)
#out.show()
out.save('/Users/fengxuting/Downloads/result.jpg')
| 32.909091
| 71
| 0.685083
|
6ef26d4978afc498e5c695be3ab2d6a3b6fc5eb3
| 163
|
py
|
Python
|
apps/announcements/apps.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/announcements/apps.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/announcements/apps.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AnnouncementsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'apps.announcements'
| 23.285714
| 56
| 0.779141
|
cb190892e9d67b6b5479c15696ec596ce34ed86d
| 6,096
|
py
|
Python
|
test/test_Collator.py
|
SethMMorton/pyicu
|
910a75ebac6f6a4ee38f997320d0325230595e83
|
[
"MIT"
] | 140
|
2015-04-16T02:43:31.000Z
|
2022-03-08T11:52:52.000Z
|
test/test_Collator.py
|
SethMMorton/pyicu
|
910a75ebac6f6a4ee38f997320d0325230595e83
|
[
"MIT"
] | 140
|
2015-04-17T01:56:36.000Z
|
2021-04-08T23:13:36.000Z
|
test/test_Collator.py
|
SethMMorton/pyicu
|
910a75ebac6f6a4ee38f997320d0325230595e83
|
[
"MIT"
] | 81
|
2015-04-16T20:17:05.000Z
|
2021-09-14T06:33:00.000Z
|
# ====================================================================
# Copyright (c) 2005-2018 Open Source Applications Foundation.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ====================================================================
#
import sys, os, six
from unittest import TestCase, main
from icu import *
class TestCollator(TestCase):
def filePath(self, name):
module = sys.modules[TestCollator.__module__].__file__
return os.path.join(os.path.dirname(module), name)
def assertIsInstance(self, obj, cls):
if hasattr(TestCase, 'assertIsInstance'):
TestCase.assertIsInstance(self, obj, cls)
else:
self.assertTrue(isinstance(obj, cls),
u'%s is not an instance of %s' % (obj, cls))
def testSort(self):
collator = Collator.createInstance(Locale.getFrance())
input = open(self.filePath('noms.txt'), 'rb')
names = [six.text_type(n.strip(), 'utf-8') for n in input.readlines()]
input.close()
ecole = names[0]
names.sort()
self.assertTrue(names[-1] is ecole)
if (sys.version_info >= (3,)):
names.sort(key=collator.getSortKey)
else:
names.sort(collator.compare)
self.assertTrue(names[2] is ecole)
def testCreateInstancePolymorph(self):
collator = Collator.createInstance(Locale("epo")) # Esperanto
self.assertIsInstance(collator, RuleBasedCollator)
rules = collator.getRules()
def testGetSortKey(self):
# Do not test sort key byte sequences directly:
# They are unstable, that is, likely to change
# with every UCA/CLDR/ICU release.
# Instead, test that compare() is consistent with
# comparing the equivalent sort keys.
collator = Collator.createInstance(Locale.getJapanese())
collator.setAttribute(UCollAttribute.NORMALIZATION_MODE,
UCollAttributeValue.ON)
collator.setAttribute(UCollAttribute.ALTERNATE_HANDLING,
UCollAttributeValue.SHIFTED)
collator.setAttribute(UCollAttribute.STRENGTH,
UCollAttributeValue.TERTIARY)
# In Japanese, the following characters should be different
# only on quaternary level.
hira_ge = u'\u3052' # Hiragana letter Ge
kana_ge = u'\u30B2' # Katakana letter Ge
self.assertEqual(0, collator.compare(hira_ge, kana_ge))
hira_ge_key = collator.getSortKey(hira_ge)
kana_ge_key = collator.getSortKey(kana_ge)
self.assertEqual(hira_ge_key, kana_ge_key)
collator.setAttribute(UCollAttribute.STRENGTH,
UCollAttributeValue.QUATERNARY)
self.assertEqual(-1, collator.compare(hira_ge, kana_ge))
hira_ge_key = collator.getSortKey(hira_ge)
kana_ge_key = collator.getSortKey(kana_ge)
self.assertTrue(hira_ge_key < kana_ge_key)
def setupCollator(self, collator):
collator.setAttribute(UCollAttribute.NORMALIZATION_MODE,
UCollAttributeValue.ON)
collator.setAttribute(UCollAttribute.CASE_FIRST,
UCollAttributeValue.UPPER_FIRST)
collator.setAttribute(UCollAttribute.ALTERNATE_HANDLING,
UCollAttributeValue.SHIFTED)
collator.setAttribute(UCollAttribute.STRENGTH,
UCollAttributeValue.QUATERNARY)
collator.setAttribute(UCollAttribute.HIRAGANA_QUATERNARY_MODE,
UCollAttributeValue.ON)
def LoadCollatorFromRules(self):
rules = u"&z<\u00e6 &h<ch"
collator = RuleBasedCollator(rules)
self.setupCollator(collator)
return collator
def LoadCollatorFromBinaryBuffer(self, bin):
root = Collator.createInstance(Locale.getRoot())
collator = RuleBasedCollator(bin, root)
self.setupCollator(collator)
return collator
def testCollatorLoading(self):
if ICU_VERSION >= '4.6':
collator = self.LoadCollatorFromRules()
s = u'hchz\u00e6'
key0 = collator.getSortKey(s)
bin = collator.cloneBinary()
collator = self.LoadCollatorFromBinaryBuffer(bin)
key1 = collator.getSortKey(s)
self.assertTrue(key0 == key1)
def testAlphabeticIndex(self):
if ICU_VERSION >= '4.8':
index = AlphabeticIndex(Locale.getItaly())
index.addRecord("foo", "bar")
index.addRecord("topo", "lino")
def allData(index):
for ((label, type)) in index:
while index.nextRecord():
yield (label, type, index.recordData)
self.assertTrue(list(allData(index)) == [
('F', 0, 'bar'), ('T', 0, 'lino')])
if ICU_VERSION >= '51.0':
self.assertTrue(len(index.buildImmutableIndex()) == 28)
if __name__ == "__main__":
main()
| 37.62963
| 78
| 0.627625
|
e862647dcbb0c17828eef9f263a3fd5a5bfa1e9d
| 1,568
|
py
|
Python
|
vanitynumbers/get_word_rankings.py
|
Greg-s-Tutorials/vanity-numbers
|
9f31f16ba7c51e704b6b0da839a47c16fe772449
|
[
"MIT"
] | null | null | null |
vanitynumbers/get_word_rankings.py
|
Greg-s-Tutorials/vanity-numbers
|
9f31f16ba7c51e704b6b0da839a47c16fe772449
|
[
"MIT"
] | null | null | null |
vanitynumbers/get_word_rankings.py
|
Greg-s-Tutorials/vanity-numbers
|
9f31f16ba7c51e704b6b0da839a47c16fe772449
|
[
"MIT"
] | 1
|
2021-12-02T00:05:13.000Z
|
2021-12-02T00:05:13.000Z
|
def get_word_rankings(word_groups, country_code, area_code, phone_number, number_map):
""" Takes in word groups and returns a list of all vanity numbers ranked by longest word.
Vanity numbers are returned in the form: +1-area_code-vanity_number
    If there are fewer than five vanity numbers, letters from the keypad
    mapping (per digit) are substituted to make up the difference.
"""
word_rankings = []
for word_group in word_groups:
if len(word_group.keys()) != 0:
for index, word_list in word_group.items():
for word in word_list:
vanity_number = splice_words_in_number(
index,
phone_number,
word
).upper()
word_rankings.append(vanity_number)
# Handle cases where there aren't enough matches
next_number = ""
level = 0
while len(word_rankings) < 5:
for num in phone_number:
if num in number_map and level < len(number_map[num]):
next_number += number_map[num][level]
else:
next_number += num # Fail safe. Ensures if no letters, the original number is returned.
if level >= 4:
level = 0
else: level += 1
word_rankings.append(next_number)
next_number = ""
return [f"+{country_code}-{area_code}-{''.join(word)}" for word in word_rankings]
def splice_words_in_number(start_index, phone_number, word):
phone_split = list(phone_number)
# print(phone_split)
for i in range(start_index, start_index + len(word)):
# print(i)
phone_split[i] = word[i - start_index]
return "".join(phone_split)
| 30.153846
| 95
| 0.667092
|
0aa89ec1c402fafa78467d03184e56bc654b6414
| 1,989
|
py
|
Python
|
examples/scheduledNotebook.py
|
helbonygaard/DataAppTest
|
eefc9e968d8fbf330458a3deff69c2cc743d79e5
|
[
"Apache-2.0"
] | null | null | null |
examples/scheduledNotebook.py
|
helbonygaard/DataAppTest
|
eefc9e968d8fbf330458a3deff69c2cc743d79e5
|
[
"Apache-2.0"
] | null | null | null |
examples/scheduledNotebook.py
|
helbonygaard/DataAppTest
|
eefc9e968d8fbf330458a3deff69c2cc743d79e5
|
[
"Apache-2.0"
] | null | null | null |
import time
import papermill as pm
from datetime import datetime
import json, sys
import os
def install(package):
os.system(str("python3 -m pip install " + package))
def job(): # parameterize job - naturally!
with open("subscriptionConfig.json") as jsonfile:
dictionary = json.load(jsonfile)
pm.execute_notebook(
'OverflowServiceSandbox.ipynb',
'./runLogs/OverflowServiceSandbox_run_time_'+str(datetime.timestamp(datetime.now()))+'.ipynb',
parameters = dictionary
)
#Dictionary
#dict(start='2019-06-04T0:55:52Z', stop='2019-06-05T19:28:52Z', levelThreshold=0.45, maxThreshold=0.90, levelSlopeAngle=0.000085, dataOffset=0, bufferLength=30, resultAttribute='overflow')
#JSON
#{"start":"2019-06-04T0:55:52Z", "stop":"2019-06-05T19:28:52Z", "levelThreshold":0.45, "maxThreshold":0.90, "levelSlopeAngle":0.000085, "dataOffset":0, "bufferLength":30, "resultAttribute":"overflow"}
# Run single line cli example
#papermill OverflowServiceSandbox.ipynb ./OverflowServiceSandbox_run_cli.ipynb -p start '2019-06-04T0:55:52Z' -p stop '2019-06-05T19:28:52Z' -p levelThreshold 0.45 -p maxThreshold 0.90 -p levelSlopeAngle 0.000085 -p dataOffset 0 -p bufferLength 30 -p resultAttribute 'overflow'
# Activate job schedule
# Set up CLI Arguments
install('schedule') # Special environment package for production scheduling
import schedule
schedule.every(10).minutes.do(job)
# Other schedules
#schedule.every().hour.do(job)
#schedule.every().day.at("10:30").do(job)
#schedule.every(5).to(10).minutes.do(job)
#schedule.every().monday.do(job)
#schedule.every().wednesday.at("13:15").do(job)
#schedule.every().minute.at(":17").do(job)
# Execute schedule for ever
while True:
schedule.run_pending()
time.sleep(1)
# How to start-up in cli
# python scheduledNotebook.py '{start:'2019-06-04T0:55:52Z', stop:'2019-06-05T19:28:52Z', levelThreshold:0.45, maxThreshold:0.90, levelSlopeAngle:0.000085, dataOffset=0, bufferLength=30, resultAttribute='overflow'}'
| 40.591837
| 277
| 0.740573
|
5ff71f42aa22e94536f515ecb4d96c60bb1f0871
| 229,312
|
py
|
Python
|
src/plugins/gbp/test/test_gbp.py
|
5G-Center-Chinatelecom/vpp
|
63aafbbc2b2de65bf2839d59ca3a715df2e4a03f
|
[
"Apache-2.0"
] | 44
|
2018-12-10T09:43:46.000Z
|
2022-03-18T13:30:10.000Z
|
src/plugins/gbp/test/test_gbp.py
|
5G-Center-Chinatelecom/vpp
|
63aafbbc2b2de65bf2839d59ca3a715df2e4a03f
|
[
"Apache-2.0"
] | 63
|
2018-06-11T09:48:35.000Z
|
2021-01-05T09:11:03.000Z
|
src/plugins/gbp/test/test_gbp.py
|
5G-Center-Chinatelecom/vpp
|
63aafbbc2b2de65bf2839d59ca3a715df2e4a03f
|
[
"Apache-2.0"
] | 30
|
2018-03-15T09:56:48.000Z
|
2021-07-13T01:58:00.000Z
|
#!/usr/bin/env python3
from socket import AF_INET, AF_INET6, inet_pton, inet_ntop
import unittest
from ipaddress import ip_address, IPv4Network, IPv6Network
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP, Dot1Q
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6NDOptSrcLLAddr, \
ICMPv6ND_NA, ICMPv6EchoRequest
from scapy.utils6 import in6_getnsma, in6_getnsmac
from scapy.layers.vxlan import VXLAN
from scapy.data import ETH_P_IP, ETH_P_IPV6, ETH_P_ARP
from framework import VppTestCase, VppTestRunner
from vpp_object import VppObject
from vpp_interface import VppInterface
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, \
VppIpInterfaceAddress, VppIpInterfaceBind, find_route, FibPathProto, \
FibPathType
from vpp_l2 import VppBridgeDomain, VppBridgeDomainPort, \
VppBridgeDomainArpEntry, VppL2FibEntry, find_bridge_domain_port, VppL2Vtr
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
from vpp_ip import DpoProto, get_dpo_proto
from vpp_papi import VppEnum, MACAddress
from vpp_vxlan_gbp_tunnel import find_vxlan_gbp_tunnel, INDEX_INVALID, \
VppVxlanGbpTunnel
from vpp_neighbor import VppNeighbor
from vpp_acl import AclRule, VppAcl
try:
text_type = unicode
except NameError:
text_type = str
NUM_PKTS = 67
def find_gbp_endpoint(test, sw_if_index=None, ip=None, mac=None,
tep=None, sclass=None, flags=None):
if ip:
vip = ip
if mac:
vmac = MACAddress(mac)
eps = test.vapi.gbp_endpoint_dump()
for ep in eps:
if tep:
src = tep[0]
dst = tep[1]
if src != str(ep.endpoint.tun.src) or \
dst != str(ep.endpoint.tun.dst):
continue
if sw_if_index:
if ep.endpoint.sw_if_index != sw_if_index:
continue
if sclass:
if ep.endpoint.sclass != sclass:
continue
if flags:
if flags != (flags & ep.endpoint.flags):
continue
if ip:
for eip in ep.endpoint.ips:
if vip == str(eip):
return True
if mac:
if vmac == ep.endpoint.mac:
return True
return False
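# Illustrative usage (a sketch mirroring the calls in the tests below):
#   find_gbp_endpoint(test, sw_if_index=ep.itf.sw_if_index, ip=ep.ip4)
# It returns True only when a dumped endpoint passes every supplied filter
# (tep/sw_if_index/sclass/flags) and matches the given ip or mac.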
def find_gbp_vxlan(test, vni):
ts = test.vapi.gbp_vxlan_tunnel_dump()
for t in ts:
if t.tunnel.vni == vni:
return True
return False
class VppGbpEndpoint(VppObject):
"""
GBP Endpoint
"""
@property
def mac(self):
return str(self.vmac)
@property
def ip4(self):
return self._ip4
@property
def fip4(self):
return self._fip4
@property
def ip6(self):
return self._ip6
@property
def fip6(self):
return self._fip6
@property
def ips(self):
return [self.ip4, self.ip6]
@property
def fips(self):
return [self.fip4, self.fip6]
def __init__(self, test, itf, epg, recirc, ip4, fip4, ip6, fip6,
flags=0,
tun_src="0.0.0.0",
tun_dst="0.0.0.0",
mac=True):
self._test = test
self.itf = itf
self.epg = epg
self.recirc = recirc
self._ip4 = ip4
self._fip4 = fip4
self._ip6 = ip6
self._fip6 = fip6
if mac:
self.vmac = MACAddress(self.itf.remote_mac)
else:
self.vmac = MACAddress("00:00:00:00:00:00")
self.flags = flags
self.tun_src = tun_src
self.tun_dst = tun_dst
def add_vpp_config(self):
res = self._test.vapi.gbp_endpoint_add(
self.itf.sw_if_index,
[self.ip4, self.ip6],
self.vmac.packed,
self.epg.sclass,
self.flags,
self.tun_src,
self.tun_dst)
self.handle = res.handle
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_endpoint_del(self.handle)
def object_id(self):
return "gbp-endpoint:[%d==%d:%s:%d]" % (self.handle,
self.itf.sw_if_index,
self.ip4,
self.epg.sclass)
def query_vpp_config(self):
return find_gbp_endpoint(self._test,
self.itf.sw_if_index,
self.ip4)
class VppGbpRecirc(VppObject):
"""
GBP Recirculation Interface
"""
def __init__(self, test, epg, recirc, is_ext=False):
self._test = test
self.recirc = recirc
self.epg = epg
self.is_ext = is_ext
def add_vpp_config(self):
self._test.vapi.gbp_recirc_add_del(
1,
self.recirc.sw_if_index,
self.epg.sclass,
self.is_ext)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_recirc_add_del(
0,
self.recirc.sw_if_index,
self.epg.sclass,
self.is_ext)
def object_id(self):
return "gbp-recirc:[%d]" % (self.recirc.sw_if_index)
def query_vpp_config(self):
rs = self._test.vapi.gbp_recirc_dump()
for r in rs:
if r.recirc.sw_if_index == self.recirc.sw_if_index:
return True
return False
class VppGbpExtItf(VppObject):
"""
    GBP External Interface
"""
def __init__(self, test, itf, bd, rd, anon=False):
self._test = test
self.itf = itf
self.bd = bd
self.rd = rd
self.flags = 1 if anon else 0
def add_vpp_config(self):
self._test.vapi.gbp_ext_itf_add_del(
1, self.itf.sw_if_index, self.bd.bd_id, self.rd.rd_id, self.flags)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_ext_itf_add_del(
0, self.itf.sw_if_index, self.bd.bd_id, self.rd.rd_id, self.flags)
def object_id(self):
return "gbp-ext-itf:[%d]%s" % (self.itf.sw_if_index,
" [anon]" if self.flags else "")
def query_vpp_config(self):
rs = self._test.vapi.gbp_ext_itf_dump()
for r in rs:
if r.ext_itf.sw_if_index == self.itf.sw_if_index:
return True
return False
class VppGbpSubnet(VppObject):
"""
GBP Subnet
"""
def __init__(self, test, rd, address, address_len,
type, sw_if_index=None, sclass=None):
self._test = test
self.rd_id = rd.rd_id
a = ip_address(address)
if 4 == a.version:
self.prefix = IPv4Network("%s/%d" % (address, address_len),
strict=False)
else:
self.prefix = IPv6Network("%s/%d" % (address, address_len),
strict=False)
self.type = type
self.sw_if_index = sw_if_index
self.sclass = sclass
def add_vpp_config(self):
self._test.vapi.gbp_subnet_add_del(
1,
self.rd_id,
self.prefix,
self.type,
sw_if_index=self.sw_if_index if self.sw_if_index else 0xffffffff,
sclass=self.sclass if self.sclass else 0xffff)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_subnet_add_del(
0,
self.rd_id,
self.prefix,
self.type)
def object_id(self):
return "gbp-subnet:[%d-%s]" % (self.rd_id, self.prefix)
def query_vpp_config(self):
ss = self._test.vapi.gbp_subnet_dump()
for s in ss:
if s.subnet.rd_id == self.rd_id and \
s.subnet.type == self.type and \
s.subnet.prefix == self.prefix:
return True
return False
class VppGbpEndpointRetention(object):
def __init__(self, remote_ep_timeout=0xffffffff):
self.remote_ep_timeout = remote_ep_timeout
def encode(self):
return {'remote_ep_timeout': self.remote_ep_timeout}
class VppGbpEndpointGroup(VppObject):
"""
GBP Endpoint Group
"""
def __init__(self, test, vnid, sclass, rd, bd, uplink,
bvi, bvi_ip4, bvi_ip6=None,
retention=VppGbpEndpointRetention()):
self._test = test
self.uplink = uplink
self.bvi = bvi
self.bvi_ip4 = bvi_ip4
self.bvi_ip6 = bvi_ip6
self.vnid = vnid
self.bd = bd
self.rd = rd
self.sclass = sclass
if 0 == self.sclass:
self.sclass = 0xffff
self.retention = retention
def add_vpp_config(self):
self._test.vapi.gbp_endpoint_group_add(
self.vnid,
self.sclass,
self.bd.bd.bd_id,
self.rd.rd_id,
self.uplink.sw_if_index if self.uplink else INDEX_INVALID,
self.retention.encode())
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_endpoint_group_del(self.sclass)
def object_id(self):
return "gbp-endpoint-group:[%d]" % (self.vnid)
def query_vpp_config(self):
epgs = self._test.vapi.gbp_endpoint_group_dump()
for epg in epgs:
if epg.epg.vnid == self.vnid:
return True
return False
class VppGbpBridgeDomain(VppObject):
"""
GBP Bridge Domain
"""
def __init__(self, test, bd, rd, bvi, uu_fwd=None,
bm_flood=None, learn=True,
uu_drop=False, bm_drop=False,
ucast_arp=False):
self._test = test
self.bvi = bvi
self.uu_fwd = uu_fwd
self.bm_flood = bm_flood
self.bd = bd
self.rd = rd
e = VppEnum.vl_api_gbp_bridge_domain_flags_t
self.flags = e.GBP_BD_API_FLAG_NONE
if not learn:
self.flags |= e.GBP_BD_API_FLAG_DO_NOT_LEARN
if uu_drop:
self.flags |= e.GBP_BD_API_FLAG_UU_FWD_DROP
if bm_drop:
self.flags |= e.GBP_BD_API_FLAG_MCAST_DROP
if ucast_arp:
self.flags |= e.GBP_BD_API_FLAG_UCAST_ARP
def add_vpp_config(self):
self._test.vapi.gbp_bridge_domain_add(
self.bd.bd_id,
self.rd.rd_id,
self.flags,
self.bvi.sw_if_index,
self.uu_fwd.sw_if_index if self.uu_fwd else INDEX_INVALID,
self.bm_flood.sw_if_index if self.bm_flood else INDEX_INVALID)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_bridge_domain_del(self.bd.bd_id)
def object_id(self):
return "gbp-bridge-domain:[%d]" % (self.bd.bd_id)
def query_vpp_config(self):
bds = self._test.vapi.gbp_bridge_domain_dump()
for bd in bds:
if bd.bd.bd_id == self.bd.bd_id:
return True
return False
class VppGbpRouteDomain(VppObject):
"""
GBP Route Domain
"""
def __init__(self, test, rd_id, scope, t4, t6, ip4_uu=None, ip6_uu=None):
self._test = test
self.rd_id = rd_id
self.scope = scope
self.t4 = t4
self.t6 = t6
self.ip4_uu = ip4_uu
self.ip6_uu = ip6_uu
def add_vpp_config(self):
self._test.vapi.gbp_route_domain_add(
self.rd_id,
self.scope,
self.t4.table_id,
self.t6.table_id,
self.ip4_uu.sw_if_index if self.ip4_uu else INDEX_INVALID,
self.ip6_uu.sw_if_index if self.ip6_uu else INDEX_INVALID)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_route_domain_del(self.rd_id)
def object_id(self):
return "gbp-route-domain:[%d]" % (self.rd_id)
def query_vpp_config(self):
rds = self._test.vapi.gbp_route_domain_dump()
for rd in rds:
if rd.rd.rd_id == self.rd_id:
return True
return False
class VppGbpContractNextHop():
def __init__(self, mac, bd, ip, rd):
self.mac = mac
self.ip = ip
self.bd = bd
self.rd = rd
def encode(self):
return {'ip': self.ip,
'mac': self.mac.packed,
'bd_id': self.bd.bd.bd_id,
'rd_id': self.rd.rd_id}
class VppGbpContractRule():
def __init__(self, action, hash_mode, nhs=None):
self.action = action
self.hash_mode = hash_mode
self.nhs = [] if nhs is None else nhs
def encode(self):
nhs = []
for nh in self.nhs:
nhs.append(nh.encode())
while len(nhs) < 8:
nhs.append({})
return {'action': self.action,
'nh_set': {
'hash_mode': self.hash_mode,
'n_nhs': len(self.nhs),
'nhs': nhs}}
def __repr__(self):
return '<VppGbpContractRule action=%s, hash_mode=%s>' % (
self.action, self.hash_mode)
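# Encoding sketch (for illustration only): a plain permit rule with no next-hops,
#   VppGbpContractRule(GBP_API_RULE_PERMIT, GBP_API_HASH_MODE_SRC_IP, []).encode()
# yields {'action': ..., 'nh_set': {'hash_mode': ..., 'n_nhs': 0, 'nhs': [{}, ...]}}
# with the nh list padded out to 8 entries.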
class VppGbpContract(VppObject):
"""
GBP Contract
"""
def __init__(self, test, scope, sclass, dclass, acl_index,
rules, allowed_ethertypes):
self._test = test
if not isinstance(rules, list):
raise ValueError("'rules' must be a list.")
if not isinstance(allowed_ethertypes, list):
raise ValueError("'allowed_ethertypes' must be a list.")
self.scope = scope
self.acl_index = acl_index
self.sclass = sclass
self.dclass = dclass
self.rules = rules
self.allowed_ethertypes = allowed_ethertypes
while (len(self.allowed_ethertypes) < 16):
self.allowed_ethertypes.append(0)
def add_vpp_config(self):
rules = []
for r in self.rules:
rules.append(r.encode())
r = self._test.vapi.gbp_contract_add_del(
is_add=1,
contract={
'acl_index': self.acl_index,
'scope': self.scope,
'sclass': self.sclass,
'dclass': self.dclass,
'n_rules': len(rules),
'rules': rules,
'n_ether_types': len(self.allowed_ethertypes),
'allowed_ethertypes': self.allowed_ethertypes})
self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_contract_add_del(
is_add=0,
contract={
'acl_index': self.acl_index,
'scope': self.scope,
'sclass': self.sclass,
'dclass': self.dclass,
'n_rules': 0,
'rules': [],
'n_ether_types': len(self.allowed_ethertypes),
'allowed_ethertypes': self.allowed_ethertypes})
def object_id(self):
return "gbp-contract:[%d:%d:%d:%d]" % (self.scope,
self.sclass,
self.dclass,
self.acl_index)
def query_vpp_config(self):
cs = self._test.vapi.gbp_contract_dump()
for c in cs:
if c.contract.scope == self.scope \
and c.contract.sclass == self.sclass \
and c.contract.dclass == self.dclass:
return True
return False
def get_drop_stats(self):
c = self._test.statistics.get_counter("/net/gbp/contract/drop")
return c[0][self.stats_index]
def get_permit_stats(self):
c = self._test.statistics.get_counter("/net/gbp/contract/permit")
return c[0][self.stats_index]
class VppGbpVxlanTunnel(VppInterface):
"""
GBP VXLAN tunnel
"""
def __init__(self, test, vni, bd_rd_id, mode, src):
super(VppGbpVxlanTunnel, self).__init__(test)
self._test = test
self.vni = vni
self.bd_rd_id = bd_rd_id
self.mode = mode
self.src = src
def add_vpp_config(self):
r = self._test.vapi.gbp_vxlan_tunnel_add(
self.vni,
self.bd_rd_id,
self.mode,
self.src)
self.set_sw_if_index(r.sw_if_index)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_vxlan_tunnel_del(self.vni)
def object_id(self):
return "gbp-vxlan:%d" % (self.sw_if_index)
def query_vpp_config(self):
return find_gbp_vxlan(self._test, self.vni)
class TestGBP(VppTestCase):
""" GBP Test Case """
@property
def config_flags(self):
return VppEnum.vl_api_nat_config_flags_t
@classmethod
def setUpClass(cls):
super(TestGBP, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestGBP, cls).tearDownClass()
def setUp(self):
super(TestGBP, self).setUp()
self.create_pg_interfaces(range(9))
self.create_loopback_interfaces(8)
self.router_mac = MACAddress("00:11:22:33:44:55")
for i in self.pg_interfaces:
i.admin_up()
for i in self.lo_interfaces:
i.admin_up()
self.vlan_100 = VppDot1QSubint(self, self.pg0, 100)
self.vlan_100.admin_up()
self.vlan_101 = VppDot1QSubint(self, self.pg0, 101)
self.vlan_101.admin_up()
self.vlan_102 = VppDot1QSubint(self, self.pg0, 102)
self.vlan_102.admin_up()
def tearDown(self):
for i in self.pg_interfaces:
i.admin_down()
super(TestGBP, self).tearDown()
for i in self.lo_interfaces:
i.remove_vpp_config()
self.lo_interfaces = []
self.vlan_102.remove_vpp_config()
self.vlan_101.remove_vpp_config()
self.vlan_100.remove_vpp_config()
def send_and_expect_bridged(self, src, tx, dst):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IP].src, tx[0][IP].src)
self.assertEqual(r[IP].dst, tx[0][IP].dst)
return rx
def send_and_expect_bridged6(self, src, tx, dst):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
return rx
def send_and_expect_routed(self, src, tx, dst, src_mac):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, src_mac)
self.assertEqual(r[Ether].dst, dst.remote_mac)
self.assertEqual(r[IP].src, tx[0][IP].src)
self.assertEqual(r[IP].dst, tx[0][IP].dst)
return rx
def send_and_expect_routed6(self, src, tx, dst, src_mac):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, src_mac)
self.assertEqual(r[Ether].dst, dst.remote_mac)
self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
return rx
def send_and_expect_natted(self, src, tx, dst, src_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IP].src, src_ip)
self.assertEqual(r[IP].dst, tx[0][IP].dst)
return rx
def send_and_expect_natted6(self, src, tx, dst, src_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IPv6].src, src_ip)
self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
return rx
def send_and_expect_unnatted(self, src, tx, dst, dst_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IP].dst, dst_ip)
self.assertEqual(r[IP].src, tx[0][IP].src)
return rx
def send_and_expect_unnatted6(self, src, tx, dst, dst_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[IPv6].dst, dst_ip)
self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
return rx
def send_and_expect_double_natted(self, src, tx, dst, src_ip, dst_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, str(self.router_mac))
self.assertEqual(r[Ether].dst, dst.remote_mac)
self.assertEqual(r[IP].dst, dst_ip)
self.assertEqual(r[IP].src, src_ip)
return rx
def send_and_expect_double_natted6(self, src, tx, dst, src_ip, dst_ip):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, str(self.router_mac))
self.assertEqual(r[Ether].dst, dst.remote_mac)
self.assertEqual(r[IPv6].dst, dst_ip)
self.assertEqual(r[IPv6].src, src_ip)
return rx
def send_and_expect_no_arp(self, src, tx, dst):
self.pg_send(src, tx)
dst.get_capture(0, timeout=1)
dst.assert_nothing_captured(remark="")
timeout = 0.1
def send_and_expect_arp(self, src, tx, dst):
rx = self.send_and_expect(src, tx, dst)
for r in rx:
self.assertEqual(r[Ether].src, tx[0][Ether].src)
self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
self.assertEqual(r[ARP].psrc, tx[0][ARP].psrc)
self.assertEqual(r[ARP].pdst, tx[0][ARP].pdst)
self.assertEqual(r[ARP].hwsrc, tx[0][ARP].hwsrc)
self.assertEqual(r[ARP].hwdst, tx[0][ARP].hwdst)
return rx
def test_gbp(self):
""" Group Based Policy """
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
#
# Route Domains
#
gt4 = VppIpTable(self, 0)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 0, is_ip6=True)
gt6.add_vpp_config()
nt4 = VppIpTable(self, 20)
nt4.add_vpp_config()
nt6 = VppIpTable(self, 20, is_ip6=True)
nt6.add_vpp_config()
rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
rd20 = VppGbpRouteDomain(self, 20, 420, nt4, nt6, None, None)
rd0.add_vpp_config()
rd20.add_vpp_config()
#
# Bridge Domains
#
bd1 = VppBridgeDomain(self, 1)
bd2 = VppBridgeDomain(self, 2)
bd20 = VppBridgeDomain(self, 20)
bd1.add_vpp_config()
bd2.add_vpp_config()
bd20.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
gbd20 = VppGbpBridgeDomain(self, bd20, rd20, self.loop2)
gbd1.add_vpp_config()
gbd2.add_vpp_config()
gbd20.add_vpp_config()
#
# 3 EPGs, 2 of which share a BD.
# 2 NAT EPGs, one for floating-IP subnets, the other for internet
#
epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
self.pg4, self.loop0,
"10.0.0.128", "2001:10::128"),
VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
self.pg5, self.loop0,
"10.0.1.128", "2001:10:1::128"),
VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
self.pg6, self.loop1,
"10.0.2.128", "2001:10:2::128"),
VppGbpEndpointGroup(self, 333, 1333, rd20, gbd20,
self.pg7, self.loop2,
"11.0.0.128", "3001::128"),
VppGbpEndpointGroup(self, 444, 1444, rd20, gbd20,
self.pg8, self.loop2,
"11.0.0.129", "3001::129")]
recircs = [VppGbpRecirc(self, epgs[0], self.loop3),
VppGbpRecirc(self, epgs[1], self.loop4),
VppGbpRecirc(self, epgs[2], self.loop5),
VppGbpRecirc(self, epgs[3], self.loop6, is_ext=True),
VppGbpRecirc(self, epgs[4], self.loop7, is_ext=True)]
epg_nat = epgs[3]
recirc_nat = recircs[3]
#
# 4 end-points, 2 in the same subnet, 3 in the same BD
#
eps = [VppGbpEndpoint(self, self.pg0,
epgs[0], recircs[0],
"10.0.0.1", "11.0.0.1",
"2001:10::1", "3001::1"),
VppGbpEndpoint(self, self.pg1,
epgs[0], recircs[0],
"10.0.0.2", "11.0.0.2",
"2001:10::2", "3001::2"),
VppGbpEndpoint(self, self.pg2,
epgs[1], recircs[1],
"10.0.1.1", "11.0.0.3",
"2001:10:1::1", "3001::3"),
VppGbpEndpoint(self, self.pg3,
epgs[2], recircs[2],
"10.0.2.1", "11.0.0.4",
"2001:10:2::1", "3001::4")]
#
# Config related to each of the EPGs
#
for epg in epgs:
# IP config on the BVI interfaces
if epg != epgs[1] and epg != epgs[4]:
b4 = VppIpInterfaceBind(self, epg.bvi,
epg.rd.t4).add_vpp_config()
b6 = VppIpInterfaceBind(self, epg.bvi,
epg.rd.t6).add_vpp_config()
epg.bvi.set_mac(self.router_mac)
# The BVIs are NAT inside interfaces
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=epg.bvi.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat66_add_del_interface(
is_add=1, flags=flags,
sw_if_index=epg.bvi.sw_if_index)
if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
epg.bvi_ip4, 32,
bind=b4).add_vpp_config()
if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
epg.bvi_ip6, 128,
bind=b6).add_vpp_config()
# EPG uplink interfaces in the RD
VppIpInterfaceBind(self, epg.uplink, epg.rd.t4).add_vpp_config()
VppIpInterfaceBind(self, epg.uplink, epg.rd.t6).add_vpp_config()
# add the BD ARP termination entry for BVI IP
epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
str(self.router_mac),
epg.bvi_ip4)
epg.bd_arp_ip6 = VppBridgeDomainArpEntry(self, epg.bd.bd,
str(self.router_mac),
epg.bvi_ip6)
epg.bd_arp_ip4.add_vpp_config()
epg.bd_arp_ip6.add_vpp_config()
# EPG in VPP
epg.add_vpp_config()
for recirc in recircs:
# EPG's ingress recirculation interface maps to its RD
VppIpInterfaceBind(self, recirc.recirc,
recirc.epg.rd.t4).add_vpp_config()
VppIpInterfaceBind(self, recirc.recirc,
recirc.epg.rd.t6).add_vpp_config()
self.vapi.nat44_interface_add_del_feature(
sw_if_index=recirc.recirc.sw_if_index, is_add=1)
self.vapi.nat66_add_del_interface(
is_add=1,
sw_if_index=recirc.recirc.sw_if_index)
recirc.add_vpp_config()
for recirc in recircs:
self.assertTrue(find_bridge_domain_port(self,
recirc.epg.bd.bd.bd_id,
recirc.recirc.sw_if_index))
for ep in eps:
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
#
            # routes to the endpoints. We need these since there are no
            # adj-fibs, because the BVI address is a /32 and
            # the subnet is not attached.
#
for (ip, fip) in zip(ep.ips, ep.fips):
# Add static mappings for each EP from the 10/8 to 11/8 network
if ip_address(ip).version == 4:
flags = self.config_flags.NAT_IS_ADDR_ONLY
self.vapi.nat44_add_del_static_mapping(
is_add=1,
local_ip_address=ip,
external_ip_address=fip,
external_sw_if_index=0xFFFFFFFF,
vrf_id=0,
flags=flags)
else:
self.vapi.nat66_add_del_static_mapping(
local_ip_address=ip,
external_ip_address=fip,
vrf_id=0, is_add=1)
# VPP EP create ...
ep.add_vpp_config()
self.logger.info(self.vapi.cli("sh gbp endpoint"))
# ... results in a Gratuitous ARP/ND on the EPG's uplink
rx = ep.epg.uplink.get_capture(len(ep.ips), timeout=0.2)
for ii, ip in enumerate(ep.ips):
p = rx[ii]
if ip_address(ip).version == 6:
self.assertTrue(p.haslayer(ICMPv6ND_NA))
self.assertEqual(p[ICMPv6ND_NA].tgt, ip)
else:
self.assertTrue(p.haslayer(ARP))
self.assertEqual(p[ARP].psrc, ip)
self.assertEqual(p[ARP].pdst, ip)
# add the BD ARP termination entry for floating IP
for fip in ep.fips:
ba = VppBridgeDomainArpEntry(self, epg_nat.bd.bd, ep.mac,
fip)
ba.add_vpp_config()
# floating IPs route via EPG recirc
r = VppIpRoute(
self, fip, ip_address(fip).max_prefixlen,
[VppRoutePath(fip,
ep.recirc.recirc.sw_if_index,
type=FibPathType.FIB_PATH_TYPE_DVR,
proto=get_dpo_proto(fip))],
table_id=20)
r.add_vpp_config()
            # L2 FIB entries in the NAT EPG BD to bridge the packets from
            # the outside directly to the internal EPG
lf = VppL2FibEntry(self, epg_nat.bd.bd, ep.mac,
ep.recirc.recirc, bvi_mac=0)
lf.add_vpp_config()
#
# ARP packets for unknown IP are sent to the EPG uplink
#
pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
src=self.pg0.remote_mac) /
ARP(op="who-has",
hwdst="ff:ff:ff:ff:ff:ff",
hwsrc=self.pg0.remote_mac,
pdst="10.0.0.88",
psrc="10.0.0.99"))
self.vapi.cli("clear trace")
self.pg0.add_stream(pkt_arp)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rxd = epgs[0].uplink.get_capture(1)
#
# ARP/ND packets get a response
#
pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
src=self.pg0.remote_mac) /
ARP(op="who-has",
hwdst="ff:ff:ff:ff:ff:ff",
hwsrc=self.pg0.remote_mac,
pdst=epgs[0].bvi_ip4,
psrc=eps[0].ip4))
self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
nsma = in6_getnsma(inet_pton(AF_INET6, eps[0].ip6))
d = inet_ntop(AF_INET6, nsma)
pkt_nd = (Ether(dst=in6_getnsmac(nsma),
src=self.pg0.remote_mac) /
IPv6(dst=d, src=eps[0].ip6) /
ICMPv6ND_NS(tgt=epgs[0].bvi_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
self.send_and_expect(self.pg0, [pkt_nd], self.pg0)
#
# broadcast packets are flooded
#
pkt_bcast = (Ether(dst="ff:ff:ff:ff:ff:ff",
src=self.pg0.remote_mac) /
IP(src=eps[0].ip4, dst="232.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.vapi.cli("clear trace")
self.pg0.add_stream(pkt_bcast)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rxd = eps[1].itf.get_capture(1)
self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
rxd = epgs[0].uplink.get_capture(1)
self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
#
# packets to non-local L3 destinations dropped
#
pkt_intra_epg_220_ip4 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst="10.0.0.99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
pkt_inter_epg_222_ip4 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst="10.0.1.99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg0,
pkt_intra_epg_220_ip4 * NUM_PKTS)
pkt_inter_epg_222_ip6 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IPv6(src=eps[0].ip6,
dst="2001:10::99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg0,
pkt_inter_epg_222_ip6 * NUM_PKTS)
#
# Add the subnet routes
#
s41 = VppGbpSubnet(
self, rd0, "10.0.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s42 = VppGbpSubnet(
self, rd0, "10.0.1.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s43 = VppGbpSubnet(
self, rd0, "10.0.2.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s61 = VppGbpSubnet(
self, rd0, "2001:10::1", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s62 = VppGbpSubnet(
self, rd0, "2001:10:1::1", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s63 = VppGbpSubnet(
self, rd0, "2001:10:2::1", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s41.add_vpp_config()
s42.add_vpp_config()
s43.add_vpp_config()
s61.add_vpp_config()
s62.add_vpp_config()
s63.add_vpp_config()
self.send_and_expect_bridged(eps[0].itf,
pkt_intra_epg_220_ip4 * NUM_PKTS,
eps[0].epg.uplink)
self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_222_ip4 * NUM_PKTS,
eps[0].epg.uplink)
self.send_and_expect_bridged6(eps[0].itf,
pkt_inter_epg_222_ip6 * NUM_PKTS,
eps[0].epg.uplink)
self.logger.info(self.vapi.cli("sh ip fib 11.0.0.2"))
self.logger.info(self.vapi.cli("sh gbp endpoint-group"))
self.logger.info(self.vapi.cli("sh gbp endpoint"))
self.logger.info(self.vapi.cli("sh gbp recirc"))
self.logger.info(self.vapi.cli("sh int"))
self.logger.info(self.vapi.cli("sh int addr"))
self.logger.info(self.vapi.cli("sh int feat loop6"))
self.logger.info(self.vapi.cli("sh vlib graph ip4-gbp-src-classify"))
self.logger.info(self.vapi.cli("sh int feat loop3"))
self.logger.info(self.vapi.cli("sh int feat pg0"))
#
# Packet destined to unknown unicast is sent on the epg uplink ...
#
pkt_intra_epg_220_to_uplink = (Ether(src=self.pg0.remote_mac,
dst="00:00:00:33:44:55") /
IP(src=eps[0].ip4,
dst="10.0.0.99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(eps[0].itf,
pkt_intra_epg_220_to_uplink * NUM_PKTS,
eps[0].epg.uplink)
# ... and nowhere else
self.pg1.get_capture(0, timeout=0.1)
self.pg1.assert_nothing_captured(remark="Flood onto other VMS")
pkt_intra_epg_221_to_uplink = (Ether(src=self.pg2.remote_mac,
dst="00:00:00:33:44:66") /
IP(src=eps[0].ip4,
dst="10.0.0.99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(eps[2].itf,
pkt_intra_epg_221_to_uplink * NUM_PKTS,
eps[2].epg.uplink)
#
# Packets from the uplink are forwarded in the absence of a contract
#
pkt_intra_epg_220_from_uplink = (Ether(src="00:00:00:33:44:55",
dst=self.pg0.remote_mac) /
IP(src=eps[0].ip4,
dst="10.0.0.99") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(self.pg4,
pkt_intra_epg_220_from_uplink * NUM_PKTS,
self.pg0)
#
# in the absence of policy, endpoints in the same EPG
# can communicate
#
pkt_intra_epg = (Ether(src=self.pg0.remote_mac,
dst=self.pg1.remote_mac) /
IP(src=eps[0].ip4,
dst=eps[1].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(self.pg0,
pkt_intra_epg * NUM_PKTS,
self.pg1)
#
        # in the absence of policy, endpoints in different EPGs
        # cannot communicate
#
pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
dst=self.pg2.remote_mac) /
IP(src=eps[0].ip4,
dst=eps[2].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
dst=self.pg0.remote_mac) /
IP(src=eps[2].ip4,
dst=eps[0].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst=eps[3].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_221 * NUM_PKTS)
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * NUM_PKTS)
#
# A uni-directional contract from EPG 220 -> 221
#
rule = AclRule(is_permit=1, proto=17)
rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule, rule2])
acl.add_vpp_config()
c1 = VppGbpContract(
self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * NUM_PKTS,
eps[2].itf)
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * NUM_PKTS)
#
# contract for the return direction
#
c2 = VppGbpContract(
self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * NUM_PKTS,
eps[2].itf)
self.send_and_expect_bridged(eps[2].itf,
pkt_inter_epg_221_to_220 * NUM_PKTS,
eps[0].itf)
ds = c2.get_drop_stats()
self.assertEqual(ds['packets'], 0)
ps = c2.get_permit_stats()
self.assertEqual(ps['packets'], NUM_PKTS)
#
# the contract does not allow non-IP
#
pkt_non_ip_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
dst=self.pg2.remote_mac) /
ARP())
self.send_and_assert_no_replies(eps[0].itf,
pkt_non_ip_inter_epg_220_to_221 * 17)
#
# check that inter group is still disabled for the groups
# not in the contract.
#
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * NUM_PKTS)
#
# A uni-directional contract from EPG 220 -> 222 'L3 routed'
#
c3 = VppGbpContract(
self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
self.logger.info(self.vapi.cli("sh gbp contract"))
self.send_and_expect_routed(eps[0].itf,
pkt_inter_epg_220_to_222 * NUM_PKTS,
eps[3].itf,
str(self.router_mac))
#
# remove both contracts, traffic stops in both directions
#
c2.remove_vpp_config()
c1.remove_vpp_config()
c3.remove_vpp_config()
acl.remove_vpp_config()
self.send_and_assert_no_replies(eps[2].itf,
pkt_inter_epg_221_to_220 * NUM_PKTS)
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_221 * NUM_PKTS)
self.send_and_expect_bridged(eps[0].itf,
pkt_intra_epg * NUM_PKTS,
eps[1].itf)
#
# EPs to the outside world
#
# in the EP's RD an external subnet via the NAT EPG's recirc
se1 = VppGbpSubnet(
self, rd0, "0.0.0.0", 0,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=recirc_nat.recirc.sw_if_index,
sclass=epg_nat.sclass)
se2 = VppGbpSubnet(
self, rd0, "11.0.0.0", 8,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=recirc_nat.recirc.sw_if_index,
sclass=epg_nat.sclass)
se16 = VppGbpSubnet(
self, rd0, "::", 0,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=recirc_nat.recirc.sw_if_index,
sclass=epg_nat.sclass)
# in the NAT RD an external subnet via the NAT EPG's uplink
se3 = VppGbpSubnet(
self, rd20, "0.0.0.0", 0,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=epg_nat.uplink.sw_if_index,
sclass=epg_nat.sclass)
se36 = VppGbpSubnet(
self, rd20, "::", 0,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=epg_nat.uplink.sw_if_index,
sclass=epg_nat.sclass)
se4 = VppGbpSubnet(
self, rd20, "11.0.0.0", 8,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
sw_if_index=epg_nat.uplink.sw_if_index,
sclass=epg_nat.sclass)
se1.add_vpp_config()
se2.add_vpp_config()
se16.add_vpp_config()
se3.add_vpp_config()
se36.add_vpp_config()
se4.add_vpp_config()
self.logger.info(self.vapi.cli("sh ip fib 0.0.0.0/0"))
self.logger.info(self.vapi.cli("sh ip fib 11.0.0.1"))
self.logger.info(self.vapi.cli("sh ip6 fib ::/0"))
self.logger.info(self.vapi.cli("sh ip6 fib %s" %
eps[0].fip6))
#
# From an EP to an outside address: IN2OUT
#
pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst="1.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
# no policy yet
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_global * NUM_PKTS)
rule = AclRule(is_permit=1, proto=17, ports=1234)
rule2 = AclRule(is_permit=1, proto=17, ports=1234,
src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)))
acl2 = VppAcl(self, rules=[rule, rule2])
acl2.add_vpp_config()
c4 = VppGbpContract(
self, 400, epgs[0].sclass, epgs[3].sclass, acl2.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c4.add_vpp_config()
self.send_and_expect_natted(eps[0].itf,
pkt_inter_epg_220_to_global * NUM_PKTS,
self.pg7,
eps[0].fip4)
pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IPv6(src=eps[0].ip6,
dst="6001::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_natted6(self.pg0,
pkt_inter_epg_220_to_global * NUM_PKTS,
self.pg7,
eps[0].fip6)
#
# From a global address to an EP: OUT2IN
#
pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
dst=self.pg0.remote_mac) /
IP(dst=eps[0].fip4,
src="1.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(
self.pg7, pkt_inter_epg_220_from_global * NUM_PKTS)
c5 = VppGbpContract(
self, 400, epgs[3].sclass, epgs[0].sclass, acl2.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c5.add_vpp_config()
self.send_and_expect_unnatted(self.pg7,
pkt_inter_epg_220_from_global * NUM_PKTS,
eps[0].itf,
eps[0].ip4)
pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
dst=self.pg0.remote_mac) /
IPv6(dst=eps[0].fip6,
src="6001::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_unnatted6(
self.pg7,
pkt_inter_epg_220_from_global * NUM_PKTS,
eps[0].itf,
eps[0].ip6)
#
        # From a local VM to another local VM using their respective public addresses:
# IN2OUT2IN
#
pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst=eps[1].fip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_double_natted(eps[0].itf,
pkt_intra_epg_220_global * NUM_PKTS,
eps[1].itf,
eps[0].fip4,
eps[1].ip4)
pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IPv6(src=eps[0].ip6,
dst=eps[1].fip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_double_natted6(
eps[0].itf,
pkt_intra_epg_220_global * NUM_PKTS,
eps[1].itf,
eps[0].fip6,
eps[1].ip6)
#
# cleanup
#
for ep in eps:
# del static mappings for each EP from the 10/8 to 11/8 network
flags = self.config_flags.NAT_IS_ADDR_ONLY
self.vapi.nat44_add_del_static_mapping(
is_add=0,
local_ip_address=ep.ip4,
external_ip_address=ep.fip4,
external_sw_if_index=0xFFFFFFFF,
vrf_id=0,
flags=flags)
self.vapi.nat66_add_del_static_mapping(
local_ip_address=ep.ip6,
external_ip_address=ep.fip6,
vrf_id=0, is_add=0)
for epg in epgs:
# IP config on the BVI interfaces
if epg != epgs[0] and epg != epgs[3]:
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=epg.bvi.sw_if_index,
flags=flags,
is_add=0)
self.vapi.nat66_add_del_interface(
is_add=0, flags=flags,
sw_if_index=epg.bvi.sw_if_index)
for recirc in recircs:
self.vapi.nat44_interface_add_del_feature(
sw_if_index=recirc.recirc.sw_if_index,
is_add=0)
self.vapi.nat66_add_del_interface(
is_add=0,
sw_if_index=recirc.recirc.sw_if_index)
def wait_for_ep_timeout(self, sw_if_index=None, ip=None, mac=None,
tep=None, n_tries=100, s_time=1):
        # only learnt EPs can time out
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
flags = ep_flags.GBP_API_ENDPOINT_FLAG_LEARNT
while (n_tries):
if not find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
flags=flags):
return True
n_tries = n_tries - 1
self.sleep(s_time)
self.assertFalse(find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
flags=flags))
return False
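    # (wait_for_ep_timeout polls the endpoint dump up to n_tries times, sleeping
    # s_time seconds between attempts, and fails the test if a learnt endpoint
    # matching the filters is still present afterwards.)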
def test_gbp_learn_l2(self):
""" GBP L2 Endpoint Learning """
drop_no_contract = self.statistics.get_err_counter(
'/err/gbp-policy-port/drop-no-contract')
allow_intra_class = self.statistics.get_err_counter(
'/err/gbp-policy-port/allow-intra-sclass')
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
learnt = [{'mac': '00:00:11:11:11:01',
'ip': '10.0.0.1',
'ip6': '2001:10::2'},
{'mac': '00:00:11:11:11:02',
'ip': '10.0.0.2',
'ip6': '2001:10::3'}]
#
# IP tables
#
gt4 = VppIpTable(self, 1)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 1, is_ip6=True)
gt6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
rd1.add_vpp_config()
#
        # Pg2 hosts the vxlan tunnel; hosts on pg2 act as TEPs
# Pg3 hosts the IP4 UU-flood VXLAN tunnel
# Pg4 hosts the IP6 UU-flood VXLAN tunnel
#
self.pg2.config_ip4()
self.pg2.resolve_arp()
self.pg2.generate_remote_hosts(4)
self.pg2.configure_ipv4_neighbors()
self.pg3.config_ip4()
self.pg3.resolve_arp()
self.pg4.config_ip4()
self.pg4.resolve_arp()
#
# Add a mcast destination VXLAN-GBP tunnel for B&M traffic
#
tun_bm = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
"239.1.1.1", 88,
mcast_itf=self.pg4)
tun_bm.add_vpp_config()
#
# a GBP bridge domain with a BVI and a UU-flood interface
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
self.pg3, tun_bm)
gbd1.add_vpp_config()
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
# ... and has a /32 applied
ip_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128", 32)
ip_addr.add_vpp_config()
#
# The Endpoint-group in which we are learning endpoints
#
epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
None, self.loop0,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(4))
epg_220.add_vpp_config()
epg_330 = VppGbpEndpointGroup(self, 330, 113, rd1, gbd1,
None, self.loop1,
"10.0.1.128",
"2001:11::128",
VppGbpEndpointRetention(4))
epg_330.add_vpp_config()
#
# The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
# learning enabled
#
vx_tun_l2_1 = VppGbpVxlanTunnel(
self, 99, bd1.bd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
self.pg2.local_ip4)
vx_tun_l2_1.add_vpp_config()
#
# A static endpoint that the learnt endpoints are trying to
# talk to
#
ep = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.127", "11.0.0.127",
"2001:10::1", "3001::1")
ep.add_vpp_config()
self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
# a packet with an sclass from an unknown EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[0].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=88, flags=0x88) /
Ether(src=learnt[0]["mac"], dst=ep.mac) /
IP(src=learnt[0]["ip"], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg2, p)
self.logger.info(self.vapi.cli("sh error"))
self.assert_error_counter_equal(
'/err/gbp-policy-port/drop-no-contract',
drop_no_contract + 1)
#
# we should not have learnt a new tunnel endpoint, since
# the EPG was not learnt.
#
self.assertEqual(INDEX_INVALID,
find_vxlan_gbp_tunnel(self,
self.pg2.local_ip4,
self.pg2.remote_hosts[0].ip4,
99))
# ep is not learnt, because the EPG is unknown
self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
#
# Learn new EPs from IP packets
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=112, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
99)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
#
# the EP is learnt via the learnt TEP
# both from its MAC and its IP
#
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
ip=l['ip']))
self.assert_error_counter_equal(
'/err/gbp-policy-port/allow-intra-sclass',
allow_intra_class + 2)
self.logger.info(self.vapi.cli("show gbp endpoint"))
self.logger.info(self.vapi.cli("show gbp vxlan"))
self.logger.info(self.vapi.cli("show ip mfib"))
#
# If we sleep for the threshold time, the learnt endpoints should
# age out
#
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
#
# Learn new EPs from GARP packets received on the BD's mcast tunnel
#
for ii, l in enumerate(learnt):
            # add some junk in the reserved field of the vxlan-header
            # next to the VNI. We should accept the packet, since reserved
            # bits are ignored on rx.
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst="239.1.1.1") /
UDP(sport=1234, dport=48879) /
VXLAN(vni=88, reserved2=0x80, gpid=112, flags=0x88) /
Ether(src=l['mac'], dst="ff:ff:ff:ff:ff:ff") /
ARP(op="who-has",
psrc=l['ip'], pdst=l['ip'],
hwsrc=l['mac'], hwdst="ff:ff:ff:ff:ff:ff"))
rx = self.send_and_expect(self.pg4, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
99)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
#
# the EP is learnt via the learnt TEP
# both from its MAC and its IP
#
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
ip=l['ip']))
#
# wait for the learnt endpoints to age out
#
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
#
# Learn new EPs from L2 packets
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=112, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
99)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
#
# the EP is learnt via the learnt TEP
# both from its MAC and its IP
#
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
self.logger.info(self.vapi.cli("show gbp endpoint"))
self.logger.info(self.vapi.cli("show gbp vxlan"))
self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
#
# wait for the learnt endpoints to age out
#
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
#
        # repeat: the Do-not-Learn bit is set, so the EPs are not learnt
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=112, flags=0x88, gpflags="D") /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
for l in learnt:
self.assertFalse(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
#
# repeat
#
for l in learnt:
# a packet with an sclass from a known EPG
            # set a reserved bit in addition to the G and I bits;
            # reserved bits should not be checked on rx.
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=112, flags=0xc8) /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
#
# Static EP replies to dynamics
#
self.logger.info(self.vapi.cli("sh l2fib bd_id 1"))
for l in learnt:
p = (Ether(src=ep.mac, dst=l['mac']) /
IP(dst=l['ip'], src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 17, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 112)
self.assertEqual(rx[VXLAN].vni, 99)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
#
# repeat in the other EPG
# there's no contract between 220 and 330, but the A-bit is set
# so the packet is cleared for delivery
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
#
# static EP cannot reach the learnt EPs since there is no contract
        # only test 1 EP as the others could time out
#
p = (Ether(src=ep.mac, dst=l['mac']) /
IP(dst=learnt[0]['ip'], src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg0, [p])
#
# refresh the entries after the check for no replies above
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
#
# Add the contract so they can talk
#
rule = AclRule(is_permit=1, proto=17)
rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule, rule2])
acl.add_vpp_config()
c1 = VppGbpContract(
self, 401, epg_220.sclass, epg_330.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
for l in learnt:
p = (Ether(src=ep.mac, dst=l['mac']) /
IP(dst=l['ip'], src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg0, [p], self.pg2)
#
# send UU packets from the local EP
#
self.logger.info(self.vapi.cli("sh gbp bridge"))
self.logger.info(self.vapi.cli("sh bridge-domain 1 detail"))
p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
IP(dst="10.0.0.133", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(ep.itf, [p_uu], gbd1.uu_fwd)
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
IP(dst="10.0.0.133", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect_only(ep.itf, [p_bm], tun_bm.mcast_itf)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg4.local_ip4)
self.assertEqual(rx[IP].dst, "239.1.1.1")
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 112)
self.assertEqual(rx[VXLAN].vni, 88)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertFalse(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
rule = AclRule(is_permit=1, proto=17)
rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule, rule2])
acl.add_vpp_config()
c2 = VppGbpContract(
self, 401, epg_330.sclass, epg_220.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
#
# Check v6 Endpoints learning
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=113, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
IPv6(src=l['ip6'], dst=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(
self,
vx_tun_l2_1.sw_if_index,
ip=l['ip6'],
tep=[self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4]))
self.logger.info(self.vapi.cli("sh int"))
self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
self.logger.info(self.vapi.cli("sh gbp vxlan"))
self.logger.info(self.vapi.cli("sh gbp endpoint"))
self.logger.info(self.vapi.cli("sh gbp interface"))
#
# EP moves to a different TEP
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[2].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=113, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
IPv6(src=l['ip6'], dst=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(
self,
vx_tun_l2_1.sw_if_index,
sclass=113,
mac=l['mac'],
tep=[self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4]))
#
# v6 remote EP reachability
#
for l in learnt:
p = (Ether(src=ep.mac, dst=l['mac']) /
IPv6(dst=l['ip6'], src=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 112)
self.assertEqual(rx[VXLAN].vni, 99)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
self.assertEqual(rx[IPv6].dst, l['ip6'])
#
# EP changes sclass
#
for l in learnt:
# a packet with an sclass from a known EPG
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[2].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=112, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
IPv6(src=l['ip6'], dst=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(
self,
vx_tun_l2_1.sw_if_index,
mac=l['mac'],
sclass=112,
tep=[self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4]))
#
# check reachability and contract intra-epg
#
allow_intra_class = self.statistics.get_err_counter(
'/err/gbp-policy-mac/allow-intra-sclass')
for l in learnt:
p = (Ether(src=ep.mac, dst=l['mac']) /
IPv6(dst=l['ip6'], src=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
self.assertEqual(rx[UDP].dport, 48879)
self.assertEqual(rx[VXLAN].gpid, 112)
self.assertEqual(rx[VXLAN].vni, 99)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
self.assertEqual(rx[IPv6].dst, l['ip6'])
allow_intra_class += NUM_PKTS
self.assert_error_counter_equal(
'/err/gbp-policy-mac/allow-intra-sclass',
allow_intra_class)
#
# clean up
#
for l in learnt:
self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
mac=l['mac'])
self.pg2.unconfig_ip4()
self.pg3.unconfig_ip4()
self.pg4.unconfig_ip4()
def test_gbp_contract(self):
""" GBP Contracts """
#
# Route Domains
#
gt4 = VppIpTable(self, 0)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 0, is_ip6=True)
gt6.add_vpp_config()
rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
rd0.add_vpp_config()
#
# Bridge Domains
#
bd1 = VppBridgeDomain(self, 1, arp_term=0)
bd2 = VppBridgeDomain(self, 2, arp_term=0)
bd1.add_vpp_config()
bd2.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
gbd1.add_vpp_config()
gbd2.add_vpp_config()
#
# 3 EPGs, 2 of which share a BD.
#
epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
None, self.loop0,
"10.0.0.128", "2001:10::128"),
VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
None, self.loop0,
"10.0.1.128", "2001:10:1::128"),
VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
None, self.loop1,
"10.0.2.128", "2001:10:2::128")]
#
# 4 end-points, 2 in the same subnet, 3 in the same BD
#
eps = [VppGbpEndpoint(self, self.pg0,
epgs[0], None,
"10.0.0.1", "11.0.0.1",
"2001:10::1", "3001::1"),
VppGbpEndpoint(self, self.pg1,
epgs[0], None,
"10.0.0.2", "11.0.0.2",
"2001:10::2", "3001::2"),
VppGbpEndpoint(self, self.pg2,
epgs[1], None,
"10.0.1.1", "11.0.0.3",
"2001:10:1::1", "3001::3"),
VppGbpEndpoint(self, self.pg3,
epgs[2], None,
"10.0.2.1", "11.0.0.4",
"2001:10:2::1", "3001::4")]
#
# Config related to each of the EPGs
#
for epg in epgs:
# IP config on the BVI interfaces
if epg != epgs[1]:
b4 = VppIpInterfaceBind(self, epg.bvi,
epg.rd.t4).add_vpp_config()
b6 = VppIpInterfaceBind(self, epg.bvi,
epg.rd.t6).add_vpp_config()
epg.bvi.set_mac(self.router_mac)
if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
epg.bvi_ip4, 32,
bind=b4).add_vpp_config()
if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
epg.bvi_ip6, 128,
bind=b6).add_vpp_config()
# add the BD ARP termination entry for BVI IP
epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
str(self.router_mac),
epg.bvi_ip4)
epg.bd_arp_ip4.add_vpp_config()
# EPG in VPP
epg.add_vpp_config()
#
# config ep
#
for ep in eps:
ep.add_vpp_config()
self.logger.info(self.vapi.cli("show gbp endpoint"))
self.logger.info(self.vapi.cli("show interface"))
self.logger.info(self.vapi.cli("show br"))
#
# Intra epg allowed without contract
#
pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
dst=self.pg1.remote_mac) /
IP(src=eps[0].ip4,
dst=eps[1].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(self.pg0,
pkt_intra_epg_220_to_220 * 65,
self.pg1)
pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
dst=self.pg1.remote_mac) /
IPv6(src=eps[0].ip6,
dst=eps[1].ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged6(self.pg0,
pkt_intra_epg_220_to_220 * 65,
self.pg1)
#
# Inter epg denied without contract
#
pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
dst=self.pg2.remote_mac) /
IP(src=eps[0].ip4,
dst=eps[2].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg0, pkt_inter_epg_220_to_221)
#
# A uni-directional contract from EPG 220 -> 221
#
rule = AclRule(is_permit=1, proto=17)
rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
rule3 = AclRule(is_permit=1, proto=1)
acl = VppAcl(self, rules=[rule, rule2, rule3])
acl.add_vpp_config()
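        # a contract is keyed on (scope, src-sclass, dst-sclass); it
        # references the ACL above, a list of per-rule actions (all PERMIT
        # here) and the set of allowed ethertypes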
c1 = VppGbpContract(
self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * 65,
eps[2].itf)
pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst=eps[3].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * 65)
#
# ping router IP in different BD
#
pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[0].ip4,
dst=epgs[1].bvi_ip4) /
ICMP(type='echo-request'))
self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
dst=str(self.router_mac)) /
IPv6(src=eps[0].ip6,
dst=epgs[1].bvi_ip6) /
ICMPv6EchoRequest())
self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
#
# contract for the return direction
#
c2 = VppGbpContract(
self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * 65,
eps[2].itf)
pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
dst=self.pg0.remote_mac) /
IP(src=eps[2].ip4,
dst=eps[0].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_bridged(eps[2].itf,
pkt_inter_epg_221_to_220 * 65,
eps[0].itf)
pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
dst=str(self.router_mac)) /
IP(src=eps[2].ip4,
dst=eps[0].ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_routed(eps[2].itf,
pkt_inter_epg_221_to_220 * 65,
eps[0].itf,
str(self.router_mac))
pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
dst=str(self.router_mac)) /
IPv6(src=eps[2].ip6,
dst=eps[0].ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect_routed6(eps[2].itf,
pkt_inter_epg_221_to_220 * 65,
eps[0].itf,
str(self.router_mac))
#
        # a uni-directional contract between 220 and 222
#
c3 = VppGbpContract(
self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
self.send_and_expect(eps[0].itf,
pkt_inter_epg_220_to_222 * 65,
eps[3].itf)
c3.remove_vpp_config()
c1.remove_vpp_config()
c2.remove_vpp_config()
acl.remove_vpp_config()
def test_gbp_bd_drop_flags(self):
""" GBP BD drop flags """
#
# IP tables
#
gt4 = VppIpTable(self, 1)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 1, is_ip6=True)
gt6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
rd1.add_vpp_config()
#
# a GBP bridge domain with a BVI only
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
None, None,
uu_drop=True, bm_drop=True)
gbd1.add_vpp_config()
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
# ... and has a /32 applied
ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32).add_vpp_config()
#
# The Endpoint-group
#
epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
None, self.loop0,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(3))
epg_220.add_vpp_config()
ep = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.127", "11.0.0.127",
"2001:10::1", "3001::1")
ep.add_vpp_config()
#
# send UU/BM packet from the local EP with UU drop and BM drop enabled
# in bd
#
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
IP(dst="10.0.0.133", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(ep.itf, [p_uu])
p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
IP(dst="10.0.0.133", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(ep.itf, [p_bm])
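        # with uu_drop/bm_drop set on the GBP BD, unknown-unicast and
        # broadcast/multicast frames are dropped rather than flooded,
        # hence no replies above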
self.pg3.unconfig_ip4()
self.logger.info(self.vapi.cli("sh int"))
def test_gbp_bd_arp_flags(self):
""" GBP BD arp flags """
#
# IP tables
#
gt4 = VppIpTable(self, 1)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 1, is_ip6=True)
gt6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
rd1.add_vpp_config()
#
# Pg4 hosts the IP6 UU-flood VXLAN tunnel
#
self.pg4.config_ip4()
self.pg4.resolve_arp()
#
# Add a mcast destination VXLAN-GBP tunnel for B&M traffic
#
tun_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
"239.1.1.1", 88,
mcast_itf=self.pg4)
tun_uu.add_vpp_config()
#
# a GBP bridge domain with a BVI and a UU-flood interface
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
tun_uu, None,
ucast_arp=True)
gbd1.add_vpp_config()
# ... and has a /32 applied
ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32).add_vpp_config()
#
# The Endpoint-group
#
epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
None, self.loop0,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(2))
epg_220.add_vpp_config()
ep = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.127", "11.0.0.127",
"2001:10::1", "3001::1")
ep.add_vpp_config()
#
        # send an ARP packet from the local EP; expect it on the uu interface
#
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
p_arp = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
ARP(op="who-has",
psrc=ep.ip4, pdst="10.0.0.99",
hwsrc=ep.mac,
hwdst="ff:ff:ff:ff:ff:ff"))
self.send_and_expect(ep.itf, [p_arp], self.pg4)
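        # with ucast_arp set, the broadcast ARP request is not flooded
        # but unicast onto the BD's UU-fwd tunnel, which is why it
        # arrives on pg4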
self.pg4.unconfig_ip4()
def test_gbp_learn_vlan_l2(self):
""" GBP L2 Endpoint w/ VLANs"""
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
learnt = [{'mac': '00:00:11:11:11:01',
'ip': '10.0.0.1',
'ip6': '2001:10::2'},
{'mac': '00:00:11:11:11:02',
'ip': '10.0.0.2',
'ip6': '2001:10::3'}]
#
# IP tables
#
gt4 = VppIpTable(self, 1)
gt4.add_vpp_config()
gt6 = VppIpTable(self, 1, is_ip6=True)
gt6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
rd1.add_vpp_config()
#
        # Pg2 hosts the vxlan tunnel; hosts on pg2 act as TEPs
#
self.pg2.config_ip4()
self.pg2.resolve_arp()
self.pg2.generate_remote_hosts(4)
self.pg2.configure_ipv4_neighbors()
self.pg3.config_ip4()
self.pg3.resolve_arp()
#
# The EP will be on a vlan sub-interface
#
vlan_11 = VppDot1QSubint(self, self.pg0, 11)
vlan_11.admin_up()
self.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=vlan_11.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
push_dot1q=11)
bd_uu_fwd = VppVxlanGbpTunnel(self, self.pg3.local_ip4,
self.pg3.remote_ip4, 116)
bd_uu_fwd.add_vpp_config()
#
# a GBP bridge domain with a BVI and a UU-flood interface
# The BD is marked as do not learn, so no endpoints are ever
# learnt in this BD.
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, bd_uu_fwd,
learn=False)
gbd1.add_vpp_config()
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
# ... and has a /32 applied
ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32).add_vpp_config()
#
# The Endpoint-group in which we are learning endpoints
#
epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
None, self.loop0,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(4))
epg_220.add_vpp_config()
#
# The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
# learning enabled
#
vx_tun_l2_1 = VppGbpVxlanTunnel(
self, 99, bd1.bd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
self.pg2.local_ip4)
vx_tun_l2_1.add_vpp_config()
#
# A static endpoint that the learnt endpoints are trying to
# talk to
#
ep = VppGbpEndpoint(self, vlan_11,
epg_220, None,
"10.0.0.127", "11.0.0.127",
"2001:10::1", "3001::1")
ep.add_vpp_config()
self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
#
# Send to the static EP
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
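            # (UDP dport 48879 is the VXLAN-GBP port, vni 99 selects the
            # gbp-vxlan tunnel, gpid carries the sender's sclass (441) and
            # flags 0x88 sets the Instance and G bits)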
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=99, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst=ep.mac) /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg2, [p], self.pg0)
#
# packet to EP has the EP's vlan tag
#
for rx in rxs:
self.assertEqual(rx[Dot1Q].vlan, 11)
#
# the EP is not learnt since the BD setting prevents it
            # and no TEP is created either
#
self.assertFalse(find_gbp_endpoint(self,
vx_tun_l2_1.sw_if_index,
mac=l['mac']))
self.assertEqual(INDEX_INVALID,
find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
99))
self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
#
# static to remotes
# we didn't learn the remotes so they are sent to the UU-fwd
#
for l in learnt:
p = (Ether(src=ep.mac, dst=l['mac']) /
Dot1Q(vlan=11) /
IP(dst=l['ip'], src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 17, self.pg3)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg3.local_ip4)
self.assertEqual(rx[IP].dst, self.pg3.remote_ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 441)
self.assertEqual(rx[VXLAN].vni, 116)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertFalse(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
self.pg2.unconfig_ip4()
self.pg3.unconfig_ip4()
def test_gbp_learn_l3(self):
""" GBP L3 Endpoint Learning """
self.vapi.cli("set logging class gbp level debug")
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
routed_dst_mac = "00:0c:0c:0c:0c:0c"
routed_src_mac = "00:22:bd:f8:19:ff"
learnt = [{'mac': '00:00:11:11:11:02',
'ip': '10.0.1.2',
'ip6': '2001:10::2'},
{'mac': '00:00:11:11:11:03',
'ip': '10.0.1.3',
'ip6': '2001:10::3'}]
#
# IP tables
#
t4 = VppIpTable(self, 1)
t4.add_vpp_config()
t6 = VppIpTable(self, 1, True)
t6.add_vpp_config()
tun_ip4_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
self.pg4.remote_ip4, 114)
tun_ip6_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
self.pg4.remote_ip4, 116)
tun_ip4_uu.add_vpp_config()
tun_ip6_uu.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 2, 401, t4, t6, tun_ip4_uu, tun_ip6_uu)
rd1.add_vpp_config()
self.loop0.set_mac(self.router_mac)
#
# Bind the BVI to the RD
#
b4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
b6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
#
# Pg2 hosts the vxlan tunnel
# hosts on pg2 to act as TEPs
# pg3 is BD uu-fwd
# pg4 is RD uu-fwd
#
self.pg2.config_ip4()
self.pg2.resolve_arp()
self.pg2.generate_remote_hosts(4)
self.pg2.configure_ipv4_neighbors()
self.pg3.config_ip4()
self.pg3.resolve_arp()
self.pg4.config_ip4()
self.pg4.resolve_arp()
#
# a GBP bridge domain with a BVI and a UU-flood interface
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, self.pg3)
gbd1.add_vpp_config()
self.logger.info(self.vapi.cli("sh bridge 1 detail"))
self.logger.info(self.vapi.cli("sh gbp bridge"))
self.logger.info(self.vapi.cli("sh gbp route"))
# ... and has a /32 and /128 applied
ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32,
bind=b4).add_vpp_config()
ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"2001:10::128", 128,
bind=b6).add_vpp_config()
#
# The Endpoint-group in which we are learning endpoints
#
epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
None, self.loop0,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(4))
epg_220.add_vpp_config()
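        # the retention value (4 here, nominally seconds) bounds how long
        # a DP-learnt endpoint is kept without traffic; wait_for_ep_timeout
        # later in the test relies on this expiry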
#
# The VXLAN GBP tunnel is in L3 mode with learning enabled
#
vx_tun_l3 = VppGbpVxlanTunnel(
self, 101, rd1.rd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
self.pg2.local_ip4)
vx_tun_l3.add_vpp_config()
#
# A static endpoint that the learnt endpoints are trying to
# talk to
#
ep = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.127", "11.0.0.127",
"2001:10::1", "3001::1")
ep.add_vpp_config()
#
# learn some remote IPv4 EPs
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=101, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst="00:00:00:11:11:11") /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
vx_tun_l3.vni)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip=l['ip']))
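        # i.e. learning an EP over the gbp-vxlan parent creates a per-TEP
        # child vxlan-gbp tunnel and registers the endpoint against the
        # parent interface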
#
# Static IPv4 EP replies to learnt
#
for l in learnt:
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst=l['ip'], src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 441)
self.assertEqual(rx[VXLAN].vni, 101)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
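                # the inner frame was routed by the BVI, so it carries the
                # fixed routed src/dst MACs defined at the top of the test
                # rather than the endpoints' own MACs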
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, routed_dst_mac)
self.assertEqual(inner[IP].src, ep.ip4)
self.assertEqual(inner[IP].dst, l['ip'])
for l in learnt:
self.assertFalse(find_gbp_endpoint(self,
tep1_sw_if_index,
ip=l['ip']))
#
# learn some remote IPv6 EPs
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=101, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst="00:00:00:11:11:11") /
IPv6(src=l['ip6'], dst=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
vx_tun_l3.vni)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
self.logger.info(self.vapi.cli("show gbp bridge"))
self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
self.logger.info(self.vapi.cli("show gbp vxlan"))
self.logger.info(self.vapi.cli("show int addr"))
# endpoint learnt via the TEP
self.assertTrue(find_gbp_endpoint(self, ip=l['ip6']))
self.logger.info(self.vapi.cli("show gbp endpoint"))
self.logger.info(self.vapi.cli("show ip fib index 1 %s" % l['ip']))
#
# Static EP replies to learnt
#
for l in learnt:
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IPv6(dst=l['ip6'], src=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 441)
self.assertEqual(rx[VXLAN].vni, 101)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, routed_dst_mac)
self.assertEqual(inner[IPv6].src, ep.ip6)
self.assertEqual(inner[IPv6].dst, l['ip6'])
self.logger.info(self.vapi.cli("sh gbp endpoint"))
for l in learnt:
self.wait_for_ep_timeout(ip=l['ip'])
#
# Static sends to unknown EP with no route
#
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst="10.0.0.99", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_assert_no_replies(self.pg0, [p])
#
# Add a route to static EP's v4 and v6 subnet
#
se_10_24 = VppGbpSubnet(
self, rd1, "10.0.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
se_10_24.add_vpp_config()
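        # a TRANSPORT subnet provides reachability for covered addresses
        # even when no endpoint is known; such packets are sent on the
        # route-domain's uu-fwd tunnel, as checked below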
#
# static pings router
#
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst=epg_220.bvi_ip4, src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IPv6(dst=epg_220.bvi_ip6, src=ep.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
#
        # packets to an address in the subnet are sent on the uu-fwd
#
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst="10.0.0.99", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, [p], self.pg4)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg4.local_ip4)
self.assertEqual(rx[IP].dst, self.pg4.remote_ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 441)
self.assertEqual(rx[VXLAN].vni, 114)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# policy is not applied to packets sent to the uu-fwd interfaces
self.assertFalse(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
#
# learn some remote IPv4 EPs
#
for ii, l in enumerate(learnt):
# a packet with an sclass from a known EPG
# arriving on an unknown TEP
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[2].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=101, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst="00:00:00:11:11:11") /
IP(src=l['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# the new TEP
tep1_sw_if_index = find_vxlan_gbp_tunnel(
self,
self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4,
vx_tun_l3.vni)
self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip=l['ip']))
#
# Add a remote endpoint from the API
#
rep_88 = VppGbpEndpoint(self, vx_tun_l3,
epg_220, None,
"10.0.0.88", "11.0.0.88",
"2001:10::88", "3001::88",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4,
mac=None)
rep_88.add_vpp_config()
#
# Add a remote endpoint from the API that matches an existing one
        # this is a lower priority, hence the packet is sent to the DP learnt
# TEP
#
rep_2 = VppGbpEndpoint(self, vx_tun_l3,
epg_220, None,
learnt[0]['ip'], "11.0.0.101",
learnt[0]['ip6'], "3001::101",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
mac=None)
rep_2.add_vpp_config()
#
# Add a route to the learned EP's v4 subnet
        # packets should be sent on the v4/v6 uu-fwd interface respectively
#
se_10_1_24 = VppGbpSubnet(
self, rd1, "10.0.1.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
se_10_1_24.add_vpp_config()
self.logger.info(self.vapi.cli("show gbp endpoint"))
ips = ["10.0.0.88", learnt[0]['ip']]
for ip in ips:
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst=ip, src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
self.assertEqual(rx[UDP].dport, 48879)
# the UDP source port is a random value for hashing
self.assertEqual(rx[VXLAN].gpid, 441)
self.assertEqual(rx[VXLAN].vni, 101)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, routed_dst_mac)
self.assertEqual(inner[IP].src, ep.ip4)
self.assertEqual(inner[IP].dst, ip)
#
        # remove the API remote EPs; only the API-sourced EP is gone, the
        # DP-learnt one remains
#
rep_88.remove_vpp_config()
rep_2.remove_vpp_config()
self.assertTrue(find_gbp_endpoint(self, ip=rep_2.ip4))
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(src=ep.ip4, dst=rep_2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, [p], self.pg2)
self.assertFalse(find_gbp_endpoint(self, ip=rep_88.ip4))
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(src=ep.ip4, dst=rep_88.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, [p], self.pg4)
#
        # to appease the testcase framework we cannot have the registered EP
        # still present (because it is DP-learnt) when the TC ends, so wait
        # until it is removed
#
self.wait_for_ep_timeout(ip=rep_88.ip4)
self.wait_for_ep_timeout(ip=rep_2.ip4)
#
        # Same as above, learn a remote EP via CP and DP;
        # this time remove the DP one first and expect the CP data to remain
#
rep_3 = VppGbpEndpoint(self, vx_tun_l3,
epg_220, None,
"10.0.1.4", "11.0.0.103",
"2001::10:3", "3001::103",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg2.local_ip4,
self.pg2.remote_hosts[1].ip4,
mac=None)
rep_3.add_vpp_config()
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[2].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=101, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst="00:00:00:11:11:11") /
IP(src="10.0.1.4", dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip=rep_3.ip4,
tep=[self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4]))
p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
IP(dst="10.0.1.4", src=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
# host 2 is the DP learned TEP
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
self.wait_for_ep_timeout(ip=rep_3.ip4,
tep=[self.pg2.local_ip4,
self.pg2.remote_hosts[2].ip4])
rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
# host 1 is the CP learned TEP
for rx in rxs:
self.assertEqual(rx[IP].src, self.pg2.local_ip4)
self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
#
# shutdown with learnt endpoint present
#
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_hosts[1].ip4,
dst=self.pg2.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=101, gpid=441, flags=0x88) /
Ether(src=l['mac'], dst="00:00:00:11:11:11") /
IP(src=learnt[1]['ip'], dst=ep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg2, [p], self.pg0)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip=l['ip']))
#
# TODO
# remote endpoint becomes local
#
self.pg2.unconfig_ip4()
self.pg3.unconfig_ip4()
self.pg4.unconfig_ip4()
def test_gbp_redirect(self):
""" GBP Endpoint Redirect """
self.vapi.cli("set logging class gbp level debug")
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
routed_dst_mac = "00:0c:0c:0c:0c:0c"
routed_src_mac = "00:22:bd:f8:19:ff"
learnt = [{'mac': '00:00:11:11:11:02',
'ip': '10.0.1.2',
'ip6': '2001:10::2'},
{'mac': '00:00:11:11:11:03',
'ip': '10.0.1.3',
'ip6': '2001:10::3'}]
#
# IP tables
#
t4 = VppIpTable(self, 1)
t4.add_vpp_config()
t6 = VppIpTable(self, 1, True)
t6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6)
rd1.add_vpp_config()
self.loop0.set_mac(self.router_mac)
#
# Bind the BVI to the RD
#
b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
#
# Pg7 hosts a BD's UU-fwd
#
self.pg7.config_ip4()
self.pg7.resolve_arp()
#
        # GBP bridge domains for the EPs
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
gbd1.add_vpp_config()
bd2 = VppBridgeDomain(self, 2)
bd2.add_vpp_config()
gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
gbd2.add_vpp_config()
# ... and has a /32 and /128 applied
ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32,
bind=b_ip4).add_vpp_config()
ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"2001:10::128", 128,
bind=b_ip6).add_vpp_config()
ip4_addr = VppIpInterfaceAddress(self, gbd2.bvi,
"10.0.1.128", 32).add_vpp_config()
ip6_addr = VppIpInterfaceAddress(self, gbd2.bvi,
"2001:11::128", 128).add_vpp_config()
#
# The Endpoint-groups in which we are learning endpoints
#
epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
None, gbd1.bvi,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(60))
epg_220.add_vpp_config()
epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
None, gbd2.bvi,
"10.0.1.128",
"2001:11::128",
VppGbpEndpointRetention(60))
epg_221.add_vpp_config()
epg_222 = VppGbpEndpointGroup(self, 222, 442, rd1, gbd1,
None, gbd1.bvi,
"10.0.2.128",
"2001:12::128",
VppGbpEndpointRetention(60))
epg_222.add_vpp_config()
#
        # GBP bridge domains for the SEPs
#
bd_uu1 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
self.pg7.remote_ip4, 116)
bd_uu1.add_vpp_config()
bd_uu2 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
self.pg7.remote_ip4, 117)
bd_uu2.add_vpp_config()
bd3 = VppBridgeDomain(self, 3)
bd3.add_vpp_config()
gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
bd_uu1, learn=False)
gbd3.add_vpp_config()
bd4 = VppBridgeDomain(self, 4)
bd4.add_vpp_config()
gbd4 = VppGbpBridgeDomain(self, bd4, rd1, self.loop3,
bd_uu2, learn=False)
gbd4.add_vpp_config()
#
# EPGs in which the service endpoints exist
#
epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
None, gbd1.bvi,
"12.0.0.128",
"4001:10::128",
VppGbpEndpointRetention(60))
epg_320.add_vpp_config()
epg_321 = VppGbpEndpointGroup(self, 321, 551, rd1, gbd4,
None, gbd2.bvi,
"12.0.1.128",
"4001:11::128",
VppGbpEndpointRetention(60))
epg_321.add_vpp_config()
#
# three local endpoints
#
ep1 = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.1", "11.0.0.1",
"2001:10::1", "3001:10::1")
ep1.add_vpp_config()
ep2 = VppGbpEndpoint(self, self.pg1,
epg_221, None,
"10.0.1.1", "11.0.1.1",
"2001:11::1", "3001:11::1")
ep2.add_vpp_config()
ep3 = VppGbpEndpoint(self, self.pg2,
epg_222, None,
"10.0.2.2", "11.0.2.2",
"2001:12::1", "3001:12::1")
ep3.add_vpp_config()
#
# service endpoints
#
sep1 = VppGbpEndpoint(self, self.pg3,
epg_320, None,
"12.0.0.1", "13.0.0.1",
"4001:10::1", "5001:10::1")
sep1.add_vpp_config()
sep2 = VppGbpEndpoint(self, self.pg4,
epg_320, None,
"12.0.0.2", "13.0.0.2",
"4001:10::2", "5001:10::2")
sep2.add_vpp_config()
sep3 = VppGbpEndpoint(self, self.pg5,
epg_321, None,
"12.0.1.1", "13.0.1.1",
"4001:11::1", "5001:11::1")
sep3.add_vpp_config()
# this EP is not installed immediately
sep4 = VppGbpEndpoint(self, self.pg6,
epg_321, None,
"12.0.1.2", "13.0.1.2",
"4001:11::2", "5001:11::2")
#
# an L2 switch packet between local EPs in different EPGs
        # different dest ports on each so they are LB hashed differently
#
p4 = [(Ether(src=ep1.mac, dst=ep3.mac) /
IP(src=ep1.ip4, dst=ep3.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep3.mac, dst=ep1.mac) /
IP(src=ep3.ip4, dst=ep1.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
p6 = [(Ether(src=ep1.mac, dst=ep3.mac) /
IPv6(src=ep1.ip6, dst=ep3.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep3.mac, dst=ep1.mac) /
IPv6(src=ep3.ip6, dst=ep1.ip6) /
UDP(sport=1234, dport=1230) /
Raw(b'\xa5' * 100))]
# should be dropped since no contract yet
self.send_and_assert_no_replies(self.pg0, [p4[0]])
self.send_and_assert_no_replies(self.pg0, [p6[0]])
#
# Add a contract with a rule to load-balance redirect via SEP1 and SEP2
# one of the next-hops is via an EP that is not known
#
rule4 = AclRule(is_permit=1, proto=17)
rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule4, rule6])
acl.add_vpp_config()
#
# test the src-ip hash mode
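        # (src-ip hashes on the source address only, dst-ip on the
        #  destination only, and symmetric on both so that the two
        #  directions of a flow pick the same SEP)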
#
c1 = VppGbpContract(
self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
c2 = VppGbpContract(
self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
#
        # send again with the contract present; now packets arrive
# at SEP1 or SEP2 depending on the hashing
#
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep3.ip4)
rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep2.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep2.mac)
self.assertEqual(rx[IP].src, ep3.ip4)
self.assertEqual(rx[IP].dst, ep1.ip4)
rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 117)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep4.mac)
self.assertEqual(inner[IPv6].src, ep1.ip6)
self.assertEqual(inner[IPv6].dst, ep3.ip6)
rxs = self.send_and_expect(self.pg2, p6[1] * 17, sep3.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep3.mac)
self.assertEqual(rx[IPv6].src, ep3.ip6)
self.assertEqual(rx[IPv6].dst, ep1.ip6)
#
# programme the unknown EP
#
sep4.add_vpp_config()
rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep4.mac)
self.assertEqual(rx[IPv6].src, ep1.ip6)
self.assertEqual(rx[IPv6].dst, ep3.ip6)
#
        # and revert to unprogrammed
#
sep4.remove_vpp_config()
rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 117)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep4.mac)
self.assertEqual(inner[IPv6].src, ep1.ip6)
self.assertEqual(inner[IPv6].dst, ep3.ip6)
c1.remove_vpp_config()
c2.remove_vpp_config()
#
# test the symmetric hash mode
#
c1 = VppGbpContract(
self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
c2 = VppGbpContract(
self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
#
        # send again with the contract present; now packets arrive
# at SEP1 for both directions
#
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep3.ip4)
rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep3.ip4)
self.assertEqual(rx[IP].dst, ep1.ip4)
#
# programme the unknown EP for the L3 tests
#
sep4.add_vpp_config()
#
# an L3 switch packet between local EPs in different EPGs
        # different dest ports on each so they are LB hashed differently
#
p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IP(src=ep1.ip4, dst=ep2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep2.mac, dst=str(self.router_mac)) /
IP(src=ep2.ip4, dst=ep1.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IPv6(src=ep1.ip6, dst=ep2.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep2.mac, dst=str(self.router_mac)) /
IPv6(src=ep2.ip6, dst=ep1.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
c3 = VppGbpContract(
self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep2.ip4)
#
# learn a remote EP in EPG 221
        # packets coming from unknown remote EPs will be learnt & redirected
#
vx_tun_l3 = VppGbpVxlanTunnel(
self, 444, rd1.rd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
self.pg2.local_ip4)
vx_tun_l3.add_vpp_config()
c4 = VppGbpContract(
self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c4.add_vpp_config()
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=441, flags=0x88) /
Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
IP(src="10.0.0.88", dst=ep1.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
# unknown remote EP to local EP redirected
rxs = self.send_and_expect(self.pg7, [p], sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, "10.0.0.88")
self.assertEqual(rx[IP].dst, ep1.ip4)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip="10.0.0.88"))
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=441, flags=0x88) /
Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
IPv6(src="2001:10::88", dst=ep1.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
# unknown remote EP to local EP redirected (ipv6)
rxs = self.send_and_expect(self.pg7, [p], sep3.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep3.mac)
self.assertEqual(rx[IPv6].src, "2001:10::88")
self.assertEqual(rx[IPv6].dst, ep1.ip6)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip="2001:10::88"))
#
# L3 switch from local to remote EP
#
p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IP(src=ep1.ip4, dst="10.0.0.88") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IPv6(src=ep1.ip6, dst="2001:10::88") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, "10.0.0.88")
rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep4.mac)
self.assertEqual(rx[IPv6].src, ep1.ip6)
self.assertEqual(rx[IPv6].dst, "2001:10::88")
#
# test the dst-ip hash mode
#
c5 = VppGbpContract(
self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd),
VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
sep2.ip4, sep2.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
sep3.ip6, sep3.epg.rd),
VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
sep4.ip6, sep4.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c5.add_vpp_config()
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, "10.0.0.88")
rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep3.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep3.mac)
self.assertEqual(rx[IPv6].src, ep1.ip6)
self.assertEqual(rx[IPv6].dst, "2001:10::88")
#
# a programmed remote SEP in EPG 320
#
# gbp vxlan tunnel for the remote SEP
vx_tun_l3_sep = VppGbpVxlanTunnel(
self, 555, rd1.rd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
self.pg2.local_ip4)
vx_tun_l3_sep.add_vpp_config()
# remote SEP
sep5 = VppGbpEndpoint(self, vx_tun_l3_sep,
epg_320, None,
"12.0.0.10", "13.0.0.10",
"4001:10::10", "5001:10::10",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg7.local_ip4,
self.pg7.remote_ip4,
mac=None)
sep5.add_vpp_config()
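        # sep5 is a REMOTE endpoint behind the gbp-vxlan tunnel above, so
        # traffic redirected to it is VXLAN-GBP encapsulated towards its
        # TEP on pg7, as the checks below verify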
#
# local l3out redirect tests
#
# add local l3out
# the external bd
self.loop4.set_mac(self.router_mac)
b_lo4_ip4 = VppIpInterfaceBind(self, self.loop4, t4).add_vpp_config()
b_lo4_ip6 = VppIpInterfaceBind(self, self.loop4, t6).add_vpp_config()
ebd = VppBridgeDomain(self, 100)
ebd.add_vpp_config()
gebd = VppGbpBridgeDomain(self, ebd, rd1, self.loop4, None, None)
gebd.add_vpp_config()
# the external epg
eepg = VppGbpEndpointGroup(self, 888, 765, rd1, gebd,
None, gebd.bvi,
"10.1.0.128",
"2001:10:1::128",
VppGbpEndpointRetention(60))
eepg.add_vpp_config()
# add subnets to BVI
VppIpInterfaceAddress(
self,
gebd.bvi,
"10.1.0.128",
24, bind=b_lo4_ip4).add_vpp_config()
VppIpInterfaceAddress(
self,
gebd.bvi,
"2001:10:1::128",
64, bind=b_lo4_ip6).add_vpp_config()
# ... which are L3-out subnets
VppGbpSubnet(self, rd1, "10.1.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=765).add_vpp_config()
VppGbpSubnet(self, rd1, "2001:10:1::128", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=765).add_vpp_config()
# external endpoints
VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
eep1 = VppGbpEndpoint(self, self.vlan_100, eepg, None, "10.1.0.1",
"11.1.0.1", "2001:10:1::1", "3001:10:1::1",
ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
eep1.add_vpp_config()
VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
eep2 = VppGbpEndpoint(self, self.vlan_101, eepg, None, "10.1.0.2",
"11.1.0.2", "2001:10:1::2", "3001:10:1::2",
ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
eep2.add_vpp_config()
        # external subnets reachable through eep1 and eep2 respectively
VppIpRoute(self, "10.220.0.0", 24,
[VppRoutePath(eep1.ip4, eep1.epg.bvi.sw_if_index)],
table_id=t4.table_id).add_vpp_config()
VppGbpSubnet(self, rd1, "10.220.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220).add_vpp_config()
VppIpRoute(self, "10:220::", 64,
[VppRoutePath(eep1.ip6, eep1.epg.bvi.sw_if_index)],
table_id=t6.table_id).add_vpp_config()
VppGbpSubnet(self, rd1, "10:220::", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220).add_vpp_config()
VppIpRoute(self, "10.221.0.0", 24,
[VppRoutePath(eep2.ip4, eep2.epg.bvi.sw_if_index)],
table_id=t4.table_id).add_vpp_config()
VppGbpSubnet(self, rd1, "10.221.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4221).add_vpp_config()
VppIpRoute(self, "10:221::", 64,
[VppRoutePath(eep2.ip6, eep2.epg.bvi.sw_if_index)],
table_id=t6.table_id).add_vpp_config()
VppGbpSubnet(self, rd1, "10:221::", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4221).add_vpp_config()
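        # the external prefixes are installed both as routes via the
        # external EPs and as L3_OUT GBP subnets with a fixed sclass
        # (4220/4221) that classifies traffic from them for contract lookup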
#
# l3out redirect to remote (known, then unknown) SEP
#
        # packets from one external subnet to the other
p = [(Ether(src=eep1.mac, dst=self.router_mac) /
Dot1Q(vlan=100) /
IP(src="10.220.0.17", dst="10.221.0.65") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=eep1.mac, dst=self.router_mac) /
Dot1Q(vlan=100) /
IPv6(src="10:220::17", dst="10:221::65") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
        # packets should be dropped in the absence of a contract
self.send_and_assert_no_replies(self.pg0, p)
# contract redirecting to sep5
VppGbpContract(
self, 402, 4220, 4221, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
sep5.ip4, sep5.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
sep5.ip6, sep5.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
rxs = self.send_and_expect(self.pg0, p, self.pg7)
for rx, tx in zip(rxs, p):
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
# this should use the programmed remote leaf TEP
self.assertEqual(rx[VXLAN].vni, 555)
self.assertEqual(rx[VXLAN].gpid, 4220)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertTrue(rx[VXLAN].gpflags.D)
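            # D (don't-learn) set tells the receiving VTEP not to learn
            # the inner source of this redirected packet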
rxip = rx[VXLAN][Ether].payload
txip = tx[Dot1Q].payload
self.assertEqual(rxip.src, txip.src)
self.assertEqual(rxip.dst, txip.dst)
# remote SEP: it is now an unknown remote SEP and should go
# to spine proxy
sep5.remove_vpp_config()
rxs = self.send_and_expect(self.pg0, p, self.pg7)
for rx, tx in zip(rxs, p):
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
# this should use the spine proxy TEP
self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
self.assertEqual(rx[VXLAN].gpid, 4220)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertTrue(rx[VXLAN].gpflags.D)
rxip = rx[VXLAN][Ether].payload
txip = tx[Dot1Q].payload
self.assertEqual(rxip.src, txip.src)
self.assertEqual(rxip.dst, txip.dst)
#
# l3out redirect to local SEP
#
        # change the contract between the l3out subnets to redirect to a
        # local SEP instead of the remote SEP
VppGbpContract(
self, 402, 4220, 4221, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip6, sep1.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
rxs = self.send_and_expect(self.pg0, p, sep1.itf)
for rx, tx in zip(rxs, p):
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
rxip = rx[Ether].payload
txip = tx[Ether].payload
self.assertEqual(rxip.src, txip.src)
self.assertEqual(rxip.dst, txip.dst)
#
# redirect remote EP to remote (known then unknown) SEP
#
# remote SEP known again
sep5.add_vpp_config()
# contract to redirect to learnt SEP
VppGbpContract(
self, 402, epg_221.sclass, epg_222.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
sep5.ip4, sep5.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
[VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
sep5.ip6, sep5.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
        # packets from an unknown remote EP in EPG 221 to a known EP in
        # EPG 222 should be redirected to the known remote SEP
base = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=441, flags=0x88) /
Ether(src="00:22:22:22:22:44", dst=str(self.router_mac)))
p = [(base /
IP(src="10.0.1.100", dst=ep3.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(base /
IPv6(src="2001:10::100", dst=ep3.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
# unknown remote EP to local EP redirected to known remote SEP
rxs = self.send_and_expect(self.pg7, p, self.pg7)
for rx, tx in zip(rxs, p):
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
# this should use the programmed remote leaf TEP
self.assertEqual(rx[VXLAN].vni, 555)
self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
rxip = rx[VXLAN][Ether].payload
txip = tx[VXLAN][Ether].payload
self.assertEqual(rxip.src, txip.src)
self.assertEqual(rxip.dst, txip.dst)
# endpoint learnt via the parent GBP-vxlan interface
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip="10.0.1.100"))
self.assertTrue(find_gbp_endpoint(self,
vx_tun_l3._sw_if_index,
ip="2001:10::100"))
# remote SEP: it is now an unknown remote SEP and should go
# to spine proxy
sep5.remove_vpp_config()
        # remote EP (coming from the spine proxy) to local EP redirected to
        # the now unknown remote SEP, i.e. sent to the spine proxy
rxs = self.send_and_expect(self.pg7, p, self.pg7)
for rx, tx in zip(rxs, p):
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
# this should use the spine proxy TEP
self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
rxip = rx[VXLAN][Ether].payload
txip = tx[VXLAN][Ether].payload
self.assertEqual(rxip.src, txip.src)
self.assertEqual(rxip.dst, txip.dst)
#
# cleanup
#
self.pg7.unconfig_ip4()
def test_gbp_redirect_extended(self):
""" GBP Endpoint Redirect Extended """
self.vapi.cli("set logging class gbp level debug")
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
routed_dst_mac = "00:0c:0c:0c:0c:0c"
routed_src_mac = "00:22:bd:f8:19:ff"
learnt = [{'mac': '00:00:11:11:11:02',
'ip': '10.0.1.2',
'ip6': '2001:10::2'},
{'mac': '00:00:11:11:11:03',
'ip': '10.0.1.3',
'ip6': '2001:10::3'}]
#
# IP tables
#
t4 = VppIpTable(self, 1)
t4.add_vpp_config()
t6 = VppIpTable(self, 1, True)
t6.add_vpp_config()
        # create IPv4 and IPv6 RD UU VxLAN-GBP TEPs and bind them to the right
# VRF
rd_uu4 = VppVxlanGbpTunnel(
self,
self.pg7.local_ip4,
self.pg7.remote_ip4,
114,
mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L3))
rd_uu4.add_vpp_config()
VppIpInterfaceBind(self, rd_uu4, t4).add_vpp_config()
rd_uu6 = VppVxlanGbpTunnel(
self,
self.pg7.local_ip4,
self.pg7.remote_ip4,
115,
mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L3))
rd_uu6.add_vpp_config()
VppIpInterfaceBind(self, rd_uu6, t4).add_vpp_config()
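        # these two L3-mode tunnels act as the route domain's IPv4 and
        # IPv6 UU-forwarding interfaces: traffic to addresses in a
        # transport subnet with no known endpoint is sent over them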
rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6, rd_uu4, rd_uu6)
rd1.add_vpp_config()
self.loop0.set_mac(self.router_mac)
self.loop1.set_mac(self.router_mac)
self.loop2.set_mac(self.router_mac)
#
# Bind the BVI to the RD
#
b_lo0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
b_lo0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
b_lo1_ip4 = VppIpInterfaceBind(self, self.loop1, t4).add_vpp_config()
b_lo1_ip6 = VppIpInterfaceBind(self, self.loop1, t6).add_vpp_config()
b_lo2_ip4 = VppIpInterfaceBind(self, self.loop2, t4).add_vpp_config()
b_lo2_ip6 = VppIpInterfaceBind(self, self.loop2, t6).add_vpp_config()
#
# Pg7 hosts a BD's UU-fwd
#
self.pg7.config_ip4()
self.pg7.resolve_arp()
#
        # GBP bridge domains for the EPs
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
gbd1.add_vpp_config()
bd2 = VppBridgeDomain(self, 2)
bd2.add_vpp_config()
gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
gbd2.add_vpp_config()
# ... and has a /32 and /128 applied
ip4_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 32,
bind=b_lo0_ip4).add_vpp_config()
ip6_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
"2001:10::128", 128,
bind=b_lo0_ip6).add_vpp_config()
ip4_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
"10.0.1.128", 32,
bind=b_lo1_ip4).add_vpp_config()
ip6_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
"2001:11::128", 128,
bind=b_lo1_ip6).add_vpp_config()
#
# The Endpoint-groups
#
epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
None, gbd1.bvi,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(60))
epg_220.add_vpp_config()
epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
None, gbd2.bvi,
"10.0.1.128",
"2001:11::128",
VppGbpEndpointRetention(60))
epg_221.add_vpp_config()
#
        # a GBP bridge domain for the SEPs
#
bd_uu3 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
self.pg7.remote_ip4, 116)
bd_uu3.add_vpp_config()
bd3 = VppBridgeDomain(self, 3)
bd3.add_vpp_config()
gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
bd_uu3, learn=False)
gbd3.add_vpp_config()
ip4_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
"12.0.0.128", 32,
bind=b_lo2_ip4).add_vpp_config()
ip6_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
"4001:10::128", 128,
bind=b_lo2_ip6).add_vpp_config()
#
# self.logger.info(self.vapi.cli("show gbp bridge"))
# self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
# self.logger.info(self.vapi.cli("show gbp vxlan"))
# self.logger.info(self.vapi.cli("show int addr"))
#
#
# EPGs in which the service endpoints exist
#
epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
None, gbd3.bvi,
"12.0.0.128",
"4001:10::128",
VppGbpEndpointRetention(60))
epg_320.add_vpp_config()
#
# endpoints
#
ep1 = VppGbpEndpoint(self, self.pg0,
epg_220, None,
"10.0.0.1", "11.0.0.1",
"2001:10::1", "3001:10::1")
ep1.add_vpp_config()
ep2 = VppGbpEndpoint(self, self.pg1,
epg_221, None,
"10.0.1.1", "11.0.1.1",
"2001:11::1", "3001:11::1")
ep2.add_vpp_config()
#
# service endpoints
#
sep1 = VppGbpEndpoint(self, self.pg3,
epg_320, None,
"12.0.0.1", "13.0.0.1",
"4001:10::1", "5001:10::1")
sep2 = VppGbpEndpoint(self, self.pg4,
epg_320, None,
"12.0.0.2", "13.0.0.2",
"4001:10::2", "5001:10::2")
# sep1 and sep2 are not added to config yet
# they are unknown for now
#
# add routes to EPG subnets
#
VppGbpSubnet(self, rd1, "10.0.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
).add_vpp_config()
VppGbpSubnet(self, rd1, "10.0.1.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
).add_vpp_config()
#
        # Local host to known local host in a different BD
        # with SFC contract (source and destination are in
        # one node and the service endpoint is in another node)
#
p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IP(src=ep1.ip4, dst=ep2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep2.mac, dst=str(self.router_mac)) /
IP(src=ep2.ip4, dst=ep1.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))]
p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
IPv6(src=ep1.ip6, dst=ep2.ip6) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=ep2.mac, dst=str(self.router_mac)) /
IPv6(src=ep2.ip6, dst=ep1.ip6) /
UDP(sport=1234, dport=1230) /
Raw(b'\xa5' * 100))]
# should be dropped since no contract yet
self.send_and_assert_no_replies(self.pg0, [p4[0]])
self.send_and_assert_no_replies(self.pg0, [p6[0]])
#
        # Add a contract with a rule to load-balance redirect via SEP1 and SEP2;
        # one of the next-hops is via an EP that is not yet known
#
rule4 = AclRule(is_permit=1, proto=17)
rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule4, rule6])
acl.add_vpp_config()
#
        # test the symmetric hash mode
#
c1 = VppGbpContract(
self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip6, sep1.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
c2 = VppGbpContract(
self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip4, sep1.epg.rd)]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
[VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
sep1.ip6, sep1.epg.rd)])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
# ep1 <--> ep2 redirected through sep1
# sep1 is unknown
        # the packet is redirected to the SEP BD and then goes through the SEP BD UU
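        # (in the VXLAN-GBP header the A group-policy flag means the policy has
        #  already been applied and the D flag tells the receiver not to learn
        #  the source; the checks below expect A set and D clear)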
rxs = self.send_and_expect(self.pg0, p4[0] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 116)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep1.mac)
self.assertEqual(inner[IP].src, ep1.ip4)
self.assertEqual(inner[IP].dst, ep2.ip4)
rxs = self.send_and_expect(self.pg1, p4[1] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 116)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep1.mac)
self.assertEqual(inner[IP].src, ep2.ip4)
self.assertEqual(inner[IP].dst, ep1.ip4)
rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 116)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep1.mac)
self.assertEqual(inner[IPv6].src, ep1.ip6)
self.assertEqual(inner[IPv6].dst, ep2.ip6)
rxs = self.send_and_expect(self.pg1, p6[1] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 116)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, sep1.mac)
self.assertEqual(inner[IPv6].src, ep2.ip6)
self.assertEqual(inner[IPv6].dst, ep1.ip6)
# configure sep1: it is now local
# packets between ep1 and ep2 are redirected locally
sep1.add_vpp_config()
rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep2.ip4)
rxs = self.send_and_expect(self.pg1, p6[1] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, routed_src_mac)
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IPv6].src, ep2.ip6)
self.assertEqual(rx[IPv6].dst, ep1.ip6)
# packet coming from the l2 spine-proxy to sep1
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=116, gpid=440, gpflags=0x08, flags=0x88) /
Ether(src=str(self.router_mac), dst=sep1.mac) /
IP(src=ep1.ip4, dst=ep2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, [p] * 17, sep1.itf)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, sep1.mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep2.ip4)
# contract for SEP to communicate with dst EP
c3 = VppGbpContract(
self, 402, epg_320.sclass, epg_221.sclass, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC)],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
# temporarily remove ep2, so that ep2 is remote & unknown
ep2.remove_vpp_config()
# packet going back from sep1 to its original dest (ep2)
# as ep2 is now unknown (see above), it must go through
# the rd UU (packet is routed)
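        # (the rd UU-fwd is expected to be a vxlan-gbp tunnel towards pg7,
        #  hence the vni 114 encapsulation checked below)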
p1 = (Ether(src=sep1.mac, dst=self.router_mac) /
IP(src=ep1.ip4, dst=ep2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg3, [p1] * 17, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 114)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# redirect policy has been applied
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].src, routed_src_mac)
self.assertEqual(inner[Ether].dst, routed_dst_mac)
self.assertEqual(inner[IP].src, ep1.ip4)
self.assertEqual(inner[IP].dst, ep2.ip4)
self.logger.info(self.vapi.cli("show bridge 3 detail"))
sep1.remove_vpp_config()
self.logger.info(self.vapi.cli("show bridge 1 detail"))
self.logger.info(self.vapi.cli("show bridge 2 detail"))
# re-add ep2: it is local again :)
ep2.add_vpp_config()
# packet coming back from the remote sep through rd UU
p2 = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=114, gpid=441, gpflags=0x09, flags=0x88) /
Ether(src=str(self.router_mac), dst=self.router_mac) /
IP(src=ep1.ip4, dst=ep2.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, [p2], self.pg1)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
self.assertEqual(rx[IP].src, ep1.ip4)
self.assertEqual(rx[IP].dst, ep2.ip4)
#
# bd_uu2.add_vpp_config()
#
#
# cleanup
#
c1.remove_vpp_config()
c2.remove_vpp_config()
c3.remove_vpp_config()
self.pg7.unconfig_ip4()
def test_gbp_l3_out(self):
""" GBP L3 Out """
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
self.vapi.cli("set logging class gbp level debug")
routed_dst_mac = "00:0c:0c:0c:0c:0c"
routed_src_mac = "00:22:bd:f8:19:ff"
#
# IP tables
#
t4 = VppIpTable(self, 1)
t4.add_vpp_config()
t6 = VppIpTable(self, 1, True)
t6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
rd1.add_vpp_config()
self.loop0.set_mac(self.router_mac)
#
# Bind the BVI to the RD
#
b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
#
# Pg7 hosts a BD's BUM
# Pg1 some other l3 interface
#
self.pg7.config_ip4()
self.pg7.resolve_arp()
#
# a multicast vxlan-gbp tunnel for broadcast in the BD
#
tun_bm = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
"239.1.1.1", 88,
mcast_itf=self.pg7)
tun_bm.add_vpp_config()
#
        # a GBP external bridge domain for the EPs
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, tun_bm)
gbd1.add_vpp_config()
#
# The Endpoint-groups in which the external endpoints exist
#
epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
None, gbd1.bvi,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(4))
epg_220.add_vpp_config()
# the BVIs have the subnets applied ...
ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128",
24, bind=b_ip4).add_vpp_config()
ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi, "2001:10::128",
64, bind=b_ip6).add_vpp_config()
# ... which are L3-out subnets
l3o_1 = VppGbpSubnet(
self, rd1, "10.0.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=113)
l3o_1.add_vpp_config()
#
# an external interface attached to the outside world and the
# external BD
#
VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
vlan_144 = VppDot1QSubint(self, self.pg0, 144)
vlan_144.admin_up()
        # vlan_102 is not popped
#
        # a unicast vxlan-gbp tunnel for inter-RD traffic
#
vx_tun_l3 = VppGbpVxlanTunnel(
self, 444, rd1.rd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
self.pg2.local_ip4)
vx_tun_l3.add_vpp_config()
#
# External Endpoints
#
eep1 = VppGbpEndpoint(self, self.vlan_100,
epg_220, None,
"10.0.0.1", "11.0.0.1",
"2001:10::1", "3001::1",
ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
eep1.add_vpp_config()
eep2 = VppGbpEndpoint(self, self.vlan_101,
epg_220, None,
"10.0.0.2", "11.0.0.2",
"2001:10::2", "3001::2",
ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
eep2.add_vpp_config()
eep3 = VppGbpEndpoint(self, self.vlan_102,
epg_220, None,
"10.0.0.3", "11.0.0.3",
"2001:10::3", "3001::3",
ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
eep3.add_vpp_config()
#
# A remote external endpoint
#
rep = VppGbpEndpoint(self, vx_tun_l3,
epg_220, None,
"10.0.0.101", "11.0.0.101",
"2001:10::101", "3001::101",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg7.local_ip4,
self.pg7.remote_ip4,
mac=None)
rep.add_vpp_config()
#
# EP1 impersonating EP3 is dropped
#
p = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
Dot1Q(vlan=100) /
ARP(op="who-has",
psrc="10.0.0.3", pdst="10.0.0.128",
hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
self.send_and_assert_no_replies(self.pg0, p)
#
        # ARP packets from external EPs are accepted and replied to
#
p_arp = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
Dot1Q(vlan=100) /
ARP(op="who-has",
psrc=eep1.ip4, pdst="10.0.0.128",
hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
#
        # ARP packets from a host in the remote subnet are accepted and replied to
#
p_arp = (Ether(src=eep3.mac, dst="ff:ff:ff:ff:ff:ff") /
Dot1Q(vlan=102) /
ARP(op="who-has",
psrc=eep3.ip4, pdst="10.0.0.128",
hwsrc=eep3.mac, hwdst="ff:ff:ff:ff:ff:ff"))
rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
#
# packets destined to unknown addresses in the BVI's subnet
# are ARP'd for
#
p4 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.0.0.1", dst="10.0.0.88") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
p6 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IPv6(src="2001:10::1", dst="2001:10::88") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p4 * 1, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
# self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, "239.1.1.1")
self.assertEqual(rx[VXLAN].vni, 88)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# policy was applied to the original IP packet
self.assertEqual(rx[VXLAN].gpid, 113)
self.assertTrue(rx[VXLAN].gpflags.A)
self.assertFalse(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertTrue(inner.haslayer(ARP))
#
# remote to external
#
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=113, flags=0x88) /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.0.0.101", dst="10.0.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
#
# local EP pings router
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src=eep1.ip4, dst="10.0.0.128") /
ICMP(type='echo-request'))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, eep1.mac)
self.assertEqual(rx[Dot1Q].vlan, 100)
#
# local EP pings other local EP
#
p = (Ether(src=eep1.mac, dst=eep2.mac) /
Dot1Q(vlan=100) /
IP(src=eep1.ip4, dst=eep2.ip4) /
ICMP(type='echo-request'))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, eep1.mac)
self.assertEqual(rx[Ether].dst, eep2.mac)
self.assertEqual(rx[Dot1Q].vlan, 101)
#
        # local EP pings router w/o the vlan tag popped
#
p = (Ether(src=eep3.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=102) /
IP(src=eep3.ip4, dst="10.0.0.128") /
ICMP(type='echo-request'))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, self.vlan_102.remote_mac)
#
        # An ip4 subnet reachable through the external EP1
#
ip_220 = VppIpRoute(self, "10.220.0.0", 24,
[VppRoutePath(eep1.ip4,
eep1.epg.bvi.sw_if_index)],
table_id=t4.table_id)
ip_220.add_vpp_config()
l3o_220 = VppGbpSubnet(
self, rd1, "10.220.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220)
l3o_220.add_vpp_config()
#
# An ip6 subnet reachable through the external EP1
#
ip6_220 = VppIpRoute(self, "10:220::", 64,
[VppRoutePath(eep1.ip6,
eep1.epg.bvi.sw_if_index)],
table_id=t6.table_id)
ip6_220.add_vpp_config()
l3o6_220 = VppGbpSubnet(
self, rd1, "10:220::", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220)
l3o6_220.add_vpp_config()
#
# A subnet reachable through the external EP2
#
ip_221 = VppIpRoute(self, "10.221.0.0", 24,
[VppRoutePath(eep2.ip4,
eep2.epg.bvi.sw_if_index)],
table_id=t4.table_id)
ip_221.add_vpp_config()
l3o_221 = VppGbpSubnet(
self, rd1, "10.221.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4221)
l3o_221.add_vpp_config()
#
# ping between hosts in remote subnets
# dropped without a contract
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.221.0.1") /
ICMP(type='echo-request'))
self.send_and_assert_no_replies(self.pg0, p * 1)
#
# contract for the external nets to communicate
#
rule4 = AclRule(is_permit=1, proto=17)
rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule4, rule6])
acl.add_vpp_config()
#
# A contract with the wrong scope is not matched
#
c_44 = VppGbpContract(
self, 44, 4220, 4221, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c_44.add_vpp_config()
self.send_and_assert_no_replies(self.pg0, p * 1)
c1 = VppGbpContract(
self, 55, 4220, 4221, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
#
        # Contracts allowing ext-net 220 to talk with external EPs
#
c2 = VppGbpContract(
self, 55, 4220, 113, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
c3 = VppGbpContract(
self, 55, 113, 4220, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
#
# ping between hosts in remote subnets
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.221.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, eep2.mac)
self.assertEqual(rx[Dot1Q].vlan, 101)
# we did not learn these external hosts
self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
#
# from remote external EP to local external EP
#
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=113, flags=0x88) /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.0.0.101", dst="10.220.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
#
# ping from an external host to the remote external EP
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst=rep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
# self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 444)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# the sclass of the ext-net the packet came from
self.assertEqual(rx[VXLAN].gpid, 4220)
# policy was applied to the original IP packet
self.assertTrue(rx[VXLAN].gpflags.A)
            # since it's an external host the receiver should not learn it
self.assertTrue(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[IP].src, "10.220.0.1")
self.assertEqual(inner[IP].dst, rep.ip4)
#
# An external subnet reachable via the remote external EP
#
#
# first the VXLAN-GBP tunnel over which it is reached
#
vx_tun_r1 = VppVxlanGbpTunnel(
self, self.pg7.local_ip4,
self.pg7.remote_ip4, 445,
mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L3))
vx_tun_r1.add_vpp_config()
VppIpInterfaceBind(self, vx_tun_r1, t4).add_vpp_config()
self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
#
# then the special adj to resolve through on that tunnel
#
n1 = VppNeighbor(self,
vx_tun_r1.sw_if_index,
"00:0c:0c:0c:0c:0c",
self.pg7.remote_ip4)
n1.add_vpp_config()
#
# the route via the adj above
#
ip_222 = VppIpRoute(self, "10.222.0.0", 24,
[VppRoutePath(self.pg7.remote_ip4,
vx_tun_r1.sw_if_index)],
table_id=t4.table_id)
ip_222.add_vpp_config()
l3o_222 = VppGbpSubnet(
self, rd1, "10.222.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4222)
l3o_222.add_vpp_config()
#
# ping between hosts in local and remote external subnets
# dropped without a contract
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
#
# Add contracts ext-nets for 220 -> 222
#
c4 = VppGbpContract(
self, 55, 4220, 4222, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c4.add_vpp_config()
#
# ping from host in local to remote external subnets
#
p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 445)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# the sclass of the ext-net the packet came from
self.assertEqual(rx[VXLAN].gpid, 4220)
# policy was applied to the original IP packet
self.assertTrue(rx[VXLAN].gpflags.A)
            # since it's an external host the receiver should not learn it
self.assertTrue(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
self.assertEqual(inner[IP].src, "10.220.0.1")
self.assertEqual(inner[IP].dst, "10.222.0.1")
#
# make the external subnet ECMP
#
vx_tun_r2 = VppVxlanGbpTunnel(
self, self.pg7.local_ip4,
self.pg7.remote_ip4, 446,
mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L3))
vx_tun_r2.add_vpp_config()
VppIpInterfaceBind(self, vx_tun_r2, t4).add_vpp_config()
self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
n2 = VppNeighbor(self,
vx_tun_r2.sw_if_index,
"00:0c:0c:0c:0c:0c",
self.pg7.remote_ip4)
n2.add_vpp_config()
ip_222.modify([VppRoutePath(self.pg7.remote_ip4,
vx_tun_r1.sw_if_index),
VppRoutePath(self.pg7.remote_ip4,
vx_tun_r2.sw_if_index)])
#
# now expect load-balance
#
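        # (the two flows below differ only in their UDP ports, so the ECMP
        #  hash is expected to spread them across the two vxlan-gbp tunnels;
        #  vni 445 and 446 are asserted below)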
p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1222, dport=1235) /
Raw(b'\xa5' * 100))]
rxs = self.send_and_expect(self.pg0, p, self.pg7)
self.assertEqual(rxs[0][VXLAN].vni, 445)
self.assertEqual(rxs[1][VXLAN].vni, 446)
#
# Same LB test for v6
#
n3 = VppNeighbor(self,
vx_tun_r1.sw_if_index,
"00:0c:0c:0c:0c:0c",
self.pg7.remote_ip6)
n3.add_vpp_config()
n4 = VppNeighbor(self,
vx_tun_r2.sw_if_index,
"00:0c:0c:0c:0c:0c",
self.pg7.remote_ip6)
n4.add_vpp_config()
ip_222_6 = VppIpRoute(self, "10:222::", 64,
[VppRoutePath(self.pg7.remote_ip6,
vx_tun_r1.sw_if_index),
VppRoutePath(self.pg7.remote_ip6,
vx_tun_r2.sw_if_index)],
table_id=t6.table_id)
ip_222_6.add_vpp_config()
l3o_222_6 = VppGbpSubnet(
self, rd1, "10:222::", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4222)
l3o_222_6.add_vpp_config()
p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IPv6(src="10:220::1", dst="10:222::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=eep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IPv6(src="10:220::1", dst="10:222::1") /
UDP(sport=7777, dport=8881) /
Raw(b'\xa5' * 100))]
self.logger.info(self.vapi.cli("sh ip6 fib 10:222::1"))
rxs = self.send_and_expect(self.pg0, p, self.pg7)
self.assertEqual(rxs[0][VXLAN].vni, 445)
self.assertEqual(rxs[1][VXLAN].vni, 446)
#
# ping from host in remote to local external subnets
# there's no contract for this, but the A bit is set.
#
p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.222.0.1", dst="10.220.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
#
# ping from host in remote to remote external subnets
        # this is dropped by the reflection check.
#
p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.222.0.1", dst="10.222.0.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IPv6(src="10:222::1", dst="10:222::2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
#
# local EP
#
lep1 = VppGbpEndpoint(self, vlan_144,
epg_220, None,
"10.0.0.44", "11.0.0.44",
"2001:10::44", "3001::44")
lep1.add_vpp_config()
#
# local EP to local ip4 external subnet
#
p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IP(src=lep1.ip4, dst="10.220.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, eep1.mac)
self.assertEqual(rx[Dot1Q].vlan, 100)
#
# local EP to local ip6 external subnet
#
p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IPv6(src=lep1.ip6, dst="10:220::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, eep1.mac)
self.assertEqual(rx[Dot1Q].vlan, 100)
#
# ip4 and ip6 subnets that load-balance
#
ip_20 = VppIpRoute(self, "10.20.0.0", 24,
[VppRoutePath(eep1.ip4,
eep1.epg.bvi.sw_if_index),
VppRoutePath(eep2.ip4,
eep2.epg.bvi.sw_if_index)],
table_id=t4.table_id)
ip_20.add_vpp_config()
l3o_20 = VppGbpSubnet(
self, rd1, "10.20.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220)
l3o_20.add_vpp_config()
ip6_20 = VppIpRoute(self, "10:20::", 64,
[VppRoutePath(eep1.ip6,
eep1.epg.bvi.sw_if_index),
VppRoutePath(eep2.ip6,
eep2.epg.bvi.sw_if_index)],
table_id=t6.table_id)
ip6_20.add_vpp_config()
l3o6_20 = VppGbpSubnet(
self, rd1, "10:20::", 64,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220)
l3o6_20.add_vpp_config()
self.logger.info(self.vapi.cli("sh ip fib 10.20.0.1"))
self.logger.info(self.vapi.cli("sh ip6 fib 10:20::1"))
        # two ip6 packets whose ports are chosen so they load-balance
p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IPv6(src=lep1.ip6, dst="10:20::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)),
(Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IPv6(src=lep1.ip6, dst="10:20::1") /
UDP(sport=124, dport=1230) /
Raw(b'\xa5' * 100))]
rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
self.assertEqual(rxs[0][Dot1Q].vlan, 101)
self.assertEqual(rxs[1][Dot1Q].vlan, 100)
        # two ip4 packets whose ports are chosen so they load-balance
p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IP(src=lep1.ip4, dst="10.20.0.1") /
UDP(sport=1235, dport=1235) /
Raw(b'\xa5' * 100)),
(Ether(src=lep1.mac, dst=str(self.router_mac)) /
Dot1Q(vlan=144) /
IP(src=lep1.ip4, dst="10.20.0.1") /
UDP(sport=124, dport=1230) /
Raw(b'\xa5' * 100))]
rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
self.assertEqual(rxs[0][Dot1Q].vlan, 101)
self.assertEqual(rxs[1][Dot1Q].vlan, 100)
#
# cleanup
#
ip_222.remove_vpp_config()
self.pg7.unconfig_ip4()
self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
def test_gbp_anon_l3_out(self):
""" GBP Anonymous L3 Out """
ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
self.vapi.cli("set logging class gbp level debug")
routed_dst_mac = "00:0c:0c:0c:0c:0c"
routed_src_mac = "00:22:bd:f8:19:ff"
#
# IP tables
#
t4 = VppIpTable(self, 1)
t4.add_vpp_config()
t6 = VppIpTable(self, 1, True)
t6.add_vpp_config()
rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
rd1.add_vpp_config()
self.loop0.set_mac(self.router_mac)
#
# Bind the BVI to the RD
#
bind_l0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
bind_l0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
#
# Pg7 hosts a BD's BUM
# Pg1 some other l3 interface
#
self.pg7.config_ip4()
self.pg7.resolve_arp()
#
        # a GBP external bridge domain for the EPs
#
bd1 = VppBridgeDomain(self, 1)
bd1.add_vpp_config()
gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, None)
gbd1.add_vpp_config()
#
# The Endpoint-groups in which the external endpoints exist
#
epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
None, gbd1.bvi,
"10.0.0.128",
"2001:10::128",
VppGbpEndpointRetention(4))
epg_220.add_vpp_config()
# the BVIs have the subnet applied ...
ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
"10.0.0.128", 24,
bind=bind_l0_ip4).add_vpp_config()
        # ... which is an anonymous L3-out subnet
l3o_1 = VppGbpSubnet(
self, rd1, "10.0.0.0", 24,
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_ANON_L3_OUT,
sclass=113)
l3o_1.add_vpp_config()
#
# an external interface attached to the outside world and the
# external BD
#
VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
#
# vlan_100 and vlan_101 are anonymous l3-out interfaces
#
ext_itf = VppGbpExtItf(self, self.vlan_100, bd1, rd1, anon=True)
ext_itf.add_vpp_config()
ext_itf = VppGbpExtItf(self, self.vlan_101, bd1, rd1, anon=True)
ext_itf.add_vpp_config()
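        # (with anon=True no external endpoint needs to be configured on
        #  these interfaces; traffic arriving on them is expected to be
        #  classified by the sclass of the matching ANON_L3_OUT subnet,
        #  which is what the packets below rely on)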
#
        # a unicast vxlan-gbp tunnel for inter-RD traffic
#
vx_tun_l3 = VppGbpVxlanTunnel(
self, 444, rd1.rd_id,
VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
self.pg2.local_ip4)
vx_tun_l3.add_vpp_config()
#
# A remote external endpoint
#
rep = VppGbpEndpoint(self, vx_tun_l3,
epg_220, None,
"10.0.0.201", "11.0.0.201",
"2001:10::201", "3001::101",
ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
self.pg7.local_ip4,
self.pg7.remote_ip4,
mac=None)
rep.add_vpp_config()
#
        # ARP packets from a host in the external subnet are accepted, flooded
        # and replied to. We expect 2 packets:
        #   - the ARP request flooded over the other vlan subif
        #   - an ARP reply from the BVI
#
p_arp = (Ether(src=self.vlan_100.remote_mac,
dst="ff:ff:ff:ff:ff:ff") /
Dot1Q(vlan=100) /
ARP(op="who-has",
psrc="10.0.0.100",
pdst="10.0.0.128",
hwsrc=self.vlan_100.remote_mac,
hwdst="ff:ff:ff:ff:ff:ff"))
rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
p_arp = (Ether(src=self.vlan_101.remote_mac,
dst="ff:ff:ff:ff:ff:ff") /
Dot1Q(vlan=101) /
ARP(op="who-has",
psrc='10.0.0.101',
pdst="10.0.0.128",
hwsrc=self.vlan_101.remote_mac,
hwdst="ff:ff:ff:ff:ff:ff"))
rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
#
# remote to external
#
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=vx_tun_l3.vni, gpid=epg_220.sclass, flags=0x88) /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src=str(rep.ip4), dst="10.0.0.100") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
#
# local EP pings router
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.0.0.100", dst="10.0.0.128") /
ICMP(type='echo-request'))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, self.vlan_100.remote_mac)
self.assertEqual(rx[Dot1Q].vlan, 100)
#
# local EP pings other local EP
#
p = (Ether(src=self.vlan_100.remote_mac,
dst=self.vlan_101.remote_mac) /
Dot1Q(vlan=100) /
IP(src="10.0.0.100", dst="10.0.0.101") /
ICMP(type='echo-request'))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.vlan_100.remote_mac)
self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
self.assertEqual(rx[Dot1Q].vlan, 101)
#
# A subnet reachable through an external router on vlan 100
#
ip_220 = VppIpRoute(self, "10.220.0.0", 24,
[VppRoutePath("10.0.0.100",
epg_220.bvi.sw_if_index)],
table_id=t4.table_id)
ip_220.add_vpp_config()
l3o_220 = VppGbpSubnet(
self, rd1, "10.220.0.0", 24,
            # note: this is a "regular" L3-out subnet (not connected)
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4220)
l3o_220.add_vpp_config()
#
# A subnet reachable through an external router on vlan 101
#
ip_221 = VppIpRoute(self, "10.221.0.0", 24,
[VppRoutePath("10.0.0.101",
epg_220.bvi.sw_if_index)],
table_id=t4.table_id)
ip_221.add_vpp_config()
l3o_221 = VppGbpSubnet(
self, rd1, "10.221.0.0", 24,
            # note: this is a "regular" L3-out subnet (not connected)
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4221)
l3o_221.add_vpp_config()
#
# ping between hosts in remote subnets
# dropped without a contract
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.221.0.1") /
ICMP(type='echo-request'))
rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
#
# contract for the external nets to communicate
#
rule4 = AclRule(is_permit=1, proto=17)
rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
acl = VppAcl(self, rules=[rule4, rule6])
acl.add_vpp_config()
c1 = VppGbpContract(
self, 55, 4220, 4221, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c1.add_vpp_config()
#
        # Contracts allowing ext-net 220 to talk with external EPs
#
c2 = VppGbpContract(
self, 55, 4220, 113, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c2.add_vpp_config()
c3 = VppGbpContract(
self, 55, 113, 4220, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c3.add_vpp_config()
#
# ping between hosts in remote subnets
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.221.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
for rx in rxs:
self.assertEqual(rx[Ether].src, str(self.router_mac))
self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
self.assertEqual(rx[Dot1Q].vlan, 101)
# we did not learn these external hosts
self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
#
# from remote external EP to local external EP
#
p = (Ether(src=self.pg7.remote_mac,
dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4,
dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=444, gpid=113, flags=0x88) /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src=rep.ip4, dst="10.220.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
#
# ping from an external host to the remote external EP
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst=rep.ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
# self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 444)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# the sclass of the ext-net the packet came from
self.assertEqual(rx[VXLAN].gpid, 4220)
# policy was applied to the original IP packet
self.assertTrue(rx[VXLAN].gpflags.A)
            # since it's an external host the receiver should not learn it
self.assertTrue(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[IP].src, "10.220.0.1")
self.assertEqual(inner[IP].dst, rep.ip4)
#
# An external subnet reachable via the remote external EP
#
#
# first the VXLAN-GBP tunnel over which it is reached
#
vx_tun_r = VppVxlanGbpTunnel(
self, self.pg7.local_ip4,
self.pg7.remote_ip4, 445,
mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L3))
vx_tun_r.add_vpp_config()
VppIpInterfaceBind(self, vx_tun_r, t4).add_vpp_config()
self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
#
# then the special adj to resolve through on that tunnel
#
n1 = VppNeighbor(self,
vx_tun_r.sw_if_index,
"00:0c:0c:0c:0c:0c",
self.pg7.remote_ip4)
n1.add_vpp_config()
#
# the route via the adj above
#
ip_222 = VppIpRoute(self, "10.222.0.0", 24,
[VppRoutePath(self.pg7.remote_ip4,
vx_tun_r.sw_if_index)],
table_id=t4.table_id)
ip_222.add_vpp_config()
l3o_222 = VppGbpSubnet(
self, rd1, "10.222.0.0", 24,
            # note: this is a "regular" L3-out subnet (not connected)
VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
sclass=4222)
l3o_222.add_vpp_config()
#
# ping between hosts in local and remote external subnets
# dropped without a contract
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
#
# Add contracts ext-nets for 220 -> 222
#
c4 = VppGbpContract(
self, 55, 4220, 4222, acl.acl_index,
[VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[]),
VppGbpContractRule(
VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
[])],
[ETH_P_IP, ETH_P_IPV6])
c4.add_vpp_config()
#
# ping from host in local to remote external subnets
#
p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
Dot1Q(vlan=100) /
IP(src="10.220.0.1", dst="10.222.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
for rx in rxs:
self.assertEqual(rx[Ether].src, self.pg7.local_mac)
self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
self.assertEqual(rx[IP].src, self.pg7.local_ip4)
self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
self.assertEqual(rx[VXLAN].vni, 445)
self.assertTrue(rx[VXLAN].flags.G)
self.assertTrue(rx[VXLAN].flags.Instance)
# the sclass of the ext-net the packet came from
self.assertEqual(rx[VXLAN].gpid, 4220)
# policy was applied to the original IP packet
self.assertTrue(rx[VXLAN].gpflags.A)
            # since it's an external host the receiver should not learn it
self.assertTrue(rx[VXLAN].gpflags.D)
inner = rx[VXLAN].payload
self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
self.assertEqual(inner[IP].src, "10.220.0.1")
self.assertEqual(inner[IP].dst, "10.222.0.1")
#
# ping from host in remote to local external subnets
# there's no contract for this, but the A bit is set.
#
p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.222.0.1", dst="10.220.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
#
# ping from host in remote to remote external subnets
        # this is dropped by the reflection check.
#
p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
UDP(sport=1234, dport=48879) /
VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
IP(src="10.222.0.1", dst="10.222.0.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
#
# cleanup
#
self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
self.pg7.unconfig_ip4()
# make sure the programmed EP is no longer learnt from DP
self.wait_for_ep_timeout(sw_if_index=rep.itf.sw_if_index, ip=rep.ip4)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 39.005273
| 79
| 0.512856
|
1836d88cad0148297954cd1c0ac20284bc384a64
| 62
|
py
|
Python
|
reamber/sm/SMMine.py
|
Eve-ning/reamber_base_py
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 10
|
2020-06-28T11:16:36.000Z
|
2021-08-09T21:41:43.000Z
|
reamber/sm/SMMine.py
|
Eve-ning/reamberPy
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 35
|
2020-06-18T13:05:50.000Z
|
2022-02-18T10:13:35.000Z
|
reamber/sm/SMMine.py
|
Eve-ning/reamber_base_py
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 2
|
2021-05-26T17:05:06.000Z
|
2021-06-12T18:42:13.000Z
|
from reamber.base.Hit import Hit
class SMMine(Hit):
...
| 10.333333
| 32
| 0.66129
|
00d0086628827564b10910166fc8f530c03f6150
| 1,148
|
py
|
Python
|
tp/tp03/solutions/4-mcat/mcat.py
|
UPB-FILS/sde
|
7c02249b6ff4aaf0efef23d35b0a842f44903015
|
[
"Apache-2.0"
] | 5
|
2020-03-09T15:53:16.000Z
|
2021-03-02T08:11:19.000Z
|
tp/tp03/solutions/4-mcat/mcat.py
|
UPB-FILS/sde
|
7c02249b6ff4aaf0efef23d35b0a842f44903015
|
[
"Apache-2.0"
] | 101
|
2019-02-17T15:32:03.000Z
|
2020-05-20T17:43:54.000Z
|
tp/tp03/solutions/4-mcat/mcat.py
|
UPB-FILS/sde
|
7c02249b6ff4aaf0efef23d35b0a842f44903015
|
[
"Apache-2.0"
] | 2
|
2019-04-01T11:33:54.000Z
|
2020-04-02T07:39:11.000Z
|
import os
import sys
BUFSIZE = 10
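# NB: reads are done in BUFSIZE-byte chunks and os.write() may write fewer
# bytes than requested, which is why the copy loops below keep calling
# os.write() on the remaining slice until the whole chunk has been written.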
# len(sys.argv) - the number of command-line arguments the program received
# sys.argv - array of strings with the arguments
# sys.argv[0] - program name
# sys.argv[1] - first argument
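# Illustrative invocation (file names are hypothetical):
#   python mcat.py notes.txt           -> copy notes.txt to stdout
#   python mcat.py notes.txt copy.txt  -> copy notes.txt into copy.txt
# in the second case sys.argv == ['mcat.py', 'notes.txt', 'copy.txt']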
argc = len (sys.argv)
if (argc < 2 or argc > 3):
print ("Usage:\n\t " + sys.argv[0]+ " source_file [destination_file]")
sys.exit (0)
in_file = sys.argv[1]
try:
fd1 = os.open(in_file, os.O_CREAT | os.O_RDWR, 0o644)
if (argc == 2):
b_msg = os.read(fd1, BUFSIZE)
rc = 0
while (rc < len(b_msg)):
rc = rc + os.write(1, b_msg[rc:])
while (len (b_msg) != 0):
b_msg = os.read(fd1, BUFSIZE)
rc = 0
while (rc < len(b_msg)):
rc = rc + os.write(1, b_msg[rc:])
elif (argc == 3):
out_file = sys.argv[2]
fd2 = os.open(out_file, os.O_CREAT | os.O_RDWR, 0o644)
b_msg = os.read(fd1, BUFSIZE)
rc = 0
while (rc < len(b_msg)):
rc = rc + os.write(fd2, b_msg[rc:])
while (len (b_msg) != 0):
b_msg = os.read(fd1, BUFSIZE)
rc = 0
while (rc < len(b_msg)):
rc = rc + os.write(fd2, b_msg[rc:])
os.close (fd2)
os.close (fd1)
except Exception as e:
print ("Error: {}".format (e))
| 23.916667
| 83
| 0.605401
|
82f0f0cafe71a234e4687d8b64b4aff8cd11b72b
| 7,348
|
py
|
Python
|
sympy/assumptions/facts.py
|
nihirag/sympy
|
0a2f378e73f6e1f1f992aab63b2516b12439b728
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/assumptions/facts.py
|
nihirag/sympy
|
0a2f378e73f6e1f1f992aab63b2516b12439b728
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/assumptions/facts.py
|
nihirag/sympy
|
0a2f378e73f6e1f1f992aab63b2516b12439b728
|
[
"BSD-3-Clause"
] | 1
|
2018-08-25T08:05:00.000Z
|
2018-08-25T08:05:00.000Z
|
"""
Known facts in assumptions module.
This module defines the facts in ``get_known_facts()``, and provides support
functions to generate the contents of the ``sympy.assumptions.ask_generated`` file.
"""
from sympy.core.cache import cacheit
from sympy.assumptions import Q
from sympy.assumptions.cnf import CNF
from sympy.logic.boolalg import (to_cnf, And, Not, Implies, Equivalent)
from sympy.logic.inference import satisfiable
@cacheit
def get_composite_predicates():
    # To reduce the complexity of the SAT solver, these predicates never go into
    # the facts but are transformed into combinations of primitive predicates.
return {
Q.real : Q.negative | Q.zero | Q.positive,
Q.integer : Q.even | Q.odd,
Q.nonpositive : Q.negative | Q.zero,
Q.nonzero : Q.negative | Q.positive,
Q.nonnegative : Q.zero | Q.positive,
Q.extended_real : Q.negative_infinite | Q.negative | Q.zero | Q.positive | Q.positive_infinite,
Q.extended_positive: Q.positive | Q.positive_infinite,
Q.extended_negative: Q.negative | Q.negative_infinite,
Q.extended_nonzero: Q.negative_infinite | Q.negative | Q.positive | Q.positive_infinite,
Q.extended_nonpositive: Q.negative_infinite | Q.negative | Q.zero,
Q.extended_nonnegative: Q.zero | Q.positive | Q.positive_infinite,
Q.complex : Q.algebraic | Q.transcendental
}
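# Illustrative example: with the mapping above, a query on a composite
# predicate such as Q.real(x) is expected to be rewritten as
# Q.negative(x) | Q.zero(x) | Q.positive(x) before the facts below are
# consulted, so only primitive predicates ever reach the SAT solver.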
@cacheit
def get_known_facts():
# We build the facts starting with primitive predicates.
# DO NOT include the predicates in get_composite_predicates()'s keys here!
return And(
# primitive predicates exclude each other
Implies(Q.negative_infinite, ~Q.positive_infinite),
Implies(Q.negative, ~Q.zero & ~Q.positive),
Implies(Q.positive, ~Q.zero),
# build real line and complex plane
Implies(Q.negative | Q.zero | Q.positive, ~Q.imaginary),
Implies(Q.negative | Q.zero | Q.positive | Q.imaginary, Q.algebraic | Q.transcendental),
# other subsets of complex
Implies(Q.transcendental, ~Q.algebraic),
Implies(Q.irrational, ~Q.rational),
Equivalent(Q.rational | Q.irrational, Q.negative | Q.zero | Q.positive),
Implies(Q.rational, Q.algebraic),
# integers
Implies(Q.even, ~Q.odd),
Implies(Q.even | Q.odd, Q.rational),
Implies(Q.zero, Q.even),
Implies(Q.composite, ~Q.prime),
Implies(Q.composite | Q.prime, (Q.even | Q.odd) & Q.positive),
Implies(Q.even & Q.positive & ~Q.prime, Q.composite),
# hermitian and antihermitian
Implies(Q.negative | Q.zero | Q.positive, Q.hermitian),
Implies(Q.imaginary, Q.antihermitian),
Implies(Q.zero, Q.hermitian | Q.antihermitian),
# define finity and infinity, and build extended real line
Implies(Q.infinite, ~Q.finite),
Implies(Q.algebraic | Q.transcendental, Q.finite),
Implies(Q.negative_infinite | Q.positive_infinite, Q.infinite),
# commutativity
Implies(Q.finite | Q.infinite, Q.commutative),
# matrices
Implies(Q.orthogonal, Q.positive_definite),
Implies(Q.orthogonal, Q.unitary),
Implies(Q.unitary & Q.real_elements, Q.orthogonal),
Implies(Q.unitary, Q.normal),
Implies(Q.unitary, Q.invertible),
Implies(Q.normal, Q.square),
Implies(Q.diagonal, Q.normal),
Implies(Q.positive_definite, Q.invertible),
Implies(Q.diagonal, Q.upper_triangular),
Implies(Q.diagonal, Q.lower_triangular),
Implies(Q.lower_triangular, Q.triangular),
Implies(Q.upper_triangular, Q.triangular),
Implies(Q.triangular, Q.upper_triangular | Q.lower_triangular),
Implies(Q.upper_triangular & Q.lower_triangular, Q.diagonal),
Implies(Q.diagonal, Q.symmetric),
Implies(Q.unit_triangular, Q.triangular),
Implies(Q.invertible, Q.fullrank),
Implies(Q.invertible, Q.square),
Implies(Q.symmetric, Q.square),
Implies(Q.fullrank & Q.square, Q.invertible),
Equivalent(Q.invertible, ~Q.singular),
Implies(Q.integer_elements, Q.real_elements),
Implies(Q.real_elements, Q.complex_elements),
)
@cacheit
def get_known_facts_keys():
result = []
exclude = get_composite_predicates()
for attr in Q.__class__.__dict__:
if attr.startswith('__'):
continue
pred = getattr(Q, attr)
if pred in exclude:
continue
result.append(pred)
return result
def compute_known_facts(known_facts, known_facts_keys):
"""Compute the various forms of knowledge compilation used by the
assumptions system.
Explanation
===========
This function is typically applied to the results of the ``get_known_facts``
    and ``get_known_facts_keys`` functions defined earlier in
    this file.
"""
from textwrap import dedent, wrap
fact_string = dedent('''\
"""
The contents of this file are the return value of
``sympy.assumptions.ask.compute_known_facts``.
Do NOT manually edit this file.
Instead, run ./bin/ask_update.py.
"""
from sympy.core.cache import cacheit
from sympy.assumptions.cnf import Literal
from sympy.assumptions.ask import Q
@cacheit
def get_all_known_facts():
"""
Known facts as CNF clauses. Used by satask.
"""
return {
%s
}
# -{ Known facts in compressed sets }-
@cacheit
def get_known_facts_dict():
"""
Logical implication as dictionary. Key implies every item in its value.
Used for quick lookup of single facts.
"""
return {
%s
}
''')
# Compute the known facts in CNF form for logical inference
LINE = ",\n "
HANG = ' '*8
cnf = to_cnf(known_facts)
cnf_ = CNF.to_CNF(known_facts)
p = LINE.join(sorted(['frozenset((' + ', '.join(str(lit) for lit in sorted(clause, key=str)) +'))' for clause in cnf_.clauses]))
mapping = single_fact_lookup(known_facts_keys, cnf)
items = sorted(mapping.items(), key=str)
keys = [str(i[0]) for i in items]
values = ['set(%s)' % sorted(i[1], key=str) for i in items]
m = LINE.join(['\n'.join(
wrap("{}: {}".format(k, v),
subsequent_indent=HANG,
break_long_words=False))
for k, v in zip(keys, values)]) + ','
return fact_string % (p, m)
def single_fact_lookup(known_facts_keys, known_facts_cnf):
    # Return the dictionary for quick lookup of single facts
mapping = {}
for key in known_facts_keys:
mapping[key] = {key}
for other_key in known_facts_keys:
if other_key != key:
if ask_full_inference(other_key, key, known_facts_cnf):
mapping[key].add(other_key)
if ask_full_inference(~other_key, key, known_facts_cnf):
mapping[key].add(~other_key)
return mapping
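# For example (illustrative, given the facts above): mapping[Q.zero] would be
# expected to contain Q.zero itself plus implied facts such as Q.even and
# ~Q.negative.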
def ask_full_inference(proposition, assumptions, known_facts_cnf):
"""
Method for inferring properties about objects.
"""
if not satisfiable(And(known_facts_cnf, assumptions, proposition)):
return False
if not satisfiable(And(known_facts_cnf, assumptions, Not(proposition))):
return True
return None
| 35.497585
| 132
| 0.645346
|
0474a5a0ccdf93da21d52fb25c881c3c3e8df9c2
| 18,382
|
py
|
Python
|
test/functional/feature_bip68_sequence.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex
from test_framework.script import CScript
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, get_bip9_status, parvus_round, sync_blocks
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
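# Illustrative encoding example (constant introduced here for clarity only,
# not used by the test itself): with the type flag set, the low 16 bits of
# nSequence count units of 2**SEQUENCE_LOCKTIME_GRANULARITY (= 512) seconds,
# so a relative lock of ~5120 seconds could be encoded as:
EXAMPLE_TIME_BASED_SEQUENCE = SEQUENCE_LOCKTIME_TYPE_FLAG | 10
assert EXAMPLE_TIME_BASED_SEQUENCE & SEQUENCE_LOCKTIME_MASK == 10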
class BIP68Test(VivuscoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
    # the disable flag (the most significant sequence bit) is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 VVC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(parvus_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a' * 35]))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
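# Worked example: if cur_time - orig_time is 5120 seconds, the largest passing time-based
# value is 5120 >> 9 = 10 (ten 512-second units); 11 units would make the lock fail.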
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
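# (relayfee is quoted in coin/kB, so the fee deducted above is relayfee * tx_size / 1000 coins,
# converted to satoshis by the COIN factor.)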
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN), CScript([b'a' * 35]))]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a' * 35]))]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
add_witness_commitment(block)
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
sync_blocks(self.nodes)
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
| 45.5
| 152
| 0.639811
|
3a1169e7afb607b2b4fc30cfb0a89f33666e2273
| 1,529
|
py
|
Python
|
S7/augment.py
|
madhucharan/EVA6
|
3d5f54cfa2862bef6d6619614290c9e22e7d08bc
|
[
"MIT"
] | 1
|
2021-05-04T16:31:04.000Z
|
2021-05-04T16:31:04.000Z
|
S7/augment.py
|
madhucharan/EVA6
|
3d5f54cfa2862bef6d6619614290c9e22e7d08bc
|
[
"MIT"
] | null | null | null |
S7/augment.py
|
madhucharan/EVA6
|
3d5f54cfa2862bef6d6619614290c9e22e7d08bc
|
[
"MIT"
] | 6
|
2021-04-24T23:50:13.000Z
|
2022-02-17T07:53:53.000Z
|
import albumentations as A
from albumentations.pytorch import ToTensorV2
def get_train_transform(mu, sigma):
"""
Args:
mu: per-channel mean used by Normalize (and as the CoarseDropout fill value).
sigma: per-channel standard deviation used by Normalize.
Returns:
albumentations.Compose: the training-time augmentation pipeline.
"""
train_transform = A.Compose([
A.HorizontalFlip(p=0.4),
A.ShiftScaleRotate(),
A.Normalize(mean=(mu),
std=(sigma)),
A.CoarseDropout(max_holes=1,
max_height=16,
max_width=16,
min_holes=1,
min_height=16,
min_width=16,
fill_value=(mu)),
A.ToGray(),
ToTensorV2(),
])
return(train_transform)
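# Illustrative usage (the normalization stats below are typical CIFAR-10 values; substitute your dataset's):
#   import numpy as np
#   transform = get_train_transform(mu=(0.4914, 0.4822, 0.4465), sigma=(0.2470, 0.2435, 0.2616))
#   augmented = transform(image=np.asarray(pil_image))["image"]  # torch tensor, thanks to ToTensorV2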
def get_test_transform(mu, sigma):
"""
Args:
mu: per-channel mean used by Normalize.
sigma: per-channel standard deviation used by Normalize.
Returns:
albumentations.Compose: the test-time normalization pipeline.
"""
test_transform = A.Compose([
A.Normalize(
mean=(mu),
std=(sigma)),
ToTensorV2(),
])
return(test_transform)
def no_transform():
return(A.Compose([A.Normalize()]))
| 29.980392
| 73
| 0.395029
|
a68a867656fb6f704ecf48135cf9c8078e322639
| 42,055
|
py
|
Python
|
odoo/addons/website_sale/controllers/main.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/website_sale/controllers/main.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/website_sale/controllers/main.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
from openerp.addons.web.controllers.main import login_redirect
PPG = 20 # Products Per Page
PPR = 4 # Products Per Row
class table_compute(object):
def __init__(self):
self.table = {}
def _check_place(self, posx, posy, sizex, sizey):
res = True
for y in range(sizey):
for x in range(sizex):
if posx+x>=PPR:
res = False
break
row = self.table.setdefault(posy+y, {})
if row.setdefault(posx+x) is not None:
res = False
break
for x in range(PPR):
self.table[posy+y].setdefault(x, None)
return res
def process(self, products):
# Compute products positions on the grid
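# The grid is PPR columns wide; a product may span several cells via website_size_x/y.
# The method returns a list of rows whose cells are either a dict
# {'product', 'x', 'y', 'class'} or None for an empty slot (cells covered by a
# multi-cell product are dropped from the row).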
minpos = 0
index = 0
maxy = 0
for p in products:
x = min(max(p.website_size_x, 1), PPR)
y = min(max(p.website_size_y, 1), PPR)
if index>=PPG:
x = y = 1
pos = minpos
while not self._check_place(pos%PPR, pos/PPR, x, y):
pos += 1
# if 21st products (index 20) and the last line is full (PPR products in it), break
# (pos + 1.0) / PPR is the line where the product would be inserted
# maxy is the number of existing lines
# + 1.0 is because pos begins at 0, thus pos 20 is actually the 21st block
# and to force python to not round the division operation
if index >= PPG and ((pos + 1.0) / PPR) > maxy:
break
if x==1 and y==1: # simple heuristic for CPU optimization
minpos = pos/PPR
for y2 in range(y):
for x2 in range(x):
self.table[(pos/PPR)+y2][(pos%PPR)+x2] = False
self.table[pos/PPR][pos%PPR] = {
'product': p, 'x':x, 'y': y,
'class': " ".join(map(lambda x: x.html_class or '', p.website_style_ids))
}
if index<=PPG:
maxy=max(maxy,y+(pos/PPR))
index += 1
# Format table according to HTML needs
rows = self.table.items()
rows.sort()
rows = map(lambda x: x[1], rows)
for col in range(len(rows)):
cols = rows[col].items()
cols.sort()
x += len(cols)
rows[col] = [c for c in map(lambda x: x[1], cols) if c != False]
return rows
# TODO keep with input type hidden
class QueryURL(object):
def __init__(self, path='', **args):
self.path = path
self.args = args
def __call__(self, path=None, **kw):
if not path:
path = self.path
for k,v in self.args.items():
kw.setdefault(k,v)
l = []
for k,v in kw.items():
if v:
if isinstance(v, list) or isinstance(v, set):
l.append(werkzeug.url_encode([(k,i) for i in v]))
else:
l.append(werkzeug.url_encode([(k,v)]))
if l:
path += '?' + '&'.join(l)
return path
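# Illustrative usage of QueryURL (query-string ordering may vary):
#   keep = QueryURL('/shop', category=3, search='shirt')
#   keep(attrib=['1-2', '1-4'])  # -> '/shop?category=3&search=shirt&attrib=1-2&attrib=1-4'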
def get_pricelist():
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
sale_order = context.get('sale_order')
if sale_order:
pricelist = sale_order.pricelist_id
else:
partner = pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
pricelist = partner.property_product_pricelist
return pricelist
class website_sale(http.Controller):
def get_pricelist(self):
return get_pricelist()
def get_attribute_value_ids(self, product):
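# Returns one entry per variant: [variant id, visible attribute value ids, pricelist price,
# list price (converted to the pricelist currency when the website pricelist differs)].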
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
currency_obj = pool['res.currency']
attribute_value_ids = []
visible_attrs = set(l.attribute_id.id
for l in product.attribute_line_ids
if len(l.value_ids) > 1)
if request.website.pricelist_id.id != context['pricelist']:
website_currency_id = request.website.currency_id.id
currency_id = self.get_pricelist().currency_id.id
for p in product.product_variant_ids:
price = currency_obj.compute(cr, uid, website_currency_id, currency_id, p.lst_price)
attribute_value_ids.append([p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, price])
else:
attribute_value_ids = [[p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, p.lst_price]
for p in product.product_variant_ids]
return attribute_value_ids
@http.route(['/shop',
'/shop/page/<int:page>',
'/shop/category/<model("product.public.category"):category>',
'/shop/category/<model("product.public.category"):category>/page/<int:page>'
], type='http', auth="public", website=True)
def shop(self, page=0, category=None, search='', **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
domain = request.website.sale_product_domain()
if search:
for srch in search.split(" "):
domain += ['|', '|', '|', ('name', 'ilike', srch), ('description', 'ilike', srch),
('description_sale', 'ilike', srch), ('product_variant_ids.default_code', 'ilike', srch)]
if category:
domain += [('public_categ_ids', 'child_of', int(category))]
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int,v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
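# Each attrib entry arrives as an "attribute_id-value_id" string (e.g. "1-3"); values of the
# same attribute are OR-ed within one domain term, while different attributes add separate
# (AND-ed) domain terms below.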
if attrib_values:
attrib = None
ids = []
for value in attrib_values:
if not attrib:
attrib = value[0]
ids.append(value[1])
elif value[0] == attrib:
ids.append(value[1])
else:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
attrib = value[0]
ids = [value[1]]
if attrib:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
keep = QueryURL('/shop', category=category and int(category), search=search, attrib=attrib_list)
if not context.get('pricelist'):
pricelist = self.get_pricelist()
context['pricelist'] = int(pricelist)
else:
pricelist = pool.get('product.pricelist').browse(cr, uid, context['pricelist'], context)
product_obj = pool.get('product.template')
url = "/shop"
product_count = product_obj.search_count(cr, uid, domain, context=context)
if search:
post["search"] = search
if category:
category = pool['product.public.category'].browse(cr, uid, int(category), context=context)
url = "/shop/category/%s" % slug(category)
if attrib_list:
post['attrib'] = attrib_list
pager = request.website.pager(url=url, total=product_count, page=page, step=PPG, scope=7, url_args=post)
product_ids = product_obj.search(cr, uid, domain, limit=PPG, offset=pager['offset'], order='website_published desc, website_sequence desc', context=context)
products = product_obj.browse(cr, uid, product_ids, context=context)
style_obj = pool['product.style']
style_ids = style_obj.search(cr, uid, [], context=context)
styles = style_obj.browse(cr, uid, style_ids, context=context)
category_obj = pool['product.public.category']
category_ids = category_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
categs = category_obj.browse(cr, uid, category_ids, context=context)
attributes_obj = request.registry['product.attribute']
attributes_ids = attributes_obj.search(cr, uid, [], context=context)
attributes = attributes_obj.browse(cr, uid, attributes_ids, context=context)
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
values = {
'search': search,
'category': category,
'attrib_values': attrib_values,
'attrib_set': attrib_set,
'pager': pager,
'pricelist': pricelist,
'products': products,
'bins': table_compute().process(products),
'rows': PPR,
'styles': styles,
'categories': categs,
'attributes': attributes,
'compute_currency': compute_currency,
'keep': keep,
'style_in_product': lambda style, product: style.id in [s.id for s in product.website_style_ids],
'attrib_encode': lambda attribs: werkzeug.url_encode([('attrib',i) for i in attribs]),
}
return request.website.render("website_sale.products", values)
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
category_obj = pool['product.public.category']
template_obj = pool['product.template']
context.update(active_id=product.id)
if category:
category = category_obj.browse(cr, uid, int(category), context=context)
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int,v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
keep = QueryURL('/shop', category=category and category.id, search=search, attrib=attrib_list)
category_ids = category_obj.search(cr, uid, [], context=context)
category_list = category_obj.name_get(cr, uid, category_ids, context=context)
category_list = sorted(category_list, key=lambda category: category[1])
pricelist = self.get_pricelist()
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
if not context.get('pricelist'):
context['pricelist'] = int(self.get_pricelist())
product = template_obj.browse(cr, uid, int(product), context=context)
values = {
'search': search,
'category': category,
'pricelist': pricelist,
'attrib_values': attrib_values,
'compute_currency': compute_currency,
'attrib_set': attrib_set,
'keep': keep,
'category_list': category_list,
'main_object': product,
'product': product,
'get_attribute_value_ids': self.get_attribute_value_ids
}
return request.website.render("website_sale.product", values)
@http.route(['/shop/product/comment/<int:product_template_id>'], type='http', auth="public", website=True)
def product_comment(self, product_template_id, **post):
if not request.session.uid:
return login_redirect()
cr, uid, context = request.cr, request.uid, request.context
if post.get('comment'):
request.registry['product.template'].message_post(
cr, uid, product_template_id,
body=post.get('comment'),
type='comment',
subtype='mt_comment',
context=dict(context, mail_create_nosubscribe=True))
return werkzeug.utils.redirect('/shop/product/%s#comments' % product_template_id)
@http.route(['/shop/pricelist'], type='http', auth="public", website=True)
def pricelist(self, promo, **post):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(code=promo, context=context)
return request.redirect("/shop/cart")
@http.route(['/shop/cart'], type='http', auth="public", website=True)
def cart(self, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order()
if order:
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = order.pricelist_id.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
else:
compute_currency = lambda price: price
values = {
'order': order,
'compute_currency': compute_currency,
'suggested_products': [],
}
if order:
_order = order
if not context.get('pricelist'):
_order = order.with_context(pricelist=order.pricelist_id.id)
values['suggested_products'] = _order._cart_accessories()
return request.website.render("website_sale.cart", values)
@http.route(['/shop/cart/update'], type='http', auth="public", methods=['POST'], website=True)
def cart_update(self, product_id, add_qty=1, set_qty=0, **kw):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(force_create=1)._cart_update(product_id=int(product_id), add_qty=float(add_qty), set_qty=float(set_qty))
return request.redirect("/shop/cart")
@http.route(['/shop/cart/update_json'], type='json', auth="public", methods=['POST'], website=True)
def cart_update_json(self, product_id, line_id, add_qty=None, set_qty=None, display=True):
order = request.website.sale_get_order(force_create=1)
if order.state != 'draft':
request.website.sale_reset()
return {}
value = order._cart_update(product_id=product_id, line_id=line_id, add_qty=add_qty, set_qty=set_qty)
if not order.cart_quantity:
request.website.sale_reset()
return {}
if not display:
return None
value['cart_quantity'] = order.cart_quantity
value['website_sale.total'] = request.website._render("website_sale.total", {
'website_sale_order': request.website.sale_get_order()
})
return value
#------------------------------------------------------
# Checkout
#------------------------------------------------------
def checkout_redirection(self, order):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# must have a draft sale order with lines at this point, otherwise reset
if not order or order.state != 'draft':
request.session['sale_order_id'] = None
request.session['sale_transaction_id'] = None
return request.redirect('/shop')
# if transaction pending / done: redirect to confirmation
tx = context.get('website_sale_transaction')
if tx and tx.state != 'draft':
return request.redirect('/shop/payment/confirmation/%s' % order.id)
def checkout_values(self, data=None):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
orm_country = registry.get('res.country')
state_orm = registry.get('res.country.state')
country_ids = orm_country.search(cr, SUPERUSER_ID, [], context=context)
countries = orm_country.browse(cr, SUPERUSER_ID, country_ids, context)
states_ids = state_orm.search(cr, SUPERUSER_ID, [], context=context)
states = state_orm.browse(cr, SUPERUSER_ID, states_ids, context)
partner = orm_user.browse(cr, SUPERUSER_ID, request.uid, context).partner_id
order = None
shipping_id = None
shipping_ids = []
checkout = {}
if not data:
if request.uid != request.website.user_id.id:
checkout.update( self.checkout_parse("billing", partner) )
shipping_ids = orm_partner.search(cr, SUPERUSER_ID, [("parent_id", "=", partner.id), ('type', "=", 'delivery')], context=context)
else:
order = request.website.sale_get_order(force_create=1, context=context)
if order.partner_id:
domain = [("partner_id", "=", order.partner_id.id)]
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID, domain, context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
checkout.update( self.checkout_parse("billing", order.partner_id) )
else:
checkout = self.checkout_parse('billing', data)
try:
shipping_id = int(data["shipping_id"])
except ValueError:
pass
if shipping_id == -1:
checkout.update(self.checkout_parse('shipping', data))
if shipping_id is None:
if not order:
order = request.website.sale_get_order(context=context)
if order and order.partner_shipping_id:
shipping_id = order.partner_shipping_id.id
shipping_ids = list(set(shipping_ids) - set([partner.id]))
if shipping_id == partner.id:
shipping_id = 0
elif shipping_id > 0 and shipping_id not in shipping_ids:
shipping_ids.append(shipping_id)
elif shipping_id is None and shipping_ids:
shipping_id = shipping_ids[0]
ctx = dict(context, show_address=1)
shippings = []
if shipping_ids:
shippings = shipping_ids and orm_partner.browse(cr, SUPERUSER_ID, list(shipping_ids), ctx) or []
if shipping_id > 0:
shipping = orm_partner.browse(cr, SUPERUSER_ID, shipping_id, ctx)
checkout.update( self.checkout_parse("shipping", shipping) )
checkout['shipping_id'] = shipping_id
# Default search by user country
if not checkout.get('country_id'):
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = request.registry.get('res.country').search(cr, uid, [('code', '=', country_code)], context=context)
if country_ids:
checkout['country_id'] = country_ids[0]
values = {
'countries': countries,
'states': states,
'checkout': checkout,
'shipping_id': partner.id != shipping_id and shipping_id or 0,
'shippings': shippings,
'error': {},
'has_check_vat': hasattr(registry['res.partner'], 'check_vat')
}
return values
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
optional_billing_fields = ["street", "state_id", "vat", "vat_subjected", "zip"]
mandatory_shipping_fields = ["name", "phone", "street", "city", "country_id"]
optional_shipping_fields = ["state_id", "zip"]
def checkout_parse(self, address_type, data, remove_prefix=False):
""" data is a dict OR a partner browse record
"""
# set mandatory and optional fields
assert address_type in ('billing', 'shipping')
if address_type == 'billing':
all_fields = self.mandatory_billing_fields + self.optional_billing_fields
prefix = ''
else:
all_fields = self.mandatory_shipping_fields + self.optional_shipping_fields
prefix = 'shipping_'
# set data
if isinstance(data, dict):
query = dict((prefix + field_name, data[prefix + field_name])
for field_name in all_fields if prefix + field_name in data)
else:
query = dict((prefix + field_name, getattr(data, field_name))
for field_name in all_fields if getattr(data, field_name))
if address_type == 'billing' and data.parent_id:
query[prefix + 'street'] = data.parent_id.name
if query.get(prefix + 'state_id'):
query[prefix + 'state_id'] = int(query[prefix + 'state_id'])
if query.get(prefix + 'country_id'):
query[prefix + 'country_id'] = int(query[prefix + 'country_id'])
if query.get(prefix + 'vat'):
query[prefix + 'vat_subjected'] = True
if not remove_prefix:
return query
return dict((field_name, data[prefix + field_name]) for field_name in all_fields if prefix + field_name in data)
def checkout_form_validate(self, data):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# Validation
error = dict()
for field_name in self.mandatory_billing_fields:
if not data.get(field_name):
error[field_name] = 'missing'
if data.get("vat") and hasattr(registry["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = registry["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = registry["res.partner"].simple_vat_check
vat_country, vat_number = registry["res.partner"]._split_vat(data.get("vat"))
if not check_func(cr, uid, vat_country, vat_number, context=None): # simple_vat_check
error["vat"] = 'error'
if data.get("shipping_id") == -1:
for field_name in self.mandatory_shipping_fields:
field_name = 'shipping_' + field_name
if not data.get(field_name):
error[field_name] = 'missing'
return error
def checkout_form_save(self, checkout):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(force_create=1, context=context)
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
order_obj = request.registry.get('sale.order')
partner_lang = request.lang if request.lang in [lang.code for lang in request.website.language_ids] else None
billing_info = {}
if partner_lang:
billing_info['lang'] = partner_lang
billing_info.update(self.checkout_parse('billing', checkout, True))
# set partner_id
partner_id = None
if request.uid != request.website.user_id.id:
partner_id = orm_user.browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
elif order.partner_id:
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID,
[("partner_id", "=", order.partner_id.id)], context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
partner_id = order.partner_id.id
# save partner information
if partner_id and request.website.partner_id.id != partner_id:
orm_partner.write(cr, SUPERUSER_ID, [partner_id], billing_info, context=context)
else:
# create partner
partner_id = orm_partner.create(cr, SUPERUSER_ID, billing_info, context=context)
# create a new shipping partner
if checkout.get('shipping_id') == -1:
shipping_info = {}
if partner_lang:
shipping_info['lang'] = partner_lang
shipping_info.update(self.checkout_parse('shipping', checkout, True))
shipping_info['type'] = 'delivery'
shipping_info['parent_id'] = partner_id
checkout['shipping_id'] = orm_partner.create(cr, SUPERUSER_ID, shipping_info, context)
order_info = {
'partner_id': partner_id,
'message_follower_ids': [(4, partner_id), (3, request.website.partner_id.id)],
'partner_invoice_id': partner_id,
}
order_info.update(order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner_id, context=context)['value'])
address_change = order_obj.onchange_delivery_id(cr, SUPERUSER_ID, [], order.company_id.id, partner_id,
checkout.get('shipping_id'), None, context=context)['value']
order_info.update(address_change)
if address_change.get('fiscal_position'):
fiscal_update = order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [], address_change['fiscal_position'],
[(4, l.id) for l in order.order_line], context=None)['value']
order_info.update(fiscal_update)
order_info.pop('user_id')
order_info.update(partner_shipping_id=checkout.get('shipping_id') or partner_id)
order_obj.write(cr, SUPERUSER_ID, [order.id], order_info, context=context)
@http.route(['/shop/checkout'], type='http', auth="public", website=True)
def checkout(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(force_create=1, context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values()
return request.website.render("website_sale.checkout", values)
@http.route(['/shop/confirm_order'], type='http', auth="public", website=True)
def confirm_order(self, **post):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(context=context)
if not order:
return request.redirect("/shop")
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values(post)
values["error"] = self.checkout_form_validate(values["checkout"])
if values["error"]:
return request.website.render("website_sale.checkout", values)
self.checkout_form_save(values["checkout"])
request.session['sale_last_order_id'] = order.id
request.website.sale_get_order(update_pricelist=True, context=context)
return request.redirect("/shop/payment")
#------------------------------------------------------
# Payment
#------------------------------------------------------
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
""" Payment step. This page proposes several payment means based on available
payment.acquirer. State at this point :
- a draft sale order with lines; otherwise, clean context / session and
back to the shop
- no transaction in context / session, or only a draft one, if the customer
did go to a payment.acquirer website but closed the tab without
paying / canceling
"""
cr, uid, context = request.cr, request.uid, request.context
payment_obj = request.registry.get('payment.acquirer')
sale_order_obj = request.registry.get('sale.order')
order = request.website.sale_get_order(context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
shipping_partner_id = False
if order:
if order.partner_shipping_id.id:
shipping_partner_id = order.partner_shipping_id.id
else:
shipping_partner_id = order.partner_invoice_id.id
values = {
'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context)
}
values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)
values.update(sale_order_obj._get_website_data(cr, uid, order, context))
# fetch all registered payment means
# if tx:
# acquirer_ids = [tx.acquirer_id.id]
# else:
if not values['errors']:
acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)
values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))
render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Pay Now'))
for acquirer in values['acquirers']:
acquirer.button = payment_obj.render(
cr, SUPERUSER_ID, acquirer.id,
order.name,
order.amount_total,
order.pricelist_id.currency_id.id,
partner_id=shipping_partner_id,
tx_values={
'return_url': '/shop/payment/validate',
},
context=render_ctx)
return request.website.render("website_sale.payment", values)
@http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json', auth="public", website=True)
def payment_transaction(self, acquirer_id):
""" Json method that creates a payment.transaction, used to create a
transaction when the user clicks on 'pay now' button. After having
created the transaction, the event continues and the user is redirected
to the acquirer website.
:param int acquirer_id: id of a payment.acquirer record. If not set the
user is redirected to the checkout page
"""
cr, uid, context = request.cr, request.uid, request.context
transaction_obj = request.registry.get('payment.transaction')
order = request.website.sale_get_order(context=context)
if not order or not order.order_line or acquirer_id is None:
return request.redirect("/shop/checkout")
assert order.partner_id.id != request.website.partner_id.id
# find an already existing transaction
tx = request.website.sale_get_transaction()
if tx:
if tx.state == 'draft': # button clicked but no more info -> rewrite on tx or create a new one?
tx.write({
'acquirer_id': acquirer_id,
'amount': order.amount_total,
})
tx_id = tx.id
else:
tx_id = transaction_obj.create(cr, SUPERUSER_ID, {
'acquirer_id': acquirer_id,
'type': 'form',
'amount': order.amount_total,
'currency_id': order.pricelist_id.currency_id.id,
'partner_id': order.partner_id.id,
'partner_country_id': order.partner_id.country_id.id,
'reference': order.name,
'sale_order_id': order.id,
}, context=context)
request.session['sale_transaction_id'] = tx_id
# update quotation
request.registry['sale.order'].write(
cr, SUPERUSER_ID, [order.id], {
'payment_acquirer_id': acquirer_id,
'payment_tx_id': request.session['sale_transaction_id']
}, context=context)
return tx_id
@http.route('/shop/payment/get_status/<int:sale_order_id>', type='json', auth="public", website=True)
def payment_get_status(self, sale_order_id, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
tx_ids = request.registry['payment.transaction'].search(
cr, SUPERUSER_ID, [
'|', ('sale_order_id', '=', order.id), ('reference', '=', order.name)
], context=context)
if not tx_ids:
if order.amount_total:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
else:
state = 'done'
message = ""
validation = None
else:
tx = request.registry['payment.transaction'].browse(cr, SUPERUSER_ID, tx_ids[0], context=context)
state = tx.state
if state == 'done':
message = '<p>%s</p>' % _('Your payment has been received.')
elif state == 'cancel':
message = '<p>%s</p>' % _('The payment seems to have been canceled.')
elif state == 'pending' and tx.acquirer_id.validation == 'manual':
message = '<p>%s</p>' % _('Your transaction is waiting confirmation.')
if tx.acquirer_id.post_msg:
message += tx.acquirer_id.post_msg
elif state == 'error':
message = '<p>%s</p>' % _('An error occurred during the transaction.')
validation = tx.acquirer_id.validation
return {
'state': state,
'message': message,
'validation': validation
}
@http.route('/shop/payment/validate', type='http', auth="public", website=True)
def payment_validate(self, transaction_id=None, sale_order_id=None, **post):
""" Method that should be called by the server when receiving an update
for a transaction. State at this point :
- UPDATE ME
"""
cr, uid, context = request.cr, request.uid, request.context
email_act = None
sale_order_obj = request.registry['sale.order']
if transaction_id is None:
tx = request.website.sale_get_transaction()
else:
tx = request.registry['payment.transaction'].browse(cr, uid, transaction_id, context=context)
if sale_order_id is None:
order = request.website.sale_get_order(context=context)
else:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order or (order.amount_total and not tx):
return request.redirect('/shop')
if (not order.amount_total and not tx) or tx.state in ['pending', 'done']:
if (not order.amount_total and not tx):
# Orders are confirmed by payment transactions, but there is none for free orders,
# (e.g. free events), so confirm immediately
order.with_context(dict(context, send_email=True)).action_button_confirm()
elif tx and tx.state == 'cancel':
# cancel the quotation
sale_order_obj.action_cancel(cr, SUPERUSER_ID, [order.id], context=request.context)
# clean context and session, then redirect to the confirmation page
request.website.sale_reset(context=context)
if tx and tx.state == 'draft':
return request.redirect('/shop')
return request.redirect('/shop/confirmation')
@http.route(['/shop/confirmation'], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
""" End of checkout process controller. Confirmation is basically seing
the status of a sale.order. State at this point :
- should not have any context / session info: clean them
- take a sale.order id, because we request a sale.order and are not
session dependent anymore
"""
cr, uid, context = request.cr, request.uid, request.context
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
else:
return request.redirect('/shop')
return request.website.render("website_sale.confirmation", {'order': order})
#------------------------------------------------------
# Edit
#------------------------------------------------------
@http.route(['/shop/add_product'], type='http', auth="user", methods=['POST'], website=True)
def add_product(self, name=None, category=0, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
if not name:
name = _("New Product")
product_obj = request.registry.get('product.product')
product_id = product_obj.create(cr, uid, { 'name': name, 'public_categ_ids': category }, context=context)
product = product_obj.browse(cr, uid, product_id, context=context)
return request.redirect("/shop/product/%s?enable_editor=1" % slug(product.product_tmpl_id))
@http.route(['/shop/change_styles'], type='json', auth="public")
def change_styles(self, id, style_id):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
remove = []
active = False
for style in product.website_style_ids:
if style.id == style_id:
remove.append(style.id)
active = True
break
style = request.registry.get('product.style').browse(request.cr, request.uid, style_id, context=request.context)
if remove:
product.write({'website_style_ids': [(3, rid) for rid in remove]})
if not active:
product.write({'website_style_ids': [(4, style.id)]})
return not active
@http.route(['/shop/change_sequence'], type='json', auth="public")
def change_sequence(self, id, sequence):
product_obj = request.registry.get('product.template')
if sequence == "top":
product_obj.set_sequence_top(request.cr, request.uid, [id], context=request.context)
elif sequence == "bottom":
product_obj.set_sequence_bottom(request.cr, request.uid, [id], context=request.context)
elif sequence == "up":
product_obj.set_sequence_up(request.cr, request.uid, [id], context=request.context)
elif sequence == "down":
product_obj.set_sequence_down(request.cr, request.uid, [id], context=request.context)
@http.route(['/shop/change_size'], type='json', auth="public")
def change_size(self, id, x, y):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
return product.write({'website_size_x': x, 'website_size_y': y})
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
ret = []
for line in order_lines:
ret.append({
'id': line.order_id and line.order_id.id,
'sku': line.product_id.id,
'name': line.product_id.name or '-',
'category': line.product_id.categ_id and line.product_id.categ_id.name or '-',
'price': line.price_unit,
'quantity': line.product_uom_qty,
})
return ret
@http.route(['/shop/tracking_last_order'], type='json', auth="public")
def tracking_cart(self, **post):
""" return data about order in JSON needed for google analytics"""
cr, context = request.cr, request.context
ret = {}
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
ret['transaction'] = {
'id': sale_order_id,
'affiliation': order.company_id.name,
'revenue': order.amount_total,
'currency': order.currency_id.name
}
ret['lines'] = self.order_lines_2_google_api(order.order_line)
return ret
@http.route(['/shop/get_unit_price'], type='json', auth="public", methods=['POST'], website=True)
def get_unit_price(self, product_ids, add_qty, use_order_pricelist=False, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
products = pool['product.product'].browse(cr, uid, product_ids, context=context)
partner = pool['res.users'].browse(cr, uid, uid, context=context).partner_id
if use_order_pricelist:
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
else:
pricelist_id = partner.property_product_pricelist.id
prices = pool['product.pricelist'].price_rule_get_multi(cr, uid, [], [(product, add_qty, partner) for product in products], context=context)
return {product_id: prices[product_id][pricelist_id][0] for product_id in product_ids}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| 44.834755
| 164
| 0.602307
|
0a43a3e223b2f5914d94320b59bd86a3518e27a7
| 11,938
|
py
|
Python
|
Deprecated Working Pair/CLIENT/main.py
|
albert752/PBE
|
3ea7c1659df77f338f31e4d16b2323afdb26d53d
|
[
"MIT"
] | null | null | null |
Deprecated Working Pair/CLIENT/main.py
|
albert752/PBE
|
3ea7c1659df77f338f31e4d16b2323afdb26d53d
|
[
"MIT"
] | 1
|
2018-11-14T15:40:11.000Z
|
2018-11-14T15:40:11.000Z
|
Deprecated Working Pair/CLIENT/main.py
|
albert752/PBE
|
3ea7c1659df77f338f31e4d16b2323afdb26d53d
|
[
"MIT"
] | null | null | null |
from ReaderThread import ReaderThread
from query.QueryThread import QueryThreader
import gi, os, sys
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gtk, GObject, GdkPixbuf, Gdk
from pprint import pprint as pp
WORKINGDIR = os.getcwd()
tickPath = os.path.join(WORKINGDIR, "styles", "icons", "tick.png")
crossPath = os.path.join(WORKINGDIR, "styles", "icons", "cross.png")
defaultText = "Please identify yourself"
class Window(Gtk.Window):
def __init__(self):
if len(sys.argv) > 1:
self.test = sys.argv[1] == "test"
else:
self.test = False
# ---- User Variables ----
# ----- Global window parameters -----
Gtk.Window.__init__(self, title="UPC")
self.connect("destroy", Gtk.main_quit)
self.set_border_width(20)
self.set_resizable(False)
# ----- CSS Style -----
style_provider = Gtk.CssProvider()
style_provider.load_from_path('./styles/css/styles.css')
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(), style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
# ----- SetUp window parameters -----
# Main box
self.vlipbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
self.entry_ip = Gtk.Entry()
self.entry_ip.set_text("localhost:8081")
self.entry_ip.connect("key-release-event", self.on_key_ip_release)
self.vlipbox.pack_start(self.entry_ip, True, True, 0)
self.add(self.vlipbox)
# ----- LogIn window parameters -----
# Main box
self.hloginbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
#self.add(self.hloginbox)
self.vloginbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
self.hloginbox.pack_start(self.vloginbox, True, True, 0)
self.hbuttonbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
# Label UID
self.uid_label = Gtk.Label(label=defaultText, justify=Gtk.Justification.LEFT)
self.vloginbox.pack_start(self.uid_label, True, True, 0)
# Clear button
self.clear_button = Gtk.Button(label="Clear")
self.clear_button.connect("clicked", self.on_clear_clicked)
self.hbuttonbox.pack_start(self.clear_button, True, True, 0)
self.clear_button.set_name("clear_button")
self.clear_button.set_sensitive(False)
# OK button
self.ok_button = Gtk.Button(label="Ok!")
self.ok_button.connect("clicked", self.on_ok_clicked)
self.hbuttonbox.pack_start(self.ok_button, True, True, 0)
self.ok_button.set_name("ok_button")
self.vloginbox.pack_start(self.hbuttonbox, True, True, 10)
self.ok_button.set_sensitive(False)
# Icon
self.image = Gtk.Image()
self.tick = GdkPixbuf.Pixbuf.new_from_file_at_size(tickPath, 40, 40)
self.cross = GdkPixbuf.Pixbuf.new_from_file_at_size(crossPath, 40, 40)
self.image.set_from_pixbuf(self.cross)
self.image.set_name("imager_name")
self.hloginbox.pack_start(self.image, True, True, 10)
# ----- User window parameters -----
# Main boxes
self.vuserbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
self.huserbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.vuserbox.pack_start(self.huserbox, True, True, 0)
# Labels
self.greater_label = Gtk.Label(label="Welcome ", justify=Gtk.Justification.LEFT)
self.huserbox.pack_start(self.greater_label, True, True, 0)
self.username_label = Gtk.Label(justify=Gtk.Justification.LEFT)
self.huserbox.pack_start(self.username_label, True, True, 0)
self.username_label.set_name("username_label")
# LogOut Button
self.logout_button = Gtk.Button(label="Logout")
self.logout_button.connect("clicked", self.on_logout_clicked)
self.huserbox.pack_start(self.logout_button, False, False, 50)
self.logout_button.set_name("logout_button")
# TextBox + label
self.entry = Gtk.Entry()
self.entry.set_text("Type in your query")
self.entry.connect("key-release-event", self.on_key_release)
self.vuserbox.pack_start(self.entry, True, True, 20)
self.entry.set_icon_from_icon_name(Gtk.EntryIconPosition.PRIMARY, "system-search-symbolic")
self.message_label = Gtk.Label(label="Enter your query", justify=Gtk.Justification.CENTER)
self.message_label.set_name("padded_label")
self.vuserbox.pack_start(self.message_label, True, True, 0)
# Tree view and scrolled window
self.scrolled_window = Gtk.ScrolledWindow()
self.scrolled_window.set_border_width(0)
self.scrolled_window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.ALWAYS)
self.timetables_store = Gtk.ListStore(str, str, str, str)
self.marks_class_store = Gtk.ListStore(str, str, int)
self.tree = Gtk.TreeView()
self.scrolled_window.add(self.tree)
self.scrolled_window.set_min_content_height(300)
self.titles = {
"Timetables": ["day", "hour", "subject", "room"],
"Tasks": ["data", "subject", "name"],
"Marks": ["subject", "name", "mark"]
}
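# Note: on_tree() picks the ListStore from the number of keys in each response row
# (4 keys -> timetables_store, 3 keys -> marks_class_store).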
# ----- Start the reader thread and set up the query thread -----
self.reader = ReaderThread(self.test, self.on_uid)
self.reader.startReader()
self.server = None
def on_key_ip_release(self, widget, ev, data=None):
""" Function connected to the key release of the ip entry, if it's escape, it clears the text and if it's enter
it sets the ip
:param widget: The entry itself
:return:
"""
if ev.keyval == Gdk.KEY_Escape:
widget.set_text("")
self.show_all()
elif ev.keyval == Gdk.KEY_Return: # Value for enter
ip = widget.get_text().split(':')[0]
port = widget.get_text().split(':')[1]
self.server = QueryThreader(ip, port)
self.remove(self.vlipbox)
self.add(self.hloginbox)
self.show_all()
def on_uid(self, UID):
""" Handler for the ReaderThread module to change the label text and the image pixel source
:param UID: User Identifier of the read card
:return: None
"""
self.uid_label.set_text(UID.center(len(defaultText)))
self.image.set_from_pixbuf(self.tick)
self.ok_button.set_sensitive(True)
self.clear_button.set_sensitive(True)
def on_clear_clicked(self, widget):
""" Function connected to the clicked signal of the clear button. Restores default values for the label and
the image. Restarts the reader thread.
:param widget: The button itself
:return:
"""
self.login_to_default()
self.reader.startReader()
def on_ok_clicked(self, widget):
""" Function connected to the clicked signal of the ok button. Restores default values of the log in page,
changes to the user page and requests the user name from the server.
:param widget: The button itself
:return: None
"""
self.server.get_username(self.uid_label.get_text().replace(" ", ""), self.on_username_label)
self.login_to_default()
self.remove(self.hloginbox)
self.add(self.vuserbox)
self.show_all()
self.resize(400, 480)
def on_username_label(self, username):
""" Handler for the QueryThread module to change the label text and t
:param username: Users full name
:return: None
"""
self.username_label.set_text(username)
def login_to_default(self):
""" Restores default LogIn window parameters
:param
:return: None
"""
self.uid_label.set_text("Please identify yourself")
self.image.set_from_pixbuf(self.cross)
self.ok_button.set_sensitive(False)
self.clear_button.set_sensitive(False)
def on_logout_clicked(self, widget):
""" Function connected to the clicked signal of the LogOut button. Restores default LogIn window
:param widget: The button itself
:return: None
"""
self.user_to_default()
self.remove(self.vuserbox)
self.add(self.hloginbox)
self.show_all()
self.reader.startReader()
self.message_label.set_name("padded_label")
self.resize(232, 90) # Appropriate values for the login screen
def on_key_release(self, widget, ev, data=None):
""" Function connected to the key release of the entry, if it's escape, it clears the text and if it's enter
it sends the command.
:param widget: The entry itself
:return:
"""
if ev.keyval == Gdk.KEY_Escape:
widget.set_text("")
self.show_all()
elif ev.keyval == Gdk.KEY_Return: # Value for enter
print(widget.get_text())
self.server.send_query(self.on_tree, widget.get_text())
print(self.get_size())
def on_tree(self, data, type_of_query):
""" Handler for all types of query that afect the tree view
:param data: vector of dicts
:return None
"""
if len(data[0].keys()) == 4:
self.tree.set_model(self.timetables_store)
elif len(data[0].keys()) == 3:
self.tree.set_model(self.marks_class_store)
# Check if is the first time
if len(self.timetables_store) == 0 and len(self.marks_class_store) == 0:
self.vuserbox.pack_start(self.scrolled_window, True, True, 20)
self.show_all()
self._clear_tree_titles()
self._set_tree_titles(data, type_of_query)
self._set_store_data(data, type_of_query)
def user_to_default(self):
""" Restores default User window parameters
:param
:return: None
"""
self._clear_tree_titles()
self.timetables_store.clear()
self.marks_class_store.clear()
self.vuserbox.remove(self.scrolled_window)
def _clear_tree_titles(self):
""" Removes the titles from the tree
:param
:return: None
"""
cols = self.tree.get_columns()
for col in cols:
self.tree.remove_column(col)
def _set_store_data(self, data, type_of_query):
"""Sets the data to be displayed to the store and the colours
:param: data: response from the server, a list of rows (dictionaries)
type_of_query: key for the self.titles dict
:return: None
"""
for i, row in enumerate(data):
aux = []
for title in self.titles[type_of_query]:
aux.append(row[title])
if i % 2 == 0:
background_color = "#fff"
else:
background_color = "#bbb"
# self.tree.get_model().append(aux+(background_color,))
self.tree.get_model().append(aux)
def _set_tree_titles(self, data, type_of_query):
"""Sets the titles of the data to be displayed to the store
:param: type_of_query: key for the self.titles dict
:return: None
"""
titles = self.titles[type_of_query]
self.message_label.set_text(type_of_query)
self.message_label.set_name("colection_label")
self.tree.get_model().clear()
for i, title in enumerate(titles):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(title, renderer, text=i)
#column.pack_start(cellrenderertext, True)
#column.add_attribute(cellrenderertext, "text", i)
#column.add_attribute(cellrenderertext, "background", 2)
self.tree.append_column(column)
if __name__ == '__main__':
win = Window()
win.show_all()
Gtk.main()
| 38.140575
| 119
| 0.636455
|
d1f4c48dddad0af40f739c187f1761bcc57c828e
| 65,002
|
py
|
Python
|
Config.py
|
Crimdahl/timespinnerbingo
|
66199cea2553eab114c2421373f36dec99d04fe3
|
[
"MIT"
] | 1
|
2021-03-23T15:25:07.000Z
|
2021-03-23T15:25:07.000Z
|
Config.py
|
Crimdahl/timespinnerbingo
|
66199cea2553eab114c2421373f36dec99d04fe3
|
[
"MIT"
] | 10
|
2021-03-20T21:58:06.000Z
|
2021-08-29T05:56:47.000Z
|
Config.py
|
Crimdahl/timespinnerbingo
|
66199cea2553eab114c2421373f36dec99d04fe3
|
[
"MIT"
] | null | null | null |
import os
import json
import codecs
import sys
from time import time
from tkinter import messagebox
if getattr(sys, "frozen", False):
CONFIG_PATH = os.path.join(os.path.dirname(sys.executable), "config.txt")
elif __file__:
CONFIG_PATH = os.path.join(os.path.dirname(__file__), "config.txt")
class Config(object):
def __init__(self):
# Apply default settings
self.CONFIG_VERSION = "1.1.4"
# If a settings file exists, apply the settings from that file
try:
with codecs.open(CONFIG_PATH, encoding="utf-8-sig", mode="r") as f:
new_settings = json.load(f)
if "CONFIG_VERSION" in new_settings.keys() and new_settings["CONFIG_VERSION"] == \
self.CONFIG_VERSION:
new_settings["seed"] = int(time() * 1000)
self.__dict__ = new_settings
else:
messagebox.showerror("Notice", "A settings file from a different version of TimespinnerBingo "
"has been detected - your settings may be reset.")
raise IOError
except IOError:
self.tile_data = {
"advisor hat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"advisor robe": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"aelana": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"alchemy backpack": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"ancient coin": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "shop items"
]
},
"ancient frail": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"antheia": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"antidote": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"arm cannon": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: piercing", "element: light", "set: gun"
]
},
"aura blast": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: aura", "set: blue"
]
},
"aura serpent": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: aura", "set: empire"
]
},
"aura up": {
"enabled": True,
"path": "icons\\",
"tags": [
"stat upgrades"
]
},
"azure stole": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops", "enemy drops: rare"
]
},
"azure queen": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"baby cheveur": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"barbed anemone": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"berry pick-mi-up": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"berry pick-mi-up+": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables"
]
},
"bird statue": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"biscuit": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "shop items"
]
},
"blade orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: sharp", "set: blade"
]
},
"bleak ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: blue"
]
},
"blood orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: dark", "set: blood"
]
},
"blossom automaton": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"blue orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: blunt", "set: blue"
]
},
"bombardment": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: blunt", "set: shattered"
]
},
"buckle hat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"cantoran": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses", "quest locked"
]
},
"captain's helmet": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"captain's uniform": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"celestial sash": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "progression: vertical"
]
},
"chaos blades": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: sharp", "set: eye"
]
},
"chaos horn": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops", "enemy drops: rare"
]
},
"chaos rose": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"chaos stole": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops", "enemy drops: rare"
]
},
"cheveur au vin": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked", "shop items"
]
},
"cheveur breast": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items", "enemy drops"
]
},
"cheveur dragoon": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"cheveur drumstick": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items", "enemy drops"
]
},
"cheveur feather": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items", "enemy drops"
]
},
"cheveur fly": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"cheveur hatchling": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"cheveur helicopter": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"cheveur plume": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops"
]
},
"cheveur spring": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"cheveur tank": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"colossal blade": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: sharp", "set: blade"
]
},
"colossal hammer": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: blunt", "set: iron"
]
},
"combat helmet": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats", "enemy drops"
]
},
"conviction": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"copper breastplate": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "enemy drops"
]
},
"copper helmet": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "enemy drops"
]
},
"copper wyvern": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"corruption": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: dark", "set: nether"
]
},
"creeping fungus": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"crimson vortex": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: dark", "set: blood"
]
},
"dark flames": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: dark", "set: umbra"
]
},
"demon": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars"
]
},
"demon guard": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"devil's vine": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"djinn": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses", "vanilla only"
]
},
"djinn inferno": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: fire", "set: forbidden"
]
},
"dragoon armor": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "quest items", "quest locked"
]
},
"dragoon helmet": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats", "enemy drops"
]
},
"dream wisp": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "enemy drops: rare"
]
},
"dusk ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: umbra"
]
},
"economizer ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: gun"
]
},
"ectoplasm": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"eel meat": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "quest items"
]
},
"elemental beads": {
"enabled": True,
"path": "icons\\",
"tags": [
"crafting", "enemy drops", "enemy drops: rare"
]
},
"elevator keycard": {
"enabled": True,
"path": "icons\\",
"tags": [
"progression", "relics"
]
},
"empire crown": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"empire giantess": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"empire knight": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"empire orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: aura", "set: empire"
]
},
"empire sniper": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"empress cake": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"empress robe": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"engineer goggles": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats", "enemy drops"
]
},
"essence crystal": {
"enabled": True,
"path": "icons\\",
"tags": [
"crafting", "shop items", "quest items"
]
},
"eternal brooch": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "relics: cosmetics"
]
},
"eternal coat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"eternal crown": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"ether": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"experiment 11": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"experiment 13": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"eye orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: sharp", "set: eye"
]
},
"fanged anemone": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"feline sentry": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"fetid wyvern": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"fiend": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"filigree clasp": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"filigree tea": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"fire orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: fire", "set: fire"
]
},
"flesh arachnid": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"fledgling warbird": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"food synthesizer": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"forbidden tome": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: sharp", "set: forbidden"
]
},
"freshwater eel": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"fried cheveur": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked", "shop items"
]
},
"frozen spires": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: ice", "set: ice"
]
},
"galactic sage": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"galaxy earrings": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"galaxy stone": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"gas mask": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression"
]
},
"genza": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"gilded egg": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "shop items"
]
},
"glass pumpkin": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "danger: vanilla"
]
},
"goddess brooch": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "relics: cosmetics"
]
},
"gold necklace": {
"enabled": True,
"path": "icons\\",
"tags": [
"crafting", "shop items"
]
},
"gold ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"crafting", "shop items"
]
},
"golden idol": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"greed brooch": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "relics: cosmetics", "kickstarter exclusive"
]
},
"griffin": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars"
]
},
"gun orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: sharp", "set: gun"
]
},
"harvest rat": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"health up": {
"enabled": True,
"path": "icons\\",
"tags": [
"stat upgrades"
]
},
"helix toad": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"hell gazer": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"herb": {
"enabled": True,
"path": "icons\\",
"tags": [
"crafting", "enemy drops", "quest items"
]
},
"hi-ether": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"hi-potion": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables"
]
},
"historical documents": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest locked", "quest items"
]
},
"hope ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: radiant"
]
},
"ice adept": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"ice orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: ice", "set: ice"
]
},
"ichor": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"icicle ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "element: ice", "set: ice"
]
},
"infernal flames": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: fire", "set: fire"
]
},
"iron orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: blunt", "set: iron"
]
},
"jerky": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables"
]
},
"jewelry box": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "flags: jewelry box start"
]
},
"justice": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"kain": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: rare"
]
},
"keycard a": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "keycards"
]
},
"keycard b": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "keycards"
]
},
"keycard c": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "keycards"
]
},
"keycard d": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "keycards"
]
},
"keycard v": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "keycards"
]
},
"kobo": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars"
]
},
"lab coat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"lab glasses": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"lachiem archer": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"lachiem engineer": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"lachiem giantess": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"lachiem knight": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"lachiemi sun": {
"enabled": False,
"path": "icons\\",
"tags": [
"consumables"
]
},
"leather helmet": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats", "quest items", "quest locked"
]
},
"leather jerkin": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "enemy drops"
]
},
"librarian hat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"librarian robe": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"lightwall": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: light", "set: radiant", "progression", "progression: vertical"
]
},
"maw": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"merchant crow": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars", "danger: vanilla"
]
},
"metal wristband": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"meteor sparrow": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"meyef": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars", "flags: meyef start"
]
},
"midnight cloak": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"military armor": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "enemy drops"
]
},
"mind refresh ultra": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables"
]
},
"mind refresh": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"mobile blossom": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"mother of pearl": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops", "enemy drops: rare"
]
},
"mushroom tower": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"mushroom": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "quest items"
]
},
"nether orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: blunt", "element: dark", "set: nether"
]
},
"nethershade": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: rare"
]
},
"nightmare": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"nuvius": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"nymph hairband": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops"
]
},
"oculus ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: eye"
]
},
"old coat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"orange juice": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"ornagy rut": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: rare", "vanilla only"
]
},
"pendulum": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"plantbat": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"plasma core": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items", "enemy drops"
]
},
"plasma crystal": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"plasma geyser": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: plasma", "set: plasma"
]
},
"plasma iv bag": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"plasma orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: plasma", "set: plasma"
]
},
"plasma pod": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"plump maggot": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest items", "enemy drops"
]
},
"pointy hat": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"poison moth": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"potion": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"princess dress": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"pyro ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "element: fire", "set: fire"
]
},
"radiant orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: light", "set: radiant"
]
},
"rotten tail": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"royal advisor": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"royal casserole": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked", "shop items"
]
},
"royal demon": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"royal guard": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"royal ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: plasma"
]
},
"ryshia": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: rare"
]
},
"sand bottle": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"sand up": {
"enabled": True,
"path": "icons\\",
"tags": [
"stat upgrades"
]
},
"sand vial": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops"
]
},
"sanguine ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: blood"
]
},
"sauteed wyvern tail": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked", "shop items"
]
},
"savage cheveur": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"scythe ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "element: sharp"
]
},
"security guard": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"security vest": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors", "enemy drops"
]
},
"security visor": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats", "enemy drops"
]
},
"selen's bangle": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets"
]
},
"security turret": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"shadow seal": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: nether"
]
},
"shattered orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: blunt", "set: shattered"
]
},
"shield ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: iron"
]
},
"shiny rock": {
"enabled": True,
"path": "icons\\",
"tags": [
"shop items", "equipment", "equipment: trinkets"
]
},
"silence ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: shattered"
]
},
"silver ore": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items"
]
},
"siren ink": {
"enabled": True,
"path": "icons\\",
"tags": [
"quest items", "enemy drops"
]
},
"siren": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"soul scanner": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics"
]
},
"spaghetti": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked"
]
},
"sporevine": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"sprite": {
"enabled": True,
"path": "icons\\",
"tags": [
"familiars"
]
},
"star of lachiem": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: empire"
]
},
"starship engineer": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"storm eye": {
"enabled": True,
"path": "icons\\",
"tags": [
"spells", "element: sharp", "set: wind"
]
},
"succubus hairpin": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "progression: vertical"
]
},
"sun ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: forbidden"
]
},
"sunglasses": {
"enabled": True,
"path": "icons\\",
"tags": [
"shop items", "equipment", "equipment: hats"
]
},
"synthetic plume": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: trinkets", "enemy drops"
]
},
"tablet": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics"
]
},
"tailwind ring": {
"enabled": True,
"path": "icons\\",
"tags": [
"passives", "set: wind"
]
},
"talaria attachment": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression", "flags: fast start"
]
},
"tenebrous moth": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"timespinner gear 1": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "timespinner pieces", "progression"
]
},
"timespinner gear 2": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "timespinner pieces", "progression"
]
},
"timespinner gear 3": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "timespinner pieces", "progression"
]
},
"timespinner spindle": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "timespinner pieces", "progression"
]
},
"timespinner wheel": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "timespinner pieces", "progression", "progression: vertical"
]
},
"traveller's cloak": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: body armors"
]
},
"trendy jacket": {
"enabled": True,
"path": "icons\\",
"tags": [
"shop items", "equipment", "equipment: body armors"
]
},
"twin pyramids": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression"
]
},
"umbra orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: dark", "set: umbra", "kickstarter exclusive"
]
},
"unagi roll": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "quest locked", "shop items"
]
},
"viletian crown": {
"enabled": True,
"path": "icons\\",
"tags": [
"equipment", "equipment: hats"
]
},
"vol terrillis": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"warp shard": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "shop items"
]
},
"water mask": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "progression"
]
},
"wind orb": {
"enabled": True,
"path": "icons\\",
"tags": [
"orbs", "element: sharp", "set: wind"
]
},
"worm blossom": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"wyrm brooch": {
"enabled": True,
"path": "icons\\",
"tags": [
"relics", "relics: cosmetics", "kickstarter exclusive"
]
},
"wyvern tail": {
"enabled": True,
"path": "icons\\",
"tags": [
"consumables", "enemy drops", "quest items"
]
},
"xarion": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: bosses"
]
},
"zeal": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: normal"
]
},
"zel": {
"enabled": True,
"path": "icons\\",
"tags": [
"enemies", "enemies: rare", "vanilla only"
]
}
}
self.tags = {}
# Auto-populate tags based on the above json data
for key in self.tile_data.keys():
for tag in self.tile_data[key]['tags']:
if tag not in self.tags.keys():
self.tags[tag] = {}
try:
self.tags[tag]['icons'].append(key)
except KeyError:
self.tags[tag]['icons'] = []
self.tags[tag]['icons'].append(key)
if tag not in ['vanilla only']:
self.tags[tag]['enabled'] = True
else:
self.tags[tag]['enabled'] = False
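            # Illustrative result of the loop above (not exhaustive):
            #   self.tags["familiars"] == {"icons": ["demon", "griffin", ...], "enabled": True}
            #   self.tags["vanilla only"]["enabled"] is False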
self.use_compact_mode = {"friendlyName": "Use Compact Mode", "settingtype": "generation", "value": False}
self.allow_duplicates = {"friendlyName": "Allow Duplicates", "settingtype": "generation", "value": False}
self.rows = {"friendlyName": "Number of Rows", "settingtype": "generation", "value": 5}
self.columns = {"friendlyName": "Number of Columns", "settingtype": "generation", "value": 5}
self.seed = int(time() * 1000)
# Save the settings to file
self.save_settings()
# Reload settings from file
def reload_settings(self):
with codecs.open(CONFIG_PATH, encoding="utf-8-sig", mode="r") as f:
            self.__dict__ = json.load(f)
return
# Save settings to file
def save_settings(self):
try:
with codecs.open(CONFIG_PATH, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, indent=4)
except Exception as e:
print("Error saving file: " + str(e))
return
# Getter
def get_tile_data(self):
return self.tile_data
def get_tag_data(self):
return self.tags
# Getter
def get_rows(self):
return self.rows
# Setter
def set_rows(self, arg):
if type(arg) is int:
self.rows["value"] = arg
else:
raise TypeError("Bad argument passed to the rows setter.")
return
# Getter
def get_columns(self):
return self.columns
# Setter
def set_columns(self, arg):
if type(arg) is int:
self.columns["value"] = arg
else:
raise TypeError("Bad argument passed to the columns setter.")
return
# Getter
def get_use_compact_mode(self):
return self.use_compact_mode
# Setter
def set_use_compact_mode(self, arg):
if type(arg) is bool:
self.use_compact_mode["value"] = arg
else:
raise TypeError("Bad argument passed to the useCompactMode setter.")
return
# Getter
def get_allow_duplicates(self):
return self.allow_duplicates
# Setter
def set_allow_duplicates(self, arg):
if type(arg) is bool:
self.allow_duplicates["value"] = arg
else:
raise TypeError("Bad argument passed to the allowDuplicates setter.")
return
# Getter
def get_seed(self):
return self.seed
# Setter
def set_seed(self, arg):
if type(arg) is int:
self.seed = arg
else:
raise TypeError("Bad argument passed to the setSeed setter.")
return
def items(self):
return self.__dict__.items()
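# Illustrative usage sketch (commented out; CONFIG_PATH and the results depend on the local machine):
# config = Config()            # loads config.txt if present, otherwise writes the defaults above
# config.set_rows(4)
# config.set_columns(4)
# config.set_seed(12345)
# config.save_settings()
# print(config.get_rows()["value"], len(config.get_tile_data()))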
| 34.085999
| 117
| 0.267484
|
db6cb4fdf7d52db969364979520c5d20b205df9b
| 1,908
|
py
|
Python
|
RaiderFetch/__init__.py
|
frc5024/RaiderFetch
|
20c816eedd9d9860291a46acea43af2ecc787c21
|
[
"MIT"
] | 2
|
2018-10-09T13:56:19.000Z
|
2018-10-09T13:56:21.000Z
|
RaiderFetch/__init__.py
|
frc5024/RaiderFetch
|
20c816eedd9d9860291a46acea43af2ecc787c21
|
[
"MIT"
] | null | null | null |
RaiderFetch/__init__.py
|
frc5024/RaiderFetch
|
20c816eedd9d9860291a46acea43af2ecc787c21
|
[
"MIT"
] | null | null | null |
import requests
import feedparser
class Fetcher(object):
"""(optional) Pass in a github account"""
def __init__(self, account="frc5024"):
self.account = str(account)
self.feed = None
## PUBLIC ##
def fetch(self):
self.feed = feedparser.parse("https://github.com/" + self.account + ".atom")
def getFeed(self):
output = []
for entry in self.feed.entries:
output.append(self.__toReadable(entry))
return output
def getMembers(self):
"""Gets all public members"""
memberlist = requests.get("https://api.github.com/orgs/"+ self.account +"/members").json()
output = []
for account in memberlist:
output.append({"username":account["login"], "avatar":account["avatar_url"], "url":account["html_url"]})
return output
    def isMember(self, username: str):
        """Checks whether username is a public member of the org (the API answers 204 if so)"""
        return requests.get("https://api.github.com/orgs/" + self.account + "/members/" + username).status_code == 204
def getRepos(self):
"""Get all public repos"""
repolist = requests.get("https://api.github.com/orgs/"+ self.account +"/repos").json()
output = []
for repo in repolist:
output.append({"name":repo["name"], "full_name":repo["full_name"], "description":repo["description"], "url":repo["html_url"], "ssh_url":repo["ssh_url"], "id":str(repo["id"]), "default_branch":repo["default_branch"], "language":str(repo["language"]), "counts":{"forks":repo["forks_count"], "stars":repo["stargazers_count"], "watchers":repo["watchers_count"],"size":repo["size"], "issues":repo["open_issues_count"]} })
return output
## PRIVATE ##
def __getEntry(self, entry):
return(self.feed.entries[entry])
def __toReadable(self, entry):
return {"link":entry["link"], "time":entry["updated"], "title":entry["title"]}
## DEBUG ##
# rf = Fetcher()
# # rf.fetch()
# # print(rf.getFeed())
# print(rf.getMembers())
# print(rf.isMember("ewpratten"))
# print(rf.getRepos())
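# Shape of getMembers() output, for reference (illustrative username):
# [{"username": "octocat", "avatar": "https://avatars.githubusercontent.com/...", "url": "https://github.com/octocat"}, ...]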
| 33.473684
| 420
| 0.666143
|
aac6a7f584513ea85a3c201e7b05a5f22c8ca4b5
| 4,221
|
py
|
Python
|
tests/test_sockets_message.py
|
marcusrbrown/aspen
|
5229bef0d9c76ec1c990532e8d4f8e8a858d3724
|
[
"MIT"
] | 1
|
2021-07-07T11:49:35.000Z
|
2021-07-07T11:49:35.000Z
|
tests/test_sockets_message.py
|
marcusrbrown/aspen
|
5229bef0d9c76ec1c990532e8d4f8e8a858d3724
|
[
"MIT"
] | null | null | null |
tests/test_sockets_message.py
|
marcusrbrown/aspen
|
5229bef0d9c76ec1c990532e8d4f8e8a858d3724
|
[
"MIT"
] | null | null | null |
from aspen.sockets.message import Message
from aspen.testing import assert_raises, attach_teardown
def test_message_can_be_instantiated_from_bytes():
expected = Message
actual = Message.from_bytes('3:::').__class__
assert actual is expected, actual
def test_from_bytes_too_few_colons_raises_SyntaxError():
exc = assert_raises(SyntaxError, Message.from_bytes, '3:')
expected = "This message has too few colons: 3:."
actual = exc.args[0]
assert actual == expected, actual
def test_from_bytes_data_part_is_optional():
message = Message.from_bytes('3::')
expected = ""
actual = message.data
assert actual == expected, actual
def test_from_bytes_too_many_colons_and_the_extras_end_up_in_the_data():
message = Message.from_bytes('3::::')
expected = ":"
actual = message.data
assert actual == expected, actual
def test_from_bytes_non_digit_type_raises_ValueError():
exc = assert_raises(ValueError, Message.from_bytes, 'foo:::')
expected = "The message type is not in 0..8: foo."
actual = exc.args[0]
assert actual == expected, actual
def test_from_bytes_type_too_small_raises_ValueError():
exc = assert_raises(ValueError, Message.from_bytes, '-1:::')
expected = "The message type is not in 0..8: -1."
actual = exc.args[0]
assert actual == expected, actual
def test_from_bytes_type_too_big_raises_ValueError():
exc = assert_raises(ValueError, Message.from_bytes, '9:::')
expected = "The message type is not in 0..8: 9."
actual = exc.args[0]
assert actual == expected, actual
def test_from_bytes_type_lower_bound_instantiable():
message = Message.from_bytes('0:::')
expected = 0
actual = message.type
assert actual == expected, actual
def test_from_bytes_type_upper_bound_instantiable():
message = Message.from_bytes('8:::')
expected = 8
actual = message.type
assert actual == expected, actual
def test_id_passes_through():
message = Message.from_bytes('3:deadbeef::')
expected = 'deadbeef'
actual = message.id
assert actual == expected, actual
def test_endpoint_passes_through():
message = Message.from_bytes('3:deadbeef:/cheese.sock:')
expected = '/cheese.sock'
actual = message.endpoint
assert actual == expected, actual
def test_data_passes_through():
message = Message.from_bytes('3:deadbeef:/cheese.sock:Greetings, program!')
expected = 'Greetings, program!'
actual = message.data
assert actual == expected, actual
def test_json_data_decoded():
message = Message.from_bytes('4:deadbeef:/cheese.sock:{"foo": "bar"}')
expected = {"foo": "bar"}
actual = message.data
assert actual == expected, actual
def test_json_roundtrip():
bytes = '4:deadbeef:/cheese.sock:{"foo": "bar"}'
message = Message.from_bytes(bytes)
expected = bytes
actual = str(message)
assert actual == expected, actual
def test_event_data_decoded():
message = Message.from_bytes('5:::{"name": "bar", "args": []}')
expected = {u'args': [], u'name': 'bar'}
actual = message.data
assert actual == expected, actual
def test_event_data_without_name_raises_ValueError():
exc = assert_raises( ValueError
, Message.from_bytes
, '5:::{"noom": "bar", "args": []}'
)
expected = "An event message must have a 'name' key."
actual = exc.args[0]
assert actual == expected, actual
def test_event_data_without_args_raises_ValueError():
exc = assert_raises( ValueError
, Message.from_bytes
, '5:::{"name": "bar", "arrrrgs": []}'
)
expected = "An event message must have an 'args' key."
actual = exc.args[0]
assert actual == expected, actual
def test_event_data_with_reserved_name_raises_ValueError():
exc = assert_raises( ValueError
, Message.from_bytes
, '5:::{"name": "connect", "args": []}'
)
expected = "That event name is reserved: connect."
actual = exc.args[0]
assert actual == expected, actual
attach_teardown(globals())
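# Wire format exercised by the tests above, summarised (type:id:endpoint:data):
#   '3:deadbeef:/cheese.sock:Greetings, program!'  -> type=3, id='deadbeef',
#       endpoint='/cheese.sock', data='Greetings, program!'
#   '4:::{"foo": "bar"}'                           -> JSON message, data == {"foo": "bar"}
#   '5:::{"name": "bar", "args": []}'              -> event message; 'name' and 'args' are required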
| 34.040323
| 79
| 0.657427
|
5e18e31eca0b6350084ec39d6b930c1c93ec2be5
| 20,071
|
py
|
Python
|
goblet/resources/routes.py
|
davidyum/goblet
|
11a0bdf2dcdd3e951e5106902e9551051d4e3227
|
[
"Apache-2.0"
] | null | null | null |
goblet/resources/routes.py
|
davidyum/goblet
|
11a0bdf2dcdd3e951e5106902e9551051d4e3227
|
[
"Apache-2.0"
] | null | null | null |
goblet/resources/routes.py
|
davidyum/goblet
|
11a0bdf2dcdd3e951e5106902e9551051d4e3227
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
from time import sleep
from marshmallow.schema import Schema
from ruamel import yaml
import base64
import logging
import re
from typing import get_type_hints
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
import goblet
from goblet.handler import Handler
from goblet.client import Client, get_default_project, get_default_location
from goblet.utils import get_g_dir
from goblet.config import GConfig
from goblet.common_cloud_actions import get_cloudrun_url
from googleapiclient.errors import HttpError
log = logging.getLogger("goblet.deployer")
log.setLevel(logging.INFO)
class ApiGateway(Handler):
"""Api Gateway instance, which includes api, api config, api gateway instances
https://cloud.google.com/api-gateway
"""
resource_type = "apigateway"
valid_backends = ["cloudfunction", "cloudrun"]
def __init__(self, app_name, resources=None, cors=None, backend="cloudfunction"):
self.backend = backend
self.name = self.format_name(app_name)
self.resources = resources or {}
self.cors = cors or {}
self._api_client = None
self.cloudfunction = f"https://{get_default_location()}-{get_default_project()}.cloudfunctions.net/{self.name}"
@property
def api_client(self):
if not self._api_client:
self._api_client = self._create_api_client()
return self._api_client
def format_name(self, name):
# ([a-z0-9-.]+) for api gateway name
return name.replace("_", "-")
def register_route(self, name, func, kwargs):
path = kwargs.pop("path")
methods = kwargs.pop("methods")
kwargs = kwargs.pop("kwargs")
if not kwargs.get("cors"):
kwargs["cors"] = self.cors
path_entries = self.resources.get(path, {})
for method in methods:
if path_entries.get(method):
raise ValueError(
"Duplicate method: '%s' detected for route: '%s'\n"
'between view functions: "%s" and "%s". A specific '
"method may only be specified once for "
"a particular path."
% (method, path, self.resources[path][method].function_name, name)
)
entry = RouteEntry(func, name, path, method, **kwargs)
path_entries[method] = entry
self.resources[path] = path_entries
def __call__(self, request, context=None):
method = request.method
path = request.path
entry = self.resources.get(path, {}).get(method)
if not entry:
# test param paths
for p in self.resources:
if "{" in p and self._matched_path(p, path):
entry = self.resources.get(p, {}).get(method)
# TODO: better handling
if not entry:
raise ValueError(f"No route found for {path} with {method}")
return entry(request)
@staticmethod
def _matched_path(org_path, path):
split_org = re.sub(r"{[\w]+}", "", org_path).split("/")
split_path = path.split("/")
if len(split_path) != len(split_org):
return False
for i, item in enumerate(split_org):
if item and split_path[i] != item:
return False
return True
def _create_api_client(self):
return Client(
"apigateway",
"v1",
calls="projects.locations.apis",
parent_schema="projects/{project_id}/locations/global",
)
def _create_config_client(self):
return Client(
"apigateway",
"v1",
calls="projects.locations.apis.configs",
parent_schema="projects/{project_id}/locations/global/apis/" + self.name,
)
def _patch_config_client(self):
return Client(
"apigateway",
"v1",
calls="projects.locations.apis.configs",
parent_schema="projects/{project_id}/locations/global/apis/"
+ self.name
+ "/configs/"
+ self.name,
)
def _create_gateway_client(self):
return Client(
"apigateway",
"v1",
calls="projects.locations.gateways",
parent_schema="projects/{project_id}/locations/{location_id}",
)
def _patch_gateway_client(self):
return Client(
"apigateway",
"v1",
calls="projects.locations.gateways",
parent_schema="projects/{project_id}/locations/{location_id}/gateways/"
+ self.name,
)
def _deploy(self, sourceUrl=None, entrypoint=None):
if len(self.resources) == 0:
return
log.info("deploying api......")
base_url = self.cloudfunction
if self.backend == "cloudrun":
base_url = get_cloudrun_url(self.name)
self.generate_openapi_spec(base_url)
try:
resp = self.api_client.execute("create", params={"apiId": self.name})
self.api_client.wait_for_operation(resp["name"])
except HttpError as e:
if e.resp.status == 409:
log.info("api already deployed")
else:
raise e
goblet_config = GConfig()
config = {
"openapiDocuments": [
{
"document": {
"path": f"{get_g_dir()}/{self.name}_openapi_spec.yml",
"contents": base64.b64encode(
open(
f"{get_g_dir()}/{self.name}_openapi_spec.yml", "rb"
).read()
).decode("utf-8"),
}
}
],
**(goblet_config.apiConfig or {}),
}
try:
config_version_name = self.name
self._create_config_client().execute(
"create", params={"apiConfigId": self.name, "body": config}
)
except HttpError as e:
if e.resp.status == 409:
log.info("updating api endpoints")
configs = self._create_config_client().execute("list")
# TODO: use hash
version = len(configs["apiConfigs"])
config_version_name = f"{self.name}-v{version}"
self._create_config_client().execute(
"create",
params={"apiConfigId": config_version_name, "body": config},
)
else:
raise e
gateway = {
"apiConfig": f"projects/{get_default_project()}/locations/global/apis/{self.name}/configs/{config_version_name}",
}
try:
gateway_resp = self._create_gateway_client().execute(
"create", params={"gatewayId": self.name, "body": gateway}
)
except HttpError as e:
if e.resp.status == 409:
log.info("updating gateway")
gateway_resp = self._patch_gateway_client().execute(
"patch",
parent_key="name",
params={"updateMask": "apiConfig", "body": gateway},
)
else:
raise e
if gateway_resp:
self._create_gateway_client().wait_for_operation(gateway_resp["name"])
log.info("api successfully deployed...")
gateway_resp = self._patch_gateway_client().execute("get", parent_key="name")
log.info(f"api endpoint is {gateway_resp['defaultHostname']}")
return
def destroy(self):
if len(self.resources) == 0:
return
# destroy api gateway
try:
gateway_client = Client(
"apigateway",
"v1",
calls="projects.locations.gateways",
parent_schema="projects/{project_id}/locations/{location_id}/gateways/"
+ self.name,
)
gateway_client.execute("delete", parent_key="name")
log.info("destroying api gateway......")
except HttpError as e:
if e.resp.status == 404:
log.info("api gateway already destroyed")
else:
raise e
# destroy api config
try:
configs = self._create_config_client().execute("list")
api_client = None
resp = {}
for c in configs.get("apiConfigs", []):
api_client = Client(
"apigateway",
"v1",
calls="projects.locations.apis.configs",
parent_schema="projects/{project_id}/locations/global/apis/"
+ self.name
+ "/configs/"
+ c["displayName"],
)
resp = api_client.execute("delete", parent_key="name")
log.info("api configs destroying....")
if api_client:
api_client.wait_for_operation(resp["name"])
sleep(10)
except HttpError as e:
if e.resp.status == 404:
log.info("api configs already destroyed")
else:
raise e
# destroy api
try:
api_client = Client(
"apigateway",
"v1",
calls="projects.locations.apis",
parent_schema="projects/{project_id}/locations/global/apis/"
+ self.name,
)
api_client.execute("delete", parent_key="name")
log.info("apis successfully destroyed......")
except HttpError as e:
if e.resp.status == 404:
log.info("api already destroyed")
else:
raise e
def generate_openapi_spec(self, cloudfunction):
config = GConfig()
spec = OpenApiSpec(
self.name,
cloudfunction,
security_definitions=config.securityDefinitions,
security=config.security,
)
spec.add_apigateway_routes(self.resources)
with open(f"{get_g_dir()}/{self.name}_openapi_spec.yml", "w") as f:
spec.write(f)
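# Illustrative sketch of registering and dispatching a route (hypothetical handler and
# request object; in practice the goblet app decorators assemble these kwargs):
# gateway = ApiGateway("my-app")
# gateway.register_route(
#     "get_home",
#     lambda: "hello",
#     {"path": "/home", "methods": ["GET"], "kwargs": {}},
# )
# # gateway(request) then resolves resources["/home"]["GET"] and calls the handler.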
PRIMITIVE_MAPPINGS = {str: "string", bool: "boolean", int: "integer"}
class OpenApiSpec:
def __init__(
self,
app_name,
cloudfunction,
version="1.0.0",
security_definitions=None,
security=None,
):
self.spec = OrderedDict()
self.app_name = app_name
self.cloudfunction = cloudfunction
self.version = version
self.spec["swagger"] = "2.0"
self.spec["info"] = {
"title": self.app_name,
"description": "Goblet Autogenerated Spec",
"version": self.version,
}
if security_definitions:
self.spec["securityDefinitions"] = security_definitions
self.spec["security"] = security or list(
map(lambda s: {s: []}, security_definitions)
)
self.spec["schemes"] = ["https"]
self.spec["produces"] = ["application/json"]
self.spec["paths"] = {}
self.component_spec = APISpec(
title="",
version="1.0.0",
openapi_version="2.0",
plugins=[MarshmallowPlugin()],
)
self.spec["definitions"] = {}
def add_component(self, component):
if component.__name__ in self.component_spec.components.schemas:
return
self.component_spec.components.schema(component.__name__, schema=component)
self.spec["definitions"] = self.component_spec.to_dict()["definitions"]
def add_apigateway_routes(self, apigateway):
for path, methods in apigateway.items():
for method, entry in methods.items():
self.add_route(entry)
def get_param_type(self, type_info, only_primititves=False):
if not type_info:
return {"type": "string"}
if type_info in PRIMITIVE_MAPPINGS.keys():
param_type = {"type": PRIMITIVE_MAPPINGS[type_info]}
elif issubclass(type_info, Schema) and not only_primititves:
self.add_component(type_info)
param_type = {"$ref": f"#/definitions/{type_info.__name__}"}
else:
raise ValueError(
f"param_type has type {type_info}. \
It must be of type {PRIMITIVE_MAPPINGS.values} or a dataclass inheriting from Schema"
)
return param_type
def add_route(self, entry):
method_spec = OrderedDict()
method_spec["x-google-backend"] = {
"address": entry.backend or self.cloudfunction,
"protocol": "h2",
"path_translation": "APPEND_PATH_TO_ADDRESS",
}
method_spec["operationId"] = entry.function_name
params = []
type_hints = get_type_hints(entry.route_function)
for param in entry.view_args:
type_info = type_hints.get(param, str)
param_type = self.get_param_type(type_info, only_primititves=True)
param_entry = {"in": "path", "name": param, "required": True, **param_type}
params.append(param_entry)
if entry.request_body:
if isinstance(entry.request_body, dict):
params.append(
{
"in": "body",
"name": "requestBody",
"schema": entry.request_body["schema"],
}
)
if entry.form_data:
params.append({"in": "formData", "name": "file", "type": "file"})
if params:
method_spec["parameters"] = params
# TODO: add query strings
return_type = type_hints.get("return")
content = {}
if return_type:
if return_type in PRIMITIVE_MAPPINGS.keys():
content = {"schema": {"type": PRIMITIVE_MAPPINGS[return_type]}}
# list
elif "typing.List" in str(return_type):
type_info = return_type.__args__[0]
param_type = self.get_param_type(type_info)
content = {"schema": {"type": "array", "items": {**param_type}}}
elif issubclass(return_type, Schema):
param_type = self.get_param_type(return_type)
content = {"schema": {**param_type}}
if entry.responses:
method_spec["responses"] = entry.responses
else:
method_spec["responses"] = {
"200": {"description": "A successful response", **content}
}
if entry.security:
method_spec["security"] = entry.security
path_exists = self.spec["paths"].get(entry.uri_pattern)
if path_exists:
self.spec["paths"][entry.uri_pattern][entry.method.lower()] = dict(
method_spec
)
else:
self.spec["paths"][entry.uri_pattern] = {
entry.method.lower(): dict(method_spec)
}
def write(self, file):
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)
yaml.YAML().dump(dict(self.spec), file)
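# Illustrative, commented-out sketch of writing a spec directly (hypothetical app name
# and backend URL):
# spec = OpenApiSpec("my-app", "https://region-project.cloudfunctions.net/my-app")
# spec.add_apigateway_routes(gateway.resources)
# with open("my-app_openapi_spec.yml", "w") as f:
#     spec.write(f)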
_PARAMS = re.compile(r"{\w+}")
class RouteEntry:
def __init__(
self,
route_function,
function_name,
path,
method,
api_key_required=None,
content_types=None,
cors=False,
**kwargs,
):
self.route_function = route_function
self.function_name = function_name
self.uri_pattern = path
self.method = method
self.api_key_required = api_key_required
self.request_body = kwargs.get("request_body")
self.form_data = kwargs.get("form_data")
self.responses = kwargs.get("responses")
self.backend = kwargs.get("backend")
self.security = kwargs.get("security")
#: A list of names to extract from path:
#: e.g, '/foo/{bar}/{baz}/qux -> ['bar', 'baz']
self.view_args = self._parse_view_args()
self.content_types = content_types
# cors is passed as either a boolean or a CORSConfig object. If it is a
# boolean it needs to be replaced with a real CORSConfig object to
# pass the typechecker. None in this context will not inject any cors
# headers, otherwise the CORSConfig object will determine which
# headers are injected.
        if cors is True:
            cors = CORSConfig()
elif cors is False:
cors = None
self.cors = cors
self.kwargs = {**kwargs}
def _extract_view_args(self, path):
components = path.split("/")
original_components = self.uri_pattern.split("/")
matches = 0
args = {}
for i, component in enumerate(components):
if component != original_components[i]:
args[self.view_args[matches]] = component
matches += 1
return args
def __call__(self, request):
# TODO: pass in args and kwargs and options
args = self._extract_view_args(request.path)
resp = self.route_function(**args)
return self._apply_cors(resp)
def _parse_view_args(self):
if "{" not in self.uri_pattern:
return []
# The [1:-1] slice is to remove the braces
# e.g {foobar} -> foobar
results = [r[1:-1] for r in _PARAMS.findall(self.uri_pattern)]
return results
def __eq__(self, other):
return self.__dict__ == other.__dict__
def _apply_cors(self, resp):
"""Apply cors to Response"""
if not self.cors:
return resp
# Apply to Response Obj
if isinstance(resp, goblet.Response):
resp.headers.update(self.cors.get_access_control_headers())
if isinstance(resp, tuple):
resp[2].update(self.cors.get_access_control_headers())
if isinstance(resp, str):
resp = goblet.Response(resp, headers=self.cors.get_access_control_headers())
return resp
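# Path-parameter handling above, worked through on an illustrative pattern:
#   uri_pattern '/items/{item_id}/tags/{tag}' -> view_args == ['item_id', 'tag']
#   _extract_view_args('/items/42/tags/blue') -> {'item_id': '42', 'tag': 'blue'}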
class CORSConfig(object):
"""A cors configuration to attach to a route."""
_REQUIRED_HEADERS = ["Content-Type", "Authorization"]
def __init__(
self,
allow_origin="*",
allow_headers=None,
expose_headers=None,
max_age=None,
allow_credentials=None,
):
self.allow_origin = allow_origin
if allow_headers is None:
allow_headers = set(self._REQUIRED_HEADERS)
else:
allow_headers = set(allow_headers + self._REQUIRED_HEADERS)
self._allow_headers = allow_headers
if expose_headers is None:
expose_headers = []
self._expose_headers = expose_headers
self._max_age = max_age
self._allow_credentials = allow_credentials
@property
def allow_headers(self):
return ",".join(sorted(self._allow_headers))
def get_access_control_headers(self):
headers = {
"Access-Control-Allow-Origin": self.allow_origin,
"Access-Control-Allow-Headers": self.allow_headers,
}
if self._expose_headers:
headers.update(
{"Access-Control-Expose-Headers": ",".join(self._expose_headers)}
)
if self._max_age is not None:
headers.update({"Access-Control-Max-Age": str(self._max_age)})
if self._allow_credentials is True:
headers.update({"Access-Control-Allow-Credentials": "true"})
return headers
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.get_access_control_headers() == other.get_access_control_headers()
)
return False
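# Default policy produced by the class above:
# CORSConfig().get_access_control_headers() == {
#     "Access-Control-Allow-Origin": "*",
#     "Access-Control-Allow-Headers": "Authorization,Content-Type",
# }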
| 35.461131
| 125
| 0.559663
|
fcc4645f49b23afa4f29a3f723772740bf9979e8
| 4,877
|
py
|
Python
|
demo.py
|
xintao222/PoseDetect
|
409fe7dc56f96771044fcf8a90fad1d771c53b78
|
[
"BSD-3-Clause"
] | 6
|
2020-12-27T01:59:31.000Z
|
2022-03-26T03:34:46.000Z
|
demo.py
|
xintao222/PoseDetect
|
409fe7dc56f96771044fcf8a90fad1d771c53b78
|
[
"BSD-3-Clause"
] | null | null | null |
demo.py
|
xintao222/PoseDetect
|
409fe7dc56f96771044fcf8a90fad1d771c53b78
|
[
"BSD-3-Clause"
] | 5
|
2020-11-11T08:19:17.000Z
|
2022-03-10T07:50:14.000Z
|
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import ImageLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
if not args.sp:
torch.multiprocessing.set_start_method('forkserver', force=True)
torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
import glob
inputdir='D:\\myFile\\data\\pose\\0712_pos_reg_myself\\im\\2\\'
args.outputpath='D:\\myFile\\data\\pose\\0712_pos_reg_myself\\json2\\'
input_folders = glob.glob(inputdir+'\\*')
for inputpath in input_folders:
input_file_name = inputpath.split('\\')[-1]
# inputpath = args.inputpath
inputlist = args.inputlist
mode = args.mode
if not os.path.exists(args.outputpath):
os.mkdir(args.outputpath)
if len(inputlist):
im_names = open(inputlist, 'r').readlines()
elif len(inputpath) and inputpath != '/':
for root, dirs, files in os.walk(inputpath):
im_names = files
root= root
else:
raise IOError('Error: must contain either --indir/--list')
# Load input images
data_loader = ImageLoader(root,im_names, batchSize=args.detbatch, format='yolo').start()
# Load detection loader
print('Loading YOLO model..')
sys.stdout.flush()
det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
det_processor = DetectionProcessor(det_loader).start()
# Load pose model
pose_dataset = Mscoco()
if args.fast_inference:
pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
else:
pose_model = InferenNet(4 * 1 + 1, pose_dataset)
pose_model.cuda()
pose_model.eval()
runtime_profile = {
'dt': [],
'pt': [],
'pn': []
}
# Init data writer
writer = DataWriter(args.save_video).start()
data_len = data_loader.length()
im_names_desc = tqdm(range(data_len))
batchSize = args.posebatch
for i in im_names_desc:
start_time = getTime()
with torch.no_grad():
(inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
if boxes is None or boxes.nelement() == 0:
writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
continue
ckpt_time, det_time = getTime(start_time)
runtime_profile['dt'].append(det_time)
# Pose Estimation
datalen = inps.size(0)
leftover = 0
if (datalen) % batchSize:
leftover = 1
num_batches = datalen // batchSize + leftover
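                # Ceiling division: e.g. datalen=10, batchSize=4 -> leftover=1, num_batches=3 (4+4+2)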
hm = []
for j in range(num_batches):
inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)].cuda()
hm_j = pose_model(inps_j)
hm.append(hm_j)
hm = torch.cat(hm)
ckpt_time, pose_time = getTime(ckpt_time)
runtime_profile['pt'].append(pose_time)
hm = hm.cpu()
writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
ckpt_time, post_time = getTime(ckpt_time)
runtime_profile['pn'].append(post_time)
if args.profile:
# TQDM
im_names_desc.set_description(
'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
)
print('===========================> Finish Model Running.')
if (args.save_img or args.save_video) and not args.vis_fast:
print('===========================> Rendering remaining images in the queue...')
print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
while(writer.running()):
pass
writer.stop()
final_result = writer.results()
dest_path = os.path.join(args.outputpath,input_file_name)
if not os.path.exists(dest_path):
os.mkdir(dest_path)
write_json(final_result, dest_path)
| 36.125926
| 148
| 0.57925
|
3c4ac18d847fd1fa36c2073d2d8591e17788b583
| 4,393
|
py
|
Python
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/holiday_parser_config.py
|
XiaoxiaoMa0815/Recognizers-Text
|
d9a4bc939348bd79b5982345255961dff5f356c6
|
[
"MIT"
] | 1
|
2020-12-02T03:35:04.000Z
|
2020-12-02T03:35:04.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/holiday_parser_config.py
|
XiaoxiaoMa0815/Recognizers-Text
|
d9a4bc939348bd79b5982345255961dff5f356c6
|
[
"MIT"
] | 76
|
2018-11-09T18:19:44.000Z
|
2019-08-20T20:29:53.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/holiday_parser_config.py
|
XiaoxiaoMa0815/Recognizers-Text
|
d9a4bc939348bd79b5982345255961dff5f356c6
|
[
"MIT"
] | 6
|
2017-05-04T17:24:59.000Z
|
2019-07-23T15:48:44.000Z
|
from typing import List, Dict, Callable
from datetime import datetime
from recognizers_text.utilities import RegExpUtility
from ..utilities import DateUtils, HolidayFunctions
from ..base_holiday import BaseHolidayParserConfiguration
from ...resources.spanish_date_time import SpanishDateTime
class SpanishHolidayParserConfiguration(BaseHolidayParserConfiguration):
@property
def holiday_names(self) -> Dict[str, List[str]]:
return self._holiday_names
@property
def holiday_regex_list(self) -> List[str]:
return self._holiday_regexes
@property
def holiday_func_dictionary(self) -> Dict[str, Callable[[int], datetime]]:
return self._holiday_func_dictionary
def __init__(self, config):
super().__init__()
self._holiday_regexes = [
RegExpUtility.get_safe_reg_exp(SpanishDateTime.HolidayRegex1),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.HolidayRegex2),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.HolidayRegex3)
]
self._holiday_names = SpanishDateTime.HolidayNames
self._variable_holidays_timex_dictionary = SpanishDateTime.VariableHolidaysTimexDictionary
self.next_prefix_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.NextPrefixRegex)
self.previous_prefix_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.PreviousPrefixRegex)
self.this_prefix_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.ThisPrefixRegex)
def _init_holiday_funcs(self) -> Dict[str, Callable[[int], datetime]]:
local = dict([
('padres', SpanishHolidayParserConfiguration.fathers_day),
('madres', SpanishHolidayParserConfiguration.mothers_day),
('acciondegracias', SpanishHolidayParserConfiguration.thanksgiving_day),
('trabajador', SpanishHolidayParserConfiguration.international_workers_day),
('delaraza', SpanishHolidayParserConfiguration.columbus_day),
('memoria', SpanishHolidayParserConfiguration.memorial_day),
('pascuas', SpanishHolidayParserConfiguration.easter_day),
('navidad', SpanishHolidayParserConfiguration.christmas_day),
('nochebuena', SpanishHolidayParserConfiguration.christmas_eve),
('añonuevo', SpanishHolidayParserConfiguration.new_year),
('nochevieja', SpanishHolidayParserConfiguration.new_year_eve),
('yuandan', SpanishHolidayParserConfiguration.new_year),
('maestro', SpanishHolidayParserConfiguration.teacher_day),
('todoslossantos', SpanishHolidayParserConfiguration.halloween_day),
('niño', SpanishHolidayParserConfiguration.children_day),
('mujer', SpanishHolidayParserConfiguration.female_day)
])
return {**super()._init_holiday_funcs(), **local}
@staticmethod
def new_year(year: int) -> datetime:
return datetime(year, 1, 1)
@staticmethod
def new_year_eve(year: int) -> datetime:
return datetime(year, 12, 31)
@staticmethod
def christmas_day(year: int) -> datetime:
return datetime(year, 12, 25)
@staticmethod
def christmas_eve(year: int) -> datetime:
return datetime(year, 12, 24)
@staticmethod
def female_day(year: int) -> datetime:
return datetime(year, 3, 8)
@staticmethod
def children_day(year: int) -> datetime:
return datetime(year, 6, 1)
@staticmethod
def halloween_day(year: int) -> datetime:
return datetime(year, 10, 31)
@staticmethod
def teacher_day(year: int) -> datetime:
return datetime(year, 9, 11)
@staticmethod
def easter_day(year: int) -> datetime:
return HolidayFunctions.calculate_holiday_by_easter(year)
def get_swift_year(self, text: str) -> int:
trimmed_text = text.strip().lower()
swift = -10
if self.next_prefix_regex.search(trimmed_text):
swift = 1
if self.previous_prefix_regex.search(trimmed_text):
swift = -1
if self.this_prefix_regex.search(trimmed_text):
swift = 0
return swift
def sanitize_holiday_token(self, holiday: str) -> str:
return holiday.replace(' ', '').replace('á', 'a').replace('é', 'e').replace('í', 'i').replace('ó', 'o').replace('ú', 'u')
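# A minimal usage sketch of the fixed-date holiday helpers above; the static
# methods need no configuration object, and the year below is arbitrary.
def _example_fixed_holidays():
    year = 2021
    assert SpanishHolidayParserConfiguration.christmas_day(year) == datetime(year, 12, 25)
    assert SpanishHolidayParserConfiguration.new_year_eve(year) == datetime(year, 12, 31)
    # Movable feasts are delegated to HolidayFunctions, e.g. Easter:
    assert SpanishHolidayParserConfiguration.easter_day(year) == \
        HolidayFunctions.calculate_holiday_by_easter(year)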
| 38.2
| 129
| 0.689051
|
7c5566ecc73162c53c9ec893bd93dc1858e337e7
| 55,624
|
py
|
Python
|
uvtools/plot.py
|
LBJ-Wade/uvtools
|
b1bbe5fd8cff06354bed6ca4ab195bf82b8db976
|
[
"MIT"
] | null | null | null |
uvtools/plot.py
|
LBJ-Wade/uvtools
|
b1bbe5fd8cff06354bed6ca4ab195bf82b8db976
|
[
"MIT"
] | 122
|
2017-06-26T21:09:41.000Z
|
2022-03-29T17:36:09.000Z
|
uvtools/plot.py
|
LBJ-Wade/uvtools
|
b1bbe5fd8cff06354bed6ca4ab195bf82b8db976
|
[
"MIT"
] | 1
|
2018-01-27T06:58:54.000Z
|
2018-01-27T06:58:54.000Z
|
import numpy as np
import warnings
from astropy import units
from scipy.stats import binned_statistic_2d
from . import utils
def data_mode(data, mode='abs'):
"""
Apply filter to data according to a chosen plotting mode.
Parameters
----------
data : array_like
Array of data to be plotted (normally complex floats).
mode : str, optional
Which transformation to apply to the data. Options are:
- 'phs': Phase angle.
- 'abs': Absolute value.
- 'real': Real value.
- 'imag': Imaginary value.
- 'log': Log (base-10) of absolute value.
Default: 'abs'.
Returns
-------
data : array_like
Data transformed according to the value of `mode`.
"""
if mode.startswith('phs'):
data = np.angle(data)
elif mode.startswith('abs'):
data = np.absolute(data)
elif mode.startswith('real'):
data = data.real
elif mode.startswith('imag'):
data = data.imag
elif mode.startswith('log'):
data = np.absolute(data)
data = np.log10(data)
else:
raise ValueError('Unrecognized plot mode.')
return data
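# A minimal usage sketch of `data_mode`, assuming a tiny invented complex
# array; the commented results follow directly from the definitions above.
def _example_data_mode():
    vis = np.array([1 + 1j, 0.5 - 0.5j])
    amp = data_mode(vis, mode='abs')      # [sqrt(2), sqrt(0.5)]
    phs = data_mode(vis, mode='phs')      # [+pi/4, -pi/4] radians
    log_amp = data_mode(vis, mode='log')  # log10 of the amplitudes
    return amp, phs, log_amp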
def waterfall(d, mode='log', vmin=None, vmax=None, drng=None, mx=None,
recenter=False, **kwargs):
"""
Generate a 2D waterfall plot.
Parameters
----------
d : array_like
2D array of data.
mode : str, optional
Which transformation to apply to the data before plotting. Options are:
- 'phs': Phase angle.
- 'abs': Absolute value.
- 'real': Real value.
- 'imag': Imaginary value.
- 'log': Log (base-10) of absolute value.
Default: 'log'.
vmin, vmax : float, optional
Minimum and maximum values of the color scale. If not set (and `mx` and
`drng` are not set), the min. and max. values of the data will be used.
        Note that the min. and max. values are the ones _after_ the data
have been transformed according to `mode`. So, if `mode='log'`, these
values are the min. and max. of log_10(data).
mx : float, optional
The max. value of the color scale in the plot (equivalent to vmax).
Cannot be specified at the same time as `vmin` and `vmax`.
drng : float, optional
The difference between the min. and max. values of the color scale in
the plot, `drng = mx - min`, where these are the min/max values after
applying the transformation specified by `mode`.
Cannot be specified at the same time as `vmin` and `vmax`.
recenter : bool, optional
Recenter the image data by shifting by 50% around a circle (useful for
recentering Fourier-transformed data). Default: False.
Returns
-------
plot : matplotlib.imshow
Waterfall plot.
"""
# import matplotlib
import pylab as plt
# Check validity of inputs
validity_msg = "Must specify either `vmin` and `vmax` *or* `mx` and `drng`."
if mx is not None or drng is not None:
assert vmin is None and vmax is None, validity_msg
if vmin is not None or vmax is not None:
assert mx is None and drng is None, validity_msg
mx = vmax
drng = vmax - vmin
# Fill masked array and recenter if requested
if np.ma.isMaskedArray(d):
d = d.filled(0)
if recenter:
import aipy
d = aipy.img.recenter(d, np.array(d.shape)/2)
# Apply requested transform to data
d = data_mode(d, mode=mode)
# Get min/max values for color scale
if mx is None:
mx = d.max()
if drng is None:
drng = mx - d.min()
mn = mx - drng
# Choose aspect ratio
if 'aspect' not in kwargs.keys():
kwargs['aspect'] = 'auto'
return plt.imshow(d, vmax=mx, vmin=mn, interpolation='nearest', **kwargs)
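# A minimal usage sketch of `waterfall`, assuming synthetic complex data of
# shape (60 times, 128 channels); `drng=3` clips the color scale to three
# orders of magnitude below the maximum of log10(|data|).
def _example_waterfall():
    import pylab as plt
    rng = np.random.default_rng(0)
    vis = rng.normal(size=(60, 128)) + 1j * rng.normal(size=(60, 128))
    waterfall(vis, mode='log', drng=3, cmap='inferno')
    plt.colorbar()
    plt.xlabel('Frequency channel')
    plt.ylabel('Time sample')
    plt.show()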
def plot_antpos(antpos, ants=None, xants=None, aspect_equal=True,
ant_numbers=True):
"""
Plot antenna x,y positions from a dictionary of antenna positions.
Parameters
----------
antpos : dict
Dictionary of antenna positions
ants : list, optional
A list of which antennas to plot. If not specified, all of the antennas
in `antpos` will be plotted. Default: None.
xants : list, optional
List of antennas to exclude from the plot. Default: None.
aspect_equal : bool, optional
Whether to make the width and height of the plot equal.
Default: True.
ant_numbers : bool, optional
Whether to add the antenna numbers to the plot.
Default: True
Returns
-------
plot : matplotlib.Axes
Plot of antenna x,y positions.
"""
import pylab as plt
if ants is None:
ants = antpos.keys()
if xants is not None:
ants = [ant for ant in ants if ant not in xants]
xs = [antpos[ant][0] for ant in ants]
ys = [antpos[ant][1] for ant in ants]
# Plot the antenna positions with black circles
plt.figure()
plt.scatter(xs, ys, marker='.', color='k', s=3000)
# Add antenna numbers
if ant_numbers:
for i, ant in enumerate(ants):
plt.text(xs[i], ys[i], ant, color='w', va='center', ha='center')
# Axis labels
plt.xlabel('X-position (m)')
plt.ylabel('Y-position (m)')
# Aspect ratio
if aspect_equal:
plt.axis('equal')
ax = plt.gca()
return ax
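# A minimal usage sketch of `plot_antpos` with an invented three-element
# layout; antenna numbers and positions are illustrative only.
def _example_plot_antpos():
    antpos = {
        0: (0.0, 0.0, 0.0),
        1: (14.6, 0.0, 0.0),
        2: (7.3, 12.6, 0.0),
    }
    # Exclude antenna 2 from the plot via `xants`.
    return plot_antpos(antpos, xants=[2], ant_numbers=True)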
def plot_phase_ratios(data, cmap='twilight'):
"""
Plot grid of waterfalls, each showing the phase of the product (V_1 V_2^*)
for bls 1 and 2.
Parameters
----------
data : dict
Nested dictionary of data; first key is baseline, second key is pol.
cmap : str, optional
Colormap to use for plots. Default: 'twilight'.
"""
import pylab as plt
    bls = list(data.keys())
    nbls = len(bls)
    pol = list(data[bls[0]].keys())[0]
    # Calculate no. rows and columns
    nratios = (nbls * (nbls - 1)) // 2
r = int(divmod(nratios,3)[0] + np.ceil(divmod(nratios,3)[1]/3.))
c = 3
# Construct list of blpairs
ncross = []
for k in range(nbls):
for i in range(k+1,nbls):
ncross.append((bls[k], bls[i]))
# Plot waterfall
fig = plt.figure(figsize=(16,12))
for i,k in enumerate(ncross):
ax = plt.subplot(r,c,i+1)
plt.title(str(k), color='magenta')
g = 1.0
waterfall(data[k[0]][pol]*np.conj(data[k[-1]][pol])*g,
mode='phs', cmap=cmap, mx=np.pi, drng=2*np.pi)
plt.grid(0)
if divmod(i,c)[-1] != 0:
ax.yaxis.set_visible(False)
if divmod(i,c)[0] != r-1:
ax.xaxis.set_visible(False)
cax = fig.add_axes([0.2, 0.06, 0.6, 0.01])
plt.colorbar(cax=cax, orientation='horizontal')
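# A minimal usage sketch of `plot_phase_ratios`, assuming a synthetic nested
# dict shaped like data[baseline][pol] -> 2D complex waterfall.
def _example_plot_phase_ratios():
    rng = np.random.default_rng(1)
    shape = (20, 64)

    def make_vis():
        return rng.normal(size=shape) + 1j * rng.normal(size=shape)

    data = {
        (0, 1): {'xx': make_vis()},
        (0, 2): {'xx': make_vis()},
        (1, 2): {'xx': make_vis()},
    }
    plot_phase_ratios(data, cmap='twilight')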
def omni_view(reds, vis, pol, integration=10, chan=500, norm=False,
cursor=True, save=None, colors=None, symbols=None, ex_ants=[],
title=''):
"""
Scatter plot of the real vs imaginary parts of all visibilities in a set
of redundant groups.
Parameters
----------
reds : list of lists
List of redundant baseline groups. Each group should be a list of
baselines, specified as an antenna-pair tuple.
vis : nested dict of array_like
Nested dictionary containing visibility data. The structure is defined
as: `vis[baseline][pol][integration, chan]`, where `integration` is
the index of a time sample and `chan` is the index of a frequency
channel.
pol : str
Which polarization to plot from the `vis` dict.
integration : int, optional
Which time integration to plot from the `vis` dict. Default: 10.
chan : int, optional
Which frequency channel to plot from the `vis` dict. Default: 500
norm : bool, optional
Whether to normalize each point by its absolute value. Default: False.
cursor : bool, optional
Whether to include an interactive cursor label in the plot.
Default: True
save : str, optional
If specified, the filename to save the plot to. Default: None
colors : list of str, optional
List of colors to cycle through.
Default: None (will use a default list).
symbols : list of str, optional
List of symbols to cycle through.
Default: None (will use a default list).
ex_ants : list, optional
List of antennas to skip plotting. Default: [].
title : str, optional
Figure title. Default: ''.
"""
import pylab as plt
# Set default values for colors and symbols
if not colors:
colors = ["#006BA4", "#FF7F0E", "#2CA02C", "#D61D28", "#9467BD",
"#8C564B", "#E377C2", "#7F7F7F", "#BCBD22", "#17BECF"]
if not symbols:
symbols = ["o", "v", "^", "<", ">", "*"]
points = []
sym = []
col = []
bl = []
ngps = len(reds)
if save:
plt.clf()
plt.cla()
# Loop over redundant groups
for i, gp in enumerate(reds):
c = colors[i%len(colors)]
        s = symbols[i // len(colors)]
for r in gp:
if np.any([ant in r for ant in ex_ants]): continue
try:
points.append(vis[r][pol][integration,chan])
bl.append(r)
except(KeyError):
points.append(np.conj(vis[r[::-1]][pol][integration,chan]))
bl.append(r[::-1])
sym.append(s)
col.append(c)
points = np.array(points)
max_x = 0
max_y = 0
fig, ax = plt.subplots(nrows=1, ncols=1)
# Loop over points
for i, pt in enumerate(points):
if norm:
ax.scatter(pt.real/np.abs(pt), pt.imag/np.abs(pt), c=col[i],
marker=sym[i], s=50, label='{}'.format(bl[i]))
else:
ax.scatter(pt.real, pt.imag, c=col[i], marker=sym[i], s=50,
label='{}'.format(bl[i]))
if np.abs(pt.real) > max_x: max_x = np.abs(pt.real)
if np.abs(pt.imag) > max_y: max_y = np.abs(pt.imag)
plt.suptitle(title)
# Choose scale according to whether normalized
if norm:
plt.xlim(-1, 1)
plt.ylim(-1, 1)
else:
plt.xlim(-1.1 * max_x, 1.1 * max_x)
plt.ylim(-1.1 * max_y, 1.1 * max_y)
plt.ylabel('imag(V)')
plt.xlabel('real(V)')
if cursor:
from mpldatacursor import datacursor
datacursor(formatter='{label}'.format)
if save:
plt.savefig(save)
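# A minimal usage sketch of `omni_view`, assuming two invented redundant
# groups and a synthetic dict shaped like vis[baseline][pol][time, channel].
def _example_omni_view():
    rng = np.random.default_rng(2)
    shape = (20, 1024)

    def make_vis():
        return rng.normal(size=shape) + 1j * rng.normal(size=shape)

    reds = [[(0, 1), (1, 2)], [(0, 2)]]
    vis = {bl: {'xx': make_vis()} for group in reds for bl in group}
    omni_view(reds, vis, 'xx', integration=10, chan=500, cursor=False)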
def omni_view_gif(filenames, name='omni_movie.gif'):
"""
Create a gif from a list of images. Uses the `imageio` library.
Parameters
----------
filenames : list
Ordered list of full paths to images that will be added to the
animation.
name : str, optional
Output filename for animation. Default: 'omni_movie.gif'.
"""
import imageio
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave(name, images)
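# A minimal usage sketch of `omni_view_gif`; the file names are placeholders
# for frames previously written via `omni_view(..., save=...)`.
def _example_omni_view_gif():
    frames = ['omni_frame_000.png', 'omni_frame_001.png', 'omni_frame_002.png']
    omni_view_gif(frames, name='omni_movie.gif')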
def labeled_waterfall(
data,
antpairpol=None,
freqs=None,
times=None,
lsts=None,
time_or_lst="lst",
plot_units=None,
data_units="Jy",
mode="log",
set_title=True,
ax=None,
figsize=(10,7),
dpi=100,
aspect="auto",
fontsize=None,
draw_colorbar=True,
cmap="best",
vmin=None,
vmax=None,
dynamic_range=None,
fft_axis=None,
freq_taper=None,
freq_taper_kwargs=None,
time_taper=None,
time_taper_kwargs=None,
):
"""Make a waterfall plot with axes labeled.
Parameters
----------
data: array-like of complex, or :class:`pyuvdata.UVData` instance
Object containing visibility data. If an array is passed, then ``freqs``
and either ``times`` or ``lsts`` must be provided, and the array must
have shape (``lsts.size``, ``freqs.size``). Otherwise, an ``antpairpol``
key must be provided.
antpairpol: tuple
(ant1, ant2, pol) tuple specifying the baseline and polarization to
pull data for if ``data`` is a :class:`pyuvdata.UVData` instance. Ignored
if ``data`` is an array-like object.
freqs: array-like of float
Frequencies corresponding to the observed data, in Hz. Required if ``data``
is an array-like object; ignored otherwise.
times: array-like of float
Observation times, in JD. Required if ``data`` is an array-like object and
``lsts`` is not provided.
lsts: array-like of float
Observed LSTs, in radians. Required if ``data`` is an array-like object
and ``times`` is not provided.
time_or_lst: str, optional
Either "time" or "lst". Used to specify whether the time axis should be
in JD or LST. If ``data`` is an array-like object and only one of ``times``
or ``lsts`` is provided, then this parameter is ignored.
plot_units: dict, optional
Dictionary mapping axis dimension to plotting units. Keys must come from
("lst", "time", "freq", "fringe-rate", "delay"); values must have supported
conversion methods in ``astropy``. LST units may be specified either as
radian-equivalent units or day-equivalent units. Default is:
{
"lst": "hour",
"time": "day",
"freq": "MHz",
"fringe-rate": "mHz",
"delay": "ns"
}
data_units: str, optional
Units for the provided data. If ``data`` is a :class:`pyuvdata.UVData`
instance, then these units are pulled from the object, if they are defined
in it (accessed via the ``vis_units`` attribute). Default is to assume the
data units are in Jy.
mode: str, optional
Plotting mode to use; must be one of ("log", "phs", "abs", "real", "imag").
Default is "log", which plots the base-10 logarithm of the absolute value
of the data. See :func:`data_mode` documentation for details.
set_title: bool or str, optional
Whether to add a title to the subplot. Default is to add a title using the
provided ``antpairpol``. If a string is passed, then that string is used
to set the subplot title.
ax: :class:`plt.Axes` instance, optional
:class:`plt.Axes` object to use for plotting the waterfall. If not provided,
then a new :class:`plt.Figure` object and :class:`plt.Axes` instance is created.
figsize: tuple of int, optional
Length-2 tuple specifying figure dimensions in inches. Ignored if ``ax``
is provided.
dpi: int, optional
Dots per inch to be used in creating the figure. Ignored if ``ax`` is provided.
aspect: str or float, optional
Aspect ratio of the plot. Should be either "equal", "auto", or a number.
Default is to use "auto".
fontsize: float, optional
Font size for labels, in points.
draw_colorbar: bool, optional
Whether to draw a colorbar. Default is to draw a colorbar.
cmap: str or :class:`plt.cm.colors.Colormap` instance, optional
Colormap to use for plotting the waterfall. Default is to choose a colormap
appropriate for the plotting mode chosen ("twilight" for plotting phases,
and "inferno" otherwise).
vmin: float, optional
Minimum value to use for generating the colormap. This parameter is ignored
if ``mode=="phs"``. WARNING: specifying this parameter may result in the
colorscale not being mapped correctly if it is not specified in the same
units used to plot the data. Default is to calculate this parameter under
the hood.
vmax: float, optional
Maximum value to use for generating the colormap. This parameter is ignored
if ``mode=="phs"``. The same warning issued for the ``vmin`` parameter holds.
dynamic_range: float, optional
Number of orders of magnitude of dynamic range to plot. For example, setting
``dynamic_range=5`` limits the colorbar to range from the maximum value to
five orders of magnitude below the maximum. If ``mode=="phs"``, then this
parameter is ignored. If both ``vmin`` and ``vmax`` are provided, then this
parameter is ignored.
fft_axis: int or str, optional
Axis over which to perform a Fourier transform. May be specified with one
of three strings ("time", "freq", "both") or one of three integers (0, 1,
-1), where the integers map to the axes specified by the strings. Default
is to not perform a Fourier transform over any axis.
freq_taper: str, optional
Taper to use when performing a Fourier transform along the frequency axis.
Must be one of the tapers supported by :func:`dspec.gen_window`.
freq_taper_kwargs: dict, optional
Keyword arguments to use in generating the taper for the frequency axis.
time_taper: str, optional
Taper to use when performing a Fourier transform along the time axis.
Must be one of the tapers supported by :func:`dspec.gen_window`.
time_taper_kwargs: dict, optional
Keyword arguments to use in generating the taper for the time axis.
Returns
-------
fig: :class:`plt.Figure` instance
Figure containing the plot.
ax: :class:`plt.Axes` instance
Axes object the waterfall is drawn into.
"""
import matplotlib.pyplot as plt
# Validate parameters.
if time_or_lst not in ("time", "lst"):
raise ValueError("time_or_lst must be 'time' or 'lst'.")
if np.array(data).dtype != np.dtype('O'):
data = np.atleast_2d(data)
if not np.iscomplexobj(data):
raise TypeError("array-like data must consist of complex numbers.")
if data.ndim != 2 or (data.ndim == 2 and 1 in data.shape):
raise ValueError("array-like data must be 2-dimensional.")
if isinstance(data, np.ndarray):
if freqs is None or (times is None and lsts is None):
raise ValueError(
"freqs and either times or lsts must be provided for plotting an array."
)
if times is None:
time_or_lst = "lst"
times = lsts * units.rad.to("cycle") # For Fourier transform purposes
elif lsts is None:
time_or_lst = "time"
else:
try:
from pyuvdata import UVData
# In case UVData is installed and a non-UVData object was passed.
if type(data) is not UVData:
raise ImportError
except ImportError:
raise TypeError("data must either be an ndarray or UVData object.")
if antpairpol is None:
raise ValueError(
"You must provide an antpairpol key if data is a UVData object."
)
freqs = np.unique(data.freq_array)
times = np.unique(data.time_array)
lsts = np.unique(data.lst_array)
data_units = data.vis_units or data_units
data = data.get_data(antpairpol)
# Determine units to use for plotting.
provided_plot_units = plot_units or {}
if not isinstance(provided_plot_units, dict):
raise TypeError("plot_units must be provided as a dictionary.")
plot_units = {
"lst": "hour",
"time": "day",
"freq": "MHz",
"fringe-rate": "mHz",
"delay": "ns"
}
plot_units.update(provided_plot_units)
# Configure the plot axes using the desired units.
xvals = freqs * units.Hz.to(plot_units["freq"])
xlabel = f"Frequency [{plot_units['freq']}]"
if time_or_lst == "time":
yvals = (times - int(times[0])) * units.day.to(plot_units["time"])
ylabel = f"JD - {int(times[0]):d}"
if plot_units["time"] != "day":
ylabel += f" [{plot_units['time']}]"
else:
if units.rad.is_equivalent(plot_units["lst"]):
yvals = lsts * units.rad.to(plot_units["lst"])
else:
yvals = lsts * units.rad.to("cycle") * units.day.to(plot_units["lst"])
ylabel = f"LST [{plot_units['lst']}]"
# Do any requested Fourier transforms and update axis labels.
if fft_axis is not None:
scale_factor = 1 # To get the FFT data units right.
freq_taper_kwargs = freq_taper_kwargs or {}
time_taper_kwargs = time_taper_kwargs or {}
if fft_axis not in ("freq", "time", "both", -1, 0, 1):
raise ValueError("fft_axis not recognized.")
if type(fft_axis) is int:
fft_axis = ("time", "freq", "both")[fft_axis]
if fft_axis in ("freq", "both"):
scale_factor *= np.mean(np.diff(freqs)) # Hz
delays = utils.fourier_freqs(freqs) * units.s.to(plot_units["delay"])
data = utils.FFT(data, axis=1, taper=freq_taper, **freq_taper_kwargs)
xvals = delays
xlabel = f"Delay [{plot_units['delay']}]"
if fft_axis in ("time", "both"):
scale_factor *= np.mean(np.diff(times * units.day.to("s")))
fringe_rates = utils.fourier_freqs(times * units.day.to("s"))
fringe_rates *= units.Hz.to(plot_units["fringe-rate"])
data = utils.FFT(data, axis=0, taper=time_taper, **time_taper_kwargs)
yvals = fringe_rates
ylabel = f"Fringe Rate [{plot_units['fringe-rate']}]"
data *= scale_factor # Convert to correct units.
# Update data for plotting.
data = data_mode(data, mode=mode)
# Prepare colorbar parameters.
if mode == "phs":
cbar_label = "Phase [radians]"
# Allow custom setting of phase bounds.
vmin = vmin if vmin is not None else -np.pi
vmax = vmax if vmax is not None else np.pi
if cmap == "best":
if np.allclose((vmin, vmax), (-np.pi, np.pi)):
# Use a cyclic colormap for boundaries at the phase wrap.
cmap = "twilight"
elif np.isclose(0.5 * (vmin + vmax), 0, atol=0.01):
# Use diverging colormap if phase bounds are not at the
# phase wrap, but have mean nearly zero.
cmap = "RdBu"
else:
# In the case of weird phase bounds.
cmap = "viridis"
else:
if fft_axis == "freq":
base_label = r"$\tilde{V}(t,\tau)$"
unit_label = f"[{data_units} Hz]"
elif fft_axis == "time":
base_label = r"$\tilde{V}(f,\nu)$"
unit_label = f"[{data_units} s]"
elif fft_axis == "both":
base_label = r"$\tilde{V}(f,\tau)$"
unit_label = f"[{data_units} Hz s]"
else:
base_label = r"$V(t,\nu)$"
unit_label = f"[{data_units}]"
if mode == "abs":
cbar_label = f"|{base_label}| {unit_label}"
elif mode == "real":
cbar_label = r"$\mathfrak{Re}$" + f"({base_label}) {unit_label}"
elif mode == "imag":
cbar_label = r"$\mathfrak{Im}$" + f"({base_label}) {unit_label}"
else:
cbar_label = r"$\log_{10}$" + f"|{base_label}| {unit_label}"
if cmap == "best":
cmap = "inferno"
# Limit the dynamic range if desired.
if dynamic_range is not None:
if vmin is not None and vmax is not None:
# Normalization has already been set.
pass
elif vmin is not None:
if mode == "log":
vmax = vmin + dynamic_range
else:
vmax = vmin * 10 ** dynamic_range
else:
if vmax is None:
vmax = data.max()
if mode == "log":
vmin = vmax - dynamic_range
else:
vmin = vmax / 10 ** dynamic_range
else:
vmin = vmin if vmin is not None else data.min()
vmax = vmax if vmax is not None else data.max()
# Setup mappable for drawing colorbar.
norm = plt.cm.colors.Normalize(vmin=vmin, vmax=vmax)
scalar_map = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# Prepare the figure.
return_value = ax
if ax is None:
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
# Finish setup, then plot.
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.imshow(
data,
aspect=aspect,
cmap=cmap,
norm=norm,
extent=(xvals.min(), xvals.max(), yvals.max(), yvals.min()),
)
# Optionally draw a colorbar.
if draw_colorbar:
# Make colorbar edges pointy if data values exceed colorscale bounds.
if data.max() > vmax and data.min() < vmin:
extend = "both"
elif data.max() > vmax:
extend = "max"
elif data.min() < vmin:
extend = "min"
else:
extend = "neither"
cbar = fig.colorbar(mappable=scalar_map, ax=ax, extend=extend)
cbar.set_label(cbar_label, fontsize=fontsize)
# Optionally set a subplot title.
if set_title:
if isinstance(set_title, str):
ax.set_title(set_title, fontsize=fontsize)
elif antpairpol is not None:
ax.set_title(antpairpol, fontsize=fontsize)
else:
pass # Not enough information to make a title.
return fig, ax
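# A minimal usage sketch of `labeled_waterfall` on a synthetic array:
# 100 times, 256 channels spanning 100-200 MHz, LSTs covering part of a day.
# The second call shows the delay transform via `fft_axis`; no taper is
# passed, so the implicit boxcar window is used.
def _example_labeled_waterfall():
    rng = np.random.default_rng(3)
    n_times, n_freqs = 100, 256
    freqs = np.linspace(100e6, 200e6, n_freqs)          # Hz
    lsts = np.linspace(0.0, 0.3, n_times) * 2 * np.pi   # radians
    vis = (rng.normal(size=(n_times, n_freqs))
           + 1j * rng.normal(size=(n_times, n_freqs)))
    fig1, _ = labeled_waterfall(vis, freqs=freqs, lsts=lsts, mode="log",
                                set_title="synthetic data")
    fig2, _ = labeled_waterfall(vis, freqs=freqs, lsts=lsts, mode="log",
                                fft_axis="freq", dynamic_range=5)
    return fig1, fig2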
def fourier_transform_waterfalls(
data,
antpairpol=None,
freqs=None,
times=None,
lsts=None,
time_or_lst="lst",
plot_units=None,
data_units="Jy",
mode="log",
set_title=True,
figsize=(14,10),
dpi=100,
aspect="auto",
fontsize=None,
cmap="best",
dynamic_range=None,
plot_limits=None,
freq_taper=None,
freq_taper_kwargs=None,
time_taper=None,
time_taper_kwargs=None,
):
"""
Plot a 2x2 grid of waterfalls showing all possible Fourier transforms.
Moving clockwise from the top-left, the plots are as follows:
time vs frequency
fringe-rate vs frequency
fringe-rate vs delay
time vs delay
Parameters
----------
data: array-like of complex, or :class:`pyuvdata.UVData` instance
Object containing visibility data. If an array is passed, then ``freqs``
and either ``times`` or ``lsts`` must be provided, and the array must
have shape (``lsts.size``, ``freqs.size``). Otherwise, an ``antpairpol``
key must be provided.
antpairpol: tuple
(ant1, ant2, pol) tuple specifying the baseline and polarization to
pull data for if ``data`` is a :class:`pyuvdata.UVData` instance. Ignored
if ``data`` is an array-like object.
freqs: array-like of float
Frequencies corresponding to the observed data, in Hz. Required if ``data``
is an array-like object; ignored otherwise.
times: array-like of float
Observation times, in JD. Required if ``data`` is an array-like object and
``lsts`` is not provided.
lsts: array-like of float
Observed LSTs, in radians. Required if ``data`` is an array-like object
and ``times`` is not provided.
time_or_lst: str, optional
Either "time" or "lst". Used to specify whether the time axis should be
in JD or LST. If ``data`` is an array-like object and only one of ``times``
or ``lsts`` is provided, then this parameter is ignored.
plot_units: dict, optional
Dictionary mapping axis dimension to plotting units. Keys must come from
("lst", "time", "freq", "fringe-rate", "delay"); values must have supported
conversion methods in ``astropy``. LST units may be specified either as
radian-equivalent units or day-equivalent units. Default is:
{
"lst": "hour",
"time": "day",
"freq": "MHz",
"fringe-rate": "mHz",
"delay": "ns"
}
data_units: str, optional
Units for the provided data. If ``data`` is a :class:`pyuvdata.UVData`
instance, then these units are pulled from the object, if they are defined
in it (accessed via the ``vis_units`` attribute). Default is to assume the
data units are in Jy.
mode: str, optional
Plotting mode to use; must be one of ("log", "phs", "abs", "real", "imag").
Default is "log", which plots the base-10 logarithm of the absolute value
of the data. See :func:`data_mode` documentation for details.
set_title: bool or str, optional
Whether to set a title for the figure. If a string is passed, then that
string is used for the figure title. Default is to use the provided
``antpairpol`` as the title.
figsize: tuple of float, optional
Size of the figure to be produced, in inches. Default is 14x10.
dpi: float, optional
Dots-per-inch of the figure. Default is 100.
aspect: float or str, optional
Aspect ratio to use for each subplot. Default is "auto".
fontsize: float, optional
Font size to use for plotting labels, in points.
cmap: str or :class:`plt.cm.colors.Colormap` instance
Color map to be used when drawing waterfalls. Default is to have the choice
be based on the data mode selected: if ``mode`` is "phs", then "twilight"
is used; otherwise, "inferno" is used.
dynamic_range: dict, optional
Dictionary mapping strings to number of orders-of-magnitude to restrict
the plot dynamic range to. Accepted strings are as follows:
("time", "freq", "delay", "fringe-rate"): specifying one of these
will limit the dynamic range for the associated row or column. For
example, passing {"time": 5} will limit the dynamic range of the left
column to five orders-of-magnitude, clipping values on the low-end.
Any length-2 combination of an entry from the following pairs:
("time", "fringe-rate"), ("freq", "delay")
This type of mapping will limit the dynamic range for a single plot
with axes specified by the pair specified. For example, passing
{("fringe-rate", "delay"): 5} will only limit the dynamic range for
the bottom-right plot.
plot_limits: dict, optional
Dictionary mapping strings to length-2 tuples. The keys designate the
dimension ("time", "freq", "fringe-rate", "delay") to crop, and the values
give the lower- and upper-bounds of the cropped region. For example, passing
{"delay": (-500, 500)} will crop the delay axis to only show delays between
-500 ns and +500 ns (assuming delays are plotted in ns). The values passed
must be in the same units as the plot units used; see the description of
the ``plot_units`` parameter for details on default units.
freq_taper: str, optional
Name of the taper to be applied along the frequency-axis when performing
Fourier transforms. Must be a taper supported by :func:`dspec.gen_window`.
Default is no taper (an implicit top-hat or boxcar).
freq_taper_kwargs: dict, optional
Keyword arguments to be used in generating the frequency taper.
time_taper: str, optional
Name of the taper to be applied along the time-axis when performing Fourier
transforms. Default is the same as for the frequency taper.
time_taper_kwargs: dict, optional
Keyword arguments to be used in generating the time taper.
Returns
-------
fig: :class:`plt.Figure` instance
Figure containing 2x2 grid of plots visualizing the data in the selected
mode for all possible Fourier transforms, with axis labels and colorbars.
"""
import matplotlib.pyplot as plt
# Convert potential None-types to empty dictionaries where needed.
dynamic_range = dynamic_range or {}
plot_limits = plot_limits or {}
# Figure setup
fig = plt.figure(figsize=figsize, dpi=dpi)
axes = fig.subplots(2,2)
transform_axes = (None, 0, 1, -1)
axes_dims = (
("freq", "time"),
("freq", "fringe-rate"),
("delay", "time"),
("delay", "fringe-rate")
)
# Make the plots.
for i, ax in enumerate(axes.ravel()):
# Determine any adjustments to be made to axes in plotting routine.
x_dim, y_dim = axes_dims[i]
possible_drng_keys = (x_dim, y_dim, (x_dim, y_dim), (y_dim, x_dim))
transform_axis = transform_axes[i]
limit_dynamic_range = list(
key in dynamic_range.keys()
for key in possible_drng_keys
)
if any(limit_dynamic_range):
drng = dynamic_range[possible_drng_keys[limit_dynamic_range.index(True)]]
else:
drng = None
# Adjust the plot boundaries if requested.
if x_dim in plot_limits:
ax.set_xlim(*plot_limits[x_dim])
if y_dim in plot_limits:
ax.set_ylim(*plot_limits[y_dim])
# Actually make the plot.
ax = labeled_waterfall(
data=data,
antpairpol=antpairpol,
freqs=freqs,
times=times,
lsts=lsts,
time_or_lst=time_or_lst,
plot_units=plot_units,
data_units=data_units,
mode=mode,
set_title=False,
ax=ax,
aspect=aspect,
fontsize=fontsize,
draw_colorbar=True,
cmap=cmap,
dynamic_range=drng,
fft_axis=transform_axis,
freq_taper=freq_taper,
freq_taper_kwargs=freq_taper_kwargs,
time_taper=time_taper,
time_taper_kwargs=time_taper_kwargs,
)[1]
# Set a figure title if desired.
if set_title:
if type(set_title) is bool:
set_title = antpairpol
if set_title is not None:
# Though complicated, this is the only way I can think of ensuring
# that the figure title is positioned reasonably and aesthetically.
axes = fig.get_axes()
uppermost_y = max(ax.get_position().y1 for ax in axes)
top_row = [
ax for ax in axes
if np.isclose(ax.get_position().y1, uppermost_y)
]
axes_widths = [
ax.get_position().x1 - ax.get_position().x0
for ax in top_row
]
colorbars = [
ax for ax, width in zip(top_row, axes_widths)
if not np.isclose(width, max(axes_widths))
]
plots = [ax for ax in top_row if ax not in colorbars]
# Find the visual horizontal center of the figure.
x1 = min(cbar.get_position().x1 for cbar in colorbars)
x2 = max(plot.get_position().x0 for plot in plots)
title_position = (0.5 * (x1 + x2), uppermost_y)
# Position the title at the apparent "top center" of the figure.
fig.text(
*title_position,
set_title,
ha="center",
va="bottom",
fontsize=fontsize
)
return fig
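# A minimal usage sketch of `fourier_transform_waterfalls` on the same kind
# of synthetic array; `plot_limits` crops the delay axis (default units: ns)
# and `dynamic_range` clips the delay-space panels to 5 orders of magnitude.
def _example_fourier_transform_waterfalls():
    rng = np.random.default_rng(4)
    n_times, n_freqs = 100, 256
    freqs = np.linspace(100e6, 200e6, n_freqs)          # Hz
    lsts = np.linspace(0.0, 0.3, n_times) * 2 * np.pi   # radians
    vis = (rng.normal(size=(n_times, n_freqs))
           + 1j * rng.normal(size=(n_times, n_freqs)))
    return fourier_transform_waterfalls(
        vis,
        freqs=freqs,
        lsts=lsts,
        set_title="synthetic data",
        dynamic_range={"delay": 5},
        plot_limits={"delay": (-500, 500)},
    )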
def plot_diff_waterfall(uvd1, uvd2, antpairpol, plot_type="all",
check_metadata=True, freq_taper=None,
freq_taper_kwargs=None, time_taper=None,
time_taper_kwargs=None):
"""Produce waterfall plot(s) of differenced visibilities.
Parameters
----------
uvd1, uvd2 : pyuvdata.UVData
UVData objects which store visibilities to be differenced and their
associated metadata. They should have the same number of frequencies,
same baselines, and same times as each other.
antpairpol : tuple
Tuple specifying which baseline and polarization to use to compare
visibility waterfalls. See pyuvdata.UVData.get_data method docstring
for information on accepted tuples.
plot_type : str, tuple of str, or list of str, optional
Which spaces to use for investigating differences. Available options
are as follows: time and frequency ('time_vs_freq'); time and delay
('time_vs_dly'); fringe rate and frequency ('fr_vs_freq'); fringe
rate and delay ('fr_vs_dly'). Default is to use all plot types.
check_metadata : bool, optional
Whether to check that the metadata for `uvd1` and `uvd2` match.
See ``utils.check_uvd_pair_metadata`` docstring for details on
how the metadata are compared. If `check_metadata` is set to
False, but the metadata don't agree, then the plotter may or
may not error out, depending on how the metadata disagree.
Default behavior is to check the metadata.
freq_taper : str, optional
Choice of tapering function to use along the frequency axis. Default
is to use no taper.
freq_taper_kwargs : dict, optional
Keyword arguments to be used with the taper for the frequency-axis.
These are ultimately passed to ``dspec.gen_window``. Default behavior
is to use an empty dictionary.
time_taper : str, optional
Choice of tapering function to use along the time axis. Default is to
use no taper.
time_taper_kwargs : dict, optional
Keyword arguments to be used with the taper for the time axis. These
are ultimately passed to ``dspec.gen_window``. Default behavior is to
use an empty dictionary.
Returns
-------
fig : matplotlib.figure.Figure
Figure object containing all of the desired plots.
"""
# check that metadata agrees, unless specified otherwise
if check_metadata:
utils.check_uvd_pair_metadata(uvd1, uvd2)
# get visibility data
vis1 = uvd1.get_data(antpairpol)
vis2 = uvd2.get_data(antpairpol)
# get important metadata
times = np.unique(uvd1.time_array) # days
lsts = np.unique(uvd1.lst_array) # radians
freqs = uvd1.freq_array[0] # choose 0th spectral window; Hz
# import astropy.units for conversion from days to seconds
import astropy.units as u
frs = utils.fourier_freqs(times * u.day.to('s')) # Hz
dlys = utils.fourier_freqs(freqs) # s
# make dictionary of plotting parameters; keys chosen for ease-of-use
plot_params = {"time" : lsts,
"freq" : freqs / 1e6, # MHz
"fr" : frs * 1e3, # mHz
"dly" : dlys * 1e9, # ns
}
# make some axis labels; use LST instead of time b/c time is clunky
labels = {"time" : "LST [radians]",
"freq" : "Frequency [MHz]",
"fr" : "Fringe Rate [mHz]",
"dly" : "Delay [ns]",
}
# convert taper kwargs to empty dictionaries if not specified
freq_taper_kwargs = freq_taper_kwargs or {}
time_taper_kwargs = time_taper_kwargs or {}
# map plot types to transforms needed
plot_types = {
"time_vs_freq" : lambda data : data, # do nothing
"time_vs_dly" : lambda data : utils.FFT(data, 1, freq_taper, **freq_taper_kwargs),
"fr_vs_freq" : lambda data : utils.FFT(data, 0, time_taper, **time_taper_kwargs),
"fr_vs_dly" : lambda data : utils.FFT(
utils.FFT(data, 0, time_taper, **time_taper_kwargs),
1, freq_taper, **freq_taper_kwargs
),
}
# convert plot type to tuple
if isinstance(plot_type, str):
plot_type = tuple(plot_types.keys()) if plot_type == "all" else (plot_type,)
# check that chosen plot type(s) OK
assert all([plot in plot_types.keys() for plot in plot_type]), \
"Please ensure the plot type chosen is supported. The supported " \
"types are : {types}".format(types=list(plot_types.keys()))
# now make a dictionary of the transformed visibilities
visibilities = {plot : {label : xform(vis)
for label, vis in zip(("vis1", "vis2"), (vis1, vis2))}
for plot, xform in plot_types.items()
if plot in plot_type} # but only use desired transforms
# import matplotlib, setup the figure
import matplotlib.pyplot as plt
figsize = (4 * 3, 3 * len(plot_type)) # (4,3) figsize for each plot
fig = plt.figure(figsize=figsize)
axes = fig.subplots(len(plot_type), 3)
axes = [axes,] if len(axes.shape) == 1 else axes # avoid bug for single row
axes[0][0].set_title("Amplitude Difference", fontsize=12)
axes[0][1].set_title("Phase Difference", fontsize=12)
axes[0][2].set_title("Amplitude of Complex Difference", fontsize=12)
# helper function for getting the extent of axes
extent = lambda xvals, yvals : (xvals[0], xvals[-1], yvals[-1], yvals[0])
# loop over items in visibilities and plot them
for i, item in enumerate(visibilities.items()):
# extract visibilities, get diffs
visA, visB = item[1].values()
diffs = (utils.diff(visA, visB, 'abs'),
utils.diff(visA, visB, 'phs'),
utils.diff(visA, visB, 'complex'))
# extract parameters
ykey, xkey = item[0].split("_vs_") # keys for choosing parameters
xvals, yvals = plot_params[xkey], plot_params[ykey]
# get labels
xlabel, ylabel = labels[xkey], labels[ykey]
# plot stuff
for ax, diff in zip(axes[i], diffs):
# set labels
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
# plot waterfall and add a colorbar
fig.sca(ax)
cax = waterfall(diff, mode="real", cmap='viridis',
extent=extent(xvals, yvals))
fig.colorbar(cax)
return fig
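# A minimal usage sketch of `plot_diff_waterfall`; the paths are placeholders
# for two pyuvdata-readable files sharing times, frequencies and baselines,
# and (0, 1, "xx") is an assumed antpairpol present in both.
def _example_plot_diff_waterfall():
    from pyuvdata import UVData
    uvd1, uvd2 = UVData(), UVData()
    uvd1.read("before.uvh5")  # placeholder path
    uvd2.read("after.uvh5")   # placeholder path
    fig = plot_diff_waterfall(uvd1, uvd2, antpairpol=(0, 1, "xx"),
                              plot_type=("time_vs_freq", "time_vs_dly"))
    return uvd1, uvd2, fig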
def plot_diff_uv(uvd1, uvd2, pol=None, check_metadata=True, bins=50):
"""Summary plot for difference between visibilities.
This function produces three plots which summarize the differences
between the data arrays in `uvd1` and `uvd2`. Each summary plot is
shown in a regridded uv-plane, with a resolution set by the `bins`
parameter. This function uses ``scipy.stats.binned_statistic_2d``
to perform a complex average in the uv-plane for each visibility
array before performing any further operations. After taking the
complex average in the uv-plane, the following plots are produced:
first, the difference of the amplitudes of each array; second, the
difference of the phases of each array; third, the amplitude of the
complex difference of the visibility arrays.
Parameters
----------
uvd1, uvd2 : pyuvdata.UVData
Input UVData objects which contain the visibilities to be differenced
and any relevant metadata.
pol : str, None, optional
String specifying which polarization to be used. Must be one of the
polarizations listed in the UVData.get_pols() method for both
`uvd1` and `uvd2`. Default is to use the 0th polarization.
check_metadata : bool, optional
Whether to check that the metadata for `uvd1` and `uvd2` match.
See ``utils.check_uvd_pair_metadata`` docstring for details on
how the metadata are compared. If `check_metadata` is set to
False, but the metadata don't agree, then the plotter may or
may not error out, depending on how the metadata disagree.
Default behavior is to check the metadata.
bins : int, optional
Number of bins to use for regridding the u and v arrays.
"""
# check the metadata unless instructed otherwise
if check_metadata:
utils.check_uvd_pair_metadata(uvd1, uvd2)
# convert polarization to index
pol = 0 if pol is None else uvd1.get_pols().index(pol)
# load in relevant metadata
bl_vecs = uvd1.uvw_array
freqs = uvd1.freq_array[0]
# import astropy constants to convert freq to wavelength
from astropy.constants import c
wavelengths = c.value / freqs
# get uvw vectors; shape = (Nfreq, Nblts, 3)
uvw_vecs = np.array([bl_vecs / wavelength for wavelength in wavelengths])
# reshape uvw vectors to (Nblts, Nfreq, 3)
uvw_vecs = np.swapaxes(uvw_vecs, 0, 1)
# get the u and v arrays, flattened
uvals, vvals = uvw_vecs[:,:,0].flatten(), uvw_vecs[:,:,1].flatten()
# get the regridded u and v arrays' bin edges
u_regrid = np.linspace(uvals.min(), uvals.max(), bins+1)
v_regrid = np.linspace(vvals.min(), vvals.max(), bins+1)
# make an alias for regridding an array and taking the complex mean
# this also takes the transpose so that axis0 is along the v-axis
bin_2d = lambda arr : binned_statistic_2d(
uvals, vvals, arr, statistic='mean',
bins=[u_regrid, v_regrid])[0].T
# regrid the visibilities
# need to do real/imag separately or information is lost
vis1 = uvd1.data_array[:,0,:,pol].flatten()
vis2 = uvd2.data_array[:,0,:,pol].flatten()
vis1 = bin_2d(vis1.real) + 1j*bin_2d(vis1.imag)
vis2 = bin_2d(vis2.real) + 1j*bin_2d(vis2.imag)
# calculate differences of amplitudes and phases as masked arrays
absdiff_ma = utils.diff(vis1, vis2, "abs")
phsdiff_ma = utils.diff(vis1, vis2, "phs")
cabsdiff_ma = utils.diff(vis1, vis2, "complex")
# make the arrays into proper masked arrays
mask = lambda arr : np.ma.MaskedArray(arr, np.isnan(arr))
absdiff_ma = mask(absdiff_ma)
phsdiff_ma = mask(phsdiff_ma)
cabsdiff_ma = mask(cabsdiff_ma)
# remove nans so that the data can actually be normalized
unnan = lambda arr : arr[np.where(np.logical_not(np.isnan(arr)))]
absdiff = unnan(absdiff_ma)
phsdiff = unnan(phsdiff_ma)
cabsdiff = unnan(cabsdiff_ma)
# import matplotlib to set things up and make the plot
import matplotlib.pyplot as plt
# get norms for generating colormaps for difference arrays
absnorm = plt.cm.colors.SymLogNorm(0.1, vmin=absdiff.min(), vmax=absdiff.max())
phsnorm = plt.cm.colors.Normalize(vmin=phsdiff.min(), vmax=phsdiff.max())
cabsnorm = plt.cm.colors.LogNorm(vmin=cabsdiff.min(), vmax=cabsdiff.max())
# setup the figure
fig = plt.figure(figsize=(15,4.5))
axes = fig.subplots(1,3)
# add labels
for ax, label in zip(axes, ("Amplitude", "Phase", "Amplitude of Complex")):
ax.set_xlabel(r'$u$', fontsize=12)
ax.set_ylabel(r'$v$', fontsize=12)
ax.set_title(" ".join([label, "Difference"]), fontsize=12)
extent = (uvals.min(), uvals.max(), vvals.max(), vvals.min())
plot_iterable = zip(axes,
(absdiff_ma, phsdiff_ma, cabsdiff_ma),
(absnorm, phsnorm, cabsnorm))
for ax, diff, norm in plot_iterable:
cax = ax.imshow(diff, norm=norm, aspect="auto",
cmap='viridis', extent=extent)
fig.sca(ax)
fig.colorbar(cax)
return fig
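# A minimal usage sketch of `plot_diff_uv`, assuming `uvd1` and `uvd2` are a
# matching UVData pair (e.g. from the sketch above) and that "xx" is among
# their polarizations; `bins=30` coarsens the regridded uv-plane.
def _example_plot_diff_uv(uvd1, uvd2):
    return plot_diff_uv(uvd1, uvd2, pol="xx", bins=30)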
def plot_diff_1d(uvd1, uvd2, antpairpol, plot_type="both",
check_metadata=True, dimension=None,
taper=None, taper_kwargs=None,
average_mode=None, **kwargs):
"""Produce plots of visibility differences along a single axis.
Parameters
----------
uvd1, uvd2 : pyuvdata.UVData
UVData objects which store visibilities to be differenced and their
associated metadata. They should have the same number of frequencies,
same baselines, and same times as each other.
antpairpol : tuple
Tuple specifying which baseline and polarization to use to compare
visibility waterfalls. See pyuvdata.UVData.get_data method docstring
for information on accepted tuples.
plot_type : str, optional
A string identifying which quantities to plot. Accepted values are
as follows:
- normal
- Single row of plots in the usual basis (time or frequency).
- fourier
- Single row of plots in Fourier space (fringe rate or delay).
- both
- Two rows of plots in the usual and Fourier domains.
Default behavior is to use the 'both' setting.
check_metadata : bool, optional
Whether to check that the metadata for `uvd1` and `uvd2` match.
See ``utils.check_uvd_pair_metadata`` docstring for details on
how the metadata are compared. If `check_metadata` is set to
False, but the metadata don't agree, then the plotter may or
may not error out, depending on how the metadata disagree.
Default behavior is to check the metadata.
dimension : str, optional
String specifying which dimension is used for the normal domain. This
may be either 'time' or 'freq'. Default is to determine which axis has
more entries and to use that axis.
taper : str, optional
        String specifying which taper to use; must be a taper supported by
``dspec.gen_window``. Default is to use no taper.
taper_kwargs : dict, optional
Dictionary of keyword arguments and their values, passed downstream to
``dspec.gen_window``. Default is to use an empty dictionary (i.e.
default parameter values for whatever window is generated).
average_mode : str, optional
String specifying which ``numpy`` averaging function to use. Default
behavior is to use ``np.mean``.
**kwargs
These are passed directly to the averaging function used. Refer to
the documentation of the averaging function you want to use for
information regarding what parameters may be specified here.
Returns
-------
fig : matplotlib.pyplot.Figure
Figure object containing the plots. The plots have their axes and
titles automatically set depending on what quantities are being
plotted.
Notes
-----
This function extracts the visibility waterfall corresponding to the
provided antpairpol and flattens it by taking the average along the axis
not being used. The averaging function used may be specified with the
`average_mode` parameter, and weights (or optional parameters to be
passed to the averaging function) may be specified in the variable
keyword parameter `kwargs`. Any flags relevant for the data are
currently ignored, but this functionality may be introduced in a future
update.
"""
if check_metadata:
utils.check_uvd_pair_metadata(uvd1, uvd2)
if plot_type not in ("normal", "fourier", "both"):
raise ValueError(
"You must specify whether to make one or two plots with "
"the ``plot_type`` parameter. You may choose to plot the "
"visibility difference as a function of frequency/time by "
"setting ``plot_type`` to 'normal', or you can choose to "
"plot the difference in Fourier space by setting "
"``plot_type`` to 'fourier'. If you would like to plot both, "
"then set ``plot_type`` to 'both'."
)
dimensions_to_duals = {"time" : "fr", "freq" : "dly"}
if dimension is None:
dimension = "time" if uvd1.Ntimes > uvd1.Nfreqs else "freq"
if uvd1.Ntimes == uvd1.Nfreqs:
warnings.warn(
"The UVData objects passed have the same number of " \
"times as they do frequencies. You did not specify " \
"which dimension to use, so the difference plots " \
"will be made along the time axis."
)
if dimension not in ("time", "freq"):
raise ValueError(
"You must specify whether the visibilities are a function "
"of time or frequency by setting the ``dimension`` "
"parameter to 'time' or 'freq', respectively."
)
dual = dimensions_to_duals[dimension]
use_axis = 0 if dimension == "time" else 1
proj_axis = (use_axis + 1) % 2
# choose an averaging function
if average_mode is not None:
try:
average = getattr(np, average_mode)
except AttributeError as err:
err_msg = err.args[0] + "\nDefaulting to using np.mean"
warnings.warn(err_msg)
average = np.mean
else:
average = np.mean
# get visibility data
vis1 = average(uvd1.get_data(antpairpol), axis=proj_axis, **kwargs)
vis2 = average(uvd2.get_data(antpairpol), axis=proj_axis, **kwargs)
# use same approach as in plot_diff_waterfall
# get important metadata
times = np.unique(uvd1.time_array) # days
lsts = np.unique(uvd1.lst_array) # radians
freqs = np.unique(uvd1.freq_array) # Hz
# import astropy for unit conversions
import astropy.units as u
frs = utils.fourier_freqs(times * u.day.to('s')) # Hz
dlys = utils.fourier_freqs(freqs) # s
# make dictionary of plotting parameters
plot_params = {"time" : lsts, # radians
"freq" : freqs / 1e6, # MHz
"fr" : frs * 1e3, # mHz
"dly" : dlys * 1e9 # ns
}
# now do the same for abscissa labels
labels = {"time" : "LST [radians]",
"freq" : "Frequency [MHz]",
"fr" : "Fringe Rate [mHz]",
"dly" : "Delay [ns]"
}
# and now for ordinate labels
vis_labels = {"time" : r"$V(t)$ [{vis_units}]",
"freq" : r"$V(\nu)$ [{vis_units}]",
"fr" : r"$\tilde{V}(f)$ [{vis_units}$\cdot$s]",
"dly" : r"$\tilde{V}(\tau)$ [{vis_units}$\cdot$Hz]"
}
# make sure the taper kwargs are a dictionary
taper_kwargs = taper_kwargs or {}
# make some mappings for plot types
plot_types = {dimension : lambda data : data, # no fft
dual : lambda data : utils.FFT(data, 0, taper, **taper_kwargs)
}
# update the plot_type parameter to something useful
if plot_type == "normal":
plot_type = (dimension,)
elif plot_type == "fourier":
plot_type = (dual,)
else:
plot_type = (dimension, dual)
# make a dictionary of visibilities to plot
visibilities = {
plot : [xform(vis) for vis in (vis1, vis2)]
for plot, xform in plot_types.items()
if plot in plot_type
}
# XXX make a helper function for this
# now setup the figure
import matplotlib.pyplot as plt
figsize = (4 * 3, 3 * len(plot_type))
fig = plt.figure(figsize=figsize)
axes = fig.subplots(len(plot_type), 3)
axes = [axes,] if axes.ndim == 1 else axes
axes[0][0].set_title("Amplitude Difference", fontsize=12)
axes[0][1].set_title("Phase Difference", fontsize=12)
axes[0][2].set_title("Amplitude of Complex Difference", fontsize=12)
# plot the visibilities
for i, item in enumerate(visibilities.items()):
# get the differences
visA, visB = item[1]
diffs = (
utils.diff(visA, visB, 'abs'),
utils.diff(visA, visB, 'phs'),
utils.diff(visA, visB, 'complex')
)
xdim = item[0]
xlabel = labels[xdim]
# to ensure appropriate LaTeX formatting and visibility units
ylabel = vis_labels[xdim].format(V="{V}", vis_units=uvd1.vis_units)
# actually plot it
for ax, diff in zip(axes[i], diffs):
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
ax.plot(plot_params[xdim], diff, marker='o', color='k', lw=0)
return fig
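# A minimal usage sketch of `plot_diff_1d`, assuming the same kind of matching
# UVData pair; the waterfalls are averaged over frequency (dimension="time")
# with the default np.mean before differencing.
def _example_plot_diff_1d(uvd1, uvd2):
    return plot_diff_1d(uvd1, uvd2, antpairpol=(0, 1, "xx"),
                        plot_type="both", dimension="time")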
| 38.203297
| 90
| 0.612469
|
5e19a61cae76534b81a3cca26eaff1acc4152739
| 798
|
py
|
Python
|
hotword/run_with_gui.py
|
WqVoon/PythonHomework
|
ed72b36101beb01065d8d46056fa1b9bdf78bc49
|
[
"MIT"
] | null | null | null |
hotword/run_with_gui.py
|
WqVoon/PythonHomework
|
ed72b36101beb01065d8d46056fa1b9bdf78bc49
|
[
"MIT"
] | 1
|
2021-01-08T12:39:19.000Z
|
2021-01-08T12:39:19.000Z
|
hotword/run_with_gui.py
|
WqVoon/PythonHomework
|
ed72b36101beb01065d8d46056fa1b9bdf78bc49
|
[
"MIT"
] | null | null | null |
from backend.cp import ContentProvider as CP
from frontend.ui import ClsChooser, FormChooser
from frontend.drawers import Drawers
from utils import Loger, SUPPORTED_KEYS
import tkinter as tk
Loger.is_debug_mode = True
def gen_btn_action(c, f):
"""
Use a closure to build a button action bound to the given ClsChooser and FormChooser.
"""
def inner_func():
cls = c.get_value()
form = f.get_value()
try:
CP.get_info(cls)
Drawers[form](CP.get_topn(10)).draw()
except Exception as err:
pass
return inner_func
top = tk.Tk()
clss = ClsChooser(top)
forms = FormChooser(top)
btn = tk.Button(top, text="查询", command=gen_btn_action(clss, forms))
clss.grid(sticky='w')
forms.grid(sticky='w')
btn.grid()
top.title("新闻热词分析")
top.resizable(False, False)
top.protocol('WM_DELETE_WINDOW', lambda: top.destroy())
top.mainloop()
| 20.461538
| 68
| 0.733083
|
bfa0b22cbb67268874cfb57d2601df22620b5611
| 122
|
py
|
Python
|
src/tictactoe/urls.py
|
kardandon/ChatAPP_websockets
|
fe1e567253774332b12527c73f4c51785d4c67a0
|
[
"MIT"
] | null | null | null |
src/tictactoe/urls.py
|
kardandon/ChatAPP_websockets
|
fe1e567253774332b12527c73f4c51785d4c67a0
|
[
"MIT"
] | null | null | null |
src/tictactoe/urls.py
|
kardandon/ChatAPP_websockets
|
fe1e567253774332b12527c73f4c51785d4c67a0
|
[
"MIT"
] | null | null | null |
from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='tictactoe'),
]
| 17.428571
| 44
| 0.688525
|
12bbbfe679a33bed6131ca53a2fe16c1858b50b7
| 311
|
py
|
Python
|
data/multilingual/Cyrl.TYV/Sun-ExtA_16/pdf_to_json_test_Cyrl.TYV_Sun-ExtA_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Cyrl.TYV/Sun-ExtA_16/pdf_to_json_test_Cyrl.TYV_Sun-ExtA_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Cyrl.TYV/Sun-ExtA_16/pdf_to_json_test_Cyrl.TYV_Sun-ExtA_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Cyrl.TYV/Sun-ExtA_16/udhr_Cyrl.TYV_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 31.1
| 81
| 0.810289
|
aa5fb2d41e9f21155d2004e873f019d01776ea4c
| 7,167
|
py
|
Python
|
yum/cli.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | 1
|
2018-02-12T15:48:59.000Z
|
2018-02-12T15:48:59.000Z
|
yum/cli.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | 1
|
2018-02-11T09:36:21.000Z
|
2018-02-11T09:36:21.000Z
|
yum/cli.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | null | null | null |
import click
import yum.Parser as Parser
import yum.account as acc
import yum.state as state
import yum.yum as yum
from yum import connector, gui
from flask import json
import yum.CONSTANTS as CONST
@click.group('yum')
@click.option('--config', '-c', default='config.cfg',
help='Path of the auth config file.')
@click.option('--username', '-u', envvar='IG_USERNAME',
help='Instagram username')
@click.option('--password', '-p', envvar='IG_PASSWORD',
help='Instagram password')
@click.pass_context
def cli(ctx, config, username, password):
ctx.obj['config'] = config
ctx.obj['username'] = username
ctx.obj['password'] = password
ctx.obj['recipe_file'] = CONST.RECIPE_FILE
ctx.obj['save_file'] = CONST.SAVE_FILE
ctx.obj['friends_file'] = CONST.FRIENDS_FILE
ctx.obj['server_url'] = Parser.get(config,'server','url')
@cli.command()
@click.pass_context
@click.option('--caption', '-c', default='Check my Yum4FIT food!',
help='Caption for Instagram picture')
@click.argument('path')
def share(ctx, path, caption):
"""
Share photo on instagram
:param path: Path to the picture
:param caption: Caption of picture on instagram
"""
config = ctx.obj['config']
username = Parser.getUsername(ctx.obj['username'], config)
password = Parser.getPassword(ctx.obj['password'], config)
hashtag = Parser.get(config, 'instagram', 'hashtag')
serverURL = ctx.obj['server_url']
acc.upload(username, password, path, caption +" "+ hashtag, serverURL)
@cli.command()
@click.option('--id', '-i', help='XP Gain from the photo with ID')
@click.pass_context
def gain(ctx, id):
"""
Gain XP from Yum4FIT pictures on Instagram
:param id: Returns the XP gain only from the picture with the ID set as an argument
:return: XP Gain
"""
config = ctx.obj['config']
username = Parser.getUsername(ctx.obj['username'], config)
password = Parser.getPassword(ctx.obj['password'], config)
hashtag = Parser.get(config, 'instagram', 'hashtag')
friendsFile = ctx.obj['friends_file']
serverURL = ctx.obj['server_url']
setConfirmed(password, serverURL)
result = acc.gain(username, password, hashtag, id, friendsFile)
if not id:
data = { 'likes' : str(result.likes),
'level' : str(result.level),
'xp' : str(result.xp )
}
json_data = json.dumps(data)
url = ctx.obj['server_url']
url += 'gain'
connector.post(url, json_data, password)
# result is state
    if id is None:
click.echo( result.text() )
# result is likes count
else:
click.echo("%s: " % id + "%d likes" % result + " (%s XP)" % (result * CONST.XP_FACTOR))
@cli.command()
@click.pass_context
def food(ctx):
"""
Print all the Yum4FIT food on your instagram
:param ctx:
:return:
"""
config = ctx.obj['config']
username = Parser.getUsername(ctx.obj['username'], config)
password = Parser.getPassword(ctx.obj['password'], config)
hashtag = Parser.get(config, 'instagram', 'hashtag')
food = acc.getAllFood(username, password, hashtag)
for actual in food:
print(actual)
@cli.command()
@click.pass_context
@click.argument('username')
@click.argument('id')
def add_friend(ctx, username, id):
data = {"username": username, "post": id}
json_data = json.dumps(data)
url = ctx.obj['server_url']
url += 'addfriend'
Parser.updateSection(CONST.FRIENDS_FILE, username, id, 'no')
config = ctx.obj['config']
password = Parser.getPassword(ctx.obj['password'], config)
connector.post(url, json_data, password)
@cli.command()
@click.pass_context
@click.option('--diet', '-d', help='The diet-allowed recipe. Supported diets are lacto vegetarian, '
'ovo vegetarian, pescetarian, vegan and lacto-ovo vegetarian')
@click.option('--ingredient', '-i', help='Find the recipe with the ingredient')
@click.option('--allergy', '-a', help='The allergy-allowed recipe. Supported allergies are dairy, egg, gluten, peanut, seafood, sesame, soy, '
'sulfite, tree nut and wheat')
@click.option('--cuisine', '-c', help='Allow the recipes from national cuisines. Supported cuisines are American, Italian, Asian, Mexican, '
                                      'Southern & Soul Food, French, Southwestern, Barbecue, Indian, Chinese, Cajun & Creole, English, '
                                      'Mediterranean, Greek, Spanish, German, Thai, Moroccan, Irish, Japanese, Cuban, Hawaiian, Swedish, '
                                      'Hungarian, Portuguese')
@click.option('--holiday', '-h', help='Find the holiday recipes')
@click.option('--exclude-holiday', '-eh', help='Exclude the recipes from the holidays')
@click.option('--exclude-cuisine', '-ec', help='Exclude the recipes from national cuisines. Supported cuisines are the same as in the --cuisine option')
@click.option('--exclude-ingredient', '-ei', help='Exclude the recipes with certain ingredients. Supported ingredients are the same as in the --ingredient option')
@click.option('--phrase', '-p', help='Find the recipe matching the phrase')
@click.option('--file','-f', is_flag=True)
def recipe(ctx, diet, allergy, cuisine, exclude_cuisine, ingredient, exclude_ingredient, holiday, exclude_holiday, phrase, file):
"""
Generate recipes with difficulty computed from your level
:param diet: Diets
:param allergy: Allergies
    :param cuisine: National cuisines
:param exclude_cuisine: Excluding national cuisines
:param ingredient: Ingredients
:param exclude_ingredient: Excluding ingredients
:param holiday: Holiday recipes
:param exclude_holiday: Excluding holiday recipes
:param phrase: Name of the recipe
:param file: Config file
"""
config = ctx.obj['config']
api_id = Parser.get(config, 'yummly', 'api-id')
api_key = Parser.get(config, 'yummly', 'api-key')
server_url = Parser.get(config,'server','url')
yummer = yum.Yum(api_id, api_key, server_url)
result = yummer.recipe(diet, allergy,
cuisine, exclude_cuisine,
ingredient, exclude_ingredient,
holiday, exclude_holiday,
phrase,
file, config)
if result:
print(result.text())
@cli.command()
@click.pass_context
def run(ctx):
gui.GUI( ctx.obj['config'] )
def setConfirmed(password, url):
response = connector.post(url+'confirmated', "", password)
parser = Parser.parse(CONST.FRIENDS_FILE)
friends = parser.sections()
if not response:
return
data = json.loads(response.text)
sections = []
fields = []
for local in friends:
for new in data:
extern = new.split('--')[0]
if extern == local:
sections.append(local)
fields.append(data[new])
    # sections and fields are built as parallel lists above, so pair them up
    for s, f in zip(sections, fields):
        Parser.updateSection(CONST.FRIENDS_FILE, s, f, 'yes')
if __name__ == '__main__':
cli(obj={})
| 38.12234
| 150
| 0.635831
|
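A quick way to exercise the click group above without a real Instagram account is click's built-in test runner. The sketch below is illustrative only: it assumes a local config.cfg containing the [server], [instagram] and [yummly] sections that Parser reads, and that file name and its contents are placeholders, not part of the original project.
from click.testing import CliRunner

# Hypothetical smoke test for the 'food' subcommand; config.cfg is a
# placeholder auth/config file, not shipped with the project.
runner = CliRunner()
result = runner.invoke(cli, ['--config', 'config.cfg', 'food'], obj={})
print(result.exit_code)
print(result.output)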
ffa793eb9ed0928c0463de593473b42788cec054
| 3,646
|
py
|
Python
|
tools/python/boutiques/puller.py
|
joeyzhou98/boutiques
|
2ec2e9f2df61c63a64330bb0786f5b3ba90352fe
|
[
"MIT"
] | 2
|
2016-11-01T15:08:37.000Z
|
2018-09-07T20:56:43.000Z
|
tools/python/boutiques/puller.py
|
joeyzhou98/boutiques
|
2ec2e9f2df61c63a64330bb0786f5b3ba90352fe
|
[
"MIT"
] | null | null | null |
tools/python/boutiques/puller.py
|
joeyzhou98/boutiques
|
2ec2e9f2df61c63a64330bb0786f5b3ba90352fe
|
[
"MIT"
] | 1
|
2018-03-20T15:51:00.000Z
|
2018-03-20T15:51:00.000Z
|
import requests
import urllib
import os
from boutiques.logger import raise_error, print_info
from boutiques.searcher import Searcher
from boutiques.zenodoHelper import ZenodoError
try:
# Python 3
from urllib.request import urlopen
from urllib.request import urlretrieve
except ImportError:
# Python 2
from urllib2 import urlopen
from urllib import urlretrieve
class Puller():
def __init__(self, zids, verbose=False, sandbox=False):
# remove zenodo prefix
self.zenodo_entries = []
self.cache_dir = os.path.join(os.path.expanduser('~'), ".cache",
"boutiques")
discarded_zids = zids
# This removes duplicates, should maintain order
zids = list(dict.fromkeys(zids))
for zid in zids:
discarded_zids.remove(zid)
try:
# Zenodo returns the full DOI, but for the purposes of
                # Boutiques we just use the Zenodo-specific portion (as it's the
# unique part). If the API updates on Zenodo to no longer
# provide the full DOI, this still works because it just grabs
# the last thing after the split.
zid = zid.split('/')[-1]
newzid = zid.split(".", 1)[1]
newfname = os.path.join(self.cache_dir,
"zenodo-{0}.json".format(newzid))
self.zenodo_entries.append({"zid": newzid, "fname": newfname})
except IndexError:
raise_error(ZenodoError, "Zenodo ID must be prefixed by "
"'zenodo', e.g. zenodo.123456")
self.verbose = verbose
self.sandbox = sandbox
if(self.verbose):
for zid in discarded_zids:
print_info("Discarded duplicate id {0}".format(zid))
def pull(self):
# return cached file if it exists
json_files = []
for entry in self.zenodo_entries:
if os.path.isfile(entry["fname"]):
if(self.verbose):
print_info("Found cached file at %s"
% entry["fname"])
json_files.append(entry["fname"])
continue
searcher = Searcher(entry["zid"], self.verbose, self.sandbox,
exact_match=True)
r = searcher.zenodo_search()
if not len(r.json()["hits"]["hits"]):
raise_error(ZenodoError, "Descriptor \"{0}\" "
"not found".format(entry["zid"]))
for hit in r.json()["hits"]["hits"]:
file_path = hit["files"][0]["links"]["self"]
file_name = file_path.split(os.sep)[-1]
if hit["id"] == int(entry["zid"]):
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
if(self.verbose):
print_info("Downloading descriptor %s"
% file_name)
downloaded = urlretrieve(file_path, entry["fname"])
if(self.verbose):
print_info("Downloaded descriptor to "
+ downloaded[0])
json_files.append(downloaded[0])
else:
raise_error(ZenodoError, "Searched-for descriptor \"{0}\" "
"does not match descriptor \"{1}\" returned "
"from Zenodo".format(entry["zid"], hit["id"]))
return json_files
| 41.431818
| 79
| 0.518651
|
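For context, a minimal usage sketch of the Puller class above. The Zenodo ID is made up, and the import path simply mirrors the repository layout shown in this record (tools/python/boutiques/puller.py).
from boutiques.puller import Puller

# Hypothetical example ID; any 'zenodo.'-prefixed identifier fits the constructor above.
puller = Puller(["zenodo.1482743"], verbose=True)
descriptors = puller.pull()  # downloads into ~/.cache/boutiques or reuses the cached copy
print(descriptors)           # e.g. a list like ['.../boutiques/zenodo-1482743.json']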
271c7a5901e5d4bbee6fb3be6375c57d8030ac76
| 698
|
py
|
Python
|
dai05shou/code5-8.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | null | null | null |
dai05shou/code5-8.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | null | null | null |
dai05shou/code5-8.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | 2
|
2021-06-10T11:53:02.000Z
|
2021-06-20T15:43:39.000Z
|
def chmin(a, b):
if a > b:
return b
else:
return a
S = list("logistic")
T = list("algorithm")
INF = 1000000000000000000000
dp = [[INF for j in range(len(T) + 1)] for i in range(len(S) + 1)]
dp[0][0] = 0
for i in range(len(S) + 1):
for j in range(len(T) + 1):
if i > 0 and j > 0:
            # change (replace) operation
if S[i-1] == T[j-1]:
dp[i][j] = chmin(dp[i][j], dp[i-1][j-1])
else:
dp[i][j] = chmin(dp[i][j], dp[i-1][j-1] + 1)
        # delete operation
if i > 0:
dp[i][j] = chmin(dp[i][j], dp[i-1][j] + 1)
        # insert operation
if j > 0:
dp[i][j] = chmin(dp[i][j], dp[i][j-1] + 1)
print(dp[-1][-1])
| 25.851852
| 66
| 0.408309
|
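The script above computes the Levenshtein (edit) distance between "logistic" and "algorithm" with a bottom-up DP. As a reusable sketch of the same recurrence, here it is wrapped in a function; the logic mirrors the code above, only the helper names differ.
def edit_distance(s, t):
    # dp[i][j] = minimum number of edits turning s[:i] into t[:j]
    INF = float("inf")
    dp = [[INF] * (len(t) + 1) for _ in range(len(s) + 1)]
    dp[0][0] = 0
    for i in range(len(s) + 1):
        for j in range(len(t) + 1):
            if i > 0 and j > 0:  # change (free when the characters already match)
                cost = 0 if s[i - 1] == t[j - 1] else 1
                dp[i][j] = min(dp[i][j], dp[i - 1][j - 1] + cost)
            if i > 0:            # delete s[i-1]
                dp[i][j] = min(dp[i][j], dp[i - 1][j] + 1)
            if j > 0:            # insert t[j-1]
                dp[i][j] = min(dp[i][j], dp[i][j - 1] + 1)
    return dp[-1][-1]

print(edit_distance("logistic", "algorithm"))  # 6, the same answer the script prints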
f071c8e2949607474e0a7acede5014f9b67492bf
| 25,125
|
py
|
Python
|
terracotta/globals/manager.py
|
kvshamray/terracota
|
8f6419693a2add12c0cd27005e6f58f8295ad7e6
|
[
"Apache-2.0"
] | 1
|
2020-01-21T11:31:39.000Z
|
2020-01-21T11:31:39.000Z
|
terracotta/globals/manager.py
|
kvshamray/terracota
|
8f6419693a2add12c0cd27005e6f58f8295ad7e6
|
[
"Apache-2.0"
] | null | null | null |
terracotta/globals/manager.py
|
kvshamray/terracota
|
8f6419693a2add12c0cd27005e6f58f8295ad7e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main global manager module.
The global manager is deployed on the management host and is
responsible for making VM placement decisions and initiating VM
migrations. It exposes a REST web service, which accepts requests from
local managers. The global manager processes only one type of requests
-- reallocation of a set of VM instances. Once a request is received,
the global manager invokes a VM placement algorithm to determine
destination hosts to migrate the VMs to. Once a VM placement is
determined, the global manager submits a request to the Nova API to
migrate the VMs. The global manager is also responsible for switching
idle hosts to the sleep mode, as well as re-activating hosts when
necessary.
The global manager is agnostic of a particular implementation of the
VM placement algorithm in use. The VM placement algorithm to use can
be specified in the configuration file using the
`algorithm_vm_placement_factory` option. A VM placement algorithm can
call the Nova API to obtain the information about host characteristics
and current VM placement. If necessary, it can also query the central
database to obtain the historical information about the resource usage
by the VMs.
The global manager component provides a REST web service implemented
using the Bottle framework. The authentication is done using the admin
credentials specified in the configuration file. Upon receiving a
request from a local manager, the following steps will be performed:
1. Parse the `vm_uuids` parameter and transform it into a list of
UUIDs of the VMs to migrate.
2. Call the Nova API to obtain the current placement of VMs on the
hosts.
3. Call the function specified in the `algorithm_vm_placement_factory`
configuration option and pass the UUIDs of the VMs to migrate and
the current VM placement as arguments.
4. Call the Nova API to migrate the VMs according to the placement
determined by the `algorithm_vm_placement_factory` algorithm.
When a host needs to be switched to the sleep mode, the global manager
will use the account credentials from the `compute_user` and
`compute_password` configuration options to open an SSH connection
with the target host and then invoke the command specified in the
`sleep_command`, which defaults to `pm-suspend`.
When a host needs to be re-activated from the sleep mode, the global
manager will leverage the Wake-on-LAN technology and send a magic
packet to the target host using the `ether-wake` program and passing
the corresponding MAC address as an argument. The mapping between the
IP addresses of the hosts and their MAC addresses is initialized in
the beginning of the global manager's execution.
"""
from hashlib import sha1
import platform
import subprocess
import time
import novaclient
from novaclient.v2 import client
from oslo_config import cfg
from oslo_log import log as logging
from terracotta import common
from terracotta.utils import db_utils
dist = platform.linux_distribution(full_distribution_name=0)[0]
if dist in ['redhat', 'centos']:
etherwake = 'ether-wake'
else:
etherwake = 'etherwake'
global_mgr_ops = [
cfg.StrOpt('os_admin_user',
default='admin',
help='The admin user name for authentication '
'with Nova using Keystone.'),
cfg.StrOpt('os_admin_password',
default='admin',
help='The admin user password for authentication '
'with Nova using Keystone.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(global_mgr_ops)
def host_mac(host):
"""Get mac address of a host.
:param host: A host name.
:return: The mac address of the host.
"""
mac = subprocess.Popen(
("ping -c 1 {0} > /dev/null;" +
"arp -a {0} | awk '{{print $4}}'").format(host),
stdout=subprocess.PIPE,
shell=True).communicate()[0].strip()
if len(mac) != 17:
LOG.warning('Received a wrong mac address for %s: %s',
host, mac)
return ''
return mac
def flavors_ram(nova):
"""Get a dict of flavor IDs to the RAM limits.
:param nova: A Nova client.
:return: A dict of flavor IDs to the RAM limits.
"""
return dict((str(fl.id), fl.ram) for fl in nova.flavors.list())
def vms_ram_limit(nova, vms):
"""Get the RAM limit from the flavors of the VMs.
:param nova: A Nova client.
:param vms: A list of VM UUIDs.
:return: A dict of VM UUIDs to the RAM limits.
"""
flavors_to_ram = flavors_ram(nova)
vms_ram = {}
for uuid in vms:
try:
vm = nova.servers.get(uuid)
vms_ram[uuid] = flavors_to_ram[vm.flavor['id']]
except novaclient.exceptions.NotFound:
pass
return vms_ram
def host_used_ram(nova, host):
"""Get the used RAM of the host using the Nova API.
:param nova: A Nova client.
:param host: A host name.
:return: The used RAM of the host.
"""
data = nova.hosts.get(host)
if len(data) > 2 and data[2].memory_mb != 0:
return data[2].memory_mb
return data[1].memory_mb
def vms_by_hosts(nova, hosts):
"""Get a map of host names to VMs using the Nova API.
:param nova: A Nova client.
:param hosts: A list of host names.
:return: A dict of host names to lists of VM UUIDs.
"""
result = dict((host, []) for host in hosts)
for vm in nova.servers.list():
result[vm_hostname(vm)].append(str(vm.id))
return result
def vms_by_host(nova, host):
"""Get VMs from the specified host using the Nova API.
:param nova: A Nova client.
:param host: A host name.
:return: A list of VM UUIDs from the specified host.
"""
return [str(vm.id) for vm in nova.servers.list()
if (vm_hostname(vm) == host and str(getattr(
vm, 'OS-EXT-STS:vm_state')) == 'active')]
def vm_hostname(vm):
"""Get the name of the host where VM is running.
:param vm: A Nova VM object.
:return: The hostname.
"""
return str(vm.get('OS-EXT-SRV-ATTR:host'))
def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
"""Synchronously live migrate a set of VMs.
:param db: The database object.
:param nova: A Nova client.
:param vm_instance_directory: The VM instance directory.
:param placement: A dict of VM UUIDs to host names.
:param block_migration: Whether to use block migration.
"""
retry_placement = {}
vms = placement.keys()
# Migrate only 2 VMs at a time, as otherwise migrations may fail
# vm_pairs = [vms[x:x + 2] for x in xrange(0, len(vms), 2)]
    # Temporarily migrate VMs one by one
vm_pairs = [vms[x:x + 1] for x in xrange(0, len(vms), 1)]
for vm_pair in vm_pairs:
start_time = time.time()
for vm_uuid in vm_pair:
migrate_vm(nova, vm_instance_directory, vm_uuid,
placement[vm_uuid], block_migration)
time.sleep(10)
while True:
for vm_uuid in list(vm_pair):
vm = nova.servers.get(vm_uuid)
LOG.debug('VM %s: %s, %s',
vm_uuid,
vm_hostname(vm),
vm.status)
if vm_hostname(vm) == placement[vm_uuid] and \
vm.status == u'ACTIVE':
vm_pair.remove(vm_uuid)
db.insert_vm_migration(vm_uuid, placement[vm_uuid])
LOG.info('Completed migration of VM %s to %s',
vm_uuid, placement[vm_uuid])
elif time.time() - start_time > 300 and vm_hostname(
vm) != placement[vm_uuid] and vm.status == u'ACTIVE':
vm_pair.remove(vm_uuid)
retry_placement[vm_uuid] = placement[vm_uuid]
LOG.warning('Time-out for migration of VM %s to %s, ' +
'will retry', vm_uuid, placement[vm_uuid])
else:
break
else:
break
time.sleep(3)
if retry_placement:
LOG.info('Retrying the following migrations: %s',
str(retry_placement))
migrate_vms(db, nova, vm_instance_directory,
retry_placement, block_migration)
def migrate_vm(nova, vm_instance_directory, vm, host, block_migration):
"""Live migrate a VM.
:param nova: A Nova client.
:param vm_instance_directory: The VM instance directory.
:param vm: The UUID of a VM to migrate.
:param host: The name of the destination host.
:param block_migration: Whether to use block migration.
"""
# To avoid problems with migration, need the following:
subprocess.call('chown -R nova:nova ' + vm_instance_directory,
shell=True)
nova.servers.live_migrate(vm, host, block_migration, False)
LOG.info('Started migration of VM %s to %s', vm, host)
def switch_hosts_off(db, sleep_command, hosts):
"""Switch hosts to a low-power mode.
:param db: The database object.
:param sleep_command: A Shell command to switch off a host.
:param hosts: A list of hosts to switch off.
"""
if sleep_command:
for host in hosts:
command = 'ssh {0} "{1}"'.format(host, sleep_command)
LOG.debug('Calling: %s', command)
subprocess.call(command, shell=True)
LOG.info('Switched off hosts: %s', str(hosts))
db.insert_host_states(dict((x, 0) for x in hosts))
class GlobalManager(object):
def __init__(self, *args, **kwargs):
self.state = self.init_state()
self.switch_hosts_on(self.state['compute_hosts'])
def init_state(self):
"""Initialize a dict for storing the state of the global manager.
"""
return {'previous_time': 0,
'db': db_utils.init_db(),
'nova': client.Client(2,
CONF.os_admin_user,
CONF.os_admin_password,
CONF.global_manager.os_admin_tenant_name,
CONF.global_manager.os_auth_url,
service_type="compute"),
'hashed_username': sha1(CONF.os_admin_user).hexdigest(),
'hashed_password': sha1(CONF.os_admin_password).hexdigest(),
'compute_hosts': CONF.global_manager.compute_hosts,
'host_macs': {}}
def switch_hosts_on(self, hosts):
"""Switch hosts to the active mode.
"""
for host in hosts:
if host not in self.state['host_macs']:
self.state['host_macs'][host] = host_mac(host)
command = '{0} -i {1} {2}'.format(
etherwake,
CONF.global_manager.ether_wake_interface,
self.state['host_macs'][host])
LOG.debug('Calling: %s', command)
subprocess.call(command, shell=True)
LOG.info('Switched on hosts: %s', str(hosts))
self.state['db'].insert_host_states(
dict((x, 1) for x in hosts))
def execute_underload(self, host):
"""Process an underloaded host: migrate all VMs from the host.
1. Prepare the data about the current states of the hosts and VMs.
2. Call the function specified in the `algorithm_vm_placement_factory`
configuration option and pass the data on the states of the hosts
and VMs.
3. Call the Nova API to migrate the VMs according to the placement
determined by the `algorithm_vm_placement_factory` algorithm.
4. Switch off the host at the end of the VM migration.
:param host: A host name.
:return: The updated state dictionary.
"""
LOG.info('Started processing an underload request')
underloaded_host = host
hosts_cpu_total, _, hosts_ram_total = self.state[
'db'].select_host_characteristics()
hosts_to_vms = vms_by_hosts(self.state['nova'],
self.state['compute_hosts'])
vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
# Remove VMs from hosts_to_vms that are not in vms_last_cpu
# These VMs are new and no data have been collected from them
for host, vms in hosts_to_vms.items():
for i, vm in enumerate(vms):
if vm not in vms_last_cpu:
del hosts_to_vms[host][i]
LOG.debug('hosts_to_vms: %s', str(hosts_to_vms))
hosts_cpu_usage = {}
hosts_ram_usage = {}
hosts_to_keep_active = set()
for host, vms in hosts_to_vms.items():
if vms:
host_cpu_mhz = hosts_last_cpu[host]
for vm in vms:
if vm not in vms_last_cpu:
LOG.info('No data yet for VM: %s - skipping host %s',
vm,
host)
hosts_to_keep_active.add(host)
hosts_cpu_total.pop(host, None)
hosts_ram_total.pop(host, None)
hosts_cpu_usage.pop(host, None)
hosts_ram_usage.pop(host, None)
break
host_cpu_mhz += vms_last_cpu[vm]
else:
hosts_cpu_usage[host] = host_cpu_mhz
hosts_ram_usage[host] = host_used_ram(
self.state['nova'], host)
else:
# Exclude inactive hosts
hosts_cpu_total.pop(host, None)
hosts_ram_total.pop(host, None)
LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
# Exclude the underloaded host
hosts_cpu_usage.pop(underloaded_host, None)
hosts_cpu_total.pop(underloaded_host, None)
hosts_ram_usage.pop(underloaded_host, None)
hosts_ram_total.pop(underloaded_host, None)
LOG.debug('Excluded the underloaded host %s', underloaded_host)
LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
vms_to_migrate = vms_by_host(self.state['nova'], underloaded_host)
vms_cpu = {}
for vm in vms_to_migrate:
if vm not in vms_last_cpu:
LOG.info('No data yet for VM: %s - dropping the request',
vm)
LOG.info('Skipped an underload request')
return self.state
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
vm,
CONF.data_collector_data_length)
vms_ram = vms_ram_limit(self.state['nova'], vms_to_migrate)
# Remove VMs that are not in vms_ram
# These instances might have been deleted
        vms_to_migrate = [vm for vm in vms_to_migrate if vm in vms_ram]
if not vms_to_migrate:
LOG.info('No VMs to migrate - completed the underload request')
return self.state
for vm in vms_cpu.keys():
if vm not in vms_ram:
del vms_cpu[vm]
time_step = CONF.data_collector_interval
migration_time = common.calculate_migration_time(
vms_ram,
CONF.network_migration_bandwidth)
if 'vm_placement' not in self.state:
vm_placement_params = common.parse_parameters(
CONF.global_manager.algorithm_vm_placement_parameters)
vm_placement_state = None
vm_placement = common.call_function_by_name(
CONF.global_manager.algorithm_vm_placement_factory,
[time_step,
migration_time,
vm_placement_params])
self.state['vm_placement'] = vm_placement
self.state['vm_placement_state'] = {}
else:
vm_placement = self.state['vm_placement']
vm_placement_state = self.state['vm_placement_state']
LOG.info('Started underload VM placement')
placement, vm_placement_state = vm_placement(
hosts_cpu_usage, hosts_cpu_total,
hosts_ram_usage, hosts_ram_total,
{}, {},
vms_cpu, vms_ram,
vm_placement_state)
LOG.info('Completed underload VM placement')
self.state['vm_placement_state'] = vm_placement_state
LOG.info('Underload: obtained a new placement %s', str(placement))
active_hosts = hosts_cpu_total.keys()
inactive_hosts = set(self.state['compute_hosts']) - set(active_hosts)
prev_inactive_hosts = set(self.state['db'].select_inactive_hosts())
hosts_to_deactivate = list(
inactive_hosts - prev_inactive_hosts - hosts_to_keep_active)
if not placement:
LOG.info('Nothing to migrate')
if underloaded_host in hosts_to_deactivate:
hosts_to_deactivate.remove(underloaded_host)
else:
LOG.info('Started underload VM migrations')
migrate_vms(self.state['db'],
self.state['nova'],
CONF.global_manager.vm_instance_directory,
placement,
CONF.global_manager.block_migration)
LOG.info('Completed underload VM migrations')
if hosts_to_deactivate:
switch_hosts_off(self.state['db'],
CONF.global_manager.sleep_command,
hosts_to_deactivate)
LOG.info('Completed processing an underload request')
return self.state
def execute_overload(self, host, vm_uuids):
"""Process an overloaded host: migrate the selected VMs from it.
1. Prepare the data about the current states of the hosts and VMs.
2. Call the function specified in the `algorithm_vm_placement_factory`
configuration option and pass the data on the states of the hosts
and VMs.
3. Call the Nova API to migrate the VMs according to the placement
determined by the `algorithm_vm_placement_factory` algorithm.
4. Switch on the inactive hosts required to accommodate the VMs.
"""
LOG.info('Started processing an overload request')
overloaded_host = host
hosts_cpu_total, _, hosts_ram_total = self.state[
'db'].select_host_characteristics()
hosts_to_vms = vms_by_hosts(self.state['nova'],
self.state['compute_hosts'])
vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
# Remove VMs from hosts_to_vms that are not in vms_last_cpu
# These VMs are new and no data have been collected from them
for host, vms in hosts_to_vms.items():
for i, vm in enumerate(vms):
if vm not in vms_last_cpu:
del hosts_to_vms[host][i]
hosts_cpu_usage = {}
hosts_ram_usage = {}
inactive_hosts_cpu = {}
inactive_hosts_ram = {}
for host, vms in hosts_to_vms.items():
if vms:
host_cpu_mhz = hosts_last_cpu[host]
for vm in vms:
if vm not in vms_last_cpu:
LOG.info(
'No data yet for VM: %s - skipping host %s',
vm, host)
hosts_cpu_total.pop(host, None)
hosts_ram_total.pop(host, None)
hosts_cpu_usage.pop(host, None)
hosts_ram_usage.pop(host, None)
break
host_cpu_mhz += vms_last_cpu[vm]
else:
hosts_cpu_usage[host] = host_cpu_mhz
hosts_ram_usage[host] = host_used_ram(self.state['nova'],
host)
else:
inactive_hosts_cpu[host] = hosts_cpu_total[host]
inactive_hosts_ram[host] = hosts_ram_total[host]
hosts_cpu_total.pop(host, None)
hosts_ram_total.pop(host, None)
# Exclude the overloaded host
hosts_cpu_usage.pop(overloaded_host, None)
hosts_cpu_total.pop(overloaded_host, None)
hosts_ram_usage.pop(overloaded_host, None)
hosts_ram_total.pop(overloaded_host, None)
LOG.debug('Host CPU usage: %s', str(hosts_last_cpu))
LOG.debug('Host total CPU usage: %s', str(hosts_cpu_usage))
vms_to_migrate = vm_uuids
vms_cpu = {}
for vm in vms_to_migrate:
if vm not in vms_last_cpu:
LOG.info(
'No data yet for VM: %s - dropping the request',
vm)
                LOG.info('Skipped an overload request')
return self.state
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
vm,
CONF.data_collector_data_length)
vms_ram = vms_ram_limit(self.state['nova'], vms_to_migrate)
# Remove VMs that are not in vms_ram
# These instances might have been deleted
        vms_to_migrate = [vm for vm in vms_to_migrate if vm in vms_ram]
if not vms_to_migrate:
LOG.info(
'No VMs to migrate - completed the overload request')
return self.state
for vm in vms_cpu.keys():
if vm not in vms_ram:
del vms_cpu[vm]
time_step = CONF.data_collector_interval
migration_time = common.calculate_migration_time(
vms_ram,
CONF.network_migration_bandwidth)
if 'vm_placement' not in self.state:
vm_placement_params = common.parse_parameters(
CONF.global_manager.algorithm_vm_placement_parameters)
vm_placement_state = None
vm_placement = common.call_function_by_name(
CONF.global_manager.algorithm_vm_placement_factory,
[time_step,
migration_time,
vm_placement_params])
self.state['vm_placement'] = vm_placement
self.state['vm_placement_state'] = {}
else:
vm_placement = self.state['vm_placement']
vm_placement_state = self.state['vm_placement_state']
LOG.info('Started overload VM placement')
placement, vm_placement_state = vm_placement(
hosts_cpu_usage, hosts_cpu_total,
hosts_ram_usage, hosts_ram_total,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram,
vm_placement_state)
LOG.info('Completed overload VM placement')
self.state['vm_placement_state'] = vm_placement_state
LOG.info('Overload: obtained a new placement %s', str(placement))
if not placement:
LOG.info('Nothing to migrate')
else:
hosts_to_activate = list(
set(inactive_hosts_cpu.keys()).intersection(
set(placement.values())))
if hosts_to_activate:
self.switch_hosts_on(hosts_to_activate)
LOG.info('Started overload VM migrations')
migrate_vms(self.state['db'],
self.state['nova'],
CONF.global_manager.vm_instance_directory,
placement,
CONF.global_manager.block_migration)
LOG.info('Completed overload VM migrations')
LOG.info('Completed processing an overload request')
return self.state
def service(self, reason, host, vm_uuids):
try:
if reason == 0:
LOG.info('Processing an underload of a host %s', host)
self.execute_underload(host)
else:
LOG.info('Processing an overload, VMs: %s', str(vm_uuids))
self.execute_overload(host, vm_uuids)
except Exception:
LOG.exception('Exception during request processing:')
raise
| 38.893189
| 79
| 0.608995
|
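The underload and overload handlers above both start by folding per-VM CPU readings into per-host totals before calling the placement algorithm. The snippet below is a self-contained sketch of just that aggregation step, using plain dictionaries and made-up numbers; it involves neither Nova nor the Terracotta database.
def aggregate_host_cpu(hosts_to_vms, hosts_last_cpu, vms_last_cpu):
    # Per-host usage = the host's own last reading plus its VMs' last readings.
    # Hosts with an unmonitored VM are skipped, as in execute_underload() above.
    usage = {}
    for host, vms in hosts_to_vms.items():
        if not vms:
            continue  # inactive host, nothing to aggregate
        if any(vm not in vms_last_cpu for vm in vms):
            continue  # a VM has no data yet
        usage[host] = hosts_last_cpu[host] + sum(vms_last_cpu[vm] for vm in vms)
    return usage

# Illustrative numbers only.
print(aggregate_host_cpu(
    {"host1": ["vm-a", "vm-b"], "host2": []},
    {"host1": 300, "host2": 0},
    {"vm-a": 1200, "vm-b": 800}))  # {'host1': 2300}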
2f84b4ac77f17db4911e46a6b3ece511d5d72154
| 3,143
|
py
|
Python
|
src/modules/powerswitch.py
|
mpember/aiyprojects-raspbian
|
0fab1d56b278a07fb64a99ecbc4838ffdbca813a
|
[
"Apache-2.0"
] | null | null | null |
src/modules/powerswitch.py
|
mpember/aiyprojects-raspbian
|
0fab1d56b278a07fb64a99ecbc4838ffdbca813a
|
[
"Apache-2.0"
] | null | null | null |
src/modules/powerswitch.py
|
mpember/aiyprojects-raspbian
|
0fab1d56b278a07fb64a99ecbc4838ffdbca813a
|
[
"Apache-2.0"
] | 2
|
2019-01-22T11:16:16.000Z
|
2019-04-13T05:46:48.000Z
|
import configparser
import logging
import aiy.audio
import requests
import json
from modules.mqtt import Mosquitto
# PowerSwitch: Send MQTT command to control remote devices
# ================================
#
class PowerSwitch(object):
""" Control power sockets"""
def __init__(self, configPath, remotePath):
self.configPath = configPath
self.remotePath = remotePath
self.mqtt = Mosquitto(configPath)
def run(self, voice_command):
try:
if self.remotePath.startswith("http"):
logging.warning('Loading remote device list')
response = requests.get(self.remotePath)
self.config = json.loads(response.text.lower())
else:
logging.warning('Loading local device list')
self.config = json.loads(open(self.remotePath).read())
        except Exception:
logging.warning('Failed to load remote device list')
return
self.devices = self.config["remotes"]
devices = None
action = None
if voice_command == 'list':
logging.info('Enumerating switchable devices')
aiy.audio.say('Available switches are')
for device in self.devices:
aiy.audio.say(device["names"][0])
return
elif voice_command.startswith('on '):
action = 'on'
devices = voice_command[3:].split(' and ')
elif voice_command.startswith('off '):
action = 'off'
devices = voice_command[4:].split(' and ')
elif voice_command.startswith('up '):
action = 'up'
devices = voice_command[3:].split(' and ')
elif voice_command.startswith('down '):
action = 'down'
devices = voice_command[5:].split(' and ')
else:
aiy.audio.say('Unrecognised command')
            logging.warning('Unrecognised command: ' + voice_command)
return
if action is not None:
for device in devices:
logging.info('Processing switch request for ' + device)
self.processCommand(device, action)
def processCommand(self, device, action):
config = configparser.ConfigParser()
config.read(self.configPath)
if device.startswith('the '):
device = device[4:]
for deviceobj in self.devices:
if device in deviceobj["names"]:
logging.info('Device found: ' + device)
if action in deviceobj["codes"]:
logging.info('Code found for "' + action + '" action')
self.mqtt.command(config["mqtt"].get("power_topic","power/code"), deviceobj["codes"][action])
else:
aiy.audio.say(device + ' does not support command ' + action)
logging.warning('Device "' + device + '" does not support command: ' + action)
return
logging.info('Device not matched')
aiy.audio.say('Unrecognised switch')
logging.warning('Unrecognised device: ' + device)
| 29.933333
| 113
| 0.562202
|
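The run() method above recognises voice commands of the form '<action> <device> and <device> ...'. Below is a stripped-down sketch of that parsing with no MQTT or AIY audio involved; the device names are purely hypothetical.
def parse_switch_command(voice_command):
    # Returns (action, [devices]) or (None, None) for unrecognised commands.
    for action in ("on", "off", "up", "down"):
        prefix = action + " "
        if voice_command.startswith(prefix):
            devices = voice_command[len(prefix):].split(" and ")
            # Drop a leading "the ", as processCommand() does above.
            return action, [d[4:] if d.startswith("the ") else d for d in devices]
    return None, None

print(parse_switch_command("on the kitchen light and fan"))
# ('on', ['kitchen light', 'fan'])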