max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/test_shared/test_utils.py | brucetony/PathMe | 12 | 12768051 | <reponame>brucetony/PathMe
# -*- coding: utf-8 -*-
"""Tests for converting WikiPathways."""
import os
import unittest
from pathme.export_utils import get_paths_in_folder
from pathme.wikipathways.utils import get_file_name_from_url, merge_two_dicts
from tests.constants import WP22, WP_TEST_RESOURCES
class TestUtils(unittest.TestCase):
"""Tests for utils."""
def test_get_wikipathways_files(self):
"""Test getting WikiPathways files."""
files = get_paths_in_folder(WP_TEST_RESOURCES)
self.assertEqual(len(files), 7)
        # check that the known WP22 test file is among the returned paths
        self.assertIn(os.path.join(WP_TEST_RESOURCES, WP22), files)
def test_merge_dict(self):
"""Test merging of two dictionaries."""
dict_1 = {1: 'uno'}
dict_2 = {2: 'dos'}
merged_dict = merge_two_dicts(dict_1, dict_2)
self.assertEqual(merged_dict, {1: 'uno', 2: 'dos'})
def test_url(self):
"""Test get url."""
world = get_file_name_from_url('https://hello/world')
self.assertEqual(world, 'world')
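# A minimal sketch (assuming the standard unittest entry point; not in the
# original file) so the module can also be run directly:
if __name__ == '__main__':
    unittest.main()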
| 2.5625 | 3 |
nanotune/tests/device/conftest.py | microsoft/nanotune | 5 | 12768052 | <gh_stars>1-10
import pytest
from qcodes.tests.instrument_mocks import MockDAC as QcodesMockDAC
from qcodes.instrument.delegate.delegate_instrument import DelegateInstrument
@pytest.fixture(scope="function")
def qcodes_dac():
dac = QcodesMockDAC('qcodes_dac', num_channels=3)
try:
yield dac
finally:
dac.close()
@pytest.fixture(scope="function")
def delegate_instrument(station):
instr = DelegateInstrument(
'dummy',
station,
parameters={'test_param': 'lockin.phase'}
)
return instr
@pytest.fixture(name="moc_dac_server")
def _make_mock_dac_server():
class DACClient:
def __init__(self) -> None:
self.socket = None
def send(self, message: str) -> None:
pass
dac = DACClient()
yield dac
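# Hypothetical usage sketch (test name and assertions are illustrative, not part
# of the original conftest): a test consuming the fixture defined above.
# def test_moc_dac_server(moc_dac_server):
#     moc_dac_server.send("*IDN?")  # send() is a no-op in this mock
#     assert moc_dac_server.socket is None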
| 2.28125 | 2 |
yt_velmodel_vis/shapeplotter.py | chrishavlin/yt_velmodel_vis | 2 | 12768053 | <reponame>chrishavlin/yt_velmodel_vis
'''
shapeplotter module: support for plotting shapefile data on yt scenes
'''
from . import seis_model as sm
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import yt
import os
import geopandas as gpd
from . import datamanager as dm
class shapeTrace(object):
'''
individual trace of a shape.
shapeTrace(lat,lon,radius) with lat,lon in degrees.
lat, lon can be scalar int or float, lists or arrays of the same size and
shape, radius can be single value or list/array of the same shape
'''
def __init__(self,lat,lon,radius=6371.):
coord_input={'lat':lat,'lon':lon,'radius':radius}
self.coords=self.processCoordInputs(coord_input)
self.projectCartesian()
def processCoordInputs(self,coord_input):
'''
initialization function, checks coordinate input after initial
processing.
Parameters:
----------
coord_input dictionary of 'lat', 'lon', 'radius' arrays
Output:
------
coord_input modified dictionary of 'lat', 'lon', 'radius' arrays
Projects radius into a np array of same size as lat/lon
arrays if input radius is a single value.
Raises errors for failed criteria
'''
# make everything an array
for coord, coord_vals in coord_input.items():
if type(coord_vals)==int or type(coord_vals)==float:
coord_input[coord]=np.array([coord_vals])
            if type(coord_vals)==list:
                coord_input[coord]=np.array(coord_vals)  # no extra nesting: keep the (N,) shape
# error checks (everything should be np array by now)
if coord_input['lat'].shape!= coord_input['lon'].shape:
            msg=('You must supply lat, lon values of equal length.'
                 ' lat, lon values can be scalars, lists or arrays but must be the'
                 ' same length (and shape if array/list).')
raise ValueError(msg)
# check radius
if coord_input['lat'].shape != coord_input['radius'].shape:
if coord_input['radius'].shape==(1,):
R=coord_input['radius'][0]
coord_input['radius']= np.full(coord_input['lat'].shape, R)
else:
msg=('Radius must be a single value or have the same length and '
' shape as lat, lon ')
raise ValueError(msg)
# lon in (0,360)
coord_input['lon'][coord_input['lon']<0]=coord_input['lon'][coord_input['lon']<0]+360.
return coord_input
def projectCartesian(self):
'''
projects coordinates to x,y,z
'''
phi=( 90.-self.coords['lat'])*np.pi/180.
theta=self.coords['lon']*np.pi/180
(x,y,z)=sm.sphere2cart(phi,theta,self.coords['radius'])
self.projection={'x':x,'y':y,'z':z}
self.bbox={'x':[x.min(),x.max()],'y':[y.min(),y.max()],'z':[z.min(),z.max()]}
return
def mplot(self,fig=None,ax=None,c=None,ptype='scatter'):
'''
adds shape to matplotlib 3d plot
Parameters:
----------
fig (optional) matplotlib figure handle. will create a new figure if it
does not exist.
ax (optional) the 3d axis handle to plot on, will create a new axis if
creating a new figure, will use the first axis if fig is supplied
without ax
'''
if fig is None:
fig=plt.figure()
if ax is None and len(fig.get_axes())==0:
ax = fig.add_subplot(111, projection='3d')
elif ax is None:
axs=fig.get_axes()
ax=axs[0]
if ptype=='scatter':
ax.scatter(self.projection['x'],self.projection['y'],self.projection['z'],c=c)
elif ptype=='plot':
ax.plot(self.projection['x'],self.projection['y'],self.projection['z'],c=c)
return fig
def buildYtSource(self,src_type='LineSource',RGBa=[1.,1.,1.,0.05],pt_size=1):
'''
builds the LineSource or PointSource to add to a yt view
Parameters:
----------
src_type 'LineSource' (default) or 'PointSource', corresponding to the
yt source types
RGBa 4 element list-like for RGBa values to use
'''
clrs=[]
x=self.projection['x'].ravel()
y=self.projection['y'].ravel()
z=self.projection['z'].ravel()
if src_type=='LineSource':
segments=[]
for i in range(0,len(x)-1):
segment=[[x[i],y[i],z[i]],
[x[i+1],y[i+1],z[i+1]]]
segments.append(segment)
clrs.append(RGBa)
clrs=np.array(clrs)
segments=np.array(segments)
OutSource=yt.visualization.volume_rendering.api.LineSource(segments,clrs)
elif src_type=='PointSource':
verts=np.stack((x,y,z),axis=1)
for vert in verts:
clrs.append(RGBa)
clrs=np.array(clrs)
pt_sizes=np.full(x.shape,pt_size)
OutSource=yt.visualization.volume_rendering.api.PointSource(verts,clrs,radii=pt_sizes)
return OutSource
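# Usage sketch for shapeTrace (values are illustrative assumptions): trace two
# points at the surface radius and build a yt point source from them.
# trace = shapeTrace([10., 20.], [100., 110.], radius=6371.)
# src = trace.buildYtSource(src_type='PointSource', RGBa=[1., 0., 0., 0.5], pt_size=2)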
def addShapeToScene(sc,lats,lons,rads,src_type='LineSource',RGBa=[1.,1.,1.,0.005],pt_size=3):
'''
    appends a shapeTrace to the current scene, wraps PointSource and LineSource
parameters
----------
sc a yt scene instance or an empty list
lats, lons latitude and longitude, can be scalar int or float, lists or
arrays of the same size and shape
rads radius, single value scalar or list/array same shape as
lats, lons
src_type (optional) either 'PointSource' or 'LineSource', default
is 'LineSource'
    RGBa         (optional) The RGBa value to use for all points or line
                 segments, default is [1.,1.,1.,0.005]
pt_size (optional) The pixel size of point data, default is 3
Output
------
sc scene with shapeTrace added
'''
shp=shapeTrace(lats,lons,rads)
if type(sc)==list:
sc.append(shp)
else:
sc.add_source(shp.buildYtSource(src_type=src_type,RGBa=RGBa,pt_size=pt_size))
return sc
class sphericalChunk(object):
'''
class for adding annotations to a spherical chunk in cartesian coordinates
SC=sphericalChunk(lat_range,lon_range,radius_range)
parameters
----------
lat_range list or tuple of latitude range, [min_lat, max_lat]
lon_range list or tuple of longitude range, [min_lon, max_lon]
radius_range list or tuple of radius range, [min_radius, max_radius]
'''
def __init__(self,lat_range,lon_range,radius_range):
self.lat_range=lat_range
self.lon_range=lon_range
self.radius_range=radius_range
return
def domainExtent(self,sc,RGBa=[1.,1.,1.,0.005],n_latlon=100,n_rad=25):
'''
adds domain boundary for spherical grid interpolated to cartesian grid
parameters
----------
sc a yt scene instance or an empty list
        RGBa       (optional) The RGBa value to use for all points or line
                   segments, default is [1.,1.,1.,0.005]
n_latlon (optional) number of points to use to create line segments
for lat/lon segment at fixed radius, default is 100
n_rad (optional) number of points for variable radius segments,
default is 25
Output
------
sc modified yt scene
'''
# extents of this chunk
lat_range=self.lat_range
lon_range=self.lon_range
radius_range=self.radius_range
# constant radius, variable lat/lon boundaries
lats=np.linspace(lat_range[0],lat_range[1],n_latlon)
lons=np.linspace(lon_range[0],lon_range[1],n_latlon)
for this_rad in radius_range:
sc=addShapeToScene(sc,lats,np.full(lats.shape,lon_range[0]),this_rad,RGBa=RGBa)
sc=addShapeToScene(sc,lats,np.full(lats.shape,lon_range[1]),this_rad,RGBa=RGBa)
sc=addShapeToScene(sc,np.full(lons.shape,lat_range[0]),lons,this_rad,RGBa=RGBa)
sc=addShapeToScene(sc,np.full(lons.shape,lat_range[1]),lons,this_rad,RGBa=RGBa)
# boundary lines of constant lat/lon and variable radius
rads=np.linspace(radius_range[0],radius_range[1],n_rad)
rshp=rads.shape
for lat in lat_range:
for lon in lon_range:
lats=np.full(rshp,lat)
lons=np.full(rshp,lon)
sc=addShapeToScene(sc,lats,lons,rads,RGBa=RGBa)
return sc
def latlonGrid(self,sc,n_lat=10,n_lon=10,radius=None,n_lat2=50,n_lon2=50,RGBa=[1.,1.,1.,0.005]):
'''
latlonGrid(n_lat=10,n_lon=10)
adds a lat/lon grid at fixed radius (max radius by default)
Parameters
----------
sc the yt scene to add to
n_lat (optional) number of latitudinal lines to add, default 10
n_lon (optional) number of longitudinal lines to add, default 10
radius (optional) the radius to add the grid at, default is None,
which will pull the max radius from radius_range
n_lat2 (optional) for a given longitude, number of lat points to use
for line segments, default is 50
n_lon2 (optional) for a given latitude, number of lon points to use
for line segments, default is 50
RGBa (optional) The RGBa for lat/lon grid, default [1.,1.,1.,0.005]
Output
------
sc the modified yt scene
'''
if radius is None:
radius = max(self.radius_range)
# fixed lat, vary longitude
lat_pts=np.linspace(self.lat_range[0],self.lat_range[1],n_lat)
lon_pts=np.linspace(self.lon_range[0],self.lon_range[1],n_lon2)
for lat in lat_pts:
lats=np.full(lon_pts.shape,lat)
sc=addShapeToScene(sc,lats,lon_pts,radius,RGBa=RGBa)
# fixed longitude, vary latitude
lon_pts=np.linspace(self.lon_range[0],self.lon_range[1],n_lon)
lat_pts=np.linspace(self.lat_range[0],self.lat_range[1],n_lat2)
for lon in lon_pts:
lons=np.full(lat_pts.shape,lon)
sc=addShapeToScene(sc,lat_pts,lons,radius,RGBa=RGBa)
return sc
def wholeSphereReference(self,sc,RGBa=[1.,1.,1.,0.001],radius=None):
'''
adds whole sphere reference
'''
if radius is None:
radius = max(self.radius_range)
# radius lines from 0 to radius
rads=np.linspace(0,radius,10)
rshp=rads.shape
for lat in self.lat_range:
for lon in self.lon_range:
lats=np.full(rshp,lat)
lons=np.full(rshp,lon)
sc=addShapeToScene(sc,lats,lons,rads,RGBa=RGBa)
return sc
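# Usage sketch for sphericalChunk (ranges are illustrative assumptions; sc is an
# existing yt scene): draw the chunk extent and a lat/lon grid at the outer radius.
# chunk = sphericalChunk([30., 45.], [240., 260.], [5000., 6371.])
# sc = chunk.domainExtent(sc)
# sc = chunk.latlonGrid(sc, n_lat=5, n_lon=5)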
def availableShapeFiles():
''' returns a dictionary of available shapefiles '''
# those included in package [filename,short name, category, description,source]
keynames=['file','short_name','category','description','source']
included=[
['cb_2018_us_state_20m.shp','us_states','political_boundaries',
'US state boundaries, 20m resolution','https://census.gov'],
['GLB_VOLC.shp','global_volcanos','tectonic',
'global volcanic fields with eruptions in last 10k years',
'https://earthworks.stanford.edu/catalog/harvard-glb-volc'],
['ridge.shp','ridge','tectonic',
'divergent plate boundaries',
'http://www-udc.ig.utexas.edu/external/plates/data.htm'],
['transform.shp','transform','tectonic',
'transform plate boundaries',
'http://www-udc.ig.utexas.edu/external/plates/data.htm'],
['trench.shp','trench','tectonic',
'convergent plate boundaries',
'http://www-udc.ig.utexas.edu/external/plates/data.htm']
]
# add on the natural earth files
src='https://www.naturalearthdata.com/'
res_s={'10m':'h','50m':'m','110m':'l'}
for res in ['10m','50m','110m']:
a=res_s[res]
        # include an empty description entry so these rows align with all five keynames
        included.append(
            ['ne_'+res+'_admin_0_countries.shp','countries_'+a,'political_boundaries','',src]
        )
        included.append(
            ['ne_'+res+'_admin_1_states_provinces.shp','states_'+a,'political_boundaries','',src]
        )
        included.append(
            ['ne_'+res+'_coastline.shp','coast_'+a,'tectonic','',src]
        )
db=dm.filesysDB()
shapeDict={'available':[],'details':{}}
fullfiles=[]
for shp in included:
if db.validateFile(shp[0]):
new_row=dict(zip(keynames,shp))
shapeDict['available'].append(shp[1])
shapeDict['details'][shp[1]]=new_row
fullfiles.append(shp[0].split('.')[0])
# look for other shapefiles in db
for fi in db.FilesByDir['shapedata']:
shrtnm=fi.split('.')[0]
file_ext=fi.split('.')[-1]
if shrtnm not in fullfiles and file_ext in ['shp']:
fullfiles.append(fi)
this_fi=[fi,shrtnm,'','','']
shapeDict['available'].append(shrtnm)
shapeDict['details'][shrtnm]=dict(zip(keynames,this_fi))
return shapeDict
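# Example sketch: list the short names of shapefiles found on disk and look up
# the full record for the first one.
# shp_info = availableShapeFiles()
# print(shp_info['available'])
# print(shp_info['details'][shp_info['available'][0]])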
class shapedata(object):
'''
parses shapefiles using geopandas to construct yt line and point sources
from points, lines and polygons in shapefiles.
shp=shapedata(filename,buildTraces=True,bbox=None)
Parameters
----------
filename the full path filename of the shapefile OR the short_name from
availableShapeFiles()
buildTraces if True, will build the traces on instantiating class
bbox bounding box to use when reading in shapefile, four element list
[lon_min,lat_min,lon_max,lat_max]
'''
def __init__(self,filename,buildTraces=True,bbox=None,radius=6371.):
self.db=dm.filesysDB()
if os.path.isfile(filename):
self.filename=filename
else:
shpDict=availableShapeFiles()
            if filename in shpDict['available']:
                # given filename is a short name
                short_name=filename
                filename=shpDict['details'][filename]['file'] # now file name
                filename=self.db.validateFile(filename) # now full path (or False)
                if filename is False:
                    raise ValueError(short_name + ' does not exist.')
                else:
                    self.filename=filename
            else:
                raise ValueError(filename + ' does not exist.')
self.radius=radius
self.Traces=[]
if buildTraces:
self.Traces=self.buildTraces(bbox=bbox)
return
def buildTraces(self,traces=[],bbox=None,sc=None,include_points=True,
include_lines=True,include_polygons=True,
RGBa=[1.,1.,1.,0.05],pt_size=3):
'''
loads a shapefile and builds the yt traces.
shapedata.buildTraces(traces=[],bbox=None,sc=None,include_points=True,
include_lines=True,include_polygons=True,
RGBa=[1.,1.,1.,0.05],pt_size=3):
Parameters
----------
traces list of yt point or line sources
bbox bounding box for shapefile read
        sc                the yt scene to add traces to
include_points boolean, include point data from shapefile?
include_lines boolean, include line data from shapefile?
include_polygons boolean, include polygon data from shapefile?
RGBa RGBa list or tuple for yt
pt_size pixel size for point data
    Output
    ------
    if sc is provided, returns
        sc          the modified yt scene
    otherwise, returns
        traces      list of yt line and point sources
    RGBa and pt_size are passed through to shapeTrace.buildYtSource
'''
R0=self.radius
def traversePoints(df,traces=[]):
'''
traverses shapefile points, appends to traces
'''
# print("traversing point data")
# handle Points
pt_df=df[df.geometry.type=='Point']['geometry']
if len(pt_df)>0:
# print(len(pt_df))
pts=shapeTrace(pt_df.y.to_numpy(),pt_df.x.to_numpy(),R0)
# print('appending with point size '+str(pt_size)+' and RGBa')
# print(RGBa)
traces.append(pts.buildYtSource('PointSource',RGBa,pt_size))
# handle MultiPoints
# print("traversing multipoint")
pt_df=df[df.geometry.type=='MultiPoint'].geometry.tolist()
lons=[]
lats=[]
for multipt in pt_df:
for pt in multipt:
lons.append(pt.x)
lats.append(pt.y)
# print('assembled lat lons for multipoint')
# print(len(lons))
# print(len(lats))
if len(lons)>0 and len(lats)>0:
pts=shapeTrace(lats,lons,R0)
traces.append(pts.buildYtSource('PointSource',RGBa,pt_size))
return traces
def traverseLines(df,traces=[]):
'''
traverses shapefile lines, appends line segments to traces
'''
# Lines
pt_df=df[df.geometry.type=='LineString'].geometry.tolist()
for ln in pt_df:
pts=shapeTrace(np.array(ln.xy[1]),np.array(ln.xy[0]),R0)
traces.append(pts.buildYtSource('LineSource',RGBa))
            # MultiLineStrings
            pt_df=df[df.geometry.type=='MultiLineString'].geometry.tolist()
for lns in pt_df:
for ln in lns:
pts=shapeTrace(np.array(ln.xy[1]),np.array(ln.xy[0]),R0)
traces.append(pts.buildYtSource('LineSource',RGBa))
return traces
def traversePoly(poly,traces=[]):
'''
appends traces for each line segment in a polygon
'''
# print(" traversing single polygon")
if poly.boundary.type=='LineString':
# print(' points in this polygon boundary:')
# print(len(poly.boundary.xy[1]))
pts=shapeTrace(np.array(poly.boundary.xy[1]),np.array(poly.boundary.xy[0]),R0)
traces.append(pts.buildYtSource('LineSource',RGBa))
else:
# print(" looping over multiline")
for ln in poly.boundary:
# print(len(ln.xy[1]))
pts=shapeTrace(np.array(ln.xy[1]),np.array(ln.xy[0]),R0)
traces.append(pts.buildYtSource('LineSource',RGBa))
return traces
def traversePolygons(df,traces=[]):
'''
traverses shapefile polygons, appends line segments to traces
'''
# Polygons
pt_df=df[df.geometry.type=='Polygon'].geometry.tolist()
# print("traversing "+str(len(pt_df))+' polygons')
for poly in pt_df:
traces=traversePoly(poly,traces)
# Multi-Polygons
pt_df=df[df.geometry.type=='MultiPolygon'].geometry.tolist()
# print("traversing "+str(len(pt_df))+' multi polygons')
for multipoly in pt_df:
for poly in multipoly:
traces=traversePoly(poly,traces)
return traces
df=gpd.read_file(self.filename,bbox=bbox)
if include_points:
traces=traversePoints(df,traces)
if include_lines:
traces=traverseLines(df,traces)
if include_polygons:
traces=traversePolygons(df,traces)
if sc is not None:
return self.addToScene(sc,traces)
else:
return traces
def addToScene(self,sc,traces=[]):
'''
shapedata.addToScene(sc,traces)
adds the yt line and point sources to the yt scene
Parameters
----------
sc the yt scene to modify
traces list of yt line and point sources
Output
------
sc the modified yt scene
'''
if len(traces)==0:
traces=self.Traces
for Trc in traces:
sc.add_source(Trc)
return sc
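# Usage sketch for shapedata ('coast_l' is one of the short names registered in
# availableShapeFiles(); sc is assumed to be an existing yt scene):
# shp = shapedata('coast_l', buildTraces=True)
# sc = shp.addToScene(sc)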
| 2.578125 | 3 |
src/test/pythonFiles/definition/three.py | ChaseKnowlden/vscode-jupyter | 615 | 12768054 | import two
two.ct().fun() | 1.023438 | 1 |
FaceDetection/face_classifier.py | technetbytes/OpenCV | 0 | 12768055 | <filename>FaceDetection/face_classifier.py
import numpy as np
import cv2
#open data folder path
folder_path = "/home/saqib/anaconda3/share/OpenCV/haarcascades/"
#set cascade classifier xml files
frontalface_classifier = 'haarcascade_frontalface_default.xml'
eye_classifier = 'haarcascade_eye.xml'
face_cascade = cv2.CascadeClassifier(folder_path + frontalface_classifier)
eye_cascade = cv2.CascadeClassifier(folder_path + eye_classifier)
input_name = "close_up.jpg"
output_name = "classifier_output.jpg"
#read image
img = cv2.imread(input_name)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#apply both classifiers; detectMultiScale(gray, 1.3, 5) uses scaleFactor=1.3, minNeighbors=5
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),12)
#write image
cv2.imwrite(output_name,img) | 3.171875 | 3 |
commands/perform.py | zbylyrcxr/DennisMUD | 2 | 12768056 | #######################
# <NAME> #
# perform.py #
# Copyright 2018-2020 #
# <NAME> #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.color import *
import random
NAME = "perform"
CATEGORIES = ["actions","users"]
ALIASES = ["miracle", "cast", "ritual"]
SCOST=0
USAGE = "perform <ritual> <optional ...>"
DESCRIPTION = """Perform the ritual called <ritual>.
Current rituals: telepathy, identify, reveal, seer, ghost, cleanse, whirlpool.
TELEPATHY can send someone an anonymous message.
IDENTIFY can show you additional information about an object.
REVEAL can reveal hidden things in a room.
SEER can show you information about the location of someone.
GHOST can hide you almost completely for continual spirit cost.
CLEANSE can cleanse someone and the cursed items they have.
WHIRLPOOL can teleport a sleeping player to a random room.
Ex. `perform telepathy seisatsu Hello there!`
Ex1. `perform reveal`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argmin=1, awake=True):
return False
# WHIRLPOOL can teleport a sleeping player to a random room.
if args[0]=="whirlpool":
SCOST=5
if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST, awake=True):
return False
thisreceiver=' '.join(args[1:])
targetuser = COMMON.check_user(NAME, console, thisreceiver, room=True, online=True, live=True, reason=False,
wizardskip=["room", "online"])
if not targetuser:
# Check for a partial user match, and try running again if there's just one.
partial = COMMON.match_partial(NAME, console, thisreceiver.lower(), "user")
if partial:
partial=["whirlpool"]+partial
return COMMAND(console, partial)
console.msg("{0}: No such user in this room.".format(NAME))
return False
# Found the user, let's teleport them away.
userconsole = console.shell.console_by_username(targetuser["name"])
if not userconsole:
return False
elif userconsole["posture"]=="sleeping":
console.shell.broadcast_room(console,"{0} whispers some words in the ears of {1}.".format(console.user["nick"],targetuser["nick"]))
destroom=random.choice(console.database.rooms.all())
thisroom = COMMON.check_room(NAME, console, console.user["room"])
# Somehow we got a nonexistent room. Log and report it.
if not destroom:
console.msg("{0}: ERROR: Tried to teleport a sleeper into a nonexistent room!".format(NAME))
console.log.error("Tried to teleport a sleeper into a nonexistent room!")
# Proceed with teleportation.
else:
if userconsole["posture_item"]: userconsole["posture_item"]=""
# Remove us from the current room.
if targetuser["name"] in thisroom["users"]:
thisroom["users"].remove(targetuser["name"])
# Add us to the destination room.
if targetuser["name"] not in destroom["users"]:
destroom["users"].append(targetuser["name"])
# Broadcast our teleportation to the origin room.
console.shell.broadcast_room(console, "{0} vanished from the room.".format(targetuser["nick"]))
# Set our current room to the new room.
targetuser["room"] = destroom["id"]
# Broadcast our arrival to the destination room, but not to ourselves.
console.shell.broadcast_room(userconsole, "{0} appeared.".format(targetuser["nick"]))
# Save the origin room, the destination room, and our user document.
console.database.upsert_room(thisroom)
console.database.upsert_room(destroom)
console.database.upsert_user(targetuser)
# Update console's exit list.
userconsole.exits = []
for exi in range(len(destroom["exits"])):
userconsole.exits.append(destroom["exits"][exi]["name"])
else:
if userconsole.user["pronouns"]=="male":
console.msg("He is not asleep.")
elif userconsole.user["pronouns"]=="female":
console.msg("She is not asleep.")
elif userconsole.user["pronouns"]=="neutral":
console.msg("They are not asleep.")
else: console.msg("{0} is not asleep.".format(userconsole.user["pronouns"].capitalize()))
return False
# CLEANSE can cleanse someone and the cursed items they have.
elif args[0]=="cleanse":
SCOST=5
# Should we be able to cleanse ourselves?
#if thisreceiver==console.user["name"] or thisreceiver==console.user["nick"] or thisreceiver==console.user["nick"].lower():
# console.msg("Can't cleanse yourself.")
# return False
if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST, awake=True):
return False
thisreceiver = ' '.join(args[1:])
targetuser = COMMON.check_user(NAME, console, thisreceiver, room=True, online=True, live=True, reason=False,
wizardskip=["room", "online"])
if not targetuser:
# Check for a partial user match, and try running again if there's just one.
partial = COMMON.match_partial(NAME, console, thisreceiver, "user", message=False)
if partial:
return COMMAND(console,["cleanse"]+partial)
console.msg("{0}: No such user in this room.".format(NAME))
return False
if console.user["name"]==targetuser["name"]:
if console.user["pronouns"]=="male":
msg = "{0} focuses on himself for a moment.".format(console.user["nick"])
elif console.user["pronouns"]=="female":
msg = "{0} focuses on herself for a moment.".format(console.user["nick"])
elif console.user["pronouns"]=="neutral":
msg = "{0} focuses on themself for a moment.".format(console.user["nick"])
else:
msg = "{0} focuses on {1}self for a moment.".format(console.user["nick"],console.user["pronouno"])
else:
msg = "{0} focuses on {1} for a moment.".format(console.user["nick"],targetuser["nick"])
console.shell.broadcast_room(console, msg)
for it in targetuser["inventory"]:
thisitem = COMMON.check_item(NAME, console, it, owner=False, holding=False)
if thisitem["cursed"]["enabled"]:
thisitem["cursed"]["enabled"]=False
console.database.upsert_item(thisitem)
if not console.user["name"]==targetuser["name"]:
console.shell.msg_user(targetuser["name"],"{0} cleansed some of your items.".format(console.user["nick"]))
if targetuser["pronouns"]=="male":
console.msg("You cleansed some of his items.")
elif targetuser["pronouns"]=="female":
console.msg("You cleansed some of her items.")
elif targetuser["pronouns"]=="neutral":
console.msg("You cleansed some of their items.")
else:
console.msg("You cleansed some of {0} items.".format(targetuser["pronouno"]))
else:
console.msg("You cleansed some of your items.")
return True
# SEER can show you information about the location of someone.
elif args[0]=="seer":
SCOST=5
if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST):
return False
thisreceiver = ' '.join(args[1:])
# Make sure the named user exists and is online.
targetuser = COMMON.check_user(NAME, console, thisreceiver, online=True)
if not targetuser:
# Check for a partial user match, and try running again if there's just one.
partial = COMMON.match_partial(NAME, console, thisreceiver, "user", message=False)
if partial:
return COMMAND(console,["seer"]+partial)
console.msg("{0}: No such user was found.".format(NAME))
return False
# Look up room.
targetroom = COMMON.check_room(NAME, console, roomid=targetuser["room"])
msg = "{0} looks into the distance for a moment.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
console.msg("You see a vision... \n{0}\nThe vision ends...".format(targetroom["desc"]))
return True
# GHOST can hide you almost completely for continual spirit cost.
elif args[0]=="ghost":
SCOST=50
if not COMMON.check(NAME, console, args, argmax=1, spiritcost=SCOST):
return False
# We are ghosts already, lets appear.
if console.user["ghost"]:
console.user["spirit"]+=50
msg = "{0} suddenly appears.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
console.user["ghost"]=False
# We arent ghosts, lets disappear.
else:
msg = "{0} mutters a few words and disappears.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
console.user["ghost"]=True
console.database.upsert_user(console.user)
return True
# REVEAL can reveal hidden things in a room.
elif args[0]=="reveal":
SCOST=5
if not COMMON.check(NAME, console, args, argmax=1, spiritcost=SCOST):
return False
msg = "{0} tries to reveal hidden things with a ritual.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
destroom = COMMON.check_room(NAME,console)
dexits = destroom["exits"]
for dex in range(len(dexits)):
# Check for randomized chance
if dexits[dex]["chance"] and dexits[dex]["hidden"]==True:
if random.randint(1,dexits[dex]["chance"])==1:
dexits[dex]["hidden"]=False
# Random items check.
ditems = destroom["items"]
for dit in ditems:
dit = console.database.item_by_id(dit)
# Check for randomized chance
if dit["chance"] and dit["hidden"]==True:
if dit["truehide"]==True:
console.msg("You sense {0} being hidden around here.".format(COMMON.format_item(NAME, dit["name"])))
elif random.randint(1,dit["chance"])==1:
dit["hidden"]=False
# Should we be able to reveal ghosts?
#for uss in destroom["users"]:
# duss = console.database.user_by_name(uss)
# if duss["ghost"]:
# if random.randint(1,4)==1:
# duss["ghost"]=False
# console.shell.msg_user(duss["name"],"Someone revealed you.")
return True
# IDENTIFY can show you additional information about an object.
elif args[0]=="identify":
SCOST=5
found_something = False
partials = []
target=' '.join(args[1:])
if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST):
return False
# Lookup the current room and perform room checks.
thisroom = COMMON.check_room(NAME, console)
if not thisroom:
return False
# It wasn't us, so maybe it's an item in the room.
for itemid in thisroom["items"]:
item = console.database.item_by_id(itemid)
# A reference was found to a nonexistent item. Report this and continue searching.
if not item:
console.log.error("Item referenced in room does not exist: {room} :: {item}", room=console.user["room"],
item=itemid)
console.msg("{0}: ERROR: Item referenced in this room does not exist: {1}".format(NAME, itemid))
continue
attributes = []
# Record partial matches.
if target in item["name"].lower() or target.replace("the ", "", 1) in item["name"].lower():
partials.append(item["name"].lower())
# It was an item in the room. Show the item's name, ID, owners, description, and attributes.
if target in [item["name"].lower(), "the " + item["name"].lower()]:
# Only enumerate item attributes if we are the item owner or a wizard.
if item["duplified"]:
attributes.append("This thing can be anywhere, somehow at the same time.")
if item["cursed"]["enabled"]:
attributes.append("A dark presence haunts it.")
if item["glued"]:
attributes.append("This object can't be carried with you.")
if item["truehide"]:
attributes.append("Maybe it's invisible, but something truly hides it from sight.")
if item["hidden"]:
attributes.append("Somehow it blends into it's environment.")
if item["lang"]:
attributes.append("You sense that this thing can teach you and alter your language.")
if item["container"]["enabled"]:
attributes.append("Something else could easily fit into the insides of this object.")
if item["telekey"]:
attributes.append("Using this thing would take you somewhere else.")
# Send the info for this item.
if len(attributes)>0:
console.msg("You sense the {0}. {1}".format(item["name"], ' '.join(attributes)))
else:
console.msg("You sense the {0}.".format(item["name"]))
console.msg("It seems to be connected to {0}.".format(', '.join(item["owners"])))
# List content if it's a container
if item["container"]["enabled"]:
if len(item["container"]["inventory"])>0:
console.msg("{0} seems to contain some items.".format(item["name"].capitalize()))
else:
console.msg("{0} seems to be empty.".format(item["name"].capitalize()))
found_something = True
msg = "{0} performs a ritual of knowledge.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
return True
# Maybe it's an item in our inventory.
for itemid in console.user["inventory"]:
item = console.database.item_by_id(itemid)
# A reference was found to a nonexistent item. Report this and continue searching.
if not item:
console.log.error("Item referenced in user inventory does not exist: {user} :: {item}",
user=console.user["name"], item=itemid)
console.msg("{0}: ERROR: Item referenced in your inventory does not exist: {1}".format(NAME, itemid))
continue
attributes = []
# Record partial matches.
if target in item["name"].lower() or target.replace("the ", "", 1) in item["name"].lower():
partials.append(item["name"].lower())
# It was an item in our inventory. Show the item's name, ID, owners, description, and attributes,
# but only if we didn't already see it in the current room. Also check if the user prepended "the ".
if target in [item["name"].lower(), "the " + item["name"].lower()]:
# Only enumerate item attributes if we are the item owner or a wizard.
if item["duplified"]:
attributes.append("This thing can be anywhere, somehow at the same time.")
if item["cursed"]["enabled"]:
attributes.append("A dark presence haunts it.")
if item["glued"]:
attributes.append("This object can't be carried with you.")
if item["truehide"]:
attributes.append("Maybe it's invisible, but something truly hides it from sight.")
if item["hidden"]:
attributes.append("Somehow it blends into it's environment.")
if item["lang"]:
attributes.append("You sense that this thing can teach you and alter your language.")
if item["container"]["enabled"]:
attributes.append("Something else could easily fit into the insides of this object.")
if item["telekey"]:
attributes.append("Using this thing would take you somewhere else.")
# Send the info for this item.
if len(attributes)>0:
console.msg("You sense the {0}. {1}".format(item["name"], ' '.join(attributes)))
else:
console.msg("You sense the {0}.".format(item["name"]))
console.msg("It seems to be connected to {0}.".format(', '.join(item["owners"])))
# Description exists, so show it.
#if item["desc"]:
# console.msg(item["desc"])
# List content if it's a container
if item["container"]["enabled"]:
if len(item["container"]["inventory"])>0:
console.msg("{0} seems to contain some items.".format(item["name"].capitalize()))
else:
console.msg("{0} seems to be empty.".format(item["name"].capitalize()))
found_something = True
msg = "{0} performs a ritual of knowledge.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
return True
# We didn't find anything by that name. See if we found partial matches.
if not found_something:
# Eliminate duplicate matches.
if partials:
partials = list(dict.fromkeys(partials))
# We got exactly one partial match. Assume that one.
if len(partials) == 1:
#console.msg("Assuming {0}.".format(partials[0]))
console.user["spirit"]+=SCOST
partials[0]="identify "+partials[0]
return COMMAND(console, partials[0].split(' '))
# We got up to 5 partial matches. List them.
elif partials and len(partials) <= 5:
console.msg("{0}: Did you mean one of: {1}".format(NAME, ', '.join(partials)))
return False
# We got too many matches.
elif len(partials) > 5:
console.msg("{0}: Too many possible matches.".format(NAME))
return False
# Really nothing.
else:
console.msg("{0}: No such thing: {1}".format(NAME, ' '.join(args[1:])))
return False
# TELEPATHY can send someone an anonymous message.
elif args[0]=="telepathy":
SCOST=5
if not COMMON.check(NAME, console, args, argmin=3, spiritcost=SCOST):
return False
# Make sure the named user exists and is online.
targetuser = COMMON.check_user(NAME, console, args[1].lower(), online=True)
if not targetuser:
return False
# Finished. Message the user, and echo the message to ourselves, if it wasn't a self-message.
console.shell.msg_user(args[1].lower(), mcolor(CBYELLO,"You hear a whisper in your mind: '{0}'".format(' '.join(args[2:])),targetuser["colors"]))
if targetuser["name"] != console.user["name"]:
console.msg(mcolor(CBYELLO,"You plant a message in the mind of {0}, that says: '{1}'".format(targetuser["name"], ' '.join(args[2:])),console.user["colors"]))
msg = "{0} focuses for a moment to perform a ritual.".format(console.user["nick"])
console.shell.broadcast_room(console, msg)
return True
# Unknown ritual name.
else:
console.msg("You never heard of such a ritual.")
return False
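# Hypothetical invocation sketch (not part of the original module): the game
# shell would parse `perform telepathy seisatsu Hello there!` into roughly
# COMMAND(console, ["telepathy", "seisatsu", "Hello", "there!"])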
| 1.796875 | 2 |
backend/project/conversations/urls.py | winoutt/winoutt-django | 0 | 12768057 | <reponame>winoutt/winoutt-django
from django.urls import path, re_path
from . import views
urlpatterns = [
# Message Urls
path('api/messages', views.Message.as_view(), name='messages'),
path('api/messages/<int:message_id>', views.ReadMessage.as_view(), name='read_message'),
path('api/messages/<int:chat_id>/paginate', views.PaginateMessages.as_view(), name='paginate_messages'),
path('api/messages/unreads/count', views.UnreadMessageCount.as_view(), name='unread_message_count'),
# Chats Urls
path('api/chats/<int:chat_id>/archive', views.ChatArchiveHandler.as_view(), name='archive_chat'),
path('api/chats/<int:chat_id>/unarchive', views.ChatArchiveHandler.as_view(), name='unarchive_chat'),
path('api/chats/paginate', views.ChatPaginator.as_view(), name='chat_paginator'),
path('api/chats/archived', views.ArchivedChat.as_view(), name='chat_archived'),
path('api/chats/search', views.SearchChat.as_view(), name='chat_search'),
path('api/chats/<int:chat_id>/read', views.ReadChat.as_view(), name='read_chat'),
path('api/chats/mark/delivered', views.MarkDelivered.as_view(), name='mark_delivered'),
path('api/chats/user/<int:user_id>', views.ReadFromUser.as_view(), name='read_from_user'),
] | 1.875 | 2 |
beacon_aug/screenshot.py | adobe-research/beacon-aug | 11 | 12768058 | # Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib
# matplotlib.use('TkAgg')
import albumentations as A
from skimage import data
import os
from copy import deepcopy
import random
import time
from PIL import Image
from skimage.color import label2rgb
# import beacon_aug as BA
from . import properties
''' flatten the pipeline tree'''
def extract_single_operation(augPipeline):
def flatten(dict, flatten_ls=[]):
'''use DFS to unfold the operations'''
for operation in dict["transforms"]: # "OneOf" or "OneOrOther", etc
class_name = operation['__class_fullname__']
if "." in class_name:
if operation['__class_fullname__'].split(".")[-2] == "composition":
flatten(operation, flatten_ls)
continue
flatten_ls.append(operation)
return flatten_ls
transform_dict = A.to_dict(augPipeline)
flatten_ls = flatten(transform_dict["transform"])
return [{'__version__': transform_dict['__version__'], 'transform':opr} for opr in flatten_ls]
def screenshot_pipeline(augPipeline, image, save_fig_path=None):
''' Visualize an augmentation pipeline by displaying the extreme case for all the parameters
'''
# get the flattened operator sequence avoiding hierarchical structure
single_operation_ls = extract_single_operation(augPipeline)
numOfOperation = len(single_operation_ls)
fig, axs = plt.subplots(numOfOperation, 3,
figsize=(6, 2*numOfOperation),
constrained_layout=True)
axs[0, 1].set_title("Lower Limit")
axs[0, 2].set_title("Upper Limit")
for i, single_operation in enumerate(single_operation_ls):
# Extract the upper and lower limit
transform_name = single_operation["transform"]['__class_fullname__'].split(".")[-1]
        # deep copy so the lower/upper limit variants do not share the same dict
lowerAndUpper = [single_operation, deepcopy(single_operation)]
limit_para_name = None
# Extract all the limit parameters
for para in single_operation["transform"]:
if para == "p": # change prob to 1 to make it always happen
lowerAndUpper[0]["transform"][para] = 1
lowerAndUpper[1]["transform"][para] = 1
if "limit" in para:
limit_para_name = para
original_values = list(single_operation["transform"][para])
lowerAndUpper[0]["transform"][para] = [original_values[0]]*2
lowerAndUpper[1]["transform"][para] = [original_values[1]]*2
# plot
for lu in range(2): # lower or upper limit
lu_transform = A.from_dict(lowerAndUpper[lu])
axs[i, lu+1].imshow(lu_transform(image=image)["image"])
axs[i, lu+1].axis("off")
if limit_para_name:
axs[i, 0].text(0.15, 0.5, transform_name+"\n" + limit_para_name+":" +
str(lowerAndUpper[0]["transform"][limit_para_name][0]) + "," +
str(lowerAndUpper[1]["transform"][limit_para_name][1]), dict(size=10))
else:
axs[i, 0].text(0.15, 0.5, transform_name, dict(size=10))
axs[i, 0].axis("off")
if save_fig_path:
figname = os.path.join(save_fig_path, "aug_pipeline-screenshot.png")
print("\n...screenshot figure save as : ", figname)
plt.savefig(figname)
return fig
def screenshot_library(BA_operator, image_data, save_fig_path=None, individual_fig=False, **kwargs):
''' Visualize the augmentation result comparision to all available libraries
e.g.
----
import beacon_aug as BA
from beacon_aug import screenshot
fig, __ = BA.screenshot.screenshot_library(BA.Brightness(), image_data=image)
fig.show()
'''
avail_libraries = BA_operator(**kwargs).avail_libraries
numOfLibraries = len(avail_libraries)
fig, axs = plt.subplots(2, 1 + numOfLibraries,
figsize=(4*numOfLibraries, 4),
constrained_layout=True)
fig.suptitle("beacon_aug."+BA_operator.__name__ + " with " +
str(kwargs)) # or plt.suptitle('Main title')
axs[0][0].imshow(image_data)
axs[0][0].set_title("Raw")
axs[1][0].text(0.3, 0.5, "Difference to\n" + "raw")
axs[1][0].axis("off")
attributes_result = {"runtime": {}, "differentiable": {}}
# axs[1][0].text(0.3, 0.5, "Sanity Check:\n p=0 ->", dict(size=10))
for i, library in enumerate(avail_libraries):
t_before = time.time()
op = BA_operator(always_apply=False, p=1, library=library, **kwargs)
image_auged = op(image=image_data)["image"]
t_after = time.time()
runtime = t_after - t_before
image_auged_vis = image_auged
attributes_result["runtime"][library] = runtime
attributes_result["differentiable"][library] = properties.isOpDifferentiable(op)
axs[0][1+i].set_title(library + ":" + '{0:.1f}'.format(runtime*1000) + " (ms)")
axs[0][1+i].imshow(image_auged)
# display the difference of original to augmented images
if image_auged.shape == image_data.shape:
axs[1][1+i].imshow(image_auged - image_data)
if save_fig_path and individual_fig == True:
img_name = os.path.join(save_fig_path, BA_operator.__name__+"-" + library+".jpeg")
if os.path.isfile(img_name):
print("\n...screenshot individual figure already existed as : ", img_name)
else:
if image_auged.min() < 0: # normalzied case, need to
image_auged = image_auged - image_auged.min()
image_auged = image_auged/image_auged.max()
print("@@@@@@@", image_auged.min())
plt.imsave(img_name, image_auged)
print("\n...screenshot individual figure save as : ", img_name)
fig.subplots_adjust(wspace=0)
if save_fig_path and individual_fig == False:
fig_name = os.path.join(save_fig_path, BA_operator.__name__+"aug_library-screenshot.png")
print("\n...screenshot figure save as : ", fig_name)
plt.savefig(fig_name)
return fig, attributes_result
def visualize_bboxes(img, bboxes, color=(255, 0, 0), thickness=2, **kwargs):
'''
    color: box color, default (255, 0, 0) (red)
'''
image = img.copy()
for bbox in bboxes:
        # bbox is in (x_min, y_min, x_max, y_max) format
if len(bbox) == 5:
bbox = bbox[:4] # the last one is label
x_min, y_min, x_max, y_max = map(int, bbox) # need to make sure bbox is integer
# x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
        image = cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
return image
def visualize_kps(img, kps, color=(0, 255, 0), key_point_diameter=2, **kwargs):
'''
'''
image = img.copy()
for kp in kps:
x, y = kp
image = cv2.circle(image, (int(x), int(y)), key_point_diameter, color, -1)
return image
def visualize_titles(img, bbox, title, color=(255, 0, 0), thickness=2, font_thickness=2, font_scale=0.35, **kwargs):
    x_min, y_min, x_max, y_max = map(int, bbox)  # bbox in (x_min, y_min, x_max, y_max) format
# x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
((text_width, text_height), _) = cv2.getTextSize(
title, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
    cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)),
                  (x_min + text_width, y_min), color=color)
cv2.putText(img, title, (x_min, y_min - int(0.3 * text_height)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),
font_thickness, lineType=cv2.LINE_AA)
return img
def visualize_targets(image, mask=None, bboxes=None, keypoints=None, image0=None):
''' Stack all the targets '''
target_list = []
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
target_list.append(image.copy())
if image0 is not None:
if image0.ndim == 2:
image0 = cv2.cvtColor(image0, cv2.COLOR_GRAY2RGB)
target_list.append(image0)
if mask is not None:
target_list.append(cv2.cvtColor((mask*255).astype('uint8'), cv2.COLOR_GRAY2RGB))
if bboxes is not None:
target_list.append(visualize_bboxes(image, bboxes, thickness=10))
if keypoints is not None:
target_list.append(visualize_kps(image, keypoints, key_point_diameter=15))
return np.hstack(target_list)
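# Sketch (inputs are illustrative): stack an image next to its bbox and keypoint
# overlays and display the resulting panel.
# panel = visualize_targets(image, bboxes=[[10, 10, 80, 80]], keypoints=[(40, 40)])
# plt.imshow(panel); plt.axis('off'); plt.show()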
def augment_and_show(aug, image, mask=None, bboxes=[], keypoints=[], categories=[], category_id_to_name=[], filename=None,
font_scale_orig=0.35, font_scale_aug=0.35, key_point_diameter=15,
show_title=True, **kwargs):
"""
Use from: https://albumentations.ai/docs/examples/showcase/
visualize the image,(mask), (bbox),(kp) superimposed result before and after augmentation
Args:
        aug: augmentation pipeline
image: single image
mask: original mask
bbox: original bounding boxes
keypoints: original keypoints
output:
augmented: augmented image components
f: visualize image
"""
if mask is None:
augmented = aug(image=image, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
else:
augmented = aug(image=image, mask=mask, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image_aug = cv2.cvtColor(augmented['image'], cv2.COLOR_BGR2RGB)
image_aug = augmented['image']
    # keep the returned copies: the visualize_* helpers draw on copies of their inputs
    image = visualize_bboxes(image, bboxes, **kwargs)
    image_aug = visualize_bboxes(image_aug, augmented['bboxes'], **kwargs)
    image = visualize_kps(image, keypoints, key_point_diameter=key_point_diameter, **kwargs)
    image_aug = visualize_kps(image_aug, augmented["keypoints"], key_point_diameter=key_point_diameter, **kwargs)
if show_title:
for bbox, cat_id in zip(bboxes, categories):
visualize_titles(
image, bbox, category_id_to_name[cat_id], font_scale=font_scale_orig, **kwargs)
for bbox, cat_id in zip(augmented['bboxes'], augmented['category_id']):
visualize_titles(
image_aug, bbox, category_id_to_name[cat_id], font_scale=font_scale_aug, **kwargs)
if mask is None:
f, ax = plt.subplots(1, 2, figsize=(16, 8))
ax[0].imshow(image)
ax[0].set_title('Original image')
ax[1].imshow(image_aug)
ax[1].set_title('Augmented image')
else:
f, ax = plt.subplots(2, 2, figsize=(16, 16))
if len(mask.shape) != 3:
mask = label2rgb(mask, bg_label=0)
mask_aug = label2rgb(augmented['mask'], bg_label=0)
        else:
            mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
            mask_aug = cv2.cvtColor(augmented['mask'], cv2.COLOR_BGR2RGB)
ax[0, 0].imshow(image)
ax[0, 0].set_title('Original image')
ax[0, 1].imshow(image_aug)
ax[0, 1].set_title('Augmented image')
ax[1, 0].imshow(mask, interpolation='nearest')
ax[1, 0].set_title('Original mask')
ax[1, 1].imshow(mask_aug, interpolation='nearest')
ax[1, 1].set_title('Augmented mask')
f.tight_layout()
if filename is not None:
f.savefig(filename)
return augmented, f
if __name__ == "__main__":
# Load an example image (uint8, 128x128x3).
image = data.astronaut()
# Example of an augmentation pipeline
augPipeline = A.Compose([
A.RandomCrop(256, 256),
A.OneOf([A.RGBShift(),
A.HueSaturationValue()])])
os.makedirs("tmp", exist_ok=True)
screenshot_pipeline(augPipeline, image, save_fig_path="tmp/")
| 2.0625 | 2 |
openff/bespokefit/optimizers/__init__.py | openforcefield/bespoke-f | 12 | 12768059 | <filename>openff/bespokefit/optimizers/__init__.py
from openff.bespokefit.optimizers.base import (
BaseOptimizer,
deregister_optimizer,
get_optimizer,
list_optimizers,
register_optimizer,
)
from openff.bespokefit.optimizers.forcebalance import ForceBalanceOptimizer
# __all__ entries must be strings, not the objects themselves
__all__ = [
    "BaseOptimizer",
    "deregister_optimizer",
    "get_optimizer",
    "list_optimizers",
    "register_optimizer",
    "ForceBalanceOptimizer",
]
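# Example sketch using the registry helpers re-exported above (picking the first
# registered name avoids hard-coding an optimizer name string):
# from openff.bespokefit.optimizers import get_optimizer, list_optimizers
# print(list_optimizers())
# optimizer = get_optimizer(list_optimizers()[0])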
| 1.101563 | 1 |
testbed_frontend/api/emulation/emulation_manager.py | Ncu-software-research-center/IIOT-testbed | 1 | 12768060 | '''
Vortex OpenSplice
This software and documentation are Copyright 2006 to TO_YEAR ADLINK
Technology Limited, its affiliated companies and licensors. All rights
reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import time
import redis
from api.emulation import (
Config,
EmulationStatus,
WorkerStatus
)
from .emulation_handler import abort_handled
from .utils import parser
class EmulationManager:
def __init__(self):
self.ip_collection = None
self.redis_connection = redis.StrictRedis(
host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password=Config.REDIS_PASSWORD,
encoding="utf-8", decode_responses=True)
@abort_handled
def init(self, emulation_data: dict):
"""
        Initialize redis with the given emulation data, then set the
        emulation status to `init`.
Args:
emulation_data (dict): Emulation data.
"""
print('emulation initialization.')
self.redis_connection.set('emulation_status', EmulationStatus.INIT)
self.redis_connection.set('emulation_time', emulation_data['emulation_time'])
self.ip_collection = [key for key in emulation_data.keys() if 'ip' in key]
for ip in self.ip_collection:
self.redis_connection.hset(ip, 'device_name', emulation_data[ip]['device_name'])
self.redis_connection.hset(ip, 'device_settings', json.dumps(emulation_data[ip]['device_setting']))
self.redis_connection.hset(ip, 'worker_status', WorkerStatus.PREPARE)
@abort_handled
def ready(self):
"""
        Block until every device reports that it is ready to run the
        emulation, or until the emulation is aborted.
"""
while not self.check_worker_status(WorkerStatus.READY):
print("Waiting for all of worker is ready.")
if self.redis_connection.get('emulation_status') == EmulationStatus.ABORT:
break
else:
time.sleep(1)
@abort_handled
def start(self):
self.redis_connection.set("emulation_status", EmulationStatus.START)
while not self.check_worker_status(WorkerStatus.DONE):
print("Waiting for emulation finish.")
if self.redis_connection.get('emulation_status') == EmulationStatus.ABORT:
break
else:
time.sleep(1)
@abort_handled
def finish(self):
"""
        Mark the emulation as finished by setting the emulation status to
        `end`.
"""
self.redis_connection.set("emulation_status", EmulationStatus.END)
print("done")
def check_worker_status(self, status):
print(status, end=": ")
worker_status_count = 0
        for ip in self.ip_collection:
            # matched and unmatched workers print identically, so print once
            print("{}.".format(ip), end=" ")
            if self.redis_connection.hget(ip, "worker_status") == status:
                worker_status_count += 1
return worker_status_count == len(self.ip_collection)
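# Usage sketch (the emulation_data layout mirrors what init() reads; the IP key
# and device fields below are illustrative assumptions):
# manager = EmulationManager()
# manager.init({'emulation_time': 60,
#               'ip_192.168.0.10': {'device_name': 'pub1', 'device_setting': {}}})
# manager.ready()
# manager.start()
# manager.finish()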
| 1.835938 | 2 |
aula 8/01_quiz_exercise_visualization_1.py | RichardPSilva/Udacity-Intro-to-Data-Science | 0 | 12768061 | from pandas import *
from ggplot import *
from datetime import datetime
def get_day(date):
return datetime.strftime(datetime.strptime(date,'%Y-%m-%d').date(),'%a')
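# e.g. get_day('2011-05-01') -> 'Sun' (abbreviated weekday via the '%a' format)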
def plot_weather_data(turnstile_weather):
'''
You are passed in a dataframe called turnstile_weather.
Use turnstile_weather along with ggplot to make a data visualization
focused on the MTA and weather data we used in assignment #3.
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like.
Here are some suggestions for things to investigate and illustrate:
* Ridership by time of day or day of week
* How ridership varies based on Subway station (UNIT)
* Which stations have more exits or entries at different times of day
(You can use UNIT as a proxy for subway station.)
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out:
https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv
To see all the columns and data points included in the turnstile_weather
dataframe.
However, due to the limitation of our Amazon EC2 server, we are giving you a random
subset, about 1/3 of the actual data in the turnstile_weather dataframe.
'''
daysn = []
for the_date in turnstile_weather['DATEn']:
daysn.append(get_day(the_date))
turnstile_weather['Dayn'] = daysn
grouped = turnstile_weather.groupby('Dayn',as_index=False).sum()
    plot = ggplot(grouped, aes(x='Dayn', y='ENTRIESn_hourly')) + \
        geom_bar(aes(weight='ENTRIESn_hourly'), fill='blue')
return plot | 4.4375 | 4 |
pymyenergi/zappi.py | CJNE/pymyenergi | 5 | 12768062 | <reponame>CJNE/pymyenergi<filename>pymyenergi/zappi.py
from pymyenergi.connection import Connection
from . import ZAPPI
from .base_device import BaseDevice
CHARGE_MODES = ["None", "Fast", "Eco", "Eco+", "Stopped"]
STATES = ["Unkn0", "Paused", "Unkn2", "Charging", "Boosting", "Completed"]
PLUG_STATES = {
"A": "EV Disconnected",
"B1": "EV Connected",
"B2": "Waiting for EV",
"C1": "EV ready to charge",
"C2": "Charging",
"F": "Fault",
}
class Zappi(BaseDevice):
"""Zappi Client for myenergi API."""
def __init__(self, connection: Connection, serialno, data=None) -> None:
self.history_data = {}
self.boost_data = {}
super().__init__(connection, serialno, data)
async def refresh(self):
"""Refresh device data"""
self.data = await self.fetch_data()
async def fetch_boost_data(self):
"""Fetch data from myenergi"""
response = await self._connection.get(
f"/cgi-boost-time-{self.prefix}{self._serialno}"
)
data = response
return data
@property
def kind(self):
return ZAPPI
@property
def prefix(self):
return "Z"
@property
def ct_keys(self):
"""Return CT key names that are not none"""
keys = {}
for i in range(6):
ct = getattr(self, f"ct{i+1}")
if ct.name_as_key == "ct_none":
continue
keys[ct.name_as_key] = keys.get(ct.name_as_key, 0) + 1
return keys
@property
def charge_mode(self):
"""Charge mode, one of Fast, Eco, Eco+ and Stopped"""
return CHARGE_MODES[self._data.get("zmo", 0)]
@property
def charge_added(self):
"""Charge added this session in kWh"""
return self._data.get("che")
@property
def is_dst(self):
"""Is DST in use"""
return self._data.get("dat") == 1
@property
def ct3(self):
"""Current transformer 3"""
return self._create_ct(3)
@property
def ct4(self):
"""Current transformer 4"""
return self._create_ct(4)
@property
def ct5(self):
"""Current transformer 5"""
return self._create_ct(5)
@property
def ct6(self):
"""Current transformer 6"""
return self._create_ct(6)
@property
def supply_frequency(self):
"""Supply frequency in Hz"""
return self._data.get("frq")
@property
def supply_voltage(self):
"""Supply voltage in V"""
return self._data.get("vol", 0) / 10
@property
def power_grid(self):
"""Grid power in W"""
return self._data.get("grd", 0)
@property
def power_generated(self):
"""Generated power in W"""
return self._data.get("gen", 0)
@property
def status(self):
"""Current status, one of Paused, Charging or Completed"""
return STATES[self._data.get("sta", 1)]
@property
def plug_status(self):
"""Plug status, one of EV Disconnected, EV Connected, Waiting for EV, EV Ready to charge, Charging or Fault"""
return PLUG_STATES.get(self._data.get("pst"), "")
@property
def priority(self):
"""Charger priority"""
return self._data.get("pri", 0)
@property
def l1_phase(self):
"""What phase L1 is connected to"""
return self._data.get("pha", 0)
@property
def locked(self):
"""Lock status"""
return self._data.get("lck", 0) >> 1 & 1 == 1
@property
def lock_when_pluggedin(self):
"""Lock when plugged in status"""
return self._data.get("lck", 0) >> 2 & 1 == 1
@property
def lock_when_unplugged(self):
"""Lock when unplugged status"""
return self._data.get("lck", 0) >> 3 & 1 == 1
@property
def charge_when_locked(self):
"""Charge when locked enabled"""
return self._data.get("lck", 0) >> 4 & 1 == 1
@property
def charge_session_allowed(self):
"""Allow charge override"""
return self._data.get("lck", 0) >> 5 & 1 == 1
@property
def minimum_green_level(self):
"""Minimum green level"""
return self._data.get("mgl", -1)
@property
def smart_boost_start_hour(self):
"""Smart boost starting at hour"""
return self._data.get("sbh", -1)
@property
def smart_boost_start_minute(self):
"""Smart boost starting at minute"""
return self._data.get("sbm", -1)
@property
def smart_boost_amount(self):
"""Smart boost amount of energy to add"""
return self._data.get("sbk", -1)
@property
def energy_total(self):
"""Device total energy from history data"""
return self.history_data.get("device_total", 0)
@property
def energy_green(self):
"""Device green energy from history data"""
return self.history_data.get("device_green", 0)
# @property
# def boost_start_hour(self):
# """Boost starting at hour ??"""
# return self._data.get("tbh", -1)
# @property
# def boost_start_minute(self):
# """Boost starting at minute ??"""
# return self._data.get("tbm", -1)
@property
def boost_amount(self):
"""Boost amount of energy to add"""
return self._data.get("tbk", -1)
    # The following properties have an unknown purpose; names will change once known
@property
def bst(self):
return self._data.get("bst")
@property
def bsm(self):
"""Boost mode maybe, turns 1 when manual boosting"""
return self._data.get("bsm")
@property
def bss(self):
return self._data.get("bss")
@property
def tz(self):
return self._data.get("tz")
@property
def pwm(self):
return self._data.get("pwm")
@property
def zs(self):
return self._data.get("zs")
@property
def rdc(self):
return self._data.get("rdc")
@property
def rac(self):
return self._data.get("rac")
@property
def rrac(self):
return self._data.get("rrac")
@property
def zsh(self):
return self._data.get("zsh")
@property
def zsl(self):
return self._data.get("zsl")
def show(self, short_format=False):
"""Returns a string with all data in human readable format"""
name = ""
ret = ""
if self.name:
name = f" {self.name}"
ret = ret + f"Zappi S/N {self.serial_number}"
ret = ret + f"{name} version {self.firmware_version}"
if short_format:
return ret
ret = ret.center(80, "-") + "\n"
ret = ret + f"Status: {self.status}\n"
ret = ret + f"Plug status: {self.plug_status}\n"
ret = ret + f"Locked: {self.locked}\n"
ret = ret + f"Charge added: {self.charge_added}\n"
ret = ret + f"Priority: {self.priority}\n"
ret = ret + f"Charge mode: {self.charge_mode}\n"
ret = ret + "\n"
ret = ret + f"Lock when plugged in : {self.lock_when_pluggedin}\n"
ret = ret + f"Lock when unplugged : {self.lock_when_unplugged}\n"
ret = ret + f"Charge when locked : {self.charge_when_locked}\n"
ret = ret + f"Charge session allowed : {self.charge_session_allowed}\n"
ret = ret + "\n"
ret = ret + f"CT 1 {self.ct1.name} {self.ct1.power}W\n"
ret = ret + f"CT 2 {self.ct2.name} {self.ct2.power}W\n"
ret = ret + f"CT 3 {self.ct3.name} {self.ct3.power}W\n"
if self.ct4.name != "None":
ret = ret + f"CT 4 {self.ct4.name} {self.ct4.power}W\n"
if self.ct5.name != "None":
ret = ret + f"CT 5 {self.ct5.name} {self.ct5.power}W\n"
if self.ct6.name != "None":
ret = ret + f"CT 6 {self.ct6.name} {self.ct6.power}W\n"
ret = ret + "\n"
ret = ret + f"Supply voltage: {self.supply_voltage}V\n"
ret = ret + f"Line frequency: {self.supply_frequency}Hz\n"
ret = ret + f"L1 phase: {self.l1_phase}\n"
ret = ret + "Power:\n"
ret = ret + f" Grid : {self.power_grid}W\n"
ret = ret + f" Generated : {self.power_generated}W\n"
ret = ret + "\n"
for key in self.ct_keys:
ret = ret + f"Energy {key} {self.history_data.get(key, 0)}Wh\n"
ret = ret + "\n"
ret = ret + f"Boost with {self.boost_amount}kWh\n"
ret = ret + "Smart Boost start at"
ret = ret + f" {self.smart_boost_start_hour}:{self.smart_boost_start_minute}"
ret = ret + f" add {self.smart_boost_amount}kWh\n"
ret = ret + f"Minimum green level: {self.minimum_green_level}%"
return ret
async def stop_charge(self):
"""Stop charge"""
await self._connection.get(f"/cgi-zappi-mode-Z{self._serialno}-4-0-0-0000")
return True
async def stop_boost(self):
"""Stop charge"""
await self._connection.get(f"/cgi-zappi-mode-Z{self._serialno}-2-0-0-0000")
return True
async def set_charge_mode(self, mode):
"""Set charge mode, one of Fast, Eco, Eco+ or Stopped"""
mode_int = CHARGE_MODES.index(mode.capitalize())
await self._connection.get(
f"/cgi-zappi-mode-Z{self._serialno}-{mode_int}-0-0-0000"
)
# Set local data if successful
self._data["zmo"] = mode_int
return True
async def set_minimum_green_level(self, level):
"""Set minimum green level 0-100"""
await self._connection.get(f"/cgi-set-min-green-Z{self._serialno}-{level}")
# Set local data if successful
self._data["mgl"] = level
return True
async def start_boost(self, amount):
"""Start boost"""
if self.charge_mode not in ["Eco", "Eco+"]:
return False
await self._connection.get(
f"/cgi-zappi-mode-Z{self._serialno}-0-10-{int(amount)}-0000"
)
return True
async def set_priority(self, priority):
"""Set device priority"""
await self._connection.get(
f"/cgi-set-priority-Z{self._serialno}-{int(priority)}"
)
self._data["pri"] = int(priority)
return True
async def start_smart_boost(self, amount, complete_by):
"""Start smart boost"""
time = complete_by.replace(":", "")
await self._connection.get(
f"/cgi-zappi-mode-Z{self._serialno}-0-11-{int(amount)}-{time}"
)
return True
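
if __name__ == "__main__":
    # Offline sketch of how the lock-related properties above decode the
    # "lck" bitmask (one bit per flag). The raw value below is hypothetical,
    # not taken from a real device.
    lck = 0b110110
    print("locked:", lck >> 1 & 1 == 1)
    print("lock when plugged in:", lck >> 2 & 1 == 1)
    print("lock when unplugged:", lck >> 3 & 1 == 1)
    print("charge when locked:", lck >> 4 & 1 == 1)
    print("charge session allowed:", lck >> 5 & 1 == 1)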
| 2.234375 | 2 |
translators/object.py | rgirish28/blenderseed | 0 | 12768063 | <gh_stars>0
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2018 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import appleseed as asr
import math
import mathutils
import os
from .assethandlers import AssetType
from .translator import Translator, ProjectExportMode, ObjectKey
from ..logger import get_logger
logger = get_logger()
class ObjectTranslator(Translator):
#
# Constructor.
#
def __init__(self, obj, asset_handler):
super(ObjectTranslator, self).__init__(obj, asset_handler)
self._xform_seq = asr.TransformSequence()
self._num_instances = 1
#
# Properties.
#
@property
def bl_obj(self):
return self._bl_obj
@property
def assembly_name(self):
return self.appleseed_name + "_ass"
#
# Instancing.
#
def add_instance(self):
self._num_instances += 1
#
# Entity translation.
#
def set_transform_key(self, scene, time, key_times):
self._xform_seq.set_transform(time, self._convert_matrix(self.bl_obj.matrix_world))
def set_transform(self, time, matrix):
self._xform_seq.set_transform(time, self._convert_matrix(matrix))
def set_deform_key(self, scene, time, key_times):
pass
def update(self, obj):
self.update_transform(0.0, obj.matrix_world)
class InstanceTranslator(ObjectTranslator):
#
# Constructor.
#
def __init__(self, obj, master_translator, asset_handler):
super(InstanceTranslator, self).__init__(obj, asset_handler)
self.__master = master_translator
#
# Entity translation.
#
def create_entities(self, scene):
pass
def flush_entities(self, assembly):
logger.debug("Creating assembly instance for object %s", self.appleseed_name)
assembly_instance_name = self.appleseed_name + "_ass_inst"
self.__ass_inst = asr.AssemblyInstance(assembly_instance_name, {}, self.__master.assembly_name)
self._xform_seq.optimize()
self.__ass_inst.set_transform_sequence(self._xform_seq)
assembly_instance_name = self._insert_entity_with_unique_name(
assembly.assembly_instances(),
self.__ass_inst,
self.__ass_inst.get_name())
self.__ass_inst = assembly.assembly_instances().get_by_name(assembly_instance_name)
def update_transform(self, time, matrix):
self.__ass_inst.transform_sequence().set_transform(time, self._convert_matrix(matrix))
def _convert_matrix(self, m):
if self.bl_obj.is_duplicator and self.bl_obj.dupli_type == 'GROUP':
rot_comp = mathutils.Matrix.Rotation(math.radians(90), 4, 'X')
m = m * rot_comp
return super(InstanceTranslator, self)._convert_matrix(m)
class ArchiveTranslator(ObjectTranslator):
#
# Constructor.
#
def __init__(self, obj, archive_path, asset_handler):
super(ArchiveTranslator, self).__init__(obj, asset_handler)
self.__archive_path = archive_path
#
# Properties.
#
@property
def bl_obj(self):
return self._bl_obj
#
# Entity translation.
#
def create_entities(self, scene):
self._xform_seq.set_transform(0.0, self._convert_matrix(self.bl_obj.matrix_world))
def flush_entities(self, assembly):
assembly_name = self.appleseed_name + "_ass"
file_path = self.asset_handler.process_path(self.__archive_path, AssetType.ARCHIVE_ASSET)
params = {'filename': file_path}
self.__ass = asr.Assembly("archive_assembly", assembly_name, params)
ass_inst_name = self.appleseed_name + "_ass_inst"
self.__ass_inst = asr.AssemblyInstance(ass_inst_name, {}, assembly_name)
self.__ass_inst.set_transform_sequence(self._xform_seq)
assembly.assemblies().insert(self.__ass)
assembly.assembly_instances().insert(self.__ass_inst)
def update_transform(self, time, matrix):
self.__ass_inst.transform_sequence().set_transform(time, self._convert_matrix(matrix))
def _convert_matrix(self, m):
# undo export rotation
rotation = mathutils.Matrix.Rotation(math.radians(90), 4, 'X')
m = rotation * m
return super(ArchiveTranslator, self)._convert_matrix(m)
| 1.507813 | 2 |
pyfilter/distributions/joint.py | tingiskhan/pyfilter | 61 | 12768064 | from typing import Optional, Any, Tuple, Union, Sequence
from torch.distributions import Distribution
import torch
class JointDistribution(Distribution):
"""
Defines an object for combining multiple distributions by assuming independence, i.e. we define:
.. math::
        p(x_1, x_2, \\ldots, x_n) = p(x_1) \\cdot p(x_2) \\cdots p(x_n)
Example:
        A basic example can be seen below, where we combine a normal and an exponential distribution:
>>> from torch.distributions import Normal, Exponential
>>> import torch
>>>
>>> distribution = JointDistribution(Normal(0.0, 1.0), Exponential(1.0))
>>> y = distribution.sample((1000,)) # should be 1000 x 2
>>>
>>> log_prob = distribution.log_prob(y)
"""
arg_constraints = {}
def __init__(self, *distributions: Distribution, indices: Sequence[Union[int, slice]] = None, **kwargs):
"""
Initializes the ``JointDistribution`` class.
Args:
distributions: Iterable of ``pytorch.distributions.Distribution`` objects.
indices: Optional parameter specifying which distribution corresponds to which column in input tensors. If
``None``, then is inferred.
kwargs: Key-worded arguments passed to base class.
"""
_indices = indices or self.infer_indices(*distributions)
event_shape = torch.Size([(_indices[-1].stop if isinstance(_indices[-1], slice) else _indices[-1] + 1)])
batch_shape = distributions[0].batch_shape
if any(d.batch_shape != batch_shape for d in distributions):
raise NotImplementedError(f"All batch shapes must be congruent!")
super(JointDistribution, self).__init__(event_shape=event_shape, batch_shape=batch_shape, **kwargs)
if any(len(d.event_shape) > 1 for d in distributions):
raise NotImplementedError(f"Currently cannot handle matrix valued distributions!")
self.distributions = distributions
self.indices = _indices
def expand(self, batch_shape, _instance=None):
return JointDistribution(*(d.expand(batch_shape) for d in self.distributions))
@property
def support(self) -> Optional[Any]:
raise NotImplementedError()
@property
def mean(self):
raise NotImplementedError()
@property
def variance(self):
raise NotImplementedError()
    def cdf(self, value):
        # The joint CDF is the product of the marginal CDFs, so the
        # accumulator must start at 1.0 (starting from 0.0 would annihilate
        # every factor).
        res = 1.0
        for d, m in zip(self.distributions, self.indices):
            res = res * d.cdf(value[..., m])

        return res
def icdf(self, value):
raise NotImplementedError()
def enumerate_support(self, expand=True):
raise NotImplementedError()
def entropy(self):
return sum(d.entropy() for d in self.distributions)
@staticmethod
def infer_indices(*distributions: Distribution) -> Tuple[Union[int, slice]]:
"""
Given a sequence of ``pytorch.distributions.Distribution`` objects, this method infers the indices at which to
slice an input tensor.
Args:
distributions: Sequence of ``pytorch.distributions.Distribution`` objects.
Returns:
A tuple containing indices and/or slices.
Example:
>>> from torch.distributions import Normal, Exponential
>>> import torch
>>> from pyfilter.distributions import JointDistribution
>>>
>>> distributions = Normal(0.0, 1.0), Exponential(1.0)
>>> y = torch.stack([d.sample((1000,)) for d in distributions], dim=-1)
>>>
>>> slices = JointDistribution.infer_indices(*distributions)
>>> log_probs = [d.log_prob(y[..., s]) for d, s in zip(distributions, slices)]
"""
res = tuple()
length = 0
        for d in distributions:
            multi_dimensional = len(d.event_shape) > 0

            if multi_dimensional:
                size = d.event_shape[-1]
                # A multivariate marginal occupies `size` consecutive columns.
                slice_ = slice(length, length + size)
                length = slice_.stop
            else:
                slice_ = length
                length += 1

            res += (slice_,)
return res
def log_prob(self, value):
# TODO: Add check for wrong dimensions
return sum(d.log_prob(value[..., m]) for d, m in zip(self.distributions, self.indices))
def rsample(self, sample_shape=torch.Size()):
res = tuple(
d.rsample(sample_shape) if len(d.event_shape) > 0 else d.rsample(sample_shape).unsqueeze(-1)
for d in self.distributions
)
return torch.cat(res, dim=-1)
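
if __name__ == "__main__":
    # Sketch combining a 2D multivariate normal with a univariate
    # exponential. With the index inference above this yields the column
    # layout (slice(0, 2), 2); all numbers here are placeholders.
    from torch.distributions import Exponential, MultivariateNormal

    joint = JointDistribution(
        MultivariateNormal(torch.zeros(2), torch.eye(2)),
        Exponential(torch.tensor(1.0)),
    )
    y = joint.rsample((5,))                  # -> shape [5, 3]
    print(y.shape, joint.log_prob(y).shape)  # [5, 3], [5]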
| 3.171875 | 3 |
app.py | ericmjl/Firearm_Detection | 3 | 12768065 | import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
import librosa
import time
import matplotlib.pyplot as plt
def wav2mfcc(wave, sr=22050,n_mfcc=20, max_len=170):
'''wave is a np array'''
wave = np.asfortranarray(wave)
mfcc = librosa.feature.mfcc(wave, sr=sr, n_mfcc=n_mfcc)
# If maximum length exceeds mfcc lengths then pad the remaining ones
if (max_len > mfcc.shape[1]):
pad_width = max_len - mfcc.shape[1]
mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
# Else cutoff the remaining parts
else:
mfcc = mfcc[:, :max_len]
return mfcc
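
# A quick sanity check of the fixed-length MFCC shape produced above (kept as
# a comment so it does not run on every Streamlit rerun); the 4 s signal is
# synthetic noise:
#
#     dummy = np.random.randn(4 * 22050)
#     assert wav2mfcc(dummy).shape == (20, 170)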
def updateplot(wave, txt_output):
    """
    Redraw the waveform plot and status text in place.

    Relies on the module-level ``line``, ``the_plot`` and ``text`` handles
    created in the live-listening branch below.
    """
line.set_ydata(wave)
the_plot.pyplot(plt)
text.set_text(txt_output)
# load the model from disk
model_path="models/"
cnn_model=load_model(model_path+'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')
#-------------------------------------------------
st.title('Firearm Alarm')
st.header('Listening for Firearms in Your Home')
##-----------------------------------------------------------------------------
path="data/external/"
audio_clip1='5-195710-A-10.wav' # ?
audio_clip2='2-121978-A-29.wav' #?
audio_clip3='T_17P.wav'
audio_dict={
'Audio clip 1':audio_clip1,
'Audio clip 2': audio_clip2,
'Audio clip 3': audio_clip3}
#-----------------------------------------------
# select a sidebar to navigate between different options of the app
options=['Test with some sample clips', 'Test with a youtube video']
page=st.sidebar.radio('Select an option',options)
st.sidebar.header('Firearm-Alarm Options')
st.sidebar.markdown('The first option will allow you to test firearm-alarm with some pre-recorded sound clips.')
st.sidebar.markdown('The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0.')
#-----------------------------------------------
if page==options[0]: #The first option is selected
st.text('The following are a set of sample audio clips that can be input into the model.')
st.audio(path+audio_clip1)
st.text('This is audio clip 1.')
st.audio(path+audio_clip2)
st.text('This is audio clip 2.')
    st.audio(path+audio_clip3)
    st.text('This is audio clip 3.')
option = st.selectbox('Select the clip you would like the model to analyze.',('Audio clip 1', 'Audio clip 2', 'Audio clip 3'))
st.write('You selected:', option)
if st.button('Analyze '+option):
wave, sr = librosa.load(path+audio_dict[option], mono=True, sr=22050)
mfcc=wav2mfcc(wave,sr=sr)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
print(Y_predict)
        if Y_predict.round()[0][0] == 1:
            st.write("This doesn't sound like a firearm.")
        elif Y_predict.round()[0][0] == 0:
            st.write("This is a firearm! Contacting local authorities...")
else:
st.write('Click the button to analyze the audio clip.')
###############################################----------------------------------
elif page==options[1]: #if the second page is selected
st.header('Firearm Alarm in Action')
x = np.arange(0, 4,1/22050)
fig, ax=plt.subplots()
ax.set_ylim(-1, 1)
line, = ax.plot(x, np.zeros(len(x)),color='m',linewidth=2)
plt.xlabel('Time (s)')
plt.ylabel('Sound Wave')
the_plot = st.pyplot(plt)
text=plt.text(0,.8,'',fontsize=14)
sample='data/external/Real_life_gunshot_sound_effects.wav'
if st.button('See an example with Firearm Alarm'):
with st.spinner("Listening..."):
array,sr=librosa.load(sample)
tiempo=librosa.get_duration(array) #time in seconds
for t in range(0,int(tiempo),4):
wave, sr = librosa.load(sample, mono=True,offset=t,duration=4)
## run it through the model
mfcc=wav2mfcc(wave)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
                if Y_predict.round()[0][0] == 1:
                    txt_output = 'No firearm sound(s) detected'
                elif Y_predict.round()[0][0] == 0:
                    txt_output = 'Firearm sound(s) detected!'
updateplot(wave,txt_output)
time.sleep(3)
plt.show()
else:
st.write('Click the button to start listening.')
#-----------------------------------
| 3.203125 | 3 |
nndet/inference/ensembler/base.py | joeranbosma/nnDetection | 242 | 12768066 | <filename>nndet/inference/ensembler/base.py
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import PathLike
from pathlib import Path
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, TypeVar
import torch
from nndet.io.load import save_pickle
from nndet.utils.tensor import to_numpy
from nndet.utils.info import maybe_verbose_iterable
class BaseEnsembler(ABC):
ID = "abstract"
def __init__(self,
properties: Dict[str, Any],
parameters: Dict[str, Any],
device: Optional[Union[torch.device, str]] = None,
**kwargs):
"""
Base class to containerize and ensemble the predictions of a single case.
Call :method:`process_batch` to add batched predictions of a case
to the ensembler and :method:`add_model` to signal the next model
if multiple models are used.
Args:
properties: properties of the patient/case (e.g. tranpose axes)
parameters: parameters for ensembling
device: device to use for internal computations
**kwargs: parameters for ensembling
Notes:
Call :method:`add_model` before adding predictions.
"""
self.model_current = None
self.model_results = {}
self.model_weights = {}
self.properties = properties
self.case_result: Optional[Dict] = None
self.parameters = parameters
self.parameters.update(kwargs)
if device is None:
self.device = torch.device("cpu")
elif isinstance(device, str):
self.device = torch.device(device)
elif isinstance(device, torch.device):
self.device = device
else:
raise ValueError(f"Wrong type {type(device)} for device argument.")
@classmethod
def from_case(cls,
case: Dict,
properties: Optional[Dict] = None,
parameters: Optional[Dict] = None,
**kwargs,
):
"""
Primary way to instantiate this class. Automatically extracts all
properties and uses a default set of parameters for ensembling.
Args:
case: case which is predicted
properties: Additional properties. Defaults to None.
parameters: Additional parameters. Defaults to None.
"""
return cls(properties=properties, parameters=parameters, **kwargs)
def add_model(self,
name: Optional[str] = None,
model_weight: Optional[float] = None,
) -> str:
"""
        This function signals the ensembler to add a new model for internal
        processing.
Args:
name: Name of the model. If None, uses counts the models.
model_weight: Optional weight for this model. Defaults to None.
"""
if name is None:
name = len(self.model_weights) + 1
if name in self.model_results:
raise ValueError(f"Invalid model name, model {name} is already present")
if model_weight is None:
model_weight = 1.0
self.model_weights[name] = model_weight
self.model_results[name] = defaultdict(list)
self.model_current = name
return name
@abstractmethod
@torch.no_grad()
def process_batch(self, result: Dict, batch: Dict):
"""
Process a single batch
Args:
result: predictions to save and ensemble
batch: input batch used for predictions (for additional meta data)
Raises:
NotImplementedError: Overwrite this function in subclasses for the
specific use case.
Warnings:
Make sure to move cached values to the CPU after they have been
processed.
"""
raise NotImplementedError
@abstractmethod
@torch.no_grad()
def get_case_result(self, restore: bool = False) -> Dict[str, torch.Tensor]:
"""
Retrieve the results of a single case
Args:
restore: restores predictions in original image space
Raises:
NotImplementedError: Overwrite this function in subclasses for the
specific use case.
Returns:
Dict[str, torch.Tensor]: the result of a single case
"""
raise NotImplementedError
def update_parameters(self, **parameters: Dict):
"""
Update internal parameters used for ensembling the results
Args:
parameters: parameters to update
"""
self.parameters.update(parameters)
@classmethod
@abstractmethod
def sweep_parameters(cls) -> Tuple[Dict[str, Any], Dict[str, Sequence[Any]]]:
"""
Return a set of parameters which can be used to sweep ensembling
parameters in a postprocessing step
Returns:
Dict[str, Any]: default state to start with
            Dict[str, Sequence[Any]]: Defines the values to search for each
parameter
"""
raise NotImplementedError
def save_state(self,
target_dir: Path,
name: str,
**kwargs,
):
"""
        Save the case result and ensembler state via ``torch.save``. The
        identifier of the ensembler is appended to the file name.
Args:
target_dir: folder to save result to
name: name of case
**kwargs: data to save
"""
kwargs["properties"] = self.properties
kwargs["parameters"] = self.parameters
kwargs["model_current"] = self.model_current
kwargs["model_results"] = self.model_results
kwargs["model_weights"] = self.model_weights
kwargs["case_result"] = self.case_result
with open(Path(target_dir) / f"{name}_{self.ID}.pt", "wb") as f:
torch.save(kwargs, f)
def load_state(self, base_dir: PathLike, case_id: str) -> Dict:
"""
Path to result file
"""
ckp = torch.load(str(Path(base_dir) / f"{case_id}_{self.ID}.pt"))
self._load(ckp)
return ckp
def _load(self, state: Dict):
for key, item in state.items():
setattr(self, key, item)
@classmethod
def from_checkpoint(cls, base_dir: PathLike, case_id: str):
ckp = torch.load(str(Path(base_dir) / f"{case_id}_{cls.ID}.pt"))
t = cls(
properties=ckp["properties"],
parameters=ckp["parameters"],
)
t._load(ckp)
return t
@classmethod
def get_case_ids(cls, base_dir: PathLike):
return [c.stem.rsplit(f"_{cls.ID}", 1)[0]
for c in Path(base_dir).glob(f"*_{cls.ID}.pt")]
class OverlapMap:
def __init__(self, data_shape: Sequence[int]):
"""
Handler for overlap map
Args:
data_shape: spatial dimensions of data (
no batch dim and no channel dim!)
"""
self.overlap_map: torch.Tensor = \
torch.zeros(*data_shape, requires_grad=False, dtype=torch.float)
def add_overlap(self, crop: Sequence[slice]):
"""
Increase values of :param:`self.overlap_map` inside of crop
Args:
crop: defines crop. Negative values are assumed to be outside
of the data and thus discarded
"""
# discard leading indexes which could be due to batches and channels
if len(crop) > self.overlap_map.ndim:
crop = crop[-self.overlap_map.ndim:]
# clip crop to data shape
slicer = []
for data_shape, crop_dim in zip(tuple(self.overlap_map.shape), crop):
start = max(0, crop_dim.start)
stop = min(data_shape, crop_dim.stop)
slicer.append(slice(start, stop, crop_dim.step))
self.overlap_map[slicer] += 1
def mean_num_overlap_of_box(self, box: Sequence[int]) -> float:
"""
Extract mean number of overlaps from a bounding box area
Args:
box: defines bounding box (x1, y1, x2, y2, (z1, z2))
Returns:
int: mean number of overlaps
"""
slicer = [slice(int(box[0]), int(box[2])), slice(int(box[1]), int(box[3]))]
if len(box) == 6:
slicer.append(slice(int(box[4]), int(box[5])))
return torch.mean(self.overlap_map[slicer].float()).item()
def mean_num_overlap_of_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""
Extract mean number of overlaps from a bounding box area
Args:
boxes: defines multiple bounding boxes (x1, y1, x2, y2, (z1, z2))
[N, dim * 2]
Returns:
Tensor: mean number of overlaps per box [N]
"""
return torch.tensor(
[self.mean_num_overlap_of_box(box) for box in boxes]).to(
dtype=torch.float, device=boxes.device)
def avg(self) -> torch.Tensor:
"""
Compute mean over all overlaps
"""
return self.overlap_map.float().median()
def restore_mean(self, val):
"""
Generate a new overlap map filled with the specified value
"""
        # Fill the map with the value while keeping it a tensor.
        self.overlap_map = torch.full_like(self.overlap_map, float(val))
def extract_results(source_dir: PathLike,
target_dir: PathLike,
ensembler_cls: Callable,
restore: bool,
**params,
) -> None:
"""
Compute case result from ensembler and save it
Args:
source_dir: directory which contains the saved predictions/state from
the ensembler class
target_dir: directory to save results
ensembler_cls: ensembler class for prediction
        restore: if true, the results are converted into the original image
space
"""
Path(target_dir).mkdir(parents=True, exist_ok=True)
for case_id in maybe_verbose_iterable(ensembler_cls.get_case_ids(source_dir)):
ensembler = ensembler_cls.from_checkpoint(base_dir=source_dir, case_id=case_id)
ensembler.update_parameters(**params)
pred = to_numpy(ensembler.get_case_result(restore=restore))
save_pickle(pred, Path(target_dir) / f"{case_id}_{ensembler_cls.ID}.pkl")
BaseEnsemblerType = TypeVar('BaseEnsemblerType', bound=BaseEnsembler)
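
if __name__ == "__main__":
    # Toy illustration of the OverlapMap bookkeeping on an 8x8 "image";
    # the crops and the box below are made up for demonstration only.
    om = OverlapMap((8, 8))
    om.add_overlap([slice(0, 5), slice(0, 5)])   # first patch
    om.add_overlap([slice(3, 8), slice(3, 8)])   # second, partially overlapping
    boxes = torch.tensor([[2, 2, 6, 6]])         # (x1, y1, x2, y2)
    print(om.mean_num_overlap_of_boxes(boxes))   # mean patch count inside box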
| 1.664063 | 2 |
analytics/ot-iou/utils.py | xwu2git/Smart-City-Sample | 126 | 12768067 |
class BBUtil(object):
def __init__(self,width,height):
super(BBUtil, self).__init__()
self.width=width
self.height=height
def xywh_to_tlwh(self, bbox_xywh):
x,y,w,h = bbox_xywh
xmin = max(int(round(x - (w / 2))),0)
ymin = max(int(round(y - (h / 2))),0)
return [xmin,ymin,int(w),int(h)]
def tlwh_to_xyxy(self, bbox_tlwh):
x,y,w,h = bbox_tlwh
x1 = max(int(x),0)
x2 = min(int(x+w),self.width-1)
y1 = max(int(y),0)
y2 = min(int(y+h),self.height-1)
return [x1,y1,x2,y2]
def xywh_to_xyxy(self, bbox_xywh):
x,y,w,h = bbox_xywh
x1 = max(int(x-w/2),0)
x2 = min(int(x+w/2),self.width-1)
y1 = max(int(y-h/2),0)
y2 = min(int(y+h/2),self.height-1)
return [x1,y1,x2,y2]
def xyxy_to_tlwh(self, bbox_xyxy):
x1,y1,x2,y2 = bbox_xyxy
t = x1
l = y1
w = int(x2-x1)
h = int(y2-y1)
return [t,l,w,h]
def float_to_int(self,bbox_xyxy):
x1,y1,x2,y2 = bbox_xyxy
return [int(x1*self.width), int(y1*self.height), int(x2*self.width), int(y2*self.height)]
def int_to_float(self,bbox_xyxy):
x1,y1,x2,y2 = [float(item) for item in bbox_xyxy]
return [x1/self.width, y1/self.height, x2/self.width, y2/self.height]
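
if __name__ == "__main__":
    # Round-trip sketch of the box conventions above on a 640x480 frame;
    # the example box is arbitrary.
    bb = BBUtil(640, 480)
    xywh = [320, 240, 100, 50]      # center-x, center-y, width, height
    xyxy = bb.xywh_to_xyxy(xywh)    # -> [270, 215, 370, 265]
    tlwh = bb.xyxy_to_tlwh(xyxy)    # -> [270, 215, 100, 50]
    print(xyxy, tlwh, bb.int_to_float(xyxy))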
| 2.875 | 3 |
examples/config_method_py_example.py | 123zbt/PySODEvalToolkit | 27 | 12768068 | # -*- coding: utf-8 -*-
import os
JLDCF_root = "<your_methods_path>/CVPR2020_JL-DCF"
JLDCF = {
"LFSD": dict(path=os.path.join(JLDCF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(JLDCF_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(JLDCF_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(JLDCF_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(JLDCF_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(JLDCF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(JLDCF_root, "DUT-RGBD-testing"), suffix=".png"),
}
CoNet_root = "<your_methods_path>/2020-ECCV-CoNet"
CoNet = {
"LFSD": dict(path=os.path.join(CoNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CoNet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CoNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CoNet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CoNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CoNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(CoNet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(CoNet_root, "STERE1000"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CoNet_root, "DUT-RGBD"), suffix=".png"),
}
BBSNet_root = "<your_methods_path>/ECCV2020_BBSNet"
BBSNet = {
"LFSD": dict(path=os.path.join(BBSNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(BBSNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(BBSNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(BBSNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(BBSNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(BBSNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(BBSNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(BBSNet_root, "DUT"), suffix=".png"),
}
CMWNet_root = "<your_methods_path>/ECCV2020_CMWNet"
CMWNet = {
"LFSD": dict(path=os.path.join(CMWNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CMWNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(CMWNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CMWNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(CMWNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CMWNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CMWNet_root, "STEREO"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CMWNet_root, "DUT-RGBD"), suffix=".png"),
}
FRDT_root = "<your_methods_path>/2020-ACMMM-FRDT"
FRDT = {
"LFSD": dict(path=os.path.join(FRDT_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(FRDT_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(FRDT_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(FRDT_root, "RGBD-135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(FRDT_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(FRDT_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(FRDT_root, "DUT"), suffix=".png"),
}
S2MA_root = "<your_methods_path>/2020-CVPR-S2MA"
S2MA = {
"LFSD": dict(path=os.path.join(S2MA_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(S2MA_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(S2MA_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(S2MA_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(S2MA_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(S2MA_root, "SSD100"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(S2MA_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(S2MA_root, "DUT-RGBD"), suffix=".png"),
}
UCNet_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/CVPR-UCNet_R50"
UCNet = {
"LFSD": dict(path=os.path.join(UCNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(UCNet_root, "DUT"), suffix=".png"),
}
UCNet_ABP_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/TPAMI_UCNet_R50_ABP"
UCNet_ABP = {
"LFSD": dict(path=os.path.join(UCNet_ABP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_ABP_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_ABP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_ABP_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_ABP_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_ABP_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
UCNet_CVAE_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/TPAMI_UCNet_R50_CVAE"
UCNet_CVAE = {
"LFSD": dict(path=os.path.join(UCNet_CVAE_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_CVAE_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_CVAE_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_CVAE_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_CVAE_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_CVAE_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
CasGNN_root = "<your_methods_path>/2020-ECCV-CasGNN"
CasGNN = {
"LFSD": dict(path=os.path.join(CasGNN_root, "LFSD", "pred"), suffix=".png"),
"NJUD": dict(path=os.path.join(CasGNN_root, "NJUD", "pred"), suffix=".png"),
"NLPR": dict(path=os.path.join(CasGNN_root, "NLPR", "pred"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CasGNN_root, "DES", "pred"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(CasGNN_root, "SSD", "pred"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CasGNN_root, "STERE", "pred"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CasGNN_root, "DUT-RGBD", "pred"), suffix=".png"),
}
DANet_VGG16_root = "<your_methods_path>/2020-ECCV-DANet_VGG/DANet_vgg16"
DANet_VGG16 = {
"LFSD": dict(path=os.path.join(DANet_VGG16_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DANet_VGG16_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DANet_VGG16_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DANet_VGG16_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DANet_VGG16_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DANet_VGG16_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DANet_VGG16_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DANet_VGG16_root, "DUT-RGBD"), suffix=".png"),
}
DANet_VGG19_root = "<your_methods_path>/2020-ECCV-DANet_VGG/DANet_vgg19"
DANet_VGG19 = {
"LFSD": dict(path=os.path.join(DANet_VGG19_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DANet_VGG19_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DANet_VGG19_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DANet_VGG19_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DANet_VGG19_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DANet_VGG19_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DANet_VGG19_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DANet_VGG19_root, "DUT-RGBD"), suffix=".png"),
}
PGAR_root = "<your_methods_path>/2020-ECCV-PGAR"
PGAR = {
"LFSD": dict(path=os.path.join(PGAR_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PGAR_root, "NJUD_test"), suffix=".png"),
"NLPR": dict(path=os.path.join(PGAR_root, "NLPR_test"), suffix=".png"),
"RGBD135": dict(path=os.path.join(PGAR_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(PGAR_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(PGAR_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(PGAR_root, "DUT-RGBD"), suffix=".png"),
}
DisenFuse_root = "<your_methods_path>/2020-TIP-DisenFuse_VGG16"
DisenFuse = {
"LFSD": dict(path=os.path.join(DisenFuse_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DisenFuse_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DisenFuse_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(DisenFuse_root, "DES"), suffix=".bmp"),
"SIP": dict(path=os.path.join(DisenFuse_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DisenFuse_root, "STEREO1000"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DisenFuse_root, "DUT"), suffix=".png"),
}
DPANet_root = "<your_methods_path>/2020-TIP-DPANet"
DPANet = {
"LFSD": dict(path=os.path.join(DPANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DPANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DPANet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DPANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DPANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DPANet_root, "SSD100"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DPANet_root, "STEREO797"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(DPANet_root, "DUT"), suffix=".png"),
}
ICNet_root = "<your_methods_path>/2020-TIP-ICNet"
ICNet = {
"LFSD": dict(path=os.path.join(ICNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(ICNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(ICNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(ICNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(ICNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(ICNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(ICNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(ICNet_root, "DUT-RGBD"), suffix=".png"),
}
D3Net_root = "<your_methods_path>/2020-TNNLS-D3Net"
D3Net = {
"LFSD": dict(path=os.path.join(D3Net_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(D3Net_root, "NJU2K_TEST"), suffix=".png"),
"NLPR": dict(path=os.path.join(D3Net_root, "NLPR_TEST"), suffix=".png"),
"RGBD135": dict(path=os.path.join(D3Net_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(D3Net_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(D3Net_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(D3Net_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(D3Net_root, "DUT-RGBD_TEST"), suffix=".png"),
}
RD3D_root = "<your_methods_path>/2021-AAAI-RD3D"
RD3D = {
"LFSD": dict(path=os.path.join(RD3D_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(RD3D_root, "NJU2000"), suffix=".png"),
"NLPR": dict(path=os.path.join(RD3D_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(RD3D_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(RD3D_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(RD3D_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(RD3D_root, "DUT"), suffix=".png"),
}
AFNet_root = "<your_methods_path>/AFNet"
AFNet = {
"LFSD": dict(path=os.path.join(AFNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(AFNet_root, "NJU2K-TEST"), suffix=".png"),
"NLPR": dict(path=os.path.join(AFNet_root, "NLPR-TEST"), suffix=".png"),
"RGBD135": dict(path=os.path.join(AFNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(AFNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(AFNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(AFNet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(AFNet_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
CDCP_root = "<your_methods_path>/CDCP"
CDCP = {
"LFSD": dict(path=os.path.join(CDCP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CDCP_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CDCP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CDCP_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CDCP_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CDCP_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CDCP_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CDCP_root, "DUT-RGBD"), suffix=".png"),
}
CPFP_root = "<your_methods_path>/CPFP"
CPFP = {
"LFSD": dict(path=os.path.join(CPFP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CPFP_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CPFP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CPFP_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CPFP_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CPFP_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CPFP_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CPFP_root, "DUT-RGBD"), suffix=".png"),
}
CTMF_root = "<your_methods_path>/CTMF"
CTMF = {
"LFSD": dict(path=os.path.join(CTMF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CTMF_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CTMF_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(CTMF_root, "RGBD135"), suffix=".bmp"),
"SIP": dict(path=os.path.join(CTMF_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CTMF_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CTMF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CTMF_root, "DUT-RGBD"), suffix=".png"),
}
DCMC_root = "<your_methods_path>/DCMC"
DCMC = {
"LFSD": dict(path=os.path.join(DCMC_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DCMC_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DCMC_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DCMC_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DCMC_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DCMC_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DCMC_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DCMC_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DCMC_root, "DUT-RGBD"), suffix=".png"),
}
DES_root = "<your_methods_path>/DES"
DES = {
"LFSD": dict(path=os.path.join(DES_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DES_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DES_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DES_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DES_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DES_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DES_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DES_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DES_root, "DUT-RGBD"), suffix=".png"),
}
DF_root = "<your_methods_path>/DF"
DF = {
"LFSD": dict(path=os.path.join(DF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DF_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DF_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DF_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DF_root, "SIP/SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DF_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DF_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DF_root, "DUT-RGBD"), suffix=".png"),
}
DMRA_root = "<your_methods_path>/DMRA"
DMRA = {
"LFSD": dict(path=os.path.join(DMRA_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DMRA_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DMRA_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DMRA_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DMRA_root, "SIP_FromAuthor"), suffix=".png"),
"SSD": dict(path=os.path.join(DMRA_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DMRA_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DMRA_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DMRA_root, "DUT-RGBD"), suffix=".png"),
}
MB_root = "<your_methods_path>/MB"
MB = {
"LFSD": dict(path=os.path.join(MB_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(MB_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(MB_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(MB_root, "RGBD135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(MB_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(MB_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(MB_root, "DUT-RGBD"), suffix=".png"),
}
MMCI_root = "<your_methods_path>/MMCI"
MMCI = {
"LFSD": dict(path=os.path.join(MMCI_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(MMCI_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(MMCI_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(MMCI_root, "RGBD135"), suffix=".bmp"),
"SIP": dict(path=os.path.join(MMCI_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(MMCI_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(MMCI_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(MMCI_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(MMCI_root, "DUT-RGBD"), suffix=".png"),
}
NLPR_root = "<your_methods_path>/NLPR"
NLPR = {
"LFSD": dict(path=os.path.join(NLPR_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(NLPR_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(NLPR_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(NLPR_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(NLPR_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(NLPR_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(NLPR_root, "STEREO-797"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(NLPR_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(NLPR_root, "DUT-RGBD"), suffix=".png"),
}
PCANet_root = "<your_methods_path>/PCANet"
PCANet = {
"LFSD": dict(path=os.path.join(PCANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PCANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(PCANet_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(PCANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(PCANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(PCANet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(PCANet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(PCANet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(PCANet_root, "DUT-RGBD"), suffix=".png"),
}
PDNet_root = "<your_methods_path>/PDNet"
PDNet = {
"LFSD": dict(path=os.path.join(PDNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PDNet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(PDNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(PDNet_root, "RGBD135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(PDNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(PDNet_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(PDNet_root, "DUT-RGBD"), suffix=".png"),
}
TANet_root = "<your_methods_path>/TANet"
TANet = {
"LFSD": dict(path=os.path.join(TANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(TANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(TANet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(TANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(TANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(TANet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(TANet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(TANet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(TANet_root, "DUT-RGBD"), suffix=".png"),
}
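
# Template for registering a further method's predictions; the root path,
# folder names and suffixes below are placeholders, and datasets without
# results should map to None:
#
# NewMethod_root = "<your_methods_path>/NewMethod"
# NewMethod = {
#     "LFSD": dict(path=os.path.join(NewMethod_root, "LFSD"), suffix=".png"),
#     "SSD": None,
#     "DUTRGBD": dict(path=os.path.join(NewMethod_root, "DUT-RGBD"), suffix=".png"),
# }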
| 1.960938 | 2 |
post/migrations/0001_initial.py | vis7/connection | 1 | 12768069 | <gh_stars>1-10
# Generated by Django 3.1.1 on 2020-10-04 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('comment_id', models.AutoField(primary_key=True, serialize=False)),
('comment_text', models.TextField()),
],
),
migrations.CreateModel(
name='Post',
fields=[
('post_id', models.AutoField(primary_key=True, serialize=False)),
('text_description', models.TextField()),
('content', models.ImageField(blank=True, upload_to=None)),
('post_type', models.CharField(max_length=50)),
('likes', models.DecimalField(decimal_places=0, default=0, max_digits=5)),
('dislikes', models.DecimalField(decimal_places=0, default=0, max_digits=5)),
('status', models.CharField(choices=[('A', 'Active'), ('R', 'Reported'), ('B', 'Blocked')], max_length=50)),
('shared_with', models.CharField(choices=[('F', 'Friends'), ('FOF', 'Friends of Friends'), ('P', 'Public')], max_length=20)),
('datetime', models.DateTimeField(auto_now_add=True)),
],
),
]
| 1.828125 | 2 |
plato/agent/component/nlg/slot_filling_nlg.py | avmi/plato-research-dialogue-system | 899 | 12768070 | <filename>plato/agent/component/nlg/slot_filling_nlg.py
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from plato.agent.component.nlg.nlg import NLG
import random
"""
SlotFillingNLG is a simple template-based nlg, designed to work for
Slot-Filling applications. The purpose of this class is to provide a quick way
of running Conversational Agents, sanity checks, and to aid debugging.
"""
class SlotFillingNLG(NLG):
def __init__(self, args=None):
"""
Nothing to initialize. We need the args to support use by the Generic
Agent.
"""
super(SlotFillingNLG, self).__init__()
def initialize(self, args):
"""
Nothing to do here
:param args:
:return:
"""
pass
def generate_output(self, args=None):
"""
Select the appropriate template given the acts in the arguments and
generate the output utterance.
:param args: a dictionary of arguments that contain the dialogue acts
:return: the output utterance
"""
if not args:
print('WARNING! SlotFillingNLG called without arguments!')
return ''
if 'args' in args:
dacts = args['args']
elif 'dacts' not in args:
print('WARNING! SlotFillingNLG called without dacts!')
return ''
else:
dacts = args['dacts']
system = True
if 'system' in args:
system = bool(args['system'])
response = ''
for dact in dacts:
if dact.intent == 'request':
if dact.params and dact.params[0].slot:
if system:
response += 'Which ' + \
dact.params[0].slot + \
' do you prefer?'
else:
response += 'What is the ' + dact.params[0].slot + '?'
else:
response += 'Which one?'
elif dact.intent in ['inform', 'offer']:
for dact_item in dact.params:
if system:
if dact_item.slot == 'name' and \
dact_item.value == 'not found':
response += 'Sorry, I cannot find such an item. '
else:
if not dact_item.value:
response += 'its ' + \
dact_item.slot + \
' is unknown, '
elif dact_item.slot == 'name' and \
len(dact.params) > 1:
response += dact_item.value + ' '
elif dact_item.slot in ['food', 'cuisine']:
response += 'is serving ' + \
dact_item.value + \
' food, '
elif dact_item.slot == 'endorsement':
response += 'is ' + dact_item.value + ', '
else:
response += 'its ' + \
dact_item.slot + \
' is ' + dact_item.value + ', '
else:
if dact.intent == 'offer':
if dact_item.value:
response += dact_item.slot + ' is ' + \
dact_item.value + ', '
else:
response += dact_item.slot + ' is unknown, '
else:
r = random.random()
if r < 0.33:
response += 'I prefer ' + dact_item.value + \
' ' + dact_item.slot + ', '
elif r < 0.66:
response += 'um i want ' + dact_item.value + \
' ' + dact_item.slot + ', '
else:
response += dact_item.value + ' ' + \
dact_item.slot + ' please, '
if response:
# Trim trailing comma and space
response = response[:-2]
elif dact.intent == 'bye':
response += 'Thank you, goodbye'
elif dact.intent == 'deny':
response += 'No'
elif dact.intent == 'negate':
response += 'No '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is not ' + dact.params[0].value
elif dact.intent == 'ack':
response += 'Ok'
elif dact.intent == 'affirm':
response += 'Yes '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is ' + dact.params[0].value
elif dact.intent == 'thankyou':
response += 'Thank you'
elif dact.intent == 'reqmore':
response += 'Can you tell me more?'
elif dact.intent == 'repeat':
response += 'Can you please repeat?'
elif dact.intent == 'restart':
response += 'Can we start over?'
elif dact.intent == 'expl-conf':
response += 'Alright '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' is ' + dact.params[0].value
elif dact.intent == 'select':
response += 'Which one do you prefer '
if dact.params and dact.params[0].slot:
response += 'for ' + dact.params[0].slot
elif dact.intent == 'reqalts':
response += 'Is there anything else?'
elif dact.intent in ['confirm', 'confirm-domain']:
response += 'So is '
if dact.params and dact.params[0].slot and \
dact.params[0].value:
response += dact.params[0].slot + \
' ' + dact.params[0].value
elif dact.intent == 'canthelp':
response += 'Sorry, I cannot help you with that.'
elif dact.intent == 'welcomemsg':
response += 'Hello, how may I help you?'
elif dact.intent == 'hello':
response = 'Hi'
elif dact.intent == 'welcome':
response += random.choice(['Hi, how can I help you today?',
'Speak, human.'])
elif dact.intent == 'na':
response += '(no system response)'
else:
response += 'SlotFillingNLG %s' % dact
response += ' '
response = response.replace('addr', 'address')
response = response.replace('pricerange', 'price range')
response = response.replace('postcode', 'post code')
response = response.replace('dontcare', 'any')
return response
def train(self, data):
"""
Nothing to do here.
:param data:
:return:
"""
pass
def save(self, path=None):
"""
Nothing to do here.
:param path:
:return:
"""
pass
def load(self, path):
"""
Nothing to do here.
:param path:
:return:
"""
pass
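
if __name__ == "__main__":
    # Smoke-test sketch. The stand-in objects below only mimic the
    # dialogue-act interface that generate_output reads (intent plus
    # params[].slot / params[].value); the real DialogueAct classes live
    # elsewhere in Plato.
    from types import SimpleNamespace

    dact = SimpleNamespace(
        intent="inform",
        params=[SimpleNamespace(slot="food", value="italian")],
    )
    nlg = SlotFillingNLG()
    print(nlg.generate_output({"dacts": [dact], "system": True}))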
| 3.09375 | 3 |
stats/extract_MNI_coord.py | athiede13/free_speech | 0 | 12768071 | """
Extract MNI coordinates for all brain maps.
Created on Fri May 24 11:26:07 2019
@author: <NAME> <<EMAIL>>
"""
import mne
import numpy as np
from summarize_clusters_stc_AT import summarize_clusters_stc_AT
import csv
#%% for one-sample T-test whether ISCs are significant
results_path = '/media/cbru/SMEDY/results/ISCs_comp_against_0/'
fres = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}
condition = '_1' # 1 speech, 2 rest
win = '_613' #'_579' #
groups = {'con_', 'dys_'}
fsave_vertices = [np.arange(10242), np.arange(10242)]
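# fsaverage (ico-5) has 10242 vertices per hemisphere; in the concatenated
# source space, indices 0-10241 belong to the left hemisphere and
# 10242-20483 to the right.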
for fre in fres:
for group in groups:
T_obs, clusters, cluster_p_values, H0 = clu =\
np.load(results_path + 't_clu_' + group + fre + win + condition + '.npy')
stc_all_cluster_vis = summarize_clusters_stc_AT(clu,
vertices=fsave_vertices,
subject='fsaverage')
# find the max T value and vertex (clusters are all the same size)
max_T = stc_all_cluster_vis.data[:, 0].max()
max_vtx = np.where(stc_all_cluster_vis.data[:, 0] ==
stc_all_cluster_vis.data[:, 0].max())
p_cluster_threshold = 0.05
good_cluster_inds = np.where(cluster_p_values <
p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters[ii][1]):
clu_size = len(clusters[ii][1])
                if max_vtx[0][0] >= 10242:  # rh vertices follow the 10242 lh vertices
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(group, fre, clu_size, mni.astype(np.int64), round(max_T, 2))
#%% for ISC group differences
results_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'
delta = (results_dir + 't_clu_tail1_5.000000e-01-4Hz_613_1.npy',
results_dir + 't_clu_tail-1_5.000000e-01-4Hz_613_1.npy')
theta = (results_dir + 't_clu_tail1_4-8Hz_613_1.npy',
results_dir + 't_clu_tail-1_4-8Hz_613_1.npy')
alpha = (results_dir + 't_clu_tail1_8-12Hz_613_1.npy',
results_dir + 't_clu_tail-1_8-12Hz_613_1.npy')
beta = (results_dir + 't_clu_tail1_12-25Hz_613_1.npy',
results_dir + 't_clu_tail-1_12-25Hz_613_1.npy')
gamma1 = (results_dir + 't_clu_tail1_25-45Hz_613_1.npy',
results_dir + 't_clu_tail-1_25-45Hz_613_1.npy')
gamma2 = (results_dir + 't_clu_tail1_55-90Hz_613_1.npy',
results_dir + 't_clu_tail-1_55-90Hz_613_1.npy')
all_bands = {delta, theta, alpha, beta, gamma1, gamma2}
#all_bands = {gamma1}
p_cluster_threshold = 0.05/6
with open(results_dir + 'mni_coordinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for band in all_bands:
max_T = None
min_T = None
clu_size = None
stc_all_cluster_vis_pos = None
stc_all_cluster_vis_neg = None
stc_all_cluster_vis_both = None
clu = np.load(band[0])
T_obs_pos, clusters_pos, cluster_p_values_pos, H0_pos = clu
good_cluster_inds_pos = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
if not good_cluster_inds_pos.any():
print('')
else:
stc_all_cluster_vis_pos = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
clu = np.load(band[1])
T_obs_neg, clusters_neg, cluster_p_values_neg, H0_neg = clu
good_cluster_inds_neg = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
if not good_cluster_inds_neg.any():
print('')
else:
stc_all_cluster_vis_neg = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
# combine positive and negative clusters to one source estimate file
if stc_all_cluster_vis_pos is not None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] =\
stc_all_cluster_vis_pos.data[:, 0] + stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_pos is None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_neg.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_neg is None and stc_all_cluster_vis_pos is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_pos.data[:, 0]
else:
print('Error! There is no data for negative and positive contrasts.')
# find the max T value and vertex, extreme might be negative or positive
# find largest cluster first
# pos
out = []
if good_cluster_inds_pos.any():
for j in range(0, len(good_cluster_inds_pos)):
inds_t, inds_v = [(clusters_pos[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_pos)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_pos = out.index(out2[0])
max_T = stc_all_cluster_vis_pos.data[:, id_max_pos+1].max()
# neg
out = []
if good_cluster_inds_neg.any():
for j in range(0, len(good_cluster_inds_neg)):
inds_t, inds_v = [(clusters_neg[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_neg)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_neg = out.index(out2[0])
min_T = stc_all_cluster_vis_neg.data[:, id_max_neg+1].min()
        if min_T is None and max_T is None:
            print('No pos nor neg clusters')
            continue  # max_vtx is never set in this case; skip to the next band
elif min_T is None: # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif max_T is None: # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
elif abs(max_T) > abs(min_T): # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif abs(max_T) < abs(min_T): # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
        else:
            print('Something went wrong')
            continue  # ambiguous extremum; max_vtx was not set for this band
        if max_vtx[0][0] >= 10242:  # rh source indices start at 10242
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(band, clu_size, mni.astype(np.int64), round(T, 2))
        mni_out.writerow([band[0], clu_size, mni.astype(str), round(T, 2)])
#%% for Mantel regressions
results_path = '/media/cbru/SMEDY/results/mantel_correlations/2019_05_simple_model/'
clu_files = [
results_path + 'phon_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'phon_clu_4-8Hz_613_1.npy',
results_path + 'phon_clu_8-12Hz_613_1.npy',
results_path + 'phon_clu_12-25Hz_613_1.npy',
results_path + 'phon_clu_25-45Hz_613_1.npy',
results_path + 'phon_clu_55-90Hz_613_1.npy',
results_path + 'read_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'read_clu_4-8Hz_613_1.npy',
results_path + 'read_clu_8-12Hz_613_1.npy',
results_path + 'read_clu_12-25Hz_613_1.npy',
results_path + 'read_clu_25-45Hz_613_1.npy',
results_path + 'mem_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'iq_clu_5.000000e-01-4Hz_613_1.npy'
]
cutoff = 25
with open(results_path + 'mni_coordinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for file in clu_files:
print(file)
# load clu
clu = np.load(file)
r_obs, clusters = clu
fsave_vertices = [np.arange(10242), np.arange(10242)]
# thresholding by cluster length
good_cluster_inds = []
clusters2 = []
for ii in range(0, len(clusters)):
if len(clusters[ii][1]) > (cutoff-1):
good_cluster_inds.append(ii)
clusters2.append(clusters[ii])
        clu2 = r_obs, clusters2, np.zeros(len(clusters2)), None  # no H0 available for these saved results
if not clusters2:
print('All clusters are smaller than the minimal length.')
else:
# Investigating the significant effects / Find max cluster
out = []
for j in range(0, len(good_cluster_inds)):
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max = out.index(out2[0])
clusters[good_cluster_inds[id_max]]
stc_all_cluster_vis = summarize_clusters_stc_AT(clu2, p_thresh=0.05,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=fsave_vertices)
max_R = np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max()
R_max = stc_all_cluster_vis.data[:, id_max+1].max()
R_min = stc_all_cluster_vis.data[:, id_max+1].min()
            if np.absolute(R_max) < np.absolute(R_min):
max_R = max_R*-1
max_vtx = np.where(np.absolute(stc_all_cluster_vis.data[:, id_max+1]) ==
np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max())
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters[ii][1]):
clu_size = len(clusters[ii][1])
            if max_vtx[0][0] >= 10242:  # rh source indices start at 10242
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(file, clu_size, mni.astype(np.int64), round(max_R, 2))
            mni_out.writerow([file, clu_size, mni.astype(str), round(max_R, 2)])
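# Note on the hemisphere indexing used throughout this script: fsaverage at
# ico-5 resolution has 10242 vertices per hemisphere, so source-space indices
# 0-10241 belong to lh and 10242-20483 to rh. For example:
#   mne.vertex_to_mni(0, 0, 'fsaverage')  # MNI coords of the first lh vertex
#   mne.vertex_to_mni(0, 1, 'fsaverage')  # first rh vertex (source index 10242)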
| 1.96875 | 2 |
ambari-server/src/test/python/TestStackFeature.py | AliceXiaoLu/ambari | 1 | 12768072 | # !/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from resource_management.core.logger import Logger
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.script import Script
from resource_management.core.exceptions import Fail
from unittest import TestCase
import json
Logger.initialize_logger()
class TestStackFeature(TestCase):
"""
EU Upgrade (HDP 2.5 to HDP 2.6)
- STOP
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.5
commandParams/version = 2.5.0.0-1237
- START
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.6
commandParams/version = 2.6.0.0-334
EU Downgrade (HDP 2.6 to HDP 2.5)
- STOP
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.6
commandParams/version = 2.6.0.0-334
- START
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.5
commandParams/version = 2.5.0.0-1237
"""
def test_get_stack_feature_version_missing_params(self):
try:
            get_stack_feature_version({})
self.fail("Expected an exception when there are required parameters missing from the dictionary")
except Fail:
pass
def test_get_stack_feature_version_for_install_command(self):
"""
Tests the stack feature version calculated during an install command on a new cluster
:return:
"""
command_json = TestStackFeature._get_cluster_install_command_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.4", stack_feature_version)
def test_get_stack_feature_version_for_upgrade_restart(self):
"""
Tests the stack feature version calculated during a restart command in an upgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_upgrade_restart_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
def test_get_stack_feature_version_for_downgrade_restart(self):
"""
Tests the stack feature version calculated during a restart command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_downgrade_restart_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.4.0.0-1234", stack_feature_version)
def test_get_stack_feature_version_for_downgrade_stop(self):
"""
Tests the stack feature version calculated during a STOP command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_downgrade_stop_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
command_json = TestStackFeature._get_cluster_downgrade_stop_custom_command_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
def test_get_stack_feature(self):
"""
Tests the stack feature version calculated during a STOP command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_upgrade_restart_json()
Script.config = command_json
Script.config["configurations"] = {}
Script.config["configurations"]["cluster-env"] = {}
Script.config["configurations"]["cluster-env"]["stack_features"] = {}
Script.config["configurations"]["cluster-env"]["stack_features"] = json.dumps(TestStackFeature._get_stack_feature_json())
stack_feature_version = get_stack_feature_version(command_json)
self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
command_json = TestStackFeature._get_cluster_install_command_json()
Script.config.update(command_json)
stack_feature_version = get_stack_feature_version(command_json)
self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
@staticmethod
def _get_cluster_install_command_json():
"""
Install command JSON with no upgrade and no version information.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand": "ACTIONEXECUTE",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"command_timeout": "1800",
"script_type": "PYTHON",
"script": "install_packages.py"
}
}
@staticmethod
def _get_cluster_upgrade_restart_json():
"""
A restart command during an upgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"ACTIONEXECUTE",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"source_stack": "2.4",
"target_stack": "2.5",
"upgrade_direction": "upgrade",
"version": "2.5.9.9-9999"
},
"upgradeSummary": {
"services":{
"HDFS":{
"sourceRepositoryId":1,
"sourceStackId":"HDP-2.4",
"sourceVersion":"2.4.0.0-1234",
"targetRepositoryId":2,
"targetStackId":"HDP-2.5",
"targetVersion":"2.5.9.9-9999"
}
},
"direction":"UPGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_restart_json():
"""
A restart command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"ACTIONEXECUTE",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.4"
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.4.0.0-1234"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_stop_json():
"""
A STOP command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"STOP",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.5",
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.5.9.9-9999"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_stop_custom_command_json():
"""
        A STOP issued as a custom command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"CUSTOM_COMMAND",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.5",
"custom_command":"STOP"
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.5.9.9-9999"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD"
}
}
@staticmethod
def _get_stack_feature_json():
"""
        A sample stack_features JSON structure consumed by these tests.
:return:
"""
return {
"HDP": {
"stack_features":[
{
"name":"stack-feature-1",
"description":"Stack Feature 1",
"min_version":"2.2.0.0"
},
{
"name":"stack-feature-2",
"description":"Stack Feature 2",
"min_version":"2.2.0.0",
"max_version":"2.6.0.0"
},
{
"name":"stack-feature-3",
"description":"Stack Feature 3",
"min_version":"2.2.0.0",
"max_version":"2.3.0.0"
}
]
}
}
| 1.84375 | 2 |
helper/enable_keymap_event_listener.py | Bhanditz/JavaScriptEnhancements | 0 | 12768073 | <gh_stars>0
import sublime, sublime_plugin
class enableKeymap(sublime_plugin.EventListener):
def on_text_command(self, view, command_name, args):
        # KEYMAP_COMMANDS and javascriptCompletions are assumed to be globals
        # provided elsewhere in the JavaScriptEnhancements plugin package.
        if command_name in KEYMAP_COMMANDS and not javascriptCompletions.get("enable_keymap"):
            return ("noop", {})  # swallow the command while the keymap is disabled
| 2.078125 | 2 |
lib/workers/lambdas/graph/__init__.py | jpelbertrios/Kai | 1 | 12768074 | import boto3
class Graph:
"""
Represents a Graph object in a DynamoDB table
"""
def __init__(self, table_name, graph_id):
dynamodb = boto3.resource("dynamodb")
self.table = dynamodb.Table(table_name)
self.graph_id = graph_id
def check_status(self, expected_status):
"""
Checks whether the graph has an expected status
"""
response = self.table.get_item(
Key={
'graphId': self.graph_id
}
)
# If the graph does not exist, it cannot have the expected status
graph = response["Item"]
if graph is None:
return False
status = graph["currentState"]
return status == expected_status
def update_status(self, status):
"""
Updates the status of a Graph
"""
self.table.update_item(
Key={
"graphId": self.graph_id
},
UpdateExpression="SET currentState = :state",
ExpressionAttributeValues={
":state": status
}
)
def delete(self):
"""
Deletes the graph from the Table
"""
self.table.delete_item(
Key={
"graphId": self.graph_id
}
)
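# Hypothetical usage sketch (the table name, graph id and status strings are
# illustrative, not taken from the Kai codebase):
#   graph = Graph("graphs-table", "example-graph")
#   if graph.check_status("DEPLOYED"):
#       graph.update_status("DELETION_IN_PROGRESS")
#       graph.delete()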
| 3.015625 | 3 |
caf_verilog/dot_prod_pip.py | chiranthsiddappa/caf_verilog | 1 | 12768075 | from .dot_product import DotProduct
import os
from numpy import log2
filedir = os.path.dirname(os.path.realpath(__file__))
dot_product_tb_module_path = os.path.join(filedir, '..', 'src')
dot_product_module_path = os.path.join(filedir, '..', 'src', 'dot_prod_pip.v')
class DotProdPip(DotProduct):
"""
"""
def template_dict(self, inst_name=None):
t_dict = super(DotProdPip, self).template_dict(inst_name)
        t_dict['length_counter_bits'] = int(log2(self.length))  # note: sizes the counter exactly only when self.length is a power of two
return t_dict
| 2.21875 | 2 |
contrib/fedora/ganglia/voltdb.py | guthemberg/tejo | 0 | 12768076 | <filename>contrib/fedora/ganglia/voltdb.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import copy
import subprocess
from configobj import ConfigObj
import sys
#db api available in home_dir
conf_file = "/etc/tejo.conf"
config=ConfigObj(conf_file)
sys.path.append(config['home_dir'])
ALIVE_TIME=int(config['alive_time'])
#from database import MyDB
#available in home_dir
from tejo.common.db.voltdb.stats import VoltStats
NAME_PREFIX = 'voltdb_'
PARAMS = {
#fake example, TO DO: modify this
'net_status' : '/usr/bin/netstat -s -p tcp'
}
METRICS = {
'time' : 0,
'data' : {}
}
LAST_METRICS = copy.deepcopy(METRICS)
METRICS_CACHE_TTL = 3
def running_process(process):
try:
ps_process=subprocess.Popen(["pgrep","-f",process], stdout=subprocess.PIPE)
running=int(subprocess.Popen(["wc","-l"], stdin=ps_process.stdout, stdout=subprocess.PIPE,close_fds=True).communicate()[0].rstrip())
if running>0:
return 1
else:
return 0
except:
return 0
def get_metrics():
"""Return all metrics"""
global METRICS, LAST_METRICS
if (time.time() - METRICS['time']) > METRICS_CACHE_TTL:
metrics = {}
#voltdb metrics
try:
voltdb_stats=VoltStats()
#all available stats are commented in https://voltdb.com/docs/UsingVoltDB/sysprocstatistics.php
#The current resident set size. That is, the total amount of memory allocated to the VoltDB processes on the server.
#Memory
memory_counters=voltdb_stats.get_memory_counters()
metrics['memory_rss']=float(memory_counters[0])
metrics['memory_javaused']=float(memory_counters[1])
metrics['memory_javaunused']=float(memory_counters[2])
metrics['memory_tupledata']=float(memory_counters[3])
metrics['memory_tupleallocated']=float(memory_counters[4])
metrics['memory_indexmemory']=float(memory_counters[5])
metrics['memory_stringmemory']=float(memory_counters[6])
metrics['memory_tuplecount']=float(memory_counters[7])
metrics['memory_pooledmemory']=float(memory_counters[8])
#DR
dr_counters=voltdb_stats.get_dr_counters()
metrics['dr_total_bytes']=float(dr_counters[0])
metrics['dr_total_bytes_in_memory']=float(dr_counters[1])
metrics['dr_total_buffers']=float(dr_counters[2])
#partition count
metrics['partition_count']=float(voltdb_stats.get_partition_count())
#planner
planner_counters=voltdb_stats.get_planner_counters()
metrics['planner_partitions']=float(planner_counters[0])
metrics['planner_cache1_hits']=float(planner_counters[1])
metrics['planner_cache2_hits']=float(planner_counters[2])
metrics['planner_cache_misses']=float(planner_counters[3])
metrics['planner_failures']=float(planner_counters[4])
#procedure
#(invocations,timed_invocations, avg_execution_time,aborts,failures)
(metrics['procedure_invocations'],metrics['procedure_timed_invocations'], \
metrics['procedure_avg_execution_time'],metrics['procedure_aborts'], \
metrics['procedure_failures'])=voltdb_stats.get_procedure_counters()
voltdb_stats.close()
except:
metrics['memory_rss']=0.0
metrics['memory_javaused']=0.0
metrics['memory_javaunused']=0.0
metrics['memory_tupledata']=0.0
metrics['memory_tupleallocated']=0.0
metrics['memory_indexmemory']=0.0
metrics['memory_stringmemory']=0.0
metrics['memory_tuplecount']=0.0
metrics['memory_pooledmemory']=0.0
metrics['dr_total_bytes']=0.0
metrics['dr_total_bytes_in_memory']=0.0
metrics['dr_total_buffers']=0.0
metrics['partition_count']=0.0
metrics['planner_partitions']=0.0
metrics['planner_cache1_hits']=0.0
metrics['planner_cache2_hits']=0.0
metrics['planner_cache_misses']=0.0
metrics['planner_failures']=0.0
metrics['procedure_invocations']=0.0
metrics['procedure_timed_invocations']=0.0
metrics['procedure_avg_execution_time']=0.0
metrics['procedure_aborts']=0.0
metrics['procedure_failures']=0.0
#active
try:
metrics['is_active']=running_process("voltdb")
except:
metrics['is_active']=0
# update cache
LAST_METRICS = copy.deepcopy(METRICS)
METRICS = {
'time': time.time(),
'data': metrics
}
return [METRICS, LAST_METRICS]
def get_value(name):
"""Return a value for the requested metric"""
# get metrics
metrics = get_metrics()[0]
# get value
name = name[len(NAME_PREFIX):] # remove prefix from name
try:
result = metrics['data'][name]
except StandardError:
result = 0
return result
def get_rate(name):
"""Return change over time for the requested metric"""
# get metrics
[curr_metrics, last_metrics] = get_metrics()
# get rate
name = name[len(NAME_PREFIX):] # remove prefix from name
try:
rate = float(curr_metrics['data'][name] - last_metrics['data'][name]) / \
float(curr_metrics['time'] - last_metrics['time'])
if rate < 0:
rate = float(0)
except StandardError:
rate = float(0)
return rate
def metric_init(lparams):
"""Initialize metric descriptors"""
global PARAMS
# set parameters
for key in lparams:
PARAMS[key] = lparams[key]
# define descriptors
time_max = 60
groups = 'voltdb'
descriptors = [
{
'name': NAME_PREFIX + 'is_active',
'call_back': get_value,
'time_max': time_max,
'value_type': 'uint',
'units': 'Yes: 1/ No: 0',
'slope': 'both',
'format': '%d',
'description': 'Is Active',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_rss',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory Resident Set Size',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_javaused',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for Java and in use',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_javaunused',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for Java but unused',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_tupledata',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for storing db records',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_tupleallocated',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for db records and free space',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_indexmemory',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for db indexes',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_stringmemory',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'kilobytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for in-line records',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_tuplecount',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'records',
'slope': 'both',
'format': '%f',
'description': 'Total number of database records now',
'groups': groups
},
{
'name': NAME_PREFIX + 'memory_pooledmemory',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'megabytes',
'slope': 'both',
'format': '%f',
'description': 'Memory for other tasks',
'groups': groups
},
{
'name': NAME_PREFIX + 'dr_total_bytes',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'bytes',
'slope': 'both',
'format': '%f',
'description': 'Current queue length to DR',
'groups': groups
},
{
'name': NAME_PREFIX + 'dr_total_bytes_in_memory',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'bytes',
'slope': 'both',
'format': '%f',
'description': 'Queued data currently held in memory',
'groups': groups
},
{
'name': NAME_PREFIX + 'dr_total_buffers',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'number of',
'slope': 'both',
'format': '%f',
'description': 'Waiting buffers in this partition',
'groups': groups
},
{
'name': NAME_PREFIX + 'partition_count',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'number of',
'slope': 'both',
'format': '%f',
'description': 'Unique or logical partitions on the cluster',
'groups': groups
},
{
'name': NAME_PREFIX + 'planner_partitions',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'partitions',
'slope': 'both',
'format': '%f',
'description': 'Number of partitions',
'groups': groups
},
{
'name': NAME_PREFIX + 'planner_cache1_hits',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'queries',
'slope': 'both',
'format': '%f',
'description': 'Queries that matched and reused in cache1',
'groups': groups
},
{
'name': NAME_PREFIX + 'planner_cache2_hits',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'queries',
'slope': 'both',
'format': '%f',
'description': 'Queries that matched and reused in cache2',
'groups': groups
},
{
'name': NAME_PREFIX + 'planner_cache_misses',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'queries',
'slope': 'both',
'format': '%f',
'description': 'Queries that had no match in the cache',
'groups': groups
},
{
'name': NAME_PREFIX + 'planner_failures',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'events',
'slope': 'both',
'format': '%f',
'description': 'Planning for an ad hoc query failed',
'groups': groups
},
{
'name': NAME_PREFIX + 'procedure_aborts',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'aborts',
'slope': 'both',
'format': '%f',
'description': 'Aborted procedures',
'groups': groups
},
{
'name': NAME_PREFIX + 'procedure_failures',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'failures',
'slope': 'both',
'format': '%f',
'description': 'Procedure failed unexpectedly',
'groups': groups
},
{
'name': NAME_PREFIX + 'procedure_invocations',
'call_back': get_rate,
'time_max': time_max,
'value_type': 'float',
'units': 'invocations',
'slope': 'both',
'format': '%f',
'description': 'Procedures invocations',
'groups': groups
},
{
'name': NAME_PREFIX + 'procedure_timed_invocations',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'invocations',
'slope': 'both',
'format': '%f',
'description': 'Number of invocations for avg,min,max',
'groups': groups
},
{
'name': NAME_PREFIX + 'procedure_avg_execution_time',
'call_back': get_value,
'time_max': time_max,
'value_type': 'float',
'units': 'nanoseconds',
'slope': 'both',
'format': '%f',
'description': 'Avg time to execute the stored procedure',
'groups': groups
}
]
return descriptors
def metric_cleanup():
"""Cleanup"""
pass
# the following code is for debugging and testing
if __name__ == '__main__':
descriptors = metric_init(PARAMS)
while True:
for d in descriptors:
            print ('%s = ' + d['format']) % (d['name'], d['call_back'](d['name']))
print ''
time.sleep(METRICS_CACHE_TTL)
| 1.984375 | 2 |
pdftotree/TreeVisualizer.py | zviri/pdftotree | 329 | 12768077 | from typing import Tuple
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from wand.display import display
else:
from IPython.display import display
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image
class TreeVisualizer:
"""
Object to display bounding boxes on a pdf document
"""
def __init__(self, pdf_file):
"""
:param pdf_path: directory where documents are stored
:return:
"""
self.pdf_file = pdf_file
def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False):
"""
Displays each of the bounding boxes passed in 'boxes' on images of the pdf
pointed to by pdf_file
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
imgs = []
colors = {
"section_header": Color("blue"),
"figure": Color("green"),
"figure_caption": Color("green"),
"table_caption": Color("red"),
"list": Color("yellow"),
"paragraph": Color("gray"),
"table": Color("red"),
"header": Color("brown"),
}
for i, page_num in enumerate(tree.keys()):
img = self.pdf_to_img(page_num)
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0.0)")
for clust in tree[page_num]:
for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][
clust
]:
draw.stroke_color = colors[clust]
draw.rectangle(left=left, top=top, right=right, bottom=bottom)
draw.push()
draw.font_size = 20
draw.font_weight = 10
draw.fill_color = colors[clust]
if int(left) > 0 and int(top) > 0:
draw.text(x=int(left), y=int(top), body=clust)
draw.pop()
draw(img)
img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png")
imgs.append(img)
return imgs
def display_candidates(self, tree, html_path, filename_prefix):
"""
Displays the bounding boxes corresponding to candidates on an image of the pdf
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
imgs = self.display_boxes(
tree, html_path, filename_prefix, alternate_colors=True
)
return display(*imgs)
def pdf_to_img(self, page_num, pdf_dim=None):
"""
Converts pdf file into image
        :param page_num: page number to convert (index starting at 1)
        :param pdf_dim: optional (page_width, page_height); computed from self.pdf_file when omitted
:return: wand image object
"""
if not pdf_dim:
pdf_dim = get_pdf_dim(self.pdf_file)
page_width, page_height = pdf_dim
img = Image(filename="{}[{}]".format(self.pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img
def get_pdf_dim(pdf_file) -> Tuple[int, int]:
with open(pdf_file, "rb") as f:
parser = PDFParser(f)
doc = PDFDocument(parser)
# Look at the 1st page only.
page = next(PDFPage.create_pages(doc))
_, _, page_width, page_height = page.mediabox
return page_width, page_height
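# Hypothetical usage sketch ("sample.pdf" and the output filename are
# placeholders, not part of pdftotree):
#   tv = TreeVisualizer("sample.pdf")
#   img = tv.pdf_to_img(page_num=1)
#   img.save(filename="page1.png")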
| 2.59375 | 3 |
biblio/titlecovers.py | lokal-profil/isfdb_site | 0 | 12768078 | #!_PYTHONLOC
#
# (C) COPYRIGHT 2014-2021 Ahasuerus and <NAME>
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from SQLparsing import *
from common import *
from library import *
if __name__ == '__main__':
title_id = SESSION.Parameter(0, 'int')
title = SQLgetTitle(title_id)
if not title:
SESSION.DisplayError('Title Does Not Exist')
PrintHeader('All Covers for %s' % title)
PrintNavbar('titlecovers', 0, 0, 'titlecovers.cgi', title_id)
pubs = SQLGetPubsByTitle(title_id)
count = 0
for pub in pubs:
if pub[PUB_IMAGE]:
print ISFDBLinkNoName("pl.cgi", pub[PUB_PUBID], '<img src="%s" alt="Coverart" class="scans">' % pub[PUB_IMAGE].split("|")[0])
count += 1
if not count:
print '<h3>No covers for %s</h3>' % title
print '<p>%s' % ISFDBLinkNoName('title.cgi', title_id, '<b>Back to the Title page for %s</b>' % title, True)
PrintTrailer('titlecovers', title_id, title_id)
| 2.140625 | 2 |
objects_and_classes/exercise/01_storage.py | Galchov/python-fundamentals | 0 | 12768079 | class Storage:
__storage = 0
def __init__(self, capacity):
self.capacity = capacity
self.storage = []
def add_product(self, product):
if not Storage.__storage == self.capacity:
self.storage.append(product)
Storage.__storage += 1
def get_products(self):
return self.storage | 3.703125 | 4 |
reap_oracle_dbs.py | VeranosTech/alembic | 0 | 12768080 | <reponame>VeranosTech/alembic
"""Drop Oracle databases that are left over from a
multiprocessing test run.
Currently the cx_Oracle driver seems to sometimes not release a
TCP connection even if close() is called, which prevents the provisioning
system from dropping a database in-process.
"""
from alembic.testing import provision
import logging
import sys
logging.basicConfig()
logging.getLogger(provision.__name__).setLevel(logging.INFO)
provision.reap_oracle_dbs(sys.argv[1])
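# Usage sketch (the argument is assumed to be the ident file written by the
# test provisioning system during the multiprocessing run):
#   python reap_oracle_dbs.py oracle_idents.txt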
| 1.765625 | 2 |
src/cache.py | aklsh/SimCache | 0 | 12768081 | <gh_stars>0
import random
from replacePLRU import Tree
from replaceLRU import LRUreplace, LRUupdate
import math
from copy import copy
class cacheBlock:
def __init__(self, tag=None):
self.tag = tag
if tag == None:
self.valid = False
else:
self.valid = True
self.dirty = False
def print(self):
print("Valid: {} Dirty: {} Tag: {}".format(self.valid, self.dirty, self.tag))
def access(self, accessType:str):
if self.valid is True:
if accessType == 'r':
pass
if accessType == 'w':
self.dirty = True
else:
raise ValueError("Accessing Invalid Cache Block")
class cacheSet:
def __init__(self, assoc:int, replacementPolicy:str):
self.assoc = assoc
self.blocks = []
for _ in range(assoc):
self.blocks.extend([cacheBlock()])
self.replacementPolicy = replacementPolicy
if self.replacementPolicy == "LRU":
self.LRUCounter = []
for _ in range(self.assoc):
self.LRUCounter.extend([-1])
if (self.replacementPolicy == "PLRU"):
numStages = int(math.log(assoc,2))
self.PLRUTree = Tree(numStages)
def emptyExists(self):
for block in self.blocks:
if block.valid == False:
return True
return False
def accessBlock(self, blockTag, accessType):
        for idx, block in enumerate(self.blocks):
            # keep idx 0-based so it matches the indexing used by insert() and
            # the victim index returned by replace()
if block.tag == blockTag: # block in cache - return hit (True)
block.access(accessType)
if (self.replacementPolicy == "PLRU"):
self.PLRUTree.traverse(idx)
elif self.replacementPolicy == "LRU":
self.LRUCounter = LRUupdate(self.LRUCounter, idx)
return True, None
# if comes here, then block with given tag not in cache
# bring to cache, and return miss (False)
newBlock = cacheBlock(blockTag)
newBlock.access(accessType)
status, replaceBlock = self.insert(newBlock)
if status == 0:
return False, None
else:
return False, replaceBlock
def replace(self):
if self.replacementPolicy == "RANDOM":
replacementCandidate = random.randint(0,self.assoc-1)
elif self.replacementPolicy == "LRU":
replacementCandidate = LRUreplace(self.LRUCounter)
elif self.replacementPolicy == "PLRU":
replacementCandidate = self.PLRUTree.getVictim()
else:
raise ValueError("Invalid Replacement Policy for cache set: ", self.replacementPolicy)
return replacementCandidate
def insert(self, newBlock):
flag = -1
for i, block in enumerate(self.blocks):
if block.valid == False: # empty block
block.valid = newBlock.valid
block.tag = newBlock.tag
block.dirty = newBlock.dirty
if (self.replacementPolicy == "PLRU"):
self.PLRUTree.traverse(i)
elif self.replacementPolicy == "LRU":
self.LRUCounter = LRUupdate(self.LRUCounter, i)
flag = 1
break
if flag == -1: # no empty block - replace
replacementCandidate = self.replace()
replacedBlock = copy(self.blocks[replacementCandidate])
self.blocks[replacementCandidate].valid = True
self.blocks[replacementCandidate].dirty = False
self.blocks[replacementCandidate].tag = newBlock.tag
return 1, replacedBlock
return 0, None
class cache:
def __init__(self, numSets, assoc, replacementPolicy):
self.numSets = numSets
self.assoc = assoc
self.replacementPolicy = replacementPolicy
self.history = []
self.numAccesses = 0
self.numReads = 0
self.numWrites = 0
self.numHits = 0
self.numMisses = 0
self.numCompMisses = 0
self.numCapMisses = 0
self.numConfMisses = 0
self.numReadMisses = 0
self.numWriteMisses = 0
self.numDEs = 0
self.cacheSets = []
for _ in range(self.numSets):
self.cacheSets.extend([cacheSet(self.assoc, self.replacementPolicy)])
def memRequest(self, blockAddress, accessType):
self.numAccesses += 1
if accessType == 'r':
self.numReads += 1
elif accessType == 'w':
self.numWrites += 1
else:
raise ValueError("Invalid Access Type")
setIndex = blockAddress % self.numSets
blockTag = blockAddress//self.numSets
reqStatus, retBlock = self.cacheSets[setIndex].accessBlock(blockTag, accessType)
if reqStatus == True:
self.numHits += 1
else:
self.numMisses += 1
if accessType == 'r':
self.numReadMisses += 1
elif accessType == 'w':
self.numWriteMisses += 1
if blockAddress not in self.history:
self.history.extend([blockAddress])
self.numCompMisses += 1
else:
flag = 0
for set in self.cacheSets:
if set.emptyExists() == True:
self.numConfMisses += 1
flag = 1
break
if flag == 0:
self.numCapMisses += 1
if retBlock is not None:
if retBlock.dirty is True:
self.numDEs += 1
def printStats(self):
print("----------- Simulation Results -----------")
print("Number of Accesses:", self.numAccesses)
print("Number of Reads:", self.numReads)
print("Number of Writes:", self.numWrites)
print("Number of Hits:", self.numHits)
print("Number of Misses:", self.numMisses)
print("Number of Read Misses:", self.numReadMisses)
print("Number of Write Misses:", self.numWriteMisses)
print("Number of Compulsory Misses:", self.numCompMisses)
print("Number of Capacity Misses:", self.numCapMisses)
print("Number of Conflict Misses:", self.numConfMisses)
print("Number of Dirty Evictions:", self.numDEs)
print("------------------------------------------")
| 2.59375 | 3 |
augmented_keypoints/evaluate_fixed_keypoints.py | cbenge509/kaggle_facial_keypoints | 2 | 12768082 | #%%
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
TRAIN_UPDATE_FILE = "C:/kaggle/kaggle_keypoints/pickle/cleandata_updates_augment.pkl"
train = pickle.load(open(TRAIN_UPDATE_FILE, "rb")).reset_index()
print("Size of 'augmentation' set: %d" % train.shape[0])
# %%
fig = plt.figure(figsize=(20,20))
cols = [c for c in train.columns if not c.startswith('image')]
rng = np.clip(train.shape[0], 0, 60)
for i in range(rng):
img = train.iloc[i].image.reshape(96,96)
points = train.iloc[i][cols].values
ax = fig.add_subplot(6,10,i+1)
ax.imshow(img, cmap='gray')
ax.scatter(points[0::2], points[1::2], color = 'red', s = 20)
plt.axis('off')
plt.tight_layout()
plt.show()
# %%
| 2.515625 | 3 |
MyTextEditor.py | Priyeshjena98/MyTextEditor | 2 | 12768083 | from tkinter import Tk,scrolledtext,Menu,filedialog,END,messagebox
import tkinter.scrolledtext as ScrolledText
from tkinter import *
from PIL import ImageTk
#root is the root window into which all other widgets go. It is an instance of the class Tk, and every tkinter application must have exactly one instance of this class. app is an instance of the class App, which is a subclass of Frame.
root=Tk(className=" AJ Text Editor")
textArea=ScrolledText.ScrolledText(root,width=100,height=80,bg='green')
#
#FUNCTIONS
#
def openFile():
    file=filedialog.askopenfile(parent=root,mode='r',title='select a text file')  # text mode, so contents can go straight into the Text widget
if file!=None:
contents=file.read()
textArea.insert('1.0',contents)
file.close()
def saveFile():
file=filedialog.asksaveasfile(mode='w')
if file!=None:
#Slice of the last character from get,as an extra enter is added
data=textArea.get('1.0',END+'-1c')
file.write(data)
file.close()
def quit():
if messagebox.askyesno("Quit","Are u sure u want to quit"):
root.destroy()
def about():
    messagebox.showinfo("About", "A Python text editor built with tkinter!")
#Menu options
menu=Menu(root)
root.configure(background = 'black')
root.config(menu=menu)
fileMenu=Menu(menu)
#Creates a new hierarchical menu by associating a given menu to a parent menu(add_cascade)
menu.add_cascade(label="File",menu=fileMenu)
fileMenu.add_command(label="New")
fileMenu.add_command(label="Open",command=openFile)
fileMenu.add_command(label="Save",command=saveFile)
fileMenu.add_command(label="Print")
helpMenu=Menu(menu)
menu.add_cascade(label="Help",menu=helpMenu)
helpMenu.add_command(label="About",command=about)
#fileMenu.add_separator()
fileMenu.add_command(label="Exit",command=quit)
textArea.pack()
photo = PhotoImage(file = "aa.png")
w = Label(root, image=photo)
w.pack()
root.mainloop()
| 3.84375 | 4 |
robosuite/environments/pick_place.py | kyungjaelee/robosuite | 0 | 12768084 | <reponame>kyungjaelee/robosuite
from collections import OrderedDict
import random
import numpy as np
import robosuite.utils.transform_utils as T
from robosuite.environments.robot_env import RobotEnv
from robosuite.robots import SingleArm
from robosuite.models.arenas import BinsArena
from robosuite.models.objects import (
MilkObject,
BreadObject,
CerealObject,
CanObject,
)
from robosuite.models.objects import (
MilkVisualObject,
BreadVisualObject,
CerealVisualObject,
CanVisualObject,
)
from robosuite.models.tasks import ManipulationTask, SequentialCompositeSampler
class PickPlace(RobotEnv):
"""
This class corresponds to the pick place task for a single robot arm.
Args:
robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
(e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
Note: Must be a single single-arm robot!
controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
custom controller. Else, uses the default controller for this specific task. Should either be single
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
gripper_types (str or list of str): type of gripper, used to instantiate
            gripper models from gripper factory. Default is "default", which is the default gripper(s) associated
            with the robot(s) in the 'robots' specification. None removes the gripper, and any other (valid) model
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
gripper_visualizations (bool or list of bool): True if using gripper visualization.
Useful for teleoperation. Should either be single bool if gripper visualization is to be used for all
robots or else it should be a list of the same length as "robots" param
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
:`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
:`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"
Should either be single dict if same noise value is to be used for all robots or else it should be a
list of the same length as "robots" param
:Note: Specifying "default" will automatically use the default noise settings.
Specifying None will automatically create the required dict with "magnitude" set to 0.0.
table_full_size (3-tuple): x, y, and z dimensions of the table.
table_friction (3-tuple): the three mujoco friction parameters for
the table.
bin1_pos (3-tuple): Absolute cartesian coordinates of the bin initially holding the objects
bin2_pos (3-tuple): Absolute cartesian coordinates of the goal bin
use_camera_obs (bool): if True, every observation includes rendered image(s)
        use_object_obs (bool): if True, include ground-truth object information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
single_object_mode (int): specifies which version of the task to do. Note that
the observations change accordingly.
:`0`: corresponds to the full task with all types of objects.
:`1`: corresponds to an easier task with only one type of object initialized
on the table with every reset. The type is randomized on every reset.
:`2`: corresponds to an easier task with only one type of object initialized
on the table with every reset. The type is kept constant and will not
change between resets.
object_type (string): if provided, should be one of "milk", "bread", "cereal",
or "can". Determines which type of object will be spawned on every
environment reset. Only used if @single_object_mode is 2.
use_indicator_object (bool): if True, sets up an indicator object that
is useful for debugging.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering
render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
the user using the mouse
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.
control_freq (float): how many control signals to receive in every second. This sets the amount of
simulation time that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
only calls sim.reset and resets all robosuite-internal variables
camera_names (str or list of str): name of camera to be rendered. Should either be single str if
same name is to be used for all cameras' rendering or else it should be a list of cameras to render.
:Note: At least one camera must be specified if @use_camera_obs is True.
:Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
robot's camera list).
camera_heights (int or list of int): height of camera frame. Should either be single int if
same height is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_widths (int or list of int): width of camera frame. Should either be single int if
same width is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
"camera names" param.
Raises:
AssertionError: [Invalid object type specified]
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
controller_configs=None,
gripper_types="default",
gripper_visualizations=False,
initialization_noise="default",
table_full_size=(0.39, 0.49, 0.82),
table_friction=(1, 0.005, 0.0001),
bin1_pos=(0.1, -0.25, 0.8),
bin2_pos=(0.1, 0.28, 0.8),
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
reward_shaping=False,
single_object_mode=0,
object_type=None,
use_indicator_object=False,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
control_freq=10,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
):
# First, verify that only one robot is being inputted
self._check_robot_configuration(robots)
# task settings
self.single_object_mode = single_object_mode
self.object_to_id = {"milk": 0, "bread": 1, "cereal": 2, "can": 3}
if object_type is not None:
assert (
object_type in self.object_to_id.keys()
), "invalid @object_type argument - choose one of {}".format(
list(self.object_to_id.keys())
)
self.object_id = self.object_to_id[
object_type
] # use for convenient indexing
self.obj_to_use = None
# settings for table top
self.table_full_size = table_full_size
self.table_friction = table_friction
# settings for bin position
self.bin1_pos = np.array(bin1_pos)
self.bin2_pos = np.array(bin2_pos)
# reward configuration
self.reward_scale = reward_scale
self.reward_shaping = reward_shaping
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
super().__init__(
robots=robots,
controller_configs=controller_configs,
gripper_types=gripper_types,
gripper_visualizations=gripper_visualizations,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
use_indicator_object=use_indicator_object,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
)
def reward(self, action=None):
"""
Reward function for the task.
Sparse un-normalized reward:
- a discrete reward of 1.0 per object if it is placed in its correct bin
Un-normalized components if using reward shaping, where the maximum is returned if not solved:
- Reaching: in [0, 0.1], proportional to the distance between the gripper and the closest object
- Grasping: in {0, 0.35}, nonzero if the gripper is grasping an object
- Lifting: in {0, [0.35, 0.5]}, nonzero only if object is grasped; proportional to lifting height
- Hovering: in {0, [0.5, 0.7]}, nonzero only if object is lifted; proportional to distance from object to bin
        Note that a successfully completed task (object in bin) will return 1.0 per object regardless of whether the
environment is using sparse or shaped rewards
Note that the final reward is normalized and scaled by reward_scale / 4.0 (or 1.0 if only a single object is
being used) as well so that the max score is equal to reward_scale
Args:
action (np.array): [NOT USED]
Returns:
float: reward value
"""
# compute sparse rewards
self._check_success()
reward = np.sum(self.objects_in_bins)
# add in shaped rewards
if self.reward_shaping:
staged_rewards = self.staged_rewards()
reward += max(staged_rewards)
if self.reward_scale is not None:
reward *= self.reward_scale
if self.single_object_mode == 0:
reward /= 4.0
return reward
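    # Worked example of the sparse case: with single_object_mode == 0,
    # reward_scale == 1.0 and two of the four objects already in their bins,
    # reward = 2 * 1.0 / 4.0 = 0.5.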
def staged_rewards(self):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
Returns:
4-tuple:
- (float) reaching reward
- (float) grasping reward
- (float) lifting reward
- (float) hovering reward
"""
reach_mult = 0.1
grasp_mult = 0.35
lift_mult = 0.5
hover_mult = 0.7
# filter out objects that are already in the correct bins
objs_to_reach = []
geoms_to_grasp = []
target_bin_placements = []
for i in range(len(self.ob_inits)):
if self.objects_in_bins[i]:
continue
obj_str = str(self.item_names[i]) + "0"
objs_to_reach.append(self.obj_body_id[obj_str])
geoms_to_grasp.append(self.obj_geom_id[obj_str])
target_bin_placements.append(self.target_bin_placements[i])
target_bin_placements = np.array(target_bin_placements)
### reaching reward governed by distance to closest object ###
r_reach = 0.
if len(objs_to_reach):
# get reaching reward via minimum distance to a target object
target_object_pos = self.sim.data.body_xpos[objs_to_reach]
gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
dists = np.linalg.norm(
target_object_pos - gripper_site_pos.reshape(1, -1), axis=1
)
r_reach = (1 - np.tanh(10.0 * min(dists))) * reach_mult
### grasping reward for touching any objects of interest ###
touch_left_finger = False
touch_right_finger = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if c.geom1 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom1)
if c.geom2 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom2 in self.r_finger_geom_ids:
touch_right_finger = True
elif c.geom2 in geoms_to_grasp:
bin_id = geoms_to_grasp.index(c.geom2)
if c.geom1 in self.l_finger_geom_ids:
touch_left_finger = True
if c.geom1 in self.r_finger_geom_ids:
touch_right_finger = True
has_grasp = touch_left_finger and touch_right_finger
r_grasp = int(has_grasp) * grasp_mult
### lifting reward for picking up an object ###
r_lift = 0.
if len(objs_to_reach) and r_grasp > 0.:
z_target = self.bin2_pos[2] + 0.25
object_z_locs = self.sim.data.body_xpos[objs_to_reach][:, 2]
z_dists = np.maximum(z_target - object_z_locs, 0.)
r_lift = grasp_mult + (1 - np.tanh(15.0 * min(z_dists))) * (
lift_mult - grasp_mult
)
### hover reward for getting object above bin ###
r_hover = 0.
if len(objs_to_reach):
# segment objects into left of the bins and above the bins
object_xy_locs = self.sim.data.body_xpos[objs_to_reach][:, :2]
y_check = (
np.abs(object_xy_locs[:, 1] - target_bin_placements[:, 1])
< self.bin_size[1] / 4.
)
x_check = (
np.abs(object_xy_locs[:, 0] - target_bin_placements[:, 0])
< self.bin_size[0] / 4.
)
objects_above_bins = np.logical_and(x_check, y_check)
objects_not_above_bins = np.logical_not(objects_above_bins)
dists = np.linalg.norm(
target_bin_placements[:, :2] - object_xy_locs, axis=1
)
# objects to the left get r_lift added to hover reward, those on the right get max(r_lift) added (to encourage dropping)
r_hover_all = np.zeros(len(objs_to_reach))
r_hover_all[objects_above_bins] = lift_mult + (
1 - np.tanh(10.0 * dists[objects_above_bins])
) * (hover_mult - lift_mult)
r_hover_all[objects_not_above_bins] = r_lift + (
1 - np.tanh(10.0 * dists[objects_not_above_bins])
) * (hover_mult - lift_mult)
r_hover = np.max(r_hover_all)
return r_reach, r_grasp, r_lift, r_hover
def not_in_bin(self, obj_pos, bin_id):
bin_x_low = self.bin2_pos[0]
bin_y_low = self.bin2_pos[1]
if bin_id == 0 or bin_id == 2:
bin_x_low -= self.bin_size[0] / 2
if bin_id < 2:
bin_y_low -= self.bin_size[1] / 2
bin_x_high = bin_x_low + self.bin_size[0] / 2
bin_y_high = bin_y_low + self.bin_size[1] / 2
res = True
if (
bin_x_low < obj_pos[0] < bin_x_high
and bin_y_low < obj_pos[1] < bin_y_high
and self.bin2_pos[2] < obj_pos[2] < self.bin2_pos[2] + 0.1
):
res = False
return res
def clear_objects(self, obj):
"""
Clears objects without the name @obj out of the task space. This is useful
for supporting task modes with single types of objects, as in
@self.single_object_mode without changing the model definition.
Args:
obj (str): Name of object to keep in the task space
"""
for obj_name, obj_mjcf in self.mujoco_objects.items():
if obj_name == obj:
continue
else:
sim_state = self.sim.get_state()
# print(self.sim.model.get_joint_qpos_addr(obj_name))
sim_state.qpos[self.sim.model.get_joint_qpos_addr(obj_name + "_jnt0")[0]] = 10
self.sim.set_state(sim_state)
self.sim.forward()
def _get_placement_initializer(self):
"""
Helper function for defining placement initializer and object sampling bounds.
"""
self.placement_initializer = SequentialCompositeSampler()
# can sample anywhere in bin
bin_x_half = self.mujoco_arena.table_full_size[0] / 2 - 0.05
bin_y_half = self.mujoco_arena.table_full_size[1] / 2 - 0.05
# each object should just be sampled in the bounds of the bin (with some tolerance)
for obj_name in self.mujoco_objects:
self.placement_initializer.sample_on_top(
obj_name,
surface_name="table",
x_range=[-bin_x_half, bin_x_half],
y_range=[-bin_y_half, bin_y_half],
rotation=None,
rotation_axis='z',
z_offset=0.,
ensure_object_boundary_in_range=True,
)
# each visual object should just be at the center of each target bin
index = 0
for obj_name in self.visual_objects:
# get center of target bin
bin_x_low = self.bin2_pos[0]
bin_y_low = self.bin2_pos[1]
if index == 0 or index == 2:
bin_x_low -= self.bin_size[0] / 2
if index < 2:
bin_y_low -= self.bin_size[1] / 2
bin_x_high = bin_x_low + self.bin_size[0] / 2
bin_y_high = bin_y_low + self.bin_size[1] / 2
bin_center = np.array([
(bin_x_low + bin_x_high) / 2.,
(bin_y_low + bin_y_high) / 2.,
])
# placement is relative to object bin, so compute difference and send to placement initializer
rel_center = bin_center - self.bin1_pos[:2]
self.placement_initializer.sample_on_top(
obj_name,
surface_name="table",
x_range=[rel_center[0], rel_center[0]],
y_range=[rel_center[1], rel_center[1]],
rotation=0.,
rotation_axis='z',
z_offset=self.bin2_pos[2] - self.bin1_pos[2],
ensure_object_boundary_in_range=False,
)
index += 1
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
# Verify the correct robot has been loaded
assert isinstance(self.robots[0], SingleArm), \
"Error: Expected one single-armed robot! Got {} type instead.".format(type(self.robots[0]))
# Adjust base pose accordingly
xpos = self.robots[0].robot_model.base_xpos_offset["bins"]
self.robots[0].robot_model.set_base_xpos(xpos)
# load model for table top workspace
self.mujoco_arena = BinsArena(
bin1_pos=self.bin1_pos,
table_full_size=self.table_full_size,
table_friction=self.table_friction
)
if self.use_indicator_object:
self.mujoco_arena.add_pos_indicator()
# Arena always gets set to zero origin
self.mujoco_arena.set_origin([0, 0, 0])
# store some arena attributes
self.bin_size = self.mujoco_arena.table_full_size
# define mujoco objects
self.ob_inits = [MilkObject, BreadObject, CerealObject, CanObject]
self.vis_inits = [
MilkVisualObject,
BreadVisualObject,
CerealVisualObject,
CanVisualObject,
]
self.item_names = ["Milk", "Bread", "Cereal", "Can"]
self.item_names_org = list(self.item_names)
self.obj_to_use = (self.item_names[0] + "{}").format(0)
lst = []
for j in range(len(self.vis_inits)):
visual_ob_name = ("Visual" + self.item_names[j] + "0")
visual_ob = self.vis_inits[j](
name=visual_ob_name,
joints=[], # no free joint for visual objects
)
lst.append((visual_ob_name, visual_ob))
self.visual_objects = OrderedDict(lst)
lst = []
for i in range(len(self.ob_inits)):
ob_name = (self.item_names[i] + "0")
ob = self.ob_inits[i](
name=ob_name,
joints=[dict(type="free", damping="0.0005")], # damp the free joint for each object
)
lst.append((ob_name, ob))
self.mujoco_objects = OrderedDict(lst)
self.n_objects = len(self.mujoco_objects)
# task includes arena, robot, and objects of interest
self._get_placement_initializer()
self.model = ManipulationTask(
mujoco_arena=self.mujoco_arena,
mujoco_robots=[robot.robot_model for robot in self.robots],
mujoco_objects=self.mujoco_objects,
visual_objects=self.visual_objects,
initializer=self.placement_initializer,
)
# set positions of objects
self.model.place_objects()
# self.model.place_visual()
def _get_reference(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._get_reference()
# Additional object references from this env
self.obj_body_id = {}
self.obj_geom_id = {}
# id of grippers for contact checking
self.l_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.robots[0].gripper.important_geoms["left_finger"]
]
self.r_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.robots[0].gripper.important_geoms["right_finger"]
]
# object-specific ids
for i in range(len(self.ob_inits)):
obj_str = str(self.item_names[i]) + "0"
self.obj_body_id[obj_str] = self.sim.model.body_name2id(obj_str)
self.obj_geom_id[obj_str] = self.sim.model.geom_name2id(obj_str)
# for checking distance to / contact with objects we want to pick up
self.target_object_body_ids = list(map(int, self.obj_body_id.values()))
self.contact_with_object_geom_ids = list(map(int, self.obj_geom_id.values()))
# keep track of which objects are in their corresponding bins
self.objects_in_bins = np.zeros(len(self.ob_inits))
# target locations in bin for each object type
self.target_bin_placements = np.zeros((len(self.ob_inits), 3))
for j in range(len(self.ob_inits)):
bin_id = j
bin_x_low = self.bin2_pos[0]
bin_y_low = self.bin2_pos[1]
if bin_id == 0 or bin_id == 2:
bin_x_low -= self.bin_size[0] / 2.
if bin_id < 2:
bin_y_low -= self.bin_size[1] / 2.
bin_x_low += self.bin_size[0] / 4.
bin_y_low += self.bin_size[1] / 4.
self.target_bin_placements[j, :] = [bin_x_low, bin_y_low, self.bin2_pos[2]]
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
# Reset all object positions using initializer sampler if we're not directly loading from an xml
if not self.deterministic_reset:
# Sample from the placement initializer for all objects
obj_pos, obj_quat = self.model.place_objects()
# Loop through all objects and reset their positions
for i, (obj_name, _) in enumerate(self.mujoco_objects.items()):
self.sim.data.set_joint_qpos(obj_name + "_jnt0", np.concatenate([np.array(obj_pos[i]), np.array(obj_quat[i])]))
# information of objects
self.object_names = list(self.mujoco_objects.keys())
self.object_site_ids = [
self.sim.model.site_name2id(ob_name) for ob_name in self.object_names
]
# Set the bins to the desired position
self.sim.model.body_pos[self.sim.model.body_name2id("bin1")] = self.bin1_pos
self.sim.model.body_pos[self.sim.model.body_name2id("bin2")] = self.bin2_pos
# Move objects out of the scene depending on the mode
if self.single_object_mode == 1:
self.obj_to_use = (random.choice(self.item_names) + "{}").format(0)
self.clear_objects(self.obj_to_use)
elif self.single_object_mode == 2:
self.obj_to_use = (self.item_names[self.object_id] + "{}").format(0)
self.clear_objects(self.obj_to_use)
def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
`'robot-state'`: contains robot-centric information.
`'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.
`'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.
`'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
Contains a rendered depth map from the simulation
Returns:
OrderedDict: Observations from the environment
"""
di = super()._get_observation()
# low-level object information
if self.use_object_obs:
# Get robot prefix
pr = self.robots[0].robot_model.naming_prefix
# remember the keys to collect into object info
object_state_keys = []
# for conversion to relative gripper frame
gripper_pose = T.pose2mat((di[pr + "eef_pos"], di[pr + "eef_quat"]))
world_pose_in_gripper = T.pose_inv(gripper_pose)
for i in range(len(self.item_names_org)):
if self.single_object_mode == 2 and self.object_id != i:
# Skip adding to observations
continue
obj_str = str(self.item_names_org[i]) + "0"
obj_pos = np.array(self.sim.data.body_xpos[self.obj_body_id[obj_str]])
obj_quat = T.convert_quat(
self.sim.data.body_xquat[self.obj_body_id[obj_str]], to="xyzw"
)
di["{}_pos".format(obj_str)] = obj_pos
di["{}_quat".format(obj_str)] = obj_quat
# get relative pose of object in gripper frame
object_pose = T.pose2mat((obj_pos, obj_quat))
rel_pose = T.pose_in_A_to_pose_in_B(object_pose, world_pose_in_gripper)
rel_pos, rel_quat = T.mat2pose(rel_pose)
di["{}_to_{}eef_pos".format(obj_str, pr)] = rel_pos
di["{}_to_{}eef_quat".format(obj_str, pr)] = rel_quat
object_state_keys.append("{}_pos".format(obj_str))
object_state_keys.append("{}_quat".format(obj_str))
object_state_keys.append("{}_to_{}eef_pos".format(obj_str, pr))
object_state_keys.append("{}_to_{}eef_quat".format(obj_str, pr))
if self.single_object_mode == 1:
# Zero out other objects observations
for obj_str, obj_mjcf in self.mujoco_objects.items():
if obj_str == self.obj_to_use:
continue
else:
di["{}_pos".format(obj_str)] *= 0.0
di["{}_quat".format(obj_str)] *= 0.0
di["{}_to_{}eef_pos".format(obj_str, pr)] *= 0.0
di["{}_to_{}eef_quat".format(obj_str, pr)] *= 0.0
di["object-state"] = np.concatenate([di[k] for k in object_state_keys])
return di
def _check_success(self):
"""
Check if all objects have been successfully placed in their corresponding bins.
Returns:
bool: True if all objects are placed correctly
"""
# remember objects that are in the correct bins
gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
for i in range(len(self.ob_inits)):
obj_str = str(self.item_names[i]) + "0"
obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
dist = np.linalg.norm(gripper_site_pos - obj_pos)
r_reach = 1 - np.tanh(10.0 * dist)
self.objects_in_bins[i] = int(
(not self.not_in_bin(obj_pos, i)) and r_reach < 0.6
)
# returns True if a single object is in the correct bin
if self.single_object_mode == 1 or self.single_object_mode == 2:
return np.sum(self.objects_in_bins) > 0
# returns True if all objects are in correct bins
return np.sum(self.objects_in_bins) == len(self.ob_inits)
def _visualization(self):
"""
Do any needed visualization here. Overrides superclass implementations.
"""
# color the gripper site appropriately based on distance to cube
if self.robots[0].gripper_visualization:
# find closest object
square_dist = lambda x: np.sum(
np.square(x - self.sim.data.get_site_xpos(self.robots[0].gripper.visualization_sites["grip_site"]))
)
dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
dists[self.robots[0].eef_site_id] = np.inf # make sure we don't pick the same site
dists[self.robots[0].eef_cylinder_id] = np.inf
ob_dists = dists[
self.object_site_ids
] # filter out object sites we care about
min_dist = np.min(ob_dists)
ob_id = np.argmin(ob_dists)
# set RGBA for the EEF site here
max_dist = 0.1
scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
rgba = np.zeros(4)
rgba[0] = 1 - scaled
rgba[1] = scaled
rgba[3] = 0.5
self.sim.model.site_rgba[self.robots[0].eef_site_id] = rgba
def _check_robot_configuration(self, robots):
"""
Sanity check to make sure the inputted robots and configuration is acceptable
Args:
robots (str or list of str): Robots to instantiate within this env
"""
if type(robots) is list:
assert len(robots) == 1, "Error: Only one robot should be inputted for this task!"
class PickPlaceSingle(PickPlace):
"""
Easier version of task - place one object into its bin.
A new object is sampled on every reset.
"""
def __init__(self, **kwargs):
assert "single_object_mode" not in kwargs, "invalid set of arguments"
super().__init__(single_object_mode=1, **kwargs)
class PickPlaceMilk(PickPlace):
"""
Easier version of task - place one milk into its bin.
"""
def __init__(self, **kwargs):
assert (
"single_object_mode" not in kwargs and "object_type" not in kwargs
), "invalid set of arguments"
super().__init__(single_object_mode=2, object_type="milk", **kwargs)
class PickPlaceBread(PickPlace):
"""
Easier version of task - place one bread into its bin.
"""
def __init__(self, **kwargs):
assert (
"single_object_mode" not in kwargs and "object_type" not in kwargs
), "invalid set of arguments"
super().__init__(single_object_mode=2, object_type="bread", **kwargs)
class PickPlaceCereal(PickPlace):
"""
Easier version of task - place one cereal into its bin.
"""
def __init__(self, **kwargs):
assert (
"single_object_mode" not in kwargs and "object_type" not in kwargs
), "invalid set of arguments"
super().__init__(single_object_mode=2, object_type="cereal", **kwargs)
class PickPlaceCan(PickPlace):
"""
Easier version of task - place one can into its bin.
"""
def __init__(self, **kwargs):
assert (
"single_object_mode" not in kwargs and "object_type" not in kwargs
), "invalid set of arguments"
super().__init__(single_object_mode=2, object_type="can", **kwargs)
| 2.328125 | 2 |
Task1C.py | Callum-M-Halton/70_Flood_Sys | 0 | 12768085 | <reponame>Callum-M-Halton/70_Flood_Sys
from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
def demonstrate_station_radius():
"""
    Prints a sorted list of the names of the stations within a radius r of the Cambridge city centre
"""
stations_list = stations_within_radius(build_station_list(), (52.2053, 0.1218), 10)
sorted_stations = []
for station in stations_list:
sorted_stations.append(station.name)
sorted_stations.sort()
print(sorted_stations)
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
demonstrate_station_radius() | 3.265625 | 3 |
content_table.py | schwartzadev/covid-data-pipeline | 0 | 12768086 | # OBSOLETE
import os
from directory_cache import DirectoryCache
from loguru import logger
from typing import List
from lxml import html, etree
from unidecode import unidecode
import re
class ContentTable():
"""
    ContentTable represents data extracted from an HTML table.
    It contains both a simplified HTML table (new_element) and a list of rows (rows);
    the rows hold the text content of the table.
"""
def __init__(self, element: html.Element, fail_on_unexpected_tags = True):
self.orig_element = element
self.fail_on_unexpected_tags = fail_on_unexpected_tags
self.id = None
self._new_element = html.Element("table")
self.caption = ""
self.rows = []
self._extract_content()
def contains_data(self) -> bool:
" simple test if a table contains anything that looks like data "
for r in self.rows:
for c in r:
if re.search("^[ 0-9,]+$", c): return True
if re.search(":[ 0-9,]+,", c): return True
return False
def reformat(self) -> html.Element:
" get the newly created element "
return self._new_element
def _extract_content(self):
"""
Pull information from HTML table
1. Ignore TH/TD distinction
2. remove content that only changes presentation
3. assume script/comment tags do not contain data
creates a new element fragment and List[List[Str]]
embedded UL are converted into a comma delimited string
"""
#print(f"input table ===>{html.tostring(self.orig_element)}<<====\n")
self.id = self.orig_element.get("id")
if self.id != None:
self._new_element.attrib["id"] = self.id
tr_temp = html.Element("tr")
for x in self.orig_element:
#print(f"row ===>{html.tostring(x)}<<====\n")
# -- handle TD that are missing surrounding TR
if x.tag == "td":
logger.warning(f"misplaced TD: {html.tostring(x)}")
tr_temp.append(x)
continue
#self._extract_td(x)
elif len(tr_temp) > 0:
self._extract_tr(tr_temp)
tr_temp = html.Element("tr")
if x.tag == "tr":
self._extract_tr(x)
elif x.tag == "thead" or x.tag == "tbody" or x.tag == "tfoot":
for y in x:
if y.tag == "tr":
self._extract_tr(y)
elif self.fail_on_unexpected_tags:
raise Exception(f"unexpected tag in tr: {y.tag}")
else:
logger.warning(f"unexpected tag in tr: {html.tostring(y)}")
elif x.tag == "colgroup":
# logger.warning(f"colgroup: {html.tostring(x)}")
pass
elif x.tag == "caption":
self._extract_caption(x)
elif self.fail_on_unexpected_tags:
logger.warning(f"unexpected tag in table: {html.tostring(x)}")
raise Exception(f"unexpected tag in table: {x.tag}")
else:
logger.warning(f"unexpected tag: {html.tostring(x)}")
#print(f"output table ===>{html.tostring(self.new_element)}<<====\n")
def _extract_caption(self, caption: html.Element):
elem, s = self._extract_any(caption)
self.caption = s
self._new_element.append(elem)
def _extract_tr(self, tr: html.Element):
" extract a row "
#print(f"tr ===>{html.tostring(tr)}<<====\n")
elem = html.Element("tr")
elem.text = ""
elem.tail = ""
cells = []
for x in tr:
if x.tag != "td" and x.tag != "th":
if x.tag == etree.Comment: continue
if x.tag == "script": continue
logger.warning(f" adding td around {html.tostring(x)}")
ch_elem = html.Element("td")
bad_elem, val = self._extract_any(x)
if bad_elem != None:
ch_elem.append(bad_elem)
else:
ch_elem.text = val
else:
ch_elem, val = self._extract_any(x)
if ch_elem == None: ch_elem = html.Element(x.tag)
ch_elem.tail = ""
elem.append(ch_elem)
cells.append(val)
self._new_element.append(elem)
self.rows.append(cells)
def _extract_any(self, x: html.Element) -> [html.Element, str]:
" extract/simplify an HTML element (recursive) "
#print(f"extract any ===>{html.tostring(x)}<<====\n")
# nested tables are special because we are processing a flattend list so ignore them.
if x.tag == "table": return html.Element("table"), "[TABLE]"
# lists are special because we want to build up a comma seperated list
if x.tag == "ul": return self._extract_list(x)
if x.tag == etree.Comment: return etree.Comment(), ""
# no children --> text element
if len(x) == 0:
if x.text == None:
return None, ""
elem, val = x, self._extract_text(x.text)
return elem, val
elem = html.Element(x.tag)
items = []
if x.text != None:
elem.text = x.text
items.append(x.text)
for y in x:
#ignore/strip out layout tags
if y.tag == etree.Comment: continue
if y.tag in ["script", "noscript", "br", "hr", "input", "button", "svg", "img", "form"]: continue
if y.tag in ["span", "div", "h3", "h2", "h1", "small", "strong", "em", "sup", "i",
"a", "b", "u", "p", "ul", "label", "sub"]:
elem_ch, s = self._extract_any(y)
if elem_ch != None:
if len(x) == 1:
if s != None and s != "":
elem.text = s
else:
elem.append(elem_ch)
if s != None and s != "":
items.append(s)
elif y.tag == "table" or y.tag == "iframe":
elem.append(html.Element(y.tag))
items.append(f"[{y.tag.upper()}]")
else:
logger.warning(f"unexpected tag {y.tag} ===>{html.tostring(y)}<<====\n")
elem_ch, s = self._extract_any(y)
if elem_ch != None:
if len(x) == 1:
if s != None and s != "":
elem.text = s
else:
elem.append(elem_ch)
if s != None and s != "":
items.append(s)
val = " ".join(items)
return elem, val
def _extract_list(self, x: html.Element) -> [html.Element, str]:
" extract a list "
#print(f"list ===>{html.tostring(x)}<<====\n")
elem = html.Element("ul")
result = []
for y in x:
#print(f"li ===>{html.tostring(y)}<<====\n")
if y.tag != "li": raise Exception(f"Unexpected tag: {y.tag}")
if len(y) == 0: continue
ch_elem, s = self._extract_any(y)
elem.append(ch_elem)
if s != None:
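                    # escape literal commas so list items survive the comma-join below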
s = s.replace(",", "_comma_")
result.append(s)
val = ", ".join(result)
return elem, val
def _extract_text(self, s: str) -> str:
" filter out specific items with non-ascii chars "
if s == None: return s
return unidecode(s).strip()
| 3.015625 | 3 |
tmp_visualiser.py | sitongye/raspberrypi_sensehat_music | 0 | 12768087 | <reponame>sitongye/raspberrypi_sensehat_music
import time
import os
import sys, math, wave, numpy, pygame
from sense_hat import SenseHat
import subprocess as sp
#from sense_emu import SenseHat
import glob
from time import sleep
from random import randint
import numpy as np
from pygame.locals import *
from scipy.fftpack import dct
PIXEL_SIZE = 8
FPS = 10
sense = SenseHat()
curr_time = time.localtime()
curr_clock = time.strftime("%H:%M:%S", curr_time)
# greeting
if int(curr_clock.split(':')[0])<=12:
greeting = 'Guten Morgen'
elif int(curr_clock.split(':')[0])<17:
greeting = "Guten Nachmittag"
else:
greeting = 'Guten Abend'
# print greeting
background = (0,0,0)
R = (198, 30, 74) #raspberrytips red
W = (255, 255, 255) #white
def draw_pixels_random(pixels, maintain_sec=2):
pixel_state = np.array([0]*64)
#set one pixel at at time
while set(pixel_state)!={1}:
X = randint(0,7)
Y = randint(0,7)
if pixel_state[(X+1)+Y*8-1] == 1:
continue
else:
sense.set_pixel(X, Y, pixels[(X+1)+Y*8-1])
pixel_state[(X+1)+Y*8-1] = 1
sleep(0.02)
sleep(maintain_sec)
#set all pixels at once
love_pixels = [
W, W, W, W, W, W, W, W,
W, R, R, W, R, R, W, W,
R, R, R, R, R, R, R, W,
R, R, R, R, R, R, R, W,
R, R, R, R, R, R, R, W,
W, R, R, R, R, R, W, W,
W, W, R, R, R, W, W, W,
W, W, W, R, W, W, W, W
]
draw_pixels_random(love_pixels)
#set one pixel at at time
sense.clear()
#sense.show_message("{}, Martin! :)".format(greeting), 0.05, W, background)
file_name = 'lightenup'
status = 'stopped'
volume = 5
#fpsclock = pygame.time.Clock()
#screen init, music playback
pygame.mixer.init()
# Get a list of all the music files
path_to_music = "/home/pi/Music"
os.chdir(path_to_music)
music_files = glob.glob("*.mp3")
music_files.sort()
fpsclock = pygame.time.Clock()
volume = 1.0
current_track = 0
no_tracks = len(music_files)
x_axis_color_dict_parcels = {6:tuple((218, 217, 218)),
7:tuple((218, 217, 218)),
0:tuple((42, 75, 114)),
1:tuple((42, 75, 114)),
2:tuple((114, 85, 62)),
3:tuple((114, 85, 62)),
4:tuple((133, 152, 172)),
5:tuple((133, 152, 172))}
#process wave data
filename, extension = os.path.splitext(music_files[current_track])
print(filename, extension)
if extension != ".wav":  # os.path.splitext keeps the leading dot
fnull = open(os.devnull, "w")
pieq_tmp = os.path.expanduser("~") + "/.pieq_tmp/"
wav_path = pieq_tmp + filename + ".wav"
if not os.path.isfile(wav_path):
print("Decompressing...")
sp.call(["mkdir", "-p", pieq_tmp])
sp.call(["ffmpeg", "-i", music_files[current_track], wav_path], stdout=fnull, stderr=sp.STDOUT)
tmp_file_created = True
else:
tmp_file_created = False
wav_path = music_files[current_track]
f = wave.open(wav_path, 'rb')
params = f.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]
str_data = f.readframes(nframes)
f.close()
wave_data = numpy.frombuffer(str_data, dtype = numpy.short)
wave_data.shape = -1, 2
wave_data = wave_data.T
num = nframes
pygame.mixer.music.load(wav_path)
pygame.mixer.music.set_volume(volume)
pygame.mixer.music.play()
start_time = 0.0
pygame.time.Clock().tick()
status = 'playing'
def Visualizer(nums):
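    # take a DCT of a short window of samples and squash the magnitudes into
    # bar heights in 1..PIXEL_SIZE for the 8x8 LED matrix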
nums = int(nums)
h = abs(dct(wave_data[0][nframes - nums:nframes - nums + PIXEL_SIZE],2))
h = [min(PIXEL_SIZE, int(i**(1 / 2.5) * PIXEL_SIZE/ 100)+1) for i in h]
draw_bars_pixels(h, x_color_dict=x_axis_color_dict_parcels)
def vis(status):
global num
if status == "stopped":
num = nframes
return
elif status == "paused":
Visualizer(num)
else:
num -= framerate / FPS
if num > 0:
Visualizer(num)
def get_time():
seconds = max(0, pygame.mixer.music.get_pos() / 1000)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
hms = ("%02d:%02d:%02d" % (h, m, s))
return hms
def controller(key):
global status
if status == "stopped":
if key == K_RETURN:
            pygame.mixer.music.play()
status = "playing"
elif status == "paused":
if key == K_RETURN:
            pygame.mixer.music.stop()
status = "stopped"
elif key == K_SPACE:
pygame.mixer.music.unpause()
status = "playing"
elif status == "playing":
if key == K_RETURN:
pygame.mixer.music.stop()
status = "stopped"
elif key == K_SPACE:
pygame.mixer.music.pause()
status = "paused"
def draw_bars_pixels(h, bgd_clr=(0,0,0), fill_clr=(255,255,255), x_color_dict=None):
matrix = [bgd_clr]*64
np_matrix = np.array(matrix).reshape(8,8,3)
for i in range(len(h)):
if x_color_dict is not None:
fill_clr = x_color_dict[i]
np_matrix[i][(8-h[i]):] = fill_clr
# update column by column
for X in range(0,8):
for Y in range(0,8):
sense.set_pixel(X,Y,np_matrix[X][Y])
#pixel_list = [tuple(i) for i in np_matrix.reshape(64,3).tolist()]
while True:
if num <= 0:
status = "stopped"
fpsclock.tick(FPS)
vis(status)
| 2.9375 | 3 |
rtwilio/tests/test_forms.py | datamade/rapidsms-twilio | 1 | 12768088 | from django.test import TestCase
from rapidsms.tests.harness import CreateDataMixin
from rtwilio.forms import TwilioForm
class TwilioFormTest(CreateDataMixin, TestCase):
def test_valid_form(self):
"""Form should be valid if GET keys match configuration."""
data = {"From": "+12223334444",
"To": "+19998887777",
"Body": self.random_string(50),
"AccountSid": self.random_string(34),
"SmsSid": self.random_string(34)}
form = TwilioForm(data, backend_name='rtwilio-backend')
self.assertTrue(form.is_valid())
def test_invalid_form(self):
"""Form is invalid if POST keys don't match configuration."""
data = {'invalid-phone': '1112223333', 'invalid-message': 'hi there'}
form = TwilioForm(data, backend_name='rtwilio-backend')
self.assertFalse(form.is_valid())
def test_get_incoming_data(self):
"""get_incoming_data should return matching text and connection."""
data = {"From": "+12223334444",
"To": "+19998887777",
"Body": self.random_string(50),
"AccountSid": self.random_string(34),
"SmsSid": self.random_string(34)}
form = TwilioForm(data, backend_name='rtwilio-backend')
form.is_valid()
incoming_data = form.get_incoming_data()
self.assertEqual(data['Body'], incoming_data['text'])
self.assertEqual(data['From'],
incoming_data['connection'].identity)
self.assertEqual(data['SmsSid'],
incoming_data['fields']['external_id'])
self.assertEqual('rtwilio-backend',
incoming_data['connection'].backend.name)
| 2.71875 | 3 |
avmoo/spider_avmo.py | feifeixiaogao/avmoo | 1 | 12768089 | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
import sys
import time
import getopt
import requests
import sqlite3
import math
import re
import os
from lxml import etree
'''
Two helper functions that are not enabled:
data_check()
    checks for data missing from the main table; very slow, must be configured manually
test_page() prints the parsed data for a single page
Image servers:
https://jp.netcdn.space/digital/video/miae00056/miae00056jp-10.jpg
https://pics.dmm.co.jp/digital/video/miae00056/miae00056jp-10.jpg
https://pics.dmm.com/digital/video/miae00056/miae00056jp-10.jpg
Small cover:
https://jp.netcdn.space/digital/video/miae00056/miae00056ps.jpg
https://pics.javbus.info/thumb/{{linkid}}.jpg
Large cover:
https://jp.netcdn.space/digital/video/miae00056/miae00056pl.jpg
'''
class avmo:
def __init__(self):
        #================ main configuration ================
        # target domain
        # self.site = 'avmoo.xyz'
        # single-page proxy
        self.site_url = 'https://moozik.cn/mousehole.php?url=https://avmask.com/cn/'
        # original site URL (this assignment overrides the proxy URL above)
        self.site_url = 'https://avmask.com/cn/'
        # sqlite database file path
if os.path.exists('avmoo_.db'):
self.sqlite_file = 'avmoo_.db'
else:
self.sqlite_file = 'avmoo.db'
        # delay (seconds) between requests in the main loop
self.main_sleep = 1
        # initialize the remaining configuration
self.config()
        #================ test area ================
# self.main(sqlfun.return_dict())
# exit()
'''
        #retry missing addresses
# self.data_check()
exit()
'''
        #================ parse command-line arguments ================
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hs:e:arp:gtu:c",
['help', 'start', 'end', 'auto', 'retry', 'proxies', 'genre', 'stars', 'sub', 'cover']
)
except:
self.usage()
exit()
        # show usage when no arguments are given
if len(sys.argv) == 1:
self.usage()
exit()
opt_dict = {}
opt_r = {
'-h':'-help',
'-s':'-start',
'-e':'-end',
'-a':'-auto',
'-r':'-retry',
'-p':'-proxies',
'-g':'-genre',
'-t':'-stars',
'-u':'-sub',
'-c':'-cover',
}
for op, value in opts:
if op in opt_r:
opt_dict[opt_r[op]] = value
else:
opt_dict[op] = value
if '-help' in opt_dict:
self.usage()
exit()
if '-proxies' in opt_dict:
self.s.proxies['https'] = opt_dict['-proxies']
if '-auto' in opt_dict:
self.auto = True
self.get_last()
if '-cover' in opt_dict:
self.sub_cover = True
if '-start' in opt_dict:
self.start_id = opt_dict['-start']
if '-end' in opt_dict:
self.end_id = opt_dict['-end']
if '-retry' in opt_dict:
self.retry_errorurl()
exit()
if '-sub' in opt_dict:
self.sub_keyword = opt_dict['-sub'].upper()
self.get_sub()
exit()
if '-genre' in opt_dict:
self.genre_update()
exit()
if '-stars' in opt_dict:
self.stars_loop()
exit()
        # main routine
self.main(self.get_linkid())
    # default configuration
def config(self):
        # rows buffered for insert
        self.insert_list = []
        # alphabet used to enumerate linkids
        self.abc_sequence = '0123456789abcdefghijklmnopqrstuvwxyz'
        # char -> value lookup dict dl
        self.dl = {}
        for item in range(len(self.abc_sequence)):
            self.dl[self.abc_sequence[item]] = item
        # subtitle crawler does not overwrite by default
        self.sub_cover = False
        # update flag
        self.last_flag = False
        # whether to retry
        self.flag_retry = True
        # start id
        self.start_id = '0000'
        # end id
        self.end_id = 'zzzz'
        # fetch start/end ids automatically
        self.auto = False
        # insert batch size
        self.insert_threshold = 20
        # counter used when retrying failures
        self.retry_counter = 0
        # retry threshold
        self.retry_threshold = 5
        # main table
        self.table_main = 'av_list'
        # retry table
        self.table_retry = 'av_error_linkid'
        self.table_genre = 'av_genre'
        self.table_stars = 'av_stars'
        # table schema (column names)
self.column = ['id', 'linkid', 'director', 'director_url', 'studio',
'studio_url', 'label', 'label_url', 'series', 'series_url', 'image_len',
'genre', 'len', 'stars', 'av_id', 'title', 'bigimage', 'release_date', 'stars_url']
#表结构str
self.column_str = ",".join(self.column)
#链接数据库
self.conn()
        # site url
        # self.site_url = 'https://{0}/cn'.format(self.site)
        # movie page url
        self.movie_url = self.site_url+'/movie/'
        # director / studio / label / series
self.director = self.site_url+'/director/'
self.studio = self.site_url+'/studio/'
self.label = self.site_url+'/label/'
self.series = self.site_url+'/series/'
self.genre_url = self.site_url+'/genre/'
self.star_url = self.site_url+'/star/'
        # create a requests session
self.s = requests.Session()
        # request timeout
self.s.timeout = 3
self.s.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
        # proxies
self.s.proxies = {
#'https':'http://127.0.0.1:1080'
}
#sqlite conn
def conn(self):
try:
            # connect to sqlite
self.CONN = sqlite3.connect(self.sqlite_file, check_same_thread=False)
self.CUR = self.CONN.cursor()
except:
print('connect database fail.')
sys.exit()
    # print command-line usage
def usage(self):
        usage = '''
-h(-help): show this help text
-s(-start): start id
    e.g. '-s 0000' '-s 1ddd'
-e(-end): end id
    e.g. '-e xxxx' '-e zzzz'
-a(-auto): (most common) read the newest id in the local database and the newest id on the site, then fill in the new records in between
-r(-retry): retry failed links
-g(-genre): update the genre table
-t(-stars): update the stars table
-p(-proxies): use the given https or SOCKS5 proxy server.
    e.g. '-p http://127.0.0.1:1080', '-p socks5://127.0.0.1:52772'
-u(-163sub): search video subtitles by keyword
    e.g. '-u IPZ' '-u ABP'
-c(-cover): re-fetch subtitle data
'''
        print(usage)
def get_subjson(self, response):
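        # returns (total result count, [(id, normalized av_id, release date), ...],
        # last linkID seen, used for paging)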
json = response.json()
data = []
linkID = 0
for item in json.get('Data'):
linkID = item['linkID']
if self.sub_keyword not in item['mkvName'].replace(' ','-'):
continue
avid_tmp = re.findall('[a-zA-Z0-9]+[ \-]\d{3,}',item['mkvName'])
if avid_tmp == []:
continue
time_tmp = re.findall('\d{4}-\d{2}-\d{2}', item['otherName3'])
if time_tmp != []:
time_tmp = time_tmp[0]
else:
time_tmp = ''
data.append(
(
item['ID'].strip(),
avid_tmp[0].upper().replace(' ', '-'),
time_tmp
)
)
return int(json.get('Count')), data, linkID
    # fetch subtitles
def get_sub(self):
def get_suburl(keyword, item=None):
if item == None:
return 'http://www.163sub.org/search.ashx?q={}'.format(keyword)
else:
return 'http://www.163sub.org/search.ashx?q={}&lastid={}'.format(keyword, item)
av_163sub_log = {
'sub_keyword': self.sub_keyword,
'run_time': time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime()
),
'data_count': '',
'insert_count': '',
}
        # look up previous fetch history
SELECT_SQL = 'SELECT * FROM av_163sub_log WHERE sub_keyword = "{}" ORDER BY run_time DESC LIMIT 1;'.format(
self.sub_keyword)
self.CUR.execute(SELECT_SQL)
log_data = self.CUR.fetchall()
if log_data != []:
            print('last query time: {}\ncount: {}\nvalid count: {}\n'.format(
log_data[0][2], log_data[0][3], log_data[0][4]
))
        # query the current record count
response = self.s.get(get_suburl(self.sub_keyword))
res = self.get_subjson(response)
        print('163sub live data: {} records'.format(
            res[0]))
        if not self.sub_cover and log_data != [] and res[0] == log_data[0][3]:
            print('to re-fetch anyway, add the -c(-cover) flag\n')
exit()
resultArr = []
if res[1] != []:
resultArr.extend(res[1])
av_163sub_log['data_count'] = res[0]
else:
            print('nothing found!')
exit()
for item in range(1, math.ceil(res[0] / 10)):
            print('progress:', item * 10)
response = self.s.get(get_suburl(self.sub_keyword, res[2]))
res = self.get_subjson(response)
resultArr.extend(res[1])
        print(self.sub_keyword, 'valid subtitle count:', len(resultArr))
av_163sub_log['insert_count'] = len(resultArr)
        # work out which subtitles are newly added
SELECT_SQL = 'SELECT DISTINCT av_id FROM "av_163sub" where av_id like "{}%" ORDER BY av_id;'.format(
self.sub_keyword)
self.CUR.execute(SELECT_SQL)
fetch_data = self.CUR.fetchall()
if fetch_data != []:
history_data = set([x[0] for x in fetch_data])
new_data = set([x[1] for x in resultArr])
new_sub = new_data - history_data
if len(new_sub) != 0:
                print('new subtitles:')
print("\n".join(list(new_sub)))
if len(resultArr) > 0:
INSERT_SQL = 'REPLACE INTO av_163sub VALUES({});'.format('),('.join([
'"{}","{}","{}"'.format(x[0], x[1], x[2]) for x in resultArr]))
INSERT_LOG = 'REPLACE INTO av_163sub_log ("sub_keyword","run_time","data_count","insert_count")VALUES("{}","{}","{}","{}");'.format(
av_163sub_log['sub_keyword'],
av_163sub_log['run_time'],
av_163sub_log['data_count'],
av_163sub_log['insert_count'],
)
while True:
try:
self.CUR.execute(INSERT_SQL)
self.CUR.execute(INSERT_LOG)
self.CONN.commit()
break
except:
print('database is locked!')
time.sleep(3)
    # main loop: fetch and parse each movie page
def main(self, looplist):
for item in looplist:
url = self.movie_url + item
time.sleep(self.main_sleep)
try:
res = self.s.get(url)
if res.status_code != 200:
self.insert_retry((item, res.status_code))
print(url, res.status_code)
continue
except:
print(url, 'requests.get error')
self.insert_retry((item, 777))
continue
try:
html = etree.HTML(res.text)
except:
print(url, 'etree.HTML error')
self.insert_retry((item, 888))
continue
            # parse page content
data = self.movie_page_data(html)
            # derive the numeric id from the linkid
id_column = self.linkid2id(item)
            # print progress
print(data[12].ljust(30), data[15].ljust(11), item.ljust(5), id_column)
self.insert_list.append(
"'{0}','{1}','{2}'".format(id_column, item, "','".join(data))
)
            # persist buffered rows
if len(self.insert_list) == self.insert_threshold:
self.movie_save()
        # insert any remaining rows
self.movie_save()
        # retry failed records
self.retry_errorurl()
    # get the most recent id in the database and on the site
def get_last(self):
sql = "SELECT linkid FROM {0} ORDER BY linkid DESC LIMIT 0,1".format(self.table_main)
self.CUR.execute(sql)
res = self.CUR.fetchall()
self.start_id = res[0][0]
try:
response = self.s.get(self.site_url)
except:
print('timeout.')
exit()
if response.status_code != 200:
print('page error.')
exit()
html = etree.HTML(response.text)
self.end_id = html.xpath('//*[@id="waterfall"]/div[1]/a')[0].attrib.get('href')[-4:]
        print('newest id in db: {0}, newest id online: {1}'.format(self.start_id, self.end_id))
        print('records in this update: {}'.format(self.linkid2id(self.end_id)-self.linkid2id(self.start_id)))
    # insert into the retry table
def insert_retry(self, data):
self.CUR.execute("REPLACE INTO {0}(linkid, status_code, datetime)VALUES('{1[0]}', {1[1]}, '{2}');"
.format(
self.table_retry,
data,
time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime()
)
)
)
self.CONN.commit()
    # fetch star (actress) pages
def stars_loop(self, map_list = []):
self.CUR.execute(
'SELECT linkid FROM {} ORDER BY linkid DESC LIMIT 0,1'.format(self.table_stars))
self.start_id = self.CUR.fetchall()[0][0]
self.end_id = '3000'
def get_val(str):
return str.split(':')[1].strip()
page_404_count = 0
if map_list == []:
map_list = self.get_linkid()
for linkid in map_list:
url = self.star_url + linkid
sort_id = self.linkid2id(linkid)
print(linkid, sort_id)
data = {
'id': sort_id,
'linkid': linkid,
'name': '',
'name_history': '',
'birthday': '',
'height': '',
'cup': '',
'bust': '',
'waist': '',
'hips': '',
'hometown': '',
'hobby': '',
'headimg': ''
}
try:
response = self.s.get(url)
html = etree.HTML(response.text)
except:
data['birthday'] = 'error'
self.stars_save(data)
                print('error occurred, sleeping 10s')
time.sleep(10)
continue
if response.status_code == 403:
print(data['id'], ' ', data['linkid'],' status_code:403')
exit()
if response.status_code == 404:
                # ids above 38000: a 404 here means we are past the end, so stop
if sort_id > 38000:
print('page 404,done!')
exit()
page_404_count += 1
                # re-check entries previously marked as errors
if map_list == []:
sql = 'SELECT linkid FROM "av_stars" WHERE birthday="error"'
self.CUR.execute(sql)
error_list = self.CUR.fetchall()
map_list = [x[0] for x in error_list]
self.stars_loop(map_list)
if page_404_count == 10:
print('stat=404 count:10')
exit()
else:
print(data['id'],' ',data['linkid'],' ',page_404_count)
data['birthday'] = '404'
self.stars_save(data)
time.sleep(1)
continue
page_404_count = 0
try:
data['name'] = html.xpath(
'/html/head/meta[8]/@content')[0].split(',', 1)[0]
data['headimg'] = html.xpath(
'//*[@id="waterfall"]/div[1]/div/div[1]/img/@src')[0].split('/', 3)[3].replace('mono/actjpgs/nowprinting.gif', '')
print(data)
except:
print(response.text)
exit()
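            # the literal strings below match the site's Chinese field labels
            # (birthday, height, cup, bust, waist, hips, hometown, hobby)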
for item_p in html.xpath('//*[@id="waterfall"]/div[1]/div/div[2]/p'):
if item_p.text == None:
continue
if '生日' in item_p.text:
data['birthday'] = get_val(item_p.text)
continue
if '身高' in item_p.text:
data['height'] = get_val(item_p.text)
continue
if '罩杯' in item_p.text:
data['cup'] = get_val(item_p.text)
continue
if '胸围' in item_p.text:
data['bust'] = get_val(item_p.text)
continue
if '腰围' in item_p.text:
data['waist'] = get_val(item_p.text)
continue
if '臀围' in item_p.text:
data['hips'] = get_val(item_p.text)
continue
if '出生地' in item_p.text:
data['hometown'] = get_val(item_p.text)
continue
if '爱好' in item_p.text:
data['hobby'] = get_val(item_p.text)
continue
            # record the name in parentheses as a former name
tmp = data['name'].replace('(','(').replace(')','').split('(')
if len(tmp) == 2:
data['name_history'] = tmp[1]
print(
data['birthday'].ljust(13),
data['height'].ljust(7),
data['cup'].ljust(3),
data['bust'].ljust(7),
data['waist'].ljust(7),
data['hips'].ljust(7),
data['name'].ljust(15),
data['hometown']
)
self.stars_save(data)
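            # throttle requests; the delay scales with cup size, presumably as a
            # rough popularity proxy (an assumption, not documented by the author)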
if data['cup'] == 'F':
time.sleep(5)
elif data['cup'] == 'E':
time.sleep(3)
elif data['cup'] == 'D':
time.sleep(2.5)
elif data['cup'] == 'C':
time.sleep(2)
elif data['cup'] == 'B':
time.sleep(1)
else:
time.sleep(1)
def stars_save(self, data):
insert_sql = 'REPLACE INTO "{}" VALUES({},"{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}")'.format(
self.table_stars,
data['id'],
data['linkid'],
data['name'],
data['name_history'],
data['birthday'],
data['height'],
data['cup'],
data['bust'],
data['waist'],
data['hips'],
data['hometown'],
data['hobby'],
data['headimg']
)
self.CUR.execute(insert_sql)
self.CONN.commit()
    # iterate over linkids between start_id and end_id
def get_linkid(self):
for abcd in self.abc_map():
if abcd <= self.start_id:
continue
if self.start_id < abcd <= self.end_id:
yield abcd
if abcd > self.end_id:
print('start:{0} end:{1} done!'.format(
self.start_id, self.end_id))
self.movie_save()
exit()
    # convert a 4-char base-36 linkid into its ordinal integer id
def linkid2id(self, item):
return self.dl[item[3]] + self.dl[item[2]]*36 + self.dl[item[1]]*1296 + self.dl[item[0]]*46656
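    # e.g. linkid2id('0001') == 1, linkid2id('000a') == 10,
    # linkid2id('0010') == 36, linkid2id('zzzz') == 36**4 - 1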
    # flush buffered rows into the database
def movie_save(self):
if len(self.insert_list) == 0:
return
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
print('INSERT:', len(self.insert_list))
self.insert_list = []
self.retry_counter += 1
if self.flag_retry:
            # retry failed addresses
if self.retry_counter >= self.retry_threshold:
self.retry_counter = 0
self.retry_errorurl()
def replace_sql(self, table, column, data):
self.CUR.execute("REPLACE INTO {0}({1})VALUES({2});".format(table, column, data))
self.CONN.commit()
    # retry failed links
def retry_errorurl(self):
self.CUR.execute("SELECT * FROM {0} WHERE status_code<>'404' ORDER BY linkid;".format(self.table_retry))
res_retry = self.CUR.fetchall()
reslen = len(res_retry)
if reslen == 0:
return
print('error url count:', reslen)
del_list = []
update_list = []
def update_sql(update_list):
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sql = "REPLACE INTO {0}(linkid, status_code, datetime)VALUES({1});".format(
self.table_retry, "),(".join(["'{0[0]}',{0[1]},'{1}'".format(x, time_now) for x in update_list]))
self.CUR.execute(sql)
self.CONN.commit()
def delete_sql(del_list):
sql = 'DELETE FROM {0} WHERE {1};'.format(
self.table_retry, ' OR '.join([" linkid='{0}' ".format(x) for x in del_list]))
self.CUR.execute(sql)
self.CONN.commit()
for item in res_retry:
retry_linkid = item[0]
reslen -= 1
            # batch the table updates for efficiency
if len(update_list) == 20:
update_sql(update_list)
update_list = []
print('done 20.')
url = self.movie_url + retry_linkid
try:
response = self.s.get(url)
html = etree.HTML(response.text)
except:
                # rewrite the retry record
if response.status_code == 404:
update_list.append((retry_linkid, 404))
print(reslen, retry_linkid, 'status_code:404')
continue
if response.status_code != 200:
                # rewrite the retry record
update_list.append((retry_linkid, response.status_code))
print(reslen, retry_linkid, 'status_code:{}'.format(response.status_code))
continue
print(reslen, retry_linkid, 'success')
data = self.movie_page_data(html)
id = self.linkid2id(retry_linkid)
self.insert_list.append("'{0}','{1}','{2}'".format(id, retry_linkid, "','".join(data)))
del_list.append(retry_linkid)
            # persist buffered rows
if len(self.insert_list) == self.insert_threshold:
                # insert rows
print(self.insert_threshold, 'insert.')
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
if del_list != []:
delete_sql(del_list)
del_list = []
        # insert remaining rows
if len(self.insert_list) != 0:
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
        # delete resolved retry entries
if len(del_list) != 0:
delete_sql(del_list)
        # update remaining retry entries
if len(update_list) != 0:
update_sql(update_list)
def movie_page_data(self, html):
data = ['' for x in range(17)]
        # movie code (av_id)
try:
data[12] = html.xpath('/html/body/div[2]/div[1]/div[2]/p[1]/span[2]/text()')[0]
except:
return data
        # extract: director, studio, label, series
right_info = html.xpath('/html/body/div[2]/div[1]/div[2]/p/a')
for i in right_info:
if i.text == None:
continue
tmp_text = i.text.replace("'", '"')
tmp_href = i.attrib.get('href')
if self.director in tmp_href:
                # director
data[0] = tmp_text
data[1] = tmp_href.replace(self.director, '')
elif self.studio in tmp_href:
                # studio
data[2] = tmp_text
data[3] = tmp_href.replace(self.studio, '')
elif self.label in tmp_href:
                # label (publisher)
data[4] = tmp_text
data[5] = tmp_href.replace(self.label, '')
elif self.series in tmp_href:
                # series
data[6] = tmp_text
data[7] = tmp_href.replace(self.series, '')
        # number of sample images (image_len)
data[8] = str(len(html.xpath('//div[@id="sample-waterfall"]/a')))
        # genre list, pipe-separated
data[9] = '|'.join(html.xpath('/html/body/div[2]/div[1]/div[2]/p/span/a/text()')).replace("'", '"')
        # runtime in minutes (len); '分钟' is the page's "minutes" label
lentext = html.xpath('/html/body/div[2]/div[1]/div[2]/p[3]/text()')
if len(lentext) != 0 and '分钟' in lentext[0]:
data[10] = lentext[0].replace('分钟', '').strip()
else:
data[10] = '0'
        # stars, pipe-separated
data[11] = '|'.join(html.xpath('//div[@id="avatar-waterfall"]/a/span/text()')).replace("'", '"')
        # title with the leading movie code stripped
data[13] = html.xpath('/html/body/div[2]/h3/text()')[0][len(data[12]) + 1:].replace("'", '"')
        # cover image: keep only the path after the domain
data[14] = '/' + html.xpath('/html/body/div[2]/div[1]/div[1]/a/img/@src')[0].split('/',5)[5]
        # release date
data[15] = html.xpath('/html/body/div[2]/div[1]/div[2]/p[2]/text()')[0].strip()
#stars_url
stars_url_list = html.xpath('//div[@id="avatar-waterfall"]/a/@href')
if stars_url_list != None and len(stars_url_list)!=0:
data[16] = '|'.join([re.findall('([a-z0-9]+)$',x)[0].rjust(4,'0') for x in stars_url_list])
return data
def abc_map(self):
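        # yields all 36**4 four-character linkids in lexicographic order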
for i1 in self.abc_sequence:
for i2 in self.abc_sequence:
for i3 in self.abc_sequence:
for i4 in self.abc_sequence:
yield (i1 + i2 + i3 + i4)
    # check for pages that were missed and insert them into the database
    # walks linkids in order to find gaps; it does not read from the retry table
def data_check(self):
self.CUR.execute("SELECT linkid FROM {0} WHERE 1 ORDER BY linkid;".format(self.table_main))
res = self.CUR.fetchall()
res_list = [x[0] for x in res]
res_min = res_list[0]
res_max = res_list[len(res)-1]
miss_list = []
for abcd in self.abc_map():
if abcd <= res_min:
continue
if abcd >= res_max:
break
if abcd in res_list:
continue
else:
miss_list.append(abcd)
continue
print('miss count:', len(miss_list))
        print('to run the full sweep, edit the code manually')
exit()
self.CUR.execute('DELETE FROM "{0}";'.format(self.table_retry))
self.CONN.commit()
if len(miss_list) != 0:
for item in miss_list:
self.CUR.execute('INSERT INTO "{0}" ("linkid") VALUES ("{1}");'.format(self.table_retry, item))
self.CONN.commit()
else:
print("miss_list is empty")
return
#重试错误链接并插入数据库
self.CUR.execute('SELECT linkid FROM "{0}" ORDER BY linkid;'.format(self.table_retry))
res = self.CUR.fetchall()
self.main([x[0] for x in res])
#插入剩余的数据
self.movie_save()
    # fetch all genres
def genre_update(self):
html = etree.HTML(self.s.get(self.genre_url).text)
insert_list = []
h4 = html.xpath('/html/body/div[2]/h4/text()')
div = html.xpath('/html/body/div[2]/div')
for div_item in range(len(div)):
g_title = h4[div_item]
a_list = div[div_item].xpath('a')
for a_item in a_list:
if a_item.text == None:
continue
g_name = a_item.text#.replace('・','')
g_id = a_item.attrib.get('href').replace(self.genre_url,'')
insert_list.append("'{0}','{1}','{2}'".format(g_id,g_name,g_title))
sql = "REPLACE INTO {} (id,name,title)VALUES({});".format(self.table_genre, "),(".join(insert_list))
self.CUR.execute(sql)
self.CONN.commit()
print('update record:{}'.format(len(insert_list)))
    # test a single page
def test_page(self, linkid):
url = self.movie_url + linkid
res = self.s.get(url).text
        # parse page content
data = self.movie_page_data(etree.HTML(res))
print(data)
if __name__ == '__main__':
avmo()
| 2.15625 | 2 |
py/h2o_config_dump_example.py | vkuznet/h2o | 1 | 12768090 | <gh_stars>1-10
# used to create an example of what we want for the json file
# not used for normal execution
import json
hostDict = {}
hostDict['username'] = '0xdiag'
hostDict['password'] = '<PASSWORD>'
hostDict['h2o_per_host'] = 2
hostDict['ip'] = []
hostDict['ip'].append('192.168.0.30')
hostDict['ip'].append('192.168.0.31')
hostDict['ip'].append('192.168.0.32')
hostDict['ip'].append('192.168.0.33')
hostDict['ip'].append('192.168.0.34')
jsonConfig = json.dumps(hostDict, sort_keys=False, indent=4)
print(jsonConfig)
with open('example.json', 'wb') as fp:
json.dump(hostDict, fp, sort_keys=False, indent=4)
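# reading the config back (a sketch, not part of the original script):
# with open('example.json') as fp:
#     hostDict = json.load(fp)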
| 2.59375 | 3 |
baselines/arch/lostgans/resnet_generator_v2.py | atmacvit/meronymnet | 1 | 12768091 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .norm_module import *
from .mask_regression import *
from .sync_batchnorm import SynchronizedBatchNorm2d
BatchNorm = SynchronizedBatchNorm2d
class ResnetGenerator128(nn.Module):
def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
super(ResnetGenerator128, self).__init__()
self.num_classes = num_classes
self.label_embedding = nn.Embedding(num_classes, 180)
num_w = 128+180
self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))
self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)
self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)
self.res3 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)
self.res4 = ResBlock(ch*4, ch*2, upsample=True, num_w=num_w, psp_module=True)
self.res5 = ResBlock(ch*2, ch*1, upsample=True, num_w=num_w, predict_mask=False)
self.final = nn.Sequential(BatchNorm(ch),
nn.ReLU(),
conv2d(ch, output_dim, 3, 1, 1),
nn.Tanh())
# mapping function
mapping = list()
self.mapping = nn.Sequential(*mapping)
self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))
self.sigmoid = nn.Sigmoid()
self.mask_regress = MaskRegressNetv2(num_w)
self.init_parameter()
def forward(self, z, bbox, z_im=None, y=None):
b, o = z.size(0), z.size(1)
label_embedding = self.label_embedding(y)
z = z.view(b * o, -1)
label_embedding = label_embedding.view(b * o, -1)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# preprocess bbox
bmask = self.mask_regress(w, bbox)
if z_im is None:
z_im = torch.randn((b, 128), device=z.device)
bbox_mask_ = bbox_mask(z, bbox, 64, 64)
# 4x4
x = self.fc(z_im).view(b, -1, 4, 4)
# 8x8
x, stage_mask = self.res1(x, w, bmask)
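        # at every resolution below, the regressed object masks (bmask) are blended
        # with the semantic masks predicted by the previous stage, gated by a
        # learned per-class weight alpha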
# 16x16
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1
x, stage_mask = self.res2(x, w, stage_bbox)
# 32x32
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2
x, stage_mask = self.res3(x, w, stage_bbox)
# 64x64
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3
x, stage_mask = self.res4(x, w, stage_bbox)
# 128x128
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4
x, _ = self.res5(x, w, stage_bbox)
# to RGB
x = self.final(x)
return x, stage_bbox
def init_parameter(self):
for k in self.named_parameters():
if k[1].dim() > 1:
torch.nn.init.orthogonal_(k[1])
if k[0][-4:] == 'bias':
torch.nn.init.constant_(k[1], 0)
class ResnetGenerator256(nn.Module):
def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
super(ResnetGenerator256, self).__init__()
self.num_classes = num_classes
self.label_embedding = nn.Embedding(num_classes, 180)
num_w = 128+180
self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))
self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)
self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)
self.res3 = ResBlock(ch*8, ch*8, upsample=True, num_w=num_w)
self.res4 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)
self.res5 = ResBlock(ch*4, ch*2, upsample=True, num_w=num_w)
self.res6 = ResBlock(ch*2, ch*1, upsample=True, num_w=num_w, predict_mask=False)
self.final = nn.Sequential(BatchNorm(ch),
nn.ReLU(),
conv2d(ch, output_dim, 3, 1, 1),
nn.Tanh())
# mapping function
mapping = list()
self.mapping = nn.Sequential(*mapping)
self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha5 = nn.Parameter(torch.zeros(1, 184, 1))
self.sigmoid = nn.Sigmoid()
self.mask_regress = MaskRegressNetv2(num_w)
self.init_parameter()
def forward(self, z, bbox, z_im=None, y=None, include_mask_loss=False):
b, o = z.size(0), z.size(1)
label_embedding = self.label_embedding(y)
z = z.view(b * o, -1)
label_embedding = label_embedding.view(b * o, -1)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# preprocess bbox
bmask = self.mask_regress(w, bbox)
if z_im is None:
z_im = torch.randn((b, 128), device=z.device)
bbox_mask_ = bbox_mask(z, bbox, 128, 128)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# 4x4
x = self.fc(z_im).view(b, -1, 4, 4)
# 8x8
# label mask
x, stage_mask = self.res1(x, w, bmask)
# 16x16
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1
x, stage_mask = self.res2(x, w, stage_bbox)
# 32x32
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2
x, stage_mask = self.res3(x, w, stage_bbox)
# 64x64
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3
x, stage_mask = self.res4(x, w, stage_bbox)
# 128x128
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4
x, stage_mask = self.res5(x, w, stage_bbox)
# 256x256
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha5 = torch.gather(self.sigmoid(self.alpha5).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha5) + seman_bbox * alpha5
x, _ = self.res6(x, w, stage_bbox)
# to RGB
x = self.final(x)
return x
def init_parameter(self):
for k in self.named_parameters():
if k[1].dim() > 1:
torch.nn.init.orthogonal_(k[1])
if k[0][-4:] == 'bias':
torch.nn.init.constant_(k[1], 0)
class ResBlock(nn.Module):
def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, upsample=False, num_w=128, predict_mask=True, psp_module=False):
super(ResBlock, self).__init__()
self.upsample = upsample
self.h_ch = h_ch if h_ch else out_ch
self.conv1 = conv2d(in_ch, self.h_ch, ksize, pad=pad)
self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=pad)
self.b1 = SpatialAdaptiveSynBatchNorm2d(in_ch, num_w=num_w, batchnorm_func=BatchNorm)
self.b2 = SpatialAdaptiveSynBatchNorm2d(self.h_ch, num_w=num_w, batchnorm_func=BatchNorm)
self.learnable_sc = in_ch != out_ch or upsample
if self.learnable_sc:
self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
self.activation = nn.ReLU()
self.predict_mask = predict_mask
if self.predict_mask:
if psp_module:
self.conv_mask = nn.Sequential(PSPModule(out_ch, 100),
nn.Conv2d(100, 184, kernel_size=1))
else:
self.conv_mask = nn.Sequential(nn.Conv2d(out_ch, 100, 3, 1, 1),
BatchNorm(100),
nn.ReLU(),
nn.Conv2d(100, 184, 1, 1, 0, bias=True))
def residual(self, in_feat, w, bbox):
x = in_feat
x = self.b1(x, w, bbox)
x = self.activation(x)
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.conv1(x)
x = self.b2(x, w, bbox)
x = self.activation(x)
x = self.conv2(x)
return x
def shortcut(self, x):
if self.learnable_sc:
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.c_sc(x)
return x
def forward(self, in_feat, w, bbox):
out_feat = self.residual(in_feat, w, bbox) + self.shortcut(in_feat)
if self.predict_mask:
mask = self.conv_mask(out_feat)
else:
mask = None
return out_feat, mask
def conv2d(in_feat, out_feat, kernel_size=3, stride=1, pad=1, spectral_norm=True):
conv = nn.Conv2d(in_feat, out_feat, kernel_size, stride, pad)
if spectral_norm:
return nn.utils.spectral_norm(conv, eps=1e-4)
else:
return conv
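# gathers, per batch element, the slices of `input` along `dim` selected by that
# element's own `index` (index is first broadcast to input's shape)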
def batched_index_select(input, dim, index):
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
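# bbox_mask rasterizes normalized (x, y, w, h) boxes into hard binary masks of
# shape (b, o, H, W): 1 inside each box, 0 outside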
def bbox_mask(x, bbox, H, W):
b, o, _ = bbox.size()
N = b * o
bbox_1 = bbox.float().view(-1, 4)
x0, y0 = bbox_1[:, 0], bbox_1[:, 1]
ww, hh = bbox_1[:, 2], bbox_1[:, 3]
x0 = x0.contiguous().view(N, 1).expand(N, H)
ww = ww.contiguous().view(N, 1).expand(N, H)
y0 = y0.contiguous().view(N, 1).expand(N, W)
hh = hh.contiguous().view(N, 1).expand(N, W)
X = torch.linspace(0, 1, steps=W).view(1, W).expand(N, W).cuda(device=x.device)
Y = torch.linspace(0, 1, steps=H).view(1, H).expand(N, H).cuda(device=x.device)
X = (X - x0) / ww
Y = (Y - y0) / hh
X_out_mask = ((X < 0) + (X > 1)).view(N, 1, W).expand(N, H, W)
Y_out_mask = ((Y < 0) + (Y > 1)).view(N, H, 1).expand(N, H, W)
out_mask = 1 - (X_out_mask + Y_out_mask).float().clamp(max=1)
return out_mask.view(b, o, H, W)
class PSPModule(nn.Module):
"""
Reference:
Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
"""
def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
super(PSPModule, self).__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes])
self.bottleneck = nn.Sequential(
nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def _make_stage(self, features, out_features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
bn = nn.BatchNorm2d(out_features)
return nn.Sequential(prior, conv, bn, nn.ReLU())
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return bottle
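

# Hypothetical CPU smoke test (not part of the original file). ResBlock and
# PSPModule depend on externally defined BatchNorm layers, so only the
# spectral-norm conv wrapper is exercised here.
if __name__ == '__main__':
    layer = conv2d(3, 8)
    out = layer(torch.randn(1, 3, 16, 16))
    assert out.shape == (1, 8, 16, 16)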
| 2.296875 | 2 |
tests/test_stac.py | pjhartzell/aerial | 0 | 12768092 | <reponame>pjhartzell/aerial<gh_stars>0
import unittest
from stactools.aerial import stac
from tests import test_data
class StacTest(unittest.TestCase):
def test_create_collection(self):
collection = stac.create_collection()
collection.set_self_href("")
self.assertEqual(collection.id, "test-aerial-imagery")
self.assertEqual(collection.extent.spatial.to_dict()["bbox"],
[[-180., 90., 180., -90.]])
collection.validate()
def test_create_item(self):
path = test_data.get_external_data("EO_20190308.1618_11.tif")
item = stac.create_item(path)
self.assertEqual(item.id, "EO_20190308.1618_11")
item.validate()
| 2.609375 | 3 |
python/distance_to_closest_generated_sample.py | VAlex22/ND_VAE | 7 | 12768093 | # Separate script, to use python multiprocessing, that utilizes pickle #
import sys
from multiprocessing import Pool, cpu_count
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
from scipy import optimize
from python.util import get_model_params, model_path, model_epoch
from python.model.vae import build_vae
test_data_path, model, bound, output_path = sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4]
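# Expected invocation (illustrative):
#   python distance_to_closest_generated_sample.py <test_data.npy> <model_name> <bound> <scores.npy>
# A bound of 0 means the latent-space optimization runs unconstrained.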
n_channels, depth, z_dim, n_hid_first, lam, L = get_model_params(model)
test_data = np.load(test_data_path)
# load trained model
input_var = T.matrix('inputs')
z_var = T.vector()
l_z_mean, l_z_stddev, _, _, _, l_x = build_vae(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,
n_hid_first=n_hid_first, L=1)
with np.load(model_path(model) + str(model_epoch(model)) + '.npz') as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
nn.layers.set_all_param_values(l_x, param_values)
# create encoder function to find initial values for z
encoder = nn.layers.get_output([l_z_mean, l_z_stddev], deterministic=True)
encode = theano.function([input_var], encoder)
# create decoder function
generated_x = nn.layers.get_output(l_x, {l_z_mean: z_var}, deterministic=True)
gen_fn = theano.function([z_var], generated_x)
# create l2 loss to optimize over latent space
z_mean, z_stddev = encode(test_data)
z_0 = z_mean
def loss(z, voxel):
x = gen_fn(z).reshape(n_channels)
return np.linalg.norm(voxel-x)
if bound == 0:
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel)
return loss(optimize_result.x, voxel)
else:
    boundaries = ((-bound, bound),) * z_dim
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel, bounds=boundaries)
return loss(optimize_result.x, voxel)
args = [(loss, z_0[i], test_data[i]) for i in range(len(test_data))]
p = Pool(cpu_count())
novelty_score = np.array(p.map(minimize_voxel, args))
np.save(output_path, novelty_score)
| 2.078125 | 2 |
ginjinn/core/project.py | AGOberprieler/ginjinn | 3 | 12768094 | <reponame>AGOberprieler/ginjinn
import importlib_resources as resources
import yaml
import json
import jinja2
from jinja2 import Template
from pathlib import Path
import shutil
from ginjinn import data_files
from ginjinn import config
from ginjinn.core import Configuration
from ginjinn.core.tf_dataset import TFDataset, DatasetNotReadyError
from ginjinn.core.tf_model import TFModel, ModelNotReadyError, ModelNotTrainedError, ModelNotExportedError
from ginjinn.core.tf_augmentation import TFAugmentation
''' Default configuration for ginjinn Project object. '''
DEFAULTS = Configuration({
'annotation_path': 'ENTER PATH HERE',
'annotation_type': 'PascalVOC',
'image_dir': 'ENTER PATH HERE',
'test_fraction': 0.25,
'model': 'faster_rcnn_inception_v2_coco',
'use_checkpoint': True,
'checkpoint_path': '',
'n_iter': 5000,
'batch_size': 1,
'augmentation': {
'flip_horizontal': {
'active': True,
},
'flip_vertical': {
'active': True,
},
'flip_90': {
'active': True,
},
'change_brightness': {
'active': False,
'min_delta': 0.1,
'max_delta': 0.2,
},
'change_contrast': {
'active': False,
'min_delta': 0.8,
'max_delta': 1.25,
},
'jitter_boxes': {
'active': False,
'ratio': 0.05,
},
}
})
class MalformedConfigurationError(Exception):
    ''' Error indicating the configuration file could not be parsed '''
pass
class ProjectNotReadyError(Exception):
''' Error indicating the project ist not yet set up '''
pass
class Project:
''' A ginjinn Project object.
The main object used to store configuration and run training,
evalution, export and inference.
'''
def __init__(self, project_dir):
project_path = Path(project_dir).resolve()
self.config = Configuration({
'project_dir': str(project_path),
'dataset_dir': str(project_path.joinpath('dataset').resolve()),
'model_dir': str(project_path.joinpath('model').resolve()),
'export_dir': str(project_path.joinpath('export').resolve()),
'config_path': str(project_path.joinpath('config.yaml').resolve()),
'project_json': str(project_path.joinpath('project.json').resolve()),
'ready': False,
'annotation_path': DEFAULTS.annotation_path,
'annotation_type': DEFAULTS.annotation_type,
'image_dir': DEFAULTS.image_dir,
'test_fraction': DEFAULTS.test_fraction,
'model': DEFAULTS.model,
'use_checkpoint': DEFAULTS.use_checkpoint,
'checkpoint_path': DEFAULTS.checkpoint_path,
'n_iter': DEFAULTS.n_iter,
'batch_size': DEFAULTS.batch_size,
'augmentation': {
'flip_horizontal': {
'active': DEFAULTS.augmentation.flip_horizontal.active,
},
'flip_vertical': {
'active': DEFAULTS.augmentation.flip_vertical.active,
},
'flip_90': {
'active': DEFAULTS.augmentation.flip_90.active,
},
'change_brightness': {
'active': DEFAULTS.augmentation.change_brightness.active,
'max_delta': DEFAULTS.augmentation.change_brightness.max_delta,
},
'change_contrast': {
'active': DEFAULTS.augmentation.change_contrast.active,
'min_delta': DEFAULTS.augmentation.change_contrast.min_delta,
'max_delta': DEFAULTS.augmentation.change_contrast.max_delta,
},
'jitter_boxes': {
'active': DEFAULTS.augmentation.jitter_boxes.active,
'ratio': DEFAULTS.augmentation.jitter_boxes.ratio,
},
},
})
def write_config(self):
'''
Write user-facing configuration yaml file
'''
template = Template(resources.read_text(data_files, 'config_template_jinja2.yaml'))
rendered_template = template.render(config=self.config)
with open(self.config.config_path, 'w') as f:
f.write(rendered_template)
def load_config(self):
'''
Update configuration from user-facing configuration yaml file
'''
with open(self.config.config_path) as f:
try:
_config = yaml.safe_load(f)
except yaml.YAMLError as e:
                msg = f'Could not parse config.yaml:\n{e}'
raise MalformedConfigurationError(msg)
try:
TFAugmentation(_config['augmentation'])
except:
msg = 'Could not parse augmentation options. Check your config.yaml'
raise MalformedConfigurationError(msg)
# resolve paths to allow '~' in path on linux
_config['image_dir'] = str(Path(_config['image_dir']).resolve(strict=True))
_config['annotation_path'] = str(Path(_config['annotation_path']).resolve(strict=True))
if _config['checkpoint_path']:
_config['checkpoint_path'] = str(Path(_config['checkpoint_path']).resolve(strict=True))
# print(_config)
self.config.update(_config)
def to_json(self, fpath=None):
'''
Write internal configuration json file
'''
fpath = fpath or self.config.project_json
with open(fpath, 'w') as f:
json.dump(self.config, f, indent=4)
def load_json(self, fpath=None):
'''
Replace/Update configuration with configuration from json file.
'''
fpath = fpath or self.config.project_json
with open(fpath) as f:
# self.config = Configuration(json.load(f))
self.config.update(Configuration(json.load(f)))
def setup_project_dir(self, force=False):
''' Setup project directory and generate config files. '''
# TODO: Add some info for user
# create project directory if it does not exist
if Path(self.config.project_dir).exists() and force:
shutil.rmtree(self.config.project_dir)
Path(self.config.project_dir).mkdir(exist_ok=False)
# generate user-facing config
self.write_config()
# generate internal config and update ready status
self.config.ready = True
self.to_json()
def cleanup_project_dir(self):
''' Remove project directory '''
# cleanup dataset dir
self.cleanup_dataset_dir()
# remove project files
for fpath in self._project_files:
path = Path(fpath)
if path.exists():
path.unlink()
# remove project directory
path = Path(self.config.project_dir)
if path.exists():
try:
path.rmdir()
except:
msg = f'''Something went wrong cleaning up the project directory.
Please remove directory "{str(path.resolve())}" manually.'''
                raise Exception(msg)
self.config.ready = False
def cleanup_data_model_export(self):
self.cleanup_dataset_dir()
self.cleanup_model_export()
self.cleanup_model_training()
self.cleanup_model_dir()
def is_ready(self):
# TODO: Maybe check whether configuration files exist instead of storing ready state in config?
# Might be more robust
return self.config.ready
# ==
# Dataset
# ==
def setup_dataset(self, force=False):
''' Prepare input files for Tensorflow. This builds a dataset directory. '''
# check if project is set up
self._assert_project_is_ready()
if Path(self.config.dataset_dir).exists() and not force:
raise Exception('Dataset already exists. Rerun with --force if you want to overwrite it.')
dataset = TFDataset(self.config.dataset_dir)
dataset.construct_dataset(
self.config.annotation_path,
self.config.image_dir,
self.config.annotation_type,
self.config.test_fraction,
)
def is_ready_dataset(self):
# check if project is set up
self._assert_project_is_ready()
dataset = self._load_dataset()
if dataset:
return dataset.is_ready()
return False
def cleanup_dataset_dir(self):
        ''' Remove dataset directory '''
# cleanup dataset dir
dataset = self._load_dataset()
if dataset:
dataset.cleanup_dataset_dir()
def dataset_summary(self):
self._assert_project_is_ready()
self._assert_dataset_is_ready()
dataset = self._load_dataset()
return dataset.get_summary()
# ==
# ==
# Model
# ==
def setup_model(self, force=False):
self._assert_project_is_ready()
if Path(self.config.model_dir).exists() and not force:
raise Exception('Model already exists. Rerun with --force if you want to overwrite it.')
dataset = self._load_dataset()
if not dataset:
raise DatasetNotReadyError('Dataset not ready. Run Project.setup_dataset first')
model = TFModel(self.config.model_dir)
model.construct_model(
self.config.model,
dataset.config.record_train_path,
dataset.config.record_eval_path,
dataset.config.labelmap_path,
self.config.checkpoint_path,
self.config.use_checkpoint,
self.config.augmentation,
self.config.n_iter,
self.config.batch_size,
self.config.augmentation,
)
def is_ready_model(self):
# check if model is setup
self._assert_project_is_ready()
model = self._load_model()
if model:
return model.is_ready()
return False
def is_model_exported(self):
# check if model is exported
self._assert_project_is_ready()
self._assert_model_is_ready()
model = self._load_model()
if model:
return model.is_exported()
return False
def cleanup_model_dir(self):
        ''' Remove model directory '''
        # cleanup model dir
model = self._load_model()
if model:
model.cleanup_model_dir()
def cleanup_model_training(self):
''' Remove all files generated by TF '''
model = self._load_model()
if model:
model.cleanup_train_eval()
def cleanup_model_export(self):
        ''' Remove all files generated by export '''
model = self._load_model()
if model:
model.cleanup_export()
def cleanup_model(self):
''' Remove all model data'''
self.cleanup_model_export()
self.cleanup_model_training()
self.cleanup_model_dir()
model_path = Path(self.config.model_dir)
if model_path.exists():
try:
model_path.rmdir()
except:
pass
def train_and_eval(self):
self._assert_project_is_ready()
self._assert_dataset_is_ready()
self._assert_model_is_ready()
model = self._load_model()
return model.train_and_eval()
def continue_training(self):
self._assert_project_is_ready()
self._assert_dataset_is_ready()
self._assert_model_is_ready()
model = self._load_model()
return model.continue_training()
def model_checkpoints(self, name_only=True):
'''
Get list of model checkpoints available for export
'''
model = self._load_model()
if model:
return model.checkpoints(name_only=name_only)
else:
return []
def export_model(self, checkpoint=None, force=False):
'''
Export model checkpoint for inference or
as checkpoint for training of another model
'''
model = self._load_model()
ckpt_names = model.checkpoints()
if len(ckpt_names) < 1:
raise ModelNotTrainedError('No model checkpoints available for export. Run Project.train_and_eval first.')
return model.export(checkpoint=checkpoint, force=force)
def get_n_iter(self):
return self.config.n_iter
def set_n_iter(self, n_iter):
self.config.n_iter = n_iter
if self.is_ready_model():
model = self._load_model()
model.n_iter = n_iter
self.write_config()
self.to_json()
    def set_batch_size(self, batch_size):
self.config.batch_size = batch_size
if self.is_ready_model():
model = self._load_model()
model.batch_size = batch_size
self.write_config()
self.to_json()
# ==
# ==
# Inference
# ==
def detect(self, out_dir, image_path, output_types, padding=0, th=0.5):
''' Run detection and save outputs to files
Parameters
----------
out_dir : string
path to output directory
image_path: string
path to single image or directory containing images
        output_types: list
list of output types ['ibb', 'ebb', 'csv']
padding: int
padding to apply to bounding boxes in pixel
th: float
            score threshold to still consider a box. Boxes with scores
            below this threshold are discarded.
'''
self._assert_model_is_exported()
self._assert_dataset_is_ready()
dataset = self._load_dataset()
model = self._load_model()
exported_model_path = model.get_exported_model_path()
import ginjinn.core.tf_detector
detector = ginjinn.core.tf_detector.TFDetector(
exported_model_path,
dataset.labelmap_path,
)
detector.run_detection(
out_dir,
image_path,
output_types,
padding=padding,
th=th,
)
# ==
@classmethod
def from_directory(cls, project_dir):
''' Load Project object from directory. '''
# TODO: print nicely formatted error
Path(project_dir).resolve(strict=True)
project = cls(project_dir)
# load internal config
project.load_json()
# load potentially manipulated user-facing config
# and update project config accordingly
project.load_config()
# save potentially updated config to json
project.to_json()
# update model config in case config.yaml was edited
# since setup_model was called
if project.is_ready_model():
model = project._load_model()
model.n_iter = project.config.n_iter
model.batch_size = project.config.batch_size
# print(project.config)
return project
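    # Typical lifecycle (illustrative):
    #   Project('my_proj').setup_project_dir()  # scaffold directory + config.yaml
    #   p = Project.from_directory('my_proj')   # reload after editing config.yaml
    #   p.setup_dataset(); p.setup_model(); p.train_and_eval(); p.export_model()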
def _assert_project_is_ready(self):
if not self.is_ready():
raise ProjectNotReadyError(
'Project directory is not yet set up. Run Project.setup_project_dir first.'
)
def _assert_dataset_is_ready(self):
if not self.is_ready_dataset():
raise DatasetNotReadyError(
'Dataset is not set up. Run Project.setup_dataset first.'
)
def _assert_model_is_ready(self):
if not self.is_ready_model():
raise ModelNotReadyError(
'Model is not set up. Run Project.setup_model first.'
)
def _assert_model_is_exported(self):
if not self.is_model_exported():
raise ModelNotExportedError(
'No exported model available. Run Project.export_model first.'
)
def _load_dataset(self):
try:
return TFDataset.from_directory(self.config.dataset_dir)
except:
return None
def _load_model(self):
try:
return TFModel.from_directory(self.config.model_dir)
except:
return None
@property
def _project_files(self):
return [
self.config.config_path,
self.config.project_json,
]
def print_dataset_summary(self):
try:
self._assert_dataset_is_ready()
except:
print('Dataset is not set up.')
return
dataset = self._load_dataset()
dataset.print_summary()
def get_train_image_files(self):
self._assert_dataset_is_ready()
ds = self._load_dataset()
return ds.get_training_image_files()
def get_eval_image_files(self):
self._assert_dataset_is_ready()
ds = self._load_dataset()
return ds.get_eval_image_files() | 1.890625 | 2 |
staticbackend/user.py | ipfans/backend-python | 0 | 12768095 | """User management."""
from functools import lru_cache
from typing import Any, Optional
from httpx import Client
from .base import Base
from .database import Database
from .errors import EmailError, LoginError
from .forms import Form
from .storage import Storage
class LoginState(Base):
def __init__(self, client: Client, token: str) -> None:
super().__init__(client, token)
self.token = token
@property # type: ignore
@lru_cache()
def database(self) -> Database:
return Database(self.client, self.token) # type: ignore
@property # type: ignore
@lru_cache()
def forms(self) -> Form:
return Form(self.client, self.token) # type: ignore
@property # type: ignore
@lru_cache()
def storage(self) -> Storage:
return Storage(self.client, self.token) # type: ignore
class User(Base):
    def __init__(self, client: Client, root_token: Optional[str] = None) -> None:
super().__init__(client, root_token)
def _user(self, uri: str, email: str, password: str) -> str:
resp: Any = self._request(uri, {"email": email, "password": password})
return resp # type: ignore
def register(
self,
email: str,
password: str,
) -> LoginState:
"""Register new user.
:param email: User’s email address
        :param password: User’s password
:return: User’s authentication token
"""
token = self._user("/register", email, password)
if "invalid email" in token:
raise EmailError()
return LoginState(self.client, token)
def login(
self,
email: str,
password: str,
) -> LoginState:
"""Validate user by email and password to receive their id and session token.
:param email: User’s email address
        :param password: User’s password
:return: User’s authentication token
"""
token = self._user("/login", email, password)
if "no documents in result" in token:
raise LoginError()
return LoginState(self.client, token)
def send_reset_code(
self,
email: str,
) -> str:
"""Send reset code to user's email.
:param email: User’s email address
:return: Reset code.
"""
resp: Any = self._request(
"/password/resetcode", method="get", params={"e": email}
)
if "invalid email" in resp:
raise EmailError()
return resp # type: ignore
def reset_password(
self,
email: str,
code: str,
password: str,
) -> bool:
"""Reset user password.
:param email: User’s email address
:param code: User’s reset code
        :param password: User’s new password
:return: Reset or not
"""
resp: Any = self._request(
"/password/reset",
body={"email": email.lower(), "code": code, "password": password},
)
return resp # type: ignore
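

# Illustrative end-to-end flow (client/base setup is hypothetical):
#   state = User(client).login("alice@example.com", "secret")
#   state.database / state.forms / state.storage are lazily cached helpers
#   bound to the session token returned by login/register.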
| 2.625 | 3 |
lib/clock.py | kostyaby/fc-helper | 0 | 12768096 | from . import Constant
from datetime import date
import os.path
import time
class Clock:
def __init__(self):
self.cached_today = date.today()
self.cached_time = int(time.time() * Constant.Clock.MILLIS_IN_SECOND)
def get_timestamped_directory_name(self, dirname):
return "{}-{}".format(dirname, self.cached_time)
def delta(self, timestamp):
return self.cached_time - timestamp
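

# Usage sketch (illustrative):
#   clock = Clock()
#   clock.get_timestamped_directory_name("run")  # -> "run-<millis-at-construction>"
#   clock.delta(earlier_timestamp)               # age in milliseconds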
| 3.109375 | 3 |
django_de/news/models.py | django-de/django-de-v3 | 1 | 12768097 | import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as url_reverse
from django.template.defaultfilters import slugify
from django.utils.translation import gettext_lazy as _
class NewsItemManager(models.Manager):
def to_export(self):
"""
Filters all items, that haven't been exported yet.
"""
return self.filter(twitter_id__isnull=True)
def exported(self):
return self.filter(twitter_id__isnull=False)
class NewsItem(models.Model):
"""
A basic news item. The title is mostly something that can end up on
    services like Twitter. If the body is not empty, the external
representation of such an item also includes a link to the post
and a respectively shortened title.
"""
title = models.CharField(verbose_name=_('Title'), max_length=200)
slug = models.SlugField(verbose_name=_('Slug'), blank=True)
body = models.TextField(verbose_name=_('Body'), blank=True, null=True)
pub_date = models.DateTimeField(verbose_name=_('Published at'),
default=datetime.datetime.now)
author = models.ForeignKey(User, verbose_name=_('Author'), null=True,
blank=True)
twitter_id = models.BigIntegerField(verbose_name=_('Twitter ID'),
blank=True, null=True)
objects = NewsItemManager()
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.pk and not self.slug:
self.slug = slugify(self.title[:30])
return super(NewsItem, self).save(*args, **kwargs)
def as_twitter_message(self):
if not self.body:
return self.title
item_url = 'http://%s%s' % (
Site.objects.get_current().domain,
url_reverse('news_shortcut', kwargs={'pk': str(self.pk)})
)
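        # Trim the title so the total message length stays within the original
        # title's length: drop len(item_url) + 4 characters to make room for
        # the "... " separator plus the shortcut link.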
return self.title[:-(len(item_url)+4)] + '... ' + item_url
def get_absolute_url(self):
return url_reverse('news_detail', kwargs=dict(
slug=self.slug, pk=self.pk))
def get_twitter_url(self):
return 'http://twitter.com/%s/status/%d' % (settings.TWITTER_USERNAME,
self.twitter_id,)
class Meta:
ordering = ['-pub_date']
| 2.296875 | 2 |
review/api/urls.py | shahzadfarukh0999/review-app | 0 | 12768098 | from django.conf.urls import url
from .views import (ReviewRUDView,
ReviewListCreateView,
RatingRUDView,
RatingListCreateView,
RatingCategoryRUDView,
RatingCategoryListCreateView
)
urlpatterns = [
url(r'review/(?P<pk>[0-9]+)$', ReviewRUDView.as_view(), name='review-rud'),
url(r'review/$', ReviewListCreateView.as_view(), name='review-create-list'),
url(r'rating/(?P<pk>[0-9]+)$', RatingRUDView.as_view(), name='rating-rud'),
url(r'rating/$', RatingListCreateView.as_view(), name='rating-create-list'),
url(r'category/(?P<pk>[0-9]+)$', RatingCategoryRUDView.as_view(), name='category-rud'),
url(r'category/$', RatingCategoryListCreateView.as_view(), name='category-create-list'),
]
| 1.789063 | 2 |
section_10_(dictionaries)/dict_values.py | alisonjo2786/pythonlessons_materials | 425 | 12768099 | # If you're new to dictionaries, you might want to start with dict_access.py
# We create a dictionary.
contacts = {
'Shannon': '202-555-1234',
'Amy': '410-515-3000',
'Jen': '301-600-5555',
'Julie': '202-333-9876'
}
# We can use the dictionary method .values() to give us a list of all of the values in contacts.
print contacts.values()
for phone in contacts.values():
print "{0}".format(phone)
# .values() is used less frequently than .keys() since you can't get the key from the value (but you can get the value if you know the key)
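# For example, finding the owner of a number means scanning every pair:
for name, phone in contacts.items():
    if phone == '410-515-3000':
        print "{0} owns that number".format(name)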
# Use .values() when you don't care what the key is, you just want a list of all of the values. It's less common, but still good to know. | 4.59375 | 5 |
cantools/database/can/bus.py | VonSquiggles/cantools | 1063 | 12768100 | <filename>cantools/database/can/bus.py
# A CAN bus.
class Bus(object):
"""A CAN bus.
"""
def __init__(self,
name,
comment=None,
baudrate=None,
fd_baudrate=None,
autosar_specifics=None):
self._name = name
# If the 'comment' argument is a string, we assume that is an
# English comment. This is slightly hacky, because the
# function's behavior depends on the type of the passed
# argument, but it is quite convenient...
if isinstance(comment, str):
# use the first comment in the dictionary as "The" comment
self._comments = { None: comment }
else:
# assume that we have either no comment at all or a
# multi-lingual dictionary
self._comments = comment
self._baudrate = baudrate
self._fd_baudrate = fd_baudrate
self._autosar = autosar_specifics
@property
def name(self):
"""The bus name as a string.
"""
return self._name
@property
def comment(self):
"""The bus' comment, or ``None`` if unavailable.
Note that we implicitly try to return the English comment if
multiple languages were specified.
"""
if self._comments is None:
return None
elif self._comments.get(None) is not None:
return self._comments.get(None)
elif self._comments.get("FOR-ALL") is not None:
return self._comments.get("FOR-ALL")
return self._comments.get('EN')
@property
def comments(self):
"""The dictionary with the descriptions of the bus in multiple
languages. ``None`` if unavailable.
"""
return self._comments
@property
def baudrate(self):
"""The bus baudrate, or ``None`` if unavailable.
"""
return self._baudrate
@property
def fd_baudrate(self):
"""The baudrate used for the payload of CAN-FD frames, or ``None`` if
unavailable.
"""
return self._fd_baudrate
@property
def autosar(self):
"""An object containing AUTOSAR specific properties of the bus.
"""
return self._autosar
@autosar.setter
def autosar(self, value):
self._autosar = value
def __repr__(self):
return "bus('{}', {})".format(
self._name,
"'" + self.comment + "'" if self.comment is not None else None)
| 2.796875 | 3 |
newdle/core/util.py | jrstrayhorn/newdle | 0 | 12768101 | from datetime import datetime
from enum import Enum
from flask import current_app
from itsdangerous import Signer
DATE_FORMAT = '%Y-%m-%d'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M'
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
def parse_dt(text):
return datetime.strptime(text, DATETIME_FORMAT)
def format_dt(dt):
return dt.strftime(DATETIME_FORMAT)
def range_union(ranges):
"""Take a list of (H, M) tuples and merge any overlapping intervals."""
results = []
# tuples are sorted in increasing order, so we are sure we always have
# the "latest" end time at the back of the list
for start, end in sorted(ranges):
last_end_time = results[-1] if results else None
# if the next start time is earlier than the latest end time, then
# we can merge the intervals
if last_end_time and start <= last_end_time[1]:
results[-1] = (last_end_time[0], max(last_end_time[1], end))
else:
results.append((start, end))
return results
def _get_signature_source_bytes(data, fields=None):
if fields:
data = {k: v for k, v in data.items() if k in fields}
return '-'.join(v for k, v in sorted(data.items())).encode()
def sign_user(user_data, fields=None):
"""Sign user data."""
signer = Signer(current_app.config['SECRET_KEY'], salt='<PASSWORD>')
return dict(
user_data,
signature=signer.get_signature(
_get_signature_source_bytes(user_data, fields)
).decode('ascii'),
)
def check_user_signature(user_data, signature, fields=None):
"""Check that user data matches the signature."""
signer = Signer(current_app.config['SECRET_KEY'], salt='<PASSWORD>')
return signer.verify_signature(
_get_signature_source_bytes(user_data, fields), signature.encode('ascii')
)
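

# Illustrative round-trip (requires an app context with SECRET_KEY set):
#   signed = sign_user({'email': 'a@example.com', 'name': 'A'}, fields={'email'})
#   check_user_signature(signed, signed['signature'], fields={'email'})  # -> True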
| 2.8125 | 3 |
profiles_api/urls.py | Lucasanim/profiles-rest-api | 0 | 12768102 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import Hello, HelloViewSets, UserProfileViewSet, UserLoginApiView, UserProfileFeedViewSet
router = DefaultRouter()
router.register('hello', HelloViewSets, base_name='hello')
router.register('profile', UserProfileViewSet)
router.register('feed', UserProfileFeedViewSet)
urlpatterns = [
    path('a/', Hello.as_view(), name="hello"),
path('', include(router.urls)),
path('login/', UserLoginApiView.as_view(), name="login")
] | 2.03125 | 2 |
airflow/providers/microsoft/psrp/operators/psrp.py | holly-evans/airflow | 3 | 12768103 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from logging import DEBUG
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from jinja2.nativetypes import NativeEnvironment
from pypsrp.powershell import Command
from pypsrp.serializer import TaggedValue
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PsrpHook
from airflow.settings import json
# TODO: Replace with airflow.utils.helpers.exactly_one in Airflow 2.3.
def exactly_one(*args):
return len(set(filter(None, args))) == 1
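# e.g. exactly_one('a', None, None) -> True; exactly_one('a', 'b', None) -> False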
if TYPE_CHECKING:
from airflow.utils.context import Context
class PsrpOperator(BaseOperator):
"""PowerShell Remoting Protocol operator.
Use one of the 'command', 'cmdlet', or 'powershell' arguments.
The 'securestring' template filter can be used to tag a value for
serialization into a `System.Security.SecureString` (applicable only
for DAGs which have `render_template_as_native_obj=True`).
When using the `cmdlet` or `powershell` arguments and when `do_xcom_push`
is enabled, the command output is converted to JSON by PowerShell using
the `ConvertTo-Json
<https://docs.microsoft.com/en-us/powershell/
module/microsoft.powershell.utility/convertto-json>`__ cmdlet such
that the operator return value is serializable to an XCom value.
:param psrp_conn_id: connection id
:param command: command to execute on remote host. (templated)
:param powershell: powershell to execute on remote host. (templated)
:param cmdlet:
cmdlet to execute on remote host (templated). Also used as the default
value for `task_id`.
:param parameters:
When using the `cmdlet` or `powershell` arguments, use this parameter to
provide parameters (templated). Note that a parameter with a value of `None`
becomes an *argument* (i.e., switch).
:param logging_level:
Logging level for message streams which are received during remote execution.
The default is to include all messages in the task log.
:param runspace_options:
optional dictionary which is passed when creating the runspace pool. See
:py:class:`~pypsrp.powershell.RunspacePool` for a description of the
available options.
:param wsman_options:
optional dictionary which is passed when creating the `WSMan` client. See
:py:class:`~pypsrp.wsman.WSMan` for a description of the available options.
:param psrp_session_init:
Optional command which will be added to the pipeline when a new PowerShell
session has been established, prior to invoking the action specified using
the `cmdlet`, `command`, or `powershell` parameters.
"""
template_fields: Sequence[str] = (
"cmdlet",
"command",
"parameters",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#c2e2ff"
def __init__(
self,
*,
psrp_conn_id: str,
command: Optional[str] = None,
powershell: Optional[str] = None,
cmdlet: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
logging_level: int = DEBUG,
runspace_options: Optional[Dict[str, Any]] = None,
wsman_options: Optional[Dict[str, Any]] = None,
psrp_session_init: Optional[Command] = None,
**kwargs,
) -> None:
args = {command, powershell, cmdlet}
if not exactly_one(*args):
raise ValueError("Must provide exactly one of 'command', 'powershell', or 'cmdlet'")
if parameters and not cmdlet:
raise ValueError("Parameters only allowed with 'cmdlet'")
if cmdlet:
kwargs.setdefault('task_id', cmdlet)
super().__init__(**kwargs)
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
self.cmdlet = cmdlet
self.parameters = parameters
self.logging_level = logging_level
self.runspace_options = runspace_options
self.wsman_options = wsman_options
self.psrp_session_init = psrp_session_init
def execute(self, context: "Context") -> Optional[List[Any]]:
with PsrpHook(
self.conn_id,
logging_level=self.logging_level,
runspace_options=self.runspace_options,
wsman_options=self.wsman_options,
on_output_callback=self.log.info if not self.do_xcom_push else None,
) as hook, hook.invoke() as ps:
if self.psrp_session_init is not None:
ps.add_command(self.psrp_session_init)
if self.command:
ps.add_script(f"cmd.exe /c @'\n{self.command}\n'@")
else:
if self.cmdlet:
ps.add_cmdlet(self.cmdlet)
else:
ps.add_script(self.powershell)
if self.parameters:
ps.add_parameters(self.parameters)
if self.do_xcom_push:
ps.add_cmdlet("ConvertTo-Json")
if ps.had_errors:
raise AirflowException("Process failed")
rc = ps.runspace_pool.host.rc
if rc:
raise AirflowException(f"Process exited with non-zero status code: {rc}")
if not self.do_xcom_push:
return None
return [json.loads(output) for output in ps.output]
def get_template_env(self):
# Create a template environment overlay in order to leave the underlying
# environment unchanged.
env = super().get_template_env().overlay()
native = isinstance(env, NativeEnvironment)
def securestring(value: str):
if not native:
raise AirflowException(
"Filter 'securestring' not applicable to non-native templating environment"
)
return TaggedValue("SS", value)
env.filters["securestring"] = securestring
return env
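

# Illustrative DAG usage (names are hypothetical; a configured PSRP
# connection is assumed, and the securestring filter additionally needs
# render_template_as_native_obj=True on the DAG):
#   PsrpOperator(
#       psrp_conn_id="my_psrp_conn",
#       cmdlet="Get-ChildItem",
#       parameters={"Path": "C:\\Temp", "Recurse": None},  # None -> switch
#   )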
| 1.585938 | 2 |
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.py | jeikabu/lumberyard | 8 | 12768104 | #
# Created by: <NAME>, September 2002
#
from __future__ import division, print_function, absolute_import
import sys
import subprocess
import time
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy.random import rand, seed
from scipy.linalg import _flapack as flapack
from scipy.linalg import inv
from scipy.linalg import svd
from scipy.linalg.lapack import _compute_lwork
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm, a1)
if norm in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm in 'Mm':
ref = np.max(np.abs(a1))
elif norm in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))), m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
it_len*np.sqrt(np.sum(np.power(m_vec, 2))),)))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
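        # dlasd4 computes the i-th updated singular value of a diagonal
        # matrix modified by the rank-one column m_vec (the broken-arrowhead
        # matrix M built above), so the roots should match M's singular values.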
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.ones(4, dtype) * 3
v = np.ones(4, dtype) * 4
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a hermetian matrix to
# tridiagonal form using householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
        a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Check if it an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read())
class TestSytrd(object):
def test_sytrd(self):
for dtype in REAL_DTYPES:
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
assert_raises(ValueError, sytrd, A)
# Tests for n = 1 currently fail with
# ```
# ValueError: failed to create intent(cache|hide)|optional array--
# must have defined dimensions but got (0,)
# ```
# This is a NumPy issue
# <https://github.com/numpy/numpy/issues/9617>.
# TODO once the issue has been resolved, test for n=1
# some upper triangular array
n = 3
A = np.zeros((n, n), dtype=dtype)
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
def test_hetrd(self):
for real_dtype, complex_dtype in zip(REAL_DTYPES, COMPLEX_DTYPES):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
assert_raises(ValueError, hetrd, A)
# Tests for n = 1 currently fail with
# ```
# ValueError: failed to create intent(cache|hide)|optional array--
# must have defined dimensions but got (0,)
# ```
# This is a NumPy issue
# <https://github.com/numpy/numpy/issues/9617>.
# TODO once the issue has been resolved, test for n=1
# some upper triangular array
n = 3
A = np.zeros((n, n), dtype=complex_dtype)
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# query lwork
lwork, info = hetrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermetian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
# For <s,d>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = np.linalg.norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The error is at most 1-fold
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
| 1.625 | 2 |
web.py | caux/japonicus | 0 | 12768105 | <reponame>caux/japonicus
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import pandas as pd
import os
import flask
import dash
from dash.dependencies import Input, Output, Event
import dash_core_components as dcc
import dash_html_components as html
from flask_caching import Cache
from evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames
import Settings
gsettings = Settings.getSettings()['Global']
settings = Settings.getSettings()['bayesian']
def load_evolution_logs(filename=None):
FileList = os.listdir(gsettings["save_dir"])
filename = os.path.join(gsettings["save_dir"], filename)
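    # `columns` is not defined anywhere in this module as written; a plausible
    # set, inferred from the fields update_graph reads below, is sketched here
    # (the column order is an assumption).
    columns = ['id', 'avg', 'std', 'min', 'max',
               'evaluationScore', 'evaluationScoreOnSecondary', 'dateRange']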
df = pd.read_csv(filename, names=columns)
return df
def update_graph(GraphName, Statistics):
print('Loading')
ID = [s for s in GraphName if s.isdigit()]
'''
try:
df = load_evolution_logs(filename="evolution_gen_Locale%s.csv" % ''.join(ID))
except:
print("Failure to read evolution data.")
return None
'''
df = pd.DataFrame(Statistics)
annotations = []
statisticsNames = {}
statisticsNames.update(epochStatisticsNames)
statisticsNames.update(periodicStatisticsNames)
if 'dateRange' in df.keys():
for W in range(len(df['dateRange'])):
DR = df['dateRange'][W]
if DR != None:
annotations.append(
{
'xref': 'axis',
'yref': 'paper',
'xanchor': 'left',
'yanchor': 'bottom',
'font': {'family': 'Arial', 'size': 12, 'color': 'rgb(37,37,37)'},
'x': W,
'y': 1 if not len(annotations) %
2 else 0.93, # avoid label overlap;
'text': DR,
}
)
colorSequence = [
(188, 189, 34),
(100, 11, 182),
(186, 3, 34),
(45, 111, 45),
(66, 128, 66),
(128, 66, 66),
]
statNames = [
'avg', 'std', 'min', 'max', 'evaluationScore', 'evaluationScoreOnSecondary'
]
DATA = [
{
'x': df['id'],
'y': df[statNames[S]],
'type': 'line',
'name': statisticsNames[statNames[S]],
'line': {'color': 'rgb%s' % str(colorSequence[S])},
}
for S in range(len(statNames))
]
fig = {
'data': [
{
'x': [0, df["id"]],
'y': [0],
'type': 'line',
'name': 'markzero',
'line': {'color': 'rgb(0,0,0)'},
}
] +
DATA,
'layout': {'title': 'Evolution at %s' % GraphName, 'annotations': annotations},
}
return fig
def newGraphic(name):
G = dcc.Graph(id=name)
G.Active = True
return G
def run_server():
# Setup the app
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server, csrf_protect=False)
app.scripts.config.serve_locally = False
dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-finance-1.28.0.min.js'
# Add caching
cache = Cache(app.server, config={'CACHE_TYPE': 'simple'})
timeout = 60 * 60 # 1 hour
# Controls
app.update_graph = update_graph
# Layout
app.GraphicList = []
app.newGraphic = lambda name: app.GraphicList.append(newGraphic(name))
app.layout = html.Div(
[
html.Div(
[
html.H2(
'japonicus Evolution Statistics',
style={'padding-top': '20', 'text-align': 'center'},
),
html.Div(
[
dcc.Interval(id='my-interval'),
dcc.RadioItems(
id='set-time',
value=5000,
options=[
{'label': 'Every 60 seconds', 'value': 60000},
{'label': 'Every 15 seconds', 'value': 15000},
{
'label': 'Every hour', 'value': 60 * 60 * 1000
}, # or just every hour
],
),
]
),
html.Div(id='display-time'),
]
),
html.Div(id='Graphs'),
],
style={'width': '1100', 'margin-left': 'auto', 'margin-right': 'auto', 'font-family': 'overpass', 'background-color': '#F3F3F3'},
# Traces>Color
)
app.config['suppress_callback_exceptions'] = True
@app.callback(
Output('display-time', 'children'), events=[Event('my-interval', 'interval')]
)
def display_time():
return str(datetime.datetime.now())
@app.callback(Output('my-interval', 'interval'), [Input('set-time', 'value')])
def update_interval(value):
return value
@cache.memoize(timeout=timeout)
@app.callback(
Output('Graphs', 'children'), events=[Event('my-interval', 'interval')]
)
def updateGraphs():
'''
for F in range(len(app.GraphicList)):
if app.GraphicList[F].Active:
app.GraphicList[F].__setattr__('figure', update_graph(app.GraphicList[F].id))
'''
return app.GraphicList
# External css
external_css = [
"https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css",
]
for css in external_css:
app.css.append_css({"external_url": css})
# Run the Dash app
if __name__ == '__main__':
app.server.run(debug=True, host='0.0.0.0')
else: # this way it integrates with main interface without child procs across pipes,
return app
if __name__ == '__main__':
run_server()
| 2.015625 | 2 |
src/asttrs/utils.py | ryanchao2012/asttrs | 0 | 12768106 | import pathlib
import subprocess as sp
import tempfile
def blacking(source_code: str):
with tempfile.NamedTemporaryFile("w", delete=False) as f:
f.write(source_code)
fname = f.name
p = sp.Popen(f"cat {fname}".split(), stdout=sp.PIPE)
out = sp.check_output("black -q -".split(), stdin=p.stdout)
p.wait()
try:
pathlib.Path(fname).unlink()
except FileNotFoundError:
pass
return out.decode()
def isorting(source_code: str):
with tempfile.NamedTemporaryFile("w", delete=False) as f:
f.write(source_code)
fname = f.name
p = sp.Popen(f"cat {fname}".split(), stdout=sp.PIPE)
out = sp.check_output("isort -q -".split(), stdin=p.stdout)
p.wait()
try:
pathlib.Path(fname).unlink()
except FileNotFoundError:
pass
return out.decode()
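# Both helpers above shell out via a temp file; a shorter equivalent (a sketch,
# assuming `black`/`isort` are on PATH) can feed the source over stdin instead:
#
#   def _pipe_format(cmd: str, source_code: str) -> str:
#       out = sp.run(cmd.split(), input=source_code.encode(),
#                    stdout=sp.PIPE, check=True)
#       return out.stdout.decode()
#
#   formatted = _pipe_format("black -q -", "x=1")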
| 2.53125 | 3 |
melisa/models/user/user.py | MelisaDev/melisa | 5 | 12768107 | # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
from enum import IntEnum
from dataclasses import dataclass
from typing import Optional, Dict, Any
from ...utils.conversion import try_enum
from ...utils.api_model import APIModelBase
from ...utils.types import APINullable, UNDEFINED
from ...utils.snowflake import Snowflake
class PremiumTypes(IntEnum):
"""Premium types denote the level of premium a user has.
Attributes
----------
NITRO:
Full nitro
NITRO_CLASSIC:
Nitro (not boost)
NONE:
There is no subscription Discord Nitro(Full or classic)
"""
NONE = 0
NITRO_CLASSIC = 1
NITRO = 2
def __int__(self):
return self.value
class UserFlags(IntEnum):
"""Profile Icons
Attributes
----------
NONE:
None
STAFF:
Discord Employee
PARTNER:
Partnered Server Owner
HYPESQUAD:
HypeSquad Events Coordinator
BUG_HUNTER_LEVEL_1:
Bug Hunter Level 1
HYPESQUAD_ONLINE_HOUSE_1:
House Bravery Member
HYPESQUAD_ONLINE_HOUSE_2:
House Brilliance Member
HYPESQUAD_ONLINE_HOUSE_3:
House Balance Member
PREMIUM_EARLY_SUPPORTER:
Early Nitro Supporter
TEAM_PSEUDO_USER:
User is a team
BUG_HUNTER_LEVEL_2:
Bug Hunter Level 2
VERIFIED_BOT:
Verified Bot
VERIFIED_DEVELOPER:
Early Verified Bot Developer
CERTIFIED_MODERATOR:
Discord Certified Moderator
BOT_HTTP_INTERACTIONS:
Bot uses only HTTP interactions and is shown in the online member list
"""
NONE = 0
STAFF = 1 << 0
PARTNER = 1 << 1
HYPESQUAD = 1 << 2
BUG_HUNTER_LEVEL_1 = 1 << 3
HYPESQUAD_ONLINE_HOUSE_1 = 1 << 6
HYPESQUAD_ONLINE_HOUSE_2 = 1 << 7
HYPESQUAD_ONLINE_HOUSE_3 = 1 << 8
PREMIUM_EARLY_SUPPORTER = 1 << 9
TEAM_PSEUDO_USER = 1 << 10
BUG_HUNTER_LEVEL_2 = 1 << 14
VERIFIED_BOT = 1 << 16
VERIFIED_DEVELOPER = 1 << 17
CERTIFIED_MODERATOR = 1 << 18
BOT_HTTP_INTERACTIONS = 1 << 19
def __int__(self):
return self.value
class VisibilityTypes(IntEnum):
"""The type of connection visibility.
Attributes
----------
None:
invisible to everyone except the user themselves
Everyone:
visible to everyone
"""
NONE = 0
EVERYONE = 1
def __int__(self):
return self.value
@dataclass(repr=False)
class User(APIModelBase):
# ToDo: Update Docstrings
"""User Structure
Attributes
----------
id: :class:`~melisa.utils.types.Snowflake`
the user's id
username: :class:`str`
the user's username, not unique across the platform
discriminator: :class:`int`
the user's 4-digit discord-tag
avatar: Optional[:class:`str`]
the user's avatar hash
bot: APINullable[:class:`bool`]
whether the user belongs to an OAuth2 application
system: APINullable[:class:`bool`]
whether the user is an Official Discord System user (part of the urgent message system)
mfa_enabled: APINullable[:class:`bool`]
whether the user has two factor enabled on their account
banner: APINullable[:class:`str`]
the user's banner hash
accent_color: APINullable[:class:`int`]
the user's banner color encoded as an integer representation of hexadecimal color code
locale: APINullable[:class:`str`]
the user's chosen language option
verified: APINullable[:class:`bool`]
whether the email on this account has been verified
email: APINullable[:class:`str`]
the user's email
flags: APINullable[:class:`~models.user.user.UserFlags`]
the flags on a user's account
premium_type: APINullable[:class:`int`]
the type of Nitro subscription on a user's account
public_flags: APINullable[:class:`int`]
the public flags on a user's account
premium: APINullable[:class:`PremiumTypes`]
The user their premium type in a usable enum.
"""
id: APINullable[Snowflake] = UNDEFINED
username: APINullable[str] = UNDEFINED
discriminator: APINullable[str] = UNDEFINED
avatar: APINullable[str] = UNDEFINED
bot: APINullable[bool] = UNDEFINED
system: APINullable[bool] = UNDEFINED
mfa_enabled: APINullable[bool] = UNDEFINED
banner: APINullable[str] = UNDEFINED
accent_color: APINullable[int] = UNDEFINED
    locale: APINullable[str] = UNDEFINED
verified: APINullable[bool] = UNDEFINED
email: APINullable[str] = UNDEFINED
premium_type: APINullable[int] = UNDEFINED
public_flags: APINullable[int] = UNDEFINED
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> User:
"""Generate a user from the given data.
Parameters
----------
data: :class:`dict`
The dictionary to convert into a user.
"""
self: User = super().__new__(cls)
self.id = int(data["id"])
self.username = data.get("username")
self.discriminator = data.get("discriminator")
self.avatar = data.get("avatar")
self.bot = data.get("bot", False)
self.system = data.get("system", False)
self.mfa_enabled = data.get("mfa_enable", False)
self.banner = data.get("banner")
self.accent_color = data.get("accent_color")
self.local = data.get("local")
self.verified = data.get("verified", False)
self.email = data.get("email")
self.premium_type = try_enum(PremiumTypes, data.get("premium_type"))
self.public_flags = try_enum(UserFlags, data.get("public_flags"))
return self
@property
def premium(self) -> Optional[PremiumTypes]:
return None if self.premium_type is None else PremiumTypes(self.premium_type)
    @property
    def flags(self) -> Optional[UserFlags]:
        # Referencing self.flags here would recurse into this property, so
        # derive the enum from the stored public_flags value instead
        # (assumption: no separate raw flags field is kept on the model).
        return None if self.public_flags is None else UserFlags(self.public_flags)
def __str__(self):
"""String representation of the User object"""
return self.username + "#" + self.discriminator
@property
def mention(self):
""":class:`str`: The user's mention string. (<@id>)"""
return "<@{}>".format(self.id)
def avatar_url(self) -> str:
"""Avatar url (from the Discord CDN server)"""
return "https://cdn.discordapp.com/avatars/{}/{}.png?size=1024".format(
self.id, self.avatar
)
async def create_dm_channel(self):
# ToDo: Add docstrings
# ToDo: Add checking this channel in cache
return await self._http.post(
"/users/@me/channels", data={"recipient_id": self.id}
)
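# A minimal usage sketch (the payload below is illustrative, not a captured
# Discord API response):
#
#   payload = {"id": "123456789012345678", "username": "Melisa",
#              "discriminator": "0001", "avatar": None, "bot": True}
#   user = User.from_dict(payload)
#   str(user)     # -> "Melisa#0001"
#   user.mention  # -> "<@123456789012345678>"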
| 2.03125 | 2 |
app/helpers/sentry.py | MTES-MCT/mobilic-api | 0 | 12768108 | import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from werkzeug.exceptions import (
NotFound,
MethodNotAllowed,
HTTPVersionNotSupported,
)
import re
from app import app
from app.helpers.errors import EmailAlreadyRegisteredError, MobilicError
from app.helpers.mail import InvalidEmailAddressError
from config import MOBILIC_ENV
from app.helpers.livestorm import NoLivestormCredentialsError
FILTER_OUT_ERRORS = [
NotFound,
MethodNotAllowed,
HTTPVersionNotSupported,
EmailAlreadyRegisteredError,
InvalidEmailAddressError,
NoLivestormCredentialsError,
]
FILTER_OUT_RE_FOR_MOBILIC_ERRORS = [
re.compile(r"^Wrong email/password combination")
]
def filter_errors(event, hint):
if "exc_info" in hint:
exc_type, exc_value, tb = hint["exc_info"]
if any(
[
issubclass(exc_type, filtered_out_error_type)
for filtered_out_error_type in FILTER_OUT_ERRORS
]
):
return None
if issubclass(exc_type, MobilicError) and any(
[
regexp.search(exc_value.message) is not None
for regexp in FILTER_OUT_RE_FOR_MOBILIC_ERRORS
]
):
return None
return event
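# A rough sketch of how the hook behaves (event/hint shapes are simplified;
# Sentry passes richer structures in practice):
#
#   try:
#       raise NotFound()
#   except NotFound:
#       import sys
#       hint = {"exc_info": sys.exc_info()}
#       assert filter_errors({"event_id": "abc"}, hint) is None  # dropped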
def setup_sentry():
sentry_sdk.init(
dsn=app.config["SENTRY_URL"],
integrations=[FlaskIntegration()],
environment=MOBILIC_ENV,
before_send=filter_errors,
)
| 1.9375 | 2 |
Select_Data_loc.py | jonathan-JIPSlok/Aprendendo_Pandas | 1 | 12768109 | <reponame>jonathan-JIPSlok/Aprendendo_Pandas<filename>Select_Data_loc.py
import pandas as pd
import openpyxl
df = pd.read_excel("Arquivo.xlsx", engine="openpyxl")
print(df.loc[0])  # Select a single row
print()
print(df.loc[0:4])  # Select a range of rows
print()
print(df.loc[[0, 3, 5]])  # Select only rows 0, 3 and 5
print()
print(df.loc[3:6, "Nome"])  # Select rows 3 through 6 of the "Nome" column
print()
print(df.loc[3:6, ["Nome", "Idade"]])  # Select the same rows from two columns
print()
print(df.loc[4:5, "Nome":"ID"])  # Select rows 4 to 5 from column "Nome" through column "ID"
print()
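# Note: unlike positional slicing, .loc slices by label and is inclusive on
# both ends, so df.loc[0:4] returns five rows when labels 0 through 4 exist.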
| 3.78125 | 4 |
standalones/create_SSM_instance.py | KIT-IBT/AugmentA | 0 | 12768110 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 14:55:02 2021
@author: <NAME>
Copyright 2021 <NAME>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import h5py
import argparse
import pyvista as pv
import numpy as np
def parser():
parser = argparse.ArgumentParser(description='Cut veins manually')
parser.add_argument('--SSM_file',
type=str,
default="",
help='path to SSM')
parser.add_argument('--coefficients_file',
type=str,
default="",
help='path to SSM coefficients')
parser.add_argument('--output_file',
type=str,
default="",
help='path to output')
return parser
def create_SSM_instance(SSM_file, coefficients_file, output_file):
r = np.loadtxt(coefficients_file, delimiter=',')
with h5py.File(SSM_file, "r") as f:
mean_pts = list(f["model"]["mean"])[0]
mean_cells = np.vstack(list(f["representer"]["cells"])).T
mean_cells = np.c_[np.ones(len(mean_cells),dtype=int)*3,mean_cells]
pca_basisfunctions = np.vstack(list(f["model"]["pcaBasis"])).T
pca_var = list(f["model"]["pcaVariance"])[0]
for i in range(len(pca_basisfunctions)):
mean_pts = mean_pts + r[i]*pca_basisfunctions[i,:]*np.sqrt(pca_var[i])
mean_pts = mean_pts.reshape(int(len(mean_pts)/3),3)
surf = pv.PolyData(mean_pts, mean_cells)
pv.save_meshio(output_file, surf, "obj")
def run():
args = parser().parse_args()
create_SSM_instance(args.SSM_file, args.coefficients_file, args.output_file)
if __name__ == '__main__':
run() | 2.3125 | 2 |
model_train3.py | chunnuanhuakai/360finance | 0 | 12768111 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 15:21:02 2018
@author: zbj
"""
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import metrics
from xgboost import XGBClassifier
import pandas as pd
from sklearn.metrics import accuracy_score
## Full training sample
root = "E:\\liuhongbing\\360finance\\open_data_train_valid1\\open_data_train_valid\\train\\";
train_by_zero = pd.read_table(root+'train_sample.txt', sep='\t')
Y = train_by_zero['tag']
X = train_by_zero.iloc[:,3:5081]
train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.3, random_state=0)
#
#tuned_parameters= [{'n_estimators':[100,200,500],
# 'max_depth':[3,5,7], ##range(3,10,2)
# 'learning_rate':[0.5, 1.0],
# 'subsample':[0.75,0.8,0.85,0.9]
# }]
tuned_parameters= [{'n_estimators':[100,200,500,1000]
}]
train_param = {'max_depth':6,            # tree depth
               'learning_rate':0.3,      # learning rate
               'nthread':4,
               'min_child_weight':1,     # minimum summed instance weight in a leaf
               'gamma':0.1,
               'subsample':1,            # train on all samples
               'reg_lambda':1,           # L2 regularization term
               'colsample_bytree':0.8,   # use 80% of the features per tree
               'n_estimators':100,
               'scale_pos_weight':2,     ## positive/negative sample ratio
               'seed':100
               }
clf = GridSearchCV(XGBClassifier(**train_param),
param_grid=tuned_parameters,
scoring='roc_auc',
n_jobs=4,
iid=False,
cv=5)
clf.fit(train_x, train_y)
##clf.grid_scores_, clf.best_params_, clf.best_score_
print(clf.best_params_)
pred_y = clf.predict(test_x)
accuracy = accuracy_score(test_y, pred_y)
print("accuarcy: %.2f%%" % (accuracy*100.0))
y_proba=clf.predict_proba(test_x)[:,1]
print("AUC Score (Train): %f" % metrics.roc_auc_score(test_y, y_proba))
| 2.46875 | 2 |
test_3DLoMatch.py | ZhiChen902/SC2-PCR | 0 | 12768112 | import json
import sys
sys.path.append('.')
import os
import argparse
import logging
from tqdm import tqdm
from easydict import EasyDict as edict
from evaluate_metric import TransformationLoss, ClassificationLoss
from dataset import ThreeDLoMatchLoader
from benchmark_utils import set_seed, icp_refine
from benchmark_utils_predator import *
from utils.timer import Timer
from SC2_PCR import Matcher
set_seed()
from utils.SE3 import *
from collections import defaultdict
def eval_3DLoMatch_scene(loader, matcher, trans_evaluator, cls_evaluator, scene_ind, config):
num_pair = loader.__len__()
final_poses = np.zeros([num_pair, 4, 4])
# 0.success, 1.RE, 2.TE, 3.input inlier number, 4.input inlier ratio, 5. output inlier number
# 6. output inlier precision, 7. output inlier recall, 8. output inlier F1 score 9. model_time, 10. data_time 11. scene_ind
stats = np.zeros([num_pair, 12])
data_timer, model_timer = Timer(), Timer()
with torch.no_grad():
for i in tqdm(range(num_pair)):
#################################
# 1. load data
#################################
data_timer.tic()
src_keypts, tgt_keypts, src_features, tgt_features, gt_trans = loader.get_data(i)
data_time = data_timer.toc()
#################################
# 2. match descriptor and compute rigid transformation
#################################
model_timer.tic()
pred_trans, pred_labels, src_keypts_corr, tgt_keypts_corr = matcher.estimator(src_keypts, tgt_keypts,
src_features, tgt_features)
model_time = model_timer.toc()
#################################
# 3. generate the ground-truth classification result
#################################
frag1_warp = transform(src_keypts_corr, gt_trans)
distance = torch.sum((frag1_warp - tgt_keypts_corr) ** 2, dim=-1) ** 0.5
gt_labels = (distance < config.inlier_threshold).float()
#################################
# 4. evaluate result
#################################
loss, recall, Re, Te, rmse = trans_evaluator(pred_trans, gt_trans, src_keypts_corr, tgt_keypts_corr,
pred_labels)
class_stats = cls_evaluator(pred_labels, gt_labels)
#################################
# record the evaluation results.
#################################
# save statistics
stats[i, 0] = float(recall / 100.0) # success
stats[i, 1] = float(Re) # Re (deg)
stats[i, 2] = float(Te) # Te (cm)
stats[i, 3] = int(torch.sum(gt_labels)) # input inlier number
stats[i, 4] = float(torch.mean(gt_labels.float())) # input inlier ratio
stats[i, 5] = int(torch.sum(gt_labels[pred_labels > 0])) # output inlier number
stats[i, 6] = float(class_stats['precision']) # output inlier precision
stats[i, 7] = float(class_stats['recall']) # output inlier recall
stats[i, 8] = float(class_stats['f1']) # output inlier f1 score
stats[i, 9] = model_time
stats[i, 10] = data_time
stats[i, 11] = scene_ind
final_poses[i] = pred_trans[0].detach().cpu().numpy()
return stats, final_poses
def eval_3DLoMatch(config):
loader = ThreeDLoMatchLoader(root=config.data_path,
descriptor=config.descriptor,
inlier_threshold=config.inlier_threshold,
num_node=config.num_node,
use_mutual=config.use_mutual,
)
matcher = Matcher(inlier_threshold=config.inlier_threshold,
num_node=config.num_node,
use_mutual=config.use_mutual,
d_thre=config.d_thre,
num_iterations=config.num_iterations,
ratio=config.ratio,
nms_radius=config.nms_radius,
max_points=config.max_points,
k1=config.k1,
k2=config.k2, )
trans_evaluator = TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre)
cls_evaluator = ClassificationLoss()
allpair_stats, allpair_poses = eval_3DLoMatch_scene(loader, matcher, trans_evaluator, cls_evaluator, 0, config)
benchmark_predator(allpair_poses, gt_folder='benchmarks/3DLoMatch')
# benchmarking using the registration recall defined in DGR
allpair_average = allpair_stats.mean(0)
correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
logging.info(f"*" * 40)
logging.info(f"All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}")
logging.info(f"\tInput: Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)")
logging.info(f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)")
logging.info(f"\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s")
# all_stats_npy = np.concatenate([v for k, v in all_stats.items()], axis=0)
return allpair_stats
def benchmark_predator(pred_poses, gt_folder):
scenes = sorted(os.listdir(gt_folder))
scene_names = [os.path.join(gt_folder,ele) for ele in scenes]
re_per_scene = defaultdict(list)
te_per_scene = defaultdict(list)
re_all, te_all, precision, recall = [], [], [], []
n_valids= []
short_names=['Kitchen','Home 1','Home 2','Hotel 1','Hotel 2','Hotel 3','Study','MIT Lab']
logging.info(("Scene\t¦ prec.\t¦ rec.\t¦ re\t¦ te\t¦ samples\t¦"))
start_ind = 0
for idx,scene in enumerate(scene_names):
# ground truth info
gt_pairs, gt_traj = read_trajectory(os.path.join(scene, "gt.log"))
n_valid=0
for ele in gt_pairs:
diff=abs(int(ele[0])-int(ele[1]))
n_valid+=diff>1
n_valids.append(n_valid)
n_fragments, gt_traj_cov = read_trajectory_info(os.path.join(scene,"gt.info"))
# estimated info
# est_pairs, est_traj = read_trajectory(os.path.join(est_folder,scenes[idx],'est.log'))
est_traj = pred_poses[start_ind:start_ind + len(gt_pairs)]
start_ind = start_ind + len(gt_pairs)
temp_precision, temp_recall,c_flag = evaluate_registration(n_fragments, est_traj, gt_pairs, gt_pairs, gt_traj, gt_traj_cov)
# Filter out the estimated rotation matrices
ext_gt_traj = extract_corresponding_trajectors(gt_pairs,gt_pairs, gt_traj)
re = rotation_error(torch.from_numpy(ext_gt_traj[:,0:3,0:3]), torch.from_numpy(est_traj[:,0:3,0:3])).cpu().numpy()[np.array(c_flag)==0]
te = translation_error(torch.from_numpy(ext_gt_traj[:,0:3,3:4]), torch.from_numpy(est_traj[:,0:3,3:4])).cpu().numpy()[np.array(c_flag)==0]
re_per_scene['mean'].append(np.mean(re))
re_per_scene['median'].append(np.median(re))
re_per_scene['min'].append(np.min(re))
re_per_scene['max'].append(np.max(re))
te_per_scene['mean'].append(np.mean(te))
te_per_scene['median'].append(np.median(te))
te_per_scene['min'].append(np.min(te))
te_per_scene['max'].append(np.max(te))
re_all.extend(re.reshape(-1).tolist())
te_all.extend(te.reshape(-1).tolist())
precision.append(temp_precision)
recall.append(temp_recall)
logging.info("{}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:3d}¦".format(short_names[idx], temp_precision, temp_recall, np.median(re), np.median(te), n_valid))
# np.save(f'{est_folder}/{scenes[idx]}/flag.npy',c_flag)
weighted_precision = (np.array(n_valids) * np.array(precision)).sum() / np.sum(n_valids)
logging.info("Mean precision: {:.3f}: +- {:.3f}".format(np.mean(precision),np.std(precision)))
logging.info("Weighted precision: {:.3f}".format(weighted_precision))
logging.info("Mean median RRE: {:.3f}: +- {:.3f}".format(np.mean(re_per_scene['median']), np.std(re_per_scene['median'])))
logging.info("Mean median RTE: {:.3F}: +- {:.3f}".format(np.mean(te_per_scene['median']),np.std(te_per_scene['median'])))
if __name__ == '__main__':
from config import str2bool
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', default='', type=str, help='snapshot dir')
parser.add_argument('--solver', default='SVD', type=str, choices=['SVD', 'RANSAC'])
parser.add_argument('--use_icp', default=False, type=str2bool)
parser.add_argument('--save_npy', default=False, type=str2bool)
args = parser.parse_args()
config_path = args.config_path
config = json.load(open(config_path, 'r'))
config = edict(config)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.CUDA_Devices
if not os.path.exists("./logs"):
os.makedirs("./logs")
log_filename = f'logs/3DLoMatch-{config.descriptor}.log'
logging.basicConfig(level=logging.INFO,
filename=log_filename,
filemode='a',
format="")
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
# evaluate on the test set
stats = eval_3DLoMatch(config)
if args.save_npy:
save_path = log_filename.replace('.log', '.npy')
np.save(save_path, stats)
print(f"Save the stats in {save_path}")
| 1.78125 | 2 |
src/python/stup/packet/syn.py | Wizmann/STUP-Protocol | 14 | 12768113 | <filename>src/python/stup/packet/syn.py
#!/usr/bin/python
from .packet import Packet
class SynPacket(Packet):
def __init__(self, *args, **kwargs):
        # super(self.__class__, ...) recurses infinitely when subclassed;
        # name the class explicitly instead.
        super(SynPacket, self).__init__(*args, **kwargs)
self.syn = 1
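# A minimal usage sketch, assuming the base Packet constructor accepts no
# required positional arguments:
#
#   pkt = SynPacket()
#   assert pkt.syn == 1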
| 2.3125 | 2 |
Question 7.py | Mkez45634/Python-Coding-Challenges | 0 | 12768114 | <reponame>Mkez45634/Python-Coding-Challenges<filename>Question 7.py
def signFinder(s):
    # Count the plus and minus signs in the string.
    plus = s.count("+")
    minus = s.count("-")
    total = plus + minus
    # True when the string contains exactly one sign character.
    return total == 1 | 3.90625 | 4 |
postgres_connector/errors.py | sandboxws/beam-postgres-connector | 0 | 12768115 | <filename>postgres_connector/errors.py
"""Postgres Connector error classes."""
class PostgresConnectorError(Exception):
"""Base class for all errors."""
class PostgresClientError(PostgresConnectorError):
"""An error specific to the PostgreSQL driver."""
| 2.171875 | 2 |
zdpapi_file/directory.py | zhangdapeng520/zdpapi_file | 1 | 12768116 | """
Directory helpers
"""
import os
class Directory:
def __init__(self, path:str) -> None:
"""
        path: directory path
"""
self.path = path
def disk_usage(self):
"""
        Report the total disk usage of the directory, in bytes.
"""
        # Use an inner function so the recursion does not need self.
def inner_disk_usage(path):
total = os.path.getsize(path)
            if os.path.isdir(path):  # if it is a directory
for file_name in os.listdir(path):
child_path = os.path.join(path, file_name)
total += inner_disk_usage(child_path)
            # Print the accumulated size of the current path.
print("{0:<7}".format(total), path)
return total
        # Invoke the inner recursive function.
return inner_disk_usage(self.path) | 3.59375 | 4 |
Downloader.py | MRLSK8/Youtube_Downloader | 0 | 12768117 | <reponame>MRLSK8/Youtube_Downloader
from pytube import YouTube
yt = YouTube(str(input("Enter the video link: ")))
videos = yt.streams.filter(subtype='mp4').all() # Gets all download options
print("\n")
counter = 1
for types in videos:
print(str(counter) + ". "+str(types))
counter += 1
quality = int(input("\nEnter the number of the video: "))
qualityChosen = videos[quality - 1]
destination = str(input("Enter the destination: "))
print("\n Wait, downloading ...\n")
qualityChosen.download(destination) #Download the video
print("\n Video: " + yt.title + "\n Has been successfully downloaded ") | 3.265625 | 3 |
examples/counter/send_one.py | ptcrews/p4-mininet-tutorials | 23 | 12768118 | from scapy.all import *
p = Ether(src="aa:bb:cc:dd:ee:ff") / IP(dst="10.0.1.10") / TCP() / "aaaaaaaaaaaaaaaa"
sendp(p, iface = "veth1", verbose = 0)
| 1.960938 | 2 |
src/hammer-vlsi/test_tool_utils.py | XiaoSanchez/hammer | 138 | 12768119 | <reponame>XiaoSanchez/hammer<filename>src/hammer-vlsi/test_tool_utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Helper and utility classes for testing HammerTool.
#
# See LICENSE for licence details.
import json
import os
import tempfile
from abc import ABCMeta, abstractmethod
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import hammer_tech
from hammer_tech import LibraryFilter
from hammer_config import HammerJSONEncoder
import hammer_vlsi
class SingleStepTool(hammer_vlsi.DummyHammerTool, metaclass=ABCMeta):
"""
Helper class to define a single-step tool in tests.
"""
@property
def steps(self) -> List[hammer_vlsi.HammerToolStep]:
return self.make_steps_from_methods([
self.step
])
@abstractmethod
def step(self) -> bool:
"""
Implement this method for the single step.
:return: True if the step passed
"""
pass
class DummyTool(SingleStepTool):
"""
A dummy tool that does nothing and always passes.
"""
def step(self) -> bool:
return True
class HammerToolTestHelpers:
"""
Helper functions to aid in the testing of IP library filtering/processing.
"""
@staticmethod
def create_tech_dir(tech_name: str) -> Tuple[str, str]:
"""
Create a temporary folder for a test technology.
Note: the caller is responsible for removing the tech_dir_base folder
after use!
:param tech_name: Technology name (e.g. "asap7")
        :return: Tuple of the created tech_dir and tech_dir_base (which the caller
must delete)
"""
tech_dir_base = tempfile.mkdtemp()
tech_dir = os.path.join(tech_dir_base, tech_name)
os.mkdir(tech_dir)
tech_init_py = os.path.join(tech_dir, "__init__.py")
with open(tech_init_py, "w") as f: # pylint: disable=invalid-name
f.write("from hammer_tech import HammerTechnology\nclass {t}Technology(HammerTechnology):\n pass\ntech = {t}Technology()".format(
t=tech_name))
return tech_dir, tech_dir_base
@staticmethod
def write_tech_json(
tech_json_filename: str,
postprocessing_func: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None) -> None:
"""
Write a dummy tech JSON to the given filename with the given
postprocessing.
"""
tech_json = {
"name": "dummy28",
"grid_unit": "0.001",
"time_unit": "1 ns",
"installs": [
{
"path": "test",
"base var": "" # means relative to tech dir
}
],
"libraries": [
{"milkyway techfile": "test/soy"},
{"openaccess techfile": "test/juice"},
{"milkyway techfile": "test/coconut"},
{
"openaccess techfile": "test/orange",
"provides": [
{"lib_type": "stdcell"}
]
},
{
"openaccess techfile": "test/grapefruit",
"provides": [
{"lib_type": "stdcell"}
]
},
{
"openaccess techfile": "test/tea",
"provides": [
{"lib_type": "technology"}
]
},
]
} # type: Dict[str, Any]
if postprocessing_func is not None:
tech_json = postprocessing_func(tech_json)
with open(tech_json_filename, "w") as f: # pylint: disable=invalid-name
f.write(json.dumps(tech_json, cls=HammerJSONEncoder, indent=4))
@staticmethod
def make_test_filter() -> LibraryFilter:
"""
        Make a test filter that returns libraries with openaccess techfiles, putting libraries that provide 'technology'
        as their lib_type first, with the rest sorted by the openaccess techfile.
"""
def filter_func(lib: hammer_tech.Library) -> bool:
return lib.openaccess_techfile is not None
def paths_func(lib: hammer_tech.Library) -> List[str]:
assert lib.openaccess_techfile is not None
return [lib.openaccess_techfile]
def sort_func(lib: hammer_tech.Library) -> Union[Number, str, tuple]:
assert lib.openaccess_techfile is not None
if lib.provides is not None and len(
list(filter(lambda x: x is not None and x.lib_type == "technology", lib.provides))) > 0:
# Put technology first
return (0, "")
else:
return (1, str(lib.openaccess_techfile))
return LibraryFilter.new(
filter_func=filter_func,
paths_func=paths_func,
tag="test", description="Test filter",
is_file=True,
sort_func=sort_func
)
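# A minimal usage sketch of the helpers above (cleanup is the caller's
# responsibility, as create_tech_dir's docstring notes):
#
#   import shutil
#   tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir("dummy28")
#   HammerToolTestHelpers.write_tech_json(os.path.join(tech_dir, "dummy28.json"))
#   ...  # run the test against tech_dir
#   shutil.rmtree(tech_dir_base)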
| 2.40625 | 2 |
library-stats.py | yzhs/calibre-library-stats | 0 | 12768120 | import os
from os.path import expanduser
import altair as alt
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
import sqlite3
from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot
from config import dummy_start_date, dummy_end_date, cutoff_date
# %matplotlib inline
plot_start_date = dummy_start_date
plot_end_date = dummy_end_date
if cutoff_date is not None:
plot_start_date = cutoff_date
day = np.timedelta64(1, 'D')
fiction_scale = alt.Scale(domain=[True, False])
def get_data(library_paths=[expanduser('~/books/non-fiction/')]):
db_path = library_paths[0] + 'metadata.db'
conn = sqlite3.connect(db_path)
custom_column_index = dict(pd.read_sql_query("""
SELECT label, id FROM custom_columns
""", conn).to_dict(orient='split')['data'])
def tbl(name):
return 'custom_column_' + str(custom_column_index[name])
df = pd.read_sql_query(f"""
SELECT
title,
author_sort AS author,
series.name AS series,
series_index,
pubdate,
timestamp,
last_modified,
languages.lang_code AS language,
{tbl('started')}.value AS start,
{tbl('finished')}.value AS end,
{tbl('words')}.value AS words,
{tbl('pages')}.value AS pages,
{tbl('fre')}.value AS fre,
{tbl('fkg')}.value AS fkg,
{tbl('gfi')}.value AS gfi,
({tbl('shelf')}.value = 'Fiction') AS is_fiction,
ifnull({tbl('read')}.value, 0) AS is_read
FROM books
LEFT OUTER JOIN books_series_link
ON books.id = books_series_link.book
LEFT OUTER JOIN series
ON books_series_link.series = series.id
JOIN books_languages_link
ON books.id = books_languages_link.book
JOIN languages
ON books_languages_link.lang_code = languages.id
LEFT OUTER JOIN {tbl('pages')}
ON {tbl('pages')}.book = books.id
LEFT OUTER JOIN {tbl('words')}
ON {tbl('words')}.book = books.id
LEFT OUTER JOIN {tbl('fre')}
ON {tbl('fre')}.book = books.id
LEFT OUTER JOIN {tbl('fkg')}
ON {tbl('fkg')}.book = books.id
LEFT OUTER JOIN {tbl('gfi')}
ON {tbl('gfi')}.book = books.id
JOIN books_{tbl('shelf')}_link
ON books_{tbl('shelf')}_link.book = books.id
JOIN {tbl('shelf')}
ON {tbl('shelf')}.id = books_{tbl('shelf')}_link.value
LEFT OUTER JOIN {tbl('started')}
ON {tbl('started')}.book = books.id
LEFT OUTER JOIN {tbl('finished')}
ON {tbl('finished')}.book = books.id
LEFT OUTER JOIN {tbl('read')} ON {tbl('read')}.book = books.id
WHERE
{tbl('shelf')}.value = 'Fiction'
OR {tbl('shelf')}.value = 'Nonfiction'
""", conn, parse_dates=['start', 'end', 'pubdate', 'timestamp',
'last_modified'])
# Books with no page count are either simply placeholders, not a
# proper part of the library, or have just been added. In both
# cases, it is OK to ignore them.
df = df.loc[df.pages.notna()]
# Fix data types
df.language = df.language.astype('category')
df.pages = df.pages.astype('int64')
# We cannot make df.words an int64 column, as some PDF files have
# no word count associated with them and int64 columns cannot
# contain NAs.
df.is_fiction = df.is_fiction.astype(bool)
df.is_read = df.is_read.astype(bool)
# Compute intermediate columns
df.pubdate = df.pubdate.map(to_local)
df = df.assign(words_per_page=df.words / df.pages,
words_per_day=df.words / ((df.end - df.start) / day))
def to_numeric(x):
return pd.to_numeric(x, errors='coerce', downcast='integer')
df = df.assign(finished_year=to_numeric(df.end.map(to_year)),
finished_month=to_numeric(df.end.map(to_month)),
finished_day=to_numeric(df.end.map(to_day)))
df = df.assign(pubyear=to_numeric(df.pubdate.map(to_year)),
pubmonth=to_numeric(df.pubdate.map(to_month)),
pubday=to_numeric(df.pubdate.map(to_day)))
df.sort_values('start', inplace=True)
return df
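# Note: the query above assumes the Calibre library defines custom columns
# labelled 'started', 'finished', 'words', 'pages', 'fre', 'fkg', 'gfi',
# 'shelf' and 'read'; tbl() maps each label to its backing table, e.g. a
# library where 'pages' has id 4 yields tbl('pages') == 'custom_column_4'
# (the id is illustrative).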
def plot_ranges(df, output='ranges.html'):
"""Print date ranges in which the books have been is_read, how many
books have been is_read at any given point in time and how many words
have been is_read per day.
"""
if cutoff_date is not None:
# df = df[(df.start >= cutoff_date) & (df.end >= cutoff_date)]
df = df[df.end.isna() | (df.end >= cutoff_date)]
    df = df.assign(end=df.end.fillna(dummy_end_date))
df = df[df.start.notna()].assign(ys=-allocate_ys(df[df.start.notna()]))
bars = alt.Chart(df) \
.mark_bar(clip=True) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('ys:N', axis=None),
color=alt.Color('is_fiction', scale=fiction_scale, legend=None),
tooltip='title'
)
bars.width = 1600
overlapped = alt.Chart(df[df.start.notna()]) \
.mark_bar(clip=True, opacity=0.1) \
.encode(
x=alt.X('start', axis=None),
x2='end',
y=alt.Y('is_fiction', axis=None),
color=alt.Color('is_fiction', scale=fiction_scale, legend=None)
)
overlapped.width = bars.width
baz = df[df.series.notna()]
if cutoff_date is not None:
baz = baz[baz.start.notna() & (baz.end.isna() |
(baz.end >= cutoff_date))]
else:
baz = baz[df.start.notna()]
by_series = alt.Chart(baz) \
.mark_bar(clip=True, opacity=0.7) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('series', title='Series'),
tooltip='title'
)
by_series.width = bars.width
baz = df[df.author.notna()]
if cutoff_date is not None:
baz = baz[baz.start.notna() & (baz.end.isna() |
(baz.end >= cutoff_date))]
else:
baz = baz[df.start.notna()]
    baz = baz.assign(ys=-allocate_ys(baz[baz.start.notna()]))
by_author = alt.Chart(baz) \
.mark_bar(clip=True, opacity=0.7) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('author', title='Author'),
color='series',
tooltip='title'
)
by_author.width = bars.width
save_plot(overlapped & bars & by_series, output)
save_plot(by_author, 'by_author.html')
def plot_yearly(df, y='count()', output='finished.html'):
chart = alt.Chart(df[df.is_read & df.end]) \
.mark_bar() \
.encode(
x='finished_year:O',
y=y,
color=alt.Color('is_fiction', scale=fiction_scale),
)
save_plot(chart, output)
def number_of_books_per_author(df, output='books_per_author.html'):
df = df[df.is_read]
x = df.author.value_counts()
foo = pd.DataFrame(data={'author': x.index,
'count': x.values})
foo.sort_values('count', ascending=False, inplace=True)
chart = alt.Chart(foo) \
.mark_bar() \
.encode(y=alt.Y('author', sort=None), x='count')
save_plot(chart, output)
def plot_pubdate(df, output='pubdate.html'):
df = df[df.pubdate.notna()]
years = alt.Chart(df).mark_bar().encode(x='pubyear:O', y='count(year):N')
years_nonfiction = alt.Chart(df[~df.is_fiction]) \
.mark_bar(color='orange') \
.encode(x='pubyear:O', y='count(year):N')
months = alt.Chart(df).mark_bar().encode(x='pubmonth:O',
y='count(pubmonth):N')
days = alt.Chart(df).mark_bar().encode(x='pubday:O', y='count(pubday):N')
years.width = 965
save_plot((years + years_nonfiction) & (months | days), output)
def reading_ease(df):
df = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
opacity = 0.2
color = alt.Color('is_fiction', scale=fiction_scale)
a = alt.Chart(df).mark_point(opacity=opacity) \
.encode(x='fre', y='fkg', color=color)
b = alt.Chart(df).mark_point(opacity=opacity) \
.encode(x='fre', y='gfi', color=color)
save_plot(a | b, 'reading_ease.html')
# blue_patch = mpatches.Patch(label='Fiction')
# orange_patch = mpatches.Patch(label='Nonfiction', color='orange')
#
# def plot_histogram(df):
#     "Plot histogram of how many days I needed to read a book."
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
#
# ax.hist([np.array(df[df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64'),
# np.array(df[~df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64')],
# histtype='barstacked',
# bins=list(range(-7, 1764, 14)))
#
# plt.title('Number of days spent reading a book')
# plt.legend(handles=[blue_patch, orange_patch])
# plt.xlabel("Number of days spent reading")
# plt.ylabel("Number of books")
#
# plt.savefig('histogram.png')
# return plt.show()
#
#
# def scatter_length_duration(df):
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
# df = df[df.words > 0]
# fiction = df[df.is_fiction]
# nonfiction = df[~df.is_fiction]
#
# duration = np.array(fiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(fiction.words.values, duration)
#
# duration = np.array(nonfiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(nonfiction.words.values, duration)
#
# plt.title("Number of words vs. days of reading")
# plt.xlabel("Number of words")
# plt.ylabel("Days spent reading")
# plt.legend(handles=[blue_patch, orange_patch])
#
# plt.savefig('scatter.png')
# return plt.show()
#
#
# def scatter_words_vs_words_per_day(df):
# fig = plt.figure()
# ax = fig.gca()
# ax.set_xscale('log')
# ax.set_yscale('log')
# ax.set_xlabel('Words')
# ax.set_ylabel('Words per day')
# ax.plot(df.words, df.words_per_day, 'o')
os.makedirs('output', exist_ok=True)
df = get_data()
avg_words_per_page = df.words.sum() / df.pages[df.words.notna()].sum()
plot_ranges(df)
number_of_books_per_author(df)
plot_yearly(df, output='books_finished.html')
plot_yearly(df, y='sum(pages)', output='pages_finished.html')
plot_yearly(df, y='sum(words)', output='words_finished.html')
plot_pubdate(df)
values = ('words', 'pages')
table = df.pivot_table(values=values,
index=('is_read', 'is_fiction', 'language'),
aggfunc=np.sum).reset_index()
table = table.assign(combined=list(zip(table.is_fiction, table.is_read)))
chart = alt.Chart(table) \
.mark_bar() \
.encode(column='language',
x='is_read',
y='words',
color='language')
ease_df = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
cor_fre_fkg = pearsonr(ease_df.fre, ease_df.fkg)
cor_fre_gfi = pearsonr(ease_df.fre, ease_df.gfi)
cor_fkg_gfi = pearsonr(ease_df.fkg, ease_df.gfi)
reading_ease(df)
| 2.578125 | 3 |
Binary_Search/Python/jiang42/binary_search.py | sunilshahu/Algorithm-Implementations | 3 | 12768121 | def binary_search(arr, target):
    low, high = 0, len(arr) - 1
    while low <= high:
        # Integer division keeps mid a valid index; float division would
        # raise a TypeError when indexing in Python 3.
        mid = (low + high) // 2
        if arr[mid] == target:
            return mid
        elif arr[mid] > target:
            high = mid - 1
        else:
            low = mid + 1
    # Target is not present in the array.
    return -1
if __name__ == "__main__":
lst = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
print(binary_search(lst,15))
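# Note: binary search requires `arr` to be sorted in ascending order; the
# demo above prints 15, the index of the target value.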
| 3.765625 | 4 |
run/run.py | C-Symonds/MCE | 3 | 12768122 | #########################################################################################
#
# Python run script for parallel OpenMP execution of the MCE / CCS program
# Written by <NAME> 07/10/2020
#
# This script is based on a similar script written by <NAME> using bash. It
# aims to simplify the running process and make the program more usable, as Python
# is widely understood and should make modifications easier to implement.
# The script is designed to compile, copy all required files into an execution folder,
# and submit the program as a job. Included are various checks, output handling,
# parameter setting and module loading procedures. This script can also be used
# for restarting a timed-out simulation by setting the restart parameter to 'YES'.
#
# To run the program, variables must be set/checked in inputs.py and inham.py.
# The following arguments then have to be set in the run folder:
# 1) The number of repeats
# 2) The number of folders/nodes
# 3) The number of parallel cores per folder/node (max 8)
#
# The propagation and basis set generation flags are also set in this file.
# To restart a run, open the run script in the execution folder,
# change the restart parameter to 'YES' and run the copy of this script from the
# execution folder. Starting a run with precalculated basis functions is not yet
# possible for MCE12.
#
#
#########################################################################################
import sys
import socket
import os
import subprocess
import getpass
import random
import shutil
import glob
import csv
import inham
import inputs
#########################################################################################
# VARIABLES TO SET FOR SIMULATION #
#########################################################################################
# Number of repeats
repeats=400
# Number of nodes/folders
nodes=1
#Number of parallel cores per folder/node (max 8)
cores=1
# Name of running folder
# Default : <method>-<system>-<random number> ie CCS-HP-31254
# Otherwise: <method>-<system>-<runfolder string>
Runfolder='clonetests_noclone'
# Generate Basis Set? YES/NO
gen='YES'
# Propagate Basis Set? YES/NO
prop='YES'
# Restart? YES/NO
# To restart a timedout run set to yes and rerun this script from the execution folder
restart='NO'
# Seed value for the random number routine - if you do not specify it
# (leave the default value of 0) one will be generated automatically
SEED=0
#########################################################################################
# END OF INPUTS #
#########################################################################################
# * NO NEED TO SCROLL FURTHER IF USING AS BLACKBOX * #
#########################################################################################
if __name__=="__main__":
#Check basic arguements
if(isinstance(repeats,int)==False):
sys.exit("Number of repeats must be an integer")
elif(isinstance(nodes,int)==False):
sys.exit("Number of folders must be an integer")
elif(isinstance(cores,int)==False):
sys.exit("Number of parallel cores must be an integer")
elif(repeats<1):
sys.exit("Not enough runs selected. Must be 1 or greater")
elif(nodes<1):
sys.exit("Not enough nodes selected. Must be 1 or greater")
elif(nodes>100):
sys.exit("Too many nodes. Maximum of 100 simultaneous submisions")
elif(cores>8):
sys.exit("Too many cores selected. Maximum of 8 available")
elif(cores<1):
sys.exit("Not enough cores selected. Must be 1 or greater")
elif((repeats/nodes)>5000):
sys.exit("Too many repeats per folder. Must be less than 500")
if(restart=="NO"):
if((inputs.Conjugate_Repeats=='YES')and((repeats%(2*nodes*cores))!=0)):
sys.exit("Number of repeats not valid for conjugate repetition. Should be integer multiple of 2*cores*nodes")
elif((repeats%(nodes*cores))!=0):
sys.exit("Number of repeats must be an integer multiple of cores*folders")
elif(nodes*cores>100):
sys.exit("Total number of cores should stay below 100")
        elif(inputs.systems['freqflg'] not in {0,1}):
            sys.exit("Frequency flag must be 0 or 1")
else:
print("Arguments checked")
Hostname=socket.gethostname()
if(Hostname==("login2.arc4.leeds.ac.uk")):
HPCFLG=1
else:
HPCFLG=0
#Might need grid altering calibration test for chlin451 bash code
#if [[ -n $( echo $HOSTNAME | fgrep -e "chmlin451" ) ]]; then
#grdalt=1
#else
#grdalt=0
#fi
#Makes execution folder and run folder
if(HPCFLG==0):
if not os.path.exists("../EXEC"):
os.mkdir("../EXEC")
EXDIR="../EXEC"
else:
# subprocess.run(['module','load','mkl'])
os.environ['LOGNAME']
EXDIR="/nobackup/"+getpass.getuser()
if(Runfolder=="Default"):
Runfolder=inputs.method+"-"+inputs.systems["System"]+"-"+str(repeats)+"-"+str(nodes)+"-"+str(cores)
else:
Runfolder=inputs.method+"-"+inputs.systems["System"]+"-"+Runfolder
if os.path.exists(EXDIR+"/"+Runfolder):
value=input("File already exists do you want to delete it? y/n\n")
if(value=='y'):
shutil.rmtree(EXDIR+"/"+Runfolder)
else:
sys.exit("Runfolder already exists. Change the Runfolder name or delte/move it")
os.mkdir(EXDIR+"/"+Runfolder)
EXDIR1=EXDIR+"/"+Runfolder
mcerunf=os.getcwd()
#Builds result file
result=open(EXDIR1+"/result.sh","w")
result.write("python "+mcerunf+"/collate.py $PWD "+(str(repeats))+" "+str(nodes)+" '"+Runfolder+"' "+(str(HPCFLG))+" '"+prop+"'")
result.close()
subprocess.run(['chmod', 'u+x', EXDIR1+'/result.sh'])
#Copies input files
shutil.copy2("inham.py",EXDIR1)
shutil.copy2("inputs.py",EXDIR1)
shutil.copy2("run.py",EXDIR1)
shutil.copy2("combine.py",EXDIR1)
#Makes the program input file
if(inputs.method=="MCE12"):
for i in range(2):
with open('rundata'+str(i+1)+'.csv','w',newline='')as file:
writer = csv.writer(file)
writer.writerow([gen,prop,restart,inputs.cmprss,('MCEv'+str(i+1)),int(repeats/nodes),inputs.Conjugate_Repeats])
writer.writerow(inputs.systems.values())
writer.writerow(inputs.parameters.values())
writer.writerow(inputs.Train.values())
writer.writerow(inputs.clone.values())
writer.writerow(inputs.paramz.values())
writer.writerow(inham.EL.values())
writer.writerow(inputs.prop.values())
if(inputs.systems['System']=='MP'):
writer.writerow(inham.MP.values())
elif(inputs.systems['System']=='HP'):
writer.writerow(inham.HP.values())
else:
writer.writerow(inham.SB.values())
shutil.copy2('rundata'+str(i+1)+'.csv',EXDIR1)
else:
with open('rundata.csv','w',newline='')as file:
writer = csv.writer(file)
writer.writerow([gen,prop,restart,inputs.cmprss,inputs.method,int(repeats/nodes),inputs.Conjugate_Repeats])
writer.writerow(inputs.systems.values())
writer.writerow(inputs.parameters.values())
writer.writerow(inputs.Train.values())
writer.writerow(inputs.clone.values())
writer.writerow(inputs.paramz.values())
writer.writerow(inham.EL.values())
writer.writerow(inputs.prop.values())
if(inputs.systems['System']=='MP'):
writer.writerow(inham.MP.values())
elif(inputs.systems['System']=='HP'):
writer.writerow(inham.HP.values())
else:
writer.writerow(inham.SB.values())
shutil.copy2("rundata.csv",EXDIR1)
for file in glob.glob(mcerunf+"/*.csv"):
os.remove(file)
#Makes subfolders
if(inputs.method=="MCE12"):
os.mkdir(EXDIR1+"/MCEv1")
os.mkdir(EXDIR1+"/MCEv2")
for j in range(2):
for i in range (nodes):
os.mkdir(EXDIR1+"/MCEv"+str(j+1)+"/run-"+str(i+1))
else:
for i in range(nodes):
path=os.path.join(EXDIR1,"run-"+str(i+1))
os.mkdir(EXDIR1+"/run-"+str(i+1))
#Selects the right make file and executes
os.chdir("../build")
if(HPCFLG==1):
shutil.copy2("../build/makefile_arc","../build/Makefile")
subprocess.run(["make"])
else:
shutil.copy2("../build/makefile_chmlin","../build/Makefile")
subprocess.run(["make"])
shutil.copy2("MCE.exe",EXDIR1)
shutil.copy2("interpolate.exe",EXDIR1)
shutil.copy2("subavrg.exe",EXDIR1)
if(inputs.systems['freqflg']==1):
if os.path.exists(mcerunf+"/freq.dat"):
shutil.copy2(mcerunf+"/freq.dat",EXDIR1)
else:
subprocess.run(["./integrator.exe"])
shutil.copy2("freq.dat",EXDIR1)
os.chdir(EXDIR1)
EXDIR1=os.getcwd()
if(gen=='NO'):
if(inputs.method=="AIMC-MCE2"):
if (glob.glob(mcerunf+"Outbs-001-00000-0_*.out")) or (glob.glob(mcerunf+"/Outbs-0001-00000-0_*.out")):
print("Outbs-0001-00000-0_*.out found in"+mcerunf)
for file in glob.glob(mcerunf+"/Outbs-*.out"):
shutil.copy2(file,EXDIR1)
else:
sys.exit("Outbs-001-00000-0_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present")
if (glob.glob(mcerunf+"Clonetrack-001_*.out")) or (glob.glob(mcerunf+"/Clonetrack-0001_*.out")):
print("Clonetrack-0001_*.out found in"+mcerunf)
for file in glob.glob(mcerunf+"/Clonetrack-*.out"):
shutil.copy2(file,EXDIR1)
else:
sys.exit("Clonetrack-001_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present")
else:
if (glob.glob(mcerunf+"Outbs-001_*.out")) or (glob.glob(mcerunf+"/Outbs-0001_*.out")):
print("Outbs-0001_*.out found in"+mcerunf)
for file in glob.glob(mcerunf+"/Outbs-*.out"):
shutil.copy2(file,EXDIR1)
else:
sys.exit("Outbs-001_*.out not found in runfolder.")
if(inputs.clone['cloning']=='yes'):
if (glob.glob(mcerunf+"Clonetrack-001_*.out")) or (glob.glob(mcerunf+"/Clonetrack-0001_*.out")):
print("Clonetrack-0001_*.out found in"+mcerunf)
for file in glob.glob(mcerunf+"/Clonetrack-*.out"):
shutil.copy2(file,EXDIR1)
else:
sys.exit("Clonetrack-001_*.out not found in runfolder")
if(inputs.method=="MCE12"):
for j in range(2):
for i in range (nodes):
shutil.copy2("MCE.exe","MCEv"+str(j+1)+"/run-"+str(i+1))
shutil.copy2('rundata'+str(j+1)+'.csv',"MCEv"+str(j+1)+"/run-"+str(i+1)+"/rundata.csv")
if(inputs.systems['freqflg']==1):
                    for k in range(repeats//nodes):
shutil.copy2("freq.dat","MCEv"+str(j+1)+"/run-"+str(i+1)+"/freq"+str(k+1)+".dat")
else:
for i in range (nodes):
shutil.copy2("MCE.exe","run-"+str(i+1))
shutil.copy2("rundata.csv","run-"+str(i+1))
if(inputs.systems['freqflg']==1):
                for k in range(repeats//nodes):
                    shutil.copy2("freq.dat","run-"+str(i+1)+"/freq"+str(k+1)+".dat")
if(gen=='NO'):
for file in glob.glob("Outbs-*.out"):
shutil.copy2(file,"/run-"+str(i+1))
if(inputs.clone['cloning']=='yes'):
for file in glob.glob("Clonetrack-*.out"):
shutil.copy2(file,"/run-"+str(i+1))
elif(restart=='YES'):
Hostname=socket.gethostname()
if(Hostname==("login2.arc4.leeds.ac.uk")):
HPCFLG=1
else:
HPCFLG=0
EXDIR1=os.getcwd()
if not os.path.exists("../Outbsbackup"):
os.mkdir("../Outbsbackup")
if(inputs.method=="MCE12"):
for j in range(2):
            with open('rundata'+str(j+1)+'.csv','w',newline='') as file:
writer = csv.writer(file)
writer.writerow(['NO',prop,restart,inputs.cmprss,('MCEv'+str(j+1)),int(repeats/nodes),'NO'])
writer.writerow(inputs.systems.values())
writer.writerow(inputs.parameters.values())
writer.writerow(inputs.Train.values())
writer.writerow(inputs.clone.values())
writer.writerow(inputs.paramz.values())
writer.writerow(inham.EL.values())
writer.writerow(inputs.prop.values())
if(inputs.systems['System']=='MP'):
writer.writerow(inham.MP.values())
elif(inputs.systems['System']=='HP'):
writer.writerow(inham.HP.values())
else:
writer.writerow(inham.SB.values())
for i in range(nodes):
p=1
q=1
for file in glob.glob('MCEv'+str(j+1)+'/run-'+str(i+1)+'/Outbs-*.out'):
shutil.copy2(file,'../Outbsbackup/Outbs-'+str(j+1)+'_'+str(p)+'.out_'+str(i+1)+'.out')
p=p+1
                if((inputs.clone['cloning'])!='no'):
for file in glob.glob('MCEv'+str(j+1)+'/run-'+str(i+1)+'/clonearr-*.out'):
                        shutil.copy2(file,'../Outbsbackup/clonearr-'+str(j+1)+'_'+str(q)+'.out_'+str(i+1)+'.out')
q=q+1
shutil.copy2('rundata'+str(j+1)+'.csv',"MCEv"+str(j+1)+"/run-"+str(i+1)+"/rundata.csv")
else:
with open('rundata.csv','w',newline='')as file:
writer = csv.writer(file)
writer.writerow(['NO',prop,restart,inputs.cmprss,inputs.method,int(repeats/nodes),'NO'])
writer.writerow(inputs.systems.values())
writer.writerow(inputs.parameters.values())
writer.writerow(inputs.Train.values())
writer.writerow(inputs.clone.values())
writer.writerow(inputs.paramz.values())
writer.writerow(inham.EL.values())
writer.writerow(inputs.prop.values())
if(inputs.systems['System']=='MP'):
writer.writerow(inham.MP.values())
elif(inputs.systems['System']=='HP'):
writer.writerow(inham.HP.values())
else:
writer.writerow(inham.SB.values())
for i in range (nodes):
p=1
q=1
for file in glob.glob('run-'+str(i+1)+'/Outbs-00*.out'):
shutil.copy2(file,'../Outbsbackup/Outbs-'+str(p)+'.out_'+str(i+1)+'.out')
p=p+1
                if((inputs.clone['cloning'])!='no'):
for file in glob.glob('run-'+str(i+1)+'/clonearr-*.out'):
                        shutil.copy2(file,'../Outbsbackup/clonearr-'+str(q)+'.out_'+str(i+1)+'.out')
q=q+1
shutil.copy2("rundata.csv","run-"+str(i+1))
#If on a SGE machine make job submission file
if(HPCFLG==1):
number=random.randint(99999,1000000)
file1="MCE"+str(number)+".sh"
f=open(file1,"w")
f.write("#$ -cwd -V \n")
if(cores!=1):
f.write("#$ -pe smp "+str(cores)+" \n") #Use shared memory parallel environemnt
f.write("#$ -l h_rt=40:00:00 \n")
f.write("#$ -l h_vmem=4G \n")
f.write("#$ -t 1-"+str(nodes)+" \n")
f.write("date \n")
f.write("cd "+EXDIR1+"/run-$SGE_TASK_ID/ \n")
f.write("echo "'"Running on $HOSTNAME in folder $PWD" \n')
f.write("module load mkl \n")
f.write("time ./MCE.exe \n")
f.write("date \n")
f.close()
if(cores!=1):
os.environ["OMP_NUM_THREADS"]=str(cores)
subprocess.call(['qsub',file1])
else:
if(cores!=1):
os.environ["OMP_NUM_THREADS"]=str(cores)
for i in range(nodes):
SUBDIR=EXDIR1+"/run-"+str(i+1)
subprocess.Popen('',executable=SUBDIR+"/MCE.exe",cwd=SUBDIR)
| 2.46875 | 2 |
gdsfactory/components/__init__.py | jorgepadilla19/gdsfactory | 0 | 12768123 | <gh_stars>0
from gdsfactory.components.add_fidutials import add_fidutials, add_fidutials_offsets
from gdsfactory.components.align import add_frame, align_wafer
from gdsfactory.components.array_component import array
from gdsfactory.components.array_with_fanout import (
array_with_fanout,
array_with_fanout_2d,
)
from gdsfactory.components.array_with_via import array_with_via, array_with_via_2d
from gdsfactory.components.awg import awg
from gdsfactory.components.bbox import bbox
from gdsfactory.components.bend_circular import bend_circular, bend_circular180
from gdsfactory.components.bend_circular_heater import bend_circular_heater
from gdsfactory.components.bend_euler import (
bend_euler,
bend_euler180,
bend_euler_s,
bend_straight_bend,
)
from gdsfactory.components.bend_port import bend_port
from gdsfactory.components.bend_s import bend_s
from gdsfactory.components.C import C
from gdsfactory.components.cavity import cavity
from gdsfactory.components.cdc import cdc
from gdsfactory.components.cdsem_all import cdsem_all
from gdsfactory.components.circle import circle
from gdsfactory.components.compass import compass
from gdsfactory.components.component_lattice import component_lattice
from gdsfactory.components.component_sequence import component_sequence
from gdsfactory.components.contact import contact, contact_heater_m3, contact_slab_m3
from gdsfactory.components.contact_slot import contact_slot, contact_slot_m1_m2
from gdsfactory.components.contact_with_offset import contact_with_offset
from gdsfactory.components.copy_layers import copy_layers
from gdsfactory.components.coupler import coupler
from gdsfactory.components.coupler90 import coupler90, coupler90circular
from gdsfactory.components.coupler90bend import coupler90bend
from gdsfactory.components.coupler_adiabatic import coupler_adiabatic
from gdsfactory.components.coupler_asymmetric import coupler_asymmetric
from gdsfactory.components.coupler_full import coupler_full
from gdsfactory.components.coupler_ring import coupler_ring
from gdsfactory.components.coupler_straight import coupler_straight
from gdsfactory.components.coupler_symmetric import coupler_symmetric
from gdsfactory.components.cross import cross
from gdsfactory.components.crossing_waveguide import (
compensation_path,
crossing,
crossing45,
crossing_arm,
crossing_etched,
crossing_from_taper,
)
from gdsfactory.components.cutback_bend import (
cutback_bend,
cutback_bend90,
cutback_bend90circular,
cutback_bend180,
cutback_bend180circular,
staircase,
)
from gdsfactory.components.cutback_component import (
cutback_component,
cutback_component_mirror,
)
from gdsfactory.components.dbr import dbr
from gdsfactory.components.dbr_tapered import dbr_tapered
from gdsfactory.components.delay_snake import delay_snake
from gdsfactory.components.delay_snake2 import delay_snake2
from gdsfactory.components.delay_snake3 import delay_snake3
from gdsfactory.components.delay_snake_sbend import delay_snake_sbend
from gdsfactory.components.dicing_lane import dicing_lane
from gdsfactory.components.die import die
from gdsfactory.components.die_bbox import die_bbox
from gdsfactory.components.die_bbox_frame import die_bbox_frame
from gdsfactory.components.disk import disk
from gdsfactory.components.ellipse import ellipse
from gdsfactory.components.extend_ports_list import extend_ports_list
from gdsfactory.components.extension import extend_port, extend_ports
from gdsfactory.components.fiber import fiber
from gdsfactory.components.fiber_array import fiber_array
from gdsfactory.components.grating_coupler_array import grating_coupler_array
from gdsfactory.components.grating_coupler_circular import (
grating_coupler_circular,
grating_coupler_circular_arbitrary,
)
from gdsfactory.components.grating_coupler_elliptical import (
ellipse_arc,
grating_coupler_elliptical,
grating_coupler_elliptical_te,
grating_coupler_elliptical_tm,
grating_taper_points,
grating_tooth_points,
)
from gdsfactory.components.grating_coupler_elliptical_arbitrary import (
grating_coupler_elliptical_arbitrary,
)
from gdsfactory.components.grating_coupler_elliptical_lumerical import (
grating_coupler_elliptical_lumerical,
)
from gdsfactory.components.grating_coupler_elliptical_trenches import (
grating_coupler_elliptical_trenches,
grating_coupler_te,
grating_coupler_tm,
)
from gdsfactory.components.grating_coupler_loss import (
grating_coupler_loss_fiber_array,
grating_coupler_loss_fiber_array4,
loss_deembedding_ch12_34,
loss_deembedding_ch13_24,
loss_deembedding_ch14_23,
)
from gdsfactory.components.grating_coupler_loss_fiber_single import (
grating_coupler_loss_fiber_single,
)
from gdsfactory.components.grating_coupler_rectangular import (
grating_coupler_rectangular,
)
from gdsfactory.components.grating_coupler_rectangular_arbitrary import (
grating_coupler_rectangular_arbitrary,
)
from gdsfactory.components.grating_coupler_rectangular_arbitrary_slab import (
grating_coupler_rectangular_arbitrary_slab,
)
from gdsfactory.components.grating_coupler_tree import grating_coupler_tree
from gdsfactory.components.hline import hline
from gdsfactory.components.L import L
from gdsfactory.components.litho_calipers import litho_calipers
from gdsfactory.components.litho_ruler import litho_ruler
from gdsfactory.components.litho_steps import litho_steps
from gdsfactory.components.logo import logo
from gdsfactory.components.loop_mirror import loop_mirror
from gdsfactory.components.mmi1x2 import mmi1x2
from gdsfactory.components.mmi2x2 import mmi2x2
from gdsfactory.components.mzi import mzi, mzi1x2_2x2, mzi2x2_2x2, mzi_coupler
from gdsfactory.components.mzi_arm import mzi_arm
from gdsfactory.components.mzi_arms import mzi_arms
from gdsfactory.components.mzi_lattice import mzi_lattice
from gdsfactory.components.mzi_pads_center import mzi_pads_center
from gdsfactory.components.mzi_phase_shifter import (
mzi_phase_shifter,
mzi_phase_shifter_top_heater_metal,
)
from gdsfactory.components.mzit import mzit
from gdsfactory.components.mzit_lattice import mzit_lattice
from gdsfactory.components.nxn import nxn
from gdsfactory.components.pad import (
pad,
pad_array,
pad_array0,
pad_array90,
pad_array180,
pad_array270,
)
from gdsfactory.components.pad_gsg import pad_gsg_open, pad_gsg_short
from gdsfactory.components.pads_shorted import pads_shorted
from gdsfactory.components.ramp import ramp
from gdsfactory.components.rectangle import rectangle
from gdsfactory.components.rectangle_with_slits import rectangle_with_slits
from gdsfactory.components.resistance_meander import resistance_meander
from gdsfactory.components.resistance_sheet import resistance_sheet
from gdsfactory.components.ring import ring
from gdsfactory.components.ring_double import ring_double
from gdsfactory.components.ring_double_heater import ring_double_heater
from gdsfactory.components.ring_single import ring_single
from gdsfactory.components.ring_single_array import ring_single_array
from gdsfactory.components.ring_single_dut import ring_single_dut, taper2
from gdsfactory.components.ring_single_heater import ring_single_heater
from gdsfactory.components.seal_ring import seal_ring
from gdsfactory.components.spiral import spiral
from gdsfactory.components.spiral_circular import spiral_circular
from gdsfactory.components.spiral_external_io import spiral_external_io
from gdsfactory.components.spiral_inner_io import (
spiral_inner_io,
spiral_inner_io_fiber_single,
)
from gdsfactory.components.splitter_chain import splitter_chain
from gdsfactory.components.splitter_tree import (
splitter_tree,
test_splitter_tree_ports,
test_splitter_tree_ports_no_sbend,
)
from gdsfactory.components.straight import straight
from gdsfactory.components.straight_array import straight_array
from gdsfactory.components.straight_heater_doped_rib import straight_heater_doped_rib
from gdsfactory.components.straight_heater_doped_strip import (
straight_heater_doped_strip,
)
from gdsfactory.components.straight_heater_meander import straight_heater_meander
from gdsfactory.components.straight_heater_metal import (
straight_heater_metal,
straight_heater_metal_90_90,
straight_heater_metal_undercut,
straight_heater_metal_undercut_90_90,
test_ports,
)
from gdsfactory.components.straight_pin import straight_pin, straight_pn
from gdsfactory.components.straight_pin_slot import straight_pin_slot
from gdsfactory.components.straight_rib import straight_rib, straight_rib_tapered
from gdsfactory.components.switch_tree import switch_tree
from gdsfactory.components.taper import (
taper,
taper_strip_to_ridge,
taper_strip_to_ridge_trenches,
)
from gdsfactory.components.taper_cross_section import (
taper_cross_section_linear,
taper_cross_section_sine,
)
from gdsfactory.components.taper_from_csv import (
taper_0p5_to_3_l36,
taper_from_csv,
taper_w10_l100,
taper_w10_l150,
taper_w10_l200,
taper_w11_l200,
taper_w12_l200,
)
from gdsfactory.components.taper_parabolic import taper_parabolic
from gdsfactory.components.text import githash, text
from gdsfactory.components.text_rectangular import (
text_rectangular,
text_rectangular_multi_layer,
)
from gdsfactory.components.triangle import triangle, triangle2, triangle4
from gdsfactory.components.verniers import verniers
from gdsfactory.components.version_stamp import pixel, qrcode, version_stamp
from gdsfactory.components.via import via, via1, via2, viac
from gdsfactory.components.via_cutback import via_cutback
from gdsfactory.components.waveguide_template import strip
from gdsfactory.components.wire import wire_corner, wire_straight
from gdsfactory.components.wire_sbend import wire_sbend
# Components to test
factory = dict(
C=C,
L=L,
add_fidutials=add_fidutials,
add_frame=add_frame,
align_wafer=align_wafer,
array=array,
array_with_fanout=array_with_fanout,
array_with_fanout_2d=array_with_fanout_2d,
array_with_via=array_with_via,
array_with_via_2d=array_with_via_2d,
awg=awg,
bbox=bbox,
bend_circular=bend_circular,
bend_circular180=bend_circular180,
bend_circular_heater=bend_circular_heater,
bend_euler=bend_euler,
bend_euler180=bend_euler180,
bend_euler_s=bend_euler_s,
bend_straight_bend=bend_straight_bend,
bend_port=bend_port,
bend_s=bend_s,
cavity=cavity,
copy_layers=copy_layers,
cdc=cdc,
circle=circle,
compass=compass,
compensation_path=compensation_path,
component_lattice=component_lattice,
component_sequence=component_sequence,
coupler=coupler,
coupler90=coupler90,
coupler90bend=coupler90bend,
coupler90circular=coupler90circular,
coupler_adiabatic=coupler_adiabatic,
coupler_asymmetric=coupler_asymmetric,
coupler_full=coupler_full,
coupler_ring=coupler_ring,
coupler_straight=coupler_straight,
coupler_symmetric=coupler_symmetric,
cross=cross,
crossing=crossing,
crossing45=crossing45,
crossing_arm=crossing_arm,
crossing_etched=crossing_etched,
crossing_from_taper=crossing_from_taper,
cutback_bend=cutback_bend,
cutback_bend180=cutback_bend180,
cutback_bend180circular=cutback_bend180circular,
cutback_bend90=cutback_bend90,
cutback_bend90circular=cutback_bend90circular,
cutback_component=cutback_component,
cutback_component_mirror=cutback_component_mirror,
dicing_lane=dicing_lane,
dbr=dbr,
dbr_tapered=dbr_tapered,
delay_snake=delay_snake,
delay_snake2=delay_snake2,
delay_snake3=delay_snake3,
delay_snake_sbend=delay_snake_sbend,
die=die,
die_bbox=die_bbox,
die_bbox_frame=die_bbox_frame,
disk=disk,
ellipse=ellipse,
extend_port=extend_port,
extend_ports=extend_ports,
extend_ports_list=extend_ports_list,
fiber=fiber,
fiber_array=fiber_array,
grating_coupler_array=grating_coupler_array,
grating_coupler_elliptical=grating_coupler_elliptical,
grating_coupler_circular=grating_coupler_circular,
grating_coupler_circular_arbitrary=grating_coupler_circular_arbitrary,
grating_coupler_elliptical_te=grating_coupler_elliptical_te,
grating_coupler_elliptical_tm=grating_coupler_elliptical_tm,
grating_coupler_elliptical_arbitrary=grating_coupler_elliptical_arbitrary,
grating_coupler_elliptical_lumerical=grating_coupler_elliptical_lumerical,
grating_coupler_elliptical_trenches=grating_coupler_elliptical_trenches,
grating_coupler_loss_fiber_array4=grating_coupler_loss_fiber_array4,
grating_coupler_loss_fiber_array=grating_coupler_loss_fiber_array,
grating_coupler_loss_fiber_single=grating_coupler_loss_fiber_single,
grating_coupler_te=grating_coupler_te,
grating_coupler_tm=grating_coupler_tm,
grating_coupler_tree=grating_coupler_tree,
grating_coupler_rectangular=grating_coupler_rectangular,
grating_coupler_rectangular_arbitrary=grating_coupler_rectangular_arbitrary,
grating_coupler_rectangular_arbitrary_slab=grating_coupler_rectangular_arbitrary_slab,
hline=hline,
litho_calipers=litho_calipers,
litho_steps=litho_steps,
logo=logo,
loop_mirror=loop_mirror,
loss_deembedding_ch12_34=loss_deembedding_ch12_34,
loss_deembedding_ch13_24=loss_deembedding_ch13_24,
loss_deembedding_ch14_23=loss_deembedding_ch14_23,
mmi1x2=mmi1x2,
mmi2x2=mmi2x2,
mzi=mzi,
mzi2x2_2x2=mzi2x2_2x2,
mzi1x2_2x2=mzi1x2_2x2,
mzi_coupler=mzi_coupler,
mzi_arm=mzi_arm,
mzi_arms=mzi_arms,
mzi_lattice=mzi_lattice,
mzi_pads_center=mzi_pads_center,
mzi_phase_shifter=mzi_phase_shifter,
mzi_phase_shifter_top_heater_metal=mzi_phase_shifter_top_heater_metal,
mzit=mzit,
mzit_lattice=mzit_lattice,
nxn=nxn,
pad=pad,
pad_gsg_short=pad_gsg_short,
pad_gsg_open=pad_gsg_open,
pad_array=pad_array,
pads_shorted=pads_shorted,
cdsem_all=cdsem_all,
pixel=pixel,
qrcode=qrcode,
ramp=ramp,
rectangle=rectangle,
rectangle_with_slits=rectangle_with_slits,
resistance_meander=resistance_meander,
resistance_sheet=resistance_sheet,
ring=ring,
ring_double=ring_double,
ring_single=ring_single,
ring_single_array=ring_single_array,
ring_single_dut=ring_single_dut,
ring_single_heater=ring_single_heater,
ring_double_heater=ring_double_heater,
spiral=spiral,
spiral_circular=spiral_circular,
spiral_external_io=spiral_external_io,
spiral_inner_io=spiral_inner_io,
spiral_inner_io_fiber_single=spiral_inner_io_fiber_single,
splitter_chain=splitter_chain,
splitter_tree=splitter_tree,
staircase=staircase,
straight=straight,
straight_array=straight_array,
straight_heater_doped_rib=straight_heater_doped_rib,
straight_heater_doped_strip=straight_heater_doped_strip,
straight_heater_metal=straight_heater_metal,
straight_heater_metal_90_90=straight_heater_metal_90_90,
straight_heater_metal_undercut=straight_heater_metal_undercut,
straight_heater_metal_undercut_90_90=straight_heater_metal_undercut_90_90,
straight_heater_meander=straight_heater_meander,
straight_pin=straight_pin,
straight_pn=straight_pn,
straight_pin_slot=straight_pin_slot,
straight_rib=straight_rib,
straight_rib_tapered=straight_rib_tapered,
switch_tree=switch_tree,
taper_cross_section_linear=taper_cross_section_linear,
taper_cross_section_sine=taper_cross_section_sine,
taper=taper,
taper_parabolic=taper_parabolic,
taper2=taper2,
taper_0p5_to_3_l36=taper_0p5_to_3_l36,
taper_from_csv=taper_from_csv,
taper_strip_to_ridge=taper_strip_to_ridge,
taper_strip_to_ridge_trenches=taper_strip_to_ridge_trenches,
taper_w10_l100=taper_w10_l100,
taper_w10_l150=taper_w10_l150,
taper_w10_l200=taper_w10_l200,
taper_w11_l200=taper_w11_l200,
taper_w12_l200=taper_w12_l200,
text=text,
text_rectangular=text_rectangular,
text_rectangular_multi_layer=text_rectangular_multi_layer,
triangle=triangle,
verniers=verniers,
version_stamp=version_stamp,
via=via,
viac=viac,
via1=via1,
via2=via2,
via_cutback=via_cutback,
contact=contact,
contact_slot=contact_slot,
contact_slot_m1_m2=contact_slot_m1_m2,
contact_heater_m3=contact_heater_m3,
contact_slab_m3=contact_slab_m3,
contact_with_offset=contact_with_offset,
wire_corner=wire_corner,
wire_sbend=wire_sbend,
wire_straight=wire_straight,
seal_ring=seal_ring,
)
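# Example (sketch, not part of the library): smoke-test every registered cell.
# Assumes each factory entry is callable with no arguments and returns a
# Component, which holds for the default cells collected above.
def _smoke_test_factory(factories=factory):
    failures = {}
    for name, make_component in factories.items():
        try:
            make_component()
        except Exception as exc:
            failures[name] = exc
    return failures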
_factory_passives = dict(
bend_circular=bend_circular,
bend_euler=bend_euler,
bend_euler_s=bend_euler_s,
bend_s=bend_s,
cdc=cdc,
coupler=coupler,
coupler_adiabatic=coupler_adiabatic,
coupler_asymmetric=coupler_asymmetric,
coupler_full=coupler_full,
coupler_ring=coupler_ring,
coupler_symmetric=coupler_symmetric,
crossing=crossing,
crossing45=crossing45,
taper_cross_section_linear=taper_cross_section_linear,
taper_cross_section_sine=taper_cross_section_sine,
taper=taper,
taper2=taper2,
taper_0p5_to_3_l36=taper_0p5_to_3_l36,
taper_from_csv=taper_from_csv,
taper_strip_to_ridge=taper_strip_to_ridge,
taper_strip_to_ridge_trenches=taper_strip_to_ridge_trenches,
taper_w10_l100=taper_w10_l100,
taper_w10_l150=taper_w10_l150,
taper_w10_l200=taper_w10_l200,
taper_w11_l200=taper_w11_l200,
taper_w12_l200=taper_w12_l200,
mmi1x2=mmi1x2,
mmi2x2=mmi2x2,
)
__all__ = [
"factory",
"C",
"L",
"add_frame",
"add_fidutials",
"add_fidutials_offsets",
"align",
"align_wafer",
"array",
"array_with_fanout",
"array_with_fanout_2d",
"array_with_via",
"array_with_via_2d",
"awg",
"bbox",
"bend_circular",
"bend_circular180",
"bend_circular_heater",
"bend_euler",
"bend_euler180",
"bend_euler_s",
"bend_port",
"bend_s",
"cavity",
"circle",
"compass",
"compensation_path",
"component_lattice",
"component_sequence",
"coupler",
"coupler90",
"coupler90bend",
"coupler90circular",
"coupler_adiabatic",
"coupler_asymmetric",
"coupler_full",
"coupler_ring",
"coupler_straight",
"coupler_symmetric",
"cross",
"crossing",
"crossing45",
"crossing_arm",
"crossing_etched",
"crossing_from_taper",
"crossing_waveguide",
"cutback_bend",
"cutback_bend180",
"cutback_bend180circular",
"cutback_bend90",
"cutback_bend90circular",
"cutback_component",
"cutback_component_mirror",
"dbr",
"dbr_tapered",
"delay_snake",
"delay_snake2",
"delay_snake3",
"die",
"die_bbox",
"die_bbox_frame",
"disk",
"ellipse",
"ellipse_arc",
"extend_port",
"extend_ports",
"extend_ports_list",
"extension",
"fiber",
"fiber_array",
"githash",
"grating_coupler_array",
"grating_coupler_elliptical",
"grating_coupler_elliptical_arbitrary",
"grating_coupler_elliptical_lumerical",
"grating_coupler_circular",
"grating_coupler_elliptical_te",
"grating_coupler_elliptical_tm",
"grating_coupler_elliptical_trenches",
"grating_coupler_functions",
"grating_coupler_loss",
"grating_coupler_te",
"grating_coupler_tm",
"grating_coupler_tree",
"grating_coupler_rectangular",
"grating_coupler_rectangular_arbitrary",
"grating_taper_points",
"grating_tooth_points",
"hline",
"litho_calipers",
"litho_ruler",
"litho_steps",
"logo",
"loop_mirror",
"loss_deembedding_ch12_34",
"loss_deembedding_ch13_24",
"loss_deembedding_ch14_23",
"text_rectangular",
"mmi1x2",
"mmi2x2",
"mzi",
"mzi_arm",
"mzi_lattice",
"mzi_phase_shifter",
"mzi_phase_shifter_top_heater_metal",
"mzit",
"mzit_lattice",
"nxn",
"cdsem_all",
"pad",
"pad_array",
"pad_array0",
"pad_array90",
"pad_array180",
"pad_array270",
"pads_shorted",
"pixel",
"qrcode",
"ramp",
"rectangle",
"resistance_meander",
"ring",
"ring_double",
"ring_single",
"ring_single_array",
"ring_single_dut",
"spiral",
"spiral_circular",
"spiral_external_io",
"spiral_inner_io",
"splitter_chain",
"splitter_tree",
"staircase",
"straight",
"straight_array",
"straight_heater_doped_rib",
"straight_heater_doped_strip",
"straight_heater_metal",
"straight_heater_metal_90_90",
"straight_heater_metal_undercut",
"straight_heater_metal_undercut_90_90",
"straight_pin",
"straight_pn",
"straight_rib",
"strip",
"taper",
"taper2",
"taper_0p5_to_3_l36",
"taper_from_csv",
"taper_strip_to_ridge",
"taper_strip_to_ridge_trenches",
"taper_w10_l100",
"taper_w10_l150",
"taper_w10_l200",
"taper_w11_l200",
"taper_w12_l200",
"test_ports",
"test_splitter_tree_ports",
"test_splitter_tree_ports_no_sbend",
"text",
"triangle",
"triangle2",
"triangle4",
"verniers",
"version_stamp",
"via",
"viac",
"via1",
"via2",
"via_cutback",
"contact",
"contact_heater_m3",
"contact_slab_m3",
"contact_with_offset",
"waveguide_template",
"wire",
"wire_corner",
"wire_sbend",
"wire_straight",
]
| 1.148438 | 1 |
Code/Sensors/DHT.py | JoelBuenrostro/micropython-for-esp32 | 1 | 12768124 | # Chip: ESP32-WROOM-32 (ESP32-D0WDQ6)
# Microprocessor: Dual-Core Xtensa® 32-bit LX6
# Clock: 80 MHz to 240 MHz
# Crystal: 40 MHz
# SPI flash: 4 MB
# Operating voltage: 3.0V-3.6V
# Operating current: 80mA
# Purpose: Read temperature and humidity from DHT sensors
# Notes: The DHT driver is implemented in software and works on all pins
import dht
import machine
# DHT11 on GPIO4: 1 degC / 1 %RH resolution, read at most once per second
d = dht.DHT11(machine.Pin(4))
d.measure()
print(d.temperature(), d.humidity())
# DHT22 on the same pin: 0.1 degC / 0.1 %RH resolution, read at most every 2 s
d = dht.DHT22(machine.Pin(4))
d.measure()
print(d.temperature(), d.humidity()) | 2.703125 | 3 |
mygamelist/games/models.py | Frostflake/mygamelist | 2 | 12768125 | from django.db import models
from decimal import Decimal
from uuid import uuid4
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
def random_cover_filename(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid4().hex, ext)
return 'covers/' + filename
def random_screen_filename(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid4().hex, ext)
return 'screenshots/' + filename
def random_avatar_filename(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid4().hex, ext)
return 'avatars/' + filename
class Platform(models.Model):
name = models.CharField(max_length=50)
category = models.CharField(max_length=50)
shorthand = models.CharField(max_length=50, blank=True, default='')
class Meta:
ordering = ['category', 'name']
def __str__(self):
return self.category + " - " + self.name
class Tag(models.Model):
name = models.CharField(max_length=50)
category = models.CharField(max_length=50)
description = models.CharField(max_length=250, blank=True)
class Meta:
ordering = ['category', 'name']
def __str__(self):
return self.category + " - " + self.name
class Genre(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Game(models.Model):
name = models.CharField(max_length=150)
year = models.IntegerField()
trailer_link = models.URLField(max_length=250, blank=True, default='')
image = models.ImageField(upload_to=random_cover_filename, null=True)
screen1 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
screen2 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
screen3 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
screen4 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
genres = models.ManyToManyField(Genre, blank=True)
tags = models.ManyToManyField(Tag, blank=True)
description = models.TextField(blank=True, default='')
aliases = models.TextField(blank=True, default='')
main_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Main Website")
wikipedia_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Wikipedia")
gamefaqs_link = models.URLField(max_length=250, blank=True, default='', verbose_name="GameFAQs Link")
steam_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Steam Link")
howlongtobeat_link = models.URLField(max_length=250, blank=True, default='', verbose_name="HowLongToBeat Link")
pcgamingwiki_link = models.URLField(max_length=250, blank=True, default='', verbose_name="PCGamingWiki Link")
winehq_link = models.URLField(max_length=250, blank=True, default='', verbose_name="WineHQ Link")
mobygames_link = models.URLField(max_length=250, blank=True, default='', verbose_name="MobyGames Link")
vndb_link = models.URLField(max_length=250, blank=True, default='', verbose_name="VNDB Link")
temp_pop_score = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)
def __str__(self):
return self.name + " (" + str(self.year) + ")"
@receiver(post_save, sender=User)
def create_userprofile_signal(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
sexual_content = Tag.objects.get(name="Sexual Content")
instance.userprofile.banned_tags.add(sexual_content)
instance.userprofile.save()
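# Note (sketch): the receiver above assumes a Tag named "Sexual Content"
# already exists. On a fresh database, get_or_create() avoids a DoesNotExist
# at user-creation time (the category value below is hypothetical):
#   sexual_content, _ = Tag.objects.get_or_create(
#       name="Sexual Content", defaults={"category": "Content"})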
class UserGameListEntry(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
game = models.ForeignKey(Game, on_delete=models.CASCADE)
platform = models.ForeignKey(Platform, blank=True, null=True, on_delete=models.CASCADE)
statuses = [
("PLAN", "Plan to Play"),
("PLAY", "Playing"),
("CMPL", "Completed"),
("DROP", "Dropped"),
("HOLD", "Paused"),
("IMPT", "Imported")
]
status = models.CharField(max_length=4, choices=statuses, default="PLAN")
score = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
hours = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
comments = models.CharField(max_length=500, blank=True, default='')
start_date = models.DateField(blank=True, null=True)
stop_date = models.DateField(blank=True, null=True)
times_replayed = models.IntegerField(default=0)
def __str__(self):
return self.user.username + " - " + self.game.name
class ManualUserGameListEntry(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=150)
platform = models.ForeignKey(Platform, blank=True, null=True, on_delete=models.CASCADE)
statuses = [
("PLAY", "Playing"),
("CMPL", "Completed"),
("DROP", "Dropped"),
("HOLD", "Paused"),
("PLAN", "Plan to Play"),
("IMPT", "Imported")
]
status = models.CharField(max_length=4, choices=statuses, default="PLAN")
score = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
hours = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
comments = models.CharField(max_length=500, blank=True, default='')
start_date = models.DateField(blank=True, null=True)
stop_date = models.DateField(blank=True, null=True)
times_replayed = models.IntegerField(default=0)
never_migrate = models.BooleanField(default=False, null=True)
def __str__(self):
return self.user.username + " - " + self.name
class UserGameStatus(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
game = models.ForeignKey(Game, on_delete=models.CASCADE)
statuses = [
("PLAY", "Playing"),
("CMPL", "Completed"),
("DROP", "Dropped"),
("HOLD", "Paused"),
("PLAN", "Plan to Play")
]
status = models.CharField(max_length=4, choices=statuses)
created_at = models.DateTimeField(auto_now_add=True)
liked_by = models.ManyToManyField(User, blank=True, related_name='usergamestatus_liked_by')
def __str__(self):
return self.user.username + " " + self.status + " " + self.game.name
class Notification(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
notif_type = models.CharField(max_length=10)
notif_object_id = models.IntegerField()
class Recommendation(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
slot = models.IntegerField()
game = models.ForeignKey(Game, on_delete=models.CASCADE)
rec_data = models.CharField(max_length=10)
class CollectionType(models.Model):
name = models.CharField(max_length=150)
description = models.TextField(blank=True, default='')
def __str__(self):
return self.name
class Collection(models.Model):
name = models.CharField(max_length=150)
category = models.ForeignKey(CollectionType, on_delete=models.CASCADE)
description = models.TextField(blank=True, default='')
games = models.ManyToManyField(Game, blank=True)
def __str__(self):
return self.category.name + " - " + self.name
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
avatar = models.ImageField(upload_to=random_avatar_filename, default='avatars/default.png')
banned_tags = models.ManyToManyField(Tag, blank=True)
ignored_games = models.ManyToManyField(Game, blank=True)
ignored_collections = models.ManyToManyField(Collection, blank=True)
followed_users = models.ManyToManyField(User, blank=True, related_name='userprofile_followed_users')
class UserSettings(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
rating_systems = [
("SMIL","3-Point Smiley"),
("STAR","5-Point Star"),
("DCML","10-Point Decimal")
]
score_type = models.CharField(max_length=4, choices=rating_systems, default="DCML")
class TagAdditionRequest(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
requested_by = models.ForeignKey(User, on_delete=models.CASCADE)
comments = models.CharField(max_length=1000, blank=True, default='')
def __str__(self):
return self.game.name + " -> " + self.tag.name | 2.15625 | 2 |
loss_function.py | lightbooster/TP-GST-BERT-Tacotron2 | 0 | 12768126 | import torch
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
class TPCWLoss(nn.Module):
def __init__(self):
super().__init__()
    @staticmethod
    def cross_entropy(w_combination, target):
        # assumes w_combination is strictly positive (e.g. a softmax output);
        # add a small epsilon inside the log for numerical stability if needed
        return -(target * torch.log(w_combination)).sum(dim=1).mean()
def forward(self, w_combination, target):
"""
calculates cross-entropy loss over soft classes (GSTs distributions) and predicted weights
:param w_combination: predicted combination weights tensor shape of (batch_size, token_num)
or (batch_size, atn_head_num, token_num)
:param target: GSTs' combination weights tensor shape of (batch_size, token_num)
or (batch_size, atn_head_num, token_num)
:return: cross-entropy loss value or sum of cross-entropy loss values
"""
if w_combination.dim() == 2:
return self.cross_entropy(w_combination, target)
else:
losses = []
for atn_head_index in range(w_combination.size(1)):
loss = self.cross_entropy(w_combination[:, atn_head_index, :], target[:, atn_head_index, :])
losses.append(loss)
return sum(losses)
class TPSELoss(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.L1Loss()
def forward(self, predicted_tokens, target):
"""
calculate L1 loss function between predicted and target GST
:param predicted_tokens: tensor shape of (batch_size, token_dim)
:param target: tensor shape of (batch_size, token_dim)
:return: L1 loss
"""
return self.l1(predicted_tokens, target)
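# Usage sketch (shapes are illustrative, not tied to a particular model):
#   tpse = TPSELoss()
#   loss = tpse(torch.zeros(8, 256), torch.ones(8, 256))
#   tpcw = TPCWLoss()
#   w = torch.softmax(torch.randn(8, 10), dim=-1)
#   loss = tpcw(w, torch.full((8, 10), 0.1))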
| 2.53125 | 3 |
src/components/vcs/MarkdownArticle.py | leikareipa/vcs-doxy-theme | 0 | 12768127 | <filename>src/components/vcs/MarkdownArticle.py
#
# 2021 <NAME>
#
# Software: VCS Doxygen theme
#
from src.components.vcs import (
ArticleHeader,
)
from xml.etree import ElementTree
from typing import Final
from src import xml2html
# The sub-components used in this component.
childComponents:Final = [
ArticleHeader,
]
def html(xmlTree:ElementTree):
    targetEl = xmlTree.find("./compounddef/detaileddescription")
    # ElementTree elements are falsy when they have no children, so test
    # explicitly against None to detect a missing element.
    if targetEl is None:
        return ""
description = "\n".join(map(xml2html.xml_element_to_html, targetEl))
return f"""
<article class='page'>
{ArticleHeader.html(xmlTree)}
<div class='contents page'>
{description}
</div>
</article>
"""
def css():
return """
.contents.page
{
width: 100%;
background-color: var(--article-background-color);
box-sizing: border-box;
padding: var(--article-vertical-padding) var(--article-horizontal-padding);
border-radius: 4px;
overflow: hidden;
box-shadow: inset 0 0 11px rgba(0, 0, 0, 0.4);
min-height: calc(100vh - var(--article-header-height) - var(--header-height) - var(--content-spacing));
}
.contents.page *:last-child
{
margin-bottom: 0;
}
"""
| 2.5 | 2 |
salary_counter.py | MaxChilikin/Salary_counter | 0 | 12768128 | import xlrd
import xlwt
import pylightxl as xl
import os
import sys
import re
import PySimpleGUI as sg
from collections import OrderedDict
class SalaryCounter:
def __init__(self, values):
self.values = values
self.payrolls = {}
self.headers = []
self.counter = False
self.main_col_num = None
self.sum = None
@staticmethod
def find_path():
if getattr(sys, 'frozen', False):
# one-file
application_path = os.path.dirname(sys.executable)
# one-folder
# application_path = sys._MEIPASS
else:
application_path = os.path.dirname(os.path.abspath(__file__))
return application_path
def parse_directory(self):
application_path = self.find_path()
for dir_path, _, file_names in os.walk(application_path):
if dir_path == application_path:
for file_name in file_names:
self.check_format(file_name=file_name)
def check_format(self, file_name: str):
pattern = re.compile(r"^([^\\]{1,50}).(xls|xlsx)$")
result = re.search(pattern=pattern, string=file_name)
if result:
if result[2] == 'xls' or result[2] == 'xlsx':
self.payrolls[file_name] = result[2]
def read(self, payroll: str, format_: str):
to_count = []
if format_ == 'xls':
book = xlrd.open_workbook(payroll)
sheet = book.sheet_by_index(0)
for row in range(sheet.nrows):
new_row = []
for element in sheet.row(row):
value = self._read_helper(value=element.value, row=sheet.row(row), format_=format_)
if value:
new_row.append(value)
if new_row:
to_count.append(new_row)
elif format_ == 'xlsx':
db = xl.readxl(fn=payroll)
ws_name = db.ws_names[0]
for row in db.ws(ws=ws_name).rows:
new_row = []
for element in row:
value = self._read_helper(value=element, row=row, format_=format_)
if value:
new_row.append(value)
if new_row:
to_count.append(new_row)
        to_count.pop(0)  # drop the two leading non-data rows
        to_count.pop(0)
return to_count
    def _read_helper(self, value, row, format_: str):
        # The Russian literals below match cell values in the source payroll
        # spreadsheets and must stay untranslated.
        if value == "Фамилия, имя, отчество":  # header row: "Full name"
            if format_ == 'xls':
                self.headers = [el.value for el in row if
                                el.value and el.value != "Расписка в получении"]  # skip the "Receipt" column
            elif format_ == 'xlsx':
                self.headers = [el for el in row if el and el != "Расписка в получении"]
            self.main_col_num = ([self.headers.index(i) for i in self.headers if i == "Сумма"])[0]  # "Amount" column
            self.headers.extend([str(i) + "р" for i in self.values])  # per-denomination columns, e.g. "5000р"
            self.counter = True
        elif value == "Итого":  # the "Total" row marks the end of the data
            self.counter = False
        if self.counter and value:
            return value
def count(self, data: list):
result = []
for_person = None
for row in data:
for num, element in enumerate(row):
if num == self.main_col_num:
for_person = self._count_one_instance(salary=element)
if for_person:
row.extend(for_person)
result.append(row)
return result
    def _count_one_instance(self, salary: float):
        # repeated float subtraction can accumulate rounding error for the
        # kopeck denominations (0.50, 0.10)
        result = []
for value in self.values:
amount = 0
if value not in self.sum:
self.sum.setdefault(value, 0)
while salary >= value:
salary -= value
amount += 1
self.sum[value] += 1
result.append(amount)
return result
def save(self, data: list, format_: str, name: str):
        data.append(self.sum.keys())    # summary row: denominations used
        data.append(self.sum.values())  # summary row: counts per denomination
        name = name[:len(name) - (len(format_) + 1)]  # strip the ".xls"/".xlsx" extension
        file_name = name + "_расчёт" + "." + format_  # "_расчёт" = "_calculation" output suffix
        sheetname = "Зарплаты"  # "Salaries"
if format_ == 'xls':
workbook = xlwt.Workbook()
sheet = workbook.add_sheet(sheetname=sheetname)
for col_num, column in enumerate(self.headers):
sheet.write(r=0, c=col_num, label=column)
for num, row in enumerate(data, start=1):
for col_num, column in enumerate(row):
sheet.write(r=num, c=col_num, label=column)
workbook.save(filename_or_stream=file_name)
elif format_ == 'xlsx':
new_db = xl.Database()
new_db.add_ws(ws=sheetname)
for col_num, column in enumerate(self.headers, start=1):
new_db.ws(ws=sheetname).update_index(row=1, col=col_num, val=column)
for num, row in enumerate(data, start=2):
for col_num, column in enumerate(row, start=1):
new_db.ws(ws=sheetname).update_index(row=num, col=col_num, val=column)
xl.writexl(db=new_db, fn=file_name)
def run(self):
self.parse_directory()
        if not self.payrolls:
            raise FileNotFoundError("No .xls/.xlsx files found in the folder")
for payroll, format_ in self.payrolls.items():
self.sum = OrderedDict()
to_count = self.read(payroll=payroll, format_=format_)
result = self.count(data=to_count)
self.save(data=result, name=payroll, format_=format_)
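# Usage sketch: run the counter headless, without the GUI, for a subset of
# denominations:
#   counter = SalaryCounter(values=[5000, 1000, 500, 100])
#   counter.run()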
class Interface:
def __init__(self):
        self.title = 'Banknote/coin counter'
self.theme = 'DarkAmber'
self.layout = list()
self.main_window = None
self.values = [5000, 2000, 1000, 500, 200, 100, 50, 10, 5, 2, 1, 0.50, 0.10]
def run(self):
self.start_window()
while True:
event, values = self.main_window.read()
            if event == sg.WIN_CLOSED or event == "Cancel":
                break
            elif event == "Count":
values_to_use = []
for value in self.values:
if values[f'check{value}']:
values_to_use.append(value)
try:
counter = SalaryCounter(values=values_to_use)
counter.run()
except Exception as exc:
                    exc_popup = self.popup_window(title="Error", text=exc)
pop_event, pop_value = exc_popup.read()
if pop_event == sg.WIN_CLOSED:
exc_popup.close()
self.main_window.close()
def popup_window(self, text: Exception, title: str):
sg.theme(self.theme)
layout = [[sg.Text(text=text)]]
size = 100
popup = sg.Window(
title=title,
layout=layout,
default_button_element_size=(10, 2),
size=(size * 5, size),
element_padding=(10, 10),
auto_size_buttons=False,
)
return popup
def start_window(self):
sg.theme(self.theme)
self.layout.append([sg.Text(text="Пересчитать платёжные ведомости формата .xls/.xlsx, "
"находящиеся в папке?")])
self.layout.append([sg.Text(text="Используемые купюры:")])
for value in self.values:
spaces = (4 - len(str(value))) * " "
self.layout.append([sg.Text(f"{value}{spaces}"), sg.Checkbox(text="", default=True, key=f'check{value}')])
        self.layout.append([sg.Button("Count"), sg.Button("Cancel")])
window = sg.Window(
title=self.title,
layout=self.layout,
default_button_element_size=(10, 2),
size=(550, 500),
element_padding=(2, 2),
auto_size_buttons=False,
)
self.main_window = window
if __name__ == '__main__':
ui = Interface()
ui.run()
| 2.609375 | 3 |
config.py | justplus/vansel | 3 | 12768129 | #!/usr/bin/env python
# coding=utf-8
__author__ = 'zhaoliang'
__email__ = '<EMAIL>'
__created__ = '15/12/19'
import os
CSRF_ENABLED = True
SECRET_KEY = 'guess what you can and try it'
DATABASE_URI = {
'host': 'localhost',
'user': 'root',
'passwd': '<PASSWORD>',
'port': 3306,
'db': 'vansel',
'charset': 'utf8'
}
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
PROTYPE_PATH = os.path.join(ROOT_PATH, 'cache/protype')
UPLOAD_FOLDER = os.path.join(ROOT_PATH, 'cache/data/')
UPLOAD_BASE_URL = 'http://127.0.0.1/vansel/' | 1.664063 | 2 |
pages/wikipedia_pages/home_page.py | glenn-barker/web-bdd | 0 | 12768130 | from selenium.webdriver.support import expected_conditions as EC
from pages.page import Page
class HomePage(Page):
PAGE_URL = "https://www.wikipedia.org"
def visit(self):
self.selenium.driver.get(HomePage.PAGE_URL)
self.verify_is_loaded()
def verify_is_loaded(self):
self.selenium.load_wait.until(
EC.title_is("Wikipedia"),
f"The Wikipedia homepage did not load on {self.selenium.driver.current_url}")
| 3.0625 | 3 |
examples/toggle_led.py | pimoroni/ht0740-python | 0 | 12768131 | <filename>examples/toggle_led.py
#!/usr/bin/env python
import time
from ht0740 import HT0740
print("""toggle_led.py - Toggles LED on and off using the toggle method.
Press Ctrl+C to exit.
""")
switch = HT0740()
print("Enabling switch!")
switch.enable()
try:
while True:
print("LED state: {}".format(switch.led.state()))
switch.led.toggle()
time.sleep(1)
except KeyboardInterrupt:
print("Disabling switch")
switch.disable()
| 3.515625 | 4 |
BlogApp/migrations/0010_auto_20201016_1546.py | n3trob3/nimrodage | 0 | 12768132 | # Generated by Django 3.1.1 on 2020-10-16 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0009_newsletter'),
]
operations = [
migrations.AlterField(
model_name='newsletter',
name='email',
field=models.EmailField(max_length=254, unique=True),
),
]
| 1.601563 | 2 |
camping_server2/scraping/__init__.py | solyourock/crawling_project | 0 | 12768133 | import kakao_reviews as kr
import naverv4_blog as nv4
import gocamp_crawl as gc
import gocamp_link_crawl as gl
import datetime
from camping_server2.bot import slackbot
import camping_server2.config as config
import time
import naverv5_category as nv5
import camping_server2.scraping.ogcamp_crawl as oc
import pandas as pd
def target_list():
    """
    Read the Gocamping target list CSV.
    :return:
        (titles, base addresses) as two lists
    """
datas = pd.read_csv(config.Config.PATH + '/target_list.csv')
name = datas[['title']]
name = name.iloc[:]['title']
base_addr = datas[['addr']]
base_addr = base_addr.iloc[:]['addr']
return list(name), list(base_addr)
def get_nv5_result(camping_list, camping_addrs):
    """
    Scrape Naver Map v5 category reviews for each campsite.
    :param camping_list: list of campsite titles
    :param camping_addrs: list of campsite base addresses
    :return:
        None; results are saved to CSV via s.save_res()
    """
highlight_reviews = []
try:
for i, camping_title in enumerate(camping_list):
s = nv5.CategoryScraping(camping_title)
s.switch_iframe()
title, addr = s.move_tab()
print(title, addr)
if title == '':
continue
category = s.get_categories()
cnt = 1
try:
while True:
try:
target_category = s.click_cagetory(category, cnt)
                    except Exception:  # no more categories to click
                        break
else:
elements = s.scroll_down(config.Config.COUNT)
for j, element in enumerate(elements[:config.Config.COUNT]): # default 100
try:
info = s.get_reviews(camping_title, camping_addrs[i], addr, target_category, j)
highlight_reviews.append(info)
                            except Exception:  # stop on missing/stale review elements
                                break
cnt += 1
finally:
s.driver.quit()
time.sleep(2)
            slackbot.IncomingWebhook.send_msg(f'{datetime.datetime.now()} completed #{i}: {camping_title}')
finally:
print(highlight_reviews)
s.save_res(highlight_reviews)
slackbot.IncomingWebhook.send_msg(f'crawling completed ! result line num : {len(highlight_reviews)}')
if __name__ == '__main__':
# camping_list, camping_addrs = target_list()
# get_nv5_result(camping_list[:], camping_addrs[:])
# s = kr.Scraping()
# s.get_search(target_list())
# v4 = nv4.Scraping(target_list())
# ids, place_name = v4.get_params()
# res_reviews = v4.get_reviews(ids, place_name)
# crawler = gc.CampCrawler()
# crawler.fetch_camp_list()
# crawler.fetch_camp_details()
# result = crawler.df
camp_link = gl.CampLink()
camp_link.fetch_link_list()
camp_link.fetch_link_details()
# ogcamp = oc.OgcampScraping()
# ogcamp.get_data()
# ogcamp = oc.OgcampScraping()
# ogcamp.get_data()
# ogcamp.get_details()
| 2.453125 | 2 |
vmtp/network.py | schoksey/vmtp | 0 | 12768134 | <reponame>schoksey/vmtp
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from log import LOG
# Module containing a helper class for operating on OpenStack networks
from neutronclient.common.exceptions import IpAddressInUseClient
from neutronclient.common.exceptions import NetworkInUseClient
from neutronclient.common.exceptions import NeutronException
from neutronclient.common.exceptions import PortInUseClient
import vmtp
class Network(object):
#
# This constructor will try to find an external network (will use the
# first network that is tagged as external - irrespective of its name)
# and a router attached to it (irrespective of the router name).
# ext_router_name is the name of the external router to create if not None
# and if no external router is found
#
def __init__(self, neutron_client, config):
self.neutron_client = neutron_client
self.networks = neutron_client.list_networks()['networks']
self.ext_net = None
self.ext_router = None
self.ext_router_created = False
self.config = config
# mgmt/data network:
# - first for same network
# - second for network to network communication
self.vm_int_net = []
self.ext_router_name = None
# Store state if the network is ipv4/ipv6 dual stack
self.ipv6_enabled = False
# If reusing existing management network just find this network
if self.config.reuse_network_name:
try:
# An existing management network must be reused
int_net = self.lookup_network(self.config.reuse_network_name)
self.vm_int_net.append(int_net)
except IndexError:
raise vmtp.VmtpException("Unable to find the network to be reused.")
return
else:
##############################################
# If a user provided ext_net_name is not available,
# then find the first network that is external
##############################################
for network in self.networks:
if network['router:external']:
try:
if network['name'] == config.ext_net_name:
self.ext_net = network
break
if not self.ext_net:
self.ext_net = network
except AttributeError:
###############################################
# A attribute error indicates, no user defined
# external network defined, so use the first one
###############################################
self.ext_net = network
break
if self.ext_net:
LOG.info("Using external network: %s.", self.ext_net['name'])
# Find or create the router to the external network
ext_net_id = self.ext_net['id']
routers = neutron_client.list_routers()['routers']
for router in routers:
external_gw_info = router['external_gateway_info']
if external_gw_info:
if external_gw_info['network_id'] == ext_net_id:
self.ext_router = router
LOG.info('Found external router: %s', self.ext_router['name'])
break
# create a new external router if none found and a name was given
self.ext_router_name = config.router_name
if (not self.ext_router) and self.ext_router_name:
self.ext_router = self.create_router(self.ext_router_name,
self.ext_net['id'])
LOG.info('Created ext router %s.', self.ext_router_name)
self.ext_router_created = True
else:
LOG.warning("No external network found.")
if config.ipv6_mode:
self.ipv6_enabled = True
# Create the networks and subnets depending on v4 or v6
enable_dhcp = not config.no_dhcp
if config.ipv6_mode:
for (net, subnet, cidr, subnet_v6, cidr_v6) in zip(config.internal_network_name,
config.internal_subnet_name,
config.internal_cidr,
config.internal_subnet_name_v6,
config.internal_cidr_v6):
int_net = self.create_net(net, subnet, cidr,
config.dns_nameservers,
subnet_v6, cidr_v6, config.ipv6_mode,
enable_dhcp=enable_dhcp)
self.vm_int_net.append(int_net)
if config.same_network_only:
break
else:
for (net, subnet, cidr) in zip(config.internal_network_name,
config.internal_subnet_name,
config.internal_cidr):
int_net = self.create_net(net, subnet, cidr,
config.dns_nameservers,
enable_dhcp=enable_dhcp)
self.vm_int_net.append(int_net)
if config.same_network_only:
break
# Add both internal networks to router interface to enable
# network to network connectivity
if self.ext_net:
self.__add_router_interface()
self.l2agent_type = self._get_l2agent_type()
self.internal_iface_dict = self._get_internal_iface_dict()
# Create a network with associated subnet
# Check first if a network with the same name exists, if it exists
# return that network.
# dns_nameservers: a list of name servers e.g. ['8.8.8.8']
def create_net(self, network_name, subnet_name, cidr, dns_nameservers,
subnet_name_ipv6=None, cidr_ipv6=None, ipv6_mode=None,
enable_dhcp=True):
for network in self.networks:
if network['name'] == network_name:
LOG.info('Found existing internal network: %s', network_name)
return network
body = {
'network': {
'name': network_name,
'admin_state_up': True
}
}
network = self.neutron_client.create_network(body)['network']
body = {
'subnet': {
'name': subnet_name,
'cidr': cidr,
'network_id': network['id'],
'enable_dhcp': True,
'ip_version': 4,
'dns_nameservers': dns_nameservers
}
}
if not enable_dhcp:
body['subnet']['enable_dhcp'] = False
subnet = self.neutron_client.create_subnet(body)['subnet']
# add subnet id to the network dict since it has just been added
network['subnets'] = [subnet['id']]
# If ipv6 is enabled than create and add ipv6 network
if ipv6_mode:
body = {
'subnet': {
'name': subnet_name_ipv6,
'cidr': cidr_ipv6,
'network_id': network['id'],
'enable_dhcp': True,
'ip_version': 6,
'ipv6_ra_mode': ipv6_mode,
'ipv6_address_mode': ipv6_mode
}
}
if not enable_dhcp:
body['subnet']['enable_dhcp'] = False
subnet = self.neutron_client.create_subnet(body)['subnet']
# add the subnet id to the network dict
network['subnets'].append(subnet['id'])
LOG.info('Created internal network: %s.', network_name)
return network
# Delete a network and associated subnet
def delete_net(self, network):
if network:
name = network['name']
# it may take some time for ports to be cleared so we need to retry
for _ in range(1, 5):
try:
self.neutron_client.delete_network(network['id'])
LOG.info('Network %s deleted.', name)
break
except NetworkInUseClient:
time.sleep(1)
# Add a network/subnet to a logical router
# Check that it is not already attached to the network/subnet
def __add_router_interface(self):
        # Pick the first subnet in the list - the list should be non-empty and
        # contain only 1 subnet since it is supposed to be a private network.
        # But first check that the router does not already have this subnet:
        # retrieve the list of all ports, then check if there is one port that
        # - matches the subnet
        # - and is attached to the router
        # Both management networks are assumed to be created together, so
        # checking one of them is sufficient.
ports = self.neutron_client.list_ports()['ports']
for port in ports:
# Skip the check on stale ports
if port['fixed_ips']:
port_ip = port['fixed_ips'][0]
if (port['device_id'] == self.ext_router['id']) and \
(port_ip['subnet_id'] == self.vm_int_net[0]['subnets'][0]):
LOG.info('Ext router already associated to the internal network.')
return
for int_net in self.vm_int_net:
body = {
'subnet_id': int_net['subnets'][0]
}
self.neutron_client.add_interface_router(self.ext_router['id'], body)
LOG.debug('Ext router associated to ' + int_net['name'])
# If ipv6 is enabled than add second subnet
if self.ipv6_enabled:
body = {
'subnet_id': int_net['subnets'][1]
}
self.neutron_client.add_interface_router(self.ext_router['id'], body)
# Detach the ext router from the mgmt network
def __remove_router_interface(self):
for int_net in self.vm_int_net:
if int_net:
# If ipv6 is enabled remove that subnet too
if self.ipv6_enabled:
body = {
'subnet_id': int_net['subnets'][1]
}
self.neutron_client.remove_interface_router(self.ext_router['id'],
body)
body = {
'subnet_id': int_net['subnets'][0]
}
try:
self.neutron_client.remove_interface_router(self.ext_router['id'],
body)
except NeutronException:
# May fail with neutronclient.common.exceptions.Conflict
# if there are floating IP in use - just ignore
LOG.warning('Router interface may have floating IP in use: not deleted')
                except TypeError:
                    # External router does not exist, so just continue
                    pass
# Lookup network given network name
def lookup_network(self, network_name):
networks = self.neutron_client.list_networks(name=network_name)
return networks['networks'][0]
    # Create a router and update its external gateway
    # to point at the external network
def create_router(self, router_name, net_id):
body = {
"router": {
"name": router_name,
"admin_state_up": True,
"external_gateway_info": {
"network_id": net_id
}
}
}
router = self.neutron_client.create_router(body)
return router['router']
# Show a router based on name
def show_router(self, router_name):
router = self.neutron_client.show_router(router_name)
return router
# Update a router given router and network id
def update_router(self, router_id, net_id):
body = {
"router": {
"name": "pns-router",
"external_gateway_info": {
"network_id": net_id
}
}
}
router = self.neutron_client.update_router(router_id, body)
return router['router']
# Create a port
def create_port(self, net_id, sec_group_list, vnic_type):
body = {
"port": {
"network_id": net_id,
"security_groups": sec_group_list
}
}
if vnic_type:
body['port']['binding:vnic_type'] = vnic_type
port = self.neutron_client.create_port(body)
if self.config.debug:
LOG.debug('Created port ' + port['port']['id'])
return port['port']
def delete_port(self, port):
LOG.debug('Deleting port ' + port['id'])
for _ in range(1, 5):
try:
self.neutron_client.delete_port(port['id'])
break
except PortInUseClient:
time.sleep(1)
# Create a floating ip on the external network and return it
def create_floating_ip(self):
body = {
"floatingip": {
"floating_network_id": self.ext_net['id']
}
}
fip = self.neutron_client.create_floatingip(body)
return fip
    # Delete a floating ip given its id
def delete_floating_ip(self, floatingip):
LOG.info("Deleting floating ip " + floatingip)
for _ in range(1, 5):
try:
self.neutron_client.delete_floatingip(floatingip)
break
except IpAddressInUseClient:
time.sleep(1)
    # Dispose of all network resources; call after all VMs have been deleted
def dispose(self):
# Delete the internal networks only of we did not reuse an existing
# network
if not self.config.reuse_network_name:
self.__remove_router_interface()
for int_net in self.vm_int_net:
self.delete_net(int_net)
# delete the router only if its name matches the pns router name
if self.ext_router_created:
try:
if self.ext_router['name'] == self.ext_router_name:
self.neutron_client.remove_gateway_router(
self.ext_router['id'])
self.neutron_client.delete_router(self.ext_router['id'])
LOG.info('External router %s deleted.', self.ext_router['name'])
except TypeError:
LOG.info("No external router set")
def _get_l2agent_type(self):
'''
Retrieve the list of agents
return 'Linux bridge agent' or 'Open vSwitch agent' or 'Unknown agent'
'''
agents = self.neutron_client.list_agents(fields='agent_type')['agents']
for agent in agents:
agent_type = agent['agent_type']
if 'Linux bridge' in agent_type or 'Open vSwitch' in agent_type:
return agent_type
return 'Unknown agent'
def _get_internal_iface_dict(self):
'''
return a dictionary which contains the information needed to determine
        which physical interface(s) are holding the internal traffic
For Linux Bridge, the Neutron L2 Agent will automatically put the
configurations from Linux Bridge into Neutron config. So just use
the Neutron API to fetch it.
For OVS, the Neutron L2 Agent is not pushing all information to Neutron
config, so we need a second step look-up which will happen in
sshutils.get_nic_name(). Here we just maintain:
In the case of VLAN:
{ '<HOSTNAME>' : '<The bridge which has the interface for internal traffic>' }
In the case of GRE/VxLAN:
{ '<HOSTNAME>' : '<IP Address of local interface>
'''
agents = self.neutron_client.list_agents()['agents']
dp_net = self.config.os_dataplane_network
internal_iface_dict = {}
for agent in agents:
agent_type = agent['agent_type']
hostname = agent['host']
if 'Linux bridge' in agent_type:
agent_detail = self.neutron_client.show_agent(agent['id'])['agent']
if dp_net in agent_detail['configurations']['interface_mappings']:
ifname = agent_detail['configurations']['interface_mappings'][dp_net]
internal_iface_dict[hostname] = ifname
elif 'Open vSwitch' in agent_type:
network_type = self.vm_int_net[0]['provider:network_type']
agent_detail = self.neutron_client.show_agent(agent['id'])['agent']
if network_type == "vlan":
if dp_net in agent_detail['configurations']['bridge_mappings']:
brname = agent_detail['configurations']['bridge_mappings'][dp_net]
internal_iface_dict[hostname] = brname
elif network_type == "vxlan" or network_type == 'gre':
ipaddr = agent_detail['configurations']['tunneling_ip']
internal_iface_dict[hostname] = ipaddr
return internal_iface_dict
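# Usage sketch (requires an authenticated neutronclient Client and a vmtp
# config object providing the attributes referenced above):
#   net = Network(neutron_client, config)
#   fip = net.create_floating_ip()['floatingip']
#   ... attach fip['floating_ip_address'] to a VM port ...
#   net.delete_floating_ip(fip['id'])
#   net.dispose()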
| 2.078125 | 2 |
setup.py | ivanleopoldo/mkepub-fork | 0 | 12768135 | import setuptools
with open('README.rst') as f:
readme = f.read()
setuptools.setup(
name='mkepub-fork',
version='1.2',
description='Simple minimalistic library for creating EPUB3 files',
long_description=readme,
url='https://github.com/ivanleopoldo/mkepub-fork/',
author='anqxyr, ivanleopoldo',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'],
packages=['mkepub'],
package_data={'mkepub': ['templates/*']},
tests_require=['epubcheck', 'pytest', 'pytest-cov', 'python-coveralls'],
install_requires=['jinja2'],
)
| 1.351563 | 1 |
Mxonline/apps/organization/adminx.py | chiefsh/song | 1 | 12768136 | # _*_coding:utf-8_*_
__author__ = 'song'
__date__ = '2017/10/24 23:03'
import xadmin
from .models import CityDict,CourseOrg,Teacher
class CityDictAdmin(object):
list_display = ['name', 'desc', 'add_time']
search_fields = ['name', 'desc', 'add_time']
list_filter = ['name', 'desc', 'add_time']
class CourseOrgAdmin(object):
list_display = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
search_fields = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
list_filter = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
class TeacherAdmin(object):
list_display = ['name', 'add_time','org','work_years','work_company','work_position']
search_fields = ['name', 'add_time','org','work_years','work_company','work_position']
list_filter = ['name', 'add_time','org','work_years','work_company','work_position']
xadmin.site.register(CityDict,CityDictAdmin)
xadmin.site.register(CourseOrg,CourseOrgAdmin)
xadmin.site.register(Teacher,TeacherAdmin) | 1.929688 | 2 |
example-project/cli.py | claudio-walser/srgssr-publication-data-api | 1 | 12768137 | # -*- coding: utf-8 -*-
"""Console script for srgssr_publication_data_api."""
import sys
import click
from dotenv import load_dotenv
from sgqlc.types import Variable, non_null
from srgssr_publication_data_api import PublicationDataApi
load_dotenv()  # pick up credentials from a local .env file, if one is present
@click.command()
@click.option('--size', help='number of items', default=10, type=int)
@click.option('--after', help='id of cursor to continue from')
@click.option('--url', help='url of the API', required=True)
@click.option('--username', help='username for basic auth')
@click.option('--password', help='password for basic auth')
def main(size, after, url, username, password):
"""Console script for srgssr_publication_data_api."""
variables = {'first': size}
if after:
variables['after'] = after
client = PublicationDataApi(url, username, password)
op = client.query_op(first=non_null(int), after=str)
op.faro_items(first=Variable('first'), after=Variable('after'))
print(f"Executing GraphQL Query: {op}")
    result = client.run_query(op, variables)
if result:
print(result)
return 0
else:
return 1
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 2.53125 | 3 |
oreo/apps.py | Caiseyann/instagram | 0 | 12768138 | <filename>oreo/apps.py
from django.apps import AppConfig
class OreoConfig(AppConfig):
name = 'oreo'
| 1.367188 | 1 |
arealocations/urls.py | mohammadanarul/join-branches | 0 | 12768139 | <filename>arealocations/urls.py
from django.urls import path
from .views import (
AreaLocationListview,
AreaLocationCreateView
)
app_name = 'arealocations'
urlpatterns = [
path('area-location-list/', AreaLocationListview.as_view(), name='area_location_list'),
path('area-location-create/', AreaLocationCreateView.as_view(), name='create_arealocation_view'),
] | 1.984375 | 2 |
minimize_1d.py | rlowrance/mlpack | 1 | 12768140 | <gh_stars>1-10
'''minimizers for functions of 1 variable
FUNCTIONS
golden_section(fun, low, high, interval_size) -> low*, high*
ARGS
fun(x) -> number
low: number
high: number
interval_size: search stops when high* - low* < interval_size
'''
from minimize_1d_golden_section import golden_section
if False:
# use imports, to avoid pyflakes warning
golden_section()
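# Usage sketch: bracket the minimizer of a convex function to within 1e-6.
#   low, high = golden_section(lambda x: (x - 2.0) ** 2, 0.0, 5.0, 1e-6)
#   assert high - low < 1e-6 and low <= 2.0 <= high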
| 2.296875 | 2 |
twinpy/interfaces/aiida/twinboundary_shear.py | kei0822kei/twinpy | 0 | 12768141 | #!/usr/bin/env python
"""
Aiida interface for twinpy.
"""
import warnings
import numpy as np
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.orm import (load_node,
Node,
QueryBuilder,
)
from aiida.plugins import WorkflowFactory
from aiida_twinpy.common.utils import get_create_node
from twinpy.interfaces.aiida.base import (check_process_class,
_WorkChain)
from twinpy.interfaces.aiida.vasp import (AiidaRelaxWorkChain)
from twinpy.interfaces.aiida.twinboundary \
import AiidaTwinBoudnaryRelaxWorkChain
@with_dbenv()
class AiidaTwinBoudnaryShearWorkChain(_WorkChain):
"""
TwinBoundaryShear work chain class.
"""
def __init__(
self,
node:Node,
):
"""
Args:
node: TwinBoundaryShearWorkChain node.
"""
process_class = 'TwinBoundaryShearWorkChain'
check_process_class(node, process_class)
super().__init__(node=node)
self._shear_strain_ratios = None
self._set_shear_strain_ratios()
self._shear_aiida_relaxes = None
self._set_shear_aiida_relaxes()
self._structure_pks = None
self._set_structure_pks()
self._aiida_twinboundary_relax = None
self._set_aiida_twinboundary_relax()
self._additional_relax_pks = None
self._set_additional_relax_pks()
self._twinboundary_analyzer = None
def _set_shear_strain_ratios(self):
"""
Set shear strain ratios.
"""
conf = self._node.inputs.twinboundary_shear_conf.get_dict()
self._shear_strain_ratios = conf['shear_strain_ratios']
@property
def shear_strain_ratios(self):
"""
Shear strain ratios.
"""
return self._shear_strain_ratios
def _set_structure_pks(self):
"""
Set structure pks.
"""
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(
Node,
filters={'label': {'==': 'get_twinboundary_shear_structure'}},
project=['id'],
with_incoming='wf')
cf_pks = [ q[0] for q in qb.all() ]
shear_ratios = [ load_node(q[0]).inputs.shear_strain_ratio.value for q in qb.all() ]
orders = list(np.argsort(shear_ratios))
orig_pks = []
input_pks = []
for ix in orders:
cf = load_node(cf_pks[ix])
orig_pks.append(cf.outputs.twinboundary_shear_structure_orig.pk)
input_pks.append(cf.outputs.twinboundary_shear_structure.pk)
rlx_pks = []
for aiida_rlx, i_struct_pk in zip(self._shear_aiida_relaxes, input_pks):
pks = aiida_rlx.get_pks()
assert pks['initial_structure_pk'] == i_struct_pk, \
"Input structure does not match."
rlx_pks.append(pks['final_structure_pk'])
self._structure_pks = {
'original_structures': orig_pks,
'input_structures': input_pks,
'relax_structures': rlx_pks,
}
@property
def structure_pks(self):
"""
Structure pks.
"""
return self._structure_pks
def _set_aiida_twinboundary_relax(self):
"""
        Set the twinboundary relax workchain object.
"""
tb_rlx_wf = WorkflowFactory('twinpy.twinboundary_relax')
tb_rlx_struct_pk = self._node.inputs.twinboundary_relax_structure.pk
tb_rlx = get_create_node(tb_rlx_struct_pk, tb_rlx_wf)
self._aiida_twinboundary_relax \
= AiidaTwinBoudnaryRelaxWorkChain(tb_rlx)
def _set_shear_aiida_relaxes(self):
"""
Set list of AiidaRelaxWorkChain objects.
"""
rlx_wf = WorkflowFactory('vasp.relax')
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(rlx_wf, with_incoming='wf', project=['id', 'label'])
qb_all = qb.all()
        qb_all.sort(key=lambda row: row[1])
rlx_pks = [ q[0] for q in qb_all ]
self._shear_aiida_relaxes = [ AiidaRelaxWorkChain(load_node(pk))
for pk in rlx_pks ]
def _set_additional_relax_pks(self):
"""
Set additional relax pks.
"""
        rlx_wf = WorkflowFactory('vasp.relax')
        addi_struct_pks = [ self._node.inputs.__getattr__(key).pk
                            for key in dir(self._node.inputs)
                            if 'additional_relax__structure' in key ]
        self._additional_relax_pks = \
                [ get_create_node(pk, rlx_wf).pk for pk in addi_struct_pks ]
@property
def shear_aiida_relaxes(self):
"""
List of AiidaRelaxWorkChain class objects.
"""
return self._shear_aiida_relaxes
def set_twinboundary_analyzer(self,
twinboundary_phonon_pk:int=None,
hexagonal_relax_pk:int=None,
hexagonal_phonon_pk:int=None,
):
"""
Set twinboundary analyzer.
Args:
            twinboundary_phonon_pk: Twinboundary phonon calculation pk.
hexagonal_relax_pk: Hexagonal relax calculation pk.
hexagonal_phonon_pk: Hexagonal phonon calculation pk.
"""
tb_rlx_pk = self._aiida_twinboundary_relax.pk
addi_rlx_pks = self._additional_relax_pks
aiida_tb = AiidaTwinBoudnaryRelaxWorkChain(load_node(tb_rlx_pk))
self._twinboundary_analyzer = aiida_tb.get_twinboundary_analyzer(
twinboundary_phonon_pk=twinboundary_phonon_pk,
additional_relax_pks=addi_rlx_pks,
hexagonal_relax_pk=hexagonal_relax_pk,
hexagonal_phonon_pk=hexagonal_phonon_pk,
)
@property
def twinboundary_analyzer(self):
"""
TwinBoundaryAnalyzer class object.
"""
return self._twinboundary_analyzer
def get_twinboundary_shear_analyzer(self,
shear_phonon_pks:list,
):
"""
Get twinboundary shear analyzer.
Args:
            shear_phonon_pks: List of phonon pks.
Raises:
RuntimeError: Property twinboundary_analyzer is not set.
Note:
            Length of the shear_phonon_pks list must be the same as that of
            the shear strain ratios. If there is no phonon result, set None.
"""
if self._twinboundary_analyzer is None:
raise RuntimeError("Please set twinboundary_analyzer before.")
assert len(self._shear_strain_ratios) == len(shear_phonon_pks), \
"Length of shear_phonon_pks does not match with shear_strain_ratios."
tb_anal = self._twinboundary_analyzer
shr_rlx_pks = \
[ aiida_rlx.pk for aiida_rlx in self._shear_aiida_relaxes ]
ratios = self._shear_strain_ratios
if len(shr_rlx_pks) != len(ratios):
warnings.warn("Some RelaxWorkChain has not finished normally. "
+"They are ignored.")
tb_shear_analyzer = \
tb_anal.get_twinboundary_shear_analyzer_from_relax_pks(
shear_relax_pks=shr_rlx_pks,
shear_strain_ratios=ratios[:len(shr_rlx_pks)],
shear_phonon_pks=shear_phonon_pks[:len(shr_rlx_pks)],
)
return tb_shear_analyzer
def get_pks(self):
"""
Get workflow pks.
Returns:
dict: Workflow pks.
"""
wf_pks = {
'twinboundary_relax_pk': self._aiida_twinboundary_relax.pk,
'additional_relax_pks': self._additional_relax_pks,
'shear_aiida_relax_pks': [ shr_rlx.pk for shr_rlx
in self._shear_aiida_relaxes ],
}
return wf_pks
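# Hypothetical usage sketch; 12345 stands in for the pk of a finished
# TwinBoundaryShearWorkChain node in your AiiDA database:
if __name__ == '__main__':
    wf = AiidaTwinBoudnaryShearWorkChain(load_node(12345))
    wf.set_twinboundary_analyzer()
    analyzer = wf.get_twinboundary_shear_analyzer(
            shear_phonon_pks=[None] * len(wf.shear_strain_ratios))
    print(wf.get_pks())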
| 2.03125 | 2 |
little_helpers/reorg_ebooks.py | apefind/python | 0 | 12768142 | <gh_stars>0
#!/usr/bin/env python
import argparse
import pathlib
from apefind.util import script
from apefind.util.script import _ok
log = script.get_logger()
def parse_args():
parser = script.ArgumentParser()
parser.add_argument("path", nargs=argparse.REMAINDER)
return parser.parse_args()
def fix_name(name):
for _ in range(3):
name = name.replace(" ", " ")
return name.strip()
def get_author_and_book(p):
if " - " not in p.name:
return p.parent.name, p.name
S = p.name.split("-")
author = S[0]
book = "-".join(S[1:])
return fix_name(author), fix_name(book)
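# Example: get_author_and_book(pathlib.Path('J. R. R. Tolkien - The Hobbit.epub'))
# yields ('J. R. R. Tolkien', 'The Hobbit.epub').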
def get_full_book_title(author, book):
return f"{author} - {book}"
def reorganize_author(root):
log.info(f" {root} {_ok}")
for p in sorted(root.iterdir()):
if not p.is_file():
continue
author, book = get_author_and_book(p)
full_title = get_full_book_title(author, book)
if p.name == full_title:
continue
log.info(f" {book} → {full_title} {_ok}")
p.rename(root / full_title)
def reorganize_genre(root):
for p in sorted(root.iterdir()):
if p.is_dir():
reorganize_author(p)
@script.run()
def run_script(args):
log.info(f" * reorganizing books")
for p in args.path:
reorganize_genre(pathlib.Path(p))
if __name__ == "__main__":
run_script(parse_args())
| 2.828125 | 3 |
server/Original/test/__init__.py | izhuxin/AR.Drone | 0 | 12768143 | <filename>server/Original/test/__init__.py
__author__ = 'mty'
| 1.046875 | 1 |
h02_client.py | faberto/gps2nextcloud | 2 | 12768144 | <gh_stars>1-10
import socket
HOST = "127.0.0.1" # The server's hostname or IP address
PORT = 5012 # The port used by the server
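# The payloads below are sample location frames for the H02/HQ GPS tracker
# protocol (an assumption based on the file name and the '*HQ,...#' text
# frames in the commented-out examples).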
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
# s.sendall(b"*HQ,865205030330012,V1,145452,A,2240.55181,N,11358.32389,E,0.00,0,100815,FFFFFBFF#")
# s.sendall(b"*HQ,865205030330012,V2,150421,A,2240.55841,N,11358.33462,E,2.06,0,100815,FFFFFBFF#")
# s.sendall(bytes.fromhex("2410307310010503162209022212874500113466574c014028fffffbffff0001"))
# s.sendall(bytes.fromhex("2a48512c343231303230353832322c56312c3231323034342c562c353034392e393134392c4e2c30303434322e303831362c452c3030302e30302c3030302c3135303731392c46464537464246462c3230362c31302c302c302c36232a48512c343231303230353832322c56342c56312c323031393037313532313234303023"))
s.sendall(bytes.fromhex("2442102058220936310409195045054006004201800e000000ffe7fbffff001909000003af00ce0a000000005c"))
    # wrong Y format:
    # s.sendall(bytes.fromhex("5906410400001533281008152240563200113583509e003000e7e7fbffff0009"))
| 2.328125 | 2 |
16/infection_statistics.py | OhiyoX/LearnMatplotlibOnTheWay | 0 | 12768145 | <reponame>OhiyoX/LearnMatplotlibOnTheWay
import csv
import matplotlib.pyplot as plt
import datetime
import CaixinData.caixin_pneumonia_data as cpd
# Update the data
option = input("Update data? y/n")
if option == 'y':
cpd.run()
filename = 'CaixinData/data2.csv'
with open(filename, encoding='utf-8') as f:
    # Count how many lines there are
lines = len(f.readlines())
    # Seek back to the start, since readlines() leaves the file pointer at the end
f.seek(0)
reader = csv.reader(f)
header_row = next(reader)
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['font.serif'] = ['KaiTi']
"""
enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,
同时列出数据和数据下标,一般用在 for 循环当中。
"""
# Collect the nationwide and Hubei totals
total = []
hubei = []
for row in reader:
try:
total.append(int(row[1]))
hubei.append(int(row[2]))
except ValueError:
print('data missing')
# Build the date axis: the first two reporting dates, then daily from 2020-01-16
day_list = ['2019-12-31', '2020-1-11']
begin = datetime.date(2020, 1, 16)
for i in range(lines-3):
day = begin + datetime.timedelta(days=i)
day_list.append(str(day))
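# e.g. day_list == ['2019-12-31', '2020-1-11', '2020-01-16', '2020-01-17', ...]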
# Plot the series
fig = plt.figure(dpi=200, figsize=(10,6))
plt.plot(day_list, total, c='black', linewidth=3)
plt.plot(day_list, hubei, c='gray', linewidth=3)
plt.fill_between(day_list, total, hubei, facecolor='blue', alpha=0.1)
# Format the figure
plt.title("Confirmed cases nationwide and in Hubei", fontsize=24)
plt.ylabel("Infection Toll", fontsize=16)
plt.xticks(rotation=90)  # rotate the tick labels by 90 degrees
plt.savefig("result.png", dpi=200, bbox_inches='tight')
plt.show()
| 2.984375 | 3 |
py/gzip_dicts.py | zhongxinghong/Java-Jieba | 6 | 12768146 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: gzip_dicts.py
import os
from utils import txt_load, gzip_dump
OUTPUT_DIR = "../src/main/resources/"
fileList = [
"dict.big",
"dict.std",
"dict.small",
"idf.std",
]
for file in fileList:
txtFile = os.path.join(OUTPUT_DIR, file+".txt")
gzFile = os.path.join(OUTPUT_DIR, file+".gz")
gzip_dump(txt_load(txtFile), gzFile)
| 2.609375 | 3 |
LowLevelApi/NGPF/Python/1Main/bgpRestNgpfPy3.py | murumuthu/IxNetwork | 0 | 12768147 | #!/usr/local/python3.4.6/bin/python3.4
# DISCLAIMER
#
# This is a sample script for demo and reference purpose only.
# It is subject to change for content updates without warning.
#
# DESCRIPTION
# This sample script uses two back-to-back Ixia ports.
# Supports both IxNetwork API server and Linux API server connection.
#
# - Configure two IPv4 Topology Groups
# - Configure BGP and network advertising routes.
# - Start protocols
# - Verify protocol session
# - Create Traffic Item
# - Apply Traffic
# - Start Traffic
# - Get stats
#
import requests, json, sys, os, time, traceback
import IxN_RestApiPy3
# Which REST API server do you want to connect to: linux or windows
connectToApiServer = 'linux'
# Settings for Windows
if connectToApiServer == 'windows':
ixNetRestServerIp = '192.168.70.127'
ixNetRestServerPort = '11009'
# Settings for Linux API server
if connectToApiServer == 'linux':
linuxServerIp = '192.168.70.137'
username = 'admin'
password = '<PASSWORD>'
deleteLinuxSessionWhenDone = True
# Set to True if the Linux API Server is newly installed.
# We need to set the license server settings once.
isLinuxApiServerNewlyInstalled = False
licenseServerIp = '192.168.70.127'
    licenseMode = 'subscription'  # IxVM uses subscription. Physical chassis uses perpetual.
licenseTier = 'tier3'
linuxServerUrl = 'https://%s' % linuxServerIp
ixChassisIp = '192.168.70.10'
portList = [[ixChassisIp, '1', '1'],
[ixChassisIp, '2', '1']]
# For connecting to Linux API server that supports SSL. Provide your SSL certificate here.
verifySslCert = False
try:
# If connecting to Linux API server
if connectToApiServer == 'linux':
# This will disable all the SSL warnings on your terminal.
requests.packages.urllib3.disable_warnings()
returnList = IxN_RestApiPy3.connectToLinuxApiServer(linuxServerIp, username=username, password=password)
sessionUrl, sessionId, apiKey = returnList
if isLinuxApiServerNewlyInstalled:
IxN_RestApiPy3.linuxServerConfigGlobalLicenseServer(linuxServerIp, licenseServerIp,
licenseMode, licenseTier,
apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.linuxServerConfigNewSessionLicense(sessionUrl, linuxServerIp, apiKey, verifySslCert=verifySslCert)
# If connecting to Windows API server
if connectToApiServer == 'windows':
sessionUrl = IxN_RestApiPy3.getSessionUrl(ixNetRestServerIp, ixNetRestServerPort)
apiKey=None
sessionId = sessionUrl.split('/ixnetwork')[0]
IxN_RestApiPy3.newBlankConfig(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.connectIxChassis(sessionUrl,chassisIp=ixChassisIp, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.createVports(sessionUrl, portList, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.assignPorts(sessionUrl, portList, apiKey=apiKey, verifySslCert=verifySslCert)
topologyObj1 = IxN_RestApiPy3.createTopologyNgpf(sessionUrl,
portList=[portList[0]],
topologyName='MyTopo1',
apiKey=apiKey, verifySslCert=verifySslCert)
deviceGroupObj1 = IxN_RestApiPy3.createDeviceGroupNgpf(topologyObj1,
multiplier=1,
deviceGroupName='myDG1',
apiKey=apiKey, verifySslCert=verifySslCert)
topologyObj2 = IxN_RestApiPy3.createTopologyNgpf(sessionUrl,
portList=[portList[1]],
topologyName='MyTopo2',
apiKey=apiKey, verifySslCert=verifySslCert)
deviceGroupObj2 = IxN_RestApiPy3.createDeviceGroupNgpf(topologyObj2,
multiplier=1,
deviceGroupName='myDG2',
apiKey=apiKey, verifySslCert=verifySslCert)
ethernetObj1 = IxN_RestApiPy3.createEthernetNgpf(deviceGroupObj1,
ethernetName='MyEth1',
macAddress={'start': '00:01:01:00:00:01',
'direction': 'increment',
'step': '00:00:00:00:00:01'},
macAddressPortStep='disabled',
vlanId={'start': 103,
'direction': 'increment',
'step':0},
apiKey=apiKey, verifySslCert=verifySslCert)
ethernetObj2 = IxN_RestApiPy3.createEthernetNgpf(deviceGroupObj2,
ethernetName='MyEth2',
macAddress={'start': '00:01:02:00:00:01',
'direction': 'increment',
'step': '00:00:00:00:00:01'},
macAddressPortStep='disabled',
vlanId={'start': 103,
'direction': 'increment',
'step':0},
apiKey=apiKey, verifySslCert=verifySslCert)
ipv4Obj1 = IxN_RestApiPy3.createIpv4Ngpf(ethernetObj1,
ipv4Address={'start': '1.1.1.1',
'direction': 'increment',
'step': '0.0.0.1'},
ipv4AddressPortStep='disabled',
gateway={'start': '1.1.1.2',
'direction': 'increment',
'step': '0.0.0.0'},
gatewayPortStep='disabled',
prefix=24,
resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)
ipv4Obj2 = IxN_RestApiPy3.createIpv4Ngpf(ethernetObj2,
ipv4Address={'start': '1.1.1.2',
'direction': 'increment',
'step': '0.0.0.1'},
ipv4AddressPortStep='disabled',
gateway={'start': '1.1.1.1',
'direction': 'increment',
'step': '0.0.0.0'},
gatewayPortStep='disabled',
prefix=24,
resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)
    # flap = True or False.
    # For a single host IP interface, pass one value: True or False.
    # For multiple host IP interfaces, pass a list such as ['true', 'false'],
    # with one entry per host IP interface.
bgpObj1 = IxN_RestApiPy3.configBgp(ipv4Obj1,
apiKey=apiKey,
verifySslCert=verifySslCert,
name = 'bgp_1',
enableBgp = True,
holdTimer = 90,
dutIp={'start': '1.1.1.2',
'direction': 'increment',
'step': '0.0.0.0'},
enableGracefulRestart = False,
restartTime = 45,
type = 'internal',
enableBgpIdSameasRouterId = True,
staleTime = 0,
flap = False)
bgpObj2 = IxN_RestApiPy3.configBgp(ipv4Obj2,
apiKey=apiKey,
verifySslCert=verifySslCert,
name = 'bgp_2',
enableBgp = True,
holdTimer = 90,
dutIp={'start': '1.1.1.1',
'direction': 'increment',
'step': '0.0.0.0'},
enableGracefulRestart = False,
restartTime = 45,
type = 'internal',
enableBgpIdSameasRouterId = True,
staleTime = 0,
flap = False)
networkGroupObj1 = IxN_RestApiPy3.configNetworkGroup(deviceGroupObj1,
name='networkGroup1',
multiplier = 100,
networkAddress = {'start': '172.16.17.32',
'step': '0.0.0.1',
'direction': 'increment'},
prefixLength = 24,
apiKey=apiKey, verifySslCert=verifySslCert)
networkGroupObj2 = IxN_RestApiPy3.configNetworkGroup(deviceGroupObj2,
name='networkGroup2',
multiplier = 100,
networkAddress = {'start': '172.16.17.32',
'step': '0.0.0.1',
'direction': 'increment'},
prefixLength = 24,
apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.startAllProtocols(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.verifyProtocolSessionsNgpf([ipv4Obj1, ipv4Obj2, bgpObj1, bgpObj2],
apiKey=apiKey, verifySslCert=verifySslCert)
# For all parameter options, please go to the API configTrafficItem
# mode = create or modify
trafficStatus = IxN_RestApiPy3.configTrafficItem(sessionUrl, apiKey=apiKey,
verifySslCert=verifySslCert,
mode='create',
trafficItem = {
'name':'Topo1 to Topo2',
'trafficType':'ipv4',
'biDirectional':True,
'srcDestMesh':'one-to-one',
'routeMesh':'oneToOne',
'allowSelfDestined':False,
'trackBy': ['flowGroup0', 'vlanVlanId0']
},
endpoints = [{'name':'Flow-Group-1',
'sources': [topologyObj1],
'destinations': [topologyObj2]}],
configElements = [{'transmissionType': 'fixedFrameCount',
'frameCount': 50000,
'frameRate': 88,
'frameRateType': 'percentLineRate',
'frameSize': 128}])
trafficItemObj = trafficStatus[0]
endpointObjList = trafficStatus[1]
configElementObjList = trafficStatus[2]
IxN_RestApiPy3.regenerateTrafficItems(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.applyTraffic(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.startTraffic(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
IxN_RestApiPy3.checkTrafficState(sessionUrl,
expectedState=['started', 'startedWaitingForStats'],
apiKey=apiKey, verifySslCert=verifySslCert
)
    # If you're sending a large number of packets, set timeout to a larger number.
IxN_RestApiPy3.checkTrafficState(sessionUrl,
expectedState=['stopped'],
timeout=45, apiKey=apiKey, verifySslCert=verifySslCert)
stats = IxN_RestApiPy3.getStats(sessionUrl, viewName='Flow Statistics', apiKey=apiKey)
print('\n{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10}'.format(
txPort='txPort', txFrames='txFrames', rxPort='rxPort', rxFrames='rxFrames', frameLoss='frameLoss'))
print('-'*90)
for flowGroup,values in stats.items():
txPort = values['Tx Port']
rxPort = values['Rx Port']
txFrames = values['Tx Frames']
rxFrames = values['Rx Frames']
frameLoss = values['Frames Delta']
print('{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10} '.format(
txPort=txPort, txFrames=txFrames, rxPort=rxPort, rxFrames=rxFrames, frameLoss=frameLoss))
#if txFrames != rxFrames:
# print('\nFrame loss error:', int(txFrames) - int(rxFrames))
if connectToApiServer == 'linux' and deleteLinuxSessionWhenDone == True:
IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except IxN_RestApiPy3.IxNetRestUtilityException as errMsg:
print('\nTest failed! {0}\n'.format(errMsg))
if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone == True:
IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except Exception as errMsg:
print('\nTest failed! {0}\n'.format(traceback.print_exc()))
if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone == True:
IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except KeyboardInterrupt:
print('\nAborting ...')
if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone == True:
IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
| 2.078125 | 2 |
cgi-bin/hello2.py | wyolum/timezone_db | 0 | 12768148 | <filename>cgi-bin/hello2.py
#!/usr/bin/python
from __future__ import print_function
import cgi
import os
import urllib
import timezone_db
import cgitb
cgitb.enable()
try:
ip = cgi.escape(os.environ["REMOTE_ADDR"])
f = cgi.FieldStorage()
fields = {}
for k in f:
fields[k] = f[k].value
if 'macaddress' in fields:
macaddress = fields['macaddress']
else:
macaddress = 'NA'
except Exception:
    # Fall back to dummy values when not running under a real web server.
    ip = "1.2.3.4"
    localip = "192.168.1.123"
    macaddress = 'made:up:mac:address'
    fields = {'localip': localip, 'macaddress': macaddress}
# tz = timezone_db.select(ip, macaddress)
tz = 'TIMEZONE'
body = '''Hello World, how are things at %s?
tz: %s
macaddress: %s
''' % (ip, tz, macaddress)
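# A CGI response must emit the Content-type header followed by a blank
# line before any body output.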
print ("Content-type: text/html\n\n")
print("<HEAD>HELLO</HEAD><BODY>%s</BODY>" % body)
| 3.203125 | 3 |
biocontainers/pipelines.py | chakrabandla/biocontainers-backend | 2 | 12768149 | <gh_stars>1-10
import configparser
import logging
import urllib
import click
from biocontainers.biomongo.helpers import InsertContainers
from biocontainers.dockerhub.models import DockerHubReader
from biocontainers.github.models import GitHubConfiguration, GitHubDockerReader, GitHubMulledReader, \
LocalGitReader
from biocontainers.quayio.models import QuayIOReader
from biocontainers.singularity.models import SingularityReader
from ruamel.yaml import YAML
logger = logging.getLogger('biocontainers.pipelines')
def print_help(ctx, param, value):
if value is False:
return
click.echo(ctx.get_help())
ctx.exit()
def get_config(file):
"""
This method read the default configuration file configuration.ini in the same path of the pipeline execution
:return:
"""
config = configparser.ConfigParser()
config.read(file)
return config
def import_quayio_containers(config, config_profile):
"""
Import quayio containers into the registry database
:param config_profile:
:param config: Parameters for quayio
:return:
"""
logger.info("Starting importing Conda packages")
reader = QuayIOReader(config[config_profile]['QUAYIO_CONTAINER_LIST'],
config[config_profile]['QUAYIO_CONTAINER_DETAILS'], config[config_profile]['NAMESPACE'])
quayio_containers = reader.get_containers()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.insert_quayio_containers(quayio_containers)
def import_dockerhub_containers(config, config_profile):
"""
Import dockerhub containers into the registry database
:param config_profile:
:param config:
:return:
"""
logger.info("Starting importing DockerHub packages")
reader = DockerHubReader(config[config_profile]['DOCKER_HUB'], config[config_profile]['DOCKER_HUB_TAG'],
config[config_profile]['NAMESPACE'])
dockerhub_containers = reader.get_containers()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.insert_dockerhub_containers(dockerhub_containers)
def import_singularity_containers(config, config_profile):
"""
Import singularity containers into the registry database
:param config_profile:
:param config:
:return:
"""
logger.info("Starting importing singularity packages")
reader = SingularityReader(config[config_profile]['SINGULARITY_CONTAINERS'])
containers = reader.get_containers()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.insert_singularity_containers(containers)
def annotate_quayio_recipes(config, config_profile):
github_url = config[config_profile]['GITHUB_GIT_URL']
github_local = config[config_profile]['GITHUB_LOCAL_REPO']
github_reader = LocalGitReader(github_url, github_local)
github_reader.clone_url()
conda_recipes = github_reader.read_conda_recipes()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_quayio_containers(conda_recipes)
def annotate_docker_recipes(config, config_profile):
github_conf = GitHubConfiguration(config[config_profile]['GITHUB_API_DOCKER'],
config[config_profile]['GITHUG_DOCKER_RECIPES_READABLE'])
github_reader = GitHubDockerReader(github_conf)
docker_recipes = github_reader.read_docker_recipes()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_docker_containers(docker_recipes)
def annotate_conda_recipes(config, config_profile):
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_conda_recipes()
def annotate_workflows_func(config, config_profile):
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_workflows(config, config_profile)
def annotate_multi_package_containers_func(config, config_profile):
github_conf = GitHubConfiguration(config[config_profile]['GITHUB_API_MULLED_FILES'],
config[config_profile]['GITHUB_MULLED_FILE_CONTENTS'])
reader = GitHubMulledReader(github_conf)
mulled_entries = reader.get_mulled_entries()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.update_multi_package_containers(mulled_entries)
def annotate_biotools_recipes(config, config_profile):
github_url = config[config_profile]['GITHUB_TOOLS_URL']
github_local = config[config_profile]['GITHUB_TOOLS_LOCAL_REPO']
github_reader = LocalGitReader(github_url, github_local)
github_reader.clone_url()
tools_recipes = github_reader.read_biotools_recipes()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_biotools_metadata(tools_recipes)
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.compute_similarity()
def annotate_biotools_recipes_github(config, config_profile):
github_url = config[config_profile]['GITHUB_BIOTOOLS_REPO']
github_local = config[config_profile]['GITHUB_BIOTOOLS_LOCAL_REPO']
github_reader = LocalGitReader(github_url, github_local)
github_reader.clone_url()
tools_recipes = github_reader.read_biotools_github_recipes()
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.annotate_biotools_metadata(tools_recipes)
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.compute_similarity()
def report_missing_tools(config, config_profile):
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
missing_info = mongo_helper.get_missing_info_tools()
tools = {}
missing_info.sort(key=lambda x: x.total_pulls, reverse=True)
for tool in missing_info:
missing_tool = {}
missing_tool['name'] = tool.name
missing_tool['description'] = tool.description
missing_tool['license'] = tool.license
missing_tool['home_url'] = tool.home_url
missing_tool['total_pulls'] = tool.total_pulls
tools[tool.id] = missing_tool
yaml = YAML()
yaml.indent(mapping=4, sequence=6, offset=3)
with open('../missing_annotations.yaml', 'w') as outfile:
yaml.dump(tools, outfile)
def get_database_uri(param):
uri = 'mongodb://' + param['MONGODB_USER'] + ":" + param['MONGODB_PASS'] + '@' + param['MONGODB_HOST'] + ':' + \
param['MONGO_PORT'] + '/' + param['BIOCONT_DB_NAME'] + '?ssl=false&authSource=' + param['MONGODB_ADMIN_DB']
return uri
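# Example result with hypothetical values:
# mongodb://admin:secret@localhost:27017/biocontainers?ssl=false&authSource=admin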
def annotate_from_file_containers(config, config_profile):
yaml = YAML()
url = config[config_profile]['ANNOTATED_FILE']
    response = urllib.request.urlopen(url)
    file_annotations = yaml.load(response)
mongo_helper = InsertContainers(config[config_profile]['DATABASE_URI'])
mongo_helper.update_from_file(file_annotations)
mongo_helper.compute_facets()
mongo_helper.compute_similarity()
@click.command()
@click.option('--import-quayio', '-q', help='Import Quay.io Recipes', is_flag=True)
@click.option('--import-docker', '-k', help="Import Docker Recipes", is_flag=True)
@click.option('--import-singularity', '-s', help="Import Singularity Recipes", is_flag=True)
@click.option('--annotate-docker', '-ad', help='Annotate Docker Recipes', is_flag=True)
@click.option('--annotate-quayio', '-aq', help='Annotate Quay.io Recipes', is_flag=True)
@click.option('--annotate-conda', '-ac', help='Annotate Conda packages', is_flag=True)
@click.option('--annotate-biotools', '-ab', help='Annotate BioTools metadata from Github', is_flag=True)
@click.option('--annotate-workflows', '-aw', help='Annotate Workflows', is_flag=True)
@click.option('--annotate-identifiers', '-ai', help='Annotate external identifiers (e.g biotools)', is_flag=True)
@click.option('--annotate-multi-package-containers', '-am', help='Annotate multi package containers', is_flag=True)
@click.option('--report-missing-info', '-ri', help="This pipeline will report the containers without metadata", is_flag=True)
@click.option('--annotate-from-file', '-af', help='Annotate metadata from internal file', is_flag=True)
@click.option('--config-file', '-c', type=click.Path(), default='configuration.ini')
@click.option('--config-profile', '-a', help="This option allow to select a config profile", default='PRODUCTION')
@click.option('-db', '--db-name', help="Name of the database", envvar='BIOCONT_DB_NAME')
@click.option('-h', '--db-host', help='Host the database', envvar='MONGODB_HOST')
@click.option('-a', '--db-auth-database', help='Authentication database in Mongo', envvar='MONGODB_ADMIN_DB')
@click.option('-u', '--db-user', help='Database root user', envvar='MONGODB_USER', default='admin')
@click.option('-pw', '--db-password', help='Database password', envvar='MONGODB_PASS')
@click.option('-p', '--db-port', help='Database port', envvar='MONGO_PORT', default='27017')
@click.pass_context
def main(ctx, import_quayio, import_docker, import_singularity, annotate_docker, annotate_quayio,
annotate_conda, annotate_biotools, annotate_workflows, annotate_identifiers,
annotate_multi_package_containers, report_missing_info, annotate_from_file,
config_file, config_profile, db_name,
db_host, db_auth_database, db_user,
db_password, db_port):
"""
    The annotations from biotools (option '--annotate-biotools' / '-ab') are deprecated for now.
"""
config = get_config(config_file)
if config[config_profile]['VERBOSE'] == "True":
for key in config[config_profile]:
print(key + "=" + config[config_profile][key])
if (db_name is None) or (db_host is None) or (db_user is None):
print_help(ctx, None, value=True)
else:
config[config_profile]['BIOCONT_DB_NAME'] = db_name
config[config_profile]['MONGODB_HOST'] = db_host
config[config_profile]['MONGO_PORT'] = db_port
config[config_profile]['MONGODB_USER'] = db_user
config[config_profile]['MONGODB_ADMIN_DB'] = db_auth_database
        config[config_profile]['MONGODB_PASS'] = db_password
config[config_profile]['DATABASE_URI'] = get_database_uri(config[config_profile])
if import_quayio is not False:
import_quayio_containers(config, config_profile)
if import_docker is not False:
import_dockerhub_containers(config, config_profile)
if import_singularity is not False:
import_singularity_containers(config, config_profile)
if annotate_docker is not False:
annotate_docker_recipes(config, config_profile)
if annotate_quayio is not False:
annotate_quayio_recipes(config, config_profile)
if annotate_conda is not False:
annotate_conda_recipes(config, config_profile)
if annotate_workflows is not False:
annotate_workflows_func(config, config_profile)
if annotate_identifiers is not False:
annotate_biotools_recipes(config, config_profile)
if annotate_multi_package_containers is not False:
annotate_multi_package_containers_func(config, config_profile)
if annotate_biotools is not False:
annotate_biotools_recipes_github(config, config_profile)
if report_missing_info is not False:
report_missing_tools(config, config_profile)
if annotate_from_file:
annotate_from_file_containers(config, config_profile)
if __name__ == "__main__":
main()
| 2.15625 | 2 |
experiments/draw_and_classify.py | trejsu/shaper | 0 | 12768150 | <reponame>trejsu/shaper
import argparse
import logging
import glob
import os
import time
from pathlib import Path
import pandas as pd
ARGS = None
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
DRAW_COMMANDS_PATH = os.path.join(str(Path.home()), 'draw-commands.txt')
CLASSIFY_COMMANDS_PATH = os.path.join(str(Path.home()), 'classify-commands.txt')
CSV_HEADER = 'n,img,true_class,top1_class,top1_prob,top2_class,top2_prob,top3_class,top3_prob,top4_class,' \
'top4_prob,top5_class,top5_prob,top1,top2,top3,top4,top5\n'
DRAW_CMD_TEMPLATE = 'python {} --input {} --output {}-%d.jpg --n {} --resize {} --output-size {}\n'
DARKNET_CMD_TEMPLATE = 'printf \'{}\' | ./darknet classifier predict cfg/imagenet1k.data cfg/darknet19.cfg ' \
'darknet19.weights | sed \'s/Enter Image Path: //\' > {}'
DARKNET_OUTPUT_PATH = os.path.join(str(Path.home()), 'darknet-output-%d.txt')
TIME_PATH = os.path.join(str(Path.home()), 'imagenet-start-time')
def main():
remove_old_data()
save_start_time()
num_images = draw()
save_classification_start_time()
drawings_chunks = classify_images(
img_dir=ARGS.drawings_dir,
num=num_images * ARGS.n,
output_file=DARKNET_OUTPUT_PATH
)
write_classification_results_to_csv(drawings_chunks)
log.info(f'Results saved under {ARGS.result_csv_path}')
def classify_images(img_dir, num, output_file):
images = os.listdir(img_dir)
num_images = len(images)
log.info(f'Found {num_images} images, should be {num}')
chunk_len = 100 if num_images > 100 * ARGS.cpu else num_images // ARGS.cpu + 1
log.info(f'Chunk length = {chunk_len}.')
images_chunks = [images[i:i + chunk_len] for i in range(0, num_images, chunk_len)]
log.info(f'Divided images to classify into {len(images_chunks)} parts.')
classify(chunks=images_chunks, images_dir=img_dir, output_file=output_file)
return images_chunks
def draw():
num_images = prepare_commands_for_drawing()
parallel_cmd = f'parallel -j {ARGS.cpu} < {DRAW_COMMANDS_PATH}'
log.info('Starting drawing...')
os.system(parallel_cmd)
log.info('Drawing completed')
return num_images
def write_classification_results_to_csv(chunks):
if os.path.exists(ARGS.result_csv_path):
old_results = ARGS.result_csv_path + '.old'
log.warning(f'Found old csv with results, renaming to {old_results}')
os.rename(ARGS.result_csv_path, old_results)
with open(ARGS.result_csv_path, "a") as csv:
csv.write(CSV_HEADER)
write_results(csv, chunks)
def write_results(csv, chunks):
top1_cls, top1_prob, top2_cls, top2_prob, top3_cls, top3_prob, top4_cls, top4_prob, top5_cls, top5_prob = \
extract_results(output_file=DARKNET_OUTPUT_PATH, num_chunks=len(chunks))
df = pd.read_csv(ARGS.img_cls_mapping)
log.info(f'Loaded img to class mapping data frame with {len(df.index)} rows.')
img_to_cls = dict(zip(df['img'], df['class']))
for i, chunk in enumerate(chunks):
for j in range(len(chunk)):
drawing = chunk[j]
name = drawing.split('-')[0]
n = drawing.split('-')[1].split('.')[0]
true_cls, top1, top2, top3, top4, top5 = score_predictions(name, [top1_cls[i][j], top2_cls[i][j],
top3_cls[i][j], top4_cls[i][j],
top5_cls[i][j]], img_to_cls)
csv_line = ','.join([n, name, true_cls, top1_cls[i][j], top1_prob[i][j], top2_cls[i][j], top2_prob[i][j],
top3_cls[i][j], top3_prob[i][j], top4_cls[i][j], top4_prob[i][j], top5_cls[i][j],
top5_prob[i][j], top1, top2, top3, top4, top5]) + '\n'
csv.write(csv_line)
def score_predictions(name, top_cls, img_to_cls):
true_cls = img_to_cls[name]
return true_cls, str(int(true_cls in top_cls[:1])), str(int(true_cls in top_cls[:2])), \
str(int(true_cls in top_cls[:3])), str(int(true_cls in top_cls[:4])), str(int(true_cls in top_cls[:5]))
def extract_results(output_file, num_chunks):
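    # Ten parallel result lists: top-1..top-5 classes and probabilities,
    # each holding one sub-list per output chunk.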
results = [[], [], [], [], [], [], [], [], [], []]
for i in range(num_chunks):
chunk_results = extract_results_for_one_chunk(output_file % i)
for j in range(10):
results[j].append(chunk_results[j])
return results
def extract_results_for_one_chunk(output_file):
log.info(f'Extracting results for {output_file}')
with open(output_file, "r") as darknet_output:
darknet = darknet_output.readlines()
probs = [line.split('%')[0].strip() for line in darknet]
classes = [line.split(': ')[1][:-1] for line in darknet]
assert len(probs) % 5 == 0
assert len(classes) % 5 == 0
return classes[0::5], probs[0::5], classes[1::5], probs[1::5], classes[2::5], probs[2::5], \
classes[3::5], probs[3::5], classes[4::5], probs[4::5]
def classify(chunks, images_dir, output_file):
if os.path.exists(CLASSIFY_COMMANDS_PATH):
os.remove(CLASSIFY_COMMANDS_PATH)
with open(CLASSIFY_COMMANDS_PATH, "a") as commands:
for i, chunk in enumerate(chunks):
images_string = ''
for img in chunk:
images_string += os.path.join(images_dir, img) + '\\n'
cmd = DARKNET_CMD_TEMPLATE.format(images_string, output_file % i)
commands.write(f'cd {ARGS.darknet_path} && {cmd} && cd {CURRENT_DIR}\n')
parallel_cmd = f'parallel -j {ARGS.cpu} < {CLASSIFY_COMMANDS_PATH}'
log.info('Starting classification...')
os.system(parallel_cmd)
log.info('Classification completed')
def prepare_commands_for_drawing():
shaper_main_path = os.path.join(CURRENT_DIR + '/..', 'main.py')
log.info(f'shaper main path: {shaper_main_path}')
imgs = os.listdir(ARGS.images_dir)
log.info(f'found {len(imgs)} images to draw')
if os.path.exists(DRAW_COMMANDS_PATH):
os.remove(DRAW_COMMANDS_PATH)
with open(DRAW_COMMANDS_PATH, "a") as commands:
for img in imgs:
inpt = os.path.join(ARGS.images_dir, img)
output = os.path.join(ARGS.drawings_dir, img.split('.')[0])
commands.write(
DRAW_CMD_TEMPLATE.format(shaper_main_path, inpt, output, ARGS.n, ARGS.resize, ARGS.output_size))
return len(imgs)
def save_start_time():
if os.path.exists(TIME_PATH):
os.remove(TIME_PATH)
with open(TIME_PATH, "a") as t:
t.write(str(time.time()) + '\n')
def save_classification_start_time():
with open(TIME_PATH, "a") as t:
t.write(str(time.time()))
def remove_old_data():
    # DARKNET_OUTPUT_PATH is a '%d' template, so the literal path never
    # exists; remove every numbered output file from previous runs instead.
    for old_output in glob.glob(DARKNET_OUTPUT_PATH.replace('%d', '*')):
        os.remove(old_output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--images-dir', type=str, help='Directory with input images', required=True)
parser.add_argument('--drawings-dir', type=str, help='Directory to save drawings', required=True)
parser.add_argument('--n', type=int, help='Number of shapes to draw', required=True)
parser.add_argument('--cpu', type=int, help='Number of CPUs to use', required=True)
parser.add_argument('--resize', type=int, default=300)
parser.add_argument('--output-size', type=int, default=300)
parser.add_argument('--result-csv-path', type=str, help='Output path to csv classification results', required=True)
parser.add_argument('--darknet-path', type=str, help='Path to darknet classifier', required=True)
parser.add_argument('--img-cls-mapping', type=str, help='Path to mapping between image names and labels',
required=True)
ARGS = parser.parse_args()
log.info(ARGS)
main()
| 2.21875 | 2 |