| repo_name | path | text |
|---|---|---|
dutitello/parAnsys | examples/AngTang_MCAI_67.py |
"""
Running example 6.7 from Ang & Tang 1984
"""
import paransys
import numpy as np
# Call ParAnsys
mc = paransys.MonteCarlo()
# Console log On
mc.Info(True)
# Create random variables
mc.CreateVar('y', 'gauss', 40, cv=0.125)
mc.CreateVar('z', 'gauss', 50, cv=0.050)
mc.CreateVar('m', 'gauss', 1000, std=0.200*1000)
# First limit state (0)
mc.CreateLimState('y*z-m')
# Sampling for first limit state (0)
# Shift the sampling distributions toward the failure region (importance sampling) =)
k = 2
# For GAUSS and GUMBEL it's better to use STD, and for LOGN use CV ;)
mc.SetRandomVarSampl('y', 0, 'gauss', 40*(1-k*0.125), std=0.125*40)
mc.SetRandomVarSampl('z', 0, 'gauss', 50*(1-k*0.050), std=0.050*50)
mc.SetRandomVarSampl('m', 0, 'gauss', 1000*(1+k*0.200), std=0.200*1000)
# Running
values = mc.Run(100, 1000, 0.05, 0.005)
# Export
mc.ExportDataCSV('AngTang-MCAI-67', 'Comments?')
# Figures
mc.Graph(['N_Pf', 'N_Beta', 'N_CVPf'], show=True)
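# --- Hedged sanity check: a mean-value first-order (MVFOSM) approximation ---
# A minimal sketch using only the means and standard deviations defined above
# for g = y*z - m; it is a rough cross-check of the Monte Carlo estimate,
# not the exact solution of the Ang & Tang example.
from math import sqrt, erfc
mu_y, mu_z, mu_m = 40.0, 50.0, 1000.0
sd_y, sd_z, sd_m = 0.125*40.0, 0.050*50.0, 0.200*1000.0
mu_g = mu_y*mu_z - mu_m
# First-order variance: (dg/dy*sd_y)^2 + (dg/dz*sd_z)^2 + (dg/dm*sd_m)^2 at the means
var_g = (mu_z*sd_y)**2 + (mu_y*sd_z)**2 + sd_m**2
beta_approx = mu_g/sqrt(var_g)
pf_approx = 0.5*erfc(beta_approx/sqrt(2.0))   # Phi(-beta)
print('MVFOSM check: beta ~ %.3f, Pf ~ %.2e' % (beta_approx, pf_approx))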
dutitello/parAnsys | paransys/ansys.py |
# -*- coding: UTF-8 -*-
"""
This module calls ANSYS through the Probabilistic Design System (PDS) as an FEM tool for
running a set of parametric analyses and getting some variables back to Python.
Built using ANSYS documentation from https://www.sharcnet.ca/Software/Ansys/ (not available anymore)
This module makes no claim to own any rights to ANSYS, it's just an interface
to call the program owned by ANSYS.
Docs are available at https://dutitello.github.io/parAnsys/
"""
import os
import numpy as np
import time
class ANSYS(object):
"""
This class creates a couple of script files with some parameters defined here
by the user, copies an APDL script file created by the user with the model to be
analysed, runs everything on ANSYS and gets the results from some defined
parameters back to Python.
exec_loc : str, obligatory
Location of ANSYS executable file.
run_location : str, optional
ANSYS working directory. Must be a separate directory.
Defaults to ansys_anl inside the current directory.
jobname : str, optional
ANSYS jobname. Defaults to \'file\'.
nproc : int, optional
Number of processors. Defaults to 2.
override : bool, optional
Attempts to delete the .lock file at working directory.
It's useful when ANSYS was interrupted.
Defaults to False
cleardir : bool, optional
Deletes all the files from the ANSYS working directory when the Run command is called.
Defaults to False
add_flags : str, optional
Additional flags to be called with ANSYS.
If it's an academic version use add_flags='-aa_r'
Do not use '-b -i -o'.
Flags can be found at https://www.sharcnet.ca/Software/Ansys/16.2.3/en-us/help/ans_ope/Hlp_G_OPE3_1.html
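A minimal usage sketch (the executable path, script name and variable names
below are hypothetical, shown only to illustrate the call order):
.. code-block:: python
import numpy as np
from paransys.ansys import ANSYS
ansys = ANSYS(exec_loc='C:\\ANSYS\\ansys.exe', run_location='C:\\temp\\ansys_anl')
ansys.CreateVarIn('LOAD')
ansys.CreateVarOut('MAXSTRESS')
ansys.SetModel('model.inp', extrafiles=[], directory='C:\\models')
ansys.SetLength(10)
ansys.SetVarInValues('LOAD', np.linspace(1.0, 10.0, 10))
ansys.Run()
results = ansys.GetVarOutValues()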
|
|
**Class methods:**
"""
def __init__(self, exec_loc=None, run_location=os.getcwd()+'\\ansys_anl\\', jobname='file',
nproc=2, override=False, cleardir=False, add_flags=''):
"""
"""
# Verify if the exec_loc is defined and real
if exec_loc == None:
exception = Exception('Undefined ANSYS executable location. \n You must define ANSYS executable location.')
raise exception
else:
# Verify if the file exists
if not os.path.isfile(exec_loc):
exception = Exception('Invalid ANSYS executable file as \"%s\".' % exec_loc)
raise exception
# Save the options to the main object
self.ANSYSprops = {}
self.ANSYSprops['exec_loc'] = exec_loc
self.ANSYSprops['run_location'] = run_location
self.ANSYSprops['jobname'] = jobname
self.ANSYSprops['nproc'] = nproc
self.ANSYSprops['override'] = override
self.ANSYSprops['cleardir'] = cleardir
self.ANSYSprops['add_flags'] = add_flags
# Create lists and set initial values
# Variables names
self.varInNames = []
self.varOutNames = []
# Variables values
self.varInValues = {}
self.varOutValues = {}
# Analysis length
self.length = 0
# Model properties
self.Model = {}
# Defaults to always print
self.PrintR = True
self._PrintR('ANSYS properties defined as:')
self._PrintR(' Executable file: \"%s\".' % self.ANSYSprops['exec_loc'])
self._PrintR(' Working directory: \"%s\".' % self.ANSYSprops['run_location'])
self._PrintR(' Jobname: \"%s\".' % self.ANSYSprops['jobname'])
self._PrintR(' Number of processors used: \"%s\".' % self.ANSYSprops['nproc'])
self._PrintR(' Override lock file: \"%s\".' % self.ANSYSprops['override'])
self._PrintR(' Clear working directory: \"%s\".' % self.ANSYSprops['cleardir'])
self._PrintR(' Additional flags: \"%s\".' % self.ANSYSprops['add_flags'])
def Info(self, act=False):
"""
Turn on/off the return of the commands to Python.
Parameters
----------
act : bool, obligatory
True turns the return of commands to Python on; False turns it off.
"""
self.PrintR = act
self._PrintR('Now the commands will send a return to Python (like this).')
def _PrintR(self, value):
"""
Internal function to print or not the return of commands based on command Info()
"""
if self.PrintR:
print(value, flush=True)
else:
pass
def _writePDSfile(self):
"""
Internal function to create/write the APDL file with the PDS analysis. (pdsrun.inp)
"""
# Verify if self.length >0
if self.length <= 0:
exception = Exception('Before running you must define the length of the analysis with SetLength() '+
'and set all the variable values.')
raise exception
# Run all the varInValues[var] looking for an empty
for each in self.varInNames:
if self.varInValues[each] == None:
exception = Exception('Input variable \"%s\" has no defined values.' % each)
raise exception
# Try to open the file for writing
try:
runfile = '%s\\pdsrun.inp' % self.ANSYSprops['run_location']
f = open(runfile, 'wt')
except:
exception = Exception('Unable to open the pdsrun.inp file for writing.')
raise exception
# Now try to write
try:
self._PrintR('Writing the pdsrun.inp file.')
# some ANSYS initial commands
f.write('FINISH\n')
f.write('/GOPR\n')
f.write('KEYW,PR_SET,1\n')
f.write('KEYW,PR_STRUC,1\n')
# Open PDS module
f.write('/PDS\n')
# Clear the PDS database
f.write('PDCLR,ALL\n')
# Here we use the model script from SetModel(..), which is named here as "current.inp"
f.write('PDANL,current,inp\n')
# Input variables
for each in self.varInNames:
# Variables are declared with a uniform distribution between min(-1, 0.5*minval) and max(1, 1.5*maxval)
cmd = 'PDVAR,%s,UNIF,%f,%f\n' % (each, min(0.5*min(self.varInValues[each]),-1), max(1,1.5*max(self.varInValues[each])))
f.write(cmd)
# Output/control variables
for each in self.varOutNames:
# Just the varname and distrib=RESP
cmd = 'PDVAR,%s,RESP\n' % (each)
f.write(cmd)
# We will use Monte Carlo simulation with user sampling, that way we can
# simulate the points declared in each variable easily and without problems.
f.write('PDMETH,MCS,USER\n')
# The sample points file is named here as "current.samp"
f.write('PDUSER,current,samp\n')
# Run the analysis - The analysis is named "current", so the results file
# will be $jobname$_current.pdrs
f.write('PDEXE,current\n')
# Create a delay of 2s on ANSYS just to make sure that all were done
f.write('/WAIT,2\n')
# Close the file
self._PrintR('Saving the pdsrun.inp file.')
f.close()
except:
exception = Exception('Error while writing the pdsrun.inp file.')
raise exception
else:
pass
def _writeSAMPfile(self):
"""
Internal function to create/write the sample points file to run the PDS. (current.samp)
The sample file format has the PDEXE name in the first line, and the second line is:
ITER CYCL LOOP $INPUT VAR NAMES$
with a new column for each variable.
The values for ITER and CYCL are always 1; LOOP is the solution number, running from 1 to length.
"""
#
self._PrintR('Writing the current.samp file.')
# Create the initial header string
header = 'current\nITER CYCL LOOP'
# Create the initial format string
format = '%d %d %d'
# Create initial values array with ones
samparray = np.zeros([self.Length, 3+len(self.varInNames)]) + 1
# Fill the loop column
samparray[:, 2] = range(1, self.Length+1)
# Each variable adds its name to the header and a %15.8E to the format
# A counter from 2 (3rd column)
i = 2
for each in self.varInNames:
# Add strings
header = '%s %s' % (header, each)
format = '%s %s' % (format, '%15.8E')
# Add one in counter
i += 1
# Place values
try:
samparray[:, i] = self.varInValues[each]
except:
exception = Exception('Error while passing the values of \"%s\" to the current.samp file.' % each)
raise exception
# Save the file
try:
sampfile = '%s\\current.samp' % self.ANSYSprops['run_location']
np.savetxt(sampfile, samparray, delimiter=' ', newline='\n', header=header, comments='', fmt=format)
except:
exception = Exception('Error while saving the current.samp file.')
raise exception
pass
def SetModel(self, inputname, extrafiles=[], directory=os.getcwd()):
"""
Set the input script file to be used and extra files that should be copied together.
All these files must be in the same directory set in the parameter directory.
Parameters
----------
inputname : str, obligatory
Name with extension of the script that will be executed in the analysis.
The script must be written as a function of the INPUT variables defined here
(as parameters of ANSYS), and must define/calculate ANSYS parameters with
the results using the names defined here.
extrafiles : list of strings, optional
A list of strings containing extra files (with extension) that are necessary to
run the script analysis; it could be a MODEL with the MESH already generated,
for example.
An example of extrafiles list is:
extrafiles = ['temps.txt', 'model1.ans', 'file.db']
directory : str, optional
If the script is not in the current running Python directory you should
provide the full location; if it's in a subdirectory of the current directory
you can use '/dirname/filename.ext'.
Defaults to current running Python directory.
"""
# Verify if the input script file exists
if not os.path.isfile(str(directory+'\\'+inputname)):
exception = Exception('Current input script file (\"%s\") does not exist in (\"%s\") to be copied.' % (inputname, directory))
raise exception
# Copy the script file to workdirectory with the name 'current.inp'
errcopy = os.system('copy /Y %s %s' % (str(directory+'\\'+inputname), str(self.ANSYSprops['run_location']+'\\current.inp')))
if errcopy != 0:
exception = Exception('It was not possible to copy the input script file. (\"%s\").' % str(directory+'\\'+inputname))
raise exception
# Copy the extra files
for each in extrafiles:
errcopy = os.system('copy /Y %s %s' % (str(directory+'\\'+each), str(self.ANSYSprops['run_location']+'\\'+each)))
if errcopy != 0:
exception = Exception('It was not possible to copy an extra file (\"%s\").' % each)
raise exception
# Clear last Model properties and set new
self.Model = {}
self.Model['inputname'] = inputname
self.Model['extrafiles'] = extrafiles
self.Model['directory'] = directory
self._PrintR('Input script file and extra files copied to working directory.')
self._PrintR(' Main APDL script: \"%s\".' % self.Model['inputname'])
self._PrintR(' Extra model files: \"%s\".' % self.Model['extrafiles'])
self._PrintR(' Input directory: \"%s\".' % self.Model['directory'])
def _ClearForRun(self):
"""
Internal function to clear the entire directory or just the lock file before running
"""
# Verify the clear condition
if self.ANSYSprops['cleardir']:
# Try to clear the working directory
self._PrintR('Cleaning the files from ANSYS working directory (\"%s\").' % self.ANSYSprops['run_location'])
delhand = os.system('del /q %s\\*' % self.ANSYSprops['run_location'])
if delhand != 0:
exception = Exception('Unable to clear the ANSYS working directory.')
raise exception
# Put the model back in the working directory
self.SetModel(self.Model['inputname'], self.Model['extrafiles'], self.Model['directory'])
else:
# Verify the override condition
lockfile = '%s\\%s.lock' % (self.ANSYSprops['run_location'], self.ANSYSprops['jobname'])
if self.ANSYSprops['override'] and os.path.isfile(lockfile):
self._PrintR('Deleting lock file.')
delhand = os.system('del /q %s' % lockfile)
if delhand != 0:
exception = Exception('Unable to delete lock file (\"%s\").' % lockfile)
raise exception
# Before execution ALWAYS erase the $jobname$.err file
self._PrintR('Deleting old error log file.')
errfile = '%s\\%s.err' % (self.ANSYSprops['run_location'], self.ANSYSprops['jobname'])
delhand = os.system('del /q %s' % errfile)
pass
def _Run(self):
# ANSYS Run Parameters https://www.sharcnet.ca/Software/Ansys/16.2.3/en-us/help/ans_ope/Hlp_G_OPE3_1.html
# Ansys PDS commands = pdsrun.inp and out=pdsout.out
#
# DO NOT REMOVE THE SPACES BEFORE -np !
#
cmd = 'start "ANSYS" /d "%s" /min /wait /b "%s" " -smp -np %d -j %s -b -i pdsrun.inp -o pdsout.out %s" ' % (self.ANSYSprops['run_location'],
self.ANSYSprops['exec_loc'], self.ANSYSprops['nproc'], self.ANSYSprops['jobname'], self.ANSYSprops['add_flags'])
self._PrintR('Running ANSYS.')
# Get time before run ANSYS
timei = time.time()
ansyshand = os.system(cmd)
if ansyshand != 0:
exception = Exception('ANSYS exited with error id=%d. Please verify the output file (\"%s\").\n\n' %(ansyshand, self.ANSYSprops['run_location']+'\\pdsout.out'))
raise exception
# verify if it has an error on $jobname$.err
#errfile = '%s\\%s.err' % (self.ANSYSprops['run_location'], self.ANSYSprops['jobname'])
#f = open(errfile).read()
#if '*** ERROR ***' in f:
# exception = Exception('ANSYS exited with an error. Please verify the output file (%s).\n\n' % (self.ANSYSprops['run_location']+'\\pdsout.out'))
# raise exception
# Time after ANSYS
timef = time.time()
self._PrintR('Solution is done. It took %f minutes.' % ((timef-timei)/60))
def Run(self):
"""
Execute the analysis on ANSYS.
"""
# Verify if model was set
if self.Model == {}:
exception = Exception('Before running ANSYS you must define the model that will be analysed with SetModel().')
raise exception
# Verify if there are output variables
if self.varOutNames == []:
exception = Exception('Before running ANSYS you must define output variables.')
raise exception
# Perform the conditional clear
self._ClearForRun()
# Write the pdsrun.inp file
self._writePDSfile()
# Write the sample file
self._writeSAMPfile()
# Run
self._Run()
@property
def Length(self):
"""
Return the number of analyses to be executed and the length of the INPUT arrays.
"""
return self.length
def SetLength(self, length):
"""
Define the number of analyses to be executed and the length of the INPUT arrays.
Must be set before the variables.
Parameters
----------
length: int, obligatory
Number of analyses.
"""
# control variable
valuesset = False
# if at least one InValue is set we can't change the length
for each in self.varInNames:
if self.varInValues[each] != None:
valuesset = True
exception = Exception('At least one value was already set to the INPUT variables. \n'+
'To change the length you have to clear the variable values with ClearValues().')
raise exception
# if no value is set and length > 0 we can change it
if valuesset == False:
if length <= 0:
exception = Exception('The analysis length must be greater than 0.')
raise exception
else:
self.length = length
self._PrintR('Analysis length set to %d.' % self.length)
def CreateVarIn(self, name):
"""
Create an INPUT variable.
Parameters
----------
name : str, obligatory
Variable name.
Do not use spaces in the name!
The same name cannot be used with INPUT and OUTPUT variables.
It's not case sensitive; in fact it will be saved in uppercase
because of ANSYS.
"""
if name in ['', None, ' ']:
exception = Exception('You must define a unique name for each variable.')
raise exception
else:
name = name.upper()
if name in self.varInNames or name in self.varOutNames:
exception = Exception('The same name cannot be used with INPUT and OUTPUT variables.')
raise exception
else:
self.varInNames.append(name)
self.varInValues[name] = None
self._PrintR('ANSYS input variable \"%s\" created.' % name)
def CreateVarOut(self, name):
"""
Create an OUTPUT variable.
Parameters
----------
name : str, obligatory
Variable name.
Do not use spaces in the name!
The same name cannot be used with INPUT and OUTPUT variables.
It's not case sensitive; in fact it will be saved in uppercase
because of ANSYS.
"""
if name in ['', None, ' ']:
exception = Exception('You must define a unique name for each variable.')
raise exception
else:
name = name.upper()
if name in self.varInNames or name in self.varOutNames:
exception = Exception('The same name cannot be used with INPUT and OUTPUT variables.')
raise exception
else:
self.varOutNames.append(name)
self.varOutValues[name] = None
self._PrintR('Variable \"%s\" declared as ANSYS output variable.' % name)
def SetVarInValues(self, name, values):
"""
Set the values of an INPUT variable
Parameters
----------
name : str, obligatory
Input variable name that will receive the values.
values : 1D np.array of floats, obligatory
A 1D numpy.array() with the length set for this class (SetLength) containing the values to be analysed.
If the array is not 1D just the first column (0) will be used.
"""
# Verify the set length
if self.length <= 0:
exception = Exception('Before setting the values you must define the length of the analysis with SetLength().')
raise exception
# Verify the name
if name in ['', None, ' ']:
exception = Exception('You must define the name of the variable that will receive the values.')
raise exception
else:
name = name.upper()
if name not in self.varInNames:
exception = Exception('This variable name is not declared.\n'+
'Please use CreateVarIn("name") to declare it.')
raise exception
# Verify if it's a list; in this case transform it to an np.array
if type(values) == list:
values = np.array(values)
# Verify if it has more than one column
try:
values.shape[1]
except:
pass
else:
self._PrintR('The values being set to \"%s\" have more than one column; just the first (0) will be used.' % (name))
values = np.copy(values[:,0])
# Verify if length is GT or LT the expected value
if values.shape[0] < self.length:
exception = Exception('The length of the values being set to \"%s\" is less than the defined length (%d).' % (name, self.length))
raise exception
elif values.shape[0] > self.length:
self._PrintR('The length of the values being set to \"%s\" is greater than the defined length (%d).\
\nJust the first %d values will be used.' % (name, self.length, self.length))
values = values[:self.length]
# Apply
try:
self.varInValues[name] = values
except:
exception = Exception('Error setting the values of \"%s\".' % name)
raise exception
else:
self._PrintR('Values of \"%s\" were set.' % name)
def GetVarOutValues(self):
"""
Return the values from the ANSYS results file for all the OUTPUT variables.
"""
# Verify the existence of results file "$jobname$_current.pdrs"
resultsfile = '%s\\%s_current.pdrs' % (self.ANSYSprops['run_location'], self.ANSYSprops['jobname'])
if os.path.isfile(resultsfile):
# Import the results file
self._PrintR('Importing results from PDS results file (\"%s\").' % resultsfile)
resultsALL = np.genfromtxt(resultsfile, names=True, skip_header=1)
# Verify the existence of ERRORS
errorcount = resultsALL['ERR'].sum()
if errorcount > 0:
self._PrintR('\n\n\nATTENTION:\nThe 4th column in the results file, called ERR, warns about simulations whose results should not be used.\n'+
'The current file has %d errors; you can access this in the ERR column returned with the results.\n\n\n' % errorcount)
_ = input('The execution is paused, press ENTER to continue.\n')
# Get the ERR column and the varOutNames columns, trimmed to the specified length
results = {}
#results['ERR'] = np.copy(resultsALL['ERR'][:self.length])
results['ERR'] = np.copy(resultsALL['ERR'])
#for each in self.varInNames:
# results[each] = np.copy(resultsALL[each])
for each in self.varOutNames:
# This prevents bugs with single-line results
results[each] = np.atleast_1d(resultsALL[each])
for each in results:
if results[each].size > self.length:
results[each] = results[each][:self.length]
return results
else:
# There are no results
exception = Exception('There is no results file in the current ANSYS working directory. \n'+
'Please verify that the analysis was run.')
raise exception
def ClearAll(self):
"""
Clear all the properties (not from ANSYS object)
"""
# Clear all
self.Model = {}
self.varInNames = []
self.varOutNames = []
self.varInValues = {}
self.varOutValues = {}
self.length = 0
#self.PrintR = False
self._PrintR('All the properties were cleared (not from ANSYS object).')
def ClearValues(self):
"""
Clear the values of all variables.
"""
# Clear each input variable value
for each in self.varInNames:
self.varInValues[each] = None
# Clear each output variable value
for each in self.varOutNames:
self.varOutValues[each] = None
self._PrintR('The values of all variables were cleared. Now you can change the length parameter.')
dutitello/parAnsys | examples/AngTang_FORM_68.py |
"""
Running example 6.8 from Ang & Tang 1984
"""
import paransys
# Call ParAnsys
form = paransys.FORM()
# Console log On
form.Info(True)
# Create random variables
form.CreateVar('y', 'logn', 40, cv=0.125)
form.CreateVar('z', 'logn', 50, cv=0.050)
form.CreateVar('m', 'gumbel', 1000, cv=0.200)
# Create limit state
form.SetLimState('y*z-m')
# Run
values = form.Run(dh=0.01, meth='iHLRF')
form.ExportDataCSV('AngTang-FORM-68', 'Comments?')
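# --- Hedged sketch: distribution parameters used internally ----------------
# These mirror the formulas in paransys/form.py (_EquivNormal) for the
# variables defined above; printed here only as an illustrative check.
import math
for name, mean, cv in [('y', 40.0, 0.125), ('z', 50.0, 0.050)]:
    qsi = math.sqrt(math.log(1.0 + cv**2))       # lognormal shape
    lmbd = math.log(mean) - 0.5*qsi**2           # lognormal location
    print('%s: lognormal qsi=%.5f, lambda=%.5f' % (name, qsi, lmbd))
std_m = 0.200*1000.0
scl_m = math.sqrt(6.0)*std_m/math.pi             # Gumbel scale
loc_m = 1000.0 - 0.57721*scl_m                   # Gumbel location
print('m: gumbel scale=%.3f, location=%.3f' % (scl_m, loc_m))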
dutitello/parAnsys | paransys/form.py |
# -*- coding: UTF-8 -*-
"""
This module performs reliability analysis with the First Order Reliability Method (FORM) using Python.
Please read the class docstring for more.
Docs are available at https://dutitello.github.io/parAnsys/
"""
import os
import time
from paransys.ansys import ANSYS
import numpy as np
import scipy.stats
from math import * # It's necessary to evaluate limit states
import math
from inspect import isfunction
class FORM(object):
"""
This class applies the First Order Reliability Method (FORM) inside Python
using ParAnsys as a connection with ANSYS to evaluate FEM models.
It is possible to run simulations without using ANSYS, just
defining the limit state equation and all variables.
This code was made following the idea of ANSYS being a tool for getting the
ultimate load of the structure. This works by applying a displacement to the
loaded node and then getting the largest reaction force at that node;
this way the limit state defined here is 'R-S', where R are the
values obtained from ANSYS and S the values generated in Python. It's also
possible to apply the true load in ANSYS; it's just necessary to
formulate a valid limit state equation.
|
|
**Class methods:**
"""
#---------------------------------------------------------------------------
def __init__(self):
"""
"""
# Before
self._ANSYS = False
self.PrintR = True
self.limstate = None
self._userf = None
self.variableDistrib = {}
self.variableConst = {}
self.variableStartPt = {}
self.controls = {}
self.corlist = {}
self._last_ck = 0
# Options
self._options = {}
self._options['iHLRF_forced_lambdk'] = 'auto'
self._options['iHLRF_prod_ck'] = 2.00
self._options['iHLRF_add_ck'] = 0.00
self._options['iHLRF_par_a'] = 0.10
self._options['iHLRF_par_b'] = 0.50
self._options['iHLRF_step_lambdk_test'] = 4
self._options['rHLRF_relax'] = 0.50
self._options['APDLdebug'] = False
# After
self.variableDesPt = {}
self.results = {}
self._stnumb = 99
def ANSYS(self, exec_loc=None, run_location=os.getcwd()+'\\ansys_anl\\', jobname='file',
nproc=2, override=False, cleardir=False, add_flags=''):
"""
If ANSYS will be used, this defines the ANSYS properties, initializing the
paransys.ANSYS class.
Parameters
----------
exec_loc : str, obligatory
Location of ANSYS executable file.
run_location : str, optional
ANSYS working directory. Recommended to be a separate directory.
Defaults to ansys_anl in the current directory.
jobname : str, optional
ANSYS jobname. Defaults to 'file'.
nproc : int, optional
Number of processors. Defaults to 2.
override : bool, optional
Attempts to delete the .lock file at working directory.
It's useful when ANSYS was interrupted.
Defaults to False
cleardir : bool, optional
Delete all the files from ANSYS working directory when call the Run command.
Defaults to False
add_flags : str, optional
Additional flags to be called with ANSYS.
If it's an academic version use add_flags='-aa_r'
Do not use '-b -i -o'
Flags can be found at https://www.sharcnet.ca/Software/Ansys/16.2.3/en-us/help/ans_ope/Hlp_G_OPE3_1.html
"""
self.ansys = ANSYS(exec_loc=exec_loc, run_location=run_location,
jobname=jobname, nproc=nproc, override=override,
cleardir=cleardir, add_flags=add_flags)
self._ANSYS = True
def Info(self, act=False):
"""
Turn on/off the return of the commands to Python.
Parameters
----------
act : bool, obligatory
True turns the return of commands to Python on; False turns it off.
"""
self.PrintR = act
if self._ANSYS:
self.ansys.Info(act)
#return self._PrintR('Now the commands will send a return to Python (like this).')
def _PrintR(self, value):
"""
Internal function to print or not the return of commands based on command Info()
"""
if self.PrintR:
return print(value, flush=True)
else:
pass
def SetANSYSModel(self, inputname, extrafiles=[], directory=os.getcwd()):
"""
Set the input script file to be used on ANSYS and extra files that should
be copied together.
All these files must be in the same directory set in the parameter directory.
Parameters
----------
inputname : str, obligatory
Name with extension of the script that will be executed in the analysis.
The script must be written as a function of the INPUT variables defined here
(as parameters of ANSYS), and must define/calculate ANSYS parameters with
the results using the names defined here.
extrafiles : list of strings, optional
A list of strings containing extra files (with extension) that are necessary to
run the script analysis; it could be a MODEL with the MESH already generated,
for example.
An example of extrafiles list is:
``extrafiles = ['temps.txt', 'model1.ans', 'file.db']``
directory : str, optional
If the script is not in the current running Python directory you should
provide the full location; if it's in a subdirectory of the current directory
you can use '/dirname/filename.ext'.
Defaults to current running Python directory.
"""
if self._ANSYS:
self.ansys.SetModel(inputname, extrafiles, directory)
else:
exception = Exception('ANSYS not declared yet. Before set ANSYS '+
'model you must define ANSYS properties with ANSYS(...).')
raise exception
def SetANSYSOutVar(self, name):
"""
Defines a parameter/variable from the ANSYS APDL script as a variable that
returns values to Python.
Parameters
----------
name : str, obligatory
Variable/Parameter name, as defined in APDL script.
"""
if self._ANSYS:
self.ansys.CreateVarOut(name)
else:
exception = Exception('ANSYS not declared yet. Before set ANSYS '+
'variables you must define ANSYS properties with ANSYS(...).')
raise exception
# Setting distribution of variables
def CreateVar(self, name, distrib, mean, std=0, cv=None, par1=None, par2=None):
"""
Create a Variable, random or not.
If it's used on ANSYS this needs to be declared, so after this use:
>>> form.SetANSYSVar(name)
Parameters
----------
name : str, obligatory
Name of variable.
distrib : str, obligatory
Probabilistic variable distribution type.
For all distributions Mean and Std are related to Normal distribution
(the code determines the parameters for the desired distribution).
Available types are:
* gaussian (or gauss, normal);
* lognormal (or log, logn, ln, lognorm);
* gumbel (or gumb, type1);
* constant (or const) - Constant value (doesn't need std).
mean : float, obligatory
Mean of the variable values.
std : float, optional
Standard deviation of the variable. You must define it or cv for variables
that aren't constant; if both (cv and std) are declared, cv will be used
(std is recalculated from cv). For LogNormal variables it's recommended to use CV!
cv : float, optional
Coefficient of Variation of the variable. You must define it or std for variables
that aren't constant; if both (cv and std) are declared, cv will be used.
For LogNormal variables it's recommended to use CV!
par1 and par2 : float, optional
Parameters for future implementations.
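A short example (the first two calls mirror examples/AngTang_FORM_68.py; the
constant variable is hypothetical):
.. code-block:: python
form.CreateVar('y', 'logn', 40, cv=0.125)
form.CreateVar('m', 'gumbel', 1000, cv=0.200)
form.CreateVar('d', 'const', 0.25)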
"""
# Verify the variable name
if name in ['', None, ' ']:
exception = Exception('You must define the name of the variable that will receive the values.')
raise exception
# Set distribution name as lower case
name = name.lower()
distrib = distrib.lower()
# CV or STD?
if distrib not in ['constant', 'const', 'cons', 'c']:
if cv == None:
cv = std/mean
else:
std = cv*mean
# Verify the distribution and then store the parameters
# Gaussian variable
if distrib in ['gauss', 'gaus', 'gaussian', 'normal', 'norm']:
# Store values
self.variableDistrib[name] = ['gauss', mean, std, cv]
# Set the start point as the mean
self.variableStartPt[name] = mean
return self._PrintR('Variable \"%s\" defined as Gaussian with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Lognormal distribution
elif distrib in ['lognormal', 'logn', 'ln', 'log', 'lognorm']:
# Store values
self.variableDistrib[name] = ['logn', mean, std, cv]
# Set the start point as the mean
self.variableStartPt[name] = mean
return self._PrintR('Variable \"%s\" defined as LogNormal with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Gumbel distribution
elif distrib in ['gumbel', 'gumb', 'type1']:
# Store values
self.variableDistrib[name] = ['gumbel', mean, std, cv]
# Set the start point as the mean
self.variableStartPt[name] = mean
return self._PrintR('Variable \"%s\" defined as Gumbel with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Constant value
elif distrib in ['constant', 'const', 'cons', 'c']:
# Store value
self.variableConst[name] = mean
return self._PrintR('Variable \"%s\" defined as Constant with value=%f'
% (name, mean))
# or what?
else:
exception = Exception('Distribution \"%s\" set on variable \"%s\" is not recognized.'
% (distrib.upper(), name))
raise exception
def _SetControls(self, maxIter, tolRel, tolLS, dh, diff, meth):
"""
Internal function used by Run().
Sets the process controls.
Parameters
----------
maxIter : integer
Maximum of iterations that can be performed. After this the process
will stop with error.
tolRel : float
tolLS : float or string
dh : float
delta_h step when applying derivatives, applied as a fraction of the mean
(h = mean(x)*dh).
diff : str
meth : str
FORM method used. Available methods are:
* HLRF: Hasofer Lind Rackwitz and Fiessler method.
* iHLRF: improved Hasofer Lind Rackwitz and Fiessler method.
"""
if diff not in ['center', 'forward', 'backward']:
exception = Exception('Invalid derivative method.')
raise exception
if min(maxIter, tolRel, dh) >= 0 and meth in ['HLRF', 'iHLRF', 'rHLRF'] and \
diff in ['center', 'forward', 'backward']:
# Save controls variable
self.controls['maxIter'] = maxIter
self.controls['tolRel'] = tolRel
self.controls['tolLS'] = tolLS
self.controls['dh'] = dh
self.controls['diff'] = diff
self.controls['meth'] = meth
self._PrintR('Process controls set as:')
self._PrintR(' Limit of iterations: %d.' % maxIter)
self._PrintR(' Relative error tolerance: %2.3E.' % tolRel)
if isinstance(tolLS, str):
self._PrintR(' Absolute LS error tolerance: auto.')
else:
self._PrintR(' Absolute LS error tolerance: %2.3E.' % tolLS)
self._PrintR(' deltah (for derivatives): %2.3E.' % dh)
self._PrintR(' Finite difference method: %s.' % diff)
self._PrintR(' FORM Method: %s.' % meth)
else:
exception = Exception('Error while setting simulation controls. Please verify the set values.')
raise exception
def SetLimState(self, equat, userf=None):
"""
Set the limit state equation.
Parameters
----------
equat : obligatory
1) It can be a string with the equation of the limit state. It must be written as a
function of the defined variables (In and Out).
2) It could be a Python function created by the user, just passing the function name in
the place of the string.
userf : function, optional
A user defined function that can be used inside the limit state
equation (string), called inside equat as ``userf()``.
It's similar to using a function instead of a string in the equat parameter.
For example, you can create a complex Python function with loops, ifs
and whatever else to evaluate the R part of your limit state function
for a concrete beam. An example is shown below.
First example: if ANSYS returns the maximum load on a truss as variable
FxMAX, and the applied loads to be tested are ``(g+q)*sin(theta)``, where
``g``, ``q`` and ``theta`` are random variables created with ``CreateVar()``:
.. code-block:: python
form.SetLimState(equat='FxMAX-(g+q)*sin(theta)')
Note that you can use math expressions as ``sin()``, ``cos()``, ``tan()``, ``sqrt()``
from Python math module inside the equation.
|
Second example: you have a steel bar in tension with no hardening.
Its stress is a function of ``(eps, fy, E)``, where ``eps`` is the current
strain, ``fy`` is the yield stress and ``E`` the elastic modulus;
you can create inside your code a function like:
.. code-block:: python
def stress(eps, fy, E):
if eps > fy/E:
return fy
else:
return eps*E
And now defining ``userf=stress`` we can:
.. code-block:: python
form.SetLimState(equat='userf(eps,fy,E)-q', userf=stress)
where ``eps``, ``fy``, ``E`` and ``q`` are random variables.
Note that the function inside the limit state equation should be
called as ``userf()`` with the parameters from ``stress``.
Or we can do the same using the function instead of the string:
.. code-block:: python
form.SetLimState(equat=stress)
"""
# Store it
self.limstate = equat
if(type(self.limstate) == str):
# String equation
# Change equation to lowcase
self.limstate = self.limstate.lower()
self._userf = userf
return self._PrintR('Limit state defined as "{}".'.format(equat))
else:
# LS is a Python Function
return self._PrintR('Limit state defined as "{}" function.'.format(self.limstate.__name__))
def SetANSYSVar(self, name):
"""
Set a variable as ANSYS variable.
Parameters
----------
name : str, obligatory
Name of variable.
"""
# Verify ANSYS object
if self._ANSYS == False:
exception = Exception('ANSYS not declared yet. Before set ANSYS '+
'variables you must define ANSYS properties with ANSYS(...).')
raise exception
# Verify the variable name
if name in ['', None, ' ']:
exception = Exception('You must define the name of the variable.')
raise exception
else:
name = name.lower()
if name not in self.variableDistrib and name not in self.variableConst:
exception = Exception('This variable name is not declared yet. '+
'Only Random variables can be set as ANSYS variables.\n'+
'Please use CreateVar() to declare it.')
raise exception
# Declare it on ansys object
self.ansys.CreateVarIn(name)
def SetStartPoint(self, name, value):
"""
Set the point, for each variable, at which the process will start.
If it's not declared, the process starts at the mean value.
Parameters
----------
name : str, obligatory
Variable name.
value : float, obligatory
Starting point for this variable.
"""
# Set lower
name = name.lower()
# Verify if it's already created
if name not in self.variableDistrib:
exception = Exception('This variable name is not declared yet. '+
'Before set the start value you must create the random '+
'variable with CreateVar().')
raise exception
# If no error: store the value
self.variableStartPt[name] = value
#
self._PrintR('Variable \"%s\" will start the process at point %f.' % (name, value))
def Options(self, option, value=None):
"""
Set extra options values.
Parameters
----------
option : str, obligatory
Name of option, listed next.
value : optional
Value to be set, type varies with option.
If not defined it will return current value.
** Valid options:**
For iHLRF method:
* iHLRF_forced_lambdk : float
Forced value used when the line search doesn't find a valid ``lambdak``,
``lambdak`` being the step size.
It could be set as ``'auto'``, when it
is the complement of ``|y*.gradG|/(|y*|.|gradG|)``.
Defaults to 'auto'.
* iHLRF_prod_ck : float
Scalar value that will be multiplied by the calculated ``ck`` value. For a
fixed ``ck`` value set it to 0 and then use 'iHLRF_add_ck'.
* iHLRF_add_ck : float
Scalar value that will be added to ``ck`` value.
* iHLRF_par_a : float
Value presented as ``a`` in line search equation for iHLRF.
* iHLRF_par_b : float
Value presented as ``b`` in line search equation for iHLRF,
``lambdak`` value is ``b**nk``.
* iHLRF_step_lambdk_test : float
Size of ``lambdak`` test block, after each block convergence is checked.
For analyses using ANSYS:
* APDLdebug: bool
If it's true the dict with results imported from ANSYS will be printed
at each call. Very useful for APDL debugging.
** If an invalid option or value is set the process could stop (or not).**
"""
if value == None:
# Return current value
return self._options[option]
else:
self._options[option] = value
self._PrintR('Option \"%s\" set as \"%s\".' % (option, str(value)))
def SetCorrel(self, var1, var2, correl):
"""
Set the correlation between two variables. The values will be transformed
by the Nataf process before running.
Parameters
----------
var1 : str, obligatory
First variable name.
var2 : str, obligatory
Second variable name.
correl : float, obligatory
Correlation between var1 and var2.
"""
# Set lower
var1 = var1.lower()
var2 = var2.lower()
# Verify if it's already created
if var1 not in self.variableDistrib:
exception = Exception('Variable "%s" is not declared yet. ' % var1 +
'Before set the correlation you must create the random '+
'variable with CreateVar().')
raise exception
if var2 not in self.variableDistrib:
exception = Exception('Variable "%s" is not declared yet. ' % var2 +
'Before set the correlation you must create the random '+
'variable with CreateVar().')
raise exception
if var1 == var2:
exception = Exception('You cannot set the correlation of a variable with itself.')
raise exception
if correl < -1 or correl > 1:
exception = Exception('Correlation must be a value between -1 and +1.')
raise exception
# Store correlations on correlation list self.corlist
self.corlist[var1, var2] = correl
self.corlist[var2, var1] = correl
self._PrintR('Correlation between \"%s\" and \"%s\" set as %f.' %(var1, var2, correl))
def _EquivNormal(self, name, pt):
"""
Internal function that determines the equivalent normal distribution parameters
Parameters
----------
name : str, obligatory
Name of variable that is being transformed.
pt : float, obligatory
Point that is being transformed.
Returns
-------
[mean, std] where:
* mean : float
Equivalent normal mean
* std : float
Equivalent normal standard deviation.
"""
# Copy the data to var
name = name.lower()
var = self.variableDistrib[name]
# Gaussian
if var[0] == 'gauss':
# Doesn't need to transform
mean = var[1]
std = var[2]
# Lognormal
elif var[0] == 'logn':
# LogNormal parameters
qsi = math.sqrt(math.log(1 + (var[3])**2))
lmbd = math.log(var[1]) - 0.5*qsi**2
# Equivalent parameters: analytical, since the lognormal is derived from the normal
std = pt*qsi
mean = pt*(1-math.log(pt)+lmbd)
# Gumbel
elif var[0] == 'gumbel':
# Gumbel parameters
scl = math.sqrt(6)*var[2]/math.pi
loc = var[1] - 0.57721*scl
# Equiv with PDF/CDF functions
# Gumbel pdf: scipy.stats.gumbel_r.pdf(x, loc, scl)
# Gumbel cdf: scipy.stats.gumbel_r.cdf(x, loc, scl)
# Normal pdf: scipy.stats.norm.pdf(x, mu, std)
# Normal cdf: scipy.stats.norm.cdf(x, mu, std)
# Normal icdf: scipy.stats.norm.ppf(q, mu, std)
# invCum = icdf(gumbel_cdf())
invCum = scipy.stats.gumbel_r.cdf(pt, loc, scl)
invCum = scipy.stats.norm.ppf(invCum, 0, 1)
std = scipy.stats.norm.pdf(invCum, 0, 1) / scipy.stats.gumbel_r.pdf(pt, loc, scl)
mean = pt - invCum*std
# What??
else:
exception = Exception('This couldnt happen!')
raise exception
# Return
return [mean, std]
def Run(self, maxIter=50, tolRel=0.01, tolLS='auto', dh=0.05, diff='forward', meth='iHLRF'):
"""
Run the FORM process.
Parameters
----------
maxIter : integer, optional
Maximum of iterations that can be performed. After this the process
will stop with error.
Defaults to 50.
tolRel : float, optional
Maximum **relative** error tolerance, for example on the search for the X point:
``|X_k - X_(k-1)|/|X_(k-1)| <= tolRel``. Defaults to 0.01.
tolLS : float, optional
Maximum **absolute** error tolerance for limit state function,
``|G(X)|~=tolLS``. It should be calibrated based on the magnitude of
limit state function.
It's possible to automatically determine it using tolLS='auto', it will be set
as (tolRel)*(first cycle limit state value).
Defaults to 'auto'.
dh : float, optional
delta_h step when applying derivatives, value applied over means, as
h=mean(x)*dh, so, ``f'(x) = (f(x+mean(x)*dh)-f(x)) / (mean(x)*dh)``.
Defaults to 0.05.
diff : str, optional
Numeric derivative calculation method. The possible methods are:
* center: for finite difference method with central difference,
``f'(x) = (f(x+h)-f(x-h)) / (2h)``, it needs ``1 + 2*Nvars``
evaluations of the limit state function.
* forward: for finite difference method with forward difference,
``f'(x) = (f(x+h)-f(x)) / h``, it needs ``1 + Nvars``
evaluations of the limit state function.
* backward: for finite difference method with backward difference,
``f'(x) = (f(x)-f(x-h)) / h``, it needs ``1 + Nvars``
evaluations of the limit state function.
Defaults to forward.
meth : str, optional
FORM method used. Available methods are:
* HLRF: Hasofer Lind Rackwitz and Fiessler method.
* iHLRF: improved Hasofer Lind Rackwitz and Fiessler method.
Defaults to iHLRF.
**Returns a dictionary with:**
* status : integer
Status of solution, values can be found after this list.
* Pf : float
Probability of failure.
* Beta : float
Reliability index.
* {DesignPoint} : dictionary of values
Dictionary with the design points for each variable.
* {gradG} : dictionary of values
Dictionary with the final gradient for each variable.
* {alpha} : dictionary of values
Dictionary with the final direction cosines for each variable.
* cycles : int
Number of iterations performed to obtain the solution.
**Status values:**
* 0: no problem;
* 1: warning, maximum number of cycles reached with no convergence;
* 99: undefined error!
"""
#-----------------------------------------------------------------------
# Set controls
self._SetControls(maxIter=maxIter, tolRel=tolRel, tolLS=tolLS, dh=dh, diff=diff, meth=meth)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Verify the limit state equation
if self.limstate == None:
exception = Exception('Before Run you must define the limit state '+
'equation with SetLimState().')
raise exception
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Verify if ANSYS is being used
#
if self._ANSYS:
# Verify if the model is set
if self.ansys.Model == {}:
exception = Exception('Before running ANSYS you must define the model that will be analysed with SetANSYSModel().')
raise exception
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Copy the start point to self.variableDesPt()
#
self.variableDesPt = self.variableStartPt.copy()
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Create the correlation Matrix, VarId list and Jyz/Jzy matrices
#
NInRandVars = len(self.variableDistrib)
NInConstVars = len(self.variableConst)
# NOutVars is ANSYS out vars
NOutVars = 0
# correlMat start as an eye matrix, just with RandomVars!
self.correlMat = np.eye(NInRandVars)
# Create var id list
varId = {}
idx = 0
# For random variables
for eachVar in self.variableDistrib:
varId[eachVar] = idx
idx += 1
# For constant variables
for eachVar in self.variableConst:
varId[eachVar] = idx
idx += 1
# and Now ANSYS output variables, if using ANSYS
if self._ANSYS:
NOutVars = len(self.ansys.varOutNames)
for eachVar in self.ansys.varOutNames:
eachVar = eachVar.lower()
varId[eachVar] = idx
idx += 1
# Save varId in the object
self.varId = varId
# Run all the declared correlations
for each in self.corlist:
i = varId[each[0]]
j = varId[each[1]]
cor = self.corlist[each]
# Apply Nataf to transform the correlation
var1props = self.variableDistrib[each[0]]
var2props = self.variableDistrib[each[1]]
# Both are gauss
if (var1props[0] == 'gauss' and var2props[0] == 'gauss'):
cor = cor
# Both are LN
elif var1props[0] == 'logn' and var2props[0] == 'logn':
cv1 = var1props[3]
cv2 = var2props[3]
cor = cor*(math.log(1+cor*cv1*cv2)/(cor*math.sqrt(math.log(1+cv1**2)*math.log(1+cv2**2))))
# Both are Gumbel
elif var1props[0] == 'gumbel' and var2props[0] == 'gumbel':
cor = cor*(1.064 - 0.069*cor + 0.005*cor**2)
# One is gauss and other is logn
elif (var1props[0] == 'gauss' and var2props[0] == 'logn') \
or (var2props[0] == 'gauss' and var1props[0] == 'logn'):
# who is logn?
if var1props[0] == 'logn':
cv = var1props[3]
else:
cv = var2props[3]
# cor is
cor = cor*cv/math.sqrt(math.log(1+cv**2))
# One is gauss and other is gumbel
elif (var1props[0] == 'gauss' and var2props[0] == 'gumbel') \
or (var2props[0] == 'gauss' and var1props[0] == 'gumbel'):
cor = 1.031*cor
# One is logn and other is gumbel
elif (var1props[0] == 'logn' and var2props[0] == 'gumbel') \
or (var2props[0] == 'logn' and var1props[0] == 'gumbel'):
# who is logn?
if var1props[0] == 'logn':
cv = var1props[3]
else:
cv = var2props[3]
# cor is
cor = cor*(1.029 + 0.001*cor + 0.014*cv + 0.004*cor**2 + 0.233*cv**2 - 0.197*cor*cv)
# Forbidden zone
else:
exception = Exception(('When applying the Nataf transformation on variables \"%s\" and \"%s\" '+
'this combination of distributions is not supported.') % (each[0], each[1]))
raise exception
# Save it!
self.correlMat[i, j] = cor
self.correlMat[j, i] = cor
# Obtain the transformation matrices Jyz/Jzy
# Jzy is the lower triangular matrix obtained by the Cholesky decomposition of correlMat
Jzy = np.linalg.cholesky(self.correlMat)
# and Jyz is the inverse matrix
Jyz = np.linalg.inv(Jzy)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# CYCLEs
#
self._PrintR('\nStarting FORM process.')
timei = time.time()
# Start values
lastcycle = False
curVecRedPts = np.zeros(NInRandVars)
valG = 0
curBeta = 0
self.results['Beta'] = []
self.results['Beta'].append(0)
# Cycle (or iteration) start at 1, so +1
for cycle in range(1, 1+self.controls['maxIter']):
#self._PrintR('----------------------------------------------------------------------------\n')
self._PrintR('____________________________________________________________________________')
self._PrintR('Iteration cycle %d.' % cycle)
#-------------------------------------------------------------------
# Mount mean vector, stddev matrix and transformations (Jxy/Jyx) matrices
#
matStd = np.zeros([NInRandVars, NInRandVars])
vecMean = np.zeros(NInRandVars)
# Vector with current design point (and constant values)
vecPts = np.zeros(NInRandVars+NInConstVars)
for eachVar in self.variableDistrib:
# Variable id
id = varId[eachVar]
# Current X point
pt = self.variableDesPt[eachVar]
# Transform
[vecMean[id], matStd[id, id]] = self._EquivNormal(eachVar, pt)
# Put point in vecPts
vecPts[id] = pt
for eachVar in self.variableConst:
# Get variable id
id = varId[eachVar]
# Put point in vecPts
vecPts[id] = self.variableConst[eachVar]
# In case of correlations we need to apply that over Jxy/Jyx
Jxy = matStd.dot(Jzy)
Jyx = Jyz.dot(np.linalg.inv(matStd))
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Evaluate G and grad(G)
#
# Get dh
dh = self.controls['dh']
if diff == 'center':
# size is 1+2*NInRandVars
matEvalPts = np.zeros([(1+2*NInRandVars+NInConstVars), (NInRandVars+NInConstVars+NOutVars)])
# All lines starts with design point/constant values, after random variables replace it
matEvalPts[:, 0:(NInRandVars+NInConstVars)] = vecPts
# Run all lines from [1,end] with step=2
# curId is current varId
curId = 0
for eachLine in range(1, 1+2*NInRandVars, 2):
# vecdh is not zero on current var item
vecdh = np.zeros(NInRandVars)
vecdh[curId] = dh
# Odd line is +h
matEvalPts[eachLine, 0:NInRandVars] = vecPts[0:NInRandVars] + vecMean*vecdh
# and now -h
matEvalPts[eachLine+1, 0:NInRandVars] = vecPts[0:NInRandVars] - vecMean*vecdh
curId += 1
elif diff == 'forward':
# size is 1+NInRandVars
matEvalPts = np.zeros([(1+NInRandVars+NInConstVars), (NInRandVars+NInConstVars+NOutVars)])
# All lines starts with design point/constant values, after random variables replace it
matEvalPts[:, 0:(NInRandVars+NInConstVars)] = vecPts
curId = 0
for eachLine in range(1, 1+NInRandVars):
# vecdh is not zero on current var item
vecdh = np.zeros(NInRandVars)
vecdh[curId] = dh
matEvalPts[eachLine, 0:NInRandVars] = vecPts[0:NInRandVars] + vecMean*vecdh
curId += 1
elif diff == 'backward':
# size is 1+NInRandVars
matEvalPts = np.zeros([(1+NInRandVars+NInConstVars), (NInRandVars+NInConstVars+NOutVars)])
# All lines starts with design point/constant values, after random variables replace it
matEvalPts[:, 0:(NInRandVars+NInConstVars)] = vecPts
curId = 0
for eachLine in range(1, 1+NInRandVars):
# vecdh is not zero on current var item
vecdh = np.zeros(NInRandVars)
vecdh[curId] = dh
matEvalPts[eachLine, 0:NInRandVars] = vecPts[0:NInRandVars] - vecMean*vecdh
curId += 1
#-------------------------------------------------------------------
# If using ANSYS, send its dependent variables to the simulation.
#
if self._ANSYS:
# Mount a list of matEvalPts lines that should be simulated
# Line 0 with X value is used for all not calculated lines + G(x)
ansysSendingList = [0]
if diff == 'center':
for eachVar in self.ansys.varInNames:
# Current variable == random or constant?
if eachVar.lower() in self.variableDistrib:
eachVar = eachVar.lower()
ansysSendingList.append(2*varId[eachVar]+1)
ansysSendingList.append(2*varId[eachVar]+2)
elif diff == 'forward' or diff == 'backward':
for eachVar in self.ansys.varInNames:
# Current variable is random or constant?
if eachVar.lower() in self.variableDistrib:
eachVar = eachVar.lower()
ansysSendingList.append(varId[eachVar]+1)
# Set length as Ns
Ns = len(ansysSendingList)
self.ansys.ClearValues()
self.ansys.SetLength(Ns)
# Send from the list to ANSYS varInValues
for eachVar in self.ansys.varInNames:
eachVar = eachVar.lower()
self.ansys.SetVarInValues(eachVar, matEvalPts[ansysSendingList, varId[eachVar]])
# Run ANSYS
self.ansys.Run()
# Get results from ANSYS
resANSYS = self.ansys.GetVarOutValues()
# If control APDL debug is true!
if self._options['APDLdebug'] == True:
self._PrintR('---\nPrinting \'resANSYS\' dict:')
self._PrintR(resANSYS)
self._PrintR('---\n')
# Add ANSYS results to matEvalPts
for eachVar in self.ansys.varOutNames:
# First all lines has value from X
matEvalPts[:, varId[eachVar.lower()]] = resANSYS[eachVar][0]
# Now values are stored as in ansysSendingList
matEvalPts[ansysSendingList, varId[eachVar.lower()]] = resANSYS[eachVar]
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Evaluate Limit State (valG) and Gradient (in x) (gradGx)
#
self._PrintR('Evaluating limit state.')
# dic of values in each evaluation
varVal = {}
# Eval Limit State:
for eachVar in varId:
varVal[eachVar] = matEvalPts[0, varId[eachVar]]
# Test if limstate is a string or a function
if(type(self.limstate) == str):
varVal['userf'] = self._userf
valG = eval(self.limstate, globals(), varVal)
else:
valG = self.limstate(**varVal)
# tolLS 'auto' is tolRel*(initial valG)
if cycle == 1 and tolLS == 'auto':
tolLS = self.controls['tolRel']*valG
self.controls['tolLS'] = tolLS
self._PrintR('Limit state tolerance set to %f.' % tolLS)
# Print limit state value:
self._PrintR('Limit state value = %f (tolerance = %f).' % (valG, self.controls['tolLS']))
# Eval Gradient
self._PrintR('Evaluating gradient.')
curId = 0
gradGx = 0
gradGx = np.zeros(NInRandVars)
#Derivatives only for random variables/input var
if diff == 'center':
for eachLine in range(1, (1+2*NInRandVars), 2):
# G(X+dh) = val1
for eachVar in varId:
varVal[eachVar] = matEvalPts[eachLine, varId[eachVar]]
# Test if limstate is a string or a function
if(type(self.limstate) == str):
val1 = eval(self.limstate, globals(), varVal)
else:
val1 = self.limstate(**varVal)
# G(X-dh) = val2
for eachVar in varId:
varVal[eachVar] = matEvalPts[eachLine+1, varId[eachVar]]
# Test if limstate is a string or a function
if(type(self.limstate) == str):
val2 = eval(self.limstate, globals(), varVal)
else:
val2 = self.limstate(**varVal)
gradGx[curId] = (val1-val2)/(2*dh*vecMean[curId])
curId += 1
elif diff == 'forward':
for eachLine in range(1, (1+NInRandVars)):
# G(X+dh) = val1
for eachVar in varId:
varVal[eachVar] = matEvalPts[eachLine, varId[eachVar]]
# Test if limstate is a string or a function
if(type(self.limstate) == str):
val1 = eval(self.limstate, globals(), varVal)
else:
val1 = self.limstate(**varVal)
gradGx[curId] = (val1-valG)/(dh*vecMean[curId])
curId += 1
elif diff == 'backward':
for eachLine in range(1, (1+NInRandVars)):
# G(X-dh) = val1
for eachVar in varId:
varVal[eachVar] = matEvalPts[eachLine, varId[eachVar]]
# Test if limstate is a string or a function
if(type(self.limstate) == str):
val1 = eval(self.limstate, globals(), varVal)
else:
val1 = self.limstate(**varVal)
gradGx[curId] = (valG-val1)/(dh*vecMean[curId])
curId += 1
#---------------------------------------------------------------
#---------------------------------------------------------------
# Get gradG (of y) by gradGx and Jxy
#
gradG = (Jxy.T).dot(gradGx)
#---------------------------------------------------------------
#---------------------------------------------------------------
# Print gradient and alpha
#
self._PrintR('Gradient and direction cosines (alpha):')
self._PrintR(' VarName | grad(g(X_i)) | alpha(i)')
idx = 0
absgrad = math.sqrt(gradG.dot(gradG))
for eachVar in self.variableDesPt:
self._PrintR(' %15s | %+12.5E | %+9.5E' % (eachVar, gradG[idx], gradG[idx]/absgrad))
idx += 1
self._PrintR(' ')
#---------------------------------------------------------------
#---------------------------------------------------------------
# the minimum absolute value of gradG must be greater than 0; if it isn't, print a warning
if min(abs(gradG)) == 0:
self._PrintR('\n WARNING: One or more terms from gradG are equal to zero. \n Please pay attention to that!\n')
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Get reduced uncorrelated points vector
#
# reduced uncorrelated points
curVecRedPts = Jyx.dot(vecPts[:NInRandVars]-vecMean)
# Current beta
if cycle == 1:
self.results['Beta'].append(math.sqrt(curVecRedPts.dot(curVecRedPts)))
curBeta = self.results['Beta'][cycle]
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# FORM Method
#
if meth in ['HLRF', 'iHLRF', 'rHLRF']:
#-------------------------------------------------------------------
# HLRF - Rackwitz and Fiessler recursive method - Not Improved
#
# New reduced design point:
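# HLRF recursion: y_{k+1} = [gradG(y_k).y_k - g(y_k)] * gradG(y_k) / (gradG(y_k).gradG(y_k))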
newVecRedPts = gradG*(gradG.dot(curVecRedPts)-valG)*1/gradG.dot(gradG)
#-------------------------------------------------------------------
# Verify the convergence
#
if(abs(gradG.dot(curVecRedPts)) > 0.0):
schwarzYgradG = abs(gradG.dot(curVecRedPts))/math.sqrt(gradG.dot(gradG)*curVecRedPts.dot(curVecRedPts))
else:
schwarzYgradG = 0.0
self._PrintR('Schwarz inequality between y* and gradG = %f (it should be close to 1).' % schwarzYgradG)
if lastcycle == True:
if abs(valG) < self.controls['tolLS'] and (1-schwarzYgradG) < self.controls['tolRel']:
#self._PrintR('\nFinal design point found on cycle %d.' % cycle)
#self._PrintR('Performing a last cycle with final values.')
#lastcycle = True
self._PrintR('Convergence criteria checked.')
self._PrintR('That\'s all folks.')
self._stnumb = 0
break
else:
self._PrintR('Convergence criteria not checked.')
self._PrintR('Sorry, it wasn\'t the last cycle...')
lastcycle = False
#-------------------------------------------------------------------
# If not converged yet and meth is rHLRF or iHLRF
#
if meth == 'rHLRF':
dk = newVecRedPts - curVecRedPts
newVecRedPts = curVecRedPts + self._options['rHLRF_relax']*dk
if meth == 'iHLRF':
#-------------------------------------------------------------------
# iHLRF - improved Rackwitz and Fiessler recursive method
#
# parameters
par_a = self._options['iHLRF_par_a']
par_b = self._options['iHLRF_par_b']
# direction
dk = newVecRedPts - curVecRedPts
# Line search
self._PrintR('Starting line search for iHLRF step.')
# find ck
# ck by Beck 2019
val1 = math.sqrt(curVecRedPts.dot(curVecRedPts)/gradG.dot(gradG))
val2 = 0.00
if abs(valG) >= self.controls['tolLS']:
# yk+dk = newVecRedPts
val2 = 1/2*newVecRedPts.dot(newVecRedPts)/abs(valG)
ck = max(val1, val2)*self._options['iHLRF_prod_ck'] + self._options['iHLRF_add_ck']
# As ck must be greater than ||y*||/||gradG(y*)||
while ck < val1:
ck = 2*ck
# Ck must always grow
ck = max(ck, self._last_ck)
self._last_ck = ck
self._PrintR('ck value: %f.' % ck)
#-----------------------------------------------------------
# Find lambdk:
#
# Initial calcs
# m(y) = 1/2*y.y^T + c*|g(y)|
# gradMy = grad(m(y))
gradMy = np.zeros(NInRandVars)
gradMy = curVecRedPts + ck*valG/abs(valG)*gradG
#--
# Now we search for lambdk
#
# myk = m(y)
myk = 1/2*curVecRedPts.dot(curVecRedPts) + ck*abs(valG)
# Find maxnk
maxdk = max(abs(dk))
# If b**n*max(dk) is less than tolRel, y ~= y+b**n*dk
maxnk = math.ceil(math.log(self.controls['tolRel']/maxdk)/math.log(par_b))
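# Illustrative arithmetic (hypothetical values): with par_b = 0.5, tolRel = 1e-3
# and max(abs(dk)) = 1, maxnk = ceil(log(1e-3/1)/log(0.5)) = ceil(9.97) = 10.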
# maxnk could be adjusted here; it is currently kept unchanged
maxnk += 0
# The search also stops early if the limit state value no longer changes
stepnk = self._options['iHLRF_step_lambdk_test']
# Since 0 < b < 1 and n starts at 0, the candidate step sizes b**n decrease
# monotonically, and the first n that satisfies the condition is the one used.
# Evaluating in batches of stepnk avoids running e.g. 100 ANSYS simulations
# when only the first few candidates are actually needed.
nk = 0
valG_nk = 99999999.0
done = False
forcenk = False
self._PrintR('iHLRF step range from %f to %f (%.0f steps), evaluated in batches of %d.' % (par_b**nk, par_b**maxnk, maxnk, stepnk))
for step in range(math.ceil(maxnk/stepnk)):
# current batch length
curlen = min(stepnk, maxnk-(step)*stepnk)
# points
#matEvalPts = np.zeros([(1+2*NInRandVars+NInConstVars), (NInRandVars+NInConstVars+NOutVars)])
matEvalPts = np.zeros([curlen, (NInRandVars+NInConstVars+NOutVars)])
matEvalPts[:, 0:(NInRandVars+NInConstVars)] = vecPts
lambdks = np.zeros(curlen)
for eachnk in range(curlen):
lambdks[eachnk] = par_b**nk
matEvalPts[eachnk, 0:NInRandVars] = vecMean + Jxy.dot(curVecRedPts + lambdks[eachnk]*dk)
nk += 1
# pass ANSYSvars to ANSYS
if self._ANSYS:
self.ansys.ClearValues()
self.ansys.SetLength(curlen)
# Send from the list to ANSYS varInValues
for eachVar in self.ansys.varInNames:
eachVar = eachVar.lower()
self.ansys.SetVarInValues(eachVar, matEvalPts[:, varId[eachVar]])
# Run ANSYS
self.ansys.Run()
# Get results from ANSYS
resANSYS = self.ansys.GetVarOutValues()
# If control APDL debug is true!
if self._options['APDLdebug'] == True:
self._PrintR('---\nPrinting \'resANSYS\' dict:')
self._PrintR(resANSYS)
self._PrintR('---\n')
# Add ANSYS results to matEvalPts
for eachVar in self.ansys.varOutNames:
matEvalPts[:, varId[eachVar.lower()]] = resANSYS[eachVar]
#print(matEvalPts)
#print(matEvalPts.shape)
# evaluate the limit state function for eachnk
for eachnk in range(curlen):
# dict of values for each evaluation
varVal = {}
# Save last valG_nk to compare it
valG_nk_old = valG_nk
# Eval Limit State
for eachVar in varId:
varVal[eachVar] = matEvalPts[eachnk, varId[eachVar]]
#valG_nk = eval(self.limstate, globals(), varVal)
# Test if limstate is a string or a function
if(type(self.limstate) == str):
varVal['userf'] = self._userf
valG_nk = eval(self.limstate, globals(), varVal)
else:
valG_nk = self.limstate(**varVal)
# Verify the condition
lambdk = lambdks[eachnk]
curYk = curVecRedPts + lambdk*dk
mynk = 1/2*curYk.dot(curYk)+ck*abs(valG_nk)
# Armijo sufficient-decrease rule
target = -par_a*lambdk*gradMy.dot(dk)
# Some references use gradMy.dot(gradMy) as the decrease measure instead; that variant is kept commented out below
#target = -par_a*lambdk*gradMy.dot(gradMy)
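# In full, the test below accepts the first lambdk such that
#   m(y_k + lambdk*dk) - m(y_k) <= -par_a*lambdk*gradM(y_k).dk,
# i.e. a sufficient decrease of the merit function m(y) = 1/2*y.y + ck*|g(y)|.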
if (mynk - myk) <= target:
self._PrintR('iHLRF step size is %f.' % lambdk)
done = True
break
if valG_nk_old/valG_nk == 1:
#if valG_nk_old/valG_nk >= (1-self.controls['tolRel']):
forcenk = True
break
if done == True or forcenk == True:
break
if done == False or forcenk == True:
if self._options['iHLRF_forced_lambdk'] == 'auto':
lambdk = 1 - schwarzYgradG
else:
lambdk = self._options['iHLRF_forced_lambdk']
self._PrintR('iHLRF step not found, forcing to %f.' % lambdk)
# Save new reduced point
newVecRedPts = curVecRedPts + lambdk*dk
else:
exception = Exception('FORM method \"%s\" is not implemented.' % meth)
raise exception
#-------------------------------------------------------------------
# Transform reduced points back to real space
#
newBeta = math.sqrt(newVecRedPts.dot(newVecRedPts))
self.results['Beta'].append(newBeta)
NewVecPts = vecMean + Jxy.dot(newVecRedPts)
# Save it to self.variableDesPt
for eachVar in self.variableDesPt:
self.variableDesPt[eachVar] = NewVecPts[varId[eachVar]]
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Tell user how is it going
#
self._PrintR(' ')
self._PrintR(' Current Beta = %2.3f.' % curBeta)
self._PrintR(' ')
self._PrintR(' VarName | Next Design Point')
idx = 0
for eachVar in self.variableDesPt:
self._PrintR(' %15s | %8.5E' % (eachVar, self.variableDesPt[eachVar]))
idx += 1
self._PrintR(' ')
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Verify the convergence
#
# Note: the origin of the schwarzYgradG verification (attributed to Beck) is unclear.
# It was used previously, but the check below now follows the classic approach.
# To use it again, change the next verification and also remove the
# corresponding lastcycle conditional below.
#
#### The previous check sometimes prevented convergence.
if np.linalg.norm(newVecRedPts) > 0:
relErrorPoint = max(abs((curVecRedPts-newVecRedPts)/newVecRedPts))
else:
relErrorPoint = 1.0
absErrorBeta = abs(curBeta-newBeta)
self._PrintR('Maximum relative error on design point = %1.4f.' % relErrorPoint)
self._PrintR('Absolute error between current and next beta = %1.4f.' % absErrorBeta)
#### if abs(valG) < self.controls['tolLS'] and (1-schwarzYgradG) < self.controls['tolRel']:
#### if abs(valG) < self.controls['tolLS'] and relErrorPoint < self.controls['tolRel']:
if abs(valG) < self.controls['tolLS']:
# limstate value is ok
if absErrorBeta < self.controls['tolRel']:
# Converged by absolute difference between betas
self._PrintR('\nFinal design point found on cycle %d by absolute difference between two betas.' % cycle)
self._stnumb = 0
lastcycle = True
break
elif (1-schwarzYgradG) < self.controls['tolRel']:
lastcycle = True
self._PrintR('\nFinal design point was probably found on cycle %d by Schwarz inequality between y* and gradG.' % cycle)
self._PrintR('A new cycle will be started to confirm the limit state value and its gradient.')
else:
lastcycle = False
else:
lastcycle = False
#-------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# After CYCLEs
#
timef = time.time()
self.results['ElapsedTime'] = ((timef-timei)/60)
# Save last cycles
self._cycles = cycle
# Get results
#self.results['Beta'] = math.sqrt(newVecRedPts.dot(newVecRedPts))
self.results['Pf'] = scipy.stats.norm.cdf(-self.results['Beta'][-1])
# Put grad and alpha in self.results
self.results['grad'] = {}
self.results['alpha'] = {}
absgrad = math.sqrt(gradG.dot(gradG))
idx = 0
for eachVar in self.variableDistrib:
self.results['grad'][eachVar] = gradG[idx]
self.results['alpha'][eachVar] = gradG[idx]/absgrad
idx += 1
self._PrintR('\n\n============================================================================\n')
# Verify if stopped without convergence
if cycle == self.controls['maxIter']:
self._stnumb = 1
self._PrintR(' WARNING:')
self._PrintR(' The process finished after reaching the iteration limit without')
self._PrintR(' reaching the error tolerance.\n')
self._PrintR(' Total of iterations: %3.3E' % (self._cycles))
self._PrintR(' Probability of failure (Pf): %2.4E' % (self.results['Pf']))
self._PrintR(' Reliability index (Beta): %2.3f' % (self.results['Beta'][-1]))
self._PrintR(' Elapsed time: %4.2f minutes.' % (self.results['ElapsedTime']))
self._PrintR(' Final values:')
self._PrintR(' VarName | D. Point | grad(g(X_i)) | alpha(i) ')
for eachVar in self.variableDesPt:
self._PrintR(' %15s | %8.5E | %+12.5E | %+9.5E' % (eachVar, \
self.variableDesPt[eachVar], self.results['grad'][eachVar], self.results['alpha'][eachVar]))
#self._PrintR(' %s = %3.5E' % (eachVar, self.variableDesPt[eachVar]))
self._PrintR('\n============================================================================\n\n')
#-------------------------------------------------------------------
#-----------------------------------------------------------------------
# Send the return
#
# put all in a dic
finret = {}
finret['status'] = self._stnumb
finret['Pf'] = self.results['Pf']
finret['Beta'] = self.results['Beta'][-1]
finret['DesignPoint'] = self.variableDesPt
finret['gradG'] = self.results['grad']
finret['alpha'] = self.results['alpha']
finret['cycles'] = self._cycles
return finret
#-----------------------------------------------------------------------
def ExportDataCSV(self, filename, description=None):
"""
Exports process data to a CSV file.
Parameters
----------
filename : str, obligatory
Name of file that will receive the values, doesn't need the
extension ".csv", it will be placed automatically.
description : str, optional
A string that will be written at the beginning of the file.
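
Example
-------
Illustrative call (assumes a completed analysis on an object named ``form``):

    form.ExportDataCSV('form_results', 'Optional description line')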
"""
# Open file
filename = filename+'.csv'
try:
f = open(filename, 'wt')
except:
exception = Exception('Unable to open the \"%s\" file for write data.' % filename)
raise exception
else:
# Starts with sep=, for Microsoft Excel
f.write('sep=,\n')
# Description
if description != None:
f.write('%s\n\n' % description)
f.write('Input data:\n')
# Simulation Controllers:
f.write('Process Controllers:\n')
f.write(',Limit of iterations:,%d\n' % self.controls['maxIter'])
f.write(',Relative error tolerance:,%2.3E\n' % self.controls['tolRel'])
f.write(',Absolute LS error tolerance:,%2.3E\n' % self.controls['tolLS'])
f.write(',deltah (for derivatives):,%2.3E\n' % self.controls['dh'])
f.write(',Finite difference method:,%s\n' % self.controls['diff'])
f.write(',FORM Method:,%s\n' % self.controls['meth'])
# ANSYS Properties
if self._ANSYS:
f.write('\n')
f.write('ANSYS Properties:\n')
f.write(',ANSYS Model:\n')
f.write(',,Model:,%s\n' % self.ansys.Model['inputname'])
f.write(',,Extra files:,%s\n' % self.ansys.Model['extrafiles'])
f.write(',,Input dir.:,%s\n' % self.ansys.Model['directory'])
f.write(',ANSYS Input variables:\n')
for eachVar in self.ansys.varInNames:
f.write(',,%s\n' % eachVar)
f.write(',ANSYS Output variables:\n')
for eachVar in self.ansys.varOutNames:
f.write(',,%s\n' % eachVar)
f.write('\n')
# Random variables
f.write('Random variables:\n')
f.write(',Name,Distribution,Mean,Standard Deviation,CV,Par1,Par2\n')
for eachVar in self.variableDistrib:
values = self.variableDistrib[eachVar]
cmd = ',%s,%s' % (eachVar, values[0])
for eachVal in values[1:]:
cmd = '%s,%8.5E' % (cmd, eachVal)
f.write('%s\n' % cmd)
f.write('\n')
# Constant variables
f.write('Constant variables:\n')
f.write(',Name,Value\n')
for eachVar in self.variableConst:
cmd = ',%s,%8.5E' % (eachVar, self.variableConst[eachVar])
f.write('%s\n' % cmd)
f.write('\n')
# Correlation Matrix
f.write('Correlation matrix:\n')
# First line with Varnames:
cmd = ','
idx = 0
for eachVar in self.varId:
# Just random variables!
if idx >= len(self.variableDistrib):
break
cmd = '%s,%s' % (cmd, eachVar)
idx += 1
cmd = '%s\n' % cmd
f.write(cmd)
# Matrix lines with first column as Varname
idx = 0
for eachLine in self.correlMat:
cmd = ',%s' % list(self.varId.keys())[idx]  # first column: name of the idx-th variable
# append each correlation value of this row
for eachVal in eachLine:
cmd = '%s,%f' % (cmd, eachVal)
cmd = '%s\n' % cmd
f.write(cmd)
idx += 1
f.write('\n')
# Limit state
f.write('Limit State:,"%s"\n' % (self.limstate))
f.write('\n')
# Initial point
f.write('Initial design point:\n')
for eachVar in self.variableStartPt:
f.write(',%s,%8.5E\n' % (eachVar, self.variableStartPt[eachVar]))
f.write('\n')
# Results part
f.write('\nResults:\n')
# Final results
f.write('FORM results:\n')
f.write(',Exit status:,%d,\n' % self._stnumb)
f.write(',Total of iterations:,%d\n' % (self._cycles))
f.write(',Probability of failure (Pf):,%2.4E\n' % self.results['Pf'])
f.write(',Reliability Index (Beta):,%2.3f\n' % self.results['Beta'][-1])
f.write(',Elapsed time (minutes):,%4.3f\n' % self.results['ElapsedTime'])
f.write('\n')
# Final Sampling Point
f.write('Final values:\n')
f.write(',Variable,D. Point,grad(g(X_i)),alpha(i)\n')
for eachVar in self.variableDesPt:
f.write(',%s,%8.5E,%11.5E,%9.5E\n' % (eachVar, \
self.variableDesPt[eachVar], self.results['grad'][eachVar], self.results['alpha'][eachVar]))
f.write('\n')
# Beta values
f.write('Beta indexes of cycles:\n')
f.write(',Cycle,Beta\n')
idx = 1
for eachBeta in self.results['Beta'][1:]:
f.write(',%d,%2.3f\n' % (idx, eachBeta))
idx += 1
# End
f.close()
self._PrintR('Simulation data exported to "%s".' % filename)
|
djivey/kinetic
|
_modules/generate.py
|
## generate various items on-demand. For use in sls files when no appropriate module exists.
import random
import string
from cryptography.fernet import Fernet
__virtualname__ = 'generate'
def __virtual__():
return __virtualname__
def mac(prefix='52:54:00'):
return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
random.randint(0, 0xff),
random.randint(0, 0xff),
random.randint(0, 0xff))
def erlang_cookie(length = 20):
return ''.join(random.choice(string.ascii_uppercase) for i in range(length))
def fernet_key():
return Fernet.generate_key().decode('utf-8')
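# Illustrative use from an sls file via Salt's jinja renderer (the state keys
# shown here are hypothetical):
#   cookie: {{ salt['generate.erlang_cookie']() }}
#   key: {{ salt['generate.fernet_key']() }}
#   mac: {{ salt['generate.mac']() }}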
|
djivey/kinetic
|
_modules/netcalc.py
|
import salt.utils.network as network
__virtualname__ = 'netcalc'
def __virtual__():
return __virtualname__
def gethosts(cidr):
return network._network_hosts(cidr)
|
djivey/kinetic
|
_modules/minionmanage.py
|
import salt.utils.network as network
import salt.modules.file as file
__virtualname__ = 'minionmanage'
def __virtual__():
return __virtualname__
def populate_cache(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_controller(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_controllerv2(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_storage(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_storagev2(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_compute(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_computev2(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_container(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
def populate_containerv2(path):
pending = file.readdir(path)
pending.remove('.')
pending.remove('..')
return pending
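# All populate_* functions above behave identically: they list the entries of
# `path` and drop the '.' and '..' entries. Illustrative CLI call (target and
# path are hypothetical):
#   salt 'controller*' minionmanage.populate_cache /srv/kinetic/pending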
|
djivey/kinetic
|
_modules/cephx.py
|
## inspiration for simple salt-key module taken from
## https://github.com/ceph/ceph-ansible/blob/master/library/ceph_key.py#L26
import os, struct, time, base64
__virtualname__ = 'cephx'
def __virtual__():
return __virtualname__
def make_key():
key = os.urandom(16)
header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
secret = base64.b64encode(header + key)
return secret.decode('utf-8')
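# Header layout packed above ('<hiih', little-endian): int16 key type (1),
# int32 creation time in seconds, int32 set to 0, int16 key length (16).
# The base64 of header + key is the secret string used in a cephx keyring
# entry (format followed here as in the ceph-ansible module linked above).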
|
dendisuhubdy/pyannote-audio
|
hubconf.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2019-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import os
import yaml
import pathlib
import typing
import functools
import shutil
import zipfile
import torch
from pyannote.audio.features import Pretrained as _Pretrained
from pyannote.pipeline import Pipeline as _Pipeline
dependencies = ['pyannote.audio', 'torch']
# shasum -a 256 models/sad_ami.zip
_MODELS = {
'sad_dihard': 'ee924bd1751e6960e4e4322425dcdfdc77abec33a5f3ac1b74759229c176ff70',
'scd_dihard': '809aabd69c8116783b1d5cc1ba2f043d2b2d2fe693b34520c948ad5b2940e07c',
'ovl_dihard': '0ae57e5fc099b498db19aabc1d4f29e21cad44751227137909bd500048830dbd',
'emb_voxceleb': None,
'sad_ami': 'cb77c5ddfeec41288f428ee3edfe70aae908240e724a44c6592c8074462c6707',
'scd_ami': 'd2f59569c485ba3674130d441e9519993f26b7a1d3ad7d106739da0fc1dccea2',
'ovl_ami': 'debcb45c94d36b9f24550faba35c234b87cdaf367ac25729e4d8a140ac44fe64',
'emb_ami': '93c40c6fac98017f2655066a869766c536b10c8228e6a149a33e9d9a2ae80fd8',
}
# shasum -a 256 pipelines/dia_ami.zip
_PIPELINES = {
'dia_dihard': None,
'dia_ami': '81bb175bbcdbcfe7989e09dd9afbbd853649d075a6ed63477cd8c288a179e77b',
}
_GITHUB = 'https://github.com/pyannote/pyannote-audio-models'
_URL = f'{_GITHUB}/raw/master/{{kind}}s/{{name}}.zip'
def _generic(name: str,
duration: float = None,
step: float = 0.25,
batch_size: int = 32,
device: typing.Optional[typing.Union[typing.Text, torch.device]] = None,
pipeline: typing.Optional[bool] = None,
force_reload: bool = False) -> typing.Union[_Pretrained, _Pipeline]:
"""Load pretrained model or pipeline
Parameters
----------
name : str
Name of pretrained model or pipeline
duration : float, optional
Override audio chunks duration.
Defaults to the one used during training.
step : float, optional
Ratio of audio chunk duration used for the internal sliding window.
Defaults to 0.25 (i.e. 75% overlap between two consecutive windows).
Reducing this value might lead to better results (at the expense of
slower processing).
batch_size : int, optional
Batch size used for inference. Defaults to 32.
device : torch.device, optional
Device used for inference.
pipeline : bool, optional
Wrap pretrained model in a (not fully optimized) pipeline.
force_reload : bool
Whether to discard the existing cache and force a fresh download.
Defaults to False (reuse the existing cache).
Returns
-------
pretrained: `Pretrained` or `Pipeline`
Usage
-----
>>> model = torch.hub.load('pyannote/pyannote-audio', 'sad_ami')
>>> scores = model({'audio': '/path/to/audio.wav'})
"""
model_exists = name in _MODELS
pipeline_exists = name in _PIPELINES
if model_exists and pipeline_exists:
if pipeline is None:
msg = (
f'Both a pretrained model and a pretrained pipeline called '
f'"{name}" are available. Use option "pipeline=True" to '
f'load the pipeline, and "pipeline=False" to load the model.')
raise ValueError(msg)
if pipeline:
kind = 'pipeline'
zip_url = _URL.format(kind=kind, name=name)
sha256 = _PIPELINES[name]
return_pipeline = True
else:
kind = 'model'
zip_url = _URL.format(kind=kind, name=name)
sha256 = _MODELS[name]
return_pipeline = False
elif pipeline_exists:
if pipeline is None:
pipeline = True
if not pipeline:
msg = (
f'Could not find any pretrained "{name}" model. '
f'A pretrained "{name}" pipeline does exist. '
f'Did you mean "pipeline=True"?'
)
raise ValueError(msg)
kind = 'pipeline'
zip_url = _URL.format(kind=kind, name=name)
sha256 = _PIPELINES[name]
return_pipeline = True
elif model_exists:
if pipeline is None:
pipeline = False
kind = 'model'
zip_url = _URL.format(kind=kind, name=name)
sha256 = _MODELS[name]
return_pipeline = pipeline
if name.startswith('emb_') and return_pipeline:
msg = (
f'Pretrained model "{name}" has no associated pipeline. Use '
f'"pipeline=False" or remove "pipeline" option altogether.'
)
raise ValueError(msg)
else:
msg = (
f'Could not find any pretrained model nor pipeline called "{name}".'
)
raise ValueError(msg)
if sha256 is None:
msg = (
f'Pretrained {kind} "{name}" is not available yet but will be '
f'released shortly. Stay tuned...'
)
raise NotImplementedError(msg)
# path where pre-trained models and pipelines are downloaded and cached
hub_dir = pathlib.Path(os.environ.get("PYANNOTE_AUDIO_HUB",
"~/.pyannote/hub")).expanduser().resolve()
pretrained_dir = hub_dir / f'{kind}s'
pretrained_subdir = pretrained_dir / f'{name}'
pretrained_zip = pretrained_dir / f'{name}.zip'
if not pretrained_subdir.exists() or force_reload:
if pretrained_subdir.exists():
shutil.rmtree(pretrained_subdir)
from pyannote.audio.utils.path import mkdir_p
mkdir_p(pretrained_zip.parent)
try:
msg = (
f'Downloading pretrained {kind} "{name}" to "{pretrained_zip}".'
)
print(msg)
torch.hub.download_url_to_file(zip_url,
pretrained_zip,
hash_prefix=sha256,
progress=True)
except RuntimeError as e:
shutil.rmtree(pretrained_subdir, ignore_errors=True)  # subdir may not exist if the download failed
msg = (
f'Failed to download pretrained {kind} "{name}". '
f'Please try again.')
raise RuntimeError(msg)
# unzip downloaded file
with zipfile.ZipFile(pretrained_zip) as z:
z.extractall(path=pretrained_dir)
if kind == 'model':
params_yml, = pretrained_subdir.glob('*/*/*/*/params.yml')
pretrained = _Pretrained(validate_dir=params_yml.parent,
duration=duration,
step=step,
batch_size=batch_size,
device=device)
if return_pipeline:
if name.startswith('sad_'):
from pyannote.audio.pipeline.speech_activity_detection import SpeechActivityDetection
pipeline = SpeechActivityDetection(scores=pretrained)
elif name.startswith('scd_'):
from pyannote.audio.pipeline.speaker_change_detection import SpeakerChangeDetection
pipeline = SpeakerChangeDetection(scores=pretrained)
elif name.startswith('ovl_'):
from pyannote.audio.pipeline.overlap_detection import OverlapDetection
pipeline = OverlapDetection(scores=pretrained)
else:
# this should never happen
msg = (
f'Pretrained model "{name}" has no associated pipeline. Use '
f'"pipeline=False" or remove "pipeline" option altogether.'
)
raise ValueError(msg)
return pipeline.load_params(params_yml)
return pretrained
elif kind == 'pipeline':
from pyannote.audio.pipeline.utils import load_pretrained_pipeline
params_yml, *_ = pretrained_subdir.glob('*/*/params.yml')
return load_pretrained_pipeline(params_yml.parent)
sad_dihard = functools.partial(_generic, 'sad_dihard')
scd_dihard = functools.partial(_generic, 'scd_dihard')
ovl_dihard = functools.partial(_generic, 'ovl_dihard')
dia_dihard = functools.partial(_generic, 'dia_dihard')
sad_ami = functools.partial(_generic, 'sad_ami')
scd_ami = functools.partial(_generic, 'scd_ami')
ovl_ami = functools.partial(_generic, 'ovl_ami')
emb_ami = functools.partial(_generic, 'emb_ami')
dia_ami = functools.partial(_generic, 'dia_ami')
emb_voxceleb = functools.partial(_generic, 'emb_voxceleb')
sad = sad_dihard
scd = scd_dihard
ovl = ovl_dihard
emb = emb_voxceleb
dia = dia_dihard
if __name__ == '__main__':
DOCOPT = """Create torch.hub zip file from validation directory
Usage:
hubconf.py <validate_dir>
hubconf.py (-h | --help)
hubconf.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from pyannote.audio.applications.base import create_zip
arguments = docopt(DOCOPT, version='hubconf')
validate_dir = pathlib.Path(arguments['<validate_dir>'])
hub_zip = create_zip(validate_dir)
print(f'Created file "{hub_zip.name}" in directory "{validate_dir}".')
|
dendisuhubdy/pyannote-audio
|
pyannote/audio/utils/path.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import os
import errno
from pathlib import Path
from typing import Text
from typing import Union
from pyannote.database.protocol.protocol import ProtocolFile
from pyannote.core import Segment
from pyannote.core import SlidingWindowFeature
import numpy as np
def mkdir_p(path):
"""Create directory and all its parents if they do not exist
This is the equivalent of Unix 'mkdir -p path'
Parameter
---------
path : str
Path to new directory.
Reference
---------
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise exc
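# Note: on Python >= 3.2 this is essentially equivalent to
# os.makedirs(path, exist_ok=True).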
class Pre___ed:
"""Wrapper around Precomputed, Pretrained, or torch.hub model
This class allows user-facing APIs to support (torch.hub or locally)
pretrained models or precomputed output interchangeably.
* Pre___ed('sad_ami') is equivalent to
torch.hub.load('pyannote/pyannote-audio', 'sad_ami')
* Pre___ed('/path/to/xp/train/.../validate/...') is equivalent to
Pretrained('/path/to/xp/train/.../validate/...')
* Pre___ed('/path/to/xp/train/.../validate/.../apply/...') is equivalent to
Precomputed('/path/to/xp/train/.../validate/.../apply/...')
Bonus: Pre___ed('@scores') is equivalent to lambda f: f['scores']
Parameter
---------
placeholder : Text, Path, Precomputed, or Pretrained
"""
def __init__(self, placeholder: Union[Text, Path, 'Precomputed', 'Pretrained']):
super().__init__()
from pyannote.audio.features import Pretrained
from pyannote.audio.features import Precomputed
if isinstance(placeholder, (Pretrained, Precomputed)):
scorer = placeholder
# if the path to a directory is provided
elif Path(placeholder).is_dir():
directory = Path(placeholder)
# if this succeeds, it means that 'placeholder' was indeed a path
# to the output of "pyannote-audio ... apply"
try:
scorer = Precomputed(root_dir=directory)
except Exception as e:
scorer = None
if scorer is None:
# if this succeeds, it means that 'placeholder' was indeed a
# path to the output of "pyannote-audio ... validate"
try:
scorer = Pretrained(validate_dir=directory)
except Exception as e:
scorer = None
if scorer is None:
msg = (
f'"{placeholder}" directory does not seem to be the path '
f'to precomputed features nor the path to a model '
f'validation step.'
)
# otherwise it should be a string
elif isinstance(placeholder, Text):
# @key means that one should read the "key" key of protocol files
if placeholder.startswith('@'):
key = placeholder[1:]
scorer = lambda current_file: current_file[key]
# if string does not start with "@", it means that 'placeholder'
# is the name of a torch.hub model
else:
try:
import torch
scorer = torch.hub.load('pyannote/pyannote-audio',
placeholder)
except Exception as e:
msg = (
f'Could not load {placeholder} model from torch.hub. '
f'The following exception was raised:\n{e}')
scorer = None
# warn the user that something went wrong
if scorer is None:
raise ValueError(msg)
self.scorer_ = scorer
def crop(self, current_file: ProtocolFile,
segment: Segment,
mode: Text = 'center',
fixed: float = None) -> np.ndarray:
"""Extract frames from a specific region
Parameters
----------
current_file : ProtocolFile
Protocol file
segment : Segment
Region of the file to process.
mode : {'loose', 'strict', 'center'}, optional
In 'strict' mode, only frames fully included in 'segment' support are
returned. In 'loose' mode, any intersecting frames are returned. In
'center' mode, first and last frames are chosen to be the ones
whose centers are the closest to 'segment' start and end times.
Defaults to 'center'.
fixed : float, optional
Overrides 'segment' duration and ensures that the number of
returned frames is fixed (which might otherwise not be the case
because of rounding errors).
Returns
-------
frames : np.ndarray
Frames.
"""
from pyannote.audio.features import Precomputed
from pyannote.audio.features import Pretrained
if isinstance(self.scorer_, (Precomputed, Pretrained)):
return self.scorer_.crop(current_file,
segment,
mode=mode,
fixed=fixed)
return self.scorer_(current_file).crop(segment,
mode=mode,
fixed=fixed,
return_data=True)
def __call__(self, current_file) -> SlidingWindowFeature:
"""Extract frames from the whole file
Parameters
----------
current_file : ProtocolFile
Protocol file
Returns
-------
frames : np.ndarray
Frames.
"""
return self.scorer_(current_file)
# used to "inherit" most scorer_ attributes
def __getattr__(self, name):
return getattr(self.scorer_, name)
|
dendisuhubdy/pyannote-audio
|
pyannote/audio/labeling/tasks/base.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import torch
import torch.nn.functional as F
import numpy as np
import scipy.signal
from pyannote.core import Segment
from pyannote.core import SlidingWindow
from pyannote.core import Timeline
from pyannote.core import Annotation
from pyannote.core import SlidingWindowFeature
from pyannote.database import get_unique_identifier
from pyannote.database import get_annotated
from pyannote.database.protocol.protocol import Protocol
from pyannote.core.utils.numpy import one_hot_encoding
from pyannote.audio.features import FeatureExtraction
from pyannote.audio.features import RawAudio
from pyannote.core.utils.random import random_segment
from pyannote.core.utils.random import random_subsegment
from pyannote.audio.train.trainer import Trainer
from pyannote.audio.train.generator import BatchGenerator
from pyannote.audio.train.task import Task, TaskType, TaskOutput
from pyannote.audio.train.model import Resolution
from pyannote.audio.train.model import RESOLUTION_CHUNK
from pyannote.audio.train.model import RESOLUTION_FRAME
class LabelingTaskGenerator(BatchGenerator):
"""Base batch generator for various labeling tasks
This class should be inherited from: it should not be used directly
Parameters
----------
feature_extraction : `pyannote.audio.features.FeatureExtraction`
Feature extraction
protocol : `pyannote.database.Protocol`
subset : {'train', 'development', 'test'}, optional
Protocol and subset.
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
Defaults to `feature_extraction.sliding_window`
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models that
include the feature extraction step (e.g. SincNet) and therefore use a
different cropping mode. Defaults to 'center'.
duration : float, optional
Duration of sub-sequences. Defaults to 3.2s.
step : `float`, optional
Sub-sequences step. Defaults to `duration`.
Only used when `exhaustive` is True.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Force total audio duration per epoch, in days.
Defaults to total duration of protocol subset.
exhaustive : bool, optional
Ensure training files are covered exhaustively (useful in case of
non-uniform label distribution).
shuffle : bool, optional
Shuffle exhaustive samples. Defaults to False.
mask_dimension : `int`, optional
When set, batches will have a "mask" key that provides a mask that has
the same length as "y". This "mask" will be passed to the loss function
as a way to weigh samples according to their "mask" value. The actual
value of `mask_dimension` is used to select which dimension to use.
This option assumes that `current_file["mask"]` contains a
`SlidingWindowFeature` that can be used as masking. Defaults to not use
masking.
mask_logscale : `bool`, optional
Set to True to indicate that mask values are log scaled. Will apply
exponential. Defaults to False. Has no effect when `mask_dimension`
is not set.
"""
def __init__(self,
feature_extraction: FeatureExtraction,
protocol: Protocol,
subset='train',
resolution: Resolution = None,
alignment=None,
duration=3.2,
step=None,
batch_size: int = 32,
per_epoch: float = None,
exhaustive=False,
shuffle=False,
mask_dimension=None,
mask_logscale=False):
self.feature_extraction = feature_extraction
self.duration = duration
# TODO: update 'step' semantics for consistency
# TODO: should mean "ratio of duration"
if step is None:
step = duration
self.step = step
# TODO: create a reusable guess_resolution(self) function
# TODO: in pyannote.audio.train.model
if resolution in [None, RESOLUTION_FRAME]:
resolution = self.feature_extraction.sliding_window
elif resolution == RESOLUTION_CHUNK:
resolution = SlidingWindow(duration=self.duration,
step=self.step)
self.resolution = resolution
if alignment is None:
alignment = 'center'
self.alignment = alignment
self.batch_size = batch_size
self.exhaustive = exhaustive
self.shuffle = shuffle
self.mask_dimension = mask_dimension
self.mask_logscale = mask_logscale
total_duration = self._load_metadata(protocol, subset=subset)
if per_epoch is None:
per_epoch = total_duration / (24 * 60 * 60)
self.per_epoch = per_epoch
def postprocess_y(self, Y):
"""This function does nothing but return its input.
It should be overridden by subclasses.
Parameters
----------
Y :
Returns
-------
postprocessed :
"""
return Y
def initialize_y(self, current_file):
"""Precompute y for the whole file
Parameters
----------
current_file : `dict`
File as provided by a pyannote.database protocol.
Returns
-------
y : `SlidingWindowFeature`
Precomputed y for the whole file
"""
y, _ = one_hot_encoding(current_file['annotation'],
get_annotated(current_file),
self.resolution,
labels=self.segment_labels_,
mode='center')
return SlidingWindowFeature(self.postprocess_y(y.data),
y.sliding_window)
def crop_y(self, y, segment):
"""Extract y for specified segment
Parameters
----------
y : `pyannote.core.SlidingWindowFeature`
Output of `initialize_y` above.
segment : `pyannote.core.Segment`
Segment for which to obtain y.
Returns
-------
cropped_y : (n_samples, dim) `np.ndarray`
y for specified `segment`
"""
return y.crop(segment, mode=self.alignment,
fixed=self.duration)
def _load_metadata(self, protocol, subset='train') -> float:
"""Load training set metadata
This function is called once at instantiation time, returns the total
training set duration, and populates the following attributes:
Attributes
----------
data_ : dict
{'segments': <list of annotated segments>,
'duration': <total duration of annotated segments>,
'current_file': <protocol dictionary>,
'y': <labels as numpy array>}
segment_labels_ : list
Sorted list of (unique) labels in protocol.
file_labels_ : dict of list
Sorted lists of (unique) file labels in protocol
Returns
-------
duration : float
Total duration of annotated segments, in seconds.
"""
self.data_ = {}
segment_labels, file_labels = set(), dict()
# loop once on all files
for current_file in getattr(protocol, subset)():
# ensure annotation/annotated are cropped to actual file duration
support = Segment(start=0, end=current_file['duration'])
current_file['annotated'] = get_annotated(current_file).crop(
support, mode='intersection')
current_file['annotation'] = current_file['annotation'].crop(
support, mode='intersection')
# keep track of unique segment labels
segment_labels.update(current_file['annotation'].labels())
# keep track of unique file labels
for key, value in current_file.items():
if isinstance(value, (Annotation, Timeline, SlidingWindowFeature)):
continue
if key not in file_labels:
file_labels[key] = set()
file_labels[key].add(value)
segments = [s for s in current_file['annotated']
if s.duration > self.duration]
# corner case where no segment is long enough
# and we removed them all...
if not segments:
continue
# total duration of label in current_file (after removal of
# short segments).
duration = sum(s.duration for s in segments)
# store all these in data_ dictionary
datum = {'segments': segments,
'duration': duration,
'current_file': current_file}
uri = get_unique_identifier(current_file)
self.data_[uri] = datum
self.file_labels_ = {k: sorted(file_labels[k]) for k in file_labels}
self.segment_labels_ = sorted(segment_labels)
for current_file in getattr(protocol, subset)():
uri = get_unique_identifier(current_file)
if uri in self.data_:
self.data_[uri]['y'] = self.initialize_y(current_file)
return sum(datum['duration'] for datum in self.data_.values())
@property
def specifications(self):
"""Task & sample specifications
Returns
-------
specs : `dict`
['task'] (`pyannote.audio.train.Task`) : task
['X']['dimension'] (`int`) : features dimension
['y']['classes'] (`list`) : list of classes
"""
specs = {
'task': Task(type=TaskType.MULTI_CLASS_CLASSIFICATION,
output=TaskOutput.SEQUENCE),
'X': {'dimension': self.feature_extraction.dimension},
'y': {'classes': self.segment_labels_},
}
return specs
def samples(self):
if self.exhaustive:
return self._sliding_samples()
else:
return self._random_samples()
def _random_samples(self):
"""Random samples
Returns
-------
samples : generator
Generator that yields {'X': ..., 'y': ...} samples indefinitely.
"""
uris = list(self.data_)
durations = np.array([self.data_[uri]['duration'] for uri in uris])
probabilities = durations / np.sum(durations)
while True:
# choose file at random with probability
# proportional to its (annotated) duration
uri = uris[np.random.choice(len(uris), p=probabilities)]
datum = self.data_[uri]
current_file = datum['current_file']
# choose one segment at random with probability
# proportional to its duration
segment = next(random_segment(datum['segments'], weighted=True))
# choose fixed-duration subsegment at random
subsegment = next(random_subsegment(segment, self.duration))
X = self.feature_extraction.crop(current_file,
subsegment, mode='center',
fixed=self.duration)
y = self.crop_y(datum['y'], subsegment)
sample = {'X': X, 'y': y}
if self.mask_dimension is not None:
# extract mask for current sub-segment
mask = current_file['mask'].crop(subsegment,
mode='center',
fixed=self.duration)
# use requested dimension (e.g. non-overlap scores)
mask = mask[:, self.mask_dimension]
if self.mask_logscale:
mask = np.exp(mask)
# it might happen that "mask" and "y" use different sliding
# windows. therefore, we simply resample "mask" to match "y"
if len(mask) != len(y):
mask = scipy.signal.resample(mask, len(y), axis=0)
sample['mask'] = mask
for key, classes in self.file_labels_.items():
sample[key] = classes.index(current_file[key])
yield sample
def _sliding_samples(self):
uris = list(self.data_)
durations = np.array([self.data_[uri]['duration'] for uri in uris])
probabilities = durations / np.sum(durations)
sliding_segments = SlidingWindow(duration=self.duration,
step=self.step)
while True:
np.random.shuffle(uris)
# loop on all files
for uri in uris:
datum = self.data_[uri]
# make a copy of current file
current_file = dict(datum['current_file'])
# compute features for the whole file
features = self.feature_extraction(current_file)
# randomly shift 'annotated' segments start time so that
# we avoid generating exactly the same subsequence twice
annotated = Timeline()
for segment in get_annotated(current_file):
shifted_segment = Segment(
segment.start + np.random.random() * self.duration,
segment.end)
if shifted_segment:
annotated.add(shifted_segment)
if self.shuffle:
samples = []
for sequence in sliding_segments(annotated):
X = features.crop(sequence, mode='center',
fixed=self.duration)
y = self.crop_y(datum['y'], sequence)
sample = {'X': X, 'y': y}
if self.mask_dimension is not None:
# extract mask for current sub-segment
mask = current_file['mask'].crop(sequence,
mode='center',
fixed=self.duration)
# use requested dimension (e.g. non-overlap scores)
mask = mask[:, self.mask_dimension]
if self.mask_logscale:
mask = np.exp(mask)
# it might happen that "mask" and "y" use different
# sliding windows. therefore, we simply resample "mask"
# to match "y"
if len(mask) != len(y):
mask = scipy.signal.resample(mask, len(y), axis=0)
sample['mask'] = mask
for key, classes in self.file_labels_.items():
sample[key] = classes.index(current_file[key])
if self.shuffle:
samples.append(sample)
else:
yield sample
if self.shuffle:
np.random.shuffle(samples)
for sample in samples:
yield sample
@property
def batches_per_epoch(self):
"""Number of batches needed to complete an epoch"""
duration_per_epoch = self.per_epoch * 24 * 60 * 60
duration_per_batch = self.duration * self.batch_size
return int(np.ceil(duration_per_epoch / duration_per_batch))
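# Illustrative arithmetic (assuming per_epoch = 1 day and the defaults
# duration = 3.2 s, batch_size = 32):
#   86400 / (3.2 * 32) = 843.75  ->  ceil -> 844 batches per epoch.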
class LabelingTask(Trainer):
"""Base class for various labeling tasks
This class should be inherited from: it should not be used directly
Parameters
----------
duration : float, optional
Duration of sub-sequences. Defaults to 3.2s.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Total audio duration per epoch, in days.
Defaults to one day (1).
"""
def __init__(self, duration=3.2, batch_size=32, per_epoch=1):
super(LabelingTask, self).__init__()
self.duration = duration
self.batch_size = batch_size
self.per_epoch = per_epoch
def get_batch_generator(self, feature_extraction, protocol, subset='train',
resolution=None, alignment=None):
"""This method should be overriden by subclass
Parameters
----------
feature_extraction : `pyannote.audio.features.FeatureExtraction`
protocol : `pyannote.database.Protocol`
subset : {'train', 'development'}, optional
Defaults to 'train'.
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models
that include the feature extraction step (e.g. SincNet) and
therefore use a different cropping mode. Defaults to 'center'.
Returns
-------
batch_generator : `LabelingTaskGenerator`
"""
return LabelingTaskGenerator(
feature_extraction,
protocol,
subset=subset,
resolution=resolution,
alignment=alignment,
duration=self.duration,
step=self.step,
per_epoch=self.per_epoch,
batch_size=self.batch_size)
@property
def weight(self):
"""Class/task weights
Returns
-------
weight : None or `torch.Tensor`
"""
return None
def on_train_start(self):
"""Set loss function (with support for class weights)
loss_func_ = Function f(input, target, weight=None) -> loss value
"""
self.task_ = self.model_.task
if self.task_.is_multiclass_classification:
self.n_classes_ = len(self.model_.classes)
def loss_func(input, target, weight=None, mask=None):
if mask is None:
return F.nll_loss(input, target, weight=weight,
reduction='mean')
else:
return torch.mean(
mask * F.nll_loss(input, target,
weight=weight,
reduction='none'))
if self.task_.is_multilabel_classification:
def loss_func(input, target, weight=None, mask=None):
if mask is None:
return F.binary_cross_entropy(input, target, weight=weight,
reduction='mean')
else:
return torch.mean(
mask * F.binary_cross_entropy(input, target,
weight=weight,
reduction='none'))
if self.task_.is_regression:
def loss_func(input, target, weight=None, mask=None):
if mask is None:
return F.mse_loss(input, target,
reduction='mean')
else:
return torch.mean(
mask * F.mse_loss(input, target,
reduction='none'))
self.loss_func_ = loss_func
def batch_loss(self, batch):
"""Compute loss for current `batch`
Parameters
----------
batch : `dict`
['X'] (`numpy.ndarray`)
['y'] (`numpy.ndarray`)
['mask'] (`numpy.ndarray`, optional)
Returns
-------
batch_loss : `dict`
['loss'] (`torch.Tensor`) : Loss
"""
# forward pass
X = torch.tensor(batch['X'],
dtype=torch.float32,
device=self.device_)
fX = self.model_(X)
mask = None
if self.task_.is_multiclass_classification:
fX = fX.view((-1, self.n_classes_))
target = torch.tensor(
batch['y'],
dtype=torch.int64,
device=self.device_).contiguous().view((-1, ))
if 'mask' in batch:
mask = torch.tensor(
batch['mask'],
dtype=torch.float32,
device=self.device_).contiguous().view((-1, ))
elif self.task_.is_multilabel_classification or \
self.task_.is_regression:
target = torch.tensor(
batch['y'],
dtype=torch.float32,
device=self.device_)
if 'mask' in batch:
mask = torch.tensor(
batch['mask'],
dtype=torch.float32,
device=self.device_)
weight = self.weight
if weight is not None:
weight = weight.to(device=self.device_)
return {
'loss': self.loss_func_(fX, target,
weight=weight,
mask=mask),
}
|
dendisuhubdy/pyannote-audio
|
pyannote/audio/pipeline/utils.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import yaml
from pathlib import Path
from pyannote.core import Annotation
from pyannote.pipeline import Pipeline
from pyannote.core.utils.helper import get_class_by_name
def assert_string_labels(annotation: Annotation, name: str):
"""Check that annotation only contains string labels
Parameters
----------
annotation : `pyannote.core.Annotation`
Annotation.
name : `str`
Name of the annotation (used for user feedback in case of failure)
"""
if any(not isinstance(label, str) for label in annotation.labels()):
msg = f'{name} must contain `str` labels only.'
raise ValueError(msg)
def assert_int_labels(annotation: Annotation, name: str):
"""Check that annotation only contains integer labels
Parameters
----------
annotation : `pyannote.core.Annotation`
Annotation.
name : `str`
Name of the annotation (used for user feedback in case of failure)
"""
if any(not isinstance(label, int) for label in annotation.labels()):
msg = f'{name} must contain `int` labels only.'
raise ValueError(msg)
def load_pretrained_pipeline(train_dir: Path) -> Pipeline:
"""Load pretrained pipeline
Parameters
----------
train_dir : Path
Path to training directory (i.e. the one that contains `params.yml`
created by calling `pyannote-pipeline train ...`)
Returns
-------
pipeline : Pipeline
Pretrained pipeline
"""
config_yml = train_dir.parents[1] / 'config.yml'
with open(config_yml, 'r') as fp:
config = yaml.load(fp, Loader=yaml.SafeLoader)
pipeline_name = config['pipeline']['name']
Klass = get_class_by_name(
pipeline_name, default_module_name='pyannote.audio.pipeline')
pipeline = Klass(**config['pipeline'].get('params', {}))
return pipeline.load_params(train_dir / 'params.yml')
|
rumblefishrobotics/rf_common_ws
|
src/rf_display_ssd1306/nodes/rf_display_ssd1306_tester_node.py
|
#!/usr/bin/env python3
"""
Node generating test messages for the display node. One could just use
rostopic pub on rf_display_ssd1306_topic with an RFDisplayData payload,
but that payload is a fair bit to keep typing / editing. This node is
purely a testing tool.
"""
import rospy
#from std_msgs.msg import String
from rf_display.msg import RFDisplayData
def publish_rf_display_messages():
# topic, message type, queue size
pub = rospy.Publisher('rf_display_ssd1306_topic',
RFDisplayData, queue_size=10)
rospy.init_node('rf_display_ssd1306_tester_node', anonymous=True)
rate = rospy.Rate(0.2) # 0.2 hz
# create custom message
display_msg = RFDisplayData()
count = 0
while not rospy.is_shutdown():
count = count + 1
# Count just adds some changing text, on each line, with each message
display_msg.screen_line1 = "line 1 (%s)" % count
display_msg.screen_line2 = "line 2 (%s)" % count
display_msg.screen_line3 = "line 3 (%s)" % count
display_msg.screen_line4 = "line 4 (%s)" % count
rospy.loginfo(display_msg)
pub.publish(display_msg)
rate.sleep()
if __name__ == '__main__':
try:
publish_rf_display_messages()
except rospy.ROSInterruptException:
pass
|
rumblefishrobotics/rf_common_ws
|
src/rf_display_ssd1306/nodes/rf_display_ssd1306_node.py
|
#!/usr/bin/env python3
"""
display_ssd1306 provides a ROS wrapper for the SSD1306 LED, using the
Adafruit drivers for their display. It provides four lines of text,
stacked one above the other. Each line has up to 32 characters.
"""
import time
import subprocess
import rospy
from PIL import Image, ImageDraw, ImageFont
from rf_display.msg import RFDisplayData
from board import SCL, SDA
import busio
import adafruit_ssd1306
#-------------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------------
RF_DISPLAY_WIDTH = 128
RF_DISPLAY_HEIGHT = 32
#-------------------------------------------------------------------------------
# Functions
#-------------------------------------------------------------------------------
def shutdown_hook():
"""Clear the display on shutdown."""
global g_display
# Clear the display
g_display.fill(0)
g_display.show()
def init_display():
"""Startup initilization of display."""
global g_i2c
global g_display
global g_draw
global g_image
global g_font
g_i2c = busio.I2C(SCL, SDA)
g_display = adafruit_ssd1306.SSD1306_I2C(RF_DISPLAY_WIDTH,
RF_DISPLAY_HEIGHT, g_i2c)
# Clear the display
g_display.fill(0)
g_display.show()
# Create a blank image for drawing
width = g_display.width
height = g_display.height
g_image = Image.new("1", (width, height))
# Draw a filled box to clear the image
g_draw = ImageDraw.Draw(g_image)
g_draw.rectangle((0, 0, width, height), outline=0, fill=0)
g_font = ImageFont.load_default()
def display_callback(display_data):
"""Pushes incomming data to display onto display"""
rospy.loginfo("(%s) heard: (%s),(%s),(%s),(%s)" % (rospy.get_name(),
display_data.screen_line1,
display_data.screen_line2,
display_data.screen_line3,
display_data.screen_line4))
g_draw.rectangle((0, 0, RF_DISPLAY_WIDTH, RF_DISPLAY_HEIGHT),
outline=0, fill=0)
top = -2
g_draw.text((0, top + 0), display_data.screen_line1, font=g_font, fill=255)
g_draw.text((0, top + 8), display_data.screen_line2, font=g_font, fill=255)
g_draw.text((0, top + 16), display_data.screen_line3, font=g_font, fill=255)
g_draw.text((0, top + 24), display_data.screen_line4, font=g_font, fill=255)
g_display.image(g_image)
g_display.show()
def listen_for_messages():
"""Creates ROS display node and starts listening for data to display."""
init_display()
rospy.init_node("rf_display_ssd1306_node")
#(topic),(custom message name),(name of callback function)
rospy.Subscriber("rf_display_ssd1306_topic",
RFDisplayData,
display_callback)
rospy.spin()
# Register the ROS shutdown hook
# rospy.on_shutdown(shutdown_hook)
#-------------------------------------------------------------------------------
# Startup source singleton
#-------------------------------------------------------------------------------
if __name__ == '__main__':
try:
listen_for_messages()
except rospy.ROSInterruptException:
pass
|
backyio/go-timezone
|
tools/gen-var-timezones.py
|
import json
import sys
from jinja2 import Template
def var_timezones():
data = {}
abbr_timezones_json = sys.argv[1]
with open(abbr_timezones_json) as f:
data = json.load(f)
tpl_text = '''var timezones = map[string][]string{
{%- for abbr in timezones %}
"{{ abbr }}": []string{
{%- for tz in timezones[abbr] %}
"{{ tz }}",
{%- endfor %}
},
{%- endfor %}
}'''
tpl = Template(tpl_text)
print(tpl.render({"timezones": data}))
if __name__ == "__main__":
var_timezones()
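# Illustrative invocation (file names are hypothetical):
#   python tools/gen-var-timezones.py abbr_timezones.json > timezones.go
# The script reads the JSON mapping passed as the first argument and prints the
# generated Go `timezones` map to stdout.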
|
backyio/go-timezone
|
tools/gen-var-tzabbrinfos.py
|
import json
import sys
from jinja2 import Template
def var_tzabbrinfos():
data = {}
data2 = {}
abbrs_json = sys.argv[1]
military_abbrs_json = sys.argv[2]
with open(abbrs_json) as f:
data = json.load(f)
with open(military_abbrs_json) as f:
data2 = json.load(f)
tpl_text = '''var tzAbbrInfos = map[string][]*TzAbbreviationInfo{
{%- for abbr in tzinfos %}
"{{ abbr }}": []*TzAbbreviationInfo{
{%- for tz in tzinfos[abbr] %}
{
countryCode: "{{ tz["country_code"] }}",
isDST: {{ tz["is_dst"] | lower }},
name: "{{ tz["name"] }}",
offset: {{ tz["offset"] }},
offsetHHMM: "{{ tz["offset_hhmm"] }}",
},
{%- endfor %}
},
{%- endfor %}
// military timezones
{%- for abbr in military_tzinfos %}
"{{ abbr }}": []*TzAbbreviationInfo{
{
name: "{{ military_tzinfos[abbr]["name"] }}",
offset: {{ military_tzinfos[abbr]["offset"] }},
offsetHHMM: "{{ military_tzinfos[abbr]["offset_hhmm"] }}",
},
},
{%- endfor %}
}'''
tpl = Template(tpl_text)
print(tpl.render({"tzinfos": data, "military_tzinfos": data2}))
if __name__ == "__main__":
var_tzabbrinfos()
|
backyio/go-timezone
|
tools/gen-var-tzinfos.py
|
import json
import sys
from jinja2 import Template
def var_tzinfos():
data = {}
timezones_json = sys.argv[1]
with open(timezones_json) as f:
data = json.load(f)
tpl_text = '''var tzInfos = map[string]*TzInfo{
{%- for tz in tzinfos %}
"{{ tz }}": &TzInfo{
longGeneric: "{{ tzinfos[tz]["long"]["generic"] }}",
longStandard: "{{ tzinfos[tz]["long"]["standard"] }}",
longDaylight: "{{ tzinfos[tz]["long"]["daylight"] }}",
shortGeneric: "{{ tzinfos[tz]["short"]["generic"] }}",
shortStandard: "{{ tzinfos[tz]["short"]["standard"] }}",
shortDaylight: "{{ tzinfos[tz]["short"]["daylight"] }}",
standardOffset: {{ tzinfos[tz]["standard_offset"] }},
daylightOffset: {{ tzinfos[tz]["daylight_offset"] }},
standardOffsetHHMM: "{{ tzinfos[tz]["standard_offset_hhmm"] }}",
daylightOffsetHHMM: "{{ tzinfos[tz]["daylight_offset_hhmm"] }}",
countryCode: "{{ tzinfos[tz]["country_code"] }}",
isDeprecated: {{ tzinfos[tz]["is_deprecated"] | lower }},
linkTo: "{{ tzinfos[tz]["link_to"] }}",
lastDST: {{ tzinfos[tz]["last_dst"] }},
},
{%- endfor %}
}'''
tpl = Template(tpl_text)
print(tpl.render({"tzinfos": data}))
if __name__ == "__main__":
var_tzinfos()
|
kristoffer-paulsson/angelostools
|
src/angelostools/pyxscanner.py
|
"""
The .pyx scanner helps you to scan a whole package hierarchy for Cython pyx files and
encloses them in Extensions from setuptools.
"""
import glob
from pathlib import Path
from setuptools import Extension
class PyxScanner:
"""Scans hierarchically for .pyx files in given package."""
def __init__(self, base_path: str, glob: list = None, extra: dict = None, basic: dict = None):
self.__base_path = str(Path(base_path))
self.__globlist = glob if glob else ["**.pyx"]
self.__pkgdata = extra if extra else dict()
self.__data = basic if basic else dict()
def scan(self) -> list:
"""Build list of Extensions to be cythonized."""
glob_result = list()
for pattern in self.__globlist:
glob_path = str(Path(self.__base_path, pattern))
glob_result += glob.glob(glob_path, recursive=True)
extensions = list()
for module in glob_result:
package = ".".join(Path(module[len(self.__base_path) + 1:-4]).parts)
data = self.__pkgdata[package] if package in self.__pkgdata else {}
core = {"name": package, "sources": [module]}
kwargs = {**self.__data, **data, **core}
extensions.append(Extension(**kwargs))
return extensions
def list(self) -> list:
"""Build list of modules found."""
glob_result = list()
for pattern in self.__globlist:
glob_path = str(Path(self.__base_path, pattern))
glob_result += glob.glob(glob_path, recursive=True)
modules = list()
for module in glob_result:
package = ".".join(Path(module[len(self.__base_path) + 1:-4]).parts)
modules.append(package)
return modules
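# Hypothetical usage sketch (illustration only, not part of the original file):
# feeding the scanned Extensions into Cython's cythonize() from a setup.py.
# The source root, package name and per-module settings below are made up.
def _example_setup():  # never called; shown for clarity
    from Cython.Build import cythonize
    from setuptools import setup

    scanner = PyxScanner(
        "src",                                       # hypothetical source root
        glob=["**/*.pyx"],                           # recurse into subpackages
        extra={"mypkg.fast": {"libraries": ["m"]}},  # per-module overrides
        basic={"include_dirs": ["include"]},         # defaults for every module
    )
    setup(ext_modules=cythonize(scanner.scan()))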
|
kristoffer-paulsson/angelostools
|
src/angelostools/setup/script.py
|
<filename>src/angelostools/setup/script.py
#
# Copyright (c) 2018-2020 by <NAME> <<EMAIL>>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# <NAME> - initial implementation
#
import subprocess
from pathlib import Path
from setuptools import Command
class Script(Command):
"""Compile the executable"""
user_options = [
("name=", "n", "Name of the script."),
("prefix=", "p", "Possible prefix where to link against.")
]
def initialize_options(self):
"""Initialize options"""
self.name = None
self.prefix = None
def finalize_options(self):
"""Finalize options"""
pass
def run(self):
dist = str(Path(self.prefix, "bin").resolve()) if self.prefix else str(Path("./bin").absolute())
home = str(Path("./").absolute())
subprocess.check_call(
"cp {0} {2}/{1}".format(
Path(home, "scripts", self.name + "_entry_point.sh"), self.name, dist), cwd=home, shell=True)
|
kristoffer-paulsson/angelostools
|
src/angelostools/nsscanner.py
|
<filename>src/angelostools/nsscanner.py
"""Scanners that comply with namespace packages."""
import os
from pathlib import Path
from typing import Iterator, Union
class NamespacePackageScanner:
"""A scanner for namespace packages."""
def __init__(self, namespace: str, root: Path = None):
self.__namespace = namespace
self.__root = root.resolve() if root else Path(os.curdir).resolve()
self.__root_parts_cnt = len(self.__root.parts)
    def pkg_iter(self) -> Iterator[Path]:
"""Iterator over all namespace packages"""
for pkg_path in self.__root.glob(self.__namespace + "-*/"):
yield pkg_path
def pkg_name(self, pkg_path: Path) -> str:
"""Convert package path into its name."""
return pkg_path.parts[-1]
@property
def packages(self) -> list:
"""Property over all namespace packages."""
return [self.pkg_name(pkg_path) for pkg_path in self.pkg_iter()]
    def _dir_iter(self, pkg_path: Path, rel_path: Union[str, list], pattern: str) -> Iterator[Path]:
"""Internal iterator for directories and extensions in a namespace package."""
for file_path in pkg_path.joinpath(rel_path).rglob(pattern):
yield file_path
    def mod_iter(self, pkg_path: Path) -> Iterator[Path]:
"""Iterate over all modules in named namespace package."""
for mod_path in self._dir_iter(pkg_path, "src", "*.pyx"):
yield mod_path
    def tests_iter(self, pkg_path: Path) -> Iterator[Path]:
"""Iterate over all tests in named namespace package."""
for mod_path in self._dir_iter(pkg_path, "tests", "test_*.py"):
yield mod_path
def mod_imp_path(self, mod_path: Path) -> str:
"""Converts module path to full package name."""
return ".".join(mod_path.parts[self.__root_parts_cnt+2:-1] + (mod_path.stem,))
|
kristoffer-paulsson/angelostools
|
src/angelostools/setup/vendor.py
|
<reponame>kristoffer-paulsson/angelostools
#
# Copyright (c) 2018-2020 by <NAME> <<EMAIL>>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# <NAME> - initial implementation
#
"""Vendor installer, downloads, compiles and install libraries from source."""
import logging
import os
import platform
import shutil
import subprocess
import tarfile
import urllib.request
import zipfile
from pathlib import Path
from tempfile import TemporaryDirectory
from abc import ABC, abstractmethod
from setuptools import Command
class VendorLibrary(ABC):
"""Base class for vendors."""
@abstractmethod
def check(self) -> bool:
"""Check if target is satisfied."""
pass
@abstractmethod
def download(self):
"""Download source tarball."""
pass
@abstractmethod
def extract(self):
"""Extract source file."""
pass
def uncompress(self, archive, target):
"""Uncompress Zip or Tar files."""
if zipfile.is_zipfile(archive):
zar = zipfile.ZipFile(archive)
zar.extractall(target)
elif tarfile.is_tarfile(archive):
tar = tarfile.open(archive)
tar.extractall(target)
tar.close()
else:
raise OSError("Unkown zip/archive format")
@abstractmethod
def build(self):
"""Build sources."""
pass
@abstractmethod
def install(self):
"""Install binaries."""
pass
@abstractmethod
def close(self):
"""Clean up temporary files."""
pass
class VendorCompile(VendorLibrary):
def __init__(
self, base_dir: str, name: str, download: str,
local: str, internal: str, check: str, prefix: str = None
):
"""
Example:
name = "libsodium"
download = "https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable.tar.gz"
local = "libsodium-1.0.18.tar.gz"
internal = "libsodium-stable"
target = "./usr/local/lib/libsodium.a"
"""
self._base = base_dir
self._prefix = str(Path(prefix).resolve()) if isinstance(prefix, str) else ""
self._name = name
self._download = download
self._local = local
self._internal = internal
self._check = check
self._tarball = Path(self._base, "tarball", self._local)
self._temp = TemporaryDirectory()
self._archive = str(Path(self._temp.name, self._local))
self._target = str(Path(self._temp.name, self._name))
self._work = str(Path(self._temp.name, self._name, self._internal))
def check(self) -> bool:
"""Check if target is reached"""
return Path(self._base, self._check).exists()
def download(self):
"""Download sources tarball."""
if not self._tarball.exists():
urllib.request.urlretrieve(self._download, self._archive)
shutil.copyfile(self._archive, str(self._tarball))
else:
shutil.copyfile(str(self._tarball), self._archive)
def extract(self):
"""Extract source file."""
self.uncompress(self._archive, self._target)
def close(self):
"""Clean up temporary files."""
self._temp.cleanup()
class VendorDownload(VendorLibrary):
"""Vendor installer for third party libraries i source code form."""
def __init__(
self, base_dir: str, name: str, download: str,
local: str, internal: str, check: str, prefix: str = None
):
"""
Example:
name = "libsodium"
download = "https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable.tar.gz"
local = "libsodium-1.0.18.tar.gz"
internal = "libsodium-stable"
target = "./usr/local/lib/libsodium.a"
"""
self._base = base_dir
self._prefix = str(Path(prefix).resolve()) if isinstance(prefix, str) else ""
self._name = name
self._download = download
self._local = local
self._internal = internal
self._check = check
self._tarball = Path(self._base, "tarball", self._local)
self._target = str(Path(self._base, "tarball", self._name))
def check(self) -> bool:
"""Check if target is reached"""
return Path(self._base, self._check).exists()
def download(self):
"""Download sources tarball."""
if not self._tarball.exists():
urllib.request.urlretrieve(self._download, self._tarball)
def extract(self):
"""Extract source file."""
self.uncompress(str(self._tarball), self._target)
def build(self):
pass
def install(self):
pass
def close(self):
pass
class Vendor(Command):
"""Install third party vendor libraries."""
user_options = [
("base-dir=", "d", "Base directory."),
("compile=", "c", "Download, compile and install source tarball."),
("prefix=", "p", "Possible prefix where to install")
]
def initialize_options(self):
"""Initialize options"""
self.base_dir = None
self.compile = None
self.prefix = None
def finalize_options(self):
"""Finalize options"""
pass
def do_compile(self):
"""Execute the compile command."""
if not self.compile:
return
for value in self.compile:
logging.info(self.base_dir)
klass = value["class"]
del value["class"]
library = klass(self.base_dir, **value, prefix=self.prefix)
if not library.check():
library.download()
library.extract()
library.build()
library.install()
library.close()
def run(self):
"""Install vendors."""
self.do_compile()
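# Hypothetical sketch (illustration only, not part of the original file): a
# concrete subclass filling in the build/install steps that VendorCompile
# leaves abstract. The configure/make commands are assumptions for an
# autotools-style project such as the libsodium example in the docstrings.
class _ExampleAutotoolsLibrary(VendorCompile):
    """Illustrative vendor library built with ./configure && make install."""

    def build(self):
        """Configure and compile the extracted sources."""
        prefix = self._prefix or str(Path(self._base).resolve())
        subprocess.check_call(
            "./configure --prefix={}".format(prefix), cwd=self._work, shell=True)
        subprocess.check_call("make", cwd=self._work, shell=True)

    def install(self):
        """Install the compiled binaries under the prefix."""
        subprocess.check_call("make install", cwd=self._work, shell=True)

# The Vendor command above would then be driven programmatically (e.g. from a
# custom setup script) with compile entries such as:
#   {"class": _ExampleAutotoolsLibrary, "name": "libsodium",
#    "download": "https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable.tar.gz",
#    "local": "libsodium-1.0.18.tar.gz", "internal": "libsodium-stable",
#    "check": "usr/local/lib/libsodium.a"}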
|
kristoffer-paulsson/angelostools
|
src/angelostools/setup/executable.py
|
<reponame>kristoffer-paulsson/angelostools
#
# Copyright (c) 2018-2020 by <NAME> <<EMAIL>>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# <NAME> - initial implementation
#
import subprocess
import sys
import tempfile
from pathlib import Path
from setuptools import Command
class Executable(Command):
"""Compile the executable"""
user_options = [
("name=", "n", "Entry name."),
("prefix=", "p", "Possible prefix where to link against.")
]
def initialize_options(self):
"""Initialize options"""
self.name = None
self.prefix = None
def finalize_options(self):
"""Finalize options"""
pass
def run(self):
major, minor, _, _, _ = sys.version_info
PY_VER = "{0}.{1}".format(major, minor)
config = str(
Path(self.prefix, "bin", "python{}-config".format(PY_VER)).resolve()
) if self.prefix else "python{}-config".format(PY_VER)
dist = str(Path(self.prefix, "bin").resolve()) if self.prefix else str(Path("./bin").absolute())
temp = tempfile.TemporaryDirectory()
temp_name = str(Path(temp.name, self.name).absolute())
home = str(Path("./").absolute())
cflags = subprocess.check_output(
"{0} --cflags".format(config), stderr=subprocess.STDOUT, shell=True).decode()
# Debian 10 specific
cflags = cflags.replace("-specs=/usr/share/dpkg/no-pie-compile.specs", "")
# https://docs.python.org/3.8/whatsnew/3.8.html#debug-build-uses-the-same-abi-as-release-build
if major == 3 and minor >= 8:
ldflags = subprocess.check_output(
"{0} --ldflags --embed".format(config), stderr=subprocess.STDOUT, shell=True).decode()
else:
ldflags = subprocess.check_output(
"{0} --ldflags".format(config), stderr=subprocess.STDOUT, shell=True).decode()
subprocess.check_call(
"cython --embed -3 -o {0}.c {1}".format(
temp_name, Path("./scripts/", self.name+"_entry_point.pyx")), cwd=home, shell=True)
subprocess.check_call(
"gcc -o {0}.o -c {0}.c {1}".format(
temp_name, cflags), cwd=temp.name, shell=True)
subprocess.check_call(
"gcc -rdynamic -o {0} {1}.o {2}".format(
Path(dist, self.name), temp_name, ldflags), cwd=home, shell=True)
# -rdynamic --> --export-dynamic
temp.cleanup()
|
broadstripes/loggly-sidecar
|
run.py
|
import os, sys, re
if('LOGGLY_AUTH_TOKEN' not in os.environ):
print('Missing $LOGGLY_AUTH_TOKEN')
sys.exit(1)
if('LOGGLY_TAG' not in os.environ):
print('Missing $LOGGLY_TAG')
sys.exit(1)
auth_token = os.environ['LOGGLY_AUTH_TOKEN']
tags = []
for tag in os.environ['LOGGLY_TAG'].split(","):
tags.append('tag=\\"{}\\"'.format(tag))
with open('/etc/syslog-ng/syslog-ng.conf.tmpl') as conf_template_file:
conf_template = conf_template_file.read()
conf = re.sub('LOGGLY_AUTH_TOKEN', auth_token, conf_template)
conf = re.sub('LOGGLY_TAG', ' '.join(tags), conf)
with open('/etc/syslog-ng/syslog-ng.conf', 'w') as conf_file:
conf_file.write(conf)
os.execlp('syslog-ng', 'syslog-ng', '--foreground', '--stderr', '--verbose')
|
rkyoto/pipe_event
|
pipe_event2.py
|
<filename>pipe_event2.py
'''
pipe_event2
===========
This module provides an Event class that behaves just like threading.Event
but is based on a pipe (a read/write file-descriptor pair) created with os.pipe().
Before Python 3.3, monotonic time was not available, so adjusting the system
clock may affect Event.wait() when a specific timeout is set.
The following note can be found in PEP 0418:
"If a program uses the system time to schedule events or to implement
a timeout, it may fail to run events at the right moment or stop the
timeout too early or too late when the system time is changed manually
or adjusted automatically by NTP."
This module demonstrates an alternative Event implementation on Unix-like
systems which is not affected by the above issue.
'''
import sys
import os
import fcntl
import select
import threading
def _clear_pipe(fd):
try:
while True:
if not os.read(fd, 1024):
break
except OSError:
pass
class Event:
def __init__(self):
_r, _w = os.pipe() # create the pipe
# set pipe to non-blocking
fl = fcntl.fcntl(_r, fcntl.F_GETFL)
fcntl.fcntl(_r, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self._r_fd = _r
self._w_fd = _w
self._lock = threading.Lock()
def __del__(self):
os.close(self._r_fd)
os.close(self._w_fd)
def is_set(self):
return self.wait(0) # just poll the pipe
def isSet(self):
return self.is_set()
def set(self):
with self._lock:
if not self.is_set():
os.write(self._w_fd, b'\n')
def clear(self):
with self._lock:
_clear_pipe(self._r_fd)
def wait(self, timeout=None):
try:
ret = select.select([self._r_fd], [], [], timeout)[0]
if ret:
return True
except select.error as e:
sys.stderr.write(str(e) + '\n')
return False
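# Hypothetical usage sketch (illustration only, not part of the original file):
# the pipe-backed Event drops in where threading.Event would normally be used,
# for example to signal that a worker thread has finished.
def _example_usage():  # never called; shown for clarity
    done = Event()

    def worker():
        # ... perform some work, then signal completion
        done.set()

    t = threading.Thread(target=worker)
    t.start()
    if done.wait(timeout=5.0):
        print("worker finished")
    t.join()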
|
rkyoto/pipe_event
|
pipe_event.py
|
'''
pipe_event
==========
This module provides an Event class that behaves just like threading.Event
but is based on a pipe (a read/write file-descriptor pair) created with os.pipe().
Before Python 3.3, monotonic time was not available, so adjusting the system
clock may affect Event.wait() when a specific timeout is set.
The following note can be found in PEP 0418:
"If a program uses the system time to schedule events or to implement
a timeout, it may fail to run events at the right moment or stop the
timeout too early or too late when the system time is changed manually
or adjusted automatically by NTP."
This module demonstrates an alternative Event implementation on Unix-like
systems which is not affected by the above issue.
'''
import os
import fcntl
import select
import threading
class Event:
def __init__(self):
r_fd, w_fd = os.pipe() # create the pipes
# set read() to non-blocking
fl = fcntl.fcntl(r_fd, fcntl.F_GETFL)
fcntl.fcntl(r_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
# create file objects
self.r_pipe = os.fdopen(r_fd, 'rb', 0)
self.w_pipe = os.fdopen(w_fd, 'wb', 0)
self.lock = threading.Lock() # create a lock to guard the pipes
def __del__(self):
self.r_pipe.close()
self.w_pipe.close()
def is_set(self):
return self.wait(0) # just poll the pipe
def isSet(self):
return self.is_set()
def set(self):
self.lock.acquire()
try:
if not self.is_set():
self.w_pipe.write(b'\n')
except:
self.lock.release()
raise
self.lock.release()
def clear(self):
self.lock.acquire()
try:
self.r_pipe.read()
except:
pass
self.lock.release()
def wait(self, timeout=None):
ret = select.select([self.r_pipe], [], [], timeout)[0]
return len(ret) > 0
|
chandru99/neuralnilm
|
neuralnilm/data/stridesource.py
|
from __future__ import print_function, division
from copy import copy
from datetime import timedelta
import numpy as np
import pandas as pd
import nilmtk
from nilmtk.timeframegroup import TimeFrameGroup
from nilmtk.timeframe import TimeFrame
from neuralnilm.data.source import Sequence
from neuralnilm.utils import check_windows
from neuralnilm.data.source import Source
from neuralnilm.consts import DATA_FOLD_NAMES
import logging
logger = logging.getLogger(__name__)
class StrideSource(Source):
"""
Attributes
----------
data : dict
Structure example:
{<train | unseen_appliances | unseen_activations_of_seen_appliances>: {
<building_name>: pd.DataFrame of with 2 cols: mains, target
}}
_num_seqs : pd.Series with 2-level hierarchical index
L0 : train, unseen_appliances, unseen_activations_of_seen_appliances
L1 : building_names
"""
def __init__(self, target_appliance,
seq_length, filename, windows, sample_period,
stride=None,
rng_seed=None):
self.target_appliance = target_appliance
self.seq_length = seq_length
self.filename = filename
check_windows(windows)
self.windows = windows
self.sample_period = sample_period
self.stride = self.seq_length if stride is None else stride
self._reset()
super(StrideSource, self).__init__(rng_seed=rng_seed)
# stop validation only when we've gone through all validation data
self.num_batches_for_validation = None
self._load_data_into_memory()
self._compute_num_sequences_per_building()
def _reset(self):
self.data = {}
self._num_seqs = pd.Series()
def _load_data_into_memory(self):
logger.info("Loading NILMTK data...")
# Load dataset
dataset = nilmtk.DataSet(self.filename)
for fold, buildings_and_windows in self.windows.items():
for building_i, window in buildings_and_windows.items():
dataset.set_window(*window)
elec = dataset.buildings[building_i].elec
building_name = (
dataset.metadata['name'] +
'_building_{}'.format(building_i))
# Mains
logger.info(
"Loading data for {}...".format(building_name))
mains_meter = elec.mains()
mains_good_sections = mains_meter.good_sections()
appliance_meter = elec[self.target_appliance]
good_sections = appliance_meter.good_sections(
sections=mains_good_sections)
def load_data(meter):
return meter.power_series_all_data(
sample_period=self.sample_period,
sections=good_sections).astype(np.float32).dropna()
mains_data = load_data(mains_meter)
appliance_data = load_data(appliance_meter)
df = pd.DataFrame(
{'mains': mains_data, 'target': appliance_data},
dtype=np.float32).dropna()
del mains_data
del appliance_data
if not df.empty:
self.data.setdefault(fold, {})[building_name] = df
logger.info(
"Loaded data from building {} for fold {}"
" from {} to {}."
.format(building_name, fold, df.index[0], df.index[-1]))
dataset.store.close()
logger.info("Done loading NILMTK mains data.")
def _compute_num_sequences_per_building(self):
index = []
all_num_seqs = []
for fold, buildings in self.data.items():
for building_name, df in buildings.items():
remainder = len(df) - self.seq_length
num_seqs = np.ceil(remainder / self.stride) + 1
num_seqs = max(0 if df.empty else 1, int(num_seqs))
index.append((fold, building_name))
all_num_seqs.append(num_seqs)
multi_index = pd.MultiIndex.from_tuples(
index, names=["fold", "building_name"])
self._num_seqs = pd.Series(all_num_seqs, multi_index)
def get_sequence(self, fold='train', enable_all_appliances=False):
if enable_all_appliances:
raise ValueError("`enable_all_appliances` is not implemented yet"
" for StrideSource!")
# select building
building_divisions = self._num_seqs[fold].cumsum()
total_seq_for_fold = self._num_seqs[fold].sum()
building_row_i = 0
building_name = building_divisions.index[0]
prev_division = 0
for seq_i in range(total_seq_for_fold):
if seq_i == building_divisions.iloc[building_row_i]:
prev_division = seq_i
building_row_i += 1
building_name = building_divisions.index[building_row_i]
seq_i_for_building = seq_i - prev_division
start_i = seq_i_for_building * self.stride
end_i = start_i + self.seq_length
data_for_seq = self.data[fold][building_name].iloc[start_i:end_i]
def get_data(col):
data = data_for_seq[col].values
n_zeros_to_pad = self.seq_length - len(data)
data = np.pad(
data, pad_width=(0, n_zeros_to_pad), mode='constant')
return data[:, np.newaxis]
seq = Sequence(self.seq_length)
seq.input = get_data('mains')
seq.target = get_data('target')
assert len(seq.input) == self.seq_length
assert len(seq.target) == self.seq_length
# Set mask
seq.weights = np.ones((self.seq_length, 1), dtype=np.float32)
n_zeros_to_pad = self.seq_length - len(data_for_seq)
if n_zeros_to_pad > 0:
seq.weights[-n_zeros_to_pad:, 0] = 0
# Set metadata
seq.metadata = {
'seq_i': seq_i,
'building_name': building_name,
'total_num_sequences': total_seq_for_fold,
'start_date': data_for_seq.index[0],
'end_date': data_for_seq.index[-1]
}
yield seq
@classmethod
def _attrs_to_remove_for_report(cls):
return ['data', '_num_seqs', 'rng']
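# Hypothetical usage sketch (illustration only, not part of the original file):
# the file name, appliance, building numbers and date windows below are made
# up, but the nesting of `windows` ({fold: {building_i: (start, end)}}) mirrors
# what _load_data_into_memory iterates over.
def _example_usage():  # never called; shown for clarity
    source = StrideSource(
        target_appliance='kettle',
        seq_length=512,
        filename='/data/ukdale.h5',
        windows={
            'train': {1: ('2013-04-01', '2013-06-01')},
            'unseen_appliances': {2: ('2013-06-01', '2013-07-01')},
        },
        sample_period=6,
    )
    for seq in source.get_sequence(fold='train'):
        print(seq.metadata['building_name'], seq.input.shape)
        break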
|
chandru99/neuralnilm
|
neuralnilm/data/datathread.py
|
from __future__ import print_function
from threading import Thread, Event
from queue import Queue, Empty
class DataThread(Thread):
def __init__(self, data_pipeline, max_queue_size=8, **get_batch_kwargs):
super(DataThread, self).__init__(name='neuralnilm-data-process')
self._stop = Event()
self._queue = Queue(maxsize=max_queue_size)
self.data_pipeline = data_pipeline
self._get_batch_kwargs = get_batch_kwargs
def run(self):
while not self._stop.is_set():
batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)
self._queue.put(batch)
def get_batch(self, timeout=30):
if self.is_alive():
return self._queue.get(timeout=timeout)
else:
raise RuntimeError("Process is not running!")
def stop(self):
self._stop.set()
try:
self._queue.get(block=False)
except Empty:
pass
self.join()
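# Hypothetical usage sketch (illustration only, not part of the original file):
# `pipeline` stands in for a neuralnilm DataPipeline-like object exposing
# get_batch(); it is not defined here.
def _example_usage(pipeline):  # never called; shown for clarity
    thread = DataThread(pipeline, max_queue_size=4)
    thread.start()
    try:
        batch = thread.get_batch(timeout=30)
        print(type(batch))
    finally:
        thread.stop()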
|
isuhao/java-cef
|
tools/git_util.py
|
# Copyright (c) 2014 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
from exec_util import exec_cmd
import os
def is_checkout(path):
""" Returns true if the path represents a git checkout. """
return os.path.exists(os.path.join(path, '.git'))
def get_hash(path = '.', branch = 'HEAD'):
""" Returns the git hash for the specified branch/tag/hash. """
cmd = "git rev-parse %s" % branch
result = exec_cmd(cmd, path)
if result['out'] != '':
return result['out'].strip()
return 'Unknown'
def get_url(path = '.'):
""" Returns the origin url for the specified path. """
cmd = "git config --get remote.origin.url"
result = exec_cmd(cmd, path)
if result['out'] != '':
return result['out'].strip()
return 'Unknown'
def get_commit_number(path = '.', branch = 'HEAD'):
""" Returns the number of commits in the specified branch/tag/hash. """
cmd = "git rev-list --count %s" % branch
result = exec_cmd(cmd, path)
if result['out'] != '':
return result['out'].strip()
return '0'
def get_changed_files(path = '.'):
""" Retrieves the list of changed files. """
# not implemented
return []
|
isuhao/java-cef
|
tools/make_readme.py
|
# Copyright (c) 2014 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from date_util import *
from file_util import *
from optparse import OptionParser
import os
import re
import shlex
import subprocess
import git_util as git
import sys
import zipfile
def get_readme_component(name):
""" Loads a README file component. """
paths = []
# platform directory
paths.append(os.path.join(script_dir, 'distrib', platform))
# shared directory
paths.append(os.path.join(script_dir, 'distrib'))
# load the file if it exists
for path in paths:
file = os.path.join(path, 'README.' +name + '.txt')
if path_exists(file):
return read_file(file)
raise Exception('Readme component not found: ' + name)
def create_readme():
""" Creates the README.TXT file. """
# gather the components
header_data = get_readme_component('header')
mode_data = get_readme_component('standard')
redistrib_data = get_readme_component('redistrib')
footer_data = get_readme_component('footer')
# format the file
data = header_data + '\n\n' + mode_data + '\n\n' + redistrib_data + '\n\n' + footer_data
data = data.replace('$JCEF_URL$', jcef_url)
data = data.replace('$JCEF_REV$', jcef_commit_hash)
data = data.replace('$JCEF_VER$', jcef_ver)
data = data.replace('$CEF_URL$', cef_url)
data = data.replace('$CEF_VER$', cef_ver)
data = data.replace('$CHROMIUM_URL$', chromium_url)
data = data.replace('$CHROMIUM_VER$', chromium_ver)
data = data.replace('$DATE$', date)
if platform == 'win32':
platform_str = 'Windows 32-bit'
elif platform == 'win64':
platform_str = 'Windows 64-bit'
elif platform == 'macosx64':
platform_str = 'Mac OS-X 64-bit'
elif platform == 'linux32':
platform_str = 'Linux 32-bit'
elif platform == 'linux64':
platform_str = 'Linux 64-bit'
data = data.replace('$PLATFORM$', platform_str)
write_file(os.path.join(output_dir, 'README.txt'), data)
if not options.quiet:
sys.stdout.write('Creating README.TXT file.\n')
# cannot be loaded as a module
if __name__ != "__main__":
sys.stderr.write('This file cannot be loaded as a module!')
sys.exit()
# parse command-line options
disc = """
This utility builds the JCEF README.txt for the distribution.
"""
parser = OptionParser(description=disc)
parser.add_option('--output-dir', dest='outputdir', metavar='DIR',
help='output directory [required]')
parser.add_option('--platform', dest='platform',
help='target platform for distribution [required]')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='do not output detailed status information')
(options, args) = parser.parse_args()
# the outputdir option is required
if options.outputdir is None or options.platform is None:
parser.print_help(sys.stderr)
sys.exit(1)
output_dir = options.outputdir
# Validate the target platform.
platform = options.platform
if (platform != 'linux32' and platform != 'linux64' and
platform != 'macosx64' and
platform != 'win32' and platform != 'win64'):
print 'Unsupported target \"'+platform+'\"'
sys.exit(1)
# script directory
script_dir = os.path.dirname(__file__)
# JCEF root directory
jcef_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
# Read and parse the CEF version file.
args = {}
read_readme_file(os.path.join(jcef_dir, 'jcef_build', 'README.txt'), args)
# retrieve url and revision information for CEF
if not git.is_checkout(jcef_dir):
raise Exception('Not a valid checkout: %s' % (jcef_dir))
jcef_commit_number = git.get_commit_number(jcef_dir)
jcef_commit_hash = git.get_hash(jcef_dir)
jcef_url = git.get_url(jcef_dir)
jcef_ver = '%s.%s.%s.g%s' % (args['CEF_MAJOR'], args['CEF_BUILD'], jcef_commit_number, jcef_commit_hash[:7])
date = get_date()
cef_ver = args['CEF_VER']
cef_url = args['CEF_URL']
chromium_ver = args['CHROMIUM_VER']
chromium_url = args['CHROMIUM_URL']
# create the README.TXT file
create_readme()
|
tehtechguy/annt
|
src/setup.py
|
# setup.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/30/15
#
# Description : Installs the annt project
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
# Native imports
from distutils.core import setup
import shutil
# Install the program
setup(
name='annt',
version='0.5.0',
description="Artificial Neural Network Toolbox",
author='<NAME>',
author_email='<EMAIL>',
url='http://techtorials.me',
packages=['annt', 'annt.examples', 'annt.experimental'],
package_data={'annt.examples':['data/mnist.pkl']}
)
# Remove the unnecessary build folder
try:
shutil.rmtree('build')
except:
pass
|
tehtechguy/annt
|
src/annt/util.py
|
<filename>src/annt/util.py
# util.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/02/15
#
# Description : Utility module.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Utility module. This module handles any sort of accessory items.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import os, pkgutil, cPickle
# Third party imports
import numpy as np
# Program imports
from annt.exception_handler import BaseException, wrap_error
###############################################################################
########## Exception Handling
###############################################################################
class InsufficientSamples(BaseException):
"""
Exception if too many samples were desired.
"""
def __init__(self, sample, navail, nsamples):
"""
Initialize this class.
@param sample: The desired sample.
@param navail: The number of available samples.
@param nsamples: The desired number of samples to extract.
"""
self.msg = wrap_error('The sample, {0}, only has {1} instances. The ' \
'requested number of samples of {2} is too large. Please reduce ' \
'the desired number of samples and try again'.format(sample,
navail, nsamples))
###############################################################################
########## Primary Functions
###############################################################################
def mnist_data():
"""
Return the example U{MNIST<http://yann.lecun.com/exdb/mnist/>} data. This
is merely a subset of the data. There are 80 samples per digit for the
training set (800 total items) and 20 samples per digit for the testing set
(200 total items).
@return: A tuple of tuples of the following format:
(train_data, train_labels), (test_data, test_labels)
"""
with open(os.path.join(pkgutil.get_loader('annt.examples').filename,
'data', 'mnist.pkl'), 'rb') as f:
return cPickle.load(f)
def one_hot(x, num_items):
"""
Convert an array into a one-hot encoding. The indices in x mark which bits
should be set. The length of each sub-array will be determined by
num_items.
@param x: The array indexes to mark as valid. This should be a numpy array.
@param num_items: The number of items each encoding should contain. This
should be at least as large as the max value in x + 1.
@return: An encoded array.
"""
y = np.repeat([np.zeros(num_items, dtype='uint8')], x.shape[0], 0)
for i, ix in enumerate(x):
y[i, ix] = 1
return y
def threshold(x, thresh, min_value=-1, max_value=1):
"""
Threshold all of the data in a given matrix (2D).
@param x: The array to threshold.
@param thresh: The value to threshold at.
@param min_value: The minimum value to set the data to.
@param max_value: The maximum value to set the data to.
@return: An encoded array.
"""
y = np.empty(x.shape); y.fill(min_value)
max_idx = x >= thresh
y[max_idx] = max_value
return y
def get_random_paired_indexes(y, nsamples, labels=None):
"""
Get a list of indexes corresponding to random selections of the data in y.
A total of nsamples will be returned for each specified label.
@param y: A numpy array consisting of labels.
@param nsamples: The number of samples to obtain for each unique value in
y.
@param labels: This parameter should be a list of the labels to use. If it
    is None then all unique labels will be used.
@return: A dictionary containing the indexes in y corresponding to each
unique value in y. There will be a total of nsamples indexes.
@raise InsufficientSamples: Raised if too many samples were desired to be
selected.
"""
# Extract initial indexes
if labels is None:
keys = np.unique(y)
else:
keys = np.array(labels)
idx = [np.where(key == y)[0] for key in keys]
# Check to make sure it is possible
for key, ix in zip(keys, idx):
if ix.shape[0] < nsamples:
raise InsufficientSamples(key, ix.shape[0], nsamples)
# Shuffle the indexes
for i in xrange(len(idx)):
np.random.shuffle(idx[i])
# Build final result
return {key:ix[:nsamples] for key, ix in zip(keys, idx)}
def flip_random_bits(x, pct, states={-1:1, 1:-1}):
"""
Randomly flip bits in the input stream.
@param x: The input data to work with. This must be a vector.
@param pct: The percentage of bits to flip.
@param states: A dictionary containing the state representations. Each
valid state should be mapped to its own, unique, complement and vice-versa.
For example, if the bit stream consists of the set (1, -1) and '1' is the
inverse of '-1' and vice-versa, the states parameter should be set to
{-1:1, 1:-1}.
@return: The flipped array.
"""
y = np.copy(x)
max_bits = int(x.shape[0] * pct)
idx = np.arange(x.shape[0])
np.random.shuffle(idx)
for ix in idx[:max_bits]:
y[ix] = states[x[ix]]
return y
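# Hypothetical usage sketch (illustration only, not part of the original file):
# a quick demonstration of the encoding helpers above on toy data.
def _example_usage():  # never called; shown for clarity
    labels = np.array([0, 2, 1])
    print(one_hot(labels, 3))   # rows: [1 0 0], [0 0 1], [0 1 0]
    x = np.array([[0.2, 0.8], [0.9, 0.1]])
    print(threshold(x, 0.5))    # values below 0.5 -> -1, all others -> 1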
|
tehtechguy/annt
|
src/annt/plot.py
|
# plot.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/31/15
#
# Description : Module for plotting.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Module for plotting.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import itertools
# Third-Party imports
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_epoch(y_series, series_names=None, y_errs=None, y_label=None,
title=None, semilog=False, legend_location='best', out_path=None,
show=True):
"""
Basic plotter function for plotting various types of data against
training epochs. Each item in the series should correspond to a single
data point for that epoch.
@param y_series: A tuple containing all of the desired series to plot.
@param series_names: A tuple containing the names of the series.
@param y_errs: The error in the y values. There should be one per series
per datapoint. It is assumed this is the standard deviation, but any error
will work.
@param y_label: The label to use for the y-axis.
@param title: The name of the plot.
@param semilog: If True the y-axis will be plotted using a log(10) scale.
@param legend_location: The location of where the legend should be placed.
Refer to matplotlib's U{docs<http://matplotlib.org/api/pyplot_api.html#
matplotlib.pyplot.legend>} for more details.
@param out_path: The full path to where the image should be saved. The file
extension of this path will be used as the format type. If this value is
None then the plot will not be saved, but displayed only.
@param show: If True the plot will be show upon creation.
"""
# Construct the basic plot
fig, ax = plt.subplots()
if title is not None : plt.title(title)
if semilog : ax.set_yscale('log')
if y_label is not None : ax.set_ylabel(y_label)
ax.set_xlabel('Epoch')
plt.xlim((1, max([x.shape[0] for x in y_series])))
colormap = plt.cm.brg
colors = itertools.cycle([colormap(i) for i in np.linspace(0, 0.9,
len(y_series))])
markers = itertools.cycle(['.', ',', 'o', 'v', '^', '<', '>', '1', '2',
'3', '4', '8', 's', 'p', '*', 'p', 'h', 'H', '+', 'D', 'd', '|', '_',
'TICKLEFT', 'TICKRIGHT', 'TICKUP', 'TICKDOWN', 'CARETLEFT',
'CARETRIGHT', 'CARETUP', 'CARETDOWN'])
# Add the data
if y_errs is not None:
for y, err in zip(y_series, y_errs):
            x = np.arange(1, y.shape[0] + 1)
ax.errorbar(x, y, yerr=err, color=colors.next(),
marker=markers.next())
else:
for y in y_series:
            x = np.arange(1, y.shape[0] + 1)
ax.scatter(x, y, color=colors.next(), marker=markers.next())
# Create the legend
if series_names is not None: plt.legend(series_names, loc=legend_location)
# Save the plot
fig.set_size_inches(19.20, 10.80)
if out_path is not None:
plt.savefig(out_path, format=out_path.split('.')[-1], dpi = 100)
# Show the plot and close it after the user is done
if show: plt.show()
plt.close()
def plot_weights(weights, nrows, ncols, shape, title=None, cluster_titles=None,
out_path=None, show=True):
"""
Plot the weight matrices for the network.
@param weights: A numpy array containing a weight matrix. Each row in the
array corresponds to a unique node. Each column corresponds to a weight
value.
@param nrows: The number of rows of plots to create.
@param ncols: The number of columns of plots to create.
@param shape: The shape of the weights. It is assumed that a 1D shape was
used and is desired to be represented in 2D. Whatever shape is provided
will be used to reshape the weights. For example, if you had a 28x28 image
and each weight corresponded to one pixel, you would have a vector with a
shape of (784, ). This vector would then need to be resized to your desired
shape of (28, 28).
@param title: The name of the plot.
@param cluster_titles: The titles for each of the clusters.
@param out_path: The full path to where the image should be saved. The file
extension of this path will be used as the format type. If this value is
None then the plot will not be saved, but displayed only.
@param show: If True the plot will be show upon creation.
"""
# Construct the basic plot
fig = plt.figure()
if title is not None: fig.suptitle(title, fontsize=16)
if cluster_titles is None:
cluster_titles = ['Node {0}'.format(i) for i in xrange(len(weights))]
# Add all of the figures to the grid
for i, weight_set in enumerate(weights):
ax = plt.subplot(nrows, ncols, i + 1)
ax.set_title(cluster_titles[i])
ax.imshow(weight_set.reshape(shape), cmap=plt.cm.gray)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
# Save the plot
fig.set_size_inches(19.20, 10.80)
if out_path is not None:
plt.savefig(out_path, format=out_path.split('.')[-1], dpi = 100)
# Show the plot and close it after the user is done
if show: plt.show()
plt.close()
def make_grid(data):
"""
Convert the properly spaced, but unorganized data into a proper 3D grid.
@param data: A sequence containing of data of the form (x, y, z). x and y
are independent variables and z is the dependent variable.
@return: A tuple containing the new x, y, and z data.
"""
# Sort the data
x, y, z = np.array(sorted(data, key=lambda x: (x[0], x[1]))).T
xi = np.array(sorted(list(set(x))))
yi = np.array(sorted(list(set(y))))
xim, yim = np.meshgrid(xi, yi)
zi = z.reshape(xim.shape)
return (xim, yim, zi)
def plot_surface(x, y, z, x_label=None, y_label=None, z_label=None,
title=None, out_path=None, show=True):
"""
Basic plotter function for plotting surface plots
@param x: A sequence containing the x-axis data.
@param y: A sequence containing the y-axis data.
@param z: A sequence containing the z-axis data.
@param x_label: The label to use for the x-axis.
@param y_label: The label to use for the y-axis.
@param z_label: The label to use for the z-axis.
@param title: The name of the plot.
@param out_path: The full path to where the image should be saved. The file
extension of this path will be used as the format type. If this value is
None then the plot will not be saved, but displayed only.
@param show: If True the plot will be show upon creation.
"""
# Construct the basic plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(x, y, z, cmap=plt.cm.jet, rstride=1, cstride=1,
linewidth=0)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.view_init(azim=58, elev=28)
# Add the labels
if title is not None : plt.title(title)
if x_label is not None : ax.set_xlabel(x_label)
if y_label is not None : ax.set_ylabel(y_label)
if z_label is not None : ax.set_zlabel(z_label)
# Save the plot
fig.set_size_inches(19.20, 10.80)
if out_path is not None:
plt.savefig(out_path, format=out_path.split('.')[-1], dpi = 100)
# Show the plot and close it after the user is done
if show: plt.show()
plt.close()
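# Hypothetical usage sketch (illustration only, not part of the original file):
# building a surface plot from scattered (x, y, z) samples with make_grid.
# The data below is synthetic.
def _example_surface():  # never called; shown for clarity
    data = [(x, y, x * y) for x in range(5) for y in range(5)]
    xi, yi, zi = make_grid(data)
    plot_surface(xi, yi, zi, x_label='x', y_label='y', z_label='x*y',
        title='make_grid example', show=False)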
|
tehtechguy/annt
|
src/annt/experimental/hopfield_network.py
|
# hopfield_network.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/07/15
#
# Description : Example showing how to create and use a hopfield network.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Example showing how to create and use a hopfield network. This example uses a
reduced set of data from the U{MNIST<http://yann.lecun.com/exdb/mnist/>}
dataset.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
# Program imports
from annt.util import mnist_data, threshold
from annt.util import flip_random_bits
from annt.experimental.hopfield_net import Hopfield
from annt.plot import plot_weights
def main(train_data, train_labels, nsamples=10, nstates=1, labels=[0],
pct_noise=0.3, plot=True):
"""
Demonstrates a hopfield network using MNIST.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable with the
same length as train_data.
@param nsamples: The number of samples to use for generating the attractor
states.
@param nstates: The number of states to create. This will result in
creating a number of attractor states equal to the amount of unique labels
multiplied by this value.
@param labels: This parameter should be a list of the labels to use. If it
is None then all unique labels will be used.
@param pct_noise: The percentage of noise to be added to the attractor
state.
@param plot: If True one or more plots are generated showing the attractor
states.
@return: A list of lists containing the attractor state, the attractor
state with noise, and the found attractor state, respectively.
"""
# Create the network
net = Hopfield()
# Initialize the attractor states
net.create_attractor_states(train_data, train_labels, nstates, nsamples,
255 / 2, labels=labels)
# Check noise tolerance with all of the activation states
all_states = []
for state in net.attractor_states.keys():
# Get the states
states = []
states.append(np.array(state))
states.append(flip_random_bits(states[0], pct_noise))
states.append(net.step(states[1]))
all_states.append(np.copy(states))
# Make the plot
if plot:
plot_weights(states, 1, 3, (28, 28), title='Hopfield Network - '
'Noise Tolerance Example\nFlipped {0}% of the bits'.format(
pct_noise * 100), cluster_titles=('Attractor State',
'Attractor State with Noise', 'Found Attractor'))
return all_states
def basic_sim():
"""
Perform a basic simulation.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
# Run the network
main(threshold(train_data, 255 / 2), train_labels)
def vary_params(out_dir, show_plot=True, seed=None):
"""
Vary some parameters and generate some plots.
@param out_dir: The directory to save the plots in.
@param show_plot: If True the plot will be displayed upon creation.
@param seed: The seed for the random number generator. This will force the
network to work with the same data.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
train_d = threshold(train_data, 255 / 2)
# Make the output directory
try:
os.makedirs(out_dir)
except OSError:
pass
###########################################################################
###### Vary number of attractors
###########################################################################
cluster_titles = ['Attractor State', 'Input', 'Found Attractor']
for i in xrange(1, 4):
np.random.seed(seed)
states = np.array(main(train_d, train_labels, labels=np.arange(i),
pct_noise=0, plot=False))
plot_weights(states.reshape(states.shape[1] * states.shape[0],
states.shape[2]), i, 3, (28, 28), title='Hopfield Network - {0} '
'Attractor State(s)'.format(i), cluster_titles=cluster_titles * i,
out_path=os.path.join(out_dir, 'states_{0}.png'.format(i)),
show=show_plot)
###########################################################################
###### Vary amount of noise on input
###########################################################################
noise_ranges = (0.4, 0.6)
cluster_titles = ['Attractor State', 'Input with {0}% Noise',
'Found Attractor'] * len(noise_ranges)
for i, noise in enumerate(noise_ranges):
cluster_titles[i * 3 + 1] = cluster_titles[i * 3 + 1].format(noise *
100)
for i in xrange(1, 3):
new_title = [y for x in [[cluster_titles[0], cluster_titles[3 * j + 1],
cluster_titles[2]] * i for j in xrange(len(noise_ranges))]
for y in x]
states = []
for noise in noise_ranges:
np.random.seed(seed)
states.extend(main(train_d, train_labels, labels=np.arange(i),
pct_noise=noise, plot=False))
states = np.array(states)
plot_weights(states.reshape(states.shape[1] * states.shape[0],
states.shape[2]), len(noise_ranges) * i, 3, (28, 28),
            title='Hopfield Network - Noise Tolerance',
cluster_titles=new_title, out_path=os.path.join(out_dir,
'noise_states_{0}.png'.format(i)), show=show_plot)
if __name__ == '__main__':
basic_sim()
# vary_params(out_dir=r'D:\annt\test\Hopfield_Network', show_plot=False,
# seed=123456789)
|
tehtechguy/annt
|
src/annt/activation.py
|
<reponame>tehtechguy/annt
# activation.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/30/15
#
# Description : Module for various activation functions.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Module for various activation functions.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
from abc import ABCMeta, abstractmethod
# Third party imports
import numpy as np
# Program imports
from annt.exception_handler import BaseException, wrap_error
###############################################################################
########## Exception Handling
###############################################################################
class UnsupportedActivationType(BaseException):
"""
Exception if the activation type is invalid.
"""
def __init__(self, type):
"""
Initialize this class.
@param type: The type of activation function to use.
"""
        self.msg = wrap_error('The type, {0}, is unsupported. The current '
            'types are {1}'.format(type, ', '.join(['linear', 'sigmoid'])))
###############################################################################
########## Functions
###############################################################################
def get_activation(type):
"""
Returns a reference to an activation object.
@param type: The type of activation function to use.
@return: An activation object reference.
@raise UnsupportedActivationType: Raised if type is invalid.
"""
if type == 'linear':
return Linear
elif type == 'sigmoid':
return Sigmoid
else:
raise UnsupportedActivationType(type)
def create_activation(type, **kargs):
"""
Creates an activation object instance.
@param type: The type of activation function to use.
@param kargs: Any keyword arguments.
"""
return get_activation(type)(**kargs)
###############################################################################
########## Class Template
###############################################################################
class Activation(object):
"""
Base class description for an activation function.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
Initialize the class instance.
"""
@abstractmethod
def compute(self, x):
"""
Compute the activation function.
@param x: A numpy array representing the input data. This should be a
vector.
@return: A vector containing the element-wise result of applying the
activation function to the input.
"""
###############################################################################
########## Class Implementation
###############################################################################
class Linear(Activation):
"""
Base class for a liner activation function.
"""
def __init__(self, m=1):
"""
Initializes this linear object.
@param m: The slope of the line. Use the default value of "1" for the
unity function.
"""
# Store the params
self.m = m
def compute(self, x):
"""
Compute the activation function.
@param x: A numpy array representing the input data. This should be a
vector.
@return: A vector containing the element-wise result of applying the
activation function to the input.
"""
return self.m * x
def compute_derivative(self, x):
"""
Compute the activation function's derivative.
@param x: A numpy array representing the input data. This should be a
vector.
@return: A vector containing the element-wise result of applying the
activation function to the input.
"""
        return np.repeat(self.m, len(x))
class Sigmoid(Activation):
"""
Base class for a sigmoid activation function.
"""
def __init__(self):
"""
Initializes this sigmoid object.
"""
pass
def compute(self, x):
"""
Compute the activation function.
@param x: A numpy array representing the input data. This should be a
vector.
@return: A vector containing the element-wise result of applying the
activation function to the input.
"""
return 1 / (1 + np.exp(-x))
def compute_derivative(self, x):
"""
Compute the activation function's derivative.
@param x: A numpy array representing the input data. This should be a
vector.
@return: A vector containing the element-wise result of applying the
activation function to the input.
"""
y = self.compute(x)
return y * (1 - y)
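# Hypothetical usage sketch (illustration only, not part of the original file):
# creating activations through the factory and evaluating them on a small
# vector.
def _example_usage():  # never called; shown for clarity
    x = np.array([-1.0, 0.0, 1.0])
    sigmoid = create_activation('sigmoid')
    linear = create_activation('linear', m=2)
    print(sigmoid.compute(x))   # values strictly between 0 and 1
    print(linear.compute(x))    # m * x = [-2, 0, 2]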
|
tehtechguy/annt
|
src/annt/timers.py
|
<reponame>tehtechguy/annt<filename>src/annt/timers.py
# timers.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/02/15
#
# Description : Module used for timing.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Module used for timing.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import time, csv
from itertools import izip
###############################################################################
########## Primary Functions
###############################################################################
def pretty_time(elapsed_time):
"""
Get a string representing a formatted time in a pretty (human-readable)
format.
@param elapsed_time: The number of elapsed seconds.
"""
# The time labels
labels = ('days', 'hours', 'minutes', 'seconds')
formatted_times = []
# Compute the times
days = elapsed_time / 86400.
i_days = int(days)
hours = (days - i_days) * 24
i_hours = int(hours)
mins = (hours - i_hours) * 60
i_mins = int(mins)
seconds = (mins - i_mins) * 60
times = (i_days, i_hours, i_mins, seconds)
# Format all times but the last element
for t, l in izip(times[:-1], labels[:-1]):
if t != 0:
formatted_times.append('{0} {1}'.format(t, l))
    # Format the last element based on how many components precede it
    if len(formatted_times) > 1:
        fmt = ', and {0:.3f} {1}'
    elif len(formatted_times) == 1:
        fmt = ' and {0:.3f} {1}'
    else:
        fmt = '{0:.3f} {1}'
    # Return the formatted time
    return ', '.join(formatted_times) + fmt.format(times[-1], labels[-1])
###############################################################################
########## Class Implementations
###############################################################################
class SimpleTimer(object):
"""
Simple timer used to hold data just about a single timer.
"""
def __init__(self, name=None):
"""
Initialize this timer instance, starting the timer.
"""
        # Init params
        self.name = name
        self.elapsed_time = 0
        self.start()
def start(self):
"""
Start the timer.
"""
self.start_time = time.time()
def pause(self):
"""
Pauses the timer and compute the elapsed time.
"""
self.finish_time = time.time()
self.elapsed_time += self.finish_time - self.start_time
def stop(self):
"""
Stop the timer and compute the elapsed time.
"""
self.finish_time = time.time()
self.elapsed_time = self.finish_time - self.start_time
def get_elapsed_time(self, pretty=False):
"""
Return the elapsed time.
@param pretty: If False the time is returned as the elapsed time in
seconds. If True, the equivalent time breakdown is computed.
@return: The elapsed time.
"""
# Determine how to format the time
if not pretty:
return self.elapsed_time
else:
return pretty_time(self.elapsed_time)
class MultiTimer(object):
"""
    Timer class used to work with multiple timers.
"""
def __init__(self):
"""
Initialize this timer instance.
"""
self.timers = {}
def add_timers(self, *timer_names):
"""
Adds a new timer to the class and starts the timer.
@param timer_names: The name of the timer.
"""
for timer in timer_names:
self.timers[timer] = SimpleTimer(timer)
self.timers[timer].start()
def stop_timers(self, *timer_names):
"""
Stop the specified timers.
@param timer_names: One or more timer names to be stopped.
"""
for timer in timer_names:
self.timers[timer].stop()
def start_timers(self, *timer_names):
"""
Starts the specified timers.
@param timer_names: One or more timer names to be started.
"""
for timer in timer_names:
self.timers[timer].start()
def pause_timers(self, *timer_names):
"""
Pauses the specified timers.
@param timer_names: One or more timer names to be paused.
"""
for timer in timer_names:
self.timers[timer].pause()
def get_elapsed_time(self, timer_name, pretty=False):
"""
Retrieve the elapsed time for the specified timer.
@param timer_name: The name of the timer to get the info for.
@param pretty: If False the time is returned as the elapsed time in
seconds. If True, the equivalent time breakdown is computed.
@return: The elapsed time.
"""
return self.timers[timer_name].get_elapsed_time(pretty)
def log_timers(self, out_path, header=True):
"""
Creates a log CSV file for all timers. Make sure to stop any timers
before calling this.
@param out_path: The full path to the CSV to write to.
@param header: Flag denoting whether the header should be printed or
not.
"""
with open(out_path, 'wb') as f:
writer = csv.writer(f)
if header:
writer.writerow(self.timers.keys())
writer.writerow([timer.get_elapsed_time() for timer in
self.timers.values()])
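# Hypothetical usage sketch (illustration only, not part of the original file):
# timing two labelled phases with MultiTimer and printing human-readable
# summaries.
def _example_usage():  # never called; shown for clarity
    timers = MultiTimer()
    timers.add_timers('load', 'train')
    time.sleep(0.1)                # pretend to load data
    timers.stop_timers('load')
    timers.start_timers('train')   # restart so 'train' excludes the load phase
    time.sleep(0.2)                # pretend to train
    timers.stop_timers('train')
    print(timers.get_elapsed_time('load', pretty=True))
    print(timers.get_elapsed_time('train', pretty=True))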
|
tehtechguy/annt
|
src/annt/examples/__init__.py
|
# __init__.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/31/15
#
# Description : Defines the examples package
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
This package contains some examples for working with the various types of
networks.
"""
__docformat__ = 'epytext'
|
tehtechguy/annt
|
src/annt/experimental/hopfield_net.py
|
# hopfield_net.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/07/15
#
# Description : Module for a Hopfield network.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Module for a Hopfield network.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Third party imports
import numpy as np
# Program imports
from annt.util import get_random_paired_indexes, threshold
###############################################################################
########## Class Implementation
###############################################################################
class Hopfield(object):
"""
Base class for a Hopfield network.
"""
def __init__(self, attractor_states=None):
"""
Initializes this Hopfield network.
@param attractor_states: The attractor states to use. This must be a
dictionary containing a unique state as the key (this should be a tuple
containing a vector of representing the current state). The value in
the dictionary should be the corresponding label of the state. If None,
the attractor states must be created using the
"create_attractor_states" method.
"""
# Store the params
self.attractor_states = attractor_states
# Construct the weights (if possible)
if self.attractor_states is not None:
self.initialize_weights()
def create_attractor_states(self, x, y, nstates, nsamples, thresh,
labels=None):
"""
Create the attractor states based off the labeled data. Additionally,
initialize the weights using the new attractor states.
@param x: A numpy array consisting of the data to initialize with.
@param y: A numpy array consisting of labels.
@param nstates: The number of states to create. This will result in
creating a number of attractor states equal to the amount of unique
labels multiplied by this value.
@param nsamples: The number of samples to use for each unique value in
y.
@param thresh: The value to threshold at.
@param labels: This parameter should be a list of the labels to use. If
it is None then all unique labels will be used.
"""
idxs = [get_random_paired_indexes(y, nsamples, labels)
for _ in xrange(nstates)]
self.attractor_states = {}
for idx in idxs:
for i, lbl in enumerate(idx):
states = [x[ix] for ix in idx[lbl]]
self.attractor_states[tuple(threshold(np.mean(states, 0), 0))
] = lbl
self.initialize_weights()
def initialize_weights(self):
"""
Initialize the weights of the network. Initialization is done based off
the attractor states.
"""
states = np.array(self.attractor_states.keys()).T
self.weights = np.zeros((states.shape[0], states.shape[0]))
for i in xrange(states.shape[0]):
for j in xrange(states.shape[0]):
self.weights[i][j] = np.dot(states[i], states[j]) / float(
states.shape[0])
def step(self, x):
"""
Compute a single step of the network.
@param x: The input data for this step.
@return: The found attractor state.
"""
return threshold(np.inner(self.weights, x), 0)
|
tehtechguy/annt
|
src/annt/examples/multilayer_perceptron_network.py
|
# multilayer_perceptron_network.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/31/15
#
# Description : Example showing how to create and use a multilayer
# perceptron network.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Example showing how to create and use a multilayer perceptron network. This
example uses a reduced set of data from the
U{MNIST<http://yann.lecun.com/exdb/mnist/>} dataset.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
# Program imports
from annt.util import one_hot, mnist_data
from annt.net import MultilayerPerception
from annt.plot import plot_epoch
def main(train_data, train_labels, test_data, test_labels, nepochs=1,
plot=True, verbose=True, hidden_layers=[100], bias=1, learning_rate=0.001,
min_weight=-1, max_weight=1, hidden_activation_type='sigmoid'):
"""
Demonstrates a multilayer perceptron network using MNIST.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable with the
same length as train_data.
@param test_data: The data to test with. This must be an iterable returning
a numpy array.
@param test_labels: The testing labels. This must be an iterable with the
same length as train_data.
@param nepochs: The number of training epochs to perform.
@param plot: If True, a plot will be created.
@param verbose: If True, the network will print results after every
iteration.
@param hidden_layers: A sequence denoting the number of hidden layers and
the number of nodes per hidden layer. For example, [100, 50] will result in
the creation of two hidden layers with the first layer having 100 nodes and
the second layer having 50 nodes. There is no limit to the number of layers
and nodes.
@param bias: The bias input. Set to "0" to disable.
@param learning_rate: The learning rate to use.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
	@param hidden_activation_type: The type of activation function to use for the
hidden neurons. This must be one of the classes implemented in
L{annt.activation}.
@return: A tuple containing the training and testing results, respectively.
"""
# Create the network
shape = [train_data.shape[1]] + list(hidden_layers) + [10]
net = MultilayerPerception(
shape = shape,
bias = bias,
learning_rate = learning_rate,
min_weight = min_weight,
max_weight = max_weight,
hidden_activation_type = hidden_activation_type
)
# Simulate the network
train_results, test_results = net.run(train_data, train_labels,
test_data, test_labels, nepochs, verbose)
# Plot the results
if plot:
plot_epoch(y_series=(train_results * 100, test_results * 100),
series_names=('Train', 'Test'), y_label='Accuracy [%]',
title='MLP - Example', legend_location='upper left')
return train_results * 100, test_results * 100
def basic_sim(nepochs=100):
"""
Perform a basic simulation.
@param nepochs: The number of training epochs to perform.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
# Scale pixel values to be between 0 and 1
# Convert labeled data to one-hot encoding
# Run the network
main(train_data/255., one_hot(train_labels, 10), test_data/255.,
one_hot(test_labels, 10), nepochs=nepochs)
def bulk(niters, nepochs, verbose=True, plot=True, **kargs):
"""
Execute the main network across many networks.
@param niters: The number of iterations to run for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, a simple iteration status will be printed.
@param plot: If True, a plot will be generated.
@param kargs: Any keyword arguments to pass to the main network simulation.
@return: A tuple containing: (train_mean, train_std), (test_mean, test_std)
"""
# Simulate the network
train_results = np.zeros((niters, nepochs))
test_results = np.zeros((niters, nepochs))
for i in xrange(niters):
if verbose:
print 'Executing iteration {0} of {1}'.format(i + 1, niters)
train_results[i], test_results[i] = main(verbose=False, plot=False,
nepochs=nepochs, **kargs)
# Compute the mean costs
train_mean = np.mean(train_results, 0)
test_mean = np.mean(test_results, 0)
# Compute the standard deviations
train_std = np.std(train_results, 0)
test_std = np.std(test_results, 0)
if plot:
plot_epoch(y_series=(train_mean, test_mean),
legend_location='upper left', series_names=('Train', 'Test'),
y_errs=(train_std, test_std), y_label='Accuracy [%]',
title='MLP - Stats Example')
return (train_mean, train_std), (test_mean, test_std)
def bulk_sim(nepochs=100, niters=10):
"""
Perform a simulation across multiple iterations, for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
"""
(train_data, train_labels), (test_data, test_labels) = mnist_data()
bulk(train_data=train_data/255., train_labels=one_hot(train_labels, 10),
test_data=test_data/255., test_labels=one_hot(test_labels, 10),
nepochs=nepochs, niters=niters)
def vary_params(out_dir, nepochs=100, niters=10, show_plot=True):
"""
Vary some parameters and generate some plots.
@param out_dir: The directory to save the plots in.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
@param show_plot: If True the plot will be displayed upon creation.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
train_d = train_data/255.; train_l = one_hot(train_labels, 10)
test_d = test_data/255.; test_l = one_hot(test_labels, 10)
# Make the output directory
try:
os.makedirs(out_dir)
except OSError:
pass
###########################################################################
###### Vary number of nodes for one layer
###########################################################################
print 'Varying one layer'
shapes = [[int(x)] for x in np.linspace(25, 250, 10)]
train_results = np.zeros((len(shapes), nepochs))
train_stds = np.zeros((len(shapes), nepochs))
test_results = np.zeros((len(shapes), nepochs))
test_stds = np.zeros((len(shapes), nepochs))
series_names = ['Shape = {0}'.format(x) for x in shapes]
for i, shape in enumerate(shapes):
print 'Executing iteration {0} of {1}'.format(i + 1, len(shapes))
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, train_labels=train_l, test_data=test_d,
test_labels=test_l, plot=False, nepochs=nepochs, niters=niters,
hidden_layers=shape, verbose=False)
# Make training plot
title = 'MLP - Training\n10 Iterations, Single Hidden Layer'
out_path = os.path.join(out_dir, 'single-train.png')
plot_epoch(y_series=train_results, series_names=series_names, title=title,
y_errs=train_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
# Make testing plot
title = 'MLP - Testing\n10 Iterations, Single Hidden Layer'
out_path = os.path.join(out_dir, 'single-test.png')
plot_epoch(y_series=test_results, series_names=series_names, title=title,
		y_errs=test_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
###########################################################################
###### Vary number of nodes for two layers (only second layer varied)
###########################################################################
print '\nVarying two layers'
shapes = [[250, int(x)] for x in np.linspace(10, 100, 10)]
train_results = np.zeros((len(shapes), nepochs))
train_stds = np.zeros((len(shapes), nepochs))
test_results = np.zeros((len(shapes), nepochs))
test_stds = np.zeros((len(shapes), nepochs))
series_names = ['Shape = {0}'.format(x) for x in shapes]
for i, shape in enumerate(shapes):
print 'Executing iteration {0} of {1}'.format(i + 1, len(shapes))
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, train_labels=train_l, test_data=test_d,
test_labels=test_l, plot=False, nepochs=nepochs, niters=niters,
hidden_layers=shape, verbose=False)
# Make training plot
title = 'MLP - Training\n10 Iterations, Double Hidden Layer'
out_path = os.path.join(out_dir, 'double-train.png')
plot_epoch(y_series=train_results, series_names=series_names, title=title,
y_errs=train_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
# Make testing plot
title = 'MLP - Testing\n10 Iterations, Double Hidden Layer'
out_path = os.path.join(out_dir, 'double-test.png')
plot_epoch(y_series=test_results, series_names=series_names, title=title,
		y_errs=test_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
###########################################################################
###### Vary number of nodes for three layers (only third layer varied)
###########################################################################
print '\nVarying three layers'
shapes = [[250, 100, int(x)] for x in np.linspace(5, 50, 10)]
train_results = np.zeros((len(shapes), nepochs))
train_stds = np.zeros((len(shapes), nepochs))
test_results = np.zeros((len(shapes), nepochs))
test_stds = np.zeros((len(shapes), nepochs))
series_names = ['Shape = {0}'.format(x) for x in shapes]
for i, shape in enumerate(shapes):
print 'Executing iteration {0} of {1}'.format(i + 1, len(shapes))
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, train_labels=train_l, test_data=test_d,
test_labels=test_l, plot=False, nepochs=nepochs, niters=niters,
hidden_layers=shape, verbose=False)
# Make training plot
title = 'MLP - Training\n10 Iterations, Triple Hidden Layer'
out_path = os.path.join(out_dir, 'triple-train.png')
plot_epoch(y_series=train_results, series_names=series_names, title=title,
y_errs=train_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
# Make testing plot
title = 'MLP - Testing\n10 Iterations, Triple Hidden Layer'
out_path = os.path.join(out_dir, 'triple-test.png')
plot_epoch(y_series=test_results, series_names=series_names, title=title,
		y_errs=test_stds, y_label='Accuracy [%]', out_path=out_path,
legend_location='upper left', show=show_plot)
if __name__ == '__main__':
basic_sim()
# bulk_sim()
# vary_params(out_dir=r'D:\annt\test\MLP_Network', show_plot=False)
|
tehtechguy/annt
|
src/annt/experimental/__init__.py
|
<gh_stars>1-10
# __init__.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/07/15
#
# Description : Defines the experimental package
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
This package contains any experimental code.
"""
__docformat__ = 'epytext'
|
tehtechguy/annt
|
src/annt/examples/competitive_learning_network.py
|
<reponame>tehtechguy/annt<gh_stars>1-10
# competitive_learning_network.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 04/02/15
#
# Description : Example showing how to create and use a competitive learning
# network.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Example showing how to create and use a competitive learning network. This
example uses a reduced set of data from the
U{MNIST<http://yann.lecun.com/exdb/mnist/>} dataset.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
# Program imports
from annt.util import mnist_data
from annt.net import CompetitiveLearning
from annt.plot import plot_epoch, plot_weights, plot_surface, make_grid
def main(train_data, test_data, nepochs=1, plot=True, verbose=True,
nclusters=10, learning_rate=0.001, boost_inc=0.1, boost_dec=0.01,
duty_cycle=50, min_duty_cycle=5, min_weight=-1, max_weight=1, nrows=2,
ncols=5, shape=(28, 28)):
"""
Demonstrates a competitive learning network using MNIST.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param test_data: The data to test with. This must be an iterable returning
a numpy array.
@param nepochs: The number of training epochs to perform.
@param plot: If True, a plot will be created.
@param verbose: If True, the network will print results after every
iteration.
@param nclusters: The number of clusters.
@param learning_rate: The learning rate to use.
@param boost_inc: The amount to increment the boost by.
@param boost_dec: The amount to decrement the boost by.
@param duty_cycle: The history to retain for activations for each node.
	This is the period over which the minimum activation is compared. It is a rolling
window.
	@param min_duty_cycle: The minimum duty cycle. If a node has been active at
	least this many times within the window, its boost value is incremented
	(penalizing frequent winners); otherwise it is decremented.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
@param nrows: The number of rows of plots to create for the clusters.
@param ncols: The number of columns of plots to create for the clusters.
@param shape: The shape of the weights. It is assumed that a 1D shape was
used and is desired to be represented in 2D. Whatever shape is provided
will be used to reshape the weights. For example, if you had a 28x28 image
and each weight corresponded to one pixel, you would have a vector with a
shape of (784, ). This vector would then need to be resized to your desired
shape of (28, 28).
@return: A tuple containing the training results, testing results, and
weights, respectively.
"""
# Create the network
net = CompetitiveLearning(
ninputs = train_data.shape[1],
nclusters = nclusters,
learning_rate = learning_rate,
boost_inc = boost_inc,
boost_dec = boost_dec,
duty_cycle = duty_cycle,
min_duty_cycle = min_duty_cycle,
min_weight = min_weight,
max_weight = max_weight
)
# Simulate the network
train_results, test_results = net.run(train_data, test_data, nepochs,
verbose)
# Plot the results and clusters
if plot:
plot_epoch(y_series=(train_results, test_results),
series_names=('Train', 'Test'), y_label='Cost',
title='Clustering - Example', semilog=True)
plot_weights(net.weights.T, nrows, ncols, shape,
'Clustering Weights - Example')
return train_results, test_results, net.weights.T
def basic_sim(nepochs=100):
"""
Perform a basic simulation.
@param nepochs: The number of training epochs to perform.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
# Scale pixel values to be between 0 and 1
# Convert labeled data to one-hot encoding
# Run the network
main(train_data/255., test_data/255., nepochs=nepochs)
def bulk(niters, nepochs, verbose=True, plot=True, **kargs):
"""
Execute the main network across many networks.
@param niters: The number of iterations to run for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, a simple iteration status will be printed.
@param plot: If True, a plot will be generated.
@param kargs: Any keyword arguments to pass to the main network simulation.
@return: A tuple containing: (train_mean, train_std), (test_mean, test_std)
"""
# Simulate the network
train_results = np.zeros((niters, nepochs))
test_results = np.zeros((niters, nepochs))
for i in xrange(niters):
if verbose:
print 'Executing iteration {0} of {1}'.format(i + 1, niters)
train_results[i], test_results[i], _ = main(verbose=False,
plot=False, nepochs=nepochs, **kargs)
# Compute the mean costs
train_mean = np.mean(train_results, 0)
test_mean = np.mean(test_results, 0)
# Compute the standard deviations
train_std = np.std(train_results, 0)
test_std = np.std(test_results, 0)
if plot:
plot_epoch(y_series=(train_mean, test_mean),
legend_location='upper left', series_names=('Train', 'Test'),
			y_errs=(train_std, test_std), y_label='Cost',
title='Clustering - Stats Example')
return (train_mean, train_std), (test_mean, test_std)
def bulk_sim(nepochs=100, niters=10):
"""
Perform a simulation across multiple iterations, for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
"""
(train_data, train_labels), (test_data, test_labels) = mnist_data()
bulk(train_data=train_data/255., test_data=test_data/255., nepochs=nepochs,
niters=niters)
def vary_params(out_dir, nepochs=100, niters=10, show_plot=True):
"""
Vary some parameters and generate some plots.
@param out_dir: The directory to save the plots in.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
@param show_plot: If True the plot will be displayed upon creation.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
train_d = train_data/255.; test_d = test_data/255.
# Make the output directory
try:
os.makedirs(out_dir)
except OSError:
pass
###########################################################################
###### Vary number of clusters
###########################################################################
print 'Varying number of clusters'
nclusters = np.arange(5, 55, 5)
weight_plot_params = [{'nrows':1, 'ncols':5}, {'nrows':2, 'ncols':5},
{'nrows':3, 'ncols':5}, {'nrows':4, 'ncols':5}, {'nrows':5, 'ncols':5},
{'nrows':3, 'ncols':10}, {'nrows':4, 'ncols':10},
{'nrows':4, 'ncols':10}, {'nrows':5, 'ncols':10},
{'nrows':5, 'ncols':10}]
weight_results = []
train_results = np.zeros((len(nclusters), nepochs))
train_stds = np.zeros((len(nclusters), nepochs))
test_results = np.zeros((len(nclusters), nepochs))
test_stds = np.zeros((len(nclusters), nepochs))
series_names = ['Clusters = {0}'.format(x) for x in nclusters]
for i, ncluster in enumerate(nclusters):
print 'Executing iteration {0} of {1}'.format(i + 1, len(nclusters))
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, test_data=test_d, plot=False,
nepochs=nepochs, nclusters=ncluster, verbose=False, niters=niters)
x, y, weights = main(train_data=train_d, test_data=test_d, plot=False,
nepochs=nepochs, nclusters=ncluster, verbose=False)
weight_results.append(weights)
# Make training plot
title = 'Competitive Learning Network - Training\n10 Iterations, ' \
'Varying Number of Clusters'
out_path = os.path.join(out_dir, 'clusters-train.png')
plot_epoch(y_series=train_results, series_names=series_names, title=title,
y_errs=train_stds, y_label='Cost', out_path=out_path, semilog=True,
show=show_plot)
# Make testing plot
title = 'Competitive Learning Network - Testing\n10 Iterations, ' \
'Varying Number of Clusters'
out_path = os.path.join(out_dir, 'clusters-test.png')
plot_epoch(y_series=test_results, series_names=series_names, title=title,
y_errs=test_stds, y_label='Cost', out_path=out_path, semilog=True,
show=show_plot)
# Make weight plots
title = 'Competitive Learning Network - Weights\n{0} Clusters'
for weights, params, ncluster in zip(weight_results, weight_plot_params,
nclusters):
out_path = os.path.join(out_dir, 'weights-{0}.png'.format(ncluster))
plot_weights(weights=weights, title=title.format(ncluster),
out_path=out_path, show=show_plot, shape=(28, 28), **params)
###########################################################################
###### Vary boost increment and decrement
###########################################################################
print 'Varying boost increment and decrement amounts'
space = np.linspace(0.001, .1, 100)
boost_pairs = np.array([(x, y) for x in space for y in space])
train_results = np.zeros((len(boost_pairs), nepochs))
train_stds = np.zeros((len(boost_pairs), nepochs))
test_results = np.zeros((len(boost_pairs), nepochs))
test_stds = np.zeros((len(boost_pairs), nepochs))
for i, pair in enumerate(boost_pairs):
print 'Executing iteration {0} of {1}'.format(i + 1, len(boost_pairs))
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, test_data=test_d, plot=False,
nepochs=nepochs, boost_inc=pair[0], boost_dec=pair[1],
verbose=False, niters=niters)
# Make training plot at last epoch
title = 'Competitive Learning Network - Training\n10 Iterations, ' \
'Epoch {0}, Varying Boost Increment and Decrement'.format(nepochs)
out_path = os.path.join(out_dir, 'boost-train.png')
plot_surface(*make_grid(np.array([boost_pairs.T[0], boost_pairs.T[1],
train_results.T[-1]]).T), x_label='Boost Increment', out_path=out_path,
y_label='Boost Decrement', title=title, show=show_plot, z_label='Cost')
# Make testing plot at last epoch
title = 'Competitive Learning Network - Testing\n10 Iterations, ' \
'Epoch {0}, Varying Boost Increment and Decrement'.format(nepochs)
out_path = os.path.join(out_dir, 'boost-test.png')
plot_surface(*make_grid(np.array([boost_pairs.T[0], boost_pairs.T[1],
test_results.T[-1]]).T), x_label='Boost Increment', out_path=out_path,
y_label='Boost Decrement', title=title, show=show_plot, z_label='Cost')
if __name__ == '__main__':
basic_sim()
# bulk_sim()
# vary_params(out_dir=r'D:\annt\test\Clustering_Network', show_plot=False)
|
tehtechguy/annt
|
src/annt/net.py
|
<reponame>tehtechguy/annt<gh_stars>1-10
# net.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/30/15
#
# Description : Module for various artificial neural networks.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Module for various artificial neural networks.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
from abc import ABCMeta, abstractmethod
# Third party imports
import numpy as np
# Program imports
from annt.activation import create_activation
from annt.timers import MultiTimer, pretty_time
###############################################################################
########## Class Templates
###############################################################################
class Net(object):
"""
Base class description for a network.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
Initialize the class instance.
"""
@abstractmethod
def step(self, x, y=None):
"""
Compute a single step of the network.
@param x: The input data to compute for this step.
@param y: The expected output.
"""
@abstractmethod
def run(self, train_data, train_labels, test_data, test_labels,
nepochs=1):
"""
Simulate the entire network.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable
with the same length as train_data.
@param test_data: The data to test with. This must be an iterable
returning a numpy array.
@param test_labels: The testing labels. This must be an iterable
with the same length as train_data.
@param nepochs: The number of training epochs to perform.
@return: A tuple containing the training and test costs/accuracies.
"""
@abstractmethod
def initialize_weights(self, shape, min_weight=-1, max_weight=1):
"""
Initialize the weights of the network. Initialization is done randomly.
@param shape: The number of nodes in the entire network, excluding any
bias terms. This parameter must be a sequence.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
"""
def _run(self, x, y=None):
"""
		Execute the network for this batch of data.
		@param x: The input data to compute for this step.
		@param y: The expected output.
		@return: The computed outputs for each input.
"""
if y is None:
return np.array(map(self.step, x))
else:
return np.array(map(self.step, x, y))
def enable_learning(self):
"""
Enables learning for the network.
"""
self.learning = True
def disable_learning(self):
"""
Disables learning for the network.
"""
self.learning = False
class SupervisedCostNet(Net):
"""
Base class description for network that computes a cost function using
	supervised training.
"""
__metaclass__ = ABCMeta
@abstractmethod
def cost(self, y, y_exp):
"""
Compute the cost function
@param y: The true output.
@param y_exp: The expected output.
@return: The cost.
"""
def _print_train(self, epoch, nepochs, train_cost):
"""
Print the details about training.
@param epoch: The current epoch number.
@param nepochs: The total number of epochs.
@param train_cost: A numpy array containing the training cost at each
epoch.
"""
print '\nEpoch {0} of {1}:'.format(epoch + 1, nepochs)
print ' Training Cost : {0}'.format(train_cost[epoch])
print ' Training Time : {0}'.format(
self.timers.get_elapsed_time('train_epoch', True))
def _print_test(self, epoch, test_cost):
"""
Print the details about testing.
@param epoch: The current epoch number.
@param test_cost: A numpy array containing the testing cost at each
epoch.
"""
print ' Testing Cost : {0}'.format(test_cost[epoch])
print ' Testing Time : {0}'.format(
self.timers.get_elapsed_time('test_epoch', True))
def _print_final(self, nepochs, train_cost, test_cost):
"""
		Print the final details.
@param nepochs: The total number of epochs.
@param train_cost: A numpy array containing the training cost at each
epoch.
@param test_cost: A numpy array containing the testing cost at each
epoch.
"""
print '\n' + '*' * 79
print '\nBest Training Cost : {0} at Epoch {1}'.format(np.min(
train_cost), np.argmin(train_cost) + 1)
print 'Best Testing Cost : {0} at Epoch {1}'.format(np.min(
test_cost), np.argmin(test_cost) + 1)
print '\nTotal Execution Time : {0}'.format(
self.timers.get_elapsed_time('global', True))
print 'Total Training Time : {0}'.format(
self.timers.get_elapsed_time('train', True))
print 'Average Training Epoch Time : {0}'.format(
pretty_time(self.timers.get_elapsed_time('train') / nepochs))
print 'Total Testing Time : {0}'.format(
self.timers.get_elapsed_time('test', True))
print 'Average Testing Epoch Time : {0}'.format(
pretty_time(self.timers.get_elapsed_time('test') / nepochs))
def run(self, train_data, train_labels, test_data, test_labels,
nepochs=1, verbose=True):
"""
Simulate the entire network.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable
with the same length as train_data.
@param test_data: The data to test with. This must be an iterable
returning a numpy array.
@param test_labels: The testing labels. This must be an iterable
with the same length as train_data.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, details will be printed after each epoch.
@return: A tuple containing the training and test costs.
"""
# Make some timers
self.timers = MultiTimer()
self.timers.add_timers('global', 'train', 'train_epoch', 'test',
'test_epoch')
self.timers.stop_timers('train', 'train_epoch', 'test', 'test_epoch')
_run = self._run; cost = self.cost
train_cost = np.zeros(nepochs); test_cost = np.zeros(nepochs)
for i in xrange(nepochs):
# Compute training cost
self.timers.start_timers('train', 'train_epoch')
self.enable_learning()
train_cost[i] = cost(_run(train_data, train_labels), train_labels)
self.timers.pause_timers('train')
self.timers.stop_timers('train_epoch')
if verbose: self._print_train(i, nepochs, train_cost)
# Compute testing cost
self.timers.start_timers('test', 'test_epoch')
self.disable_learning()
test_cost[i] = cost(_run(test_data), test_labels)
self.timers.pause_timers('test')
self.timers.stop_timers('test_epoch')
if verbose: self._print_test(i, test_cost)
self.timers.stop_timers('global')
if verbose: self._print_final(nepochs, train_cost, test_cost)
return (train_cost, test_cost)
class UnsupervisedCostNet(SupervisedCostNet):
"""
Base class description for network that computes a cost function using
unsupervised training.
"""
__metaclass__ = ABCMeta
def run(self, train_data, test_data, nepochs=1, verbose=True):
"""
Simulate the entire network.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param test_data: The data to test with. This must be an iterable
returning a numpy array.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, details will be printed after each epoch.
@return: A tuple containing the training and test costs.
"""
# Make some timers
self.timers = MultiTimer()
self.timers.add_timers('global', 'train', 'train_epoch', 'test',
'test_epoch')
self.timers.stop_timers('train', 'train_epoch', 'test', 'test_epoch')
_run = self._run; cost = self.cost
train_cost = np.zeros(nepochs); test_cost = np.zeros(nepochs)
for i in xrange(nepochs):
# Compute training cost
self.timers.start_timers('train', 'train_epoch')
self.enable_learning()
train_cost[i] = cost(_run(train_data), train_data)
self.timers.pause_timers('train')
self.timers.stop_timers('train_epoch')
if verbose: self._print_train(i, nepochs, train_cost)
# Compute testing cost
self.timers.start_timers('test', 'test_epoch')
self.disable_learning()
test_cost[i] = cost(_run(test_data), test_data)
self.timers.pause_timers('test')
self.timers.stop_timers('test_epoch')
if verbose: self._print_test(i, test_cost)
self.timers.stop_timers('global')
if verbose: self._print_final(nepochs, train_cost, test_cost)
return (train_cost, test_cost)
class SupervisedAccuracyNet(Net):
"""
Base class description for network that computes an accuracy using
supervised training.
"""
__metaclass__ = ABCMeta
@abstractmethod
def score(self, y, y_exp):
"""
		Compute the accuracy. If multiple nodes tie for the winning output, one
		of them is chosen at random.
@param y: The true output.
@param y_exp: The expected output.
@return: The accuracy.
"""
def _print_train(self, epoch, nepochs, train_accuracy):
"""
Print the details about training.
@param epoch: The current epoch number.
@param nepochs: The total number of epochs.
@param train_accuracy: A numpy array containing the training accuracy
at each epoch.
"""
print '\nEpoch {0} of {1}:'.format(epoch + 1, nepochs)
print ' Training Accuracy : {0}%'.format(train_accuracy[epoch] * 100)
print ' Training Time : {0}'.format(
self.timers.get_elapsed_time('train_epoch', True))
def _print_test(self, epoch, test_accuracy):
"""
Print the details about testing.
@param epoch: The current epoch number.
@param test_accuracy: A numpy array containing the testing accuracy
at each epoch.
"""
print ' Testing Accuracy : {0}%'.format(test_accuracy[epoch] * 100)
print ' Testing Time : {0}'.format(
self.timers.get_elapsed_time('test_epoch', True))
def _print_final(self, nepochs, train_accuracy, test_accuracy):
"""
		Print the final details.
@param nepochs: The total number of epochs.
@param train_accuracy: A numpy array containing the training accuracy
at each epoch.
@param test_accuracy: A numpy array containing the testing accuracy
at each epoch.
"""
print '\n' + '*' * 79
print '\nBest Training Accuracy : {0}% at Epoch {1}'.format(np.max(
train_accuracy) * 100, np.argmax(train_accuracy) + 1)
print 'Best Testing Accuracy : {0}% at Epoch {1}'.format(np.max(
test_accuracy) * 100, np.argmax(test_accuracy) + 1)
print '\nTotal Execution Time : {0}'.format(
self.timers.get_elapsed_time('global', True))
print 'Total Training Time : {0}'.format(
self.timers.get_elapsed_time('train', True))
print 'Average Training Epoch Time : {0}'.format(
pretty_time(self.timers.get_elapsed_time('train') / nepochs))
print 'Total Testing Time : {0}'.format(
self.timers.get_elapsed_time('test', True))
print 'Average Testing Epoch Time : {0}'.format(
pretty_time(self.timers.get_elapsed_time('test') / nepochs))
def run(self, train_data, train_labels, test_data, test_labels,
nepochs=1, verbose=True):
"""
Simulate the entire network.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable
with the same length as train_data.
@param test_data: The data to test with. This must be an iterable
returning a numpy array.
@param test_labels: The testing labels. This must be an iterable
with the same length as train_data.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, details will be printed after each epoch.
@return: A tuple containing the training and test accuracies.
"""
# Make some timers
self.timers = MultiTimer()
self.timers.add_timers('global', 'train', 'train_epoch', 'test',
'test_epoch')
self.timers.stop_timers('train', 'train_epoch', 'test', 'test_epoch')
_run = self._run; score = self.score
train_accuracy = np.zeros(nepochs); test_accuracy = np.zeros(nepochs)
for i in xrange(nepochs):
# Compute training accuracy
self.timers.start_timers('train', 'train_epoch')
self.enable_learning()
train_accuracy[i] = score(_run(train_data, train_labels),
train_labels)
self.timers.pause_timers('train')
self.timers.stop_timers('train_epoch')
if verbose: self._print_train(i, nepochs, train_accuracy)
# Compute testing accuracy
self.timers.start_timers('test', 'test_epoch')
self.disable_learning()
test_accuracy[i] = score(_run(test_data), test_labels)
self.timers.pause_timers('test')
self.timers.stop_timers('test_epoch')
if verbose: self._print_test(i, test_accuracy)
self.timers.stop_timers('global')
if verbose: self._print_final(nepochs, train_accuracy, test_accuracy)
return (train_accuracy, test_accuracy)
###############################################################################
########## Class Implementation
###############################################################################
class LinearRegressionNetwork(SupervisedCostNet):
"""
	Base class for a linear regression network.
"""
def __init__(self, ninputs, bias=1, learning_rate=0.001, min_weight=-1,
max_weight=1, activation_type='linear', activation_kargs={},
learning=True):
"""
Initializes this linear regression network.
@param ninputs: The number of inputs (excluding the bias).
@param bias: The bias input. Set to "0" to disable.
@param learning_rate: The learning rate to use.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
		@param activation_type: The type of activation function to use. This must
be one of the classes implemented in L{annt.activation}.
@param activation_kargs: Any keyword arguments for the activation
function.
@param learning: Boolean denoting if the network is currently learning.
"""
# Store the params
self.bias = np.array([bias])
self.learning_rate = learning_rate
self.learning = learning
# Initialize the activation function
self.activation = create_activation(activation_type,
**activation_kargs)
# Construct the weights
self.initialize_weights((ninputs + 1,), min_weight, max_weight)
def initialize_weights(self, shape, min_weight=-1, max_weight=1):
"""
Initialize the weights of the network. Initialization is done randomly.
@param shape: The number of nodes in the entire network, excluding any
bias terms. This parameter must be a sequence.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
"""
self.weights = np.random.uniform(min_weight, max_weight, shape[0])
def cost(self, y, y_exp):
"""
Compute the cost function
@param y: The true output.
@param y_exp: The expected output.
@return: The cost.
"""
return np.sum(np.power((y_exp - y), 2)) / (2. * y_exp.shape[0])
def step(self, x, y=None):
"""
Compute a single step of the network.
@param x: The input data to compute for this step. This must be a numpy
array with a shape of (self.ninputs, ).
@param y: The expected output.
@return: The computed output.
"""
# Add bias
full_x = np.concatenate((self.bias, x))
# Calculate the outputs
y_est = self.activation.compute(np.dot(full_x, self.weights))
# Update the error using online learning
if self.learning:
self.weights += self.learning_rate * full_x * (y - y_est)
return y_est
class MultilayerPerception(SupervisedAccuracyNet):
"""
	Base class for a multilayer perceptron.
"""
def __init__(self, shape, bias=1, learning_rate=0.001, min_weight=-1,
max_weight=1, input_activation_type='linear',
input_activation_kargs={'m':1}, hidden_activation_type='sigmoid',
hidden_activation_kargs={},
learning=True):
"""
		Initializes this multilayer perceptron network.
@param shape: The number of layers and the number of nodes per layer
(excluding the bias).
@param bias: The bias input. This is applied to all input and hidden
layers. Set to "0" to disable.
@param learning_rate: The learning rate to use.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
		@param input_activation_type: The type of activation function to use for
the input layer. This must be one of the classes implemented in
L{annt.activation}.
@param input_activation_kargs: Any keyword arguments for the input
activation function.
		@param hidden_activation_type: The type of activation function to use for
the hidden layer. This must be one of the classes implemented in
L{annt.activation}.
@param hidden_activation_kargs: Any keyword arguments for the hidden
activation function.
@param learning: Boolean denoting if the network is currently learning.
"""
# Store the params
self.bias = np.array([bias])
self.learning_rate = learning_rate
self.learning = learning
# Initialize the activation functions
self.input_activation = create_activation(input_activation_type,
**input_activation_kargs)
self.hidden_activation = create_activation(hidden_activation_type,
**hidden_activation_kargs)
# Construct the weights
self.initialize_weights(shape, min_weight, max_weight)
# Construct the internal outputs and deltas
new_shape = [1 + s for s in shape[:-1]] + [shape[-1]]
self.outputs = np.array([np.zeros(s) for s in new_shape])
self.deltas = self.outputs.copy()
def initialize_weights(self, shape, min_weight=-1, max_weight=1):
"""
Initialize the weights of the network. Initialization is done randomly.
@param shape: The number of nodes in the entire network, excluding any
bias terms. This parameter must be a sequence.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
"""
# Input weights aren't trained, so they are ignored. All other weights
		# are set to be random. The last dimension is incremented by 1 to allow
# for the bias.
self.weights = np.array([np.random.uniform(min_weight, max_weight,
(c, p + 1)) for c, p in zip(shape[1:], shape[:-1])])
def score(self, y, y_exp):
"""
		Compute the accuracy. If multiple nodes tie for the winning output, one
		of them is chosen at random.
@param y: The true output.
@param y_exp: The expected output.
@return: The accuracy.
"""
accuracy = 0.
for predicted, expected in zip(y, y_exp):
indexes = np.where(predicted == np.max(predicted))[0]
np.random.shuffle(indexes)
accuracy += 1 if expected[indexes[0]] == 1 else 0
return accuracy / y_exp.shape[0]
def step(self, x, y=None):
"""
Compute a single step of the network.
@param x: The input data to compute for this step.
@param y: The expected output.
@return: The computed outputs from the output layer.
"""
#######################################################################
######## Calculate the outputs using forward propagation
#######################################################################
# Calculate the outputs for the input layer
self.outputs[0][0] = self.input_activation.compute(self.bias)
self.outputs[0][1:] = self.input_activation.compute(x)
# Calculate the outputs for the hidden layer(s)
# - First hidden layer -> last hidden layer
for layer, layer_weights in enumerate(self.weights[:-1], 1):
self.outputs[layer][0] = self.hidden_activation.compute(self.bias)
self.outputs[layer][1:] = self.hidden_activation.compute(np.inner(
self.outputs[layer - 1], layer_weights))
# Calculate the outputs for the output layer
self.outputs[-1] = self.hidden_activation.compute(np.inner(
self.outputs[-2], self.weights[-1]))
#######################################################################
######## Train the network using backpropagation
#######################################################################
if self.learning:
###################################################################
######## Calculate the deltas
###################################################################
# Calculate output deltas
self.deltas[-1] = self.outputs[-1] - y
# Calculate hidden deltas
# - Last hidden layer -> first hidden layer
# Note that deltas are not computed for the bias
for layer in xrange(-2, -self.deltas.shape[0], -1):
self.deltas[layer] = self.hidden_activation.compute_derivative(
self.outputs[layer][1:,]) * np.inner(self.deltas[layer +
1], self.weights[layer + 1].T[1:,])
###################################################################
######## Update the weights
###################################################################
# Update the weights
# - Output -> first hidden layer
# - Bias's weight -> last node's weight
for layer in xrange(-1, -self.deltas.shape[0], -1):
for i in xrange(self.weights[layer].shape[0]):
self.weights[layer][i] += -self.learning_rate * \
self.deltas[layer][i] * self.outputs[layer - 1]
# Return the outputs from the output layer
return self.outputs[-1]
class ExtremeLearningMachine(MultilayerPerception):
"""
Base class for an extreme learning machine.
"""
def step(self, x, y=None):
"""
Compute a single step of the network.
@param x: The input data to compute for this step.
@param y: The expected output.
"""
#######################################################################
######## Calculate the outputs using forward propagation
#######################################################################
# Calculate the outputs for the input layer
self.outputs[0][0] = self.input_activation.compute(self.bias)
self.outputs[0][1:] = self.input_activation.compute(x)
# Calculate the outputs for the hidden layer(s)
# - First hidden layer -> last hidden layer
for layer, layer_weights in enumerate(self.weights[:-1], 1):
self.outputs[layer][0] = self.hidden_activation.compute(self.bias)
self.outputs[layer][1:] = self.hidden_activation.compute(np.inner(
self.outputs[layer - 1], layer_weights))
# Calculate the outputs for the output layer
self.outputs[-1] = self.hidden_activation.compute(np.inner(
self.outputs[-2], self.weights[-1]))
#######################################################################
######## Train the network using backpropagation
#######################################################################
if self.learning:
###################################################################
######## Calculate the deltas
###################################################################
# Calculate output deltas
self.deltas[-1] = self.outputs[-1] - y
###################################################################
######## Update the weights
###################################################################
# Update the output weights
for i in xrange(self.weights[-1].shape[0]):
self.weights[-1][i] += -self.learning_rate * \
self.deltas[-1][i] * self.outputs[-2]
# Return the outputs from the output layer
return self.outputs[-1]
class CompetitiveLearning(UnsupervisedCostNet):
"""
Base class for a competitive learning network (clustering).
"""
def __init__(self, ninputs, nclusters, learning_rate=0.001, boost_inc=0.1,
boost_dec=0.01, duty_cycle=50, min_duty_cycle=5, min_weight=-1,
max_weight=1, learning=True):
"""
Initializes this competitive learning network.
@param ninputs: The number of inputs to the network.
@param nclusters: The number of clusters.
@param learning_rate: The learning rate to use.
@param boost_inc: The amount to increment the boost by.
@param boost_dec: The amount to decrement the boost by.
@param duty_cycle: The history to retain for activations for each node.
		This is the period over which the minimum activation is compared. It is a
rolling window.
		@param min_duty_cycle: The minimum duty cycle. If a node has been active
		at least this many times within the window, its boost value is
		incremented (penalizing frequent winners); otherwise it is decremented.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
@param learning: Boolean denoting if the network is currently learning.
"""
# Store the params
self.learning_rate = learning_rate
self.boost_inc = boost_inc
self.boost_dec = boost_dec
self.duty_cycle = duty_cycle
self.min_duty_cycle = min_duty_cycle
self.learning = learning
# Construct the weights
self.initialize_weights((ninputs, nclusters), min_weight, max_weight)
# Construct the boost values
self.boost = np.ones(nclusters)
# Construct the outputs
# - Each item represents a single cluster.
		# - Each cluster maintains a rolling history of its activations with a
		#   length of duty_cycle.
		# - The first item refers to the current iteration.
		# - The second item refers to the previous iteration.
		# - The last item refers to the oldest iteration in the window.
self.outputs = np.ones((nclusters, duty_cycle))
def _update_boost(self):
"""
Update the boost values.
"""
for i, active in enumerate(self.outputs):
if int(np.sum(active)) >= self.min_duty_cycle:
self.boost[i] += self.boost_inc
else:
self.boost[i] = max(self.boost[i] - self.boost_dec, 0)
def initialize_weights(self, shape, min_weight=-1, max_weight=1):
"""
Initialize the weights of the network. Initialization is done randomly.
@param shape: The number of nodes in the entire network. This parameter
must be a sequence.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
"""
self.weights = np.random.uniform(min_weight, max_weight, shape)
def cost(self, y, x):
"""
Compute the cost function
@param y: The true output.
@param x: The input.
@return: The cost.
"""
cost = 0.
for i, pattern in enumerate(x):
for j, weights in enumerate(self.weights.T):
cost += y[i][j] * np.sum((weights - pattern) ** 2)
return cost / 2
def step(self, x):
"""
Compute a single step of the network.
@param x: The input data to compute for this step.
@return: The computed outputs.
"""
# Shift outputs
self.outputs = np.roll(self.outputs, 1, 1)
# Calculate the outputs
for i, weights in enumerate(self.weights.T):
self.outputs[i][0] = self.boost[i] * np.sum((weights - x) ** 2)
min_ix = np.argmin(self.outputs.T[0])
self.outputs.T[0] = 0
self.outputs[min_ix][0] = 1
# Update the boosts
self._update_boost()
# Train the network
if self.learning:
for i, weights in enumerate(self.weights.T):
self.weights.T[i] += self.learning_rate * self.outputs[i][0] \
* (x - weights)
return self.outputs.T[0]
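# Hedged usage note (not part of the original module): ExtremeLearningMachine
# reuses the MultilayerPerception constructor and run() interface, so a sketch
# of driving it might look like the commented lines below. The shape, variable
# names, and epoch count are illustrative assumptions (e.g. 784 inputs and 10
# one-hot outputs for MNIST-style data); only the output-layer weights are
# updated in its step().
#
#	net = ExtremeLearningMachine(shape=[784, 100, 10], learning_rate=0.001)
#	train_acc, test_acc = net.run(train_data, train_one_hot,
#		test_data, test_one_hot, nepochs=10)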
|
tehtechguy/annt
|
src/annt/examples/linear_regression_network.py
|
# linear_regression_network.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/31/15
#
# Description : Example showing how to create and use a linear regression
# network.
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
Example showing how to create and use a linear regression network. This example
uses a reduced set of data from the U{MNIST<http://yann.lecun.com/exdb/mnist/>}
dataset.
G{packagetree annt}
"""
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
# Program imports
from annt.util import mnist_data
from annt.net import LinearRegressionNetwork
from annt.plot import plot_epoch
def main(train_data, train_labels, test_data, test_labels, nepochs=1,
plot=True, verbose=True, bias=1, learning_rate=0.001, min_weight=-1,
max_weight=1, activation_type='linear', activation_kargs={'m':1}):
"""
Demonstrates a linear regression network using MNIST.
@param train_data: The data to train with. This must be an iterable
returning a numpy array.
@param train_labels: The training labels. This must be an iterable with the
same length as train_data.
@param test_data: The data to test with. This must be an iterable returning
a numpy array.
@param test_labels: The testing labels. This must be an iterable with the
same length as train_data.
@param nepochs: The number of training epochs to perform.
@param plot: If True, a plot will be created.
@param verbose: If True, the network will print results after every
iteration.
@param bias: The bias input. Set to "0" to disable.
@param learning_rate: The learning rate to use.
@param min_weight: The minimum weight value.
@param max_weight: The maximum weight value.
	@param activation_type: The type of activation function to use. This must be
one of the classes implemented in L{annt.activation}.
@param activation_kargs: Any keyword arguments for the activation
function.
@return: A tuple containing the training and testing results, respectively.
"""
# Create the network
net = LinearRegressionNetwork(
ninputs = train_data.shape[1],
bias = bias,
learning_rate = learning_rate,
min_weight = min_weight,
max_weight = max_weight,
activation_type = activation_type,
activation_kargs = activation_kargs
)
# Simulate the network
train_results, test_results = net.run(train_data, train_labels, test_data,
test_labels, nepochs, verbose)
# Plot the results
if plot:
plot_epoch(y_series=(train_results, test_results),
series_names=('Train', 'Test'), y_label='Cost',
title='Linear Regression Network - Example', semilog=True)
return train_results, test_results
def basic_sim(nepochs=100):
"""
Perform a basic simulation.
@param nepochs: The number of training epochs to perform.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
# Scale pixel values to be between 0 and 1
# Scale label values to be between 0 and 1
# Run the network
main(train_data/255., train_labels/9., test_data/255., test_labels/9.,
nepochs=nepochs)
def bulk(niters, nepochs, verbose=True, plot=True, **kargs):
"""
Execute the main network across many networks.
@param niters: The number of iterations to run for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param verbose: If True, a simple iteration status will be printed.
@param plot: If True, a plot will be generated.
@param kargs: Any keyword arguments to pass to the main network simulation.
@return: A tuple containing: (train_mean, train_std), (test_mean, test_std)
"""
# Simulate the network
train_results = np.zeros((niters, nepochs))
test_results = np.zeros((niters, nepochs))
for i in xrange(niters):
if verbose:
print 'Executing iteration {0} of {1}'.format(i + 1, niters)
train_results[i], test_results[i] = main(verbose=False, plot=False,
nepochs=nepochs, **kargs)
# Compute the mean costs
train_mean = np.mean(train_results, 0)
test_mean = np.mean(test_results, 0)
# Compute the standard deviations
train_std = np.std(train_results, 0)
test_std = np.std(test_results, 0)
if plot:
plot_epoch(y_series=(train_mean, test_mean), semilog=True,
series_names=('Train', 'Test'), y_errs=(train_std, test_std),
y_label='Cost', title='Linear Regression Network - Stats Example')
return (train_mean, train_std), (test_mean, test_std)
def bulk_sim(nepochs=100, niters=10):
"""
Perform a simulation across multiple iterations, for statistical purposes.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
"""
(train_data, train_labels), (test_data, test_labels) = mnist_data()
bulk(train_data=train_data/255., train_labels=train_labels/9.,
test_data=test_data/255., test_labels=test_labels/9., nepochs=nepochs,
niters=niters)
def vary_params(out_dir, nepochs=100, niters=10):
"""
Vary some parameters and generate some plots.
@param out_dir: The directory to save the plots in.
@param nepochs: The number of training epochs to perform.
@param niters: The number of iterations to run for statistical purposes.
"""
# Get the data
(train_data, train_labels), (test_data, test_labels) = mnist_data()
train_d = train_data/255.; train_l = train_labels/9.
test_d = test_data/255.; test_l = test_labels/9.
# Make the output directory
try:
os.makedirs(out_dir)
except OSError:
pass
###########################################################################
###### Vary learning rate
###########################################################################
print 'Varying the learning rate'
learning_rates = np.linspace(0.001, 0.01, 10)
train_results = np.zeros((learning_rates.shape[0], nepochs))
train_stds = np.zeros((learning_rates.shape[0], nepochs))
test_results = np.zeros((learning_rates.shape[0], nepochs))
test_stds = np.zeros((learning_rates.shape[0], nepochs))
series_names = ['Learning Rate = {0}'.format(x) for x in learning_rates]
for i, learning_rate in enumerate(learning_rates):
print 'Executing iteration {0} of {1}'.format(i + 1,
learning_rates.shape[0])
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, train_labels=train_l, test_data=test_d,
test_labels=test_l, plot=False, nepochs=nepochs, niters=niters,
learning_rate=learning_rate, verbose=False)
# Make training plot
title = 'Linear Regression Network - Training\n10 Iterations, ' \
'Varying Learning Rate'
out_path = os.path.join(out_dir, 'learning_rate-train.png')
plot_epoch(y_series=train_results, semilog=True, series_names=series_names,
y_errs=train_stds, y_label='Cost', title=title, out_path=out_path)
# Make testing plot
title = 'Linear Regression Network - Testing\n10 Iterations, ' \
'Varying Learning Rate'
out_path = os.path.join(out_dir, 'learning_rate-test.png')
plot_epoch(y_series=test_results, semilog=True, series_names=series_names,
		y_errs=test_stds, y_label='Cost', title=title, out_path=out_path)
###########################################################################
###### Vary slope
###########################################################################
print '\nVarying the slope of the linear function'
slopes = np.linspace(1, 10, 10)
train_results = np.zeros((slopes.shape[0], nepochs))
train_stds = np.zeros((slopes.shape[0], nepochs))
test_results = np.zeros((slopes.shape[0], nepochs))
test_stds = np.zeros((slopes.shape[0], nepochs))
series_names = ['Slope = {0}'.format(x) for x in slopes]
for i, slope in enumerate(slopes):
print 'Executing iteration {0} of {1}'.format(i + 1,
slopes.shape[0])
(train_results[i], train_stds[i]), (test_results[i], test_stds[i]) = \
bulk(train_data=train_d, train_labels=train_l, test_data=test_d,
test_labels=test_l, plot=False, nepochs=nepochs, niters=niters,
activation_kargs={'m':slope}, verbose=False)
# Make training plot
title = 'Linear Regression Network - Training\n10 Iterations, ' \
"Varying Activation Function's Slope"
out_path = os.path.join(out_dir, 'slope-train.png')
plot_epoch(y_series=train_results, semilog=True, series_names=series_names,
y_errs=train_stds, y_label='Cost', title=title, out_path=out_path)
# Make testing plot
title = 'Linear Regression Network - Testing\n10 Iterations, ' \
"Varying Activation Function's Slope"
out_path = os.path.join(out_dir, 'slope-test.png')
plot_epoch(y_series=test_results, semilog=True, series_names=series_names,
		y_errs=test_stds, y_label='Cost', title=title, out_path=out_path)
if __name__ == '__main__':
basic_sim()
# bulk_sim()
# vary_params(out_dir=r'D:\annt\test\Linear_Regression_Network')
|
tehtechguy/annt
|
src/annt/__init__.py
|
# __init__.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Date Created : 03/30/15
#
# Description : Defines the annt package
# Python Version : 2.7.8
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2015 <NAME>
"""
This is a collection of various components for creating artificial neural
networks in Python.
Legal
=====
This code is licensed under the U{MIT license<http://opensource.org/
licenses/mit-license.php>}. Any included datasets may be licensed
differently. Refer to the individual dataset for more details.
Prerequisites
=============
- U{Python 2.7.X<https://www.python.org/downloads/release/python-279/>}
- U{Numpy<http://www.numpy.org/>}
- U{matplotlib<http://matplotlib.org/>}
Installation
============
1. Install all prerequisites
Assuming you have U{pip<https://pip.pypa.io/en/latest/installing.html>}
installed, located in your X{Python27/Scripts} directory:
X{pip install numpy matplotlib}
2. Install this package: X{python setup.py install}. The setup file is
located in the "src" folder.
Getting Started
===============
- Click U{here<http://techtorials.me/mldata/index.html>} to access the
API.
- Check out the L{examples<annt.examples>}.
Package Organization
====================
The annt package contains the following subpackages and modules:
G{packagetree annt}
Connectivity
============
The following image shows how everything is connected:
G{importgraph}
Developer Notes
===============
The following notes are for developers only.
Installation
------------
1. Download and install U{graphviz<http://www.graphviz.org/Download..
php>}
2. Edit line 111 in X{dev/epydoc_config.txt} to point to the directory
containing "dot.exe". This is part of the graphviz installation.
3. Download this repo and execute X{python setup.py install}.
4. Download and install U{Epydoc<http://sourceforge.net/projects/
epydoc/files>}
Generating the API
------------------
From the root level, execute X{python epydoc --config=epydoc_config.txt
annt}
@group Examples: examples
@author: U{<NAME><http://techtorials.me>}
@requires: Python 2.7.X
@version: 0.4.0
@license: U{The MIT License<http://opensource.org/licenses/mit-license.php>}
@copyright: S{copy} 2015 <NAME>
"""
__docformat__ = 'epytext'
|
sqiangcao99/hgr_v2t
|
t2vretrieval/models/criterion.py
|
<filename>t2vretrieval/models/criterion.py<gh_stars>100-1000
import torch
import torch.nn as nn
import framework.configbase
import framework.ops
def cosine_sim(im, s):
'''cosine similarity between all the image and sentence pairs
'''
inner_prod = im.mm(s.t())
im_norm = torch.sqrt((im**2).sum(1).view(-1, 1) + 1e-18)
s_norm = torch.sqrt((s**2).sum(1).view(1, -1) + 1e-18)
sim = inner_prod / (im_norm * s_norm)
return sim
class ContrastiveLoss(nn.Module):
'''compute contrastive loss
'''
def __init__(self, margin=0, max_violation=False, direction='bi', topk=1):
'''Args:
direction: i2t for negative sentence, t2i for negative image, bi for both
'''
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.max_violation = max_violation
self.direction = direction
self.topk = topk
def forward(self, scores, margin=None, average_batch=True):
'''
Args:
scores: image-sentence score matrix, (batch, batch)
the same row of im and s are positive pairs, different rows are negative pairs
'''
if margin is None:
margin = self.margin
batch_size = scores.size(0)
diagonal = scores.diag().view(batch_size, 1) # positive pairs
# mask to clear diagonals which are positive pairs
pos_masks = torch.eye(batch_size).bool().to(scores.device)
batch_topk = min(batch_size, self.topk)
if self.direction == 'i2t' or self.direction == 'bi':
d1 = diagonal.expand_as(scores) # same column for im2s (negative sentence)
# compare every diagonal score to scores in its column
# caption retrieval
cost_s = (margin + scores - d1).clamp(min=0)
cost_s = cost_s.masked_fill(pos_masks, 0)
if self.max_violation:
cost_s, _ = torch.topk(cost_s, batch_topk, dim=1)
cost_s = cost_s / batch_topk
if average_batch:
cost_s = cost_s / batch_size
else:
if average_batch:
cost_s = cost_s / (batch_size * (batch_size - 1))
cost_s = torch.sum(cost_s)
if self.direction == 't2i' or self.direction == 'bi':
d2 = diagonal.t().expand_as(scores) # same row for s2im (negative image)
# compare every diagonal score to scores in its row
cost_im = (margin + scores - d2).clamp(min=0)
cost_im = cost_im.masked_fill(pos_masks, 0)
if self.max_violation:
cost_im, _ = torch.topk(cost_im, batch_topk, dim=0)
cost_im = cost_im / batch_topk
if average_batch:
cost_im = cost_im / batch_size
else:
if average_batch:
cost_im = cost_im / (batch_size * (batch_size - 1))
cost_im = torch.sum(cost_im)
if self.direction == 'i2t':
return cost_s
elif self.direction == 't2i':
return cost_im
else:
return cost_s + cost_im
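# Illustrative usage sketch (added for clarity, not part of the original module);
# the hyper-parameter values below are assumptions:
#   criterion = ContrastiveLoss(margin=0.2, max_violation=True, direction='bi', topk=1)
#   scores = cosine_sim(vid_embeds, cap_embeds)  # (batch, batch), diagonal = positive pairs
#   loss = criterion(scores)                     # hardest-negative triplet loss in both directions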
|
sqiangcao99/hgr_v2t
|
framework/run_utils.py
|
import os
import json
import datetime
import numpy as np
import glob
import framework.configbase
def gen_common_pathcfg(path_cfg_file, is_train=False):
path_cfg = framework.configbase.PathCfg()
path_cfg.load(json.load(open(path_cfg_file)))
output_dir = path_cfg.output_dir
path_cfg.log_dir = os.path.join(output_dir, 'log')
path_cfg.model_dir = os.path.join(output_dir, 'model')
path_cfg.pred_dir = os.path.join(output_dir, 'pred')
if not os.path.exists(path_cfg.log_dir):
os.makedirs(path_cfg.log_dir)
if not os.path.exists(path_cfg.model_dir):
os.makedirs(path_cfg.model_dir)
if not os.path.exists(path_cfg.pred_dir):
os.makedirs(path_cfg.pred_dir)
if is_train:
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
path_cfg.log_file = os.path.join(path_cfg.log_dir, 'log-' + timestamp)
else:
path_cfg.log_file = None
return path_cfg
def find_best_val_models(log_dir, model_dir):
step_jsons = glob.glob(os.path.join(log_dir, 'val.step.*.json'))
epoch_jsons = glob.glob(os.path.join(log_dir, 'val.epoch.*.json'))
val_names, val_scores = [], []
for i, json_file in enumerate(step_jsons + epoch_jsons):
json_name = os.path.basename(json_file)
scores = json.load(open(json_file))
val_names.append(json_name)
val_scores.append(scores)
measure_names = list(val_scores[0].keys())
model_files = {}
for measure_name in measure_names:
# for metrics: the lower the better
if 'loss' in measure_name or 'medr' in measure_name or 'meanr' in measure_name:
idx = np.argmin([scores[measure_name] for scores in val_scores])
# for metrics: the higher the better
else:
idx = np.argmax([scores[measure_name] for scores in val_scores])
json_name = val_names[idx]
model_file = os.path.join(model_dir,
'epoch.%s.th'%(json_name.split('.')[2]) if 'epoch' in json_name \
else 'step.%s.th'%(json_name.split('.')[2]))
model_files.setdefault(model_file, [])
model_files[model_file].append(measure_name)
name2file = {'-'.join(measure_name): model_file for model_file, measure_name in model_files.items()}
return name2file
|
sqiangcao99/hgr_v2t
|
framework/modules/embeddings.py
|
""" Embeddings module """
import math
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
"""
Implements the sinusoidal positional encoding for
non-recurrent neural networks.
Implementation based on "Attention Is All You Need"
Args:
dim_embed (int): embedding size (even number)
"""
def __init__(self, dim_embed, max_len=100):
super(PositionalEncoding, self).__init__()
pe = torch.zeros(max_len, dim_embed)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim_embed, 2, dtype=torch.float) *
-(math.log(10000.0) / dim_embed)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
self.pe = pe # size=(max_len, dim_embed)
self.dim_embed = dim_embed
def forward(self, emb, step=None):
if emb.device != self.pe.device:
self.pe = self.pe.to(emb.device)
if step is None:
# emb.size = (batch, seq_len, dim_embed)
emb = emb + self.pe[:emb.size(1)]
else:
# emb.size = (batch, dim_embed)
emb = emb + self.pe[step]
return emb
class Embedding(nn.Module):
"""Words embeddings for encoder/decoder.
Args:
word_vec_size (int): dimensionality of the word embedding vectors.
word_vocab_size (int): size of the word vocabulary (number of embeddings).
position_encoding (bool): see :obj:`modules.PositionalEncoding`
"""
def __init__(self, word_vocab_size, word_vec_size,
position_encoding=False, fix_word_embed=False, max_len=100):
super(Embedding, self).__init__()
self.word_vec_size = word_vec_size
self.we = nn.Embedding(word_vocab_size, word_vec_size)
if fix_word_embed:
self.we.weight.requires_grad = False
self.init_weight()
self.position_encoding = position_encoding
if self.position_encoding:
self.pe = PositionalEncoding(word_vec_size, max_len=max_len)
def init_weight(self):
std = 1. / (self.word_vec_size**0.5)
nn.init.uniform_(self.we.weight, -std, std)
def forward(self, word_idxs, step=None):
"""Computes the embeddings for words.
Args:
word_idxs (`LongTensor`): index tensor
size = (batch, seq_len) or (batch, )
Return:
embeds: `FloatTensor`,
size = (batch, seq_len, dim_embed) or (batch, dim_embed)
"""
embeds = self.we(word_idxs)
if self.position_encoding:
embeds = self.pe(embeds, step=step)
return embeds
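# Illustrative usage sketch (added for clarity, not part of the original module);
# vocabulary and batch sizes below are assumptions:
#   embed = Embedding(word_vocab_size=10000, word_vec_size=300, position_encoding=True)
#   word_idxs = torch.randint(0, 10000, (8, 20))  # (batch, seq_len)
#   embeds = embed(word_idxs)                     # (8, 20, 300), sinusoidal PE added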
|
sqiangcao99/hgr_v2t
|
t2vretrieval/models/evaluation.py
|
import numpy as np
def eval_q2m(scores, q2m_gts):
'''
Image -> Text / Text -> Image
Args:
scores: (n_query, n_memory) matrix of similarity scores
q2m_gts: list, each item is the positive memory ids of the query id
Returns:
scores: (recall@1, 5, 10, median rank, mean rank)
gt_ranks: the best ranking of ground-truth memories
'''
n_q, n_m = scores.shape
gt_ranks = np.zeros((n_q, ), np.int32)
for i in range(n_q):
s = scores[i]
sorted_idxs = np.argsort(-s)
rank = n_m
for k in q2m_gts[i]:
tmp = np.where(sorted_idxs == k)[0][0]
if tmp < rank:
rank = tmp
gt_ranks[i] = rank
# compute metrics
r1 = 100 * len(np.where(gt_ranks < 1)[0]) / n_q
r5 = 100 * len(np.where(gt_ranks < 5)[0]) / n_q
r10 = 100 * len(np.where(gt_ranks < 10)[0]) / n_q
medr = np.median(gt_ranks) + 1
meanr = gt_ranks.mean() + 1
return (r1, r5, r10, medr, meanr)
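# Illustrative worked example (added for clarity, not part of the original module):
#   scores = np.array([[0.9, 0.1], [0.2, 0.8]])  # 2 queries x 2 memories
#   q2m_gts = [[0], [1]]                         # ground-truth memory ids per query
#   eval_q2m(scores, q2m_gts)  # -> (100.0, 100.0, 100.0, 1.0, 1.0): both GTs ranked first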
|
sqiangcao99/hgr_v2t
|
t2vretrieval/readers/mpdata.py
|
import os
import json
import numpy as np
import torch.utils.data
BOS, EOS, UNK = 0, 1, 2
class MPDataset(torch.utils.data.Dataset):
def __init__(self, name_file, mp_ft_files, word2int_file, max_words_in_sent,
ref_caption_file=None, is_train=False, _logger=None):
if _logger is None:
self.print_fn = print
else:
self.print_fn = _logger.info
self.max_words_in_sent = max_words_in_sent
self.is_train = is_train
self.names = np.load(name_file)
self.word2int = json.load(open(word2int_file))
self.mp_fts = []
for mp_ft_file in mp_ft_files:
self.mp_fts.append(np.load(mp_ft_file))
self.mp_fts = np.concatenate(self.mp_fts, axis=-1)
self.num_videos = len(self.mp_fts)
self.print_fn('mp_fts size %s' % (str(self.mp_fts.shape)))
if ref_caption_file is None:
self.ref_captions = None
else:
self.ref_captions = json.load(open(ref_caption_file))
self.captions = set()
self.pair_idxs = []
for i, name in enumerate(self.names):
for j, sent in enumerate(self.ref_captions[name]):
self.captions.add(sent)
self.pair_idxs.append((i, j))
self.captions = list(self.captions)
self.num_pairs = len(self.pair_idxs)
self.print_fn('captions size %d' % self.num_pairs)
def process_sent(self, sent, max_words):
tokens = [self.word2int.get(w, UNK) for w in sent.split()]
# # add BOS, EOS?
# tokens = [BOS] + tokens + [EOS]
tokens = tokens[:max_words]
tokens_len = len(tokens)
tokens = np.array(tokens + [EOS] * (max_words - tokens_len))
return tokens, tokens_len
def __len__(self):
if self.is_train:
return self.num_pairs
else:
return self.num_videos
def __getitem__(self, idx):
out = {}
if self.is_train:
video_idx, cap_idx = self.pair_idxs[idx]
name = self.names[video_idx]
mp_ft = self.mp_fts[video_idx]
sent = self.ref_captions[name][cap_idx]
cap_ids, cap_len = self.process_sent(sent, self.max_words_in_sent)
out['caption_ids'] = cap_ids
out['caption_lens'] = cap_len
else:
name = self.names[idx]
mp_ft = self.mp_fts[idx]
out['names'] = name
out['mp_fts'] = mp_ft
return out
def iterate_over_captions(self, batch_size):
# the sentence order is the same as self.captions
for s in range(0, len(self.captions), batch_size):
e = s + batch_size
cap_ids, cap_lens = [], []
for sent in self.captions[s: e]:
cap_id, cap_len = self.process_sent(sent, self.max_words_in_sent)
cap_ids.append(cap_id)
cap_lens.append(cap_len)
yield {
'caption_ids': np.array(cap_ids, np.int32),
'caption_lens': np.array(cap_lens, np.int32),
}
def collate_fn(data):
outs = {}
for key in ['names', 'mp_fts', 'caption_ids', 'caption_lens']:
if key in data[0]:
outs[key] = [x[key] for x in data]
# reduce caption_ids lens
if 'caption_lens' in outs:
max_cap_len = np.max(outs['caption_lens'])
outs['caption_ids'] = np.array(outs['caption_ids'])[:, :max_cap_len]
return outs
|
sqiangcao99/hgr_v2t
|
t2vretrieval/encoders/video.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
class MPEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.dim_fts = [2048]
self.dim_embed = 1024
self.dropout = 0
class MPEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
input_size = sum(self.config.dim_fts)
self.ft_embed = nn.Linear(input_size, self.config.dim_embed, bias=True)
self.dropout = nn.Dropout(self.config.dropout)
def forward(self, inputs):
'''
Args:
inputs: (batch, dim_fts) or (batch, max_seq_len, dim_fts)
Return:
embeds: (batch, dim_embed) or (batch, max_seq_len, dim_embed)
'''
embeds = self.ft_embed(inputs)
embeds = self.dropout(embeds)
return embeds
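# Illustrative usage sketch (added for clarity, not part of the original module);
# the config values below are assumptions:
#   cfg = MPEncoderConfig(); cfg.dim_fts = [2048]; cfg.dim_embed = 1024; cfg.dropout = 0.2
#   encoder = MPEncoder(cfg)
#   vid_embeds = encoder(torch.randn(32, 2048))  # mean-pooled features -> (32, 1024)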
|
sqiangcao99/hgr_v2t
|
t2vretrieval/encoders/graph.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class GCNLayer(nn.Module):
def __init__(self, embed_size, dropout=0.0):
super().__init__()
self.embed_size = embed_size
self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False)
self.layernorm = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, node_fts, rel_edges):
'''Args:
node_fts: (batch_size, num_nodes, embed_size)
rel_edges: (batch_size, num_nodes, num_nodes)
'''
ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts))
node_embeds = node_fts + self.dropout(ctx_embeds)
node_embeds = self.layernorm(node_embeds)
return node_embeds
class AttnGCNLayer(GCNLayer):
def __init__(self, embed_size, d_ff, dropout=0.0):
super().__init__(embed_size, dropout=dropout)
self.edge_attn_query = nn.Linear(embed_size, d_ff)
self.edge_attn_key = nn.Linear(embed_size, d_ff)
self.attn_denominator = math.sqrt(d_ff)
def forward(self, node_fts, rel_edges):
'''
Args:
node_fts: (batch_size, num_nodes, embed_size)
rel_edges: (batch_size, num_nodes, num_nodes)
'''
# (batch_size, num_nodes, num_nodes)
attn_scores = torch.einsum('bod,bid->boi',
self.edge_attn_query(node_fts),
self.edge_attn_key(node_fts)) / self.attn_denominator
attn_scores = attn_scores.masked_fill(rel_edges == 0, -1e18)
attn_scores = torch.softmax(attn_scores, dim=2)
# some nodes do not connect with any edge
attn_scores = attn_scores.masked_fill(rel_edges == 0, 0)
ctx_embeds = self.ctx_layer(torch.bmm(attn_scores, node_fts))
node_embeds = node_fts + self.dropout(ctx_embeds)
node_embeds = self.layernorm(node_embeds)
return node_embeds
class GCNEncoder(nn.Module):
def __init__(self, dim_input, dim_hidden, num_hidden_layers,
embed_first=False, dropout=0, attention=False):
super().__init__()
self.dim_input = dim_input
self.dim_hidden = dim_hidden
self.num_hidden_layers = num_hidden_layers
self.embed_first = embed_first
self.attention = attention
if self.attention:
gcn_fn = AttnGCNLayer
else:
gcn_fn = GCNLayer
if self.embed_first:
self.first_embedding = nn.Sequential(
nn.Linear(self.dim_input, self.dim_hidden),
nn.ReLU())
self.layers = nn.ModuleList()
for k in range(num_hidden_layers):
if self.attention:
h2h = gcn_fn(self.dim_hidden, self.dim_hidden // 2, dropout=dropout)
else:
h2h = gcn_fn(self.dim_hidden, dropout=dropout)
self.layers.append(h2h)
def forward(self, node_fts, rel_edges):
if self.embed_first:
node_fts = self.first_embedding(node_fts)
for k in range(self.num_hidden_layers):
layer = self.layers[k]
node_fts = layer(node_fts, rel_edges)
# (batch_size, num_nodes, dim_hidden)
return node_fts
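# Illustrative usage sketch (added for clarity, not part of the original module);
# the sizes below are assumptions:
#   gcn = GCNEncoder(dim_input=1024, dim_hidden=1024, num_hidden_layers=1, attention=True)
#   node_fts = torch.randn(4, 10, 1024)     # (batch, num_nodes, dim_hidden)
#   rel_edges = torch.ones(4, 10, 10)       # dense relation graph
#   node_embeds = gcn(node_fts, rel_edges)  # (4, 10, 1024)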
|
sqiangcao99/hgr_v2t
|
t2vretrieval/miscs/semantic_role_labeling.py
|
import os
import argparse
import json
from allennlp.predictors.predictor import Predictor
def main():
parser = argparse.ArgumentParser()
parser.add_argument('ref_caption_file')
parser.add_argument('out_file')
parser.add_argument('--cuda_device', default=-1, type=int)
opts = parser.parse_args()
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/bert-base-srl-2019.06.17.tar.gz", cuda_device=opts.cuda_device)
ref_caps = json.load(open(opts.ref_caption_file))
uniq_sents = set()
for key, sents in ref_caps.items():
for sent in sents:
uniq_sents.add(sent)
uniq_sents = list(uniq_sents)
print('unique sents', len(uniq_sents))
outs = {}
if os.path.exists(opts.out_file):
outs = json.load(open(opts.out_file))
for i, sent in enumerate(uniq_sents):
if sent in outs:
continue
try:
out = predictor.predict_tokenized(sent.split())
except KeyboardInterrupt:
break
except:
continue
outs[sent] = out
if i % 1000 == 0:
print('finish %d / %d = %.2f%%' % (i, len(uniq_sents), i / len(uniq_sents) * 100))
with open(opts.out_file, 'w') as f:
json.dump(outs, f)
if __name__ == '__main__':
main()
|
sqiangcao99/hgr_v2t
|
t2vretrieval/readers/rolegraphs.py
|
import os
import json
import numpy as np
import h5py
import collections
import torch
import t2vretrieval.readers.mpdata
ROLES = ['V', 'ARG1', 'ARG0', 'ARG2', 'ARG3', 'ARG4',
'ARGM-LOC', 'ARGM-MNR', 'ARGM-TMP', 'ARGM-DIR', 'ARGM-ADV',
'ARGM-PRP', 'ARGM-PRD', 'ARGM-COM', 'ARGM-MOD', 'NOUN']
class RoleGraphDataset(t2vretrieval.readers.mpdata.MPDataset):
def __init__(self, name_file, attn_ft_files, word2int_file,
max_words_in_sent, num_verbs, num_nouns, ref_caption_file, ref_graph_file,
max_attn_len=20, load_video_first=False, is_train=False, _logger=None):
if _logger is None:
self.print_fn = print
else:
self.print_fn = _logger.info
self.max_words_in_sent = max_words_in_sent
self.is_train = is_train
self.attn_ft_files = attn_ft_files
self.max_attn_len = max_attn_len
self.load_video_first = load_video_first
self.names = np.load(name_file)
self.word2int = json.load(open(word2int_file))
self.num_videos = len(self.names)
self.print_fn('num_videos %d' % (self.num_videos))
if ref_caption_file is None:
self.ref_captions = None
else:
self.ref_captions = json.load(open(ref_caption_file))
self.captions = set()
self.pair_idxs = []
for i, name in enumerate(self.names):
for j, sent in enumerate(self.ref_captions[name]):
self.captions.add(sent)
self.pair_idxs.append((i, j))
self.captions = list(self.captions)
self.num_pairs = len(self.pair_idxs)
self.print_fn('captions size %d' % self.num_pairs)
if self.load_video_first:
self.all_attn_fts, self.all_attn_lens = [], []
for name in self.names:
attn_fts = self.load_attn_ft_by_name(name, self.attn_ft_files)
attn_fts, attn_len = self.pad_or_trim_feature(attn_fts, self.max_attn_len, trim_type='select')
self.all_attn_fts.append(attn_fts)
self.all_attn_lens.append(attn_len)
self.all_attn_fts = np.array(self.all_attn_fts)
self.all_attn_lens = np.array(self.all_attn_lens)
self.num_verbs = num_verbs
self.num_nouns = num_nouns
self.role2int = {}
for i, role in enumerate(ROLES):
self.role2int[role] = i
self.role2int['C-%s'%role] = i
self.role2int['R-%s'%role] = i
self.ref_graphs = json.load(open(ref_graph_file))
def load_attn_ft_by_name(self, name, attn_ft_files):
attn_fts = []
for i, attn_ft_file in enumerate(attn_ft_files):
with h5py.File(attn_ft_file, 'r') as f:
key = name.replace('/', '_')
attn_ft = f[key][...]
attn_fts.append(attn_ft)
attn_fts = np.concatenate([attn_ft for attn_ft in attn_fts], axis=-1)
return attn_fts
def pad_or_trim_feature(self, attn_ft, max_attn_len, trim_type='top'):
seq_len, dim_ft = attn_ft.shape
attn_len = min(seq_len, max_attn_len)
# pad
if seq_len < max_attn_len:
new_ft = np.zeros((max_attn_len, dim_ft), np.float32)
new_ft[:seq_len] = attn_ft
# trim
else:
if trim_type == 'top':
new_ft = attn_ft[:max_attn_len]
elif trim_type == 'select':
idxs = np.round(np.linspace(0, seq_len-1, max_attn_len)).astype(np.int32)
new_ft = attn_ft[idxs]
return new_ft, attn_len
def get_caption_outs(self, out, sent, graph):
graph_nodes, graph_edges = graph
#print(graph)
verb_node2idxs, noun_node2idxs = {}, {}
edges = []
out['node_roles'] = np.zeros((self.num_verbs + self.num_nouns, ), np.int32)
# root node
sent_ids, sent_len = self.process_sent(sent, self.max_words_in_sent)
out['sent_ids'] = sent_ids
out['sent_lens'] = sent_len
# graph: add verb nodes
node_idx = 1
out['verb_masks'] = np.zeros((self.num_verbs, self.max_words_in_sent), bool)
for knode, vnode in graph_nodes.items():
k = node_idx - 1
if k >= self.num_verbs:
break
if vnode['role'] == 'V' and np.min(vnode['spans']) < self.max_words_in_sent:
verb_node2idxs[knode] = node_idx
for widx in vnode['spans']:
if widx < self.max_words_in_sent:
out['verb_masks'][k][widx] = True
out['node_roles'][node_idx - 1] = self.role2int['V']
# add root to verb edge
edges.append((0, node_idx))
node_idx += 1
# graph: add noun nodes
node_idx = 1 + self.num_verbs
out['noun_masks'] = np.zeros((self.num_nouns, self.max_words_in_sent), bool)
for knode, vnode in graph_nodes.items():
k = node_idx - self.num_verbs - 1
if k >= self.num_nouns:
break
if vnode['role'] not in ['ROOT', 'V'] and np.min(vnode['spans']) < self.max_words_in_sent:
noun_node2idxs[knode] = node_idx
for widx in vnode['spans']:
if widx < self.max_words_in_sent:
out['noun_masks'][k][widx] = True
out['node_roles'][node_idx - 1] = self.role2int.get(vnode['role'], self.role2int['NOUN'])
node_idx += 1
# graph: add verb_node to noun_node edges
for e in graph_edges:
if e[0] in verb_node2idxs and e[1] in noun_node2idxs:
edges.append((verb_node2idxs[e[0]], noun_node2idxs[e[1]]))
edges.append((noun_node2idxs[e[1]], verb_node2idxs[e[0]]))
num_nodes = 1 + self.num_verbs + self.num_nouns
rel_matrix = np.zeros((num_nodes, num_nodes), dtype=np.float32)
for src_nodeidx, tgt_nodeidx in edges:
rel_matrix[tgt_nodeidx, src_nodeidx] = 1
# row norm
for i in range(num_nodes):
s = np.sum(rel_matrix[i])
if s > 0:
rel_matrix[i] /= s
out['rel_edges'] = rel_matrix
return out
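# Summary of the graph layout built above (comment added for clarity, not original code):
#   node 0                      -> global sentence (root) node
#   nodes 1 .. num_verbs        -> verb nodes (role 'V'), each linked to the root
#   nodes num_verbs+1 .. end    -> noun/argument nodes, linked to their verbs
#   rel_edges[tgt, src] = 1 for each edge, and every row is then normalized to sum to 1.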
def __getitem__(self, idx):
out = {}
if self.is_train:
video_idx, cap_idx = self.pair_idxs[idx]
name = self.names[video_idx]
sent = self.ref_captions[name][cap_idx]
out = self.get_caption_outs(out, sent, self.ref_graphs[sent])
else:
video_idx = idx
name = self.names[idx]
if self.load_video_first:
attn_fts, attn_len = self.all_attn_fts[video_idx], self.all_attn_lens[video_idx]
else:
attn_fts = self.load_attn_ft_by_name(name, self.attn_ft_files)
attn_fts, attn_len = self.pad_or_trim_feature(attn_fts, self.max_attn_len, trim_type='select')
out['names'] = name
out['attn_fts'] = attn_fts
out['attn_lens'] = attn_len
return out
def iterate_over_captions(self, batch_size):
# the sentence order is the same as self.captions
for s in range(0, len(self.captions), batch_size):
e = s + batch_size
data = []
for sent in self.captions[s: e]:
out = self.get_caption_outs({}, sent, self.ref_graphs[sent])
data.append(out)
outs = collate_graph_fn(data)
yield outs
def collate_graph_fn(data):
outs = {}
for key in ['names', 'attn_fts', 'attn_lens', 'sent_ids', 'sent_lens',
'verb_masks', 'noun_masks', 'node_roles', 'rel_edges']:
if key in data[0]:
outs[key] = [x[key] for x in data]
batch_size = len(data)
# reduce attn_lens
if 'attn_fts' in outs:
max_len = np.max(outs['attn_lens'])
outs['attn_fts'] = np.stack(outs['attn_fts'], 0)[:, :max_len]
# reduce caption_ids lens
if 'sent_lens' in outs:
max_cap_len = np.max(outs['sent_lens'])
outs['sent_ids'] = np.array(outs['sent_ids'])[:, :max_cap_len]
outs['verb_masks'] = np.array(outs['verb_masks'])[:, :, :max_cap_len]
outs['noun_masks'] = np.array(outs['noun_masks'])[:, :, :max_cap_len]
return outs
|
sqiangcao99/hgr_v2t
|
t2vretrieval/encoders/sentence.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
from framework.modules.embeddings import Embedding
import framework.ops
class SentEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.num_words = 0
self.dim_word = 300
self.fix_word_embed = False
self.rnn_type = 'gru' # gru, lstm
self.bidirectional = True
self.rnn_hidden_size = 1024
self.num_layers = 1
self.dropout = 0.5
def _assert(self):
assert self.rnn_type in ['gru', 'lstm'], 'invalid rnn_type'
class SentEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding = Embedding(self.config.num_words, self.config.dim_word,
fix_word_embed=self.config.fix_word_embed)
dim_word = self.config.dim_word
self.rnn = framework.ops.rnn_factory(self.config.rnn_type,
input_size=dim_word, hidden_size=self.config.rnn_hidden_size,
num_layers=self.config.num_layers, dropout=self.config.dropout,
bidirectional=self.config.bidirectional, bias=True, batch_first=True)
self.dropout = nn.Dropout(self.config.dropout)
self.init_weights()
def init_weights(self):
directions = ['']
if self.config.bidirectional:
directions.append('_reverse')
for layer in range(self.config.num_layers):
for direction in directions:
for name in ['i', 'h']:
weight = getattr(self.rnn, 'weight_%sh_l%d%s'%(name, layer, direction))
nn.init.orthogonal_(weight.data)
bias = getattr(self.rnn, 'bias_%sh_l%d%s'%(name, layer, direction))
nn.init.constant_(bias, 0)
if name == 'i' and self.config.rnn_type == 'lstm':
bias.data.index_fill_(0, torch.arange(
self.config.rnn_hidden_size, self.config.rnn_hidden_size*2).long(), 1)
def forward_text_encoder(self, word_embeds, seq_lens, init_states):
# outs.size = (batch, seq_len, num_directions * hidden_size)
outs, states = framework.ops.calc_rnn_outs_with_sort(
self.rnn, word_embeds, seq_lens, init_states)
return outs
def forward(self, cap_ids, cap_lens, init_states=None, return_dense=False):
'''
Args:
cap_ids: LongTensor, (batch, seq_len)
cap_lens: FloatTensor, (batch, )
Returns:
if return_dense:
embeds: FloatTensor, (batch, seq_len, embed_size)
else:
embeds: FloatTensor, (batch, embed_size)
'''
word_embeds = self.embedding(cap_ids)
hiddens = self.forward_text_encoder(
self.dropout(word_embeds), cap_lens, init_states)
batch_size, max_seq_len, hidden_size = hiddens.size()
if self.config.bidirectional:
splited_hiddens = torch.split(hiddens, self.config.rnn_hidden_size, dim=2)
hiddens = (splited_hiddens[0] + splited_hiddens[1]) / 2
if return_dense:
return hiddens
else:
sent_masks = framework.ops.sequence_mask(cap_lens, max_seq_len, inverse=False).float()
sent_embeds = torch.sum(hiddens * sent_masks.unsqueeze(2), 1) / cap_lens.unsqueeze(1).float()
return sent_embeds
class SentAttnEncoder(SentEncoder):
def __init__(self, config):
super().__init__(config)
self.ft_attn = nn.Linear(self.config.rnn_hidden_size, 1)
self.softmax = nn.Softmax(dim=1)
def forward(self, cap_ids, cap_lens, init_states=None, return_dense=False):
hiddens = super().forward(cap_ids, cap_lens, init_states=init_states, return_dense=True)
attn_scores = self.ft_attn(hiddens).squeeze(2)
cap_masks = framework.ops.sequence_mask(cap_lens, max_len=attn_scores.size(1), inverse=False)
attn_scores = attn_scores.masked_fill(cap_masks == 0, -1e18)
attn_scores = self.softmax(attn_scores)
if return_dense:
return hiddens, attn_scores
else:
return torch.sum(hiddens * attn_scores.unsqueeze(2), 1)
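# Illustrative usage sketch (added for clarity, not part of the original module);
# the config values below are assumptions:
#   cfg = SentEncoderConfig(); cfg.num_words = 10000
#   enc = SentAttnEncoder(cfg)
#   cap_ids = torch.randint(0, 10000, (8, 20)); cap_lens = torch.tensor([20] * 8)
#   sent_embeds = enc(cap_ids, cap_lens)  # (8, rnn_hidden_size), attention-pooled over words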
|
sqiangcao99/hgr_v2t
|
t2vretrieval/encoders/mlvideo.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
import framework.ops
class MultilevelEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.dim_fts = [2048]
self.dim_embed = 1024
self.dropout = 0
self.num_levels = 3
self.share_enc = False
class MultilevelEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
input_size = sum(self.config.dim_fts)
self.dropout = nn.Dropout(self.config.dropout)
num_levels = 1 if self.config.share_enc else self.config.num_levels
self.level_embeds = nn.ModuleList([
nn.Linear(input_size, self.config.dim_embed, bias=True) for k in range(num_levels)
])
self.ft_attn = nn.Linear(self.config.dim_embed, 1, bias=True)
def forward(self, inputs, input_lens):
'''
Args:
inputs: (batch, max_seq_len, dim_fts)
Return:
sent_embeds: (batch, dim_embed)
verb_embeds: (batch, max_seq_len, dim_embed)
noun_embeds: (batch, max_seq_len, dim_embed)
'''
embeds = []
for k in range(self.config.num_levels):
if self.config.share_enc:
k = 0
embeds.append(self.dropout(self.level_embeds[k](inputs)))
attn_scores = self.ft_attn(embeds[0]).squeeze(2)
input_pad_masks = framework.ops.sequence_mask(input_lens,
max_len=attn_scores.size(1), inverse=True)
attn_scores = attn_scores.masked_fill(input_pad_masks, -1e18)
attn_scores = torch.softmax(attn_scores, dim=1)
sent_embeds = torch.sum(embeds[0] * attn_scores.unsqueeze(2), 1)
return sent_embeds, embeds[1], embeds[2]
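# Illustrative usage sketch (added for clarity, not part of the original module);
# the sizes below are assumptions:
#   cfg = MultilevelEncoderConfig(); cfg.dim_fts = [2048]; cfg.dim_embed = 1024
#   enc = MultilevelEncoder(cfg)
#   fts = torch.randn(8, 20, 2048); lens = torch.tensor([20] * 8)
#   global_embeds, verb_embeds, noun_embeds = enc(fts, lens)
#   # global_embeds: (8, 1024) attention-pooled; verb/noun embeds: (8, 20, 1024) per-frame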
|
sqiangcao99/hgr_v2t
|
framework/modelbase.py
|
import os
import time
import json
import numpy as np
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import framework.logbase
class ModelBase(object):
def __init__(self, config, _logger=None, gpu_id=0):
'''initialize model
(support single GPU, otherwise need to be customized)
'''
self.device = torch.device("cuda:%d"%gpu_id if torch.cuda.is_available() else "cpu")
self.config = config
if _logger is None:
self.print_fn = print
else:
self.print_fn = _logger.info
self.submods = self.build_submods()
for submod in self.submods.values():
submod.to(self.device)
self.criterion = self.build_loss()
self.params, self.optimizer, self.lr_scheduler = self.build_optimizer()
num_params, num_weights = 0, 0
for key, submod in self.submods.items():
for varname, varvalue in submod.state_dict().items():
self.print_fn('%s: %s, shape=%s, num:%d' % (
key, varname, str(varvalue.size()), np.prod(varvalue.size())))
num_params += 1
num_weights += np.prod(varvalue.size())
self.print_fn('num params %d, num weights %d'%(num_params, num_weights))
self.print_fn('trainable: num params %d, num weights %d'%(
len(self.params), sum([np.prod(param.size()) for param in self.params])))
def build_submods(self):
raise NotImplementedError('implement build_submods function: return submods')
def build_loss(self):
raise NotImplementedError('implement build_loss function: return criterion')
def forward_loss(self, batch_data, step=None):
raise NotImplementedError('implement forward_loss function: return loss and additional outs')
def validate(self, val_reader, step=None):
self.eval_start()
# raise NotImplementedError('implement validate function: return metrics')
def test(self, tst_reader, tst_pred_file, tst_model_file=None):
if tst_model_file is not None:
self.load_checkpoint(tst_model_file)
self.eval_start()
# raise NotImplementedError('implement test function')
########################## boilerplate functions ########################
def build_optimizer(self):
trn_params = []
trn_param_ids = set()
per_param_opts = []
for key, submod in self.submods.items():
if self.config.subcfgs[key].freeze:
for param in submod.parameters():
param.requires_grad = False
else:
params = []
for param in submod.parameters():
# sometimes we share params in different submods
if param.requires_grad and id(param) not in trn_param_ids:
params.append(param)
trn_param_ids.add(id(param))
per_param_opts.append({
'params': params,
'lr': self.config.base_lr * self.config.subcfgs[key].lr_mult,
'weight_decay': self.config.subcfgs[key].weight_decay,
})
trn_params.extend(params)
if len(trn_params) > 0:
optimizer = optim.Adam(per_param_opts, lr=self.config.base_lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
milestones=self.config.decay_boundarys, gamma=self.config.decay_rate)
else:
optimizer, lr_scheduler = None, None
print('no trainable parameters')
return trn_params, optimizer, lr_scheduler
def train_start(self):
for key, submod in self.submods.items():
submod.train()
torch.set_grad_enabled(True)
def eval_start(self):
for key, submod in self.submods.items():
submod.eval()
torch.set_grad_enabled(False)
def save_checkpoint(self, ckpt_file, submods=None):
if submods is None:
submods = self.submods
state_dicts = {}
for key, submod in submods.items():
state_dicts[key] = {}
for varname, varvalue in submod.state_dict().items():
state_dicts[key][varname] = varvalue.cpu()
torch.save(state_dicts, ckpt_file)
def load_checkpoint(self, ckpt_file, submods=None):
if submods is None:
submods = self.submods
state_dicts = torch.load(ckpt_file, map_location=lambda storage, loc: storage)
num_resumed_vars = 0
for key, state_dict in state_dicts.items():
if key in submods:
own_state_dict = submods[key].state_dict()
new_state_dict = {}
for varname, varvalue in state_dict.items():
if varname in own_state_dict:
new_state_dict[varname] = varvalue
num_resumed_vars += 1
own_state_dict.update(new_state_dict)
submods[key].load_state_dict(own_state_dict)
self.print_fn('number of resumed variables: %d'%num_resumed_vars)
def pretty_print_metrics(self, prefix, metrics):
metric_str = []
for measure, score in metrics.items():
metric_str.append('%s %.4f'%(measure, score))
metric_str = ' '.join(metric_str)
self.print_fn('%s: %s' % (prefix, metric_str))
def get_current_base_lr(self):
return self.optimizer.param_groups[0]['lr']
def train_one_batch(self, batch_data, step):
self.optimizer.zero_grad()
loss = self.forward_loss(batch_data, step=step)
loss.backward()
self.optimizer.step()
loss_value = loss.data.item()
if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
self.print_fn('\ttrn step %d lr %.8f %s: %.4f' % (step, self.get_current_base_lr(), 'loss', loss_value))
return {'loss': loss_value}
def train_one_epoch(self, step, trn_reader, val_reader, model_dir, log_dir):
self.train_start()
avg_loss, n_batches = {}, {}
for batch_data in trn_reader:
loss = self.train_one_batch(batch_data, step)
for loss_key, loss_value in loss.items():
avg_loss.setdefault(loss_key, 0)
n_batches.setdefault(loss_key, 0)
avg_loss[loss_key] += loss_value
n_batches[loss_key] += 1
step += 1
if self.config.save_iter > 0 and step % self.config.save_iter == 0:
self.save_checkpoint(os.path.join(model_dir, 'step.%d.th'%step))
if (self.config.save_iter > 0 and step % self.config.save_iter == 0) \
or (self.config.val_iter > 0 and step % self.config.val_iter == 0):
metrics = self.validate(val_reader, step=step)
with open(os.path.join(log_dir, 'val.step.%d.json'%step), 'w') as f:
json.dump(metrics, f, indent=2)
self.pretty_print_metrics('\tval step %d'%step, metrics)
self.train_start()
for loss_key, loss_value in avg_loss.items():
avg_loss[loss_key] = loss_value / n_batches[loss_key]
return avg_loss, step
def epoch_postprocess(self, epoch):
if self.lr_scheduler is not None:
self.lr_scheduler.step()
def train(self, trn_reader, val_reader, model_dir, log_dir, resume_file=None):
assert self.optimizer is not None
if resume_file is not None:
self.load_checkpoint(resume_file)
# first validate
metrics = self.validate(val_reader)
self.pretty_print_metrics('init val', metrics)
# training
step = 0
for epoch in range(self.config.num_epoch):
avg_loss, step = self.train_one_epoch(
step, trn_reader, val_reader, model_dir, log_dir)
self.pretty_print_metrics('epoch (%d/%d) trn'%(epoch, self.config.num_epoch), avg_loss)
self.epoch_postprocess(epoch)
if self.config.save_per_epoch:
self.save_checkpoint(os.path.join(model_dir, 'epoch.%d.th'%epoch))
if self.config.val_per_epoch:
metrics = self.validate(val_reader, step=step)
with open(os.path.join(log_dir,
'val.epoch.%d.step.%d.json'%(epoch, step)), 'w') as f:
json.dump(metrics, f, indent=2)
self.pretty_print_metrics('epoch (%d/%d) val' % (epoch, self.config.num_epoch), metrics)
|
sqiangcao99/hgr_v2t
|
t2vretrieval/models/globalmatch.py
|
import os
import numpy as np
import collections
import json
import torch
import framework.ops
import framework.configbase
import framework.modelbase
import t2vretrieval.encoders.video
import t2vretrieval.encoders.sentence
import t2vretrieval.models.criterion
import t2vretrieval.models.evaluation
from t2vretrieval.models.criterion import cosine_sim
VISENC = 'video_encoder'
TXTENC = 'text_encoder'
class GlobalMatchModelConfig(framework.configbase.ModelConfig):
def __init__(self):
super().__init__()
self.max_frames_in_video = None
self.max_words_in_sent = 30
self.margin = 0.2
self.max_violation = False
self.hard_topk = 1
self.loss_direction = 'bi'
self.subcfgs[VISENC] = t2vretrieval.encoders.video.MPEncoderConfig()
self.subcfgs[TXTENC] = t2vretrieval.encoders.sentence.SentEncoderConfig()
class GlobalMatchModel(framework.modelbase.ModelBase):
def build_submods(self):
submods = {
VISENC: t2vretrieval.encoders.video.MPEncoder(self.config.subcfgs[VISENC]),
TXTENC: t2vretrieval.encoders.sentence.SentEncoder(self.config.subcfgs[TXTENC]),
}
return submods
def build_loss(self):
criterion = t2vretrieval.models.criterion.ContrastiveLoss(
margin=self.config.margin,
max_violation=self.config.max_violation,
topk=self.config.hard_topk,
direction=self.config.loss_direction)
return criterion
def forward_video_embed(self, batch_data):
vid_fts = torch.FloatTensor(batch_data['mp_fts']).to(self.device)
vid_embeds = self.submods[VISENC](vid_fts)
return {'vid_embeds': vid_embeds}
def forward_text_embed(self, batch_data):
cap_ids = torch.LongTensor(batch_data['caption_ids']).to(self.device)
cap_lens = torch.LongTensor(batch_data['caption_lens']).to(self.device)
cap_embeds = self.submods[TXTENC](cap_ids, cap_lens)
return {'cap_embeds': cap_embeds}
def generate_scores(self, **kwargs):
# compute image-sentence similarity
vid_embeds = kwargs['vid_embeds']
cap_embeds = kwargs['cap_embeds']
scores = cosine_sim(vid_embeds, cap_embeds) # s[i, j] i: im_idx, j: s_idx
return scores
def forward_loss(self, batch_data, step=None):
vid_enc_outs = self.forward_video_embed(batch_data)
cap_enc_outs = self.forward_text_embed(batch_data)
cap_enc_outs.update(vid_enc_outs)
scores = self.generate_scores(**cap_enc_outs)
loss = self.criterion(scores)
if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)
self.print_fn('\tstep %d: pos mean scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%(
step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]),
torch.mean(torch.max(neg_scores, 0)[0])))
return loss
def evaluate_scores(self, tst_reader):
vid_names, all_scores = [], []
cap_names = tst_reader.dataset.captions
for vid_data in tst_reader:
vid_names.extend(vid_data['names'])
vid_enc_outs = self.forward_video_embed(vid_data)
all_scores.append([])
for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):
cap_enc_outs = self.forward_text_embed(cap_data)
cap_enc_outs.update(vid_enc_outs)
scores = self.generate_scores(**cap_enc_outs)
all_scores[-1].append(scores.data.cpu().numpy())
all_scores[-1] = np.concatenate(all_scores[-1], axis=1)
all_scores = np.concatenate(all_scores, axis=0) # (n_img, n_cap)
return vid_names, cap_names, all_scores
def calculate_metrics(self, scores, i2t_gts, t2i_gts):
# caption retrieval
cr1, cr5, cr10, cmedr, cmeanr = t2vretrieval.models.evaluation.eval_q2m(scores, i2t_gts)
# image retrieval
ir1, ir5, ir10, imedr, imeanr = t2vretrieval.models.evaluation.eval_q2m(scores.T, t2i_gts)
# sum of recalls to be used for early stopping
rsum = cr1 + cr5 + cr10 + ir1 + ir5 + ir10
metrics = collections.OrderedDict()
metrics['ir1'] = ir1
metrics['ir5'] = ir5
metrics['ir10'] = ir10
metrics['imedr'] = imedr
metrics['imeanr'] = imeanr
metrics['cr1'] = cr1
metrics['cr5'] = cr5
metrics['cr10'] = cr10
metrics['cmedr'] = cmedr
metrics['cmeanr'] = cmeanr
metrics['rsum'] = rsum
return metrics
def evaluate(self, tst_reader, return_outs=False):
vid_names, cap_names, scores = self.evaluate_scores(tst_reader)
i2t_gts = []
for vid_name in vid_names:
i2t_gts.append([])
for i, cap_name in enumerate(cap_names):
if cap_name in tst_reader.dataset.ref_captions[vid_name]:
i2t_gts[-1].append(i)
t2i_gts = {}
for i, t_gts in enumerate(i2t_gts):
for t_gt in t_gts:
t2i_gts.setdefault(t_gt, [])
t2i_gts[t_gt].append(i)
metrics = self.calculate_metrics(scores, i2t_gts, t2i_gts)
if return_outs:
outs = {
'vid_names': vid_names,
'cap_names': cap_names,
'scores': scores,
}
return metrics, outs
else:
return metrics
def validate(self, val_reader, step=None):
self.eval_start()
metrics = self.evaluate(val_reader)
return metrics
def test(self, tst_reader, tst_pred_file, tst_model_file=None):
if tst_model_file is not None:
self.load_checkpoint(tst_model_file)
self.eval_start()
if tst_reader.dataset.ref_captions is None:
vid_names, cap_names, scores = self.evaluate_scores(tst_reader)
outs = {
'vid_names': vid_names,
'cap_names': cap_names,
'scores': scores,
}
metrics = None
else:
metrics, outs = self.evaluate(tst_reader, return_outs=True)
with open(tst_pred_file, 'wb') as f:
np.save(f, outs)
return metrics
|
sqiangcao99/hgr_v2t
|
t2vretrieval/driver/configs/prepare_globalmatch_configs.py
|
import os
import sys
import argparse
import numpy as np
import json
import t2vretrieval.models.globalmatch
from t2vretrieval.models.globalmatch import VISENC, TXTENC
def prepare_mp_globalmatch_model(root_dir):
anno_dir = os.path.join(root_dir, 'annotation', 'RET')
mp_ft_dir = os.path.join(root_dir, 'ordered_feature', 'MP')
split_dir = os.path.join(root_dir, 'public_split')
res_dir = os.path.join(root_dir, 'results', 'RET.released')
mp_ft_names = ['resnet152.pth']
dim_mp_fts = [np.load(os.path.join(mp_ft_dir, mp_ft_name, 'val_ft.npy')).shape[-1] \
for mp_ft_name in mp_ft_names]
num_words = len(np.load(os.path.join(anno_dir, 'int2word.npy')))
model_cfg = t2vretrieval.models.globalmatch.GlobalMatchModelConfig()
model_cfg.max_words_in_sent = 30
model_cfg.margin = 0.2
model_cfg.max_violation = True #False
model_cfg.hard_topk = 1
model_cfg.loss_direction = 'bi'
model_cfg.trn_batch_size = 128
model_cfg.tst_batch_size = 1000
model_cfg.monitor_iter = 1000
model_cfg.summary_iter = 1000
visenc_cfg = model_cfg.subcfgs[VISENC]
visenc_cfg.dim_fts = dim_mp_fts
visenc_cfg.dim_embed = 1024
visenc_cfg.dropout = 0.2
txtenc_cfg = model_cfg.subcfgs[TXTENC]
txtenc_cfg.num_words = num_words
txtenc_cfg.dim_word = 300
txtenc_cfg.fix_word_embed = False
txtenc_cfg.rnn_hidden_size = 1024
txtenc_cfg.num_layers = 1
txtenc_cfg.rnn_type = 'gru' # lstm, gru
txtenc_cfg.bidirectional = True
txtenc_cfg.dropout = 0.2
txtenc_name = '%s%s%s'%('bi' if txtenc_cfg.bidirectional else '', txtenc_cfg.rnn_type,
'.fix' if txtenc_cfg.fix_word_embed else '')
output_dir = os.path.join(res_dir, 'globalmatch',
'mp.vis.%s.txt.%s.%d.loss.%s%s.glove.init'%
('-'.join(mp_ft_names),
txtenc_name,
visenc_cfg.dim_embed,
model_cfg.loss_direction,
'.max.%d'%model_cfg.hard_topk if model_cfg.max_violation else '')
)
print(output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_cfg.save(os.path.join(output_dir, 'model.json'))
path_cfg = {
'output_dir': output_dir,
'mp_ft_files': {},
'name_file': {},
'word2int_file': os.path.join(anno_dir, 'word2int.json'),
'int2word_file': os.path.join(anno_dir, 'int2word.npy'),
'ref_caption_file': {},
}
for setname in ['trn', 'val', 'tst']:
path_cfg['mp_ft_files'][setname] = [
os.path.join(mp_ft_dir, mp_ft_name, '%s_ft.npy'%setname) for mp_ft_name in mp_ft_names
]
path_cfg['name_file'][setname] = os.path.join(split_dir, '%s_names.npy'%setname)
path_cfg['ref_caption_file'][setname] = os.path.join(anno_dir, 'ref_captions.json')
with open(os.path.join(output_dir, 'path.json'), 'w') as f:
json.dump(path_cfg, f, indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('root_dir')
opts = parser.parse_args()
prepare_mp_globalmatch_model(opts.root_dir)
|
sqiangcao99/hgr_v2t
|
framework/modules/global_attention.py
|
""" Global attention modules (Luong / Bahdanau) """
import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalAttention(nn.Module):
'''
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`,
to an output of size `dim`.
All models compute the output as
:math:`c = sum_{j=1}^{SeqLength} a_j H_j` where
:math:`a_j` is the softmax of a score function.
However they differ on how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)`
Args:
attn_size (int): dimensionality of query and key
attn_type (str): type of attention to use, options [dot,general,mlp]
'''
def __init__(self, query_size, attn_size, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.query_size = query_size
self.attn_size = attn_size
self.attn_type = attn_type
if self.attn_type == 'general':
self.linear_in = nn.Linear(query_size, attn_size, bias=False)
elif self.attn_type == 'mlp':
self.linear_query = nn.Linear(query_size, attn_size, bias=True)
self.attn_w = nn.Linear(attn_size, 1, bias=False)
elif self.attn_type == 'dot':
assert self.query_size == self.attn_size
def forward(self, query, memory_keys, memory_values, memory_masks):
"""
Args:
query (`FloatTensor`): (batch, query_size)
memory_keys (`FloatTensor`): (batch, seq_len, attn_size)
memory_values (`FloatTensor`): (batch, seq_len, attn_size)
memory_masks (`LongTensor`): (batch, seq_len)
Returns:
attn_score: attention distributions (batch, seq_len)
attn_memory: computed context vector, (batch, attn_size)
"""
batch_size, seq_len, attn_size = memory_keys.size()
if self.attn_type == 'mlp':
query_hidden = self.linear_query(query.unsqueeze(1)).expand(
batch_size, seq_len, attn_size)
# attn_hidden: # (batch, seq_len, attn_size)
attn_hidden = torch.tanh(query_hidden + memory_keys)
# attn_score: (batch, seq_len, 1)
attn_score = self.attn_w(attn_hidden)
elif self.attn_type == 'dot':
# attn_score: (batch, seq_len, 1)
attn_score = torch.bmm(memory_keys, query.unsqueeze(2))
elif self.attn_type == 'general':
query_hidden = self.linear_in(query)
attn_score = torch.bmm(memory_keys, query_hidden.unsqueeze(2))
# attn_score: (batch, seq_len)
attn_score = attn_score.squeeze(2)
if memory_masks is not None:
attn_score = attn_score * memory_masks # memory mask [0, 1]
attn_score = attn_score.masked_fill(memory_masks == 0, -1e18)
attn_score = F.softmax(attn_score, dim=1)
# make sure no item is attended when all memory_masks are all zeros
if memory_masks is not None:
attn_score = attn_score.masked_fill(memory_masks == 0, 0)
attn_memory = torch.sum(attn_score.unsqueeze(2) * memory_values, 1)
return attn_score, attn_memory
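# Illustrative usage sketch (added for clarity, not part of the original module);
# the sizes below are assumptions:
#   attn = GlobalAttention(query_size=512, attn_size=512, attn_type='dot')
#   query = torch.randn(4, 512)
#   keys = values = torch.randn(4, 10, 512)
#   masks = torch.ones(4, 10)
#   attn_score, attn_memory = attn(query, keys, values, masks)  # (4, 10), (4, 512)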
#TODO
class AdaptiveAttention(nn.Module):
def __init__(self, query_size, attn_size):
super(AdaptiveAttention, self).__init__()
self.query_size = query_size
self.attn_size = attn_size
self.query_attn_conv = nn.Conv1d(query_size,
attn_size, kernel_size=1, stride=1, padding=0, bias=True)
self.sentinel_attn_conv = nn.Conv1d(query_size,
attn_size, kernel_size=1, stride=1, padding=0, bias=False)
self.attn_w = nn.Conv1d(attn_size, 1, kernel_size=1,
stride=1, padding=0, bias=False)
def forward(self, query, memory_keys, memory_values, memory_masks, sentinel):
batch_size, _, enc_seq_len = memory_keys.size()
query_hidden = self.query_attn_conv(query.unsqueeze(2))
sentinel_hidden = self.sentinel_attn_conv(sentinel.unsqueeze(2))
memory_keys_sentinel = torch.cat([memory_keys, sentinel_hidden], dim=2)
attn_score = self.attn_w(F.tanh(query_hidden + memory_keys_sentinel)).squeeze(1)
attn_score = F.softmax(attn_score, dim=1)
masks = torch.cat([memory_masks, torch.ones(batch_size, 1).to(memory_masks.device)], dim=1)
attn_score = attn_score * masks
attn_score = attn_score / (torch.sum(attn_score, 1, keepdim=True) + 1e-10)
attn_memory = torch.sum(attn_score[:, :-1].unsqueeze(1) * memory_values, 2)
attn_memory = attn_memory + attn_score[:, -1:] * sentinel
return attn_score, attn_memory
|
soumilbaldota/local_full-duplex_chatspace_using_sockets
|
CLIENT.py
|
from socket import *
import threading
import time
import sys
# Wait before starting, presumably to give the peer's listener time to come up.
time.sleep(10)
def recv():
# Receiving half: repeatedly connect to the peer (host from argv[1], port 5747),
# read one message in 8-byte chunks until the peer closes the connection, then print it.
while True:
s=socket(AF_INET,SOCK_STREAM)
s.connect((sys.argv[1], 5747))
s1=''
while True:
msg=s.recv(8).decode('utf-8')
s1+=msg
if len(msg)<=0:
s.close()
print(s1)
break
def send():
# Sending half: listen on port 5743 and send one typed line per accepted connection.
s=socket(AF_INET,SOCK_STREAM)
s.bind((gethostname(),5743))
s.listen(5)
while True:
s1=input('>>>')
conn,addr=s.accept()
conn.send(s1.encode('utf-8'))
conn.close()
t1=threading.Thread(target=recv)
t1.start()
t2=threading.Thread(target=send)
t2.start()
|
Aghassi/rules_spa
|
spa/private/build_routes.bzl
|
"""A macro for creating a webpack federation route module"""
load("@aspect_rules_swc//swc:swc.bzl", "swc")
# Defines this as an importable module area for shared macros and configs
def build_route(name, entry, srcs, data, webpack, federation_shared_config):
"""
Macro that allows easy composition of routes from a multi route spa
Args:
name: name of a route (route)
entry: the entry file to the route
srcs: source files to be transpiled and bundled
data: any dependencies the route needs to build
webpack: the webpack module to invoke. The users must provide their own load statement for webpack before this macro is called
federation_shared_config: a nodejs module file that exposes a map of dependencies to their shared module spec https://webpack.js.org/plugins/module-federation-plugin/#sharing-hints. An example of this is located within this repository under the private/webpack folder.
"""
build_name = name + "_route"
# list of all transpilation targets from SWC to be passed to webpack
deps = [
":transpile_" + files.replace("//", "").replace("/", "_").split(".")[0]
for files in srcs
] + data
# buildifier: disable=no-effect
[
swc(
name = "transpile_" + s.replace("//", "").replace("/", "_").split(".")[0],
args = [
"-C jsc.parser.jsx=true",
"-C jsc.parser.syntax=typescript",
"-C jsc.transform.react.runtime=automatic",
"-C jsc.transform.react.development=false",
"-C module.type=commonjs",
],
srcs = [s],
)
for s in srcs
]
route_config = Label("//spa/private/webpack:webpack.route.config.js")
webpack(
name = name,
args = [
"--env name=" + build_name,
"--env entry=./$(execpath :transpile_" + name + ")",
"--env SHARED_CONFIG=$(location %s)" % federation_shared_config,
"--env BAZEL_SRC_PATH=$(execpath :transpile_" + name + ")",
"--output-path=$(@D)",
"--config=$(rootpath %s)" % route_config,
],
data = [
route_config,
federation_shared_config,
Label("//spa/private/webpack:webpack.common.config.js"),
] + deps,
output_dir = True,
visibility = ["//src/client/routes:__pkg__"],
)
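# Illustrative BUILD-file usage sketch (added for clarity, not part of the original macro);
# all target names and paths below are assumptions:
#   load("//spa:defs.bzl", "build_route")
#   build_route(
#       name = "checkout",
#       entry = "checkout.tsx",
#       srcs = glob(["**/*.tsx"]),
#       data = ["//:node_modules"],
#       webpack = webpack,
#       federation_shared_config = "//:federation.shared.js",
#   )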
|
Aghassi/rules_spa
|
spa/private/routes/generate_route_manifest.bzl
|
"""A macro that wraps a script to generate an object that maps routes to their entry data"""
load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_binary", "npm_package_bin")
def generate_route_manifest(name, routes):
"""Generates a json file that is a representation of all routes.
Args:
name: name of the invocation
routes: the sources from the //src/routes rule to be used in the script
Returns:
the generated manifest
"""
nodejs_binary(
name = "bin",
data = [
Label("//spa/private/routes:generate-route-manifest.js"),
],
entry_point = Label("//spa/private/routes:generate-route-manifest.js"),
)
npm_package_bin(
name = "route_manifest",
tool = ":bin",
data = [routes],
args = [
"$(execpath %s)" % routes,
],
stdout = "route.manifest.json",
visibility = ["//visibility:public"],
)
|
Aghassi/rules_spa
|
internal_deps.bzl
|
"""Our "development" dependencies
Users should *not* need to install these. If users see a load()
statement from these, that's a bug in our distribution.
"""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
def rules_spa_internal_deps():
"Fetch deps needed for local development"
maybe(
http_archive,
name = "build_bazel_integration_testing",
urls = [
"https://github.com/bazelbuild/bazel-integration-testing/archive/165440b2dbda885f8d1ccb8d0f417e6cf8c54f17.zip",
],
strip_prefix = "bazel-integration-testing-165440b2dbda885f8d1ccb8d0f417e6cf8c54f17",
sha256 = "2401b1369ef44cc42f91dc94443ef491208dbd06da1e1e10b702d8c189f098e3",
)
maybe(
http_archive,
name = "io_bazel_rules_go",
sha256 = "2b1641428dff9018f9e85c0384f03ec6c10660d935b750e3fa1492a281a53b0f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.29.0/rules_go-v0.29.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.29.0/rules_go-v0.29.0.zip",
],
)
maybe(
http_archive,
name = "bazel_gazelle",
sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
],
)
# Override bazel_skylib distribution to fetch sources instead
# so that the gazelle extension is included
# see https://github.com/bazelbuild/bazel-skylib/issues/250
maybe(
http_archive,
name = "bazel_skylib",
sha256 = "07b4117379dde7ab382345c3b0f5edfc6b7cff6c93756eac63da121e0bbcc5de",
strip_prefix = "bazel-skylib-1.1.1",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/archive/1.1.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/archive/1.1.1.tar.gz",
],
)
maybe(
http_archive,
name = "io_bazel_stardoc",
sha256 = "c9794dcc8026a30ff67cf7cf91ebe245ca294b20b071845d12c192afe243ad72",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/stardoc/releases/download/0.5.0/stardoc-0.5.0.tar.gz",
"https://github.com/bazelbuild/stardoc/releases/download/0.5.0/stardoc-0.5.0.tar.gz",
],
)
maybe(
http_archive,
name = "aspect_bazel_lib",
sha256 = "8c8cf0554376746e2451de85c4a7670cc8d7400c1f091574c1c1ed2a65021a4c",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v0.2.6/bazel_lib-0.2.6.tar.gz",
)
# The minimal version of rules_nodejs we need
maybe(
http_archive,
name = "build_bazel_rules_nodejs",
sha256 = "ddb78717b802f8dd5d4c01c340ecdc007c8ced5c1df7db421d0df3d642ea0580",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.6.0/rules_nodejs-4.6.0.tar.gz"],
)
# The minimal version of rules_swc that we need
maybe(
http_archive,
name = "aspect_rules_swc",
sha256 = "67d6020374627f60c6c1e5d5e1690fcdc4fa39952de8a727d3aabe265ca843be",
strip_prefix = "rules_swc-0.1.0",
url = "https://github.com/aspect-build/rules_swc/archive/v0.1.0.tar.gz",
)
maybe(
http_archive,
name = "rules_nodejs",
sha256 = "005c59bf299d15d1d9551f12f880b1a8967fa883654c897907a667cdbb77c7a6",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.6.0/rules_nodejs-core-4.6.0.tar.gz"],
)
# rules_js is needed for rules_swc
maybe(
http_archive,
name = "aspect_rules_js",
sha256 = "dd78b4911b7c2e6c6e919b85cd31572cc15e5baa62b9e7354d8a1065c67136e3",
strip_prefix = "rules_js-0.3.1",
url = "https://github.com/aspect-build/rules_js/archive/v0.3.1.tar.gz",
)
# Rules Docker requirements
maybe(
http_archive,
name = "io_bazel_rules_docker",
sha256 = "92779d3445e7bdc79b961030b996cb0c91820ade7ffa7edca69273f404b085d5",
strip_prefix = "rules_docker-0.20.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.20.0/rules_docker-v0.20.0.tar.gz"],
)
# Required to offer local dev with ibazel
maybe(
git_repository,
name = "com_github_ash2k_bazel_tools",
commit = "4daedde3ec61a03db841c8a9ca68288972e25a82",
remote = "https://github.com/ash2k/bazel-tools.git",
)
|
Aghassi/rules_spa
|
spa/defs.bzl
|
"Public API re-exports"
load("//spa/private/routes:generate_route_manifest.bzl", _generate_route_manifest = "generate_route_manifest")
load("//spa/private:build_routes.bzl", _build_route = "build_route")
load("//spa/private:build_server.bzl", _build_server = "build_server")
load("//spa/private:host.bzl", _build_host = "build_host")
generate_route_manifest = _generate_route_manifest
build_route = _build_route
build_server = _build_server
build_host = _build_host
|
Aghassi/rules_spa
|
spa/private/build_server.bzl
|
"""A macro for building the server bundle that will end up in a nodejs docker image"""
load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
load("@aspect_rules_swc//swc:swc.bzl", "swc")
# Defines this as an importable module area for shared macros and configs
def build_server(name, srcs, data, **kwargs):
"""
Macro that construct the http server for the project
Args:
name: name of the package
srcs: source files from the package
data: any dependencies needed to build
**kwargs: any other inputs to js_library
"""
# list of all transpilation targets from SWC to be passed to webpack
deps = [
":transpile_" + files.replace("//", "").replace("/", "_").split(".")[0]
for files in srcs
]
# buildifier: disable=no-effect
[
swc(
name = "transpile_" + s.replace("//", "").replace("/", "_").split(".")[0],
args = [
"-C jsc.parser.jsx=true",
"-C jsc.parser.syntax=typescript",
"-C jsc.target=es2015",
"-C module.type=commonjs",
],
srcs = [s],
)
for s in srcs
]
js_library(
name = name,
srcs = deps + data,
**kwargs
)
|
Aghassi/rules_spa
|
spa/private/host.bzl
|
"""A macro for creating a webpack federation host module"""
load("@aspect_rules_swc//swc:swc.bzl", "swc")
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_web")
# Defines this as an importable module area for shared macros and configs
def build_host(entry, data, srcs, webpack, federation_shared_config):
"""
Macro that allows easy building of the main host of a SPA
In addition to the usage of this macro, you will be required to pass in all required node_modules
for compilation as well as anything the shared config relies on (most likely your package.json) into data
Args:
entry: the entry file to the route
data: any dependencies the route needs to build including npm modules
srcs: srcs files to be transpiled and sent to webpack
webpack: the webpack module to invoke. The users must provide their own load statement for webpack before this macro is called
federation_shared_config: a nodejs module file that exposes a map of dependencies to their shared module spec https://webpack.js.org/plugins/module-federation-plugin/#sharing-hints. An example of this is located within this repository under the private/webpack folder.
"""
# list of all transpilation targets from SWC to be passed to webpack
deps = [
":transpile_" + files.replace("//", "").replace("/", "_").split(".")[0]
for files in srcs
] + data
# buildifier: disable=no-effect
[
swc(
name = "transpile_" + s.replace("/", "_").split(".")[0],
args = [
"-C jsc.parser.jsx=true",
"-C jsc.parser.syntax=typescript",
],
srcs = [s],
)
for s in srcs
]
host_config = Label("//spa/private/webpack:webpack.host.config.js")
webpack(
name = "host_build",
args = [
"--env name=host",
"--env entry=./$(location :transpile_host)",
"--env SHARED_CONFIG=$(location %s)" % federation_shared_config,
"--env BAZEL_SRC_PATH=$(execpath :transpile_host)",
"--output-path=$(@D)",
"--config=$(rootpath %s)" % host_config,
],
data = [
host_config,
federation_shared_config,
Label("//spa/private/webpack:webpack.common.config.js"),
Label("//spa/private/webpack:webpack.module-federation.shared.js"),
] + deps,
output_dir = True,
)
pkg_web(
name = "host",
srcs = [
":host_build",
],
additional_root_paths = ["%s/host_build" % native.package_name()],
visibility = ["//visibility:public"],
)
|
jpardobl/pandavro
|
example/example.py
|
<reponame>jpardobl/pandavro<filename>example/example.py
import pandavro as pdx
def main():
weather = pdx.from_avro('weather.avro')
print(weather)
pdx.to_avro('weather_out.avro', weather)
if __name__ == '__main__':
main()
|
duhines/CC-M7
|
action.py
|
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
Class that allows characters to act.
Includes the following classes:
Actions -
used to store and initialize all the actions
Travel -
implements the possibilities and consequences of a traveling in the
world
Injure -
implements the consequences of one character injuring another
Heal -
        implements the consequences of one character healing another
Sleep -
implements the consequences of one character sleeping
Notes:
Any class implementing an action is expected to have a do() method that
does any book-keeping associated with the action being performed and
returns a string representing the consequences of the action.
Any action that involves one character impacting another will need to
be initialized for all the possible recipients of the action in the
get_actions function in Actions.
The path_finding function for the Traveling class currently assumes that
    all the places are connected--this would need to be updated with some kind
of path finding algorithm if every location was not connected to every
other location.
Sometimes, the justification function will be called when the clauses in
    question are all false. No idea why this happens, but it shouldn't.
"""
import random
from random import choice
class Actions:
"""
Class used to store all the actions available to a character.
Implements the following method:
get_actions
"""
def __init__(self, character, locations, characters, nearby_characters, social_network):
self.character = character
self.locations = locations
self.characters = characters
self.nearby_characters = nearby_characters
self.social_network = social_network
self.actions = self.get_actions()
def get_actions(self):
"""
Purpose: return the actions available for a character
"""
# get list of characters that the acting character can interact with
other_characters = self.nearby_characters.copy()
if self.character in other_characters:
other_characters.remove(self.character)
actions = []
actions.append(Travel(self.character, self.locations))
actions.append(Sleep(self.character))
# since we can heal/injure other characters, we need an action for each
# possible character that can be a recipient of one of these actions
for character2 in other_characters:
actions.append(Injure(self.character, character2, self.social_network))
actions.append(Heal(self.character, character2, self.social_network))
return actions
class Travel:
"""
Class that implements a traveling action.
"""
def __init__(self, character, locations):
self.character = character
self.locations = locations
self.pre_condition = self.character.health > 30
def do(self):
"""
Purpose: either travel randomly or travel due to characters goal
"""
character = self.character
locations = self.locations
if self.character.goal != None:
if self.character.goal.place != None:
character.location = self.path_find(character.goal.place)
return character.name + ' traveled to ' + character.location + \
                ' in order to ' + character.goal.statement
else:
options = self.locations['names'].copy()
options.remove(self.character.location)
if len(options) == 0:
return '{} wanders around {}.'.format(self.character.name,
self.character.location)
character.location = choice(options)
return character.name + ' travels to ' + character.location
def path_find(self, goal):
"""
Purpose: if all places were not connected, then we would need to figure
out how to get to where we want to go.
TODO: extend this to work when there is not an edge between every
location!
"""
return goal
class Injure:
"""
Class that implements the injure action.
"""
def __init__(self, character1, character2, social_network):
self.character1 = character1
self.character2 = character2
self.social_network = social_network
self.clause1 = character1.personality.lawful_chaotic < -.5
self.clause2 = character1.personality.good_evil < -.5
self.clause3 = social_network.get_connection(character1, character2).brotherly < -1
self.clause4 = social_network.get_connection(character1, character2).lovers < -1
self.clause5 = character2.health > 1
self.pre_condition = (self.clause1 or self.clause2 or self.clause3 or self.clause4) and self.clause5
def do(self):
"""
Purpose: one character injures another.
"""
# TODO change this so that more animosity results in greater injury
damage = random.randint(10, 50)
self.character2.health -= damage
if damage < 30:
self.social_network.get_connection(self.character2, self.character1).adjust_all(-1)
return '{} injures {} because {}.'.format(self.character1.name,
self.character2.name, self.get_justification())
else:
self.social_network.get_connection(self.character2, self.character1).adjust_all(-2)
return '{} significantly injures {} because {}.'.format(
self.character1.name, self.character2.name, self.get_justification())
def get_justification(self):
"""
Purpose: return a string representing the clause that allowed this
action to happen.
"""
if self.clause1:
            return '{} is chaotic'.format(self.character1.name)
elif self.clause2:
return '{} is evil'.format(self.character1.name)
elif self.clause3:
return '{} feels brotherly hate towards {}'.format(
self.character1.name, self.character2.name)
elif self.clause4:
return '{} feels lover\'s hate towards {}'.format(
self.character1.name, self.character2.name)
else:
return '{} made a mistake'.format(self.character1.name)
class Heal:
"""
Class for the heal action.
"""
def __init__(self, character1, character2, social_network):
self.character1 = character1
self.character2 = character2
self.social_network = social_network
self.clause1 = character1.personality.lawful_chaotic > .5
self.clause2 = character1.personality.good_evil > .5
self.clause3 = social_network.get_connection(character1, character2).brotherly >= 1
self.clause4 = social_network.get_connection(character1, character2).lovers >= 1
self.clause5 = character2.health < 100 and self.character2.health > 0
self.pre_condition = (self.clause1 or self.clause2 or self.clause3 or self.clause4) and self.clause5
def do(self):
"""
Purpose: one character heals another character
"""
heal = random.randint(10, 50)
self.character2.health += heal
if heal < 30:
self.social_network.get_connection(self.character2, self.character1).adjust_all(1)
return '{} heals {} because {}.'.format(self.character1.name,
self.character2.name, self.get_justification())
else:
self.social_network.get_connection(self.character2, self.character1).adjust_all(2)
return '{} significantly heals {} because {}.'.format(
self.character1.name, self.character2.name, self.get_justification())
def get_justification(self):
"""
Purpose: return a string representing the clause that allowed this
action to happen.
"""
if self.clause1:
return '{} is lawful'.format(self.character1.name)
elif self.clause2:
return '{} is good'.format(self.character1.name)
elif self.clause3:
return '{} feels brotherly love towards {}'.format(
self.character1.name, self.character2.name)
elif self.clause4:
return '{} feels lover\'s love towards {}'.format(
self.character1.name, self.character2.name)
else:
return '{} made a mistake'.format(self.character1.name)
class Sleep:
"""
Class for the sleep action.
"""
def __init__(self, character):
self.character = character
self.pre_condition = self.character.health > 20
def do(self):
"""
Purpose: sleep to regain some health.
"""
self.character.health += 30
return '{} sleeps to regain some strength.'.format(self.character.name)
|
duhines/CC-M7
|
knowledge/define_actions.py
|
import action
actions = []
"""
Basic actions:
move from one place to another
fall in love with another character
attack another character
heal another character
"""
|
duhines/CC-M7
|
quests.py
|
<filename>quests.py
# THIS WHOLE FILE IS A BIG TODO
#"""
# Author: <NAME>
# Course: Computational Creativity Fall 2018
# Project: M7: Playing with Words
# Date: last modified 12/13
# Description:
# Notes:
# """
# """
# Want characters to have objectives--when a characters wishes to accomplish
# something in the long term, then they will gain a quest or the narrator
# will give characters quests by introducing random events.
# """
# class Quest:
# def __init__(self, statement):
# self.statement = statement
|
duhines/CC-M7
|
character.py
|
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
Notes:
* personality representation:
    * -1 to 1 rating of the Big Five personality traits
* and/or -1 to 1 rating of evil->good, chaotic->lawful scale.
* perhaps emotions are a modifier for actions:
* ____ washed the dishes, mod: ANGRILY/SADLY/...
"""
import random
import action as action_stuff
class Personality:
"""
Class for representing a character's personality. Personality is quantified
    similarly to DnD (or the Bowdoin outing club LT exercises) where there are
two personality parameters, one measures a scale of good to evil, and the
other measures a scale of lawful to chaotic. These values are represented
on a scale from -1 to 1:
full good would be a value of 1, full lawful would be a value of 1
full evil would be a value of -1, full chaotic would be a value of -1
Implements the following methods:
rand_personality
for_narrative
"""
def __init__(self):
self.good_evil = self.rand_personality()
self.lawful_chaotic = self.rand_personality()
        # TODO add the ability for personalities to change over time
# self.theta = random.random() + .01
# self.firmness = random.random()
def rand_personality(self):
"""
Purpose: return random value [-1, 1]
"""
absolute_value = random.random()
value = None
if random.random() < .5:
value = absolute_value * -1
else:
value = absolute_value
return value
def for_narrative(self):
"""
Purpose: return string representation of the personality.
"""
good = ''
lawful = ''
if self.good_evil < -.75:
good = 'very evil'
elif self.good_evil < -.5:
good = 'evil'
elif self.good_evil < -.25:
good = 'sort of evil'
elif self.good_evil < .25 and self.good_evil > -.25:
good = 'neither good nor evil'
elif self.good_evil < .5:
good = 'sort of good'
elif self.good_evil < .75:
good = 'good'
elif self.good_evil < 1:
good = 'very good'
lawful = ''
if self.lawful_chaotic < -.75:
lawful = 'very chaotic'
elif self.lawful_chaotic < -.5:
lawful = 'chaotic'
elif self.lawful_chaotic < -.25:
lawful = 'sort of chaotic'
elif self.lawful_chaotic < .25 and self.lawful_chaotic > -.25:
lawful = 'neither lawful nor chaotic'
elif self.lawful_chaotic < .5:
lawful = 'sort of lawful'
elif self.lawful_chaotic < .75:
lawful = 'lawful'
elif self.lawful_chaotic < 1:
lawful = 'very lawful'
return '{} and {}'.format(good, lawful)
# This would be a big TODO if there was more time
# def adjust_personality(self, demo_good_evil, demo_lawful_chaotic):
# """
# Purpose: If a character ends up making an action that contradicts
# their personality, then adjust their personality values with
# respect to this characters personality maleability (theta). Note,
# characters make actions outside their
# """
# self.good_evil = self.demo_good_evil * (1 - self.theta) + demo_good_evil * self.theta
# self.lawful_chaotic = self.lawful_chaotic * (1 - self.theta) + demo_lawful_chaotic * self.theta
class Connection:
"""
Purpose: detail the connection between one character and another.
Currently using the 4 types model from MEXICA:
1. brotherly-love / hate
2. lover's-love / hate
3. gratefulness / ingratitude
4. admiration, respect / disdain, disapproval
Includes the following methods:
init_value
adjust_all
adjust_brotherly
adjust_lovers
adjust_gratefulness
adjust_admiration
normalize_value
get_string
get_values
"""
def __init__(self):
self.brotherly = self.init_value()
self.lovers = self.init_value()
self.gratefulness = self.init_value()
self.admiration = self.init_value()
def init_value(self):
"""
Purpose: return an integer between -3 and 3 for now
"""
absolute_value = random.randint(0, 3)
value = None
if random.random() < .5:
value = absolute_value * -1
else:
value = absolute_value
return value
def adjust_all(self, amount):
"""
Purpose: adjust all the connection values.
"""
self.adjust_brotherly(amount)
self.adjust_lovers(amount)
self.adjust_gratefulness(amount)
self.adjust_admiration(amount)
def adjust_brotherly(self, amount):
"""
Purpose: adjust a single connection value.
"""
self.brotherly = self.normalize_value(self.brotherly + amount)
def adjust_lovers(self, amount):
"""
Purpose: adjust a single connection value.
"""
self.lovers = self.normalize_value(self.lovers + amount)
def adjust_gratefulness(self, amount):
"""
Purpose: adjust a single connection value.
"""
self.gratefulness = self.normalize_value(self.gratefulness + amount)
def adjust_admiration(self, amount):
"""
Purpose: adjust a single connection value.
"""
self.admiration = self.normalize_value(self.admiration + amount)
def normalize_value(self, value):
"""
Purpose: adjust a value so that it remains in the range[-3, 3]
        TODO: expand this so it doesn't use magic numbers
"""
# keep values in the bounds (sorry that these are magic numbers)
# TODO: flesh out connection system more
if value > 3:
return 3
if value < -3:
return -3
return value
def get_string(self):
"""
Purpose: return a string representation of a connection.
brotherly-love / hate
2. lover's-love / hate
3. gratefulness / ingratitude
4. admiration, respect / disdain, disapproval
"""
string = 'brotherly-love / hate: {}, lover\'s-love / hate: {},' +\
' gratefulness/ ingratitude: {}, respect / disdain: {}. '
return string.format(self.brotherly, self.lovers, self.gratefulness,
self.admiration)
def get_values(self):
"""
Purpose: return a list of the 4 connection parameters.
"""
return [self.brotherly, self.lovers, self.gratefulness, self.admiration]
class SocialNetwork:
"""
Purpose: capture characters knowledge of each other and feelings towards the
other characters.
Use emotional links inspired by the MEXICA system:
network = [character1_name->character2_name: connection]
Includes the following methods:
generate_network
get_connection
for_narrative
for_fitness
"""
def __init__(self, characters):
self.characters = characters
self.network = self.generate_network()
def generate_network(self):
"""
Purpose: generate a connection between all the characters
"""
network = {}
for character1 in self.characters:
all_other_characters = self.characters.copy()
all_other_characters.remove(character1)
for character2 in all_other_characters:
# instantiate with a random connection
network[character1.name+character2.name] = Connection()
return network
def get_connection(self, character1, character2):
"""
        Purpose: get the connection between two characters.
"""
connection = self.network[character1.name+character2.name]
return connection
def for_narrative(self):
"""
Purpose: return a string summarizing the characters connections.
"""
connection_details = []
for character1 in self.characters:
all_other_characters = self.characters.copy()
all_other_characters.remove(character1)
for character2 in all_other_characters:
connection = self.get_connection(character1, character2)
connection_details.append('Connection from {} to {} is {}'\
.format(character1.name, character2.name, connection.get_string()))
return connection_details
def for_fitness(self):
"""
Purpose: return the connection information in a way that's easy to use
to evaluate narrative fitness.
"""
connection_details = []
for character1 in self.characters:
all_other_characters = self.characters.copy()
all_other_characters.remove(character1)
for character2 in all_other_characters:
connection = self.get_connection(character1, character2)
connection_details.append(connection.get_values())
return connection_details
class Character:
"""
Character class that includes the following methods:
check_health
can_act
action_maker
tell_personality
"""
def __init__(self, name, personality, goal, health, location):
self.name = name
self.personality = personality
self.goal = goal
self.health = health
self.location = location
self.alive = True
# TODO: maybe add emotions: self.emotions = emotions
def __str__(self):
return self.name
def __repr__(self):
return 'Character({}, {}, {}, {}, {})'.format(self.name, self.personality,
self.goal, self.health, self.location)
def check_health(self):
"""
Purpose: if a character has less than 1 health, then they are dead.
"""
if self.health < 1:
self.alive = False
def can_act(self, locations, characters, nearby_characters, social_network):
"""
Purpose: check to see if this character can do anything.
"""
all_actions = action_stuff.Actions(self, locations, characters,
nearby_characters, social_network).actions
        # keep only actions whose pre-conditions hold (avoids mutating the list
        # while iterating over it, which would skip elements)
        all_actions = [action for action in all_actions if action.pre_condition]
if len(all_actions) == 0:
return False
return True
def action_maker(self, locations, characters, nearby_characters, social_network):
"""
Purpose: choose an action that works the character towards their goal and is
consistent with personality + emotions.
"""
self.health -= 5
self.check_health()
if not self.alive:
return '{} has died.'.format(self.name)
all_actions = action_stuff.Actions(self, locations, characters,
nearby_characters, social_network).actions
        # keep only actions whose pre-conditions hold
        all_actions = [action for action in all_actions if action.pre_condition]
if len(all_actions) == 0:
return '{} does nothing.'.format(self.name)
act = random.choice(all_actions)
return act.do()
def tell_personality(self):
"""
        Purpose: return a string expressing this character's personality.
"""
return '{} is {}.'.format(self.name, self.personality.for_narrative())
|
duhines/CC-M7
|
clean_locations.py
|
<filename>clean_locations.py
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
This script is used to clean the originally copied and pasted location
data.
Notes:
The way that locations are processed, each word of a multi-word location
becomes a unique location. This results in some odd location names, but
simplifies the locations by ensuring that all of them are just a single
word.
"""
file = open('knowledge/locations.txt')
locations = []
locations_so_far = set()
for line in file.readlines():
as_string = str(line).replace('*', '')
words = as_string.split()
for word in words:
if word not in locations_so_far:
locations.append(word + '\n')
locations_so_far.add(word)
write_to = open('knowledge/cleaned_locations.txt', 'w')
for location in locations:
write_to.write(location)
|
duhines/CC-M7
|
knowledge.py
|
<filename>knowledge.py
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
File to set up knowledge by reading in information from a names and
locations file in the knowledge base.
Notes:
    Assumes that the names and locations files are each a series
of items separated by a newline.
"""
LOCATIONS_FILE = 'cleaned_locations.txt'
NAMES_FILES = 'cleaned_names.txt'
ACTIONS_FILE = 'actions.json'
EXTENTION = 'knowledge/'
class Knowledge:
def __init__(self):
self.names = self.get_names()
self.locations = self.get_locations()
def get_names(self):
"""
Purpose: read in names from the cleaned_names files and save them
in a list object.
"""
file = open(EXTENTION + NAMES_FILES)
names = []
for line in file.readlines():
names.append(line.strip())
return names
def get_locations(self):
"""
Purpose: read in the locations from the cleaned_locations file and
load them into a list object.
The relationship between locations will be established by the narrator.
"""
file = open(EXTENTION + LOCATIONS_FILE)
locations = []
for line in file.readlines():
locations.append(line.strip())
return locations
|
duhines/CC-M7
|
narrator.py
|
<reponame>duhines/CC-M7
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
This module implements 3 classes:
1. Narrator -
the entity that maintains the story knowledge such as setting and
characters and keeps track of other meta data as the narrative
progresses
2. Event -
            used as an object to store the time and action of a narrative
event.
3. Episode -
Structure that uses the narrator to generate a narrative of some
length.
Notes:
* output will be a text script of TV episodes (maybe build this up to
seasons?)
"""
import random
import knowledge as knows
import character
from random import choice
import os
NUM_CHARACTERS = 10
NUM_LOCATIONS = 5
RATE_LOCATION_CONNECTED = 1
NUM_ACTIONS = 25
NUM_SCRIPTS = 100
RESULTS_PATH = 'results'
RESULTS_NAME = 'script_'
class Narrator:
"""
Purpose: control the flow of the story / keep track of story information
Includes the following methods:
characters_at
can_act
change_location
init_locations
init_characters
"""
def __init__(self):
self.knowledge = knows.Knowledge()
self.locations = self.init_locations()
self.characters = self.init_characters()
self.current_location = choice(self.locations['names'])
self.social_network = character.SocialNetwork(self.characters)
self.story_over = False
def characters_at(self, location):
"""
Purpose: return a list of characters in a location.
"""
characters = []
for character in self.characters:
if character.location == location:
characters.append(character)
return characters
def can_act(self, nearby_characters):
"""
        Purpose: return a sublist of characters that can actually make an
action.
"""
can_act = nearby_characters.copy()
        for character in nearby_characters:
            # dead characters are dropped first; the elif prevents removing the same
            # character twice (which would raise a ValueError)
            if not character.alive:
                can_act.remove(character)
            elif not character.can_act(self.locations, self.characters,
                                       nearby_characters, self.social_network):
                can_act.remove(character)
return can_act
def change_location(self):
"""
Purpose: change the current_location to a new location.
"""
possibilities = self.locations['names'].copy()
# don't want to switch to the current location
possibilities.remove(self.current_location)
# don't want to focus on a location without any characters
with_characters = possibilities.copy()
for possibility in possibilities:
if self.characters_at(possibility) == []:
with_characters.remove(possibility)
        # if no other location has characters, there is nowhere left to shift to
        if len(with_characters) == 0:
return 'There are no characters left.'
else:
new_location = choice(with_characters)
self.current_location = new_location
return 'We now shift our attention to {}'.format(self.current_location)
def init_locations(self, num_locations=NUM_LOCATIONS):
"""
Purpose: create a kind of world map where there are locations from the
knowledge base connected by edges.
"""
chosen_locations = []
for i in range(0, num_locations):
location = choice(self.knowledge.locations)
self.knowledge.locations.remove(location)
chosen_locations.append(location)
# dictionary to express connections between locations:
# connections['location'] = [True, True, False, False,...]
# ->True if there is a connection between the two locations, false
# otherwise
connections = {}
for location in chosen_locations:
options = chosen_locations.copy()
options.remove(location)
connected = []
for option in options:
connected.append(random.random() < RATE_LOCATION_CONNECTED)
connections[location] = connected
locations = {
'names': chosen_locations,
'connections': connections
}
return locations
def init_characters(self, num_characters=NUM_CHARACTERS):
"""
Purpose: create some character objects
"""
characters = []
for i in range(0, num_characters):
# def __init__(self, name, personality, goal, health, location):
name = choice(self.knowledge.names)
# don't want multiple characters to have the same name
self.knowledge.names.remove(name)
personality = character.Personality()
goal = None
health = random.randint(0, 100)
location = choice(self.locations['names'])
characters.append(character.Character(name, personality, goal, health, location))
return characters
class Event:
"""
    Keep track of the time that an event occurred during. Using a class
because dot notation is better than dictionaries.
"""
def __init__(self, time, action):
self.time = time
self.action = action
# TODO: flesh things out so that the same narrator can create multiple episodes
# of narrative (as long as there are still characters left!)
# class Season:
# """
#     Purpose: preserve details of characters and setting across episodes
# """
# def __init__(self, characters, setting):
# return
class Episode:
"""
Purpose: generated scripts organized into episodes, multiple
episodes can be told by the same narrator (i.e. same characters
and setting).
Includes the following methods:
write_narrative
write_script
get_narrative_from_action
evaluate
output_script
"""
def __init__(self, narrator, num_actions=NUM_ACTIONS):
self.narrator = narrator
self.narrative = []
self.script = []
self.num_actions = num_actions
self.time = 0
self.starting_connections = []
self.final_connections = []
self.score = -1
def write_narrative(self):
"""
Purpose: determine the sequences of actions that make up the script.
"""
narrator = self.narrator
curr_acts = 0
events = []
opener = 'We begin our story in {}. '.format(narrator.current_location)
for character in narrator.characters_at(narrator.current_location):
opener += '{} is here. '.format(character)
opener += '\n'
# this would be used for a string representation of the social
# structure, but we don't use this
initial_social_structre = narrator.social_network.for_narrative()
self.starting_connections = narrator.social_network.for_fitness()
for character in narrator.characters:
opener += character.tell_personality() + '\n'
events.append(Event(0, opener))
while curr_acts < self.num_actions:
# want to only be focused on locations where there are characters than
# can do things
possible_characters = narrator.characters_at(narrator.current_location)
can_act = narrator.can_act(possible_characters)
if len(can_act) == 0:
change_narrative_location = narrator.change_location()
events.append(Event(self.time, change_narrative_location))
if change_narrative_location == 'There are no characters left.':
# the story is OVER
narrator.story_over = True
break
else:
next_actor = choice(can_act)
act = next_actor.action_maker(narrator.locations,
narrator.characters, possible_characters, narrator.social_network)
events.append(Event(self.time, act))
curr_acts += 1
self.time += 1
self.final_connections = narrator.social_network.for_fitness()
closer = 'And thus ends the episode. \n'
# this would be used for a string representation of the social
# structure, but we don't use this
end_social_structre = narrator.social_network.for_narrative()
events.append(Event(self.time, closer))
self.narrative = events
def write_script(self):
"""
Purpose: from a list of actions, get the narrative elements from each
event.
"""
for event in self.narrative:
self.script.append(self.get_narrative_from_action(event))
def get_narrative_from_action(self, event):
"""
Purpose: translate a event into a string representing the event.
"""
# TODO: set up way of getting narrative form that's more interesting
# (BIG TODO)
return str(event.action.strip())
    def evaluate(self):
        """
        Purpose: evaluate an episode of the script by summing the change
        in the connection parameters between the characters.
"""
changes = []
total_theta = 0
for i in range(0, len(self.starting_connections)):
for j in range(0, len(self.starting_connections[i])):
total_theta += abs(self.starting_connections[i][j] -
self.final_connections[i][j])
self.score = total_theta
return total_theta
def output_script(self):
"""
Purpose: write the script to the output folder.
"""
# list the number of things in results folder, use that number
# for a unique file name
num_results = len(os.listdir(RESULTS_PATH))
file_name = RESULTS_NAME + str(num_results) + '.txt'
file = open(RESULTS_PATH + '/' + file_name, 'w')
for event in self.script:
file.write(event + '\n')
def main():
narrator = Narrator()
episodes = []
bsf = []
bsf_score = 0
# generate 100 scripts using this narrator and choose the one with the
# highest fitness, which is currently a measure of how dramatically
# character connection values changed from the start of the episode
# to the end.
for i in range(0, NUM_SCRIPTS):
episode = Episode(narrator)
episode.write_narrative()
episode.write_script()
        episode.evaluate()
if episode.score > bsf_score:
bsf = episode
bsf_score = episode.score
bsf.output_script()
if __name__ == '__main__':
main()
|
duhines/CC-M7
|
clean_names.py
|
"""
Author: <NAME>
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
This is a script to clean names. The names are from the top 100 boys
and girls names from 2018 from bounty.com
Notes:
Assumes the format of the original names file has the name first on
each line.
"""
file = open('knowledge/names.txt')
cleaned = []
for line in file.readlines():
if line == '\n':
continue
cleaned_name = line.split()[0]
cleaned.append(cleaned_name + '\n')
file.close()
cleaned_file = open('knowledge/cleaned_names.txt', 'w')
for name in cleaned:
cleaned_file.write(name)
cleaned_file.close()
|
GoHelpFund/aden_hash
|
setup.py
|
from distutils.core import setup, Extension
help_hash_module = Extension('help_hash',
sources = ['helpmodule.c',
'help.c',
'sha3/blake.c',
'sha3/bmw.c',
'sha3/groestl.c',
'sha3/jh.c',
'sha3/keccak.c',
'sha3/skein.c',
'sha3/cubehash.c',
'sha3/echo.c',
'sha3/luffa.c',
'sha3/simd.c',
'sha3/shavite.c'],
include_dirs=['.', './sha3'])
setup (name = 'help_hash',
version = '1.3.1',
description = 'Binding for Help X11 proof of work hashing.',
ext_modules = [help_hash_module])
|
GoHelpFund/aden_hash
|
test.py
|
<filename>test.py
import help_hash
from binascii import unhexlify, hexlify
import unittest
# help block #1
# moo@b1:~/.help$ helpd getblockhash 1
# 000007d91d1254d60e2dd1ae580383070a4ddffa4c64c2eeb4a2f9ecc0414343
# moo@b1:~/.help$ helpd getblock 000007d91d1254d60e2dd1ae580383070a4ddffa4c64c2eeb4a2f9ecc0414343
# {
# "hash" : "000007d91d1254d60e2dd1ae580383070a4ddffa4c64c2eeb4a2f9ecc0414343",
# "confirmations" : 169888,
# "size" : 186,
# "height" : 1,
# "version" : 2,
# "merkleroot" : "ef3ee42b51e2a19c4820ef182844a36db1201c61eb0dec5b42f84be4ad1a1ca7",
# "tx" : [
# "ef3ee42b51e2a19c4820ef182844a36db1201c61eb0dec5b42f84be4ad1a1ca7"
# ],
# "time" : 1390103681,
# "nonce" : 128987,
# "bits" : "1e0ffff0",
# "difficulty" : 0.00024414,
# "previousblockhash" : "00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6",
# "nextblockhash" : "00000bafcc571ece7c5c436f887547ef41b574e10ef7cc6937873a74ef1efeae"
# }
header_hex = ("02000000" +
"b67a40f3cd5804437a108f105533739c37e6229bc1adcab385140b59fd0f0000" +
"a71c1aade44bf8425bec0deb611c20b16da3442818ef20489ca1e2512be43eef"
"814cdb52" +
"f0ff0f1e" +
"dbf70100")
best_hash = '434341c0ecf9a2b4eec2644cfadf4d0a07830358aed12d0ed654121dd9070000'
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.block_header = unhexlify(header_hex)
self.best_hash = best_hash
def test_help_hash(self):
self.pow_hash = hexlify(help_hash.getPoWHash(self.block_header))
self.assertEqual(self.pow_hash.decode(), self.best_hash)
if __name__ == '__main__':
unittest.main()
|
LoYungSum/gaze_correction_loyungsum
|
gaze_correction_system/flx.py
|
import tensorflow as tf
import numpy as np
import tf_utils
import transformation
img_crop = 3
def gen_agl_map(inputs, height, width,feature_dims):
with tf.name_scope("gen_agl_map"):
batch_size = tf.shape(inputs)[0]
ret = tf.reshape(tf.tile(inputs,tf.constant([1,height*width])), [batch_size,height,width,feature_dims])
return ret
def encoder(inputs, height, width, tar_dim):
with tf.variable_scope('encoder'):
dnn_blk_0 = tf_utils.dnn_blk(inputs, 16, name = 'dnn_blk_0')
dnn_blk_1 = tf_utils.dnn_blk(dnn_blk_0, 16, name = 'dnn_blk_1')
dnn_blk_2 = tf_utils.dnn_blk(dnn_blk_1, tar_dim, name = 'dnn_blk_2')
agl_map = gen_agl_map(dnn_blk_2, height, width, tar_dim)
return agl_map
def apply_lcm(batch_img, light_weight):
with tf.name_scope('apply_lcm'):
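        # Split the predicted weights into an image weight and a "palette" weight and
        # blend the input with a constant white palette; this acts as a per-pixel
        # lightness-correction step for the eye region.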
img_wgts, pal_wgts = tf.split(light_weight, [1,1], 3)
img_wgts = tf.tile(img_wgts, [1,1,1,3])
pal_wgts = tf.tile(pal_wgts, [1,1,1,3])
palette = tf.ones(tf.shape(batch_img), dtype = tf.float32)
ret = tf.add(tf.multiply(batch_img, img_wgts), tf.multiply(palette, pal_wgts))
return ret
def trans_module(inputs, structures, phase_train, name="trans_module"):
with tf.variable_scope(name) as scope:
cnn_blk_0 = tf_utils.cnn_blk(inputs, structures['depth'][0], structures['filter_size'][0], phase_train, name = 'cnn_blk_0')
cnn_blk_1 = tf_utils.cnn_blk(cnn_blk_0, structures['depth'][1], structures['filter_size'][1], phase_train, name = 'cnn_blk_1')
cnn_blk_2 = tf_utils.cnn_blk(tf.concat([cnn_blk_0,cnn_blk_1], axis=3), structures['depth'][2], structures['filter_size'][2], phase_train, name = 'cnn_blk_2')
cnn_blk_3 = tf_utils.cnn_blk(tf.concat([cnn_blk_0,cnn_blk_1,cnn_blk_2], axis=3), structures['depth'][3], structures['filter_size'][3], phase_train, name = 'cnn_blk_3')
cnn_4 = tf.layers.conv2d(inputs=cnn_blk_3, filters=structures['depth'][4], kernel_size=structures['filter_size'][4], padding="same", activation=None, use_bias=False, name="cnn_4")
return cnn_4
def lcm_module(inputs, structures, phase_train, name="lcm_module"):
with tf.variable_scope(name) as scope:
cnn_blk_0 = tf_utils.cnn_blk(inputs, structures['depth'][0], structures['filter_size'][0], phase_train, name = 'cnn_blk_0')
cnn_blk_1 = tf_utils.cnn_blk(cnn_blk_0, structures['depth'][1], structures['filter_size'][1], phase_train, name = 'cnn_blk_1')
cnn_2 = tf.layers.conv2d(inputs=cnn_blk_1, filters=structures['depth'][2], kernel_size=structures['filter_size'][2], padding="same", activation=None, use_bias=False, name='cnn_2')
lcm_map = tf.nn.softmax(cnn_2)
return lcm_map
def inference(input_img, input_fp, input_agl, phase_train, conf):
"""Build the Deepwarp model.
Args: images, anchors_map of eye, angle
        Returns: the predicted (corrected) eye image, the raw flow field, and the LCM map
"""
corse_layer = {'depth':(32,64,64,32,16), 'filter_size':([5,5],[3,3],[3,3],[3,3],[1,1])}
fine_layer = {'depth':(32,64,32,16,4), 'filter_size':([5,5],[3,3],[3,3],[3,3],[1,1])}
lcm_layer = {'depth':(8,8,2), 'filter_size':([3,3],[3,3],[1,1])}
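    # Each dict lists, block by block, the output channels ('depth') and kernel sizes
    # ('filter_size') used by the conv blocks of the corresponding module below.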
with tf.variable_scope('warping_model'):
agl_map = encoder(input_agl, conf.height, conf.width, conf.encoded_agl_dim)
igt_inputs = tf.concat([input_img, input_fp, agl_map],axis=3)
with tf.variable_scope('warping_module'):
'''coarse module'''
resized_igt_inputs = tf.layers.average_pooling2d(inputs=igt_inputs, pool_size=[2,2], strides=2, padding='same')
cours_raw = trans_module(resized_igt_inputs, corse_layer, phase_train, name='coarse_level')
cours_act = tf.nn.tanh(cours_raw)
coarse_resize = tf.image.resize_images(cours_act, (conf.height, conf.width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
coarse_out = tf.layers.average_pooling2d(inputs=coarse_resize, pool_size=[2, 2], strides=1, padding='same')
'''fine module'''
fine_input = tf.concat([igt_inputs, coarse_out],axis=3, name='fine_input')
fine_out = trans_module(fine_input, fine_layer, phase_train, name='fine_level')
flow_raw, lcm_input = tf.split(fine_out, [2,2], 3)
flow = tf.nn.tanh(flow_raw)
cfw_img = transformation.apply_transformation(flows = flow, img = input_img, num_channels=3)
'''lcm module'''
lcm_map = lcm_module(lcm_input, lcm_layer, phase_train, name="lcm_module")
img_pred = apply_lcm(batch_img=cfw_img, light_weight=lcm_map)
return img_pred, flow_raw, lcm_map
def dist_loss(y_pred, y_, method="MAE"):
with tf.variable_scope('img_dist_loss'):
loss = 0
if(method == "L2"):
loss = tf.sqrt(tf.reduce_sum(tf.square(y_pred - y_), axis=3, keep_dims = True))
elif (method == "MAE"):
loss = tf.abs(y_pred - y_)
loss = loss[:,img_crop:(-1)*img_crop,img_crop:(-1)*img_crop,:]
loss = tf.reduce_sum(loss, axis = [1,2,3])
return tf.reduce_mean(loss, axis=0)
def TVloss(inputs):
with tf.variable_scope('TVloss'):
dinputs_dx = inputs[:, :-1, :, :] - inputs[:, 1:, :, :]
dinputs_dy = inputs[:, :, :-1, :] - inputs[:, :, 1:, :]
dinputs_dx = tf.pad(dinputs_dx, [[0,0],[0, 1], [0, 0],[0,0]], "CONSTANT")
dinputs_dy = tf.pad(dinputs_dy, [[0,0],[0, 0], [0, 1],[0,0]], "CONSTANT")
tot_var = tf.add(tf.abs(dinputs_dx), tf.abs(dinputs_dy))
tot_var = tf.reduce_sum(tot_var, axis =3, keep_dims=True)
return tot_var
def TVlosses(eye_mask, ori_img, flow, lcm_map):
with tf.variable_scope('TVlosses'):
# eyeball_TVloss
# calculate TV (dFlow(p)/dx + dFlow(p)/dy)
TV_flow = TVloss(flow)
# calculate the (1-D(p))
img_gray = tf.reduce_mean(ori_img, axis = 3, keep_dims=True)
ones = tf.ones(shape = tf.shape(img_gray))
bright = ones - img_gray
# calculate the F_e(p)
eye_mask = tf.expand_dims(eye_mask, axis = 3)
weights = tf.multiply(bright,eye_mask)
TV_eye = tf.multiply(weights,TV_flow)
# eyelid_TVloss
lid_mask = ones - eye_mask
TV_lid = tf.multiply(lid_mask,TV_flow)
TV_eye = tf.reduce_sum(TV_eye, axis = [1,2,3])
TV_lid = tf.reduce_sum(TV_lid, axis = [1,2,3])
# lcm_map loss
dist2cent = center_weight(tf.shape(lcm_map), base=0.005, boundary_penalty=3.0)
TV_lcm = dist2cent*TVloss(lcm_map)
TV_lcm = tf.reduce_sum(TV_lcm, axis = [1,2,3])
return tf.reduce_mean(TV_eye, axis=0), tf.reduce_mean(TV_lid, axis=0), tf.reduce_mean(TV_lcm, axis=0)
def center_weight(shape, base=0.005, boundary_penalty=3.0):
with tf.variable_scope('center_weight'):
temp = boundary_penalty - base
x = tf.pow(tf.abs(tf.lin_space(-1.0, 1.0,shape[1])),8)
y = tf.pow(tf.abs(tf.lin_space(-1.0, 1.0,shape[2])),8)
X, Y = tf.meshgrid(y, x)
X = tf.expand_dims(X, axis=2)
Y = tf.expand_dims(Y, axis=2)
dist2cent = temp*tf.sqrt(tf.reduce_sum(tf.square(tf.concat([X,Y], axis=2)), axis=2)) + base
dist2cent = tf.expand_dims(tf.tile(tf.expand_dims(dist2cent, axis=0), [shape[0],1,1]), axis=3)
return dist2cent
def lcm_adj(lcm_wgt):
dist2cent = center_weight(tf.shape(lcm_wgt), base=0.005, boundary_penalty=3.0)
with tf.variable_scope('lcm_adj'):
_, loss = tf.split(lcm_wgt, [1,1], 3)
loss = tf.reduce_sum(tf.abs(loss)*dist2cent, axis = [1,2,3])
return tf.reduce_mean(loss, axis=0)
def loss(img_pred, img_, eye_mask, input_img, flow, lcm_wgt):
with tf.variable_scope('losses'):
loss_img = dist_loss(img_pred, img_, method = "L2")
loss_eyeball, loss_eyelid, loss_lcm= TVlosses(eye_mask, input_img, flow, lcm_wgt)
loss_lcm_adj = lcm_adj(lcm_wgt)
losses = loss_img + loss_eyeball + loss_eyelid + loss_lcm_adj + loss_lcm
tf.add_to_collection('losses', losses)
return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_img
|
LoYungSum/gaze_correction_loyungsum
|
training/code_tf/model_train/transformation.py
|
import tensorflow as tf
def repeat(x, num_repeats):
with tf.name_scope("repeat"):
ones = tf.ones((1, num_repeats), dtype='int32')
x = tf.reshape(x, shape=(-1,1))
x = tf.matmul(x, ones)
return tf.reshape(x, [-1])
def interpolate(image, x, y, output_size):
with tf.name_scope("interpolate"):
batch_size = tf.shape(image)[0]
height = tf.shape(image)[1]
width = tf.shape(image)[2]
num_channels = tf.shape(image)[3]
x = tf.cast(x , dtype='float32')
y = tf.cast(y , dtype='float32')
height_float = tf.cast(height, dtype='float32')
width_float = tf.cast(width, dtype='float32')
output_height = output_size[0]
output_width = output_size[1]
x = .5*(x + 1.0)*(width_float)
y = .5*(y + 1.0)*(height_float)
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
max_y = tf.cast(height - 1, dtype='int32')
max_x = tf.cast(width - 1, dtype='int32')
zero = tf.zeros([], dtype='int32')
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
flat_image_dimensions = height*width
pixels_batch = tf.range(batch_size)*flat_image_dimensions
flat_output_dimensions = output_height*output_width
base = repeat(pixels_batch, flat_output_dimensions)
base_y0 = base + y0*width
base_y1 = base + y1*width
indices_a = base_y0 + x0
indices_b = base_y1 + x0
indices_c = base_y0 + x1
indices_d = base_y1 + x1
flat_image = tf.reshape(image, shape=(-1, num_channels))
flat_image = tf.cast(flat_image, dtype='float32')
pixel_values_a = tf.gather(flat_image, indices_a)
pixel_values_b = tf.gather(flat_image, indices_b)
pixel_values_c = tf.gather(flat_image, indices_c)
pixel_values_d = tf.gather(flat_image, indices_d)
x0 = tf.cast(x0, 'float32')
x1 = tf.cast(x1, 'float32')
y0 = tf.cast(y0, 'float32')
y1 = tf.cast(y1, 'float32')
area_a = tf.expand_dims(((x1 - x) * (y1 - y)), 1)
area_b = tf.expand_dims(((x1 - x) * (y - y0)), 1)
area_c = tf.expand_dims(((x - x0) * (y1 - y)), 1)
area_d = tf.expand_dims(((x - x0) * (y - y0)), 1)
output = tf.add_n([area_a*pixel_values_a,
area_b*pixel_values_b,
area_c*pixel_values_c,
area_d*pixel_values_d])
return output
def meshgrid(height, width):
with tf.name_scope("meshgrid"):
y_linspace = tf.linspace(-1., 1., height)
x_linspace = tf.linspace(-1., 1., width)
x_coordinates, y_coordinates = tf.meshgrid(x_linspace, y_linspace)
y_coordinates = tf.expand_dims(tf.reshape(y_coordinates, [-1]),0)
x_coordinates = tf.expand_dims(tf.reshape(x_coordinates, [-1]),0)
indices_grid = tf.concat([x_coordinates, y_coordinates], 0)
return indices_grid
def apply_transformation(flows, img, num_channels):
with tf.name_scope("apply_transformation"):
batch_size = tf.shape(img)[0]
height = tf.shape(img)[1]
width = tf.shape(img)[2]
# num_channels = tf.shape(img)[3]
output_size = (height, width)
flow_channels = tf.shape(flows)[3]
flows = tf.reshape(tf.transpose(flows, [0, 3, 1, 2]), [batch_size, flow_channels, height*width])
indices_grid = meshgrid(height, width)
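        # The flow offsets live in the same normalized [-1, 1] coordinate space as the
        # meshgrid, so adding them gives the source sampling location for every pixel.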
transformed_grid = tf.add(flows, indices_grid)
x_s = tf.slice(transformed_grid, [0, 0, 0], [-1, 1, -1])
y_s = tf.slice(transformed_grid, [0, 1, 0], [-1, 1, -1])
x_s_flatten = tf.reshape(x_s, [-1])
y_s_flatten = tf.reshape(y_s, [-1])
transformed_image = interpolate(img, x_s_flatten, y_s_flatten, (height, width))
transformed_image = tf.reshape(transformed_image, [batch_size, height, width, num_channels])
return transformed_image
|
LoYungSum/gaze_correction_loyungsum
|
training/code_tf/model_train/tf_utils.py
|
import tensorflow as tf
def batch_norm(x, train_phase, name='bn_layer'):
#with tf.variable_scope(name) as scope:
batch_norm = tf.layers.batch_normalization(
inputs=x,
momentum=0.9, epsilon=1e-5,
center=True, scale=True,
training = train_phase,
name=name
)
return batch_norm
def cnn_blk(inputs, filters, kernel_size, phase_train, name = 'cnn_blk'):
with tf.variable_scope(name) as scope:
cnn = tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, padding="same", activation=None, use_bias=False, name="cnn")
act = tf.nn.relu(cnn, name= "act")
ret = batch_norm(act, phase_train)
return ret
def dnn_blk(inputs, nodes, name = 'dnn_blk'):
with tf.variable_scope(name) as scope:
dnn = tf.layers.dense(inputs=inputs, units=nodes, activation=None, name="dnn")
ret = tf.nn.relu(dnn, name= "act")
return ret
|
LoYungSum/gaze_correction_loyungsum
|
training/code_tf/model_train/config.py
|
#-*- coding: utf-8 -*-
import argparse
model_config = argparse.ArgumentParser()
# model
model_config.add_argument('--height', type=eval, default=48, help='')
model_config.add_argument('--width', type=eval, default=64, help='')
model_config.add_argument('--channel', type=eval, default=3, help='')
model_config.add_argument('--agl_dim', type=eval, default=2, help='')
model_config.add_argument('--encoded_agl_dim', type=eval, default=16, help='')
model_config.add_argument('--early_stop', type=eval, default=16, help='')
# hyper parameter
model_config.add_argument('--lr', type=eval, default=0.001, help='')
model_config.add_argument('--epochs', type=eval, default=500, help='')
model_config.add_argument('--batch_size', type=eval, default=256, help='')
model_config.add_argument('--dataset', type=str, default='dirl_48x64_example', help='')
# training parameter
model_config.add_argument('--tar_model', type=str, default='flx', help='')
# model_config.add_argument('--tar_model', type=str, default='deepwarp', help='')
model_config.add_argument('--loss_combination', type=str, default='l2sc', help='')
model_config.add_argument('--ef_dim', type=eval, default=12, help='')
model_config.add_argument('--eye', type=str, default="L", help='')
#load trained weight
model_config.add_argument('--load_weights', type=bool, default=False, help='')
model_config.add_argument('--easy_mode', type=bool, default=True, help='')
# model_config.add_argument('--load_weights', type=bool, default=True, help='')
# model_config.add_argument('--easy_mode', type=bool, default=False, help='')
# folders' path
model_config.add_argument('--tb_dir', type=str, default='TFboard/', help='')
model_config.add_argument('--data_dir', type=str, default='../../dataset/', help='')
model_config.add_argument('--train_dir', type=str, default='training_inputs/', help='')
model_config.add_argument('--valid_dir', type=str, default='valid_inputs/', help='')
model_config.add_argument('--weight_dir', type=str, default='pt_ckpt/', help='')
def get_config():
config, unparsed = model_config.parse_known_args()
print(config)
return config, unparsed
|
LoYungSum/gaze_correction_loyungsum
|
gaze_correction_system/focal_length_calibration.py
|
<filename>gaze_correction_system/focal_length_calibration.py
#!/usr/bin/env python
# coding: utf-8
# # Parameter settings
# In[6]:
import dlib
# install dlib by "pip install cmake dlib"
import cv2
import numpy as np
# In[7]:
# Please place your head in front of the camera about 50 cm
d = 60 # cm
# Please set your interpupillary distance (the distance between two eyes) in the code
# or you can just set it to the average distance 6.3 cm
P_IPD = 6.3 # cm
# default image resolution
video_res = [1280,720]
# define the face detector from Dlib package
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./lm_feat/shape_predictor_68_face_landmarks.dat")
# detect faces at a smaller resolution for detection efficiency
face_detect_size = [320,240]
# In[8]:
def get_eye_pos(shape, pos = "L"):
if(pos == "R"):
        lc = 36  # idx for the left corner of the right eye
        rc = 39  # idx for the right corner of the right eye
        FP_seq = [36,37,38,39,40,41]  # landmarks for the right eye
    elif(pos == "L"):
        lc = 42  # idx for the left corner of the left eye
        rc = 45  # idx for the right corner of the left eye
        FP_seq = [45,44,43,42,47,46]  # landmarks for the left eye
else:
print("Error: Wrong pos parameter")
eye_cx = (shape.part(rc).x+shape.part(lc).x)*0.5
eye_cy = (shape.part(rc).y+shape.part(lc).y)*0.5
eye_center = [eye_cx, eye_cy]
eye_len = np.absolute(shape.part(rc).x - shape.part(lc).x)
bx_d5w = eye_len*3/4
bx_h = 1.5*bx_d5w
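    # The resulting crop is 1.5x the eye-corner distance wide and 0.75x as tall as it is
    # wide (a 4:3 patch), which lines up with the 64x48 eye images used elsewhere in
    # this project (see config.py).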
    # Slightly moving up the center of the bounding box
# because the upper lids are more dynamic than the lower lids
sft_up = bx_h*7/12
sft_low = bx_h*5/12
E_TL = (int(eye_cx-bx_d5w),int(eye_cy-sft_up))
E_RB = (int(eye_cx+bx_d5w),int(eye_cy+sft_low))
return eye_center, E_TL, E_RB
# # Starting to capture your face, push "q" to leave the program
# In[9]:
vs = cv2.VideoCapture(1)
while True:
ret, recv_frame = vs.read()
if ret == True:
gray = cv2.cvtColor(recv_frame, cv2.COLOR_BGR2GRAY)
face_detect_gray = cv2.resize(gray, (face_detect_size[0], face_detect_size[1]))
# Detect the facial landmarks
detections = detector(face_detect_gray, 0)
x_ratio = video_res[0]/face_detect_size[0]
y_ratio = video_res[1]/face_detect_size[1]
LE_ach_maps=[]
RE_ach_maps=[]
for k,bx in enumerate(detections):
target_bx = dlib.rectangle(left=int(bx.left()*x_ratio), right =int(bx.right()*x_ratio),
top =int(bx.top()*y_ratio), bottom=int(bx.bottom()*y_ratio))
shape = predictor(gray, target_bx)
# get the left and right eyes
LE_center, L_E_TL, L_E_RB = get_eye_pos(shape, pos="L")
RE_center, R_E_TL, R_E_RB = get_eye_pos(shape, pos="R")
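            # Pinhole-camera relation: pixel_IPD / f = P_IPD / d, so the focal length in
            # pixels is f = pixel_IPD * d / P_IPD, computed below from the two eye centers.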
f = int(np.sqrt((LE_center[0]-RE_center[0])**2 + (LE_center[1]-RE_center[1])**2)*d/P_IPD)
cv2.rectangle(recv_frame,
(video_res[0]-150,0),(video_res[0],40),
(255,255,255),-1
)
cv2.putText(recv_frame,
'f:'+str(f),
(video_res[0]-140,15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0,0,255),1,cv2.LINE_AA)
# draw the regions of two eyes with blue
cv2.rectangle(recv_frame,
(L_E_TL[0],L_E_TL[1]),(L_E_RB[0],L_E_RB[1]),
(255,0,0),1)
cv2.rectangle(recv_frame,
(R_E_TL[0],R_E_TL[1]),(R_E_RB[0],R_E_RB[1]),
(255,0,0),1)
            # highlight the middle point of the eye corners with green
cv2.circle(recv_frame,(int(LE_center[0]),int(LE_center[1])), 2, (0,255,0), -1)
cv2.circle(recv_frame,(int(RE_center[0]),int(RE_center[1])), 2, (0,255,0), -1)
# draw facial landmarks with red
for i in range(68):
cv2.circle(recv_frame,(shape.part(i).x,shape.part(i).y), 2, (0,0,255), -1)
cv2.imshow("Calibration", recv_frame)
k = cv2.waitKey(10)
if k == ord('q'):
vs.release()
cv2.destroyAllWindows()
break
else:
pass
# # Remember to set the f value in "config.py"
# In[10]:
print("The focal length of your camera is", f, ",please set the value of f (--f) in the config.py")
|
LoYungSum/gaze_correction_loyungsum
|
gaze_correction_system/config.py
|
#-*- coding: utf-8 -*-
import argparse
model_config = argparse.ArgumentParser()
# model parameters
model_config.add_argument('--height', type=eval, default=48, help='')
model_config.add_argument('--width', type=eval, default=64, help='')
model_config.add_argument('--channel', type=eval, default=3, help='')
model_config.add_argument('--ef_dim', type=eval, default=12, help='')
model_config.add_argument('--agl_dim', type=eval, default=2, help='')
model_config.add_argument('--encoded_agl_dim', type=eval, default=16, help='')
#demo
model_config.add_argument('--mod', type=str, default="flx", help='')
model_config.add_argument('--weight_set', type=str, default="weights", help='')
model_config.add_argument('--record_time', type=bool, default=False, help='')
model_config.add_argument('--tar_ip', type=str, default='localhost', help='')
model_config.add_argument('--sender_port', type=int, default=5005, help='')
model_config.add_argument('--recver_port', type=int, default=5005, help='')
model_config.add_argument('--uid', type=str, default='local', help='')
model_config.add_argument('--P_IDP', type=eval, default=6.3, help='')
model_config.add_argument('--f', type=eval, default=1000, help='')
model_config.add_argument('--P_c_x', type=eval, default=0, help='')
model_config.add_argument('--P_c_y', type=eval, default=-9.5, help='')
model_config.add_argument('--P_c_z', type=eval, default=-1, help='')
model_config.add_argument('--S_W', type=eval, default=28.5, help='')
model_config.add_argument('--S_H', type=eval, default=18, help='')
def get_config():
config, unparsed = model_config.parse_known_args()
print(config)
return config, unparsed
|
LoYungSum/gaze_correction_loyungsum
|
training/generate_training_inputs_from_raw_dataset/load_dataset2.py
|
<reponame>LoYungSum/gaze_correction_loyungsum
import threading
import numpy as np
import tensorflow as tf
import pickle
import cv2
import os
import sys  # used by sys.exit() in get_easy_hard_iter()
sur_agl_limit = 16
dif_agl_limit_v = 21
dif_agl_limit_h = 26
def seperate_eye_and_lid(imgs, anchor_maps):
# start_time = time.time()
imgs_eye = []
imgs_lid = []
imgs_eye_mask = []
imgs_lid_mask = []
for user_idx in range(len(imgs)):
user_eye = []
user_lid = []
user_eye_mask = []
user_lid_mask = []
for img_idx in range(anchor_maps[user_idx].shape[0]):
# get eye anchors
anchors = []
for i in range(6):
anchor = [int(np.where(anchor_maps[user_idx][img_idx,0,:,2*i+0] ==0)[0]),int(np.where(anchor_maps[user_idx][img_idx,:,0,2*i+1] ==0)[0])]
anchors.append(anchor)
anchors = np.array(anchors, np.int32)
# create mask
mask_eye = np.zeros((imgs[0].shape[1],imgs[0].shape[2]),np.uint8)
cv2.fillPoly(mask_eye, [anchors], 1)
mask_lid = np.ones((imgs[0].shape[1],imgs[0].shape[2]),np.uint8) - mask_eye
            # crop image
#temp = cv2.cvtColor(mask_lid,cv2.COLOR_GRAY2RGB)
img_eye = cv2.bitwise_and(imgs[user_idx][img_idx,...],imgs[user_idx][img_idx,...],mask = mask_eye)# + temp
img_lid = cv2.bitwise_and(imgs[user_idx][img_idx,...],imgs[user_idx][img_idx,...],mask = mask_lid)
user_eye.append(img_eye)
user_lid.append(img_lid)
user_eye_mask.append(mask_eye)
user_lid_mask.append(mask_lid)
user_eye = np.array(user_eye)
user_lid = np.array(user_lid)
user_eye_mask = np.array(user_eye_mask)
user_lid_mask = np.array(user_lid_mask)
imgs_eye.append(user_eye)
imgs_lid.append(user_lid)
imgs_eye_mask.append(user_eye_mask)
imgs_lid_mask.append(user_lid_mask)
# print("sep time %.4f" % (time.time()-start_time))
return imgs_eye, imgs_lid, imgs_eye_mask, imgs_lid_mask
### define your input schedule #####
def read_training_data(file_path):
f = open(file_path, 'rb')
data = pickle.load(f)
f.close()
return data
def load(data_dir, dirs, eye):
MyDict = {}
imgs = []
agls = []
ps = []
anchor_maps = []
for d in dirs:
print(os.path.join(data_dir, d))
data = read_training_data(os.path.join(data_dir, d) + '/' + d + str('_') + eye)
imgs.append(np.asarray(data['img'], dtype= np.float32)/255.0)
agls.append(np.concatenate([np.expand_dims(np.asarray(data['v'], dtype= np.float32), axis=1),
np.expand_dims(np.asarray(data['h'], dtype= np.float32), axis=1)],
axis = 1))
ps.append(np.asarray(data['p'], dtype= np.float32))
anchor_maps.append(np.asarray(data['anchor_map'], dtype= np.float32))
# sep image to eye and lid
imgs_eye, imgs_lid, msk_eye, msk_lid = seperate_eye_and_lid(imgs, anchor_maps)
MyDict['imgs_ori'] = imgs
MyDict['agls'] = agls
MyDict['ps'] = ps
MyDict['anchor_maps'] = anchor_maps
MyDict['imgs_eye'] = imgs_eye
MyDict['imgs_lid'] = imgs_lid
MyDict['msk_eye'] = msk_eye
MyDict['msk_lid'] = msk_lid
return MyDict
def img_pair_list(agls, pose):
'''
10 dims:
pose, uid,
src_img_idx,tar_img_idx,
src_v,src_h,
tar_v,tar_h,
agl_dif_v,agl_dif_h
'''
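    # Example row (hypothetical values): [0, 3, 12, 47, -5, 10, 5, -15, 10, -25] means
    # user 3 in pose 0, warping image 12 (gaze v=-5, h=10) towards the gaze of image 47
    # (v=5, h=-15), i.e. an angular difference of (10, -25).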
for uid in range(len(agls)):
n_agl = np.arange(len(agls[uid]))
sur, tar = np.meshgrid(n_agl, n_agl)
uid_pair = np.concatenate((np.expand_dims(np.repeat(pose, len(agls[uid])*len(agls[uid])), axis=1),
np.expand_dims(np.repeat(uid, len(agls[uid])*len(agls[uid])), axis=1),
np.expand_dims(np.reshape(sur,-1), axis=1),
np.expand_dims(np.reshape(tar,-1), axis=1)), axis=1)
if uid == 0:
pairs = uid_pair
src_agls = agls[uid][uid_pair[:,2],:]
tar_agls = agls[uid][uid_pair[:,3],:]
dif_agls = agls[uid][uid_pair[:,3],:] - agls[uid][uid_pair[:,2],:]
else:
pairs = np.concatenate((pairs, uid_pair), axis=0) # image index
            src_agls = np.concatenate((src_agls, agls[uid][uid_pair[:,2],:]), axis=0)  # source angle
tar_agls = np.concatenate((tar_agls, agls[uid][uid_pair[:,3],:]), axis=0)
dif_agls = np.concatenate((dif_agls, agls[uid][uid_pair[:,3],:] - agls[uid][uid_pair[:,2],:]), axis=0)
pairs = np.concatenate((pairs,src_agls,tar_agls,dif_agls), axis=1)
return pairs.astype(np.int32)
def data_iterator(input_dict, pairs, batch_size, shuffle = True):
# print(input_dict.keys())
t_batch = int(len(pairs)/batch_size)
while True:
idxs = np.arange(0, len(pairs))
if(shuffle):
np.random.shuffle(idxs)
for batch_idx in range(t_batch-1):
cur_idxs = idxs[(batch_idx*batch_size):((batch_idx+1)*batch_size)]
pairs_batch = pairs[cur_idxs]
out_dict = {}
b_pose =[]
b_uID =[]
b_img_ori = []
b_sur_agl = []
b_tar_agl = []
b_fp = []
b_img__ori = []
b_msk_eye = []
for pair_idx in range(len(pairs_batch)):
pose = str(pairs_batch[pair_idx,0])
uID = pairs_batch[pair_idx,1]
surID = pairs_batch[pair_idx,2]
tarID = pairs_batch[pair_idx,3]
b_pose.append(pose)
b_uID.append(uID)
b_img_ori.append(input_dict[pose]['imgs_ori'][uID][surID])
b_sur_agl.append(input_dict[pose]['agls'][uID][surID])
b_tar_agl.append(input_dict[pose]['agls'][uID][tarID])
b_fp.append(input_dict[pose]['anchor_maps'][uID][surID])
b_img__ori.append(input_dict[pose]['imgs_ori'][uID][tarID])
b_msk_eye.append(input_dict[pose]['msk_eye'][uID][surID])
out_dict['pose'] = np.asarray(b_pose)
out_dict['uID'] = np.asarray(b_uID)
out_dict['imgs_ori'] = np.asarray(b_img_ori)
out_dict['fp'] = np.asarray(b_fp)
out_dict['sur_agl'] = np.asarray(b_sur_agl)
out_dict['tar_agl'] = np.asarray(b_tar_agl)
out_dict['imgs__ori'] = np.asarray(b_img__ori)
out_dict['msk_eye'] = np.asarray(b_msk_eye)
yield out_dict
def shuffle_data_batch(data_batch):
idxs = np.arange(0, data_batch['imgs_ori'].shape[0])
np.random.shuffle(idxs)
for i in data_batch.keys():
# print(data_batch.keys())
data_batch[i] = data_batch[i][idxs,...]
return data_batch
def get_dict(conf, tar_path):
tar_dir = os.path.join(conf.data_dir, conf.dataset, tar_path)
pose_dirs = np.asarray([d for d in os.listdir(tar_dir) if os.path.isdir(os.path.join(tar_dir, d))])
print("Pose dirs", pose_dirs)
tar_dicts = {}
for p in pose_dirs:
print('pose', p)
tar_dirs = np.asarray([d for d in os.listdir(os.path.join(tar_dir, p)) if os.path.isdir(os.path.join(tar_dir, p, d))])
print("Dirs", tar_dirs)
# load training inputs
tar_dict = load(data_dir=os.path.join(tar_dir, p), dirs = tar_dirs, eye = conf.eye)
tar_dict['pairs'] = img_pair_list(tar_dict['agls'], int(p))
tar_dicts[p] = tar_dict
return tar_dicts
def get_easy_hard_iter(input_dicts, batch_size):
sur_agl_limit = 16
dif_agl_limit_v = 21
dif_agl_limit_h = 26
pairs = []
for pose in list(input_dicts.keys()):
if pose == list(input_dicts.keys())[0]:
pairs = input_dicts[pose]['pairs']
else:
pairs = np.concatenate((pairs, input_dicts[pose]['pairs']), axis = 0) # image index
all_idx = np.arange(len(pairs))
easy_idx = np.where((np.abs(pairs[:,4]) < sur_agl_limit) &
(np.abs(pairs[:,5]) < sur_agl_limit) &
(np.abs(pairs[:,8]) < dif_agl_limit_v) &
(np.abs(pairs[:,9]) < dif_agl_limit_h))[0]
hard_idx = np.setdiff1d(all_idx, easy_idx)
if (len(all_idx) != (len(hard_idx) + len(easy_idx))):
sys.exit("[T] Easy and Hard sets separation error")
print("E {}; H {}; ALL {}".format(len(easy_idx),len(hard_idx),len(all_idx)))
easy_iter_ = data_iterator(input_dicts, pairs[easy_idx,:], batch_size)
hard_iter_ = data_iterator(input_dicts, pairs[hard_idx,:], batch_size)
return easy_iter_, hard_iter_, len(easy_idx),len(hard_idx)
def merge_batches(cur_batch, tar_batch):
for i in cur_batch.keys():
cur_batch[i] = np.concatenate((cur_batch[i], tar_batch[i]), axis=0)
return cur_batch
|
sh4nth/maps
|
scratch/scripts.py
|
<reponame>sh4nth/maps
import pandas as pd
import urllib, json
from concurrent.futures import ThreadPoolExecutor
allDistricts = {}
with open('List_of_Districts.txt') as f:
for dist in f.read().splitlines():
allDistricts[dist.split(',')[0]] = dist.replace('_', ' ')
allDistricts['Andamans'] = None
allDistricts['Nicobars'] = None
allDistricts['Lakshadweep'] = None
allDistricts['Lahul_and_Spiti'] = 'Lahul, Himachal Pradesh, India'
allDistricts['Leh_Ladakh'] = 'Leh 194101'
allDistricts['East_Godavari'] = 'Kakinada, Andhra Pradesh, India'
allDistricts['South_Garo_Hills'] = 'Chokpot, Meghalaya, India'
allDistricts['Mayurbhanj'] = 'Baripada, Odisha, India'
allDistricts['Kurung_Kumey'] = 'Aalo 791001'
allDistricts['Upper_Dibang_Valley'] = 'Anini 792101'
allDistricts['Upper_Siang'] = 'Yingkiong 791002'
allDistricts['Upper_Subansiri'] = 'Gengi 791125'
allDistricts['Karbi_Anglong'] = 'Diphu, Assam, India'
def getDirections(address, mode='transit', origin='New Delhi', log=False):
apiKey = 'your apiKey from https://developers.google.com/maps/documentation/directions/ (Click get a Key)'
url = 'https://maps.googleapis.com/maps/api/directions/json?origin='+origin+'&key='+apiKey+'&destination=' + address + '&mode=' + mode
ntries = 5
response = None
for _ in range(ntries):
try:
response = urllib.urlopen(url)
break # success
except IOError as err:
pass
# noOp
else: # all ntries failed
raise err # re-raise the last timeout error
data = json.loads(response.read())
if log:
print data
time = data['routes']
if len(time) != 0:
time = time[0]['legs'][0]['duration']
return time
else:
return None
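# Illustrative usage added for clarity (not part of the original script); the address
# and mode are placeholders and a valid API key must be set inside getDirections.
def _example_get_directions():
    # returns the Directions API duration dict, e.g. {'text': ..., 'value': seconds},
    # or None when no route is found
    return getDirections('Mumbai, Maharashtra, India', mode='driving', origin='New Delhi')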
mode = 'driving' #,'walking','transit'
executor = ThreadPoolExecutor(max_workers=10)
origins_done = ['Kolkata', 'Delhi', 'Greater_Bombay', 'Bangalore', 'Chennai','Srinagar', 'Kohima', 'Nagpur', 'Gandhinagar']
origins = []
times = {}
for origin_id in origins:
times[origin_id] = {}
origin = allDistricts[origin_id]
for dest_id in allDistricts:
dest = allDistricts[dest_id]
if dest is None or origin is None:
times[origin_id][dest_id] = None
else:
times[origin_id][dest_id] = executor.submit(getDirections, dest, mode, origin=origin)
for dest_id in allDistricts:
if times[origin_id][dest_id] is not None:
times[origin_id][dest_id] = times[origin_id][dest_id].result()
if times[origin_id][dest_id] is not None:
times[origin_id][dest_id] = times[origin_id][dest_id]['value']
print '.',
else:
print 'o',origin_id,'->',dest_id, '(',allDistricts[origin_id],allDistricts[dest_id],')'
with open("/tmp/data.csv","w") as f:
f.write('origin')
for dest_id in sorted(allDistricts):
f.write(',' + dest_id)
f.write('\n')
for origin_id in origins_done:
f.write(origin_id)
for dest_id in sorted(allDistricts):
f.write(','+str(times[origin_id][dest_id]))
f.write('\n')
|
Swipe650/Py-Timer
|
pytimer.py
|
<reponame>Swipe650/Py-Timer<filename>pytimer.py
#!/usr/bin/env python3
# encoding: utf-8
"""
Count down seconds from a given minute value
using the Tkinter GUI toolkit that comes with Python.
Basic Tk version by vegaseat (I extended this):
https://www.daniweb.com/programming/software-development/threads/464062/countdown-clock-with-python
<NAME>, alias <NAME> https://github.com/jabbalaci/Pomodoro-Timer
Modified by: Swipe650 https://github.com/Swipe650
"""
import os
import re
import shlex
import socket
import sys
import time
import tkinter as tk
from tkinter import *
from collections import OrderedDict
from subprocess import PIPE, STDOUT, Popen, call
# Package dependencies:
"""
wmctrl, xdotool, sox and tkinter / tk packages:
Arch-based distros: sudo pacman -S tk
Ubuntu: sudo apt-get install python3-tk
"""
go_on = None # will be set later
VERSION = '0.1'
MINUTES = 0 # 0 minutes is the default
WINDOW_TITLE = 'pytimer'
DEBUG = True
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
SOUND_FILE = '{d}/timer_done.wav'.format(d=PROJECT_DIR)
SOUND_VOLUME = '0.75'
hostname = socket.gethostname()
if hostname == 'laptop':
SOUND_VOLUME = 0.75
required_commands = [
'/usr/bin/wmctrl', # in package wmctrl
'/usr/bin/xdotool', # in package xdotool
'/usr/bin/play', # in package sox
]
def check_required_commands():
"""
Verify if the external binaries are available.
"""
for cmd in required_commands:
if not os.path.isfile(cmd):
print("Error: the command '{0}' is not available! Abort.".format(cmd))
sys.exit(1)
check_required_commands()
###################################
## window and process management ##
###################################
def get_simple_cmd_output(cmd, stderr=STDOUT):
"""
Execute a simple external command and get its output.
The command contains no pipes. Error messages are
redirected to the standard output by default.
"""
args = shlex.split(cmd)
return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0].decode("utf8")
def get_wmctrl_output():
"""
Parses the output of wmctrl and returns a list of ordered dicts.
"""
cmd = "wmctrl -lGpx"
lines = [line for line in get_simple_cmd_output(cmd)
.encode('ascii', 'ignore')
.decode('ascii').split("\n") if line]
res = []
for line in lines:
pieces = line.split()
d = OrderedDict()
d['wid'] = pieces[0]
d['desktop'] = int(pieces[1])
d['pid'] = int(pieces[2])
d['geometry'] = [int(x) for x in pieces[3:7]]
d['window_class'] = pieces[7]
d['client_machine_name'] = pieces[8]
d['window_title'] = ' '.join(pieces[9:])
res.append(d)
#
return res
def get_wid_by_title(title_regexp):
"""
Having the window title (as a regexp), return its wid.
If not found, return None.
"""
for d in get_wmctrl_output():
m = re.search(title_regexp, d['window_title'])
if m:
return d['wid']
#
return None
def activate_window_by_id(wid):
"""
    Put the focus on and activate the window with the given ID.
"""
os.system('xdotool windowactivate {wid}'.format(wid=wid))
def switch_to_window(title_regexp):
"""
Put the focus on the window with the specified title.
"""
wid = get_wid_by_title(title_regexp)
if wid:
if DEBUG:
print('# window id:', wid)
wid = int(wid, 16)
if DEBUG:
print('# switching to the other window')
activate_window_by_id(wid)
else:
if DEBUG:
print('# not found')
#########
## GUI ##
#########
def formatter(sec):
# format as 2 digit integers, fills with zero to the left
# divmod() gives minutes, seconds
#return "{:02d}:{:02d}".format(*divmod(sec, 60))
hours, remainder = divmod(sec, 3600)
mins, sec = divmod(remainder, 60)
#if MINUTES > 60:
return "{:1d}:{:02d}:{:02d}".format(hours, mins, sec)
#else:
#return "{:02d}:{:02d}".format(mins, sec)
#return "{:2d}:{:02d}:{:02d}".format(hours, mins, sec)
def play_sound():
os.system("play -q -v {vol} {fname} &".format(
vol=SOUND_VOLUME, fname=SOUND_FILE
))
def reset(event=None):
    entry.delete(0, 'end')  # clear the whole input box
    global go_on
    go_on = False
    time_str.set(formatter(MINUTES * 60))
    #clear_vars()
    root.update()
def mute(event=None):
    call(["killall", "-9", "play"])  # stop any running 'play' (sox) processes
def close(event=None):
    call(["killall", "-9", "play"])
    root.destroy()
# SET button code:
def onset(event=None):
    global SET, go_on
    try:
        SET = int(entry.get())
    except ValueError:
        return  # ignore empty or non-numeric input
    MINUTES = SET  # local shadow of the module-level default
    time_str.set(formatter(MINUTES * 60))
    root.update()
    # count-down loop (originally a separate count_down() function)
    go_on = True
    for t in range(MINUTES * 60 - 1, -1, -1):
        if t == 0:
            play_sound()
            switch_to_window(WINDOW_TITLE)
        time_str.set(formatter(t))
        root.update()
        # delay one second
        for _ in range(2):
            time.sleep(0.5)  # if minimized then maximized,
            root.update()    # it's more responsive this way
            if not go_on:
                return
    reset()
def center(win):
"""
centers a tkinter window
:param win: the root or Toplevel window to center
from http://stackoverflow.com/a/10018670/232485
"""
win.update_idletasks()
width = win.winfo_width()
frm_width = win.winfo_rootx() - win.winfo_x()
win_width = width + 2 * frm_width
height = win.winfo_height()
titlebar_height = win.winfo_rooty() - win.winfo_y()
win_height = height + titlebar_height + frm_width
x = win.winfo_screenwidth() // 2 - win_width // 2
y = win.winfo_screenheight() // 2 - win_height // 2
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
win.deiconify()
def print_usage():
print("""
Swipe650's Pytimer v{ver}
Usage: {fname} [parameter]
Parameters:
-h, --help this help
-play play the sound and quit (for testing the volume)
""".strip().format(ver=VERSION, fname=sys.argv[0]))
if len(sys.argv) > 1:
param = sys.argv[1]
if param in ['-h', '--help']:
print_usage()
sys.exit(0)
elif param == '-play':
# for testing the volume
play_sound()
sys.exit(0)
elif re.search(r'\d+', param):
try:
MINUTES = int(param)
except ValueError:
print("Error: unknown option.")
sys.exit(1)
else:
print("Error: unknown option.")
sys.exit(1)
root = tk.Tk()
root.wm_title(WINDOW_TITLE)
#root.wm_geometry ("-30-80")
root.wm_geometry ("-2030-0")
root.resizable(width=False, height=False)
#root.geometry('{}x{}'.format(195, 200))
img = PhotoImage(file=os.path.join(PROJECT_DIR, 'pytimer_icon.png'))
root.tk.call('wm', 'iconphoto', root._w, img)
mainframe = tk.Frame(root, padx="3",pady="3")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
time_str = tk.StringVar()
# create the time display label, give it a large font
# label auto-adjusts to the font
label_font = ('helvetica', 34)
tk.Label(root, textvariable=time_str, font=label_font, bg='white',
fg='red', relief='raised', bd=3).grid(padx=5, pady=5)
time_str.set(formatter(MINUTES * 60))
root.update()
# Input box
entry = Entry(root, width=3)
entry.grid(column=0, row=2, pady=3, sticky=(N))
entry.focus()
entry.bind('<Return>', onset)
# create buttons and activates them with enter key
#startbtn = tk.Button(root, text='Start', command=count_down)
#startbtn.grid(column=0, row=2, sticky=(E))
#startbtn.bind('<Return>', count_down)
#closebtn = tk.Button(root, text='Close', command=root.destroy)
closebtn = tk.Button(root, text='Close', command=close)
closebtn.grid(column=0, row=2, sticky=(W))
closebtn.bind('<Return>', close)
resetbtn = tk.Button(root, text='Reset', command=reset)
resetbtn.grid(column=0, row=2, sticky=(N))
resetbtn.bind('<Return>', reset)
mutebtn = tk.Button(root, text='Mute', command=mute)
mutebtn.grid(column=0, row=2, sticky=(E))
mutebtn.bind('<Return>', mute)
#setbtn = tk.Button(root, text=' Set ', command=onset)
#setbtn.grid(column=0, row=2, sticky=(W))
#setbtn.bind('<Return>', onset)
# start the GUI event loop
#root.wm_attributes("-topmost", 1) # always on top
root.wm_attributes("-topmost", 0) # never on top
#center(root)
root.mainloop()
|
all4dich/ldap_tools
|
src/main/test_python/call_hello.py
|
import re
import os
import logging
from ldap_tools.common import LDAPClient
logger = logging.getLogger("add-lge-users-to-nexus")
if __name__ == "__main__":
host = "192.168.3.11"
username = "lge\\allessunjoo.park"
password = input("Password: ")
a = LDAPClient(host, username, password)
a.search_root = 'ou=LGE Users,dc=lge,dc=net'
try:
lge_department_name = "TV DevOps개발"
except:
lge_department_name = input("LGE department name: ")
r = a.get_members(lge_department_name)
for member in r:
user_cn = str(member.cn)
user_mail = str(member.mail)
print(user_mail)
|
all4dich/ldap_tools
|
src/main/test_python/find-departments.py
|
<reponame>all4dich/ldap_tools
import re
import os
import logging
from ldap_tools.common import LDAPClient
import argparse
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-u", dest="username", required=True)
arg_parser.add_argument("-p", dest="password", required=True)
args = arg_parser.parse_args()
if __name__ == "__main__":
host = "172.16.17.32"
username = f"lge\\{args.username}"
    password = args.password  # password passed via the -p argument
a = LDAPClient(host, username, password)
a.search_root = 'ou=LGE Users,dc=lge,dc=net'
try:
lge_department_name = "TV DevOps개발"
except:
lge_department_name = input("LGE department name: ")
departments = a.find_department(lge_department_name)
for each_dept in departments:
print(each_dept.name.value)
|
all4dich/ldap_tools
|
src/main/test_python/test-call-ldap-server.py
|
<filename>src/main/test_python/test-call-ldap-server.py
from ldap_tools.common import LDAPClient
import subprocess
import re
if __name__ == "__main__":
host = "192.168.127.12"
username = "lge\\addhost"
password = input(f"Password for {username}")
a = LDAPClient(host,username,password)
a.search_root = 'ou=LGE Users,dc=lge,dc=net'
# b = a.get_child(oc="posixGroup")
# print(b)
# members = list(map(lambda x: x.entry_dn, b))
# print("\n".join(members))
# c = a.get_objects("ou=TV SW Engineering")
# print(c)
# user = "jaewooki.lee"
# d = a.get_objects(f"sAMAccountName=*{user}*")
# for e in d:
# print(e)
team = "TV SW CI팀"
r = a.get_members(team)
for member in r:
print(member.cn)
|
all4dich/ldap_tools
|
src/main/python/ldap_tools/common.py
|
from ldap3 import Server, Connection, ALL, NTLM
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARN)
formatter = logging.Formatter('%(levelname)7s:%(filename)s:%(lineno)d:%(funcName)10s: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
ch.setFormatter(formatter)
logger.addHandler(ch)
class LDAPClient:
def __init__(self, host, username, password, authentication=NTLM, search_root=None):
self.host = host
self._username = username
self._password = password
self.authentication = authentication
self._search_root = search_root
self._search_attributes = ['name', 'mail', 'mobile', 'cn', 'department', 'description', 'displayNamePrintable',
'displayName', 'objectClass']
@property
def username(self):
return self._username
@username.setter
def username(self, val):
self._username = val
@property
def password(self):
return self._password
@password.setter
def password(self, val):
self._password = val
@property
def search_root(self):
return self._search_root
@search_root.setter
def search_root(self, value):
self._search_root = value
@property
def search_attributes(self):
return self._search_attributes
@search_attributes.setter
def search_attributes(self, value):
self._search_attributes = value
def get_connection(self):
"""
Return a connection to LDAP server
:return: conn
"""
server = Server(self.host)
conn = Connection(server, user=self._username, password=self._password, authentication=self.authentication)
conn.bind()
return conn
def get_objects(self, obj_name):
"""
Return a list of objects with a query 'obj_name'
'obj_name' is a query statement written by a caller
Example. cn=<NAME>
:param obj_name:
:return:
"""
conn = self.get_connection()
if self._search_root is None:
logger.warning("Set 'search_root' and try again")
return None
conn.search(self._search_root, f"({obj_name})", attributes=self._search_attributes)
return conn.entries
def get_departments(self, dept_name):
return self.get_objects(f"cn={dept_name}")
def get_members(self, dept_name):
"""
Get a list of members who are under 'dept_name'
:param dept_name: Department name
:return:
"""
members = []
conn = self.get_connection()
if self._search_root is None:
logger.warning("Set 'search_root' and try again")
return None
        # Get the matching department objects
depts = self.get_objects(f"&(objectClass=organizationalUnit)(ou=*{dept_name}*)")
for each_dept in depts:
member_search_base = each_dept.entry_dn
conn.search(member_search_base, '(objectClass=person)', attributes=self._search_attributes)
for each_entry in conn.entries:
members.append(each_entry)
return members
def get_child(self, oc="person"):
"""
Return all child elements under 'self._search_root'
:param oc:
:return:
"""
if self._search_root is None:
logger.warning("Set 'search_root' and try again")
return None
conn = self.get_connection()
conn.search(self._search_root, f"(objectClass={oc})", attributes=self._search_attributes)
return conn.entries
def find_department(self, dept_str):
"""
        Find departments whose names contain the substring 'dept_str'
        :param dept_str: Substring of the department name
:return: List of departments. Each department's objectClass is 'group'
"""
conn = self.get_connection()
if self._search_root is None:
logger.warning("Set 'search_root' and try again")
return None
        # Get the matching department objects
dept_str_converted = dept_str.replace("(", "*").replace(")", "*")
depts = self.get_objects(f"&(objectClass=group)(cn=*{dept_str_converted}*)")
return depts
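
# Illustrative usage sketch added for clarity (not shipped with the library); the host,
# credentials, search root and department name passed in are placeholders.
def _example_list_members(host, username, password, department):
    client = LDAPClient(host, username, password)
    client.search_root = 'ou=LGE Users,dc=lge,dc=net'
    for member in client.get_members(department):
        print(member.cn, member.mail)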
|
gauchm/mlstream
|
mlstream/utils.py
|
import sys
import pickle
import json
from pathlib import Path
from typing import Dict, List
from datetime import datetime
import h5py
import pandas as pd
import numpy as np
import scipy as sp
from tqdm import tqdm
from .datasets import LumpedBasin
from .datautils import store_static_attributes
def create_h5_files(data_root: Path,
out_file: Path,
basins: List,
dates: List,
forcing_vars: List,
seq_length: int,
allow_negative_target: bool):
"""Creates H5 training set.
Parameters
----------
data_root : Path
Path to the main directory of the data set
out_file : Path
Path of the location where the hdf5 file should be stored
basins : List
List containing the gauge ids
dates : List
List of start and end date of the discharge period to use, when combining the data.
forcing_vars : List
Names of forcing variables
seq_length : int
Length of the requested input sequences
    allow_negative_target : bool
If False, will remove samples with negative target value from the dataset.
Raises
------
FileExistsError
If file at this location already exists.
"""
if out_file.is_file():
raise FileExistsError(f"File already exists at {out_file}")
with h5py.File(out_file, 'w') as out_f:
input_data = out_f.create_dataset('input_data',
shape=(0, seq_length, len(forcing_vars)),
maxshape=(None, seq_length, len(forcing_vars)),
chunks=True,
dtype=np.float32,
compression='gzip')
target_data = out_f.create_dataset('target_data',
shape=(0, 1),
maxshape=(None, 1),
chunks=True,
dtype=np.float32,
compression='gzip')
q_stds = out_f.create_dataset('q_stds',
shape=(0, 1),
maxshape=(None, 1),
dtype=np.float32,
compression='gzip',
chunks=True)
sample_2_basin = out_f.create_dataset('sample_2_basin',
shape=(0, ),
maxshape=(None, ),
dtype="S10",
compression='gzip',
chunks=True)
scalers = None
for basin in tqdm(basins, file=sys.stdout):
dataset = LumpedBasin(data_root=data_root,
basin=basin,
forcing_vars=forcing_vars,
is_train=True,
train_basins=basins,
seq_length=seq_length,
dates=dates,
scalers=scalers,
allow_negative_target=allow_negative_target,
with_attributes=False)
if len(dataset) == 0:
print (f"No data for basin {basin}. Skipping it.")
continue
# Reuse scalers across datasets to save computation time
if scalers is None:
scalers = dataset.input_scalers, dataset.output_scalers, dataset.static_scalers
num_samples = len(dataset)
total_samples = input_data.shape[0] + num_samples
# store input and output samples
input_data.resize((total_samples, seq_length, len(forcing_vars)))
target_data.resize((total_samples, 1))
input_data[-num_samples:, :, :] = dataset.x
target_data[-num_samples:, :] = dataset.y
# additionally store std of discharge of this basin for each sample
q_stds.resize((total_samples, 1))
q_std_array = np.array([dataset.q_std] * num_samples, dtype=np.float32).reshape(-1, 1)
q_stds[-num_samples:, :] = q_std_array
sample_2_basin.resize((total_samples, ))
str_arr = np.array([basin.encode("ascii", "ignore")] * num_samples)
sample_2_basin[-num_samples:] = str_arr
out_f.flush()
def store_results(user_cfg: Dict, run_cfg: Dict, results: Dict):
"""Stores prediction results in a pickle file.
Parameters
----------
user_cfg : Dict
Dictionary containing the user entered evaluation config
run_cfg : Dict
Dictionary containing the run config loaded from the cfg.json file
results : Dict
DataFrame containing the observed and predicted discharge.
"""
if run_cfg["no_static"]:
file_name = user_cfg["run_dir"] / f"results_no_static_seed{run_cfg['seed']}.p"
else:
if run_cfg["concat_static"]:
file_name = user_cfg["run_dir"] / f"results_concat_static_seed{run_cfg['seed']}.p"
else:
file_name = user_cfg["run_dir"] / f"results_seed{run_cfg['seed']}.p"
with (file_name).open('wb') as fp:
pickle.dump(results, fp)
print(f"Successfully stored results at {file_name}")
def prepare_data(cfg: Dict, basins: List) -> Dict:
"""Pre-processes training data.
Parameters
----------
cfg : Dict
Dictionary containing the run config
basins : List
List containing the gauge ids
Returns
-------
Dict
Dictionary containing the updated run config.
"""
# create database file containing the static basin attributes
cfg["db_path"] = cfg["run_dir"] / "static_attributes.db"
store_static_attributes(cfg["data_root"], db_path=cfg["db_path"],
attribute_names=cfg["static_attributes"])
# create .h5 files for train and validation data
cfg["train_file"] = cfg["train_dir"] / 'train_data.h5'
create_h5_files(data_root=cfg["data_root"],
out_file=cfg["train_file"],
basins=basins,
dates=[cfg["start_date"], cfg["end_date"]],
forcing_vars=cfg["forcing_attributes"],
seq_length=cfg["seq_length"],
allow_negative_target=cfg["allow_negative_target"])
return cfg
def setup_run(cfg: Dict) -> Dict:
"""Creates the folder structure for the experiment.
Parameters
----------
cfg : Dict
Dictionary containing the run config
Returns
-------
Dict
Dictionary containing the updated run config
"""
cfg["start_time"] = str(datetime.now())
if not cfg["run_dir"].is_dir():
cfg["train_dir"] = cfg["run_dir"] / 'data' / 'train'
cfg["train_dir"].mkdir(parents=True)
cfg["val_dir"] = cfg["run_dir"] / 'data' / 'val'
cfg["val_dir"].mkdir(parents=True)
else:
raise RuntimeError('There is already a folder at {}'.format(cfg["run_dir"]))
# dump a copy of cfg to run directory
with (cfg["run_dir"] / 'cfg.json').open('w') as fp:
temp_cfg = {}
for key, val in cfg.items():
if isinstance(val, Path):
temp_cfg[key] = str(val)
elif isinstance(val, pd.Timestamp):
temp_cfg[key] = val.strftime(format="%d%m%Y")
elif isinstance(val, np.ndarray):
temp_cfg[key] = val.tolist() # np.ndarrays are not serializable
elif 'param_dist' in key:
temp_dict = {}
for k, v in val.items():
if isinstance(v, sp.stats._distn_infrastructure.rv_frozen):
temp_dict[k] = f"{v.dist.name}{v.args}, *kwds={v.kwds}"
else:
temp_dict[k] = str(v)
temp_cfg[key] = str(temp_dict)
else:
temp_cfg[key] = val
json.dump(temp_cfg, fp, sort_keys=True, indent=4)
return cfg
def nse(qsim: np.ndarray, qobs: np.ndarray) -> float:
"""Calculates NSE, ignoring NANs in ``qobs``.
.. math::
\\text{NSE} =
1 - \\frac{\\sum_{t=1}^T{(q_s^t - q_o^t)^2}}{\\sum_{t=1}^T{(q_o^t - \\bar{q}_o)^2}}
Parameters
----------
qsim : np.ndarray
Predicted streamflow
qobs : np.ndarray
Ground truth streamflow
Returns
-------
nse : float
The prediction's NSE
Raises
------
ValueError
        If lengths of qsim and qobs are not equal.
"""
if len(qsim) != len(qobs):
raise ValueError(f"Lenghts of qsim {len(qsim)} and qobs {len(qobs)} mismatch.")
qsim = qsim[~np.isnan(qobs)]
qobs = qobs[~np.isnan(qobs)]
return 1 - (np.sum(np.square(qsim - qobs)) / np.sum(np.square(qobs - np.mean(qobs))))
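
# Illustrative sanity check added for clarity (not part of the original module):
# NSE is 1.0 for a perfect prediction and 0.0 when the prediction equals the mean
# of the observations.
def _demo_nse():
    qobs = np.array([1.0, 2.0, 3.0, 4.0])
    assert nse(qobs.copy(), qobs) == 1.0
    assert abs(nse(np.full_like(qobs, qobs.mean()), qobs)) < 1e-12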
|
gauchm/mlstream
|
mlstream/__init__.py
|
#
# Machine Learning for Streamflow Prediction
#
|
gauchm/mlstream
|
mlstream/models/xgboost.py
|
<filename>mlstream/models/xgboost.py
from typing import Dict, List
from pathlib import Path
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV
from torch.utils.data import DataLoader
try:
import xgboost as xgb
except ImportError:
print("Importing the optional dependency xgboost failed, but required to train an XGBoost model.")
raise
from ..datasets import LumpedBasin, LumpedH5
from .base_models import LumpedModel
from .nseloss import XGBNSEObjective
class LumpedXGBoost(LumpedModel):
"""Wrapper for XGBoost model on lumped data. """
def __init__(self, no_static: bool = False, concat_static: bool = True,
use_mse: bool = False, run_dir: Path = None, n_jobs: int = 1,
seed: int = 0, n_estimators: int = 100,
learning_rate: float = 0.01,
early_stopping_rounds: int = None,
n_cv: int = 5,
param_dist: Dict = None,
param_search_n_estimators: int = None,
param_search_n_iter: int = None,
param_search_early_stopping_rounds: int = None,
reg_search_param_dist: Dict = None,
reg_search_n_iter: int = None,
model_path: Path = None):
if not no_static and not concat_static:
raise ValueError("XGBoost has to use concat_static.")
self.model = None
self.use_mse = use_mse
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.early_stopping_rounds = early_stopping_rounds
self.n_cv = n_cv
self.param_dist = param_dist
self.param_search_n_estimators = param_search_n_estimators
self.param_search_n_iter = param_search_n_iter
self.param_search_early_stopping_rounds = param_search_early_stopping_rounds
self.reg_search_param_dist = reg_search_param_dist
self.reg_search_n_iter = reg_search_n_iter
self.run_dir = run_dir
self.n_jobs = n_jobs
self.seed = seed
if model_path is not None:
self.load(model_path)
def load(self, model_path: Path):
self.model = pickle.load(open(model_path, 'rb'))
def train(self, ds: LumpedH5) -> None:
# Create train/val sets
loader = DataLoader(ds, batch_size=len(ds), num_workers=self.n_jobs)
data = next(iter(loader))
# don't use static variables
if len(data) == 3:
x, y, q_stds = data
# this shouldn't happen since we raise an exception if concat_static is False.
else:
raise ValueError("XGBoost has to use concat_static.")
x = x.reshape(len(x), -1).numpy()
y = y.reshape(-1).numpy()
q_stds = q_stds.reshape(-1).numpy()
# define loss function
if not self.use_mse:
# slight hack to enable NSE on XGBoost: replace the target with a unique id
# so we can figure out the corresponding q_std during the loss calculation.
y_actual = y.copy()
y = np.arange(len(y))
loss = XGBNSEObjective(y, y_actual, q_stds)
self.objective = loss.nse_objective_xgb_sklearn_api
self.eval_metric = loss.nse_metric_xgb
self.scoring = loss.neg_nse_metric_sklearn
else:
self.objective = 'reg:squarederror'
self.eval_metric = 'rmse'
self.scoring = 'neg_mean_squared_error'
num_val_samples = int(len(x) * 0.2)
val_indices = np.random.choice(range(len(x)), size=num_val_samples, replace=False)
train_indices = np.setdiff1d(range(len(x)), val_indices)
val = [(x[train_indices], y[train_indices]),
(x[val_indices], y[val_indices])]
if self.model is None:
print("Performing parameter searches.")
if self.param_dist is None or self.n_cv is None \
or self.param_search_n_iter is None \
or self.param_search_n_estimators is None \
or self.param_search_early_stopping_rounds is None \
or self.reg_search_param_dist is None \
or self.reg_search_n_iter is None:
raise ValueError("Need to pass parameter search configuration or load model.")
best_params = self._param_search(x[train_indices], y[train_indices], val,
self.param_search_n_estimators,
self.param_dist,
self.param_search_n_iter).best_params_
print(f"Best parameters: {best_params}.")
# Find regularization parameters in separate search
for k, v in best_params.items():
self.reg_search_param_dist[k] = [v]
model = self._param_search(x[train_indices], y[train_indices], val,
self.param_search_n_estimators,
self.reg_search_param_dist,
self.reg_search_n_iter)
print(f"Best regularization parameters: {model.best_params_}.")
cv_results = pd.DataFrame(model.cv_results_).sort_values(by='mean_test_score',
ascending=False)
print(cv_results.filter(regex='param_|mean_test_score|mean_train_score',
axis=1).head())
print(cv_results.loc[model.best_index_, ['mean_train_score', 'mean_test_score']])
xgb_params = model.best_params_
else:
print('Using model parameters from provided XGBoost model.')
xgb_params = self.model.get_xgb_params()
self.model = xgb.XGBRegressor()
self.model.set_params(**xgb_params)
self.model.n_estimators = self.n_estimators
self.model.learning_rate = self.learning_rate
self.model.objective = self.objective
self.model.random_state = self.seed
self.model.n_jobs = self.n_jobs
print(self.model.get_xgb_params())
print("Fitting model.")
self.model.fit(x[train_indices], y[train_indices],
eval_set=val, verbose=True,
eval_metric=self.eval_metric,
early_stopping_rounds=self.early_stopping_rounds)
model_path = self.run_dir / 'model.pkl'
with open(model_path, 'wb') as f:
pickle.dump(self.model, f)
print(f"Model saved as {model_path}.")
def predict(self, ds: LumpedBasin) -> np.ndarray:
loader = DataLoader(ds, batch_size=len(ds), shuffle=False, num_workers=4)
data = next(iter(loader))
if len(data) == 2:
x, y = data
# this shouldn't happen since we didn't allow concat_static = False in training.
else:
raise ValueError("XGBoost has to use concat_static or no_static.")
x = x.reshape(len(x), -1)
y = y.reshape(-1)
return self.model.predict(x), y
def _param_search(self, x_train: np.ndarray, y_train: np.ndarray, eval_set: List,
                      n_estimators: int, param_dist: Dict, n_iter: int) -> RandomizedSearchCV:
"""Performs a cross-validated random parameter search.
Parameters
----------
x_train : np.ndarray
Training input
y_train : np.ndarray
Training ground truth
eval_set : List
List of evaluation sets to report metrics on
n_estimators : int
Number of trees to train
param_dist : Dict
Search space of parameter distributions
n_iter : int
Number of random parameter samples to test
Returns
-------
RandomizedSearchCV
Fitted random search instance (with ``refit=False``)
"""
model = xgb.XGBRegressor(n_estimators=n_estimators, objective=self.objective,
n_jobs=1, random_state=self.seed)
model = RandomizedSearchCV(model, param_dist, n_iter=n_iter,
cv=self.n_cv, return_train_score=True,
scoring=self.scoring, n_jobs=self.n_jobs,
random_state=self.seed, refit=False,
verbose=5, error_score='raise')
model.fit(x_train, y_train, eval_set=eval_set, eval_metric=self.eval_metric,
early_stopping_rounds=self.param_search_early_stopping_rounds, verbose=False)
return model
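
# Illustrative construction sketch added for clarity (not part of the original module).
# The search spaces, iteration counts and learning rate below are placeholders, not
# recommended settings.
def _example_build_xgb(run_dir):
    param_dist = {'max_depth': [3, 5, 7], 'subsample': [0.7, 0.9]}
    reg_dist = {'reg_alpha': [0.0, 0.1], 'reg_lambda': [1.0, 2.0]}
    return LumpedXGBoost(run_dir=run_dir, n_jobs=2, seed=0,
                         n_estimators=200, learning_rate=0.05,
                         early_stopping_rounds=20, n_cv=3,
                         param_dist=param_dist,
                         param_search_n_estimators=50,
                         param_search_n_iter=4,
                         param_search_early_stopping_rounds=10,
                         reg_search_param_dist=reg_dist,
                         reg_search_n_iter=4)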
|
gauchm/mlstream
|
mlstream/datasets.py
|
from pathlib import Path
from typing import List, Tuple, Dict
import h5py
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
from .datautils import (load_discharge,
load_forcings_lumped,
load_static_attributes,
reshape_data)
from .scaling import InputScaler, OutputScaler, StaticAttributeScaler
class LumpedBasin(Dataset):
"""PyTorch data set to work with the raw text files for lumped (daily basin-aggregated)
forcings and streamflow.
Parameters
----------
data_root : Path
Path to the main directory of the data set
basin : str
Gauge-id of the basin
forcing_vars : List
Names of forcing variables to use
dates : List
Start and end date of the period.
is_train : bool
If True, discharge observations are normalized and invalid discharge samples are removed
train_basins : List
List of basins used in the training of the experiment this Dataset is part of. Needed to
create the correct feature scalers (the ones that are calculated on these basins)
seq_length : int
Length of the input sequence
with_attributes : bool, optional
        If True, additionally loads and returns static attributes, by default False
concat_static : bool, optional
If true, adds catchment characteristics at each time step to the meteorological forcing
input data, by default True
db_path : str, optional
Path to sqlite3 database file containing the catchment characteristics, by default None
allow_negative_target : bool, optional
If False, will remove samples with negative target value from the dataset.
scalers : Tuple[InputScaler, OutputScaler, Dict[str, StaticAttributeScaler]], optional
        Scalers to normalize and rescale input, output, and static variables. If not provided,
the scalers will be initialized at runtime, which will result in poor performance if
many datasets are created. Instead, it makes sense to re-use the scalers across datasets.
"""
def __init__(self,
data_root: Path,
basin: str,
forcing_vars: List,
dates: List,
is_train: bool,
train_basins: List,
seq_length: int,
with_attributes: bool = False,
concat_static: bool = True,
db_path: str = None,
allow_negative_target: bool = False,
scalers: Tuple[InputScaler, OutputScaler,
Dict[str, StaticAttributeScaler]] = None):
self.data_root = data_root
self.basin = basin
self.forcing_vars = forcing_vars
self.seq_length = seq_length
self.is_train = is_train
self.train_basins = train_basins
self.dates = dates
self.with_attributes = with_attributes
self.concat_static = concat_static
self.db_path = db_path
self.allow_negative_target = allow_negative_target
if scalers is not None:
self.input_scalers, self.output_scalers, self.static_scalers = scalers
else:
self.input_scalers, self.output_scalers, self.static_scalers = None, None, {}
if self.input_scalers is None:
self.input_scalers = InputScaler(self.data_root, self.train_basins,
self.dates[0], self.dates[1],
self.forcing_vars)
if self.output_scalers is None:
self.output_scalers = OutputScaler(self.data_root, self.train_basins,
self.dates[0], self.dates[1])
# placeholder to store std of discharge, used for rescaling losses during training
self.q_std = None
# placeholder to store start and end date of entire period (incl warmup)
self.period_start = None
self.period_end = None
self.attribute_names = None
self.x, self.y = self._load_data()
if self.with_attributes:
self.attributes = self._load_attributes()
self.num_samples = self.x.shape[0]
def __len__(self):
return self.num_samples
def __getitem__(self, idx: int):
if self.with_attributes:
if self.concat_static:
x = torch.cat([self.x[idx], self.attributes.repeat((self.seq_length, 1))], dim=-1)
return x, self.y[idx]
else:
return self.x[idx], self.attributes, self.y[idx]
else:
return self.x[idx], self.y[idx]
def _load_data(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Loads input and output data from text files. """
# we use (seq_len) time steps before start for warmup
df = load_forcings_lumped(self.data_root, [self.basin])[self.basin]
qobs = load_discharge(self.data_root, basins=[self.basin]).set_index('date')['qobs']
if not self.is_train and len(qobs) == 0:
tqdm.write(f"Treating {self.basin} as validation basin (no streamflow data found).")
qobs = pd.Series(np.nan, index=df.index, name='qobs')
df = df.loc[self.dates[0]:self.dates[1]]
qobs = qobs.loc[self.dates[0]:self.dates[1]]
if len(qobs) != len(df):
print(f"Length of forcings {len(df)} and observations {len(qobs)} \
doesn't match for basin {self.basin}")
df['qobs'] = qobs
# store first and last date of the selected period
self.period_start = df.index[0]
self.period_end = df.index[-1]
# use all meteorological variables as inputs
x = np.array([df[var].values for var in self.forcing_vars]).T
y = np.array([df['qobs'].values]).T
# normalize data, reshape for LSTM training and remove invalid samples
x = self.input_scalers.normalize(x)
x, y = reshape_data(x, y, self.seq_length)
if self.is_train:
# Delete all samples where discharge is NaN
if np.sum(np.isnan(y)) > 0:
tqdm.write(f"Deleted {np.sum(np.isnan(y))} NaNs in basin {self.basin}.")
x = np.delete(x, np.argwhere(np.isnan(y)), axis=0)
y = np.delete(y, np.argwhere(np.isnan(y)), axis=0)
# Deletes all records with invalid discharge
if not self.allow_negative_target and np.any(y < 0):
tqdm.write(f"Deleted {np.sum(y < 0)} negative values in basin {self.basin}.")
x = np.delete(x, np.argwhere(y < 0)[:, 0], axis=0)
y = np.delete(y, np.argwhere(y < 0)[:, 0], axis=0)
# store std of discharge before normalization
self.q_std = np.std(y)
y = self.output_scalers.normalize(y)
# convert arrays to torch tensors
x = torch.from_numpy(x.astype(np.float32))
y = torch.from_numpy(y.astype(np.float32))
return x, y
def _load_attributes(self) -> torch.Tensor:
df = load_static_attributes(self.db_path, [self.basin], drop_lat_lon=True)
# normalize data
for feature in [f for f in df.columns if f[:7] != 'onehot_']:
if feature not in self.static_scalers or self.static_scalers[feature] is None:
self.static_scalers[feature] = \
StaticAttributeScaler(self.db_path, self.train_basins, feature)
df[feature] = self.static_scalers[feature].normalize(df[feature])
# store attribute names
self.attribute_names = df.columns
# store feature as PyTorch Tensor
attributes = df.loc[df.index == self.basin].values
return torch.from_numpy(attributes.astype(np.float32))
class LumpedH5(Dataset):
"""PyTorch data set to work with pre-packed hdf5 data base files.
Should be used only in combination with the files processed from `create_h5_files` in the
`utils` module.
Parameters
----------
h5_file : Path
Path to hdf5 file, containing the bundled data
basins : List
List containing the basin ids
db_path : str
Path to sqlite3 database file, containing the catchment characteristics
concat_static : bool
If true, adds catchment characteristics at each time step to the meteorological forcing
input data, by default True
cache : bool, optional
If True, loads the entire data into memory, by default False
no_static : bool, optional
If True, no catchment attributes are added to the inputs, by default False
"""
def __init__(self,
h5_file: Path,
basins: List,
db_path: str,
concat_static: bool = True,
cache: bool = False,
no_static: bool = False):
self.h5_file = h5_file
self.basins = basins
self.db_path = db_path
self.concat_static = concat_static
self.cache = cache
self.no_static = no_static
# Placeholder for catchment attributes stats
self.df = None
self.attribute_names = None
# preload data if cached is true
if self.cache:
(self.x, self.y, self.sample_2_basin, self.q_stds) = self._preload_data()
# load attributes into data frame
self._load_attributes()
# determine number of samples once
if self.cache:
self.num_samples = self.y.shape[0]
else:
with h5py.File(h5_file, 'r') as f:
self.num_samples = f["target_data"].shape[0]
def __len__(self):
return self.num_samples
def __getitem__(self, idx: int):
if self.cache:
x = self.x[idx]
y = self.y[idx]
basin = self.sample_2_basin[idx]
q_std = self.q_stds[idx]
else:
with h5py.File(self.h5_file, 'r') as f:
x = f["input_data"][idx]
y = f["target_data"][idx]
basin = f["sample_2_basin"][idx]
basin = basin.decode("ascii")
q_std = f["q_stds"][idx]
if not self.no_static:
# get attributes from data frame and create 2d array with copies
attributes = self.df.loc[self.df.index == basin].values
if self.concat_static:
attributes = np.repeat(attributes, repeats=x.shape[0], axis=0)
# combine meteorological obs with static attributes
x = np.concatenate([x, attributes], axis=1).astype(np.float32)
else:
attributes = torch.from_numpy(attributes.astype(np.float32))
# convert to torch tensors
x = torch.from_numpy(x.astype(np.float32))
y = torch.from_numpy(y.astype(np.float32))
q_std = torch.from_numpy(q_std)
if self.no_static:
return x, y, q_std
else:
if self.concat_static:
return x, y, q_std
else:
return x, attributes, y, q_std
def _preload_data(self):
print("Preloading training data.")
with h5py.File(self.h5_file, 'r') as f:
x = f["input_data"][:]
y = f["target_data"][:]
str_arr = f["sample_2_basin"][:]
str_arr = [x.decode("ascii") for x in str_arr]
q_stds = f["q_stds"][:]
return x, y, str_arr, q_stds
def _get_basins(self):
if self.cache:
basins = list(set(self.sample_2_basin))
else:
with h5py.File(self.h5_file, 'r') as f:
str_arr = f["sample_2_basin"][:]
str_arr = [x.decode("ascii") for x in str_arr]
basins = list(set(str_arr))
return basins
def _load_attributes(self):
df = load_static_attributes(self.db_path, self.basins, drop_lat_lon=True)
# normalize data
self.attribute_scalers = {}
for feature in [f for f in df.columns if f[:7] != 'onehot_']:
self.attribute_scalers[feature] = \
StaticAttributeScaler(self.db_path, self.basins, feature)
df[feature] = self.attribute_scalers[feature].normalize(df[feature])
# store attribute names
self.attribute_names = df.columns
self.df = df
|
gauchm/mlstream
|
mlstream/experiment.py
|
<filename>mlstream/experiment.py
import json
import random
from pathlib import Path
from typing import Dict, List, Tuple
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
from .datasets import LumpedBasin, LumpedH5
from .scaling import InputScaler, OutputScaler, StaticAttributeScaler
from .utils import setup_run, prepare_data, nse
from .datautils import load_static_attributes
class Experiment:
"""Main entrypoint for training and prediction. """
def __init__(self, data_root: Path, is_train: bool, run_dir: Path,
start_date: str = None, end_date: str = None,
basins: List = None, forcing_attributes: List = None,
static_attributes: List = None, seq_length: int = 10,
concat_static: bool = False, no_static: bool = False,
cache_data: bool = False, n_jobs: int = 1, seed: int = 0,
allow_negative_target: bool = False,
run_metadata: Dict = {}):
"""Initializes the experiment.
Parameters
----------
data_root : Path
Path to the base directory of the data set.
is_train : bool
If True, will setup folder structure for training.
run_dir: Path
Path to store experiment results in.
start_date : str, optional
Start date (training start date if ``is_train``, else
validation start date, ddmmyyyy)
end_date : str, optional
End date (training end date if ``is_train``, else
validation end date, ddmmyyyy)
basins : List, optional
List of basins to use during training,
or basins to predict during prediction.
forcing_attributes : List, optional
Names of forcing attributes to use.
static_attributes : List, optional
Names of static basin attributes to use.
seq_length : int, optional
Length of historical forcings to feed the model. Default 10
cache_data : bool, optional
If True, will preload all data in memory for training. Default False
concat_static : bool, optional
If True, will concatenate static basin attributes with forcings for model input.
no_static : bool, optional
If True, will not use static basin attributes as model input.
n_jobs : int, optional
Number of workers to use for training. Default 1
seed : int, optional
Seed to use for training. Default 0
allow_negative_target : bool, optional
If False, will ignore training samples with negative target value from the dataset, and will
clip predictions to values >= 0. This value should be False when predicting discharge, but True
when predicting values that can be negative.
run_metadata : dict, optional
Optional dictionary of values to store in cfg.json for documentation purpose.
"""
self.model = None
self.results = {}
self.cfg = {
"data_root": data_root,
"run_dir": run_dir,
"start_date": pd.to_datetime(start_date, format='%d%m%Y'),
"end_date": pd.to_datetime(end_date, format='%d%m%Y'),
"basins": basins,
"forcing_attributes": forcing_attributes,
"static_attributes": static_attributes,
"seq_length": seq_length,
"cache_data": cache_data,
"concat_static": concat_static,
"no_static": no_static,
"seed": seed,
"n_jobs": n_jobs,
"allow_negative_target": allow_negative_target
}
self.cfg.update(run_metadata)
if is_train:
# create folder structure for this run
self.cfg = setup_run(self.cfg)
# prepare data for training
self.cfg = prepare_data(cfg=self.cfg, basins=basins)
def set_model(self, model) -> None:
"""Set the model to use in the experiment. """
self.model = model
def train(self) -> None:
"""Train model. """
if self.model is None:
raise AttributeError("Model is not set.")
# fix random seeds
random.seed(self.cfg["seed"])
np.random.seed(self.cfg["seed"])
torch.cuda.manual_seed(self.cfg["seed"])
torch.manual_seed(self.cfg["seed"])
# prepare PyTorch DataLoader
ds = LumpedH5(h5_file=self.cfg["train_file"],
basins=self.cfg["basins"],
db_path=self.cfg["db_path"],
concat_static=self.cfg["concat_static"],
cache=self.cfg["cache_data"],
no_static=self.cfg["no_static"])
self.model.train(ds)
def predict(self) -> Dict:
"""Generates predictions with a trained model.
Returns
-------
results : Dict
Dictionary containing the DataFrame of predictions and observations for each basin.
"""
with open(self.cfg["run_dir"] / 'cfg.json', 'r') as fp:
run_cfg = json.load(fp)
if self.model is None:
raise AttributeError("Model is not set.")
# self.cfg["start_date"] contains validation start date,
# run_cfg["start_date"] the training start date
run_cfg["start_date"] = pd.to_datetime(run_cfg["start_date"], format='%d%m%Y')
run_cfg["end_date"] = pd.to_datetime(run_cfg["end_date"], format='%d%m%Y')
self.cfg["start_date"] = pd.to_datetime(self.cfg["start_date"], format='%d%m%Y')
self.cfg["end_date"] = pd.to_datetime(self.cfg["end_date"], format='%d%m%Y')
# create scalers
input_scalers = InputScaler(self.cfg["data_root"], run_cfg["basins"],
run_cfg["start_date"], run_cfg["end_date"],
run_cfg["forcing_attributes"])
output_scalers = OutputScaler(self.cfg["data_root"], run_cfg["basins"],
run_cfg["start_date"], run_cfg["end_date"])
static_scalers = {}
db_path = self.cfg["run_dir"] / "static_attributes.db"
df = load_static_attributes(db_path, run_cfg["basins"], drop_lat_lon=True)
for feature in [f for f in df.columns if 'onehot' not in f]:
static_scalers[feature] = StaticAttributeScaler(db_path, run_cfg["basins"],
feature)
# self.cfg["basins"] contains the test basins, run_cfg["basins"] the train basins.
for basin in tqdm(self.cfg["basins"]):
ds_test = LumpedBasin(data_root=Path(self.cfg["data_root"]),
basin=basin,
forcing_vars=run_cfg["forcing_attributes"],
dates=[self.cfg["start_date"], self.cfg["end_date"]],
is_train=False,
train_basins=run_cfg["basins"],
seq_length=run_cfg["seq_length"],
with_attributes=not run_cfg["no_static"],
concat_static=run_cfg["concat_static"],
db_path=db_path,
allow_negative_target=run_cfg["allow_negative_target"],
scalers=(input_scalers, output_scalers, static_scalers))
preds, obs = self.predict_basin(ds_test, allow_negative_target=run_cfg["allow_negative_target"])
date_range = pd.date_range(start=self.cfg["start_date"]
+ pd.DateOffset(days=run_cfg["seq_length"] - 1),
end=self.cfg["end_date"])
df = pd.DataFrame(data={'qobs': obs.flatten(), 'qsim': preds.flatten()},
index=date_range)
self.results[basin] = df
return self.results
def predict_basin(self, ds: LumpedBasin, allow_negative_target: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Predicts a single basin.
Parameters
----------
ds : LumpedBasin
Dataset for the basin to predict
allow_negative_target : bool, default False
If False, will clip predictions to values >= 0
Returns
-------
preds : np.ndarray
Array containing the (rescaled) network prediction for the entire data period
obs : np.ndarray
Array containing the observed discharge for the entire data period
"""
preds, obs = self.model.predict(ds)
preds = ds.output_scalers.rescale(preds)
if not allow_negative_target:
preds[preds < 0] = 0
return preds, obs
def get_nses(self) -> Dict:
"""Calculates the experiment's NSE for each calibration basin.
Validation basins are ignored since they don't provide ground truth.
Returns
-------
nses : Dict
Dictionary mapping basin ids to their NSE
Raises
------
AttributeError
If called before predicting
"""
if len(self.results) == 0:
raise AttributeError("No results to evaluate.")
nses = {}
for basin, df in self.results.items():
# ignore validation basins that have no ground truth
if not all(pd.isna(df['qobs'])):
nses[basin] = nse(df['qsim'].values, df['qobs'].values)
return nses
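
# Illustrative end-to-end sketch added for clarity (not part of the original module).
# Paths, dates, basin ids and attribute names are placeholders, and `model` stands
# for any LumpedModel implementation (e.g. LumpedSklearnRegression or LumpedXGBoost).
def _example_train(data_root, run_dir, model):
    exp = Experiment(data_root=data_root, is_train=True, run_dir=run_dir,
                     start_date='01011990', end_date='31121999',
                     basins=['01234567'],
                     forcing_attributes=['precipitation', 'temperature'],
                     static_attributes=['drainage_area'],
                     seq_length=10, concat_static=True)
    exp.set_model(model)
    exp.train()
    return exp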
|
gauchm/mlstream
|
mlstream/datautils.py
|
import re
from typing import Dict, List, Tuple
from pathlib import Path
import sqlite3
import pandas as pd
import numpy as np
import netCDF4 as nc
from numba import njit
def get_basin_list(data_root: Path, basin_type: str) -> List:
"""Returns the list of basin names.
If basin_type is 'C' or 'V', the gauge_info.csv needs to contain a column
'Cal_Val' that indicates the basin type.
Parameters
----------
data_root : Path
Path to base data directory, which contains a folder 'gauge_info'
with the ``gauge_info.csv`` file
basin_type : str
'C' to return calibration stations only,
'V' to return validation stations only,
'*' to return all stations
Returns
-------
list
List of basin name strings
"""
if basin_type not in ['*', 'C', 'V']:
raise ValueError('Illegal basin type')
gauge_info_file = data_root / 'gauge_info' / 'gauge_info.csv'
gauge_info = pd.read_csv(gauge_info_file)
if basin_type != '*':
if 'Cal_Val' not in gauge_info.columns:
raise RuntimeError('gauge_info.csv needs column "Cal_Val" to filter for basin types.')
gauge_info = gauge_info[gauge_info['Cal_Val'] == basin_type]
if 'Gauge_ID' not in gauge_info.columns:
raise RuntimeError('gauge_info.csv has no column "Gauge_ID".')
basins = gauge_info['Gauge_ID'].str.zfill(8).values
return np.unique(basins).tolist()
def load_discharge(data_root: Path, basins: List = None) -> pd.DataFrame:
"""Loads observed discharge for (calibration) gauging stations.
Parameters
----------
data_root : Path
Path to base data directory, which contains a directory 'discharge'
with one or more nc-files.
basins : List, optional
List of basins for which to return data. If None (default), all basins are returned
Returns
-------
pd.DataFrame
A DataFrame with columns [date, basin, qobs], where 'qobs' contains the streamflow.
"""
discharge_dir = data_root / 'discharge'
files = [f for f in discharge_dir.glob('*') if f.is_file()]
data_streamflow = pd.DataFrame(columns=['date', 'basin', 'qobs'])
found_basins = []
for f in files:
try:
file_format = f.name.split(".")[-1]
if file_format == "nc":
q_nc = nc.Dataset(f, 'r')
file_basins = np.array([f.zfill(8) for f in q_nc['station_id'][:]])
if basins is not None:
# some basins might be in multiple NC-files. We only load them once.
target_basins = [i for i, b in enumerate(file_basins)
if b in basins and b not in found_basins]
else:
target_basins = [i for i, b in enumerate(file_basins)
if b not in found_basins]
if len(target_basins) > 0:
time = nc.num2date(q_nc['time'][:], q_nc['time'].units, q_nc['time'].calendar)
data = pd.DataFrame(q_nc['Q'][target_basins, :].T, index=time,
columns=file_basins[target_basins])
data = data.unstack().reset_index().rename({'level_0': 'basin',
'level_1': 'date',
0: 'qobs'}, axis=1)
found_basins += data['basin'].unique().tolist()
data_streamflow = data_streamflow.append(data, ignore_index=True, sort=True)
q_nc.close()
else:
raise NotImplementedError(f"Discharge format {file_format} not supported.")
except Exception as e:
print (f"Couldn't load discharge from {f.name}. Skipping ...")
return data_streamflow
def load_forcings_lumped(data_root: Path, basins: List = None) -> Dict:
"""Loads basin-lumped forcings.
Parameters
----------
data_root : Path
Path to base data directory, which contains the directory 'forcings/lumped/',
which contains one .rvt/.csv/.txt -file per basin.
basins : List, optional
List of basins for which to return data. Default (None) returns data for all basins.
Returns
-------
dict
Dictionary of forcings (pd.DataFrame) per basin
"""
lumped_dir = data_root / 'forcings' / 'lumped'
basin_files = [f for f in lumped_dir.glob('*') if f.is_file()]
basin_forcings = {}
for f in basin_files:
try:
basin = f.name.split('_')[-1][:-4].zfill(8)
file_format = f.name.split(".")[-1]
if basins is not None and basin not in basins:
continue
if file_format == 'rvt':
with open(f) as fp:
next(fp)
start_date = next(fp)[:10]
columns = re.split(r',\s+', next(fp).replace('\n', ''))[1:]
data = pd.read_csv(f, sep=r',\s*', skiprows=4, skipfooter=1, names=columns, dtype=float,
header=None, usecols=range(len(columns)), engine='python')
elif file_format in ['txt', 'csv']:
with open(f) as fp:
columns = re.split(',', next(fp).replace('\n', ''))[1:]
start_date = next(fp)[:10]
data = pd.read_csv(f, dtype=float, usecols=range(1, len(columns)+1))
else:
raise NotImplementedError(f"Forcing format {file_format} not supported.")
data.index = pd.date_range(start_date, periods=len(data), freq='D')
basin_forcings[basin] = data
except Exception as e:
print (f"Couldn't load lumped forcings from {f.name}. Skipping ...")
return basin_forcings
def store_static_attributes(data_root: Path, db_path: Path = None, attribute_names: List = None):
"""Loads catchment characteristics from text file and stores them in a sqlite3 table
Parameters
----------
data_root : Path
Path to the main directory of the data set
db_path : Path, optional
Path to where the database file should be saved. If None, stores the database in
data_root/static_attributes.db. Default: None
attribute_names : List, optional
List of attribute names to use. Default: use all attributes.
Raises
------
RuntimeError
If attributes folder can not be found.
"""
f = data_root / 'gauge_info' / 'gauge_info.csv'
gauge_info = pd.read_csv(f).rename({'Gauge_ID': 'basin'}, axis=1)
gauge_info["basin"] = gauge_info["basin"].str.zfill(8)
gauge_info.set_index('basin', inplace=True)
if attribute_names is not None:
static_attributes = gauge_info.loc[:, attribute_names]
else:
static_attributes = gauge_info
if db_path is None:
db_path = data_root / 'static_attributes.db'
with sqlite3.connect(str(db_path)) as conn:
        # insert into database
static_attributes.to_sql('basin_attributes', conn)
print(f"Successfully stored basin attributes in {db_path}.")
def load_static_attributes(db_path: Path,
basins: List,
drop_lat_lon: bool = True,
keep_features: List = None) -> pd.DataFrame:
"""Loads attributes from database file into DataFrame and one-hot-encodes non-numerical features.
Parameters
----------
db_path : Path
Path to sqlite3 database file
basins : List
List containing the basin id
drop_lat_lon : bool
If True, drops latitude and longitude column from final data frame, by default True
keep_features : List
If a list is passed, a pd.DataFrame containing these features will be returned. By default,
returns a pd.DataFrame containing the features used for training.
Returns
-------
pd.DataFrame
Attributes in a pandas DataFrame. Index is basin id.
"""
with sqlite3.connect(str(db_path)) as conn:
df = pd.read_sql("SELECT * FROM 'basin_attributes'", conn, index_col='basin')
# drop lat/lon col
if drop_lat_lon:
df = df.drop(['Lat_outlet', 'Lon_outlet'], axis=1, errors='ignore')
# drop invalid attributes
if keep_features is not None:
drop_names = [c for c in df.columns if c not in keep_features]
df = df.drop(drop_names, axis=1)
# one-hot-encoding
non_numeric_features = [f for f in df.columns if df[f].dtype == object]
df = pd.get_dummies(df, columns=non_numeric_features,
prefix=[f"onehot_{f}" for f in non_numeric_features])
# drop rows of basins not contained in data set
df = df.loc[basins]
return df
@njit
def reshape_data(x: np.ndarray, y: np.ndarray, seq_length: int) -> Tuple[np.ndarray, np.ndarray]:
"""Reshape data into LSTM many-to-one input samples
Parameters
----------
x : np.ndarray
Input features of shape [num_samples, num_features]
y : np.ndarray
Output feature of shape [num_samples, 1]
seq_length : int
Length of the requested input sequences.
Returns
-------
x_new : np.ndarray
Reshaped input features of shape [num_samples*, seq_length, num_features], where
num_samples* is equal to num_samples - seq_length + 1, due to the need of a warm start at
the beginning
y_new : np.ndarray
The target value for each sample in x_new
"""
num_samples, num_features = x.shape
x_new = np.zeros((num_samples - seq_length + 1, seq_length, num_features))
y_new = np.zeros((num_samples - seq_length + 1, 1))
for i in range(0, x_new.shape[0]):
x_new[i, :, :num_features] = x[i:i + seq_length, :]
y_new[i, :] = y[i + seq_length - 1, 0]
return x_new, y_new
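
# Illustrative shape check added for clarity (not part of the original module): with
# 5 time steps, 2 features and seq_length=3, reshape_data yields 5 - 3 + 1 = 3
# overlapping windows, each labelled with the value at its last time step.
def _demo_reshape_data():
    x = np.arange(10, dtype=np.float64).reshape(5, 2)
    y = np.arange(5, dtype=np.float64).reshape(5, 1)
    x_new, y_new = reshape_data(x, y, 3)
    assert x_new.shape == (3, 3, 2) and y_new.shape == (3, 1)
    assert y_new[0, 0] == y[2, 0]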
|
gauchm/mlstream
|
mlstream/models/sklearn_models.py
|
<filename>mlstream/models/sklearn_models.py<gh_stars>1-10
from pathlib import Path
import pickle
import numpy as np
from sklearn.base import BaseEstimator
from torch.utils.data import DataLoader
from ..datasets import LumpedBasin, LumpedH5
from .base_models import LumpedModel
class LumpedSklearnRegression(LumpedModel):
"""Wrapper for scikit-learn regression models on lumped data. """
def __init__(self, model: BaseEstimator,
no_static: bool = False, concat_static: bool = True,
run_dir: Path = None, n_jobs: int = 1):
if not no_static and not concat_static:
raise ValueError("Sklearn regression has to use concat_static.")
self.model = model
self.run_dir = run_dir
self.n_jobs = n_jobs
def load(self, model_file: Path) -> None:
self.model = pickle.load(open(model_file, 'rb'))
def train(self, ds: LumpedH5) -> None:
# Create train/val sets
loader = DataLoader(ds, batch_size=len(ds), num_workers=self.n_jobs)
data = next(iter(loader))
# with concat_static or no_static, each sample is an (x, y, q_stds) tuple
if len(data) == 3:
x, y, _ = data # ignore q_stds
# this shouldn't happen since we raise an exception if concat_static is False.
else:
raise ValueError("Sklearn regression has to use concat_static.")
x = x.reshape(len(x), -1)
y = y.reshape(len(y))
print("Fitting model.")
self.model.fit(x, y)
model_path = self.run_dir / 'model.pkl'
with open(model_path, 'wb') as f:
pickle.dump(self.model, f)
print(f"Model saved as {model_path}.")
def predict(self, ds: LumpedBasin) -> np.ndarray:
loader = DataLoader(ds, batch_size=len(ds), shuffle=False, num_workers=4)
data = next(iter(loader))
if len(data) == 2:
x, y = data
# this shouldn't happen since we didn't allow concat_static = False in training.
else:
raise ValueError("sklearn regression has to use concat_static or no_static.")
x = x.reshape(len(x), -1)
y = y.reshape(len(y))
return self.model.predict(x), y
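# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# Wraps a scikit-learn regressor for lumped data. The run directory is hypothetical,
# and building the LumpedH5 training dataset (`ds`) and the per-basin LumpedBasin
# datasets is omitted because it depends on the data layout.
def _example_sklearn_wrapper():
    from sklearn.ensemble import RandomForestRegressor
    model = LumpedSklearnRegression(RandomForestRegressor(n_estimators=50),
                                    no_static=False, concat_static=True,
                                    run_dir=Path('runs/rf_example'),  # hypothetical
                                    n_jobs=2)
    # model.train(ds)                        # would fit on a LumpedH5 dataset and pickle the model
    # preds, obs = model.predict(basin_ds)   # would evaluate a single LumpedBasin
    return model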
|
gauchm/mlstream
|
setup.py
|
from setuptools import setup
import os
def readme():
with open(os.path.dirname(os.path.realpath(__file__)) + '/README.md') as f:
return f.read()
requires = [
'pandas',
'numpy',
'h5py',
'tqdm',
'netCDF4',
'numba',
'scipy',
]
# to save build resources, we mock torch and xgboost while building the docs
if not os.getenv('READTHEDOCS'):
requires.append('torch')
setup(name='mlstream',
version='0.1.2',
description='Machine learning for streamflow prediction',
long_description=readme(),
long_description_content_type='text/markdown',
keywords='ml hydrology streamflow machine learning',
url='http://github.com/gauchm/mlstream',
author='<NAME>',
author_email='<EMAIL>',
license='Apache-2.0',
packages=['mlstream', 'mlstream.models'],
install_requires=requires,
include_package_data=True,
zip_safe=False)
|
gauchm/mlstream
|
mlstream/models/base_models.py
|
<gh_stars>1-10
from pathlib import Path
import numpy as np
from ..datasets import (LumpedBasin, LumpedH5)
class LumpedModel:
"""Model that operates on lumped (daily, basin-averaged) inputs. """
def load(self, model_file: Path) -> None:
"""Loads a trained and pickled model.
Parameters
----------
model_file : Path
Path to the stored model.
"""
pass
def train(self, ds: LumpedH5) -> None:
"""Trains the model.
Parameters
----------
ds : LumpedH5
Training dataset
"""
pass
def predict(self, ds: LumpedBasin) -> np.ndarray:
"""Generates predictions for a basin.
Parameters
----------
ds : LumpedBasin
Dataset of the basin to predict.
Returns
-------
np.ndarray
Array of predictions.
"""
pass
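# --- Illustrative sketch (editor's addition, not a model shipped with mlstream) ---
# A minimal subclass showing how the interface is meant to be filled in. It assumes
# each dataset item is an (x, y, q_std) tuple, as in the lumped datasets used by the
# concrete models, and simply predicts the mean training target.
class _ExampleMeanModel(LumpedModel):
    """Predicts the mean target of the training data for every sample. """

    def __init__(self):
        self.mean = None

    def load(self, model_file: Path) -> None:
        self.mean = float(np.loadtxt(model_file))

    def train(self, ds: LumpedH5) -> None:
        # average the target of every sample (assumed to be the second tuple entry)
        self.mean = float(np.mean([float(ds[i][1].mean()) for i in range(len(ds))]))

    def predict(self, ds: LumpedBasin) -> np.ndarray:
        return np.full(len(ds), self.mean)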
|
gauchm/mlstream
|
mlstream/models/nseloss.py
|
<gh_stars>1-10
import torch
import numpy as np
class NSELoss(torch.nn.Module):
"""Calculates (batch-wise) NSE Loss.
Each sample i is weighted by 1 / (std_i + eps)^2, where std_i is the standard deviation of the
discharge of the basin to which the sample belongs.
Parameters
----------
eps : float
Constant added to the weight for numerical stability and smoothing, defaults to 0.1
"""
def __init__(self, eps: float = 0.1):
super().__init__()
self.eps = eps
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor, q_stds: torch.Tensor):
"""Calculates the batch-wise NSE loss function.
Parameters
----------
y_pred : torch.Tensor
Tensor containing the network prediction.
y_true : torch.Tensor
Tensor containing the true discharge values
q_stds : torch.Tensor
Tensor containing the discharge std (calculated over training period) of each sample
Returns
-------
torch.Tensor
The batch-wise NSE-Loss
"""
squared_error = (y_pred - y_true)**2
weights = 1 / (q_stds + self.eps)**2
scaled_loss = weights * squared_error
return torch.mean(scaled_loss)
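# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# A small worked example of the basin-weighted squared error: with eps = 0.1, a sample
# from a basin with q_std = 0.4 gets weight 1 / (0.4 + 0.1)^2 = 4, while one with
# q_std = 0.9 gets weight 1 / (0.9 + 0.1)^2 = 1.
def _example_nse_loss():
    criterion = NSELoss(eps=0.1)
    y_pred = torch.tensor([1.0, 2.0])
    y_true = torch.tensor([0.5, 2.5])
    q_stds = torch.tensor([0.4, 0.9])
    loss = criterion(y_pred, y_true, q_stds)
    # mean of [4 * 0.25, 1 * 0.25] = 0.625
    return loss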
class XGBNSEObjective:
"""Custom NSE XGBoost objective.
This is a bit of a hack: We use a unique dummy target value for each sample,
allowing us to look up the q_std that corresponds to the sample's station.
When calculating the loss, we replace the dummy with the actual target so
the model learns the right thing.
"""
def __init__(self, dummy_target, actual_target, q_stds, eps: float = 0.1):
self.dummy_target = dummy_target.reshape(-1)
self.actual_target = actual_target.reshape(-1)
self.q_stds = q_stds.reshape(-1)
self.eps = eps
def nse_objective_xgb_sklearn_api(self, y_true, y_pred):
"""NSE objective for XGBoost (sklearn API). """
indices = np.searchsorted(self.dummy_target, y_true)
normalization = ((self.q_stds[indices] + self.eps)**2)
grad = 2 * (y_pred - self.actual_target[indices]) / normalization
hess = 2.0 / normalization
return grad, hess
def nse_objective_xgb(self, y_pred, dtrain):
"""NSE objective for XGBoost (non-sklearn API). """
y_true = dtrain.get_label()
indices = np.searchsorted(self.dummy_target, y_true)
normalization = ((self.q_stds[indices] + self.eps)**2)
grad = 2 * (y_pred - self.actual_target[indices]) / normalization
hess = 2.0 / normalization
return grad, hess
def nse(self, y_pred, y_true, q_stds):
squared_error = (y_pred - y_true)**2
weights = 1 / (q_stds + self.eps)**2
return np.mean(weights * squared_error)
def nse_metric_xgb(self, y_pred, y_true):
"""NSE metric for XGBoost. """
indices = np.searchsorted(self.dummy_target, y_true.get_label())
nse = self.nse(y_pred, self.actual_target[indices], self.q_stds[indices])
return 'nse', nse
def neg_nse_metric_sklearn(self, estimator, X, y_true):
"""Negative NSE metric for sklearn. """
y_pred = estimator.predict(X)
indices = np.searchsorted(self.dummy_target, y_true)
return -self.nse(y_pred, self.actual_target[indices], self.q_stds[indices])
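# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# How the dummy-target trick is wired up: each training sample gets a unique, sorted
# dummy label (here simply its index), which the objective maps back to the real target
# and the basin's q_std via np.searchsorted. The arrays are toy data, and the XGBoost
# call is only sketched in a comment because xgboost is not imported in this module.
def _example_xgb_nse_objective():
    n = 4
    dummy_target = np.arange(n, dtype=np.float64)   # unique and sorted
    actual_target = np.array([0.3, 0.8, 0.1, 0.5])
    q_stds = np.array([0.4, 0.4, 0.9, 0.9])         # per-sample basin stds
    obj = XGBNSEObjective(dummy_target, actual_target, q_stds, eps=0.1)
    # With the sklearn API one would train against the *dummy* labels, e.g. (hypothetical):
    #   model = xgboost.XGBRegressor(objective=obj.nse_objective_xgb_sklearn_api)
    #   model.fit(X, dummy_target)
    grad, hess = obj.nse_objective_xgb_sklearn_api(dummy_target, np.zeros(n))
    return grad, hess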
|
gauchm/mlstream
|
mlstream/models/lstm.py
|
"""
Large parts of this implementation are taken over from
https://github.com/kratzert/ealstm_regional_modeling.
"""
import sys
from pathlib import Path
from typing import Tuple, Dict
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, SubsetRandomSampler
from ..datasets import LumpedBasin, LumpedH5
from .base_models import LumpedModel
from .nseloss import NSELoss
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class LumpedLSTM(LumpedModel):
"""(EA-)LSTM model for lumped data. """
def __init__(self, num_dynamic_vars: int, num_static_vars: int, use_mse: bool = True,
no_static: bool = False, concat_static: bool = False,
run_dir: Path = None, n_jobs: int = 1, hidden_size: int = 256,
learning_rate: float = 1e-3, learning_rates: Dict = {}, epochs: int = 30,
initial_forget_bias: int = 5, dropout: float = 0.0, batch_size: int = 256,
clip_norm: bool = True, clip_value: float = 1.0):
input_size_stat = 0 if no_static else num_static_vars
input_size_dyn = num_dynamic_vars if (no_static or not concat_static) \
else num_static_vars + num_dynamic_vars
self.no_static = no_static
self.run_dir = run_dir
self.epochs = epochs
self.batch_size = batch_size
self.learning_rates = learning_rates
self.learning_rates[0] = learning_rate
self.clip_norm = clip_norm
self.clip_value = clip_value
self.n_jobs = n_jobs
self.use_mse = use_mse
self.model = Model(input_size_dyn=input_size_dyn,
input_size_stat=input_size_stat,
concat_static=concat_static,
no_static=no_static,
hidden_size=hidden_size,
initial_forget_bias=initial_forget_bias,
dropout=dropout).to(DEVICE)
self.loss_func = nn.MSELoss() if use_mse else NSELoss()
def load(self, model_file: Path) -> None:
self.model.load_state_dict(torch.load(model_file, map_location=DEVICE))
def train(self, ds: LumpedH5) -> None:
val_indices = np.random.choice(len(ds), size=int(0.1 * len(ds)), replace=False)
train_indices = [i for i in range(len(ds)) if i not in val_indices]
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
self.train_loader = DataLoader(ds, self.batch_size,
sampler=train_sampler,
drop_last=False,
num_workers=self.n_jobs)
self.val_loader = DataLoader(ds, self.batch_size,
sampler=val_sampler,
drop_last=False,
num_workers=self.n_jobs)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rates[0])
for epoch in range(1, self.epochs + 1):
# set new learning rate
if epoch in self.learning_rates.keys():
for param_group in self.optimizer.param_groups:
param_group["lr"] = self.learning_rates[epoch]
self._train_epoch(epoch)
val_loss = self._val_epoch()
print(f"# Epoch {epoch}: validation loss: {val_loss:.7f}.")
model_path = self.run_dir / f"model_epoch{epoch}.pt"
torch.save(self.model.state_dict(), str(model_path))
print(f"Model saved as {model_path}.")
def predict(self, ds: LumpedBasin) -> np.ndarray:
self.model.eval()
loader = DataLoader(ds, batch_size=1024, shuffle=False, num_workers=4)
preds, obs = None, None
with torch.no_grad():
for data in loader:
if len(data) == 2:
x, y = data
x = x.to(DEVICE)
p = self.model(x)[0]
elif len(data) == 3:
x_d, x_s, y = data
x_d, x_s = x_d.to(DEVICE), x_s.to(DEVICE)
p = self.model(x_d, x_s[:, 0, :])[0]
if preds is None:
preds = p.detach().cpu()
obs = y
else:
preds = torch.cat((preds, p.detach().cpu()), 0)
obs = torch.cat((obs, y), 0)
return preds.numpy(), obs.numpy()
def _train_epoch(self, epoch: int):
"""Trains model for a single epoch.
Parameters
----------
epoch : int
Number of the current epoch
"""
self.model.train()
# process bar handle
pbar = tqdm(self.train_loader, file=sys.stdout)
pbar.set_description(f'# Epoch {epoch}')
# Iterate in batches over training set
running_loss = 0
for i, data in enumerate(pbar):
# delete old gradients
self.optimizer.zero_grad()
# forward pass through LSTM
if len(data) == 3:
x, y, q_stds = data
x, y, q_stds = x.to(DEVICE), y.to(DEVICE), q_stds.to(DEVICE)
predictions = self.model(x)[0]
# forward pass through EALSTM
elif len(data) == 4:
x_d, x_s, y, q_stds = data
x_d, x_s, y = x_d.to(DEVICE), x_s.to(DEVICE), y.to(DEVICE)
predictions = self.model(x_d, x_s[:, 0, :])[0]
# MSELoss
if self.use_mse:
loss = self.loss_func(predictions, y)
# NSELoss needs std of each basin for each sample
else:
q_stds = q_stds.to(DEVICE)
loss = self.loss_func(predictions, y, q_stds)
# calculate gradients
loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_value)
# perform parameter update
self.optimizer.step()
running_loss += loss.item()
pbar.set_postfix_str(f"Loss: {loss.item():.6f} / Mean: {running_loss / (i+1):.6f}")
def _val_epoch(self) -> float:
"""Calculates loss on validation set during training.
Returns
-------
loss : float
Mean validation loss
"""
self.model.eval()
loss = 0.0
with torch.no_grad():
for data in self.val_loader:
# forward pass through LSTM
if len(data) == 3:
x, y, q_stds = data
x, y, q_stds = x.to(DEVICE), y.to(DEVICE), q_stds.to(DEVICE)
predictions = self.model(x)[0]
# forward pass through EALSTM
elif len(data) == 4:
x_d, x_s, y, q_stds = data
x_d, x_s, y = x_d.to(DEVICE), x_s.to(DEVICE), y.to(DEVICE)
predictions = self.model(x_d, x_s[:, 0, :])[0]
# MSELoss
if self.use_mse:
loss += self.loss_func(predictions, y).item()
# NSELoss needs std of each basin for each sample
else:
q_stds = q_stds.to(DEVICE)
loss += self.loss_func(predictions, y, q_stds).item()
return loss / len(self.val_loader)
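# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# Configures the lumped (EA-)LSTM. The feature counts and run directory are
# hypothetical; training requires a LumpedH5 dataset, which is omitted here.
def _example_lumped_lstm():
    model = LumpedLSTM(num_dynamic_vars=5,
                       num_static_vars=20,
                       use_mse=False,        # train with the basin-weighted NSE loss
                       concat_static=False,  # EA-LSTM: static inputs modulate the input gate
                       run_dir=Path('runs/lstm_example'),  # hypothetical
                       hidden_size=128,
                       learning_rate=1e-3,
                       epochs=5,
                       batch_size=64)
    # model.train(ds)  # would fit on a LumpedH5 dataset and checkpoint after every epoch
    return model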
class Model(nn.Module):
"""Wrapper class that connects LSTM/EA-LSTM with fully connceted layer"""
def __init__(self,
input_size_dyn: int,
input_size_stat: int,
hidden_size: int,
initial_forget_bias: int = 5,
dropout: float = 0.0,
concat_static: bool = False,
no_static: bool = False):
"""Initializes the model.
Parameters
----------
input_size_dyn : int
Number of dynamic input features.
input_size_stat : int
Number of static input features (used in the EA-LSTM input gate).
hidden_size : int
Number of LSTM cells/hidden units.
initial_forget_bias : int
Value of the initial forget gate bias. (default: 5)
dropout : float
Dropout probability in range(0,1). (default: 0.0)
concat_static : bool
If True, uses the standard LSTM with static inputs concatenated to the dynamic ones; otherwise uses the EA-LSTM.
no_static : bool
If True, runs the standard LSTM without static inputs.
"""
super(Model, self).__init__()
self.input_size_dyn = input_size_dyn
self.input_size_stat = input_size_stat
self.hidden_size = hidden_size
self.initial_forget_bias = initial_forget_bias
self.dropout_rate = dropout
self.concat_static = concat_static
self.no_static = no_static
if self.concat_static or self.no_static:
self.lstm = LSTM(input_size=input_size_dyn,
hidden_size=hidden_size,
initial_forget_bias=initial_forget_bias)
else:
self.lstm = EALSTM(input_size_dyn=input_size_dyn,
input_size_stat=input_size_stat,
hidden_size=hidden_size,
initial_forget_bias=initial_forget_bias)
self.dropout = nn.Dropout(p=dropout)
self.fc = nn.Linear(hidden_size, 1)
def forward(self, x_d: torch.Tensor, x_s: torch.Tensor = None) \
-> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Run forward pass through the model.
Parameters
----------
x_d : torch.Tensor
Tensor containing the dynamic input features of shape [batch, seq_length, n_features]
x_s : torch.Tensor, optional
Tensor containing the static catchment characteristics, by default None
Returns
-------
out : torch.Tensor
Tensor containing the network predictions
h_n : torch.Tensor
Tensor containing the hidden states of each time step
c_n : torch.Tensor
Tensor containing the cell states of each time step
"""
if self.concat_static or self.no_static:
h_n, c_n = self.lstm(x_d)
else:
h_n, c_n = self.lstm(x_d, x_s)
last_h = self.dropout(h_n[:, -1, :])
out = self.fc(last_h)
return out, h_n, c_n
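# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# A shape check of the wrapper's forward pass on random tensors: a batch of 8 sequences
# of length 30 with 5 dynamic and 10 static features. The sizes are arbitrary.
def _example_model_forward():
    model = Model(input_size_dyn=5, input_size_stat=10, hidden_size=32)
    x_d = torch.rand(8, 30, 5)  # [batch, seq_length, dynamic features]
    x_s = torch.rand(8, 10)     # [batch, static features]
    out, h_n, c_n = model(x_d, x_s)
    assert out.shape == (8, 1)
    assert h_n.shape == (8, 30, 32) and c_n.shape == (8, 30, 32)
    return out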
class EALSTM(nn.Module):
"""Implementation of the Entity-Aware-LSTM (EA-LSTM)
Model details: https://arxiv.org/abs/1907.08456
Parameters
----------
input_size_dyn : int
Number of dynamic features, i.e. those passed to the LSTM at each time step.
input_size_stat : int
Number of static features, i.e. those used to modulate the input gate.
hidden_size : int
Number of hidden/memory cells.
batch_first : bool, optional
If True, expects the batch inputs to be of shape [batch, seq, features] otherwise, the
shape has to be [seq, batch, features], by default True.
initial_forget_bias : int, optional
Value of the initial forget gate bias, by default 0
"""
def __init__(self,
input_size_dyn: int,
input_size_stat: int,
hidden_size: int,
batch_first: bool = True,
initial_forget_bias: int = 0):
super(EALSTM, self).__init__()
self.input_size_dyn = input_size_dyn
self.input_size_stat = input_size_stat
self.hidden_size = hidden_size
self.batch_first = batch_first
self.initial_forget_bias = initial_forget_bias
# create tensors of learnable parameters
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 * hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 * hidden_size))
self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat, hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))
# initialize parameters
self.reset_parameters()
def reset_parameters(self):
"""Initialize all learnable parameters of the LSTM"""
nn.init.orthogonal_(self.weight_ih.data)
nn.init.orthogonal_(self.weight_sh)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
nn.init.constant_(self.bias_s.data, val=0)
if self.initial_forget_bias != 0:
self.bias.data[:self.hidden_size] = self.initial_forget_bias
def forward(self, x_d: torch.Tensor, x_s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs a forward pass on the model.
Parameters
----------
x_d : torch.Tensor
Tensor, containing a batch of sequences of the dynamic features. Shape has to match
the format specified with batch_first.
x_s : torch.Tensor
Tensor, containing a batch of static features.
Returns
-------
h_n : torch.Tensor
The hidden states of each time step of each sample in the batch.
c_n : torch.Tensor
The cell states of each time step of each sample in the batch.
"""
if self.batch_first:
x_d = x_d.transpose(0, 1)
seq_len, batch_size, _ = x_d.size()
h_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
c_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
h_x = (h_0, c_0)
# empty lists to temporally store all intermediate hidden/cell states
h_n, c_n = [], []
# expand bias vectors to batch size
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
# calculate input gate only once because inputs are static
bias_s_batch = (self.bias_s.unsqueeze(0).expand(batch_size, *self.bias_s.size()))
i = torch.sigmoid(torch.addmm(bias_s_batch, x_s, self.weight_sh))
# perform forward steps over input sequence
for t in range(seq_len):
h_0, c_0 = h_x
# calculate gates
gates = (torch.addmm(bias_batch, h_0, self.weight_hh)
+ torch.mm(x_d[t], self.weight_ih))
f, o, g = gates.chunk(3, 1)
c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
# store intermediate hidden/cell state in list
h_n.append(h_1)
c_n.append(c_1)
h_x = (h_1, c_1)
h_n = torch.stack(h_n, 0)
c_n = torch.stack(c_n, 0)
if self.batch_first:
h_n = h_n.transpose(0, 1)
c_n = c_n.transpose(0, 1)
return h_n, c_n
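# --- Illustrative usage (editor's addition, a sketch rather than part of the module) ---
# The defining property of the EA-LSTM is that its input gate depends only on the
# static features, so it is computed once per sequence instead of once per time step.
# The toy shapes below are arbitrary.
def _example_ealstm_forward():
    lstm = EALSTM(input_size_dyn=3, input_size_stat=7, hidden_size=16)
    x_d = torch.rand(4, 20, 3)  # [batch, seq_length, dynamic features]
    x_s = torch.rand(4, 7)      # [batch, static features]
    h_n, c_n = lstm(x_d, x_s)
    assert h_n.shape == (4, 20, 16) and c_n.shape == (4, 20, 16)
    return h_n[:, -1, :]        # last hidden state, as consumed by the Model wrapper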
class LSTM(nn.Module):
"""Implementation of the standard LSTM.
Parameters
----------
input_size : int
Number of input features
hidden_size : int
Number of hidden/memory cells.
batch_first : bool, optional
If True, expects the batch inputs to be of shape [batch, seq, features] otherwise, the
shape has to be [seq, batch, features], by default True.
initial_forget_bias : int, optional
Value of the initial forget gate bias, by default 0
"""
def __init__(self,
input_size: int,
hidden_size: int,
batch_first: bool = True,
initial_forget_bias: int = 0):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_first = batch_first
self.initial_forget_bias = initial_forget_bias
# create tensors of learnable parameters
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 * hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 * hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
# initialize parameters
self.reset_parameters()
def reset_parameters(self):
"""Initializes all learnable parameters of the LSTM. """
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 4)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if self.initial_forget_bias != 0:
self.bias.data[:self.hidden_size] = self.initial_forget_bias
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs a forward pass on the model.
Parameters
----------
x : torch.Tensor
Tensor containing a batch of input sequences. The shape must match the format
defined by the batch_first argument.
Returns
-------
h_n : torch.Tensor
The hidden states of each time step of each sample in the batch.
c_n : torch.Tensor
The cell states of each time step of each sample in the batch.
"""
if self.batch_first:
x = x.transpose(0, 1)
seq_len, batch_size, _ = x.size()
h_0 = x.data.new(batch_size, self.hidden_size).zero_()
c_0 = x.data.new(batch_size, self.hidden_size).zero_()
h_x = (h_0, c_0)
# empty lists to temporally store all intermediate hidden/cell states
h_n, c_n = [], []
# expand bias vectors to batch size
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
# perform forward steps over input sequence
for t in range(seq_len):
h_0, c_0 = h_x
# calculate gates
gates = (torch.addmm(bias_batch, h_0, self.weight_hh) + torch.mm(x[t], self.weight_ih))
f, i, o, g = gates.chunk(4, 1)
c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
# store intermediate hidden/cell state in list
h_n.append(h_1)
c_n.append(c_1)
h_x = (h_1, c_1)
h_n = torch.stack(h_n, 0)
c_n = torch.stack(c_n, 0)
if self.batch_first:
h_n = h_n.transpose(0, 1)
c_n = c_n.transpose(0, 1)
return h_n, c_n
|