repo_name | path | text |
|---|---|---|
lijun99/altar | models/cudalinear/cudalinear/cudaLinear.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
from altar.cuda import cublas
from altar.cuda import libcuda
from altar.cuda.models.cudaBayesian import cudaBayesian
# declaration
class cudaLinear(cudaBayesian, family="altar.models.cudalinear"):
"""
Cuda implementation of a linear model
A linear model is defined as data = G theta
"""
# data observations
dataobs = altar.cuda.data.data()
dataobs.default = altar.cuda.data.datal2()
dataobs.doc = "the observed data"
# the file based inputs
green = altar.properties.path(default="green.txt")
green.doc = "the name of the file with the Green functions"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
super().initialize(application=application)
# grab the cublas handle from my device
self.cublas_handle = self.device.get_cublas_handle()
# convert the input filenames into data
self.GF = self.loadGF()
self.prepareGF()
# prepare the residuals matrix
self.gDataPred = altar.cuda.matrix(shape=(self.samples, self.observations), dtype=self.precision)
# all done
return self
def _forwardModel(self, theta, prediction, batch, observation=None):
"""
Linear forward model: prediction = G theta (or G theta - observation, when an observation is supplied)
"""
# whether data observation is provided
# gemm C = alpha A B + beta C
if observation is None:
beta = 0.0
else:
# make a copy
prediction.copy(observation)
# to be subtracted in gemm
beta = -1.0
# forward model
# prediction = Green * theta
# in cublas col-major
# prediction (obs x samples) = green^T(obs x param) x theta(param, samples)
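# note on the layout trick: the gpu matrices are stored row-major, so cublas (which is
# column-major) sees each buffer as its transpose; requesting op(A) = A^T on green therefore
# yields, in row-major terms, prediction(samples x obs) = theta(samples x param) * green^T,
# with m = observations, n = batch (samples), k = parameters as passed below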
green = self.gGF
libcuda.cublas_gemm(self.cublas_handle,
1, 0, # transa, transb
prediction.shape[1], batch, theta.shape[1], # m, n, k
1.0, # alpha
green.data, green.shape[1], # A, lda
theta.data, theta.shape[1], # B, ldb
beta,
prediction.data, prediction.shape[1])
# all done
return self
def cuEvalLikelihood(self, theta, likelihood, batch):
"""
Called by the superclass cuEvalLikelihood, which decides where the local likelihood gets accumulated
"""
residuals = self.gDataPred
# call the forward model to calculate the data prediction, or its residual with respect to dataobs
self._forwardModel(theta=theta, prediction=residuals, batch=batch,
observation= self.dataobs.gdataObsBatch)
# call data to calculate the l2 norm
self.dataobs.cuEvalLikelihood(prediction=residuals, likelihood=likelihood,
residual=True, batch=batch)
# return the likelihood
return likelihood
def loadGF(self):
"""
Load the data in the input files into memory
"""
# grab the input dataspace
ifs = self.ifs
# first the green functions
try:
# get the path to the file
gf = ifs[self.green]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing Green functions: no '{self.green}' in '{self.case}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
green = altar.matrix(shape=(self.observations, self.parameters))
# and load the file contents into memory
green.load(gf.uri)
# all done
return green
def prepareGF(self):
"""
Copy the Green's functions to the GPU and merge cd with them
"""
# make a gpu copy
self.gGF = altar.cuda.matrix(source=self.GF, dtype=self.precision)
# merge cd with Green's function
cd_inv = self.dataobs.gcd_inv
green = self.gGF
# (obsxobs) x (obsxparameters) = (obsxparameters)
cublas.trmm(cd_inv, green, out=green, side=cublas.SideLeft, uplo=cublas.FillModeUpper,
transa = cublas.OpNoTrans, diag=cublas.DiagNonUnit, alpha=1.0,
handle = self.cublas_handle)
#cublas.gemm(cd_inv, green, out=green)
return
# private data
GF = None # the Green functions
gGF = None
gDataPred = None
cublas_handle=None
# end of file
|
lijun99/altar | models/cdm/cdm/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# access to the CDM source
from .Source import Source as source
# and the layout of the input file
from .Data import Data as data
# implementations
@altar.foundry(implements=altar.models.model, tip="a multi-parameter CDM model")
def cdm():
# grab the factory
from .CDM import CDM as cdm
# attach its docstring
__doc__ = cdm.__doc__
# and return it
return cdm
# end of file
|
lijun99/altar | cuda/cuda/bayesian/cudaMetropolisVaryingSteps.py | <filename>cuda/cuda/bayesian/cudaMetropolisVaryingSteps.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# externals
import math
# the package
import altar
import altar.cuda
from altar.cuda import curand
from altar.cuda import cublas
from altar.cuda import libcudaaltar
# my protocol
from altar.bayesian.Sampler import Sampler
# declaration
class cudaMetropolisVaryingSteps(altar.component, family="altar.samplers.metropolis", implements=Sampler):
"""
The Metropolis algorithm as a sampler of the posterior distribution
"""
# types
from .cudaCoolingStep import cudaCoolingStep
# user configurable state
scaling = altar.properties.float(default=.1)
scaling.doc = 'the parameter covariance Σ is scaled by the square of this'
acceptanceWeight = altar.properties.float(default=8)
acceptanceWeight.doc = 'the weight of accepted samples during covariance rescaling'
rejectionWeight = altar.properties.float(default=1)
rejectionWeight.doc = 'the weight of rejected samples during covariance rescaling'
max_mc_steps = altar.properties.int(default=100000)
max_mc_steps.doc = 'the maximum Monte-Carlo steps for one beta step'
corr_check_steps = altar.properties.int(default=1000)
corr_check_steps.doc = 'the number of Monte-Carlo steps between correlation checks'
target_correlation = altar.properties.float(default=0.6)
target_correlation.doc = 'the threshold of correlation to stop the chain'
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize me and my parts given an {application} context
"""
# pull the chain length from the job specification
self.mcsteps = application.job.steps
# get the capsule of the random number generator
self.device = application.controller.worker.device
self.curng = self.device.curand_generator
self.precision = application.job.gpuprecision
# all done
return self
def cuInitialize(self, application):
self.initialize(application=application)
return self
@altar.export
def samplePosterior(self, annealer, step):
"""
Sample the posterior distribution
Arguments:
annealer - the controller
step - cpu CoolingStep
Return:
statistics = (accepted, invalid, rejected)
"""
# grab the dispatcher
dispatcher = annealer.dispatcher
# notify we have started sampling the posterior
dispatcher.notify(event=dispatcher.samplePosteriorStart, controller=annealer)
# prepare the sampling pdf, copy step to gpu step
self.prepareSamplingPDF(annealer=annealer, step=step)
# check whether model parameters need to be updated, e.g., Cp
model = annealer.model
if model.updateModel(annealer=annealer):
# if updated, recompute datalikelihood and posterior
gstep = self.gstep
batch = gstep.samples
gstep.prior.zero(), gstep.data.zero(), gstep.posterior.zero()
model.likelihoods(annealer=annealer, step=gstep, batch=batch)
# walk the chains
statistics = self.walkChains(annealer=annealer, step=self.gstep)
# finish the sampling pdf, copy gpu step back
self.finishSamplingPDF(step=step)
# notify we are done sampling the posterior
dispatcher.notify(event=dispatcher.samplePosteriorFinish, controller=annealer)
# all done
return statistics
@altar.provides
def resample(self, annealer, statistics):
"""
Update my statistics based on the results of walking my Markov chains
"""
# update the scaling of the parameter covariance matrix
self.adjustCovarianceScaling(*statistics)
# all done
return
# implementation details
def prepareSamplingPDF(self, annealer, step):
"""
Re-scale and decompose the parameter covariance matrix, in preparation for the
Metropolis update
"""
# get the dispatcher
dispatcher = annealer.dispatcher
# notify we have started preparing the sampling PDF
dispatcher.notify(event=dispatcher.prepareSamplingPDFStart, controller=annealer)
# allocate local gpu data if not allocated
self.gstep = annealer.worker.gstep
if self.ginit is not True:
self.allocateGPUData(step.samples, step.parameters)
# copy cpu step state
self.gstep.copyFromCPU(step=step)
# unpack what i need
self.gsigma_chol.copy_from_host(source=step.sigma)
# compute its Cholesky decomposition
self.gsigma_chol.Cholesky(uplo=cublas.FillModeUpper)
# scale it
self.gsigma_chol *= self.scaling
# notify we are done preparing the sampling PDF
dispatcher.notify(event=dispatcher.prepareSamplingPDFFinish, controller=annealer)
# all done
return
def finishSamplingPDF(self, step):
"""
Procedures after sampling, e.g., copying data back to the CPU
"""
# copy gpu step back to cpu
self.gstep.copyToCPU(step=step)
return
def walkChains(self, annealer, step):
"""
Run the Metropolis algorithm on the Markov chains
Arguments:
annealer: cudaAnnealer
step: cudaCoolingStep
Return:
statistics = (accepted, invalid, rejected)
"""
# get the model
model = annealer.model
# and the event dispatcher
dispatcher = annealer.dispatcher
# unpack what i need from the cooling step
β = step.beta
θ = step.theta
prior = step.prior
data = step.data
posterior = step.posterior
# get the parameter covariance
Σ_chol = self.gsigma_chol
# the sample geometry
samples = step.samples
parameters = step.parameters
# a couple of functions from the math module
# reset the accept/reject counters
# note the difference from CPU Metropolis
# invalid is for proposed samples out of range
# accepted is for samples being updated
# rejected is for samples rejected by M-H proposals
accepted = rejected = invalid = 0
# allocate some vectors that we use throughout the following
# candidate likelihoods
candidate = self.gcandidate
θproposal = self.gproposal
cprior = candidate.prior
cdata = candidate.data
cpost = candidate.posterior
cθ = candidate.theta
# the mask of samples rejected due to model constraint violations
invalid_flags = self.ginvalid_flags
valid_indices = self.gvalid_indices
acceptance_flags = self.gacceptance_flags
valid_samples = self.gvalid_samples
# and a vector with random numbers for the Metropolis acceptance
dice = self.gdice
# copy the beta over
candidate.beta = step.beta
# make a copy of the starting samples
θstart = θ.clone()
correlation = 1.0
mcsteps = 0
while correlation > self.target_correlation and mcsteps < self.max_mc_steps:
for ihop in range(self.corr_check_steps):
# notify we are advancing the chains
dispatcher.notify(event=dispatcher.chainAdvanceStart, controller=annealer)
# notify we are starting the verification process
dispatcher.notify(event=dispatcher.verifyStart, controller=annealer)
# the random displacement may have generated candidates that are outside the
# support of the model, so we must give it an opportunity to reject them;
# initialize the candidate sample by randomly displacing the current one
self.displace(displacement=θproposal)
θproposal += θ
# reset the mask and ask the model to verify the sample validity
# note that I have redefined model.verify to use theta as input
model.cuVerify(theta=θproposal, mask=invalid_flags.zero())
invalid_step = int(invalid_flags.sum())
valid = samples - invalid_step
# if valid = 0, continue to next MC step
if valid == 0:
invalid += invalid_step
continue
# if valid > 0, proceed to Metropolis accept-reject
# set indices for valid samples, return valid samples count
libcudaaltar.cudaMetropolis_setValidSampleIndices(valid_indices.data, invalid_flags.data,
valid_samples.data)
# get the invalid samples count
invalid += invalid_step
# queue valid samples to first rows of cθ
libcudaaltar.cudaMetropolis_queueValidSamples(cθ.data, θproposal.data, valid_indices.data, valid)
# notify that the verification process is finished
dispatcher.notify(event=dispatcher.verifyFinish, controller=annealer)
# initialize the likelihoods
likelihoods = cprior.zero(), cdata.zero(), cpost.zero()
# compute the probabilities/likelihoods
model.likelihoods(annealer=annealer, step=candidate, batch=valid)
# randomize the Metropolis acceptance vector
dice = curand.uniform(self.curng, out=dice)
# notify we are starting accepting samples
dispatcher.notify(event=dispatcher.acceptStart, controller=annealer)
# accept/reject: go through all the samples
libcudaaltar.cudaMetropolis_metropolisUpdate(
θ.data, prior.data, data.data, posterior.data, # original
cθ.data, cprior.data, cdata.data, cpost.data, # candidate
dice.data, acceptance_flags.zero().data, valid_indices.data, valid)
# counting the acceptance/rejection
accepted_step = int(acceptance_flags.sum())
accepted += accepted_step
rejected += valid - accepted_step
# notify we are done accepting samples
dispatcher.notify(event=dispatcher.acceptFinish, controller=annealer)
# notify we are done advancing the chains
dispatcher.notify(event=dispatcher.chainAdvanceFinish, controller=annealer)
correlation = altar.cuda.stats.correlation(θstart, θ, axis=0).amax()
mcsteps += self.corr_check_steps
print(f"correlation {correlation} at {mcsteps}")
# all done
# print(f'stats: accepted {accepted}, invalid {invalid}, rejected {rejected}')
return accepted, invalid, rejected
def displace(self, displacement):
"""
Construct a set of displacement vectors for the random walk from a distribution with zero
mean and my covariance
"""
# get my decomposed covariance
Σ_chol = self.gsigma_chol
# generate gaussian random numbers (samples x parameters)
altar.cuda.curand.gaussian(out=displacement)
# multiply by the sigma_cholesky
cublas.trmm(Σ_chol, displacement, out=displacement,
alpha=1.0, uplo=cublas.FillModeUpper, side=cublas.SideRight,
transa = cublas.OpNoTrans, diag=cublas.DiagNonUnit)
# and return
return displacement
def adjustCovarianceScaling(self, accepted, invalid, rejected):
"""
Compute a new value for the covariance scaling factor based on the acceptance/rejection
ratio
"""
# unpack my weights
aw = self.acceptanceWeight
rw = self.rejectionWeight
# compute the acceptance ratio
acceptance = accepted / (accepted + rejected + invalid)
# the fudge factor
kc = (aw*acceptance + rw)/(aw+rw)
# don't let it get too small
if kc < .1: kc = .1
# or too big
if kc > 1.: kc = 1.
# store it
self.scaling = kc
# and return
return self
def allocateGPUData(self, samples, parameters):
"""
initialize gpu work data
"""
precision = self.precision
# allocate of cudaCoolingStep
self.gcandidate = self.cudaCoolingStep.alloc(samples, parameters, dtype=precision)
self.gproposal = altar.cuda.matrix(shape=(samples, parameters), dtype=precision)
# allocate sigma_chol
self.gsigma_chol = altar.cuda.matrix(shape=(parameters, parameters), dtype=precision)
# allocate local
self.ginvalid_flags = altar.cuda.vector(shape=samples, dtype='int32')
self.gacceptance_flags = altar.cuda.vector(shape=samples, dtype='int32')
self.gvalid_indices = altar.cuda.vector(shape=samples, dtype='int32')
self.gvalid_samples = altar.cuda.vector(shape=1, dtype='int32')
self.gdice = altar.cuda.vector(shape=samples, dtype=precision)
# set initialized flag = 1
self.ginit = True
return
# private data
mcsteps = 1 # the length of each Markov chain
dispatcher = None # a reference to the event dispatcher
ginit = False # whether gpu data are allocated
gstep = None # cuda/gpu step for keeping sampling states
gcandidate = None # cuda/gpu candidate state
gproposal = None # save theta jumps
gsigma_chol = None # placeholder for the scaled and decomposed parameter covariance matrix
gvalid_indices = None
gvalid_samples = None
ginvalid_flags = None
gacceptance_flags = None
precision = None
gdice = None
curng = None
# end of file
|
lijun99/altar | altar/altar/data/DataL2.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# my protocol
from .DataObs import DataObs as data
# declaration
class DataL2(altar.component, family="altar.data.datal2", implements=data):
"""
The observed data with L2 norm
"""
data_file = altar.properties.path(default="data.txt")
data_file.doc = "the name of the file with the observations"
observations = altar.properties.int(default=1)
observations.doc = "the number of observed data"
cd_file = altar.properties.path(default=None)
cd_file.doc = "the name of the file with the data covariance matrix"
cd_std = altar.properties.float(default=1.0)
cd_std.doc = "the constant covariance for data"
merge_cd_with_data = altar.properties.bool(default=False)
merge_cd_with_data.doc = "whether to merge cd with data"
norm = altar.norms.norm()
norm.default = altar.norms.l2()
norm.doc = "the norm to use when computing the data log likelihood"
@altar.export
def initialize(self, application):
"""
Initialize data obs from model
"""
# get the input path from model
self.error = application.error
# get the number of samples
self.samples = application.job.chains
# load the data and covariance
self.ifs = application.pfs["inputs"]
self.loadData()
# compute inverse of covariance, normalization
self.initializeCovariance(cd=self.cd)
# all done
return self
def evalLikelihood(self, prediction, likelihood, residual=True, batch=None):
"""
compute the data likelihood for prediction (samples x observations)
"""
#depending on convenience, users can
# copy dataobs to their model and use the residual as input of prediction
# or compute prediction from forward model and subtract the dataobs here
batch = batch if batch is not None else likelihood.shape
# go through the residual of each sample
for idx in range(batch):
# extract it
dp = prediction.getRow(idx)
# subtract the dataobs if residual is not pre-calculated
if not residual:
dp -= self.dataobs
if self.merge_cd_with_data:
# cd already merged, no need to multiply it by cd
likelihood[idx] = self.normalization - 0.5 * self.norm.eval(v=dp)
else:
likelihood[idx] = self.normalization - 0.5 * self.norm.eval(v=dp, sigma_inv=self.cd_inv)
# all done
return self
def dataobsBatch(self):
"""
Get a batch of duplicated dataobs
"""
if self.dataobs_batch is None:
self.dataobs_batch = altar.matrix(shape=(self.samples, self.observations))
# for each sample
for sample in range(self.samples):
# make the corresponding column a copy of the data vector
self.dataobs_batch.setColumn(sample, self.dataobs)
return self.dataobs_batch
def loadData(self):
"""
load data and covariance
"""
# grab the input dataspace
ifs = self.ifs
# next, the observations
try:
# get the path to the file
df = ifs[self.data_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing observations: no '{self.data_file}' {ifs.path()}")
# and raise the exception again
raise
# if all goes well
else:
# allocate the vector
self.dataobs= altar.vector(shape=self.observations)
# and load the file contents into memory
self.dataobs.load(df.uri)
if self.cd_file is not None:
# finally, the data covariance
try:
# get the path to the file
cf = ifs[self.cd_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing data covariance matrix: no '{self.cd_file}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
self.cd = altar.matrix(shape=(self.observations, self.observations))
# and load the file contents into memory
self.cd.load(cf.uri)
else:
# use a constant covariance
self.cd = self.cd_std
return
def initializeCovariance(self, cd):
"""
For a given data covariance cd, compute the L2 likelihood normalization and the inverse of cd in
Cholesky-decomposed form, and optionally merge cd with the data observations, d -> L*d, where cd^{-1} = L L*
:param cd:
:return:
"""
# grab the number of observations
observations = self.observations
if isinstance(cd, altar.matrix):
# normalization
self.normalization = self.computeNormalization(observations=observations, cd=cd)
# inverse matrix
self.cd_inv = self.computeCovarianceInverse(cd=cd)
# merge cd to data
if self.merge_cd_with_data:
Cd_inv = self.cd_inv
self.dataobs = altar.blas.dtrmv( Cd_inv.upperTriangular, Cd_inv.opNoTrans, Cd_inv.nonUnitDiagonal,
Cd_inv, self.dataobs)
elif isinstance(cd, float):
# cd is standard deviation
from math import log, pi as π
self.normalization = -0.5*log(2*π*cd)*observations
self.cd_inv = 1.0/self.cd
if self.merge_cd_with_data:
self.dataobs *= self.cd_inv
# all done
return self
def updateCovariance(self, cp):
"""
Update data covariance with cp, cd -> cd + cp
:param cp: a matrix with shape (obs, obs)
:return:
"""
# make a copy of cp
cchi = cp.clone()
# add cd (scalar or matrix)
cchi += self.cd
self.initializeCovariance(cd=cchi)
return self
def computeNormalization(self, observations, cd):
"""
Compute the normalization of the L2 norm
"""
# support
from math import log, pi as π
# make a copy of cd
cd = cd.clone()
# compute its LU decomposition
decomposition = altar.lapack.LU_decomposition(cd)
# use it to compute the log of its determinant
logdet = altar.lapack.LU_lndet(*decomposition)
# all done
return - (log(2*π)*observations + logdet) / 2
def computeCovarianceInverse(self, cd):
"""
Compute the inverse of the data covariance matrix
"""
# make a copy so we don't destroy the original
cd = cd.clone()
# perform the LU decomposition
lu = altar.lapack.LU_decomposition(cd)
# invert; this creates a new matrix
inv = altar.lapack.LU_invert(*lu)
# compute the Cholesky decomposition
inv = altar.lapack.cholesky_decomposition(inv)
# and return it
return inv
# local variables
normalization = 0
ifs = None
samples = None
dataobs = None
dataobs_batch = None
cd = None
cd_inv = None
error = None
# end of file
|
lijun99/altar | models/cdm/tests/sanity.py | <filename>models/cdm/tests/sanity.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
def test():
# get the model
from altar.models import cdm
# and publish it
return cdm
# bootstrap
if __name__ == "__main__":
# run the driver
test()
# report success
raise SystemExit(0)
# end of file
|
lijun99/altar | cuda/cuda/models/__init__.py | <filename>cuda/cuda/models/__init__.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# the base
from altar.models.Model import Model as model
from altar.models.ParameterSet import ParameterSet as parameters
# implementations
@altar.foundry(implements=model, tip="a cuda AlTar model")
def bayesian():
# grab the factory
from .cudaBayesian import cudaBayesian as bayesian
# attach its docstring
__doc__ = bayesian.__doc__
# and publish it
return bayesian
@altar.foundry(implements=model, tip="a collection of cuda AlTar model")
def bayesianensemble():
# grab the factory
from .cudaBayesianEnsemble import cudaBayesianEnsemble as bayesianensemble
# attach its docstring
__doc__ = bayesianensemble.__doc__
# and publish it
return bayesianensemble
@altar.foundry(implements=parameters, tip="a cuda parameter set")
def parameterset():
# grab the factory
from .cudaParameterSet import cudaParameterSet as parameterset
# attach its docstring
__doc__ = parameterset.__doc__
# and publish it
return parameterset
# end of file
|
lijun99/altar | models/cdm/cdm/libcdm.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2018-2021 jet propulsion laboratory
# (c) 2018-2021 california institute of technology
# all rights reserved
#
# United States Government Sponsorship acknowledged. Any commercial use must be negotiated with
# the Office of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting this software, the user
# agrees to comply with all applicable U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority as may be required before
# exporting such information to foreign countries or providing access to foreign persons.
#
# externals
import numpy
# utility functions
def cosd(angd):
# return cosine of angle expressed in degrees
return numpy.cos(numpy.radians(angd))
def sind(angd):
# return sine of angle expressed in degrees
return numpy.sin(numpy.radians(angd))
def norm(v):
# return 2-norm of a numpy.array
return numpy.sqrt(v.dot(v))
def CDM(X, Y, X0, Y0, depth, ax, ay, az, omegaX, omegaY, omegaZ, opening, nu):
"""
CDM
calculates the surface displacements and potency associated with a CDM
that is composed of three mutually orthogonal rectangular dislocations in
a half-space.
CDM: Compound Dislocation Model
RD: Rectangular Dislocation
EFCS: Earth-Fixed Coordinate System
RDCS: Rectangular Dislocation Coordinate System
ADCS: Angular Dislocation Coordinate System
(The origin of the RDCS is the RD centroid. The axes of the RDCS are
aligned with the strike, dip and normal vectors of the RD, respectively.)
INPUTS
X and Y:
Horizontal coordinates of calculation points in EFCS (East, North, Up).
X and Y must have the same size.
X0, Y0 and depth:
Horizontal coordinates (in EFCS) and depth of the CDM centroid. The depth
must be a positive value. X0, Y0 and depth have the same unit as X and Y.
omegaX, omegaY and omegaZ:
Clockwise rotation angles about X, Y and Z axes, respectively, that
specify the orientation of the CDM in space. The input values must be in
degrees.
ax, ay and az:
Semi-axes of the CDM along the X, Y and Z axes, respectively, before
applying the rotations. ax, ay and az have the same unit as X and Y.
opening:
The opening (tensile component of the Burgers vector) of the RDs that
form the CDM. The unit of opening must be the same as the unit of ax, ay
and az.
nu:
Poisson's ratio.
OUTPUTS
ue, un and uv:
Calculated displacement vector components in EFCS. ue, un and uv have the
same unit as opening and the CDM semi-axes in inputs.
DV:
Potency of the CDM. DV has the unit of volume (the unit of displacements,
opening and CDM semi-axes to the power of 3).
Example (MATLAB syntax, from the reference implementation): calculate and plot the vertical
displacements on a regular grid.
[X,Y] = meshgrid(-7:.02:7,-5:.02:5);
X0 = 0.5; Y0 = -0.25; depth = 2.75; omegaX = 5; omegaY = -8; omegaZ = 30;
ax = 0.4; ay = 0.45; az = 0.8; opening = 1e-3; nu = 0.25;
[ue,un,uv,DV] = CDM(X,Y,X0,Y0,depth,omegaX,omegaY,omegaZ,ax,ay,az,...
opening,nu);
figure
surf(X,Y,reshape(uv,size(X)),'edgecolor','none')
view(2)
axis equal
axis tight
set(gcf,'renderer','painters')
Reference journal article:
<NAME>., <NAME>., <NAME>., <NAME>. (2016):
Compound dislocation models (CDMs) for volcano deformation analyses.
Submitted to Geophysical Journal International
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
I appreciate any comments or bug reports.
<NAME>
Created: 2015.5.22
Last modified: 2016.10.18
Section 2.1, Physics of Earthquakes and Volcanoes
Department 2, Geophysics
Helmholtz Centre Potsdam
German Research Centre for Geosciences (GFZ)
email:
<EMAIL>
<EMAIL>
website:
http://www.volcanodeformation.com
Converted from Matlab to Python
April 2018 by <NAME>
Jet Propulsion Lab/Caltech
"""
ue=0
un=0
uv=0
DV=0
# [X, Y] is a meshgrid with X = repeated rows and Y = repeated columns
# rows = linspace(xmin, xmax, (xmax-xmin)/dx +1)
# columns = linspace(ymin, ymax, (ymax-ymin)/dy +1)
# We will use numpy.ndarray for the array objects rather than matrices
# (see, https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html)
#
# this matlab code flattens a matrix in column major order and returns a column vector
# X = X(:);
# Y = Y(:);
# in Python use method 'flatten' to flatten the 2-D array as an ndarray. We will not
# reshape ndarrays into column or row vectors until they are used in a matrix operation
# where it would matter, which is maybe never since the common sense thing is done correctly
# using the ndarray (shape of ndarray is coerced to make sense in the matrix operation).
# In the following use of flatten, argument order='F' would create a column vector
# (see docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html)
# X = X.flatten()
# Y = Y.flatten()
# convert the semi-axes (lengths) to axes
ax = 2*ax
ay = 2*ay
az = 2*az
# the axes coordinate rotation matrices
Rx = numpy.array([
[1., 0., 0. ],
[0., cosd(omegaX), sind(omegaX)],
[0., -sind(omegaX), cosd(omegaX)]
])
Ry = numpy.array([
[cosd(omegaY), 0., -sind(omegaY)],
[0., 1., 0. ],
[sind(omegaY), 0., cosd(omegaY)]
])
Rz = numpy.array([
[ cosd(omegaZ), sind(omegaZ), 0.],
[-sind(omegaZ), cosd(omegaZ), 0.],
[ 0., 0., 1.]
])
# the coordinate rotation matrix
R = Rz.dot(Ry.dot(Rx))
# The centroid
P0 = numpy.array([X0, Y0, -depth])
P1 = (P0+ay*R[:,1]/2. + az*R[:,2]/2.)
P2 = (P1-ay*R[:,1])
P3 = P2-az*R[:,2]
P4 = P1-az*R[:,2]
Q1 = P0-ax*R[:,0]/2. + az*R[:,2]/2.
Q2 = Q1+ax*R[:,0]
Q3 = Q2-az*R[:,2]
Q4 = Q1-az*R[:,2]
R1 = P0+ax*R[:,0]/2. + ay*R[:,1]/2.
R2 = R1-ax*R[:,0]
R3 = R2-ay*R[:,1]
R4 = R1-ay*R[:,1]
VertVec = numpy.array([P1[2], P2[2], P3[2], P4[2], Q1[2], Q2[2], Q3[2], Q4[2], R1[2], R2[2],
R3[2], R4[2]])
if numpy.any(VertVec>0):
raise ValueError('Half-space solution: The CDM must be under the free surface!' +
' VertVec={}'.format(VertVec))
if ax == 0 and ay == 0 and az == 0:
ue = numpy.zeros(numpy.shape(X))
un = numpy.zeros(numpy.shape(X))
uv = numpy.zeros(numpy.shape(X))
elif ax == 0 and ay !=0 and az !=0:
[ue, un, uv] = RDdispSurf(X, Y, P1, P2, P3, P4, opening, nu)
elif ax != 0 and ay == 0 and az != 0:
[ue, un, uv] = RDdispSurf(X, Y, Q1, Q2, Q3, Q4, opening, nu)
elif ax != 0 and ay != 0 and az == 0:
[ue, un, uv] = RDdispSurf(X, Y, R1, R2, R3, R4, opening, nu)
else:
[ue1, un1, uv1] = RDdispSurf(X, Y, P1, P2, P3, P4, opening, nu)
[ue2, un2, uv2] = RDdispSurf(X, Y, Q1, Q2, Q3, Q4, opening, nu)
[ue3, un3, uv3] = RDdispSurf(X, Y, R1, R2, R3, R4, opening, nu)
ue = ue1+ue2+ue3
un = un1+un2+un3
uv = uv1+uv2+uv3
# Calculate the CDM potency (aX, aY and aZ were converted to full axes)
DV = (ax*ay+ax*az+ay*az)*opening
return ue, un, uv
def RDdispSurf(X, Y, P1, P2, P3, P4, opening, nu):
"""
RDdispSurf calculates surface displacements associated with a rectangular
dislocation in an elastic half-space.
"""
bx = opening
Vnorm = numpy.cross(P2-P1, P4-P1)
Vnorm = Vnorm/norm(Vnorm)
bX = bx*Vnorm[0]
bY = bx*Vnorm[1]
bZ = bx*Vnorm[2]
[u1,v1,w1] = AngSetupFSC(X,Y,bX,bY,bZ,P1,P2,nu) # Side P1P2
[u2,v2,w2] = AngSetupFSC(X,Y,bX,bY,bZ,P2,P3,nu) # Side P2P3
[u3,v3,w3] = AngSetupFSC(X,Y,bX,bY,bZ,P3,P4,nu) # Side P3P4
[u4,v4,w4] = AngSetupFSC(X,Y,bX,bY,bZ,P4,P1,nu) # Side P4P1
ue = u1+u2+u3+u4
un = v1+v2+v3+v4
uv = w1+w2+w3+w4
return ue, un, uv
def CoordTrans(x1, x2, x3, A):
"""
CoordTrans transforms the coordinates of the vectors, from
x1x2x3 coordinate system to X1X2X3 coordinate system. "A" is the
transformation matrix, whose columns e1,e2 and e3 are the unit base
vectors of the x1x2x3. The coordinates of e1,e2 and e3 in A must be given
in X1X2X3. The transpose of A (i.e., A') will transform the coordinates
from X1X2X3 into x1x2x3.
"""
# In Matlab these three lines ensure that x1, x2, x3 are column vectors, which is assumed in the
# Matlab version of this routine. There is no need to do this with numpy.array.
# x1 = x1(:);
# x2 = x2(:);
# x3 = x3(:);
# check that the vectors x1, x2, x3 are of the correct length
# if not (len(x1)==3 and len(x2)==3 and len(x3)==3):
# raise ValueError("not all of x1, x2, x3 are of length 3")
# no need to convert x1, x2, x3 from column vectors into row vectors. They are
# numpy ndarrays and ready to work properly; the following uses them as rows in an array.
r = A.dot(numpy.array([x1, x2, x3]))
if len(r.shape) == 2:
X1 = r[0,:]
X2 = r[1,:]
X3 = r[2,:]
else:
X1 = r[0]
X2 = r[1]
X3 = r[2]
return X1, X2, X3
def AngSetupFSC(X, Y, bX, bY, bZ, PA, PB, nu):
"""
AngSetupSurf calculates the displacements associated with an angular
dislocation pair on each side of an RD in a half-space.
"""
SideVec = PB-PA
eZ = numpy.array([0, 0, 1])
beta = numpy.arccos(-SideVec.dot(eZ)/norm(SideVec))
eps = numpy.spacing(1) # distance between 1 and the nearest floating point number
if numpy.abs(beta)<eps or numpy.abs(numpy.pi-beta)<eps :
ue = numpy.zeros(numpy.shape(X))
un = numpy.zeros(numpy.shape(X))
uv = numpy.zeros(numpy.shape(X))
else:
ey1 = numpy.array([*SideVec[0:2],0])
ey1 = ey1/norm(ey1)
ey3 = -eZ
ey2 = numpy.cross(ey3,ey1)
A = numpy.array([ey1, ey2, ey3]) # Transformation matrix
# Transform coordinates from EFCS to the first ADCS
[y1A, y2A, unused] = CoordTrans(X-PA[0], Y-PA[1], numpy.zeros(X.size)-PA[2], A)
# Transform coordinates from EFCS to the second ADCS
[y1AB, y2AB, unused] = CoordTrans(SideVec[0], SideVec[1], SideVec[2], A)
y1B = y1A-y1AB
y2B = y2A-y2AB
# Transform slip vector components from EFCS to ADCS
[b1, b2, b3] = CoordTrans(bX, bY, bZ, A)
# Determine the best artefact-free configuration for the calculation
# points near the free surface
I = (beta*y1A)>=0
J = numpy.logical_not(I)
v1A = numpy.zeros(I.shape)
v2A = numpy.zeros(I.shape)
v3A = numpy.zeros(I.shape)
v1B = numpy.zeros(I.shape)
v2B = numpy.zeros(I.shape)
v3B = numpy.zeros(I.shape)
# Configuration I
v1A[I], v2A[I], v3A[I] = AngDisDispSurf(y1A[I], y2A[I], -numpy.pi+beta, b1, b2, b3, nu,
-PA[2])
v1B[I], v2B[I], v3B[I] = AngDisDispSurf(y1B[I], y2B[I], -numpy.pi+beta, b1, b2, b3, nu,
-PB[2])
# Configuration II
v1A[J], v2A[J], v3A[J] = AngDisDispSurf(y1A[J], y2A[J], beta, b1, b2, b3, nu, -PA[2])
v1B[J], v2B[J], v3B[J] = AngDisDispSurf(y1B[J], y2B[J], beta, b1, b2, b3, nu, -PB[2])
# Calculate total displacements in ADCS
v1 = v1B-v1A
v2 = v2B-v2A
v3 = v3B-v3A
# Transform total displacements from ADCS to EFCS
[ue, un, uv] = CoordTrans(v1, v2, v3, A.transpose())
return ue, un, uv
def AngDisDispSurf(y1, y2, beta, b1, b2, b3, nu, a):
"""
AngDisDispSurf calculates the displacements associated with an angular dislocation
in a half-space.
"""
sinB = numpy.sin(beta)
cosB = numpy.cos(beta)
cotB = 1.0/numpy.tan(beta)
z1 = y1*cosB + a*sinB
z3 = y1*sinB - a*cosB
r2 = y1**2 + y2**2 + a**2
r = numpy.sqrt(r2)
Fi = 2*numpy.arctan2(y2, (r+a)/numpy.tan(beta/2)-y1) # The Burgers function
v1b1 = b1/2/numpy.pi*(
(1-(1-2*nu)*cotB**2)*Fi +
y2/(r+a)*((1-2*nu)*(cotB+y1/2/(r+a))-y1/r) -
y2*(r*sinB-y1)*cosB/r/(r-z3)
)
v2b1 = b1/2/numpy.pi*(
(1-2*nu)*((.5+cotB**2)*numpy.log(r+a)-cotB/sinB*numpy.log(r-z3)) -
1./(r+a)*((1-2*nu)*(y1*cotB-a/2-y2**2/2/(r+a))+y2**2/r) +
y2**2*cosB/r/(r-z3)
)
v3b1 = b1/2/numpy.pi*(
(1-2*nu)*Fi*cotB+y2/(r+a)*(2*nu+a/r) -
y2*cosB/(r-z3)*(cosB+a/r)
)
v1b2 = b2/2/numpy.pi*(
-(1-2*nu)*((.5-cotB**2)*numpy.log(r+a) + cotB**2*cosB*numpy.log(r-z3) ) -
1/(r+a)*((1-2*nu)*(y1*cotB+.5*a+y1**2/2/(r+a)) - y1**2/r) +
z1*(r*sinB-y1)/r/(r-z3)
)
v2b2 = b2/2/numpy.pi*(
(1+(1-2*nu)*cotB**2)*Fi -
y2/(r+a)*((1-2*nu)*(cotB+y1/2/(r+a))-y1/r) -
y2*z1/r/(r-z3)
)
v3b2 = b2/2/numpy.pi*(
-(1-2*nu)*cotB*(numpy.log(r+a)-cosB*numpy.log(r-z3)) -
y1/(r+a)*(2*nu+a/r) +
z1/(r-z3)*(cosB+a/r)
)
v1b3 = b3/2/numpy.pi*(y2*(r*sinB-y1)*sinB/r/(r-z3))
v2b3 = b3/2/numpy.pi*(-y2**2*sinB/r/(r-z3))
v3b3 = b3/2/numpy.pi*(Fi + y2*(r*cosB+a)*sinB/r/(r-z3))
v1 = v1b1 + v1b2 + v1b3
v2 = v2b1 + v2b2 + v2b3
v3 = v3b1 + v3b2 + v3b3
return v1, v2, v3
# end-of-file
|
lijun99/altar | cuda/cuda/norms/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# publish the protocol for norms
from altar.norms.Norm import Norm as norm
# implementations
@altar.foundry(implements=norm, tip="the cudaL2 norm")
def l2():
# grab the factory
from .cudaL2 import cudaL2 as l2
# attach its docstring
__doc__ = l2.__doc__
# and return it
return l2
# end of file
|
lijun99/altar | altar/altar/actions/Forward.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
# Author(s): <NAME>
#
# get the package
import altar
# get my model package
import altar.models.seismic
# declaration
class Forward(altar.panel(), family='altar.actions.forward'):
"""
Perform forward modeling for a given model and parameter set
"""
# commands
@altar.export(tip="perform the forward modeling with a given parameter set")
def default(self, plexus, **kwds):
"""
Run the forward problem for the configured model
"""
# get the model
model = plexus.model
# set the model forwardonly flag
model.forwardonly = True
# call the forwardProblem method
return model.forwardProblem(application=plexus)
# end of file
|
lijun99/altar | altar/altar/models/__init__.py | <filename>altar/altar/models/__init__.py<gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# the protocols
from .Model import Model as model
from .ParameterSet import ParameterSet as parameters
# the model base class
from .Bayesian import Bayesian as bayesian
# implementations
@altar.foundry(implements=model, tip="a trivial AlTar model")
def null():
# grab the factory
from .Null import Null as null
# attach its docstring
__doc__ = null.__doc__
# and publish it
return null
@altar.foundry(implements=model, tip="a collection of models that comprise an AlTar model")
def ensemble():
# grab the factory
from .Ensemble import Ensemble as ensemble
# attach its docstring
__doc__ = ensemble.__doc__
# and publish it
return ensemble
@altar.foundry(implements=model, tip="a models that implements psets and dataobs with l2 norm")
def bayesianl2():
# grab the factory
from .BayesianL2 import BayesianL2 as bayesianl2
# attach its docstring
__doc__ = bayesianl2.__doc__
# and publish it
return bayesianl2
@altar.foundry(implements=parameters, tip="a contiguous parameter set")
def contiguous():
# grab the factory
from .Contiguous import Contiguous as contiguous
# attach its docstring
__doc__ = contiguous.__doc__
# and publish it
return contiguous
# end of file
|
lijun99/altar | altar/altar/shells/Application.py | <reponame>lijun99/altar
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# support
import altar
# the simple application shell
class Application(altar.application, family="altar.shells.application"):
"""
The base class for simple AlTar applications
"""
# user configurable state
job = altar.simulations.run()
job.doc = "the job input parameters"
model = altar.models.model()
model.doc = "the AlTar model to sample"
rng = altar.simulations.rng()
rng.doc = "the random number generator"
controller = altar.bayesian.controller()
controller.doc = "my simulation controller"
monitors = altar.properties.dict(schema=altar.simulations.monitor())
monitors.doc = "a collection of event handlers"
# protocol obligations
@altar.export
def main(self, *args, **kwds):
"""
The main entry point
"""
# N.B.: the initialization phase must be respectful of the interdependencies of these
# components; e.g., both {controller} and {model} expect an initialized {rng}
# initialize the job parameters
self.job.initialize(application=self)
# the random number generator
self.rng.initialize()
# the controller
self.controller.initialize(application=self)
# and the model; attach whatever the model initialization returns, just in case the
# model selects an implementation strategy based on my context
self.model = self.model.initialize(application=self)
# sample the posterior distribution
return self.model.posterior(application=self)
# pyre framework hooks
# interactive session management
def pyre_interactiveSessionContext(self, context):
"""
Go interactive
"""
# protect against bad context
if context is None:
# by initializing an empty one
context = {}
# add some symbols
context["altar"] = altar # my package
# and chain up
return super().pyre_interactiveSessionContext(context=context)
# machine layout adjustments for MPI runs
def pyre_mpi(self):
"""
Transfer my {job} settings to the MPI shell
"""
# get my shell
shell = self.shell
# if the programming model is not {MPI}
if shell.model != "mpi":
# something really bad has happened
self.firewall.log(f"the pyre_mpi hook invoked with model '{shell.model}'")
# get my job parameters
job = self.job
# transfer the job settings
shell.hosts = job.hosts
shell.tasks = job.tasks
# all done
return self
# end of file
|
lijun99/altar | altar/altar/bayesian/Recorder.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>. aïvázis, <NAME>
# the package
import altar
# an implementation of the archiver protocol
class Recorder(
altar.component,
family="altar.simulations.archivers.recorder",
implements=altar.simulations.archiver):
"""
Recorder saves the intermediate simulation state to files under {output_dir}
"""
# user configurable traits
output_dir = altar.properties.path(default="results")
output_dir.doc = "the directory to save results"
output_freq = altar.properties.int(default=1)
output_freq.doc = "the frequency to write step data to files"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize me given an {application} context
"""
# create a statistics list
self.statistics= []
# all done
return self
@altar.export
def record(self, step, iteration, psets, **kwds):
"""
Record the final state of the calculation
"""
# save the statistics
self.saveStats()
# save the last step
step.save_hdf5(path=self.output_dir, iteration=None, psets=psets)
# all done
return self
def recordstep(self, step, stats, psets):
"""
Record the current step to file at the configured output frequency
"""
iteration = stats['iteration']
# output CoolingStep
if iteration%self.output_freq == 0:
step.save_hdf5(path=self.output_dir, iteration=iteration, psets=psets)
# record statistics information
statcopy = stats.copy()
self.statistics.append(statcopy)
return self
def saveStats(self):
"""
Save the statistics information to file
"""
# output filename
import os
filename = os.path.join(self.output_dir.path, "BetaStatistics.txt")
# open the file
statfile = open(filename, "w")
# write the header
statfile.write("iteration, beta, scaling, (accepted, invalid, rejected)\n")
# write the statistics
for item in self.statistics:
stats = [str(k) for k in item.values()]
statfile.writelines(", ".join(stats) + "\n")
# close the file
statfile.close()
#return
return self
# unconfigurable traits
statistics = None
# end of file
|
lijun99/altar | cuda/cuda/data/cudaDataL2.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
from altar.cuda import libcuda
from altar.cuda import cublas as cublas
import numpy
# my protocol
from altar.data.DataL2 import DataL2
# declaration
class cudaDataL2(DataL2, family="altar.data.cudadatal2"):
"""
The observed data with L2 norm
"""
# configuration states copied from cpu counterpart
data_file = altar.properties.path(default="data.txt")
data_file.doc = "the name of the file with the observations"
observations = altar.properties.int(default=1)
observations.doc = "the number of observed data"
cd_file = altar.properties.path(default=None)
cd_file.doc = "the name of the file with the data covariance matrix"
cd_std = altar.properties.float(default=1.0)
cd_std.doc = "the constant covariance for data, sigma^2"
dtype_cd = altar.properties.str(default=None)
dtype_cd.doc = "the data type (float32/64) for Cd computations if different from others"
# the norm to use for computing the data log likelihood
# the only implementation that works for now
norm = altar.cuda.norms.norm()
norm.default = altar.cuda.norms.l2()
norm.doc = "l2 norm for calculating likelihood"
# constant variables
merge_cd_to_data = True
@altar.export
def initialize(self, application):
"""
Initialize data obs from model
"""
# load the gpu part at first
# initCovariance depends on precision
self.device = application.controller.worker.device
self.precision = application.job.gpuprecision
self.dtype_cd = self.dtype_cd or self.precision
# get the input path from model
self.ifs = application.pfs["inputs"]
self.error = application.error
# get the number of samples
self.samples = application.job.chains
# get the number of observations
observations = self.observations
# load the observed data to numpy array
self.dataobs = self.loadFile(filename=self.data_file, shape=observations)
# load the data covariance
if self.cd_file is not None:
self.cd = self.loadFile(filename=self.cd_file, shape=(observations, observations))
else:
# use a constant covariance
self.cd = numpy.zeros(shape=(observations, observations), dtype=self.dtype_cd)
numpy.fill_diagonal(self.cd, self.cd_std**2)
# compute inverse of covariance, normalization
self.initializeCovariance()
# all done
return self
def cuEvalLikelihood(self, prediction, likelihood, residual=True, batch=None):
"""
compute the data likelihood for prediction
:param prediction: (samples x observations) input of predicted data
:param likelihood: (samples) pre-allocated likelihood/norm
:param residual: whether prediction is already subtracted by observed data
:param batch: number of (first few) samples to be computed
:return: likelihood
"""
# get the batch / number of samples
batch = batch or prediction.shape[0]
# depending on convenience, users may
# either copy dataobs to their model and use the residual as input of prediction
# or compute prediction from forward model and subtract the dataobs here
# subtract dataobs from prediction to get residual
if not residual:
prediction -= self.gdataObsBatch
# call L2 norm to calculate the likelihood
normalization = self.normalization # norm constant
likelihood = self.norm.cuEvalLikelihood(data=prediction, constant=normalization,
out=likelihood, batch=batch)
# all done
return likelihood
@property
def cd_inv(self):
"""
Inverse of data covariance, in Cholesky decomposed form
"""
return self.gcd_inv
def release_cd(self):
"""
release gcd_inv
"""
self.gcd_inv = None
return
@property
def dataobsBatch(self):
"""
A batch of duplicated observations
"""
return self.gdataObsBatch
def loadFile(self, filename, shape, dataset=None, dtype=None):
"""
Load an input file to a numpy array (for both float32/64 support)
Supported format:
1. text file in '.txt' suffix, stored in prescribed shape
2. binary file with '.bin' or '.dat' suffix,
the precision must be same as the desired gpuprecision,
and users must specify the shape of the data
3. (preferred) hdf5 file with '.h5' suffix;
the metadata (shape, precision) is included in the .h5 file
:param filename: str, the input file name
:param shape: list of int
:param dataset: str, name/key of dataset for h5 input only
:return: output numpy.array
"""
# decide the data type of the loaded vector/matrix
dtype = dtype or self.precision
ifs = self.ifs
channel = self.error
try:
# get the path to the file
file = ifs[filename]
except ifs.NotFoundError:
channel.log(f"no file '{filename}' found in '{ifs.path()}'")
raise
else:
# get the suffix to determine type
suffix = file.uri.suffix
# use .txt for non-binary input
if suffix == '.txt':
# load to a cpu array
cpuData = numpy.loadtxt(file.uri.path, dtype = dtype).reshape(shape)
# binary data
elif suffix == '.bin' or suffix == '.dat':
# read and reshape, users need to check the precision
cpuData = numpy.fromfile(file.uri.path, dtype=dtype).reshape(shape)
# hdf5 file
elif suffix == '.h5':
# get support
import h5py
# open
h5file = h5py.File(file.uri.path, 'r')
# get the desired dataset
if dataset is None:
# if not provided, assume the first dataset available
dataset = list(h5file.keys())[0]
cpuData = numpy.asarray(h5file.get(dataset), dtype=dtype).reshape(shape)
h5file.close()
# any other suffix is not supported
else:
# complain about the unsupported format
channel.log(f"unsupported file suffix '{suffix}' for '{filename}'")
# and bail out
raise NotImplementedError(f"unsupported file suffix '{suffix}'")
# all done
return cpuData
def initializeCovariance(self):
"""
initialize gpu data and data covariance
"""
# initialize arrays kept in GPU memory
observations = self.observations
samples = self.samples
self.gdataObsBatch = altar.cuda.matrix(shape=(samples, observations), dtype=self.precision)
# initialize Cd
self.updateCovariance()
# all done
return self
def updateCovariance(self, cp=None):
"""
Update the data covariance C_chi = Cd + Cp
:param cp: cuda matrix with shape(obs, obs), data covariance due to model uncertainty
:return:
"""
from math import log, pi as π
# process cd info
observations = self.observations
# obtain Cd from cpu
gCchi = altar.cuda.matrix(source=self.cd, dtype=self.dtype_cd)
# add Cp if provided
if cp is not None:
# check cp data type
gCp = cp if cp.dtype == self.dtype_cd else cp.copy_to_device(dtype=self.dtype_cd)
# add cp to cd
gCchi += gCp
#self.checkPositiveDefiniteness(matrix=gCchi, name='Cchi')
# Inverse
gCchi.inverse()
#self.checkPositiveDefiniteness(matrix=gCchi, name='Cchi inverse')
# Choleseky decomposition
gCchi.Cholesky(uplo=cublas.FillModeUpper)
# normalization
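# note: at this point gCchi holds the Cholesky factor of C_chi^{-1}, so the sum of its log
# diagonal returned by matrix_logdet_triangular equals -0.5*log det(C_chi); the line below is
# therefore the same constant -0.5*(observations*log(2 pi) + log det C_chi) used by the cpu
# DataL2 class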
logdet = libcuda.matrix_logdet_triangular(gCchi.data)
self.normalization = -0.5*log(2*π)*observations + logdet
# keep a copy of inverse Cchi to gcd_inv for other operations
self.gcd_inv = gCchi if gCchi.dtype == self.precision else gCchi.copy_to_host(dtype=self.precision)
# load data to gpu
gDataVec = altar.cuda.vector(source=self.dataobs, dtype=self.precision)
# merge Cchi to data
gDataVec = self.mergeCdtoData(cd_inv=self.gcd_inv, data=gDataVec)
# make duplicates of data vector to a matrix
self.gdataObsBatch.duplicateVector(src=gDataVec)
# all done
return self
def checkPositiveDefiniteness(self, matrix, name=None):
"""
Check positive definiteness of a GPU matrix
:param matrix: a real symmetric (GPU) matrix
:return: true or false
"""
import numpy
name = name or 'Matrix'
cm = matrix.copy_to_host(type='numpy')
# eigenvalues, in ascending order
eigenvalues = numpy.linalg.eigvalsh(cm)
# positive definiteness requires the smallest eigenvalue to be positive
if eigenvalues[0] <= 0:
print(name, " is not positive definite!")
print(eigenvalues)
return False
print(name, eigenvalues.min(), eigenvalues.max())
return True
def mergeCdtoData(self, cd_inv, data):
"""
Merge the data covariance matrix to observed data
:param cd_inv: the inverse of covariance matrix in Cholesky-decomposed form, with Lower matrix filled
:param data: raw observed data
:return: cd_inv*data, a cuda vector
"""
# make a copy of observed data
gDataVec = data.clone()
# Cd^{-1} = LL^T
# d -> d (1, obs) x L (obs, obs)
cublas.trmv(A=cd_inv, x=gDataVec,
uplo=cublas.FillModeUpper,
transa = cublas.OpTrans
)
# all done
return gDataVec
# local variables
### from cpu class
# dataobs = None
# cd = None
# cd_inv = None
### gpu
normalization = 0
precision = None
gdataObsBatch = None # kept in memory
gcd_inv = None # kept in memory, can be released upon request
# end of file
|
lijun99/altar | altar/altar/bayesian/Controller.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# get the package
import altar
# the controller protocol
class Controller(altar.protocol, family="altar.controllers"):
"""
The protocol that all AlTar controllers must implement
"""
# required user configurable state
dispatcher = altar.simulations.dispatcher()
dispatcher.doc = "the event dispatcher that activates the registered handlers"
archiver = altar.simulations.archiver()
archiver.doc = "the archiver of simulation state"
# required behavior
@altar.provides
def posterior(self, model):
"""
Sample the posterior distribution of the given {model}
"""
@altar.provides
def initialize(self, application):
"""
Initialize me and my parts given an {application} context
"""
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Supply a default implementation
"""
# by default, we do CATMIP as encapsulated by the {Annealer} class
from .Annealer import Annealer as default
# and return it
return default
# end of file
|
lijun99/altar | cuda/cuda/__init__.py | <reponame>lijun99/altar<filename>cuda/cuda/__init__.py<gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# export my parts
from . import (
# norms
norms,
# probability distribution functions
distributions,
# support for Bayesian explorations using Markov chain Monte Carlo
bayesian,
models,
data,
ext,
)
from cuda import (
vector,
matrix,
curand,
cublas,
Device,
manager,
stats,
cuda as libcuda,
)
# my extension modules
from .ext import cudaaltar as libcudaaltar
def get_current_device():
"""
Return current cuda device
"""
return manager.current_device
def use_device(id):
"""
Set current device to device with id
"""
return manager.device(did=id)
def curand_generator():
device = get_current_device()
return device.get_curand_generator()
def cublas_handle():
device = get_current_device()
return device.get_cublas_handle()
# administrative
def copyright():
"""
Return the altar copyright note
"""
return print(meta.header)
def license():
"""
Print the altar license
"""
# print it
return print(meta.license)
def version():
"""
Return the altar version
"""
return meta.version
def credits():
"""
Print the acknowledgments
"""
# print it
return print(meta.acknowledgments)
|
lijun99/altar | models/reverso/reverso/Source.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME>
#
# (c) 2018-2021 california institute of technology
# all rights reserved
#
#
# framework
import altar
# externals
import numpy
from math import sqrt, pi as π
# library
from .libreverso import Reverso
# declaration
class Source:
"""
The source response for a Reverso (two connected magma chamber volcano) model.
"""
# public data
# radius of the hydraulic pipe connecting two magma reservoirs
ac = 0
# radius of the shallow magma reservoir; "as" is a python keyword, so it is stored as a_s
a_s = 0
# radius of the deep magma reservoir
ad = 0
# depth of the shallow reservoir
hs = 0
# depth of the deep reservoir
hd = 0
# basal magma inflow rate from below the deep chamber
q = 0
# material properties
v = .25 # Poisson ratio
# interface
def displacements(self, locations, los):
"""
Compute the expected displacements at a set of observation locations from a
two magma chamber (reverso) volcano model
"""
# the radius of the shallow reservoir
as_src = self.a_s
# the radius of the deep reservoir
ad_src = self.ad
# the radius of the connecting pipe between the two reservoirs
ac_src = self.ac
# depth of the shallow reservoir
hs_src = self.hs
# depth of the deep reservoir
hd_src = self.hd
# the basal magma inflow rate
q_src = self.q
# get the material properties
v = self.v
# from locations, a vector of (x,y) tuples, create the flattened vectors Xf, Yf required by
# CDM
Xf = numpy.zeros(len(locations), dtype=float)
Yf = numpy.zeros(len(locations), dtype=float)
for i, loc in enumerate(locations):
Xf[i] = loc[0]
Yf[i] = loc[1]
# allocate space for the result
u = altar.vector(shape=len(locations))
# compute the displacements
ue, un, uv = Reverso(X=Xf, Y=Yf, X0=x_src, Y0=y_src, depth=d_src,
ax=ax_src, ay=ay_src, az=az_src,
omegaX=omegaX_src, omegaY=omegaY_src, omegaZ=omegaZ_src,
opening=opening, nu=v)
# go through each observation location
for idx, (ux,uy,uz) in enumerate(zip(ue, un, uv)):
# project the expected displacement along LOS and store
u[idx] = ux * los[idx,0] + uy * los[idx,1] + uz * los[idx,2]
# all done
return u
# meta-methods
def __init__(self, ac=0, a_s=0, ad=0, hs=0, hd=0, q=0, v=.25, **kwds):
# chain up
super().__init__(**kwds)
# store the radius of the connecting hydraulic pipe
self.ac = ac
# the radii of the shallow and deep reservoirs
self.a_s = a_s
self.ad = ad
# the depths of the shallow and deep reservoirs
self.hs = hs
self.hd = hd
# the basal magma inflow rate
self.q = q
# and the Poisson ratio
self.v = v
# all done
return
# end of file
|
lijun99/altar | cuda/cuda/ext/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# pull the extension module; this must exist, so let import errors bubble up
from . import cudaaltar as libcudaaltar
# end of file
|
lijun99/altar | altar/altar/bayesian/CUDAAnnealing.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# superclass
from .AnnealingMethod import AnnealingMethod
import altar.cuda
# declaration
class CUDAAnnealing(AnnealingMethod):
"""
Implementation that takes advantage of CUDA on gpus to accelerate the computation
"""
from altar.cuda.bayesian.cudaCoolingStep import cudaCoolingStep
# public data
wid = 0 # my worker id
workers = 1 # i don't manage anybody else
def initialize(self, application):
"""
initialize worker
"""
super().initialize(application=application)
self.cuInitialize(application=application)
# all done
return self
def cuInitialize(self, application):
"""
Initialize the cuda worker
"""
gpuids = application.job.gpuids
tasks = application.job.tasks # jobs per host
# set gpu ids for current worker
self.device=altar.cuda.use_device(gpuids[self.wid % tasks])
application.info.log(f'current worker {self.wid} with device {self.device} id {self.device.id}')
return self
# interface
def start(self, annealer):
"""
Start the annealing process
"""
# chain up
super().start(annealer=annealer)
# assign a cuda device to worker in sequence of the worker id
# create both cpu/gpu steps to hold the state of the problem
self.step = self.CoolingStep.allocate(annealer=annealer)
self.gstep = self.cudaCoolingStep.start(annealer=annealer)
# initialize it
model = annealer.model
gstep = self.gstep
model.cuInitSample(theta=gstep.theta)
# compute the likelihoods
model.likelihoods(annealer=annealer, step=gstep, batch=gstep.samples)
# return to cpu
gstep.copyToCPU(step=self.step)
# all done
return self
device = None
gstep = None
# end of file
|
lijun99/altar | altar/altar/shells/AlTar.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# support
import altar
# the plexus
class AlTar(altar.plexus, family="altar.shells.altar", namespace="altar"):
"""
The main action dispatcher for the simple AlTar application
"""
# types
from .Action import Action as pyre_action
# user configurable state
job = altar.simulations.run()
job.doc = "the job input parameters"
model = altar.models.model()
model.doc = "the AlTar model to sample"
rng = altar.simulations.rng()
rng.doc = "the random number generator"
controller = altar.bayesian.controller()
controller.doc = "my simulation controller"
monitors = altar.properties.dict(schema=altar.simulations.monitor())
monitors.doc = "a collection of event handlers"
# protocol obligations
@altar.export
def main(self, *args, **kwds):
"""
The main entry point
"""
# initialize the job parameters
self.job.initialize(application=self)
# the random number generator
self.rng.initialize()
# the controller
self.controller.initialize(application=self)
# and the model; attach whatever the model initialization returns, just in case the
# model selects an implementation strategy based on my context
self.model = self.model.initialize(application=self)
# chain up
return super().main(*args, **kwds)
def initialize(self):
"""
Initialize without running; intended for debugging only
"""
# initialize the job parameters
self.job.initialize(application=self)
# the random number generator
self.rng.initialize()
# the controller
self.controller.initialize(application=self)
# and the model; attach whatever the model initialization returns, just in case the
# model selects an implementation strategy based on my context
self.model = self.model.initialize(application=self)
return self
# pyre framework hooks
# support for the help system
def pyre_banner(self):
"""
Place the application banner in the {info} channel
"""
# show the package header
return altar.meta.header
# interactive session management
def pyre_interactiveSessionContext(self, context):
"""
Go interactive
"""
# protect against bad context
if context is None:
# by initializing an empty one
context = {}
# add some symbols
context["altar"] = altar # my package
# and chain up
return super().pyre_interactiveSessionContext(context=context)
# end of file
|
lijun99/altar | altar/altar/bayesian/Solver.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# <NAME>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# get the package
import altar
# the scheduler protocol
class Solver(altar.protocol, family="altar.bayesian.solvers"):
"""
The protocol that all δβ solvers must implement
"""
# user configurable state
tolerance = altar.properties.float()
tolerance.doc = 'the fractional tolerance for achieving convergence'
# required behavior
@altar.provides
def initialize(self, application, scheduler):
"""
Initialize me and my parts given an {application} context and a {scheduler}
"""
@altar.provides
def solve(self, llk, weight):
"""
Compute the next temperature in the cooling schedule
"""
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Provide a default implementation
"""
# by default, use the naive grid solver
from .Grid import Grid
# and return it
return Grid
# end of file
|
lijun99/altar | models/seismic/seismic/Static.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# declaration
class Static(altar.models.bayesian, family="altar.models.seismic.static"):
"""
Static inversion with cuda (d = G theta)
Modeled as N patches with dip and slip displacements
"""
# user configurable state
# parameter sets and their prior distributions
# dip (usually Moment-constrained) and slip (usually Gaussian)
parametersets = altar.properties.dict(schema=altar.models.parameters())
parametersets.doc = "the set of parameters in the model"
parameters = altar.properties.int(default=None)
parameters.doc = "total number of parameters in the model"
observations = altar.properties.int(default=None)
observations.doc = "the number of data samples"
patches = altar.properties.int(default=None)
# the norm to use for computing the data log likelihood
norm = altar.cuda.norms.norm()
norm.default = altar.norms.l2()
norm.doc = "the norm to use when computing the data log likelihood"
# the name of the test case
case = altar.properties.path(default="patch-9")
case.doc = "the directory with the input files"
# the file based inputs
green_file = altar.properties.path(default="green.txt")
green_file.doc = "the name of the file with the Green functions"
data_file = altar.properties.path(default="data.txt")
data_file.doc = "the name of the file with the observations"
cd_file = altar.properties.path(default="cd.txt")
cd_file.doc = "the name of the file with the data covariance matrix"
# output
output_path = altar.properties.path(default="results")
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
super().initialize(application=application)
if application.job.gpus > 0:
self.processor = 'gpu'
# find out how many samples I will be working with; this equal to the number of chains
samples = application.job.chains
# initialize the parameter sets and their priors
self.initializeParameterSets()
# mount my input data space
self.ifs = self.mountInputDataspace(pfs=application.pfs)
# convert the input filenames into data
self.G, self.d, self.Cd = self.loadInputs()
# initialize covariance and merge it to data and green's function
self.initializeCovariance(samples=samples)
# grab a channel
channel = self.debug
channel.line("run info:")
# show me the model
channel.line(f" -- model: {self}")
# the model state
channel.line(f" -- model state:")
channel.line(f" parameters: {self.parameters}")
channel.line(f" observations: {self.observations}")
# the test case name
channel.line(f" -- case: {self.case}")
# the contents of the data filesystem
channel.line(f" -- contents of '{self.case}':")
channel.line("\n".join(self.ifs.dump(indent=2)))
# the loaded data
channel.line(f" -- inputs in memory:")
channel.line(f" green functions: shape={self.G.shape}")
channel.line(f" observations: shape={self.d.shape}")
channel.line(f" data covariance: shape={self.Cd.shape}")
# distributions
#channel.line(f" -- parametersets:")
#channel.line(f" prior: {self.prior}")
#channel.line(f" initializer: {self.prep}")
# flush
channel.log()
# all done
return self
@altar.export
def initializeSample(self, step):
"""
Fill {step.θ} with an initial random sample from my prior distribution.
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# fill it with random numbers from my initializer
for pset in self.parametersets.values():
# and ask each one to {prep} the sample
pset.initializeSample(theta=θ)
# and return
return self
@altar.export
def computePrior(self, step):
"""
Fill {step.prior} with the densities of the samples in {step.theta} in the prior
distribution
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# and the storage for the prior densities
prior = step.prior
# go through each parameter set
for pset in self.parametersets.values():
# and ask each one to compute the prior density of the sample
pset.computePrior(theta=θ, density=prior)
# all done
return self
@altar.export
def computeDataLikelihood(self, step):
"""
Fill {step.data} with the densities of the samples in {step.theta} given the available
data. This is what is usually referred to as the "forward model"
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# the green functions
G = self.G
# the observations
d = self.d
# the inverse of the data covariance
Cd_inv = self.Cd_inv
# the normalization
normalization = self.normalization
# and the storage for the data densities
dataLLK = step.data
# clone the residuals since the operations that follow write in-place
residuals = self.residuals.clone()
# compute G * transpose(θ) - d
# we must transpose θ because its shape is (samples x parameters)
# while the shape of G is (observations x parameters)
residuals = self.forwardModel(theta=θ, green=self.G, data_observations=self.residuals)
# go through the residual of each sample
for idx in range(residuals.columns):
# extract it
residual = residuals.getColumn(idx)
# compute its norm, normalize, and store it as the data log likelihood
# dataLLK[idx] = normalization - self.norm.eval(v=residual, sigma_inv=Cd_inv)/2
dataLLK[idx] = normalization - 0.5*self.norm.eval(v=residual)
# all done
return self
@altar.export
def verify(self, step, mask):
"""
Check whether the samples in {step.theta} are consistent with the model requirements and
update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# grab my prior
# pdf = self.prior
# ask it to verify my samples
# pdf.verify(theta=θ, mask=mask)
# use pset verify instead
# go through each parameter set
for pset in self.parametersets.values():
# and ask each one to verify the sample
pset.verify(theta=θ, mask=mask)
# all done; return the rejection map
return mask
# implementation details
def initializeParameterSets(self):
"""
Initialize the parameter set
"""
# get the parameter sets
psets = self.parametersets
# initialize the running offset/parameter count
parameters = 0
# go through my parameter sets
for name, pset in psets.items():
# initialize the parameter set with the current offset and add its count to the total
parameters += pset.initialize(model=self, offset=parameters)
# the total number of parameters is now known, so record it
self.parameters = parameters
# all done
return
# implementation details
def mountInputDataspace(self, pfs):
"""
Mount the directory with my input files
"""
# attempt to
try:
# mount the directory with my input data
ifs = altar.filesystem.local(root=self.case)
# if it fails
except altar.filesystem.MountPointError as error:
# grab my error channel
channel = self.error
# complain
channel.log(f"bad case name: '{self.case}'")
channel.log(str(error))
# and bail
raise SystemExit(1)
# if all goes well, explore it and mount it
pfs["inputs"] = ifs.discover()
# all done
return ifs
def loadInputs(self):
"""
Load the data in the input files into memory
"""
# grab the input dataspace
ifs = self.ifs
# first the green functions
try:
# get the path to the file
gf = ifs[self.green_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing Green functions: no '{self.green_file}' in '{self.case}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
green = altar.matrix(shape=(self.observations, self.parameters))
# and load the file contents into memory
green.load(gf.uri)
# next, the observations
try:
# get the path to the file
df = ifs[self.data_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing observations: no '{self.data_file}' in '{self.case}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the vector
data = altar.vector(shape=self.observations)
# and load the file contents into memory
data.load(df.uri)
# finally, the data covariance
try:
# get the path to the file
cf = ifs[self.cd_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing data covariance matrix: no '{self.cd_file}' in '{self.case}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
cd = altar.matrix(shape=(self.observations, self.observations))
# and load the file contents into memory
cd.load(cf.uri)
# all done
return green, data, cd
def initializeCovariance(self, samples):
"""
Compute the Cholesky decomposition of the inverse of the data covariance
and merge it to data
"""
# compute the normalization
self.normalization = self.computeNormalization(observations=self.observations, cd=self.Cd)
# compute the inverse of {Cd}
self.Cd_inv = self.computeCovarianceInverse(self.Cd)
# merge Cd to green and d
# G = Cd_inv x G; d = Cd_inv x d
Cd_inv = self.Cd_inv
self.G = altar.blas.dtrmm(Cd_inv.sideLeft, Cd_inv.upperTriangular, Cd_inv.opNoTrans,
Cd_inv.nonUnitDiagonal, 1, Cd_inv, self.G)
self.d = altar.blas.dtrmv( Cd_inv.upperTriangular, Cd_inv.opNoTrans, Cd_inv.nonUnitDiagonal,
Cd_inv, self.d)
# prepare the residuals matrix
self.residuals = self.initializeResiduals(samples=samples, data=self.d)
# all done
return self
def computeCovarianceInverse(self, cd):
"""
Compute the inverse of the data covariance matrix
"""
# make a copy so we don't destroy the original
cd = cd.clone()
# perform the LU decomposition
lu = altar.lapack.LU_decomposition(cd)
# invert; this creates a new matrix
inv = altar.lapack.LU_invert(*lu)
# compute the Cholesky decomposition
inv = altar.lapack.cholesky_decomposition(inv)
# and return it
return inv
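# a CPU-only numpy sketch of the same invert-then-factor sequence, for reference;
# the toy covariance below is hypothetical and only illustrates the order of operations:
#
#   import numpy
#   cd = numpy.diag([0.1, 0.2, 0.4])          # a toy 3x3 data covariance
#   cd_inv = numpy.linalg.inv(cd)             # invert it
#   chol = numpy.linalg.cholesky(cd_inv)      # lower-triangular Cholesky factor of the inverse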
def computeNormalization(self, observations, cd):
"""
Compute the normalization of the L2 norm
"""
# support
from math import log, pi as π
# make a copy of cd
cd = cd.clone()
# compute its LU decomposition
decomposition = altar.lapack.LU_decomposition(cd)
# use it to compute the log of its determinant
logdet = altar.lapack.LU_lndet(*decomposition)
# all done
return - (log(2*π)*observations + logdet) / 2;
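# the expression above is the log-normalization of a multivariate gaussian,
# -(N*log(2π) + log|Cd|)/2; a small numpy check with a hypothetical covariance:
#
#   import numpy
#   cd = numpy.diag([0.1, 0.2, 0.4])
#   sign, logdet = numpy.linalg.slogdet(cd)
#   normalization = -(numpy.log(2*numpy.pi)*3 + logdet)/2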
def initializeResiduals(self, samples, data):
"""
Prime the matrix that will hold the residuals (G θ - d) for each sample by duplicating the
observation vector as many times as there are samples
"""
# allocate the residual matrix
r = altar.matrix(shape=(data.shape, samples))
# for each sample
for sample in range(samples):
# make the corresponding column a copy of the data vector
r.setColumn(sample, data)
# all done
return r
@altar.export
def update(self, annealer):
"""
Model updating at the bottom of each annealing step
Output step data
"""
# get current worker
worker = annealer.worker
# check master
if worker.rank == worker.manager:
altar.utils.save_step(step=worker.step, path=self.output_path)
# all done
return self
def forwardModel(self, theta, green, data_residuals=None, data_observations=None, batches=None):
"""
Forward model: compute data predictions or data residuals from a set of theta
Args:
theta [in, cuarray] parameters with shape=(samples, parameters)
green [in, cuarray] Green's function with shape=(observations, parameters)
batches [in, integer, optional] number of samples that need to be computed, <= samples
data_observations [in, cuarray, optional] data observations
data_residuals [inout, cuarray, optional] data predictions or residuals with shape=(observations, samples)
Returns:
data predictions, or residuals if data_observations is provided
"""
# determine different sizes
samples, parameters = theta.shape
assert green.ndim == 2, "Green's function must be a 2D array"
observations, parameters_g = green.shape
assert parameters_g == parameters, "parameters in theta and Green's function don't match"
# allocate a new data_residuals if not present
if data_residuals is None:
data_residuals = altar.cuda.matrix(shape=(observations, samples))
# if data_observations is available, copy it over
if data_observations is not None:
assert data_observations.shape == data_residuals.shape, "the shape of data_obs is not (obs, samples)"
data_residuals = data_observations.copy()
beta = -1.0
else:
data_residuals.fill(0)
beta = 0.0
if batches is None:
batches = samples
# grab cublas handle
handle = altar.cuda.device.get_cublas_handle()
# call cublas matrix multiplication
# d_residuals = theta * Green - d_obs
# (samples x observations) = (samples x parameters) x (parameters x observations)
cublas.gemm(handle,
1, # transa
0, # transb
batches, observations, parameters, #m,n,k
1.0, # alpha
theta.data.ptr, parameters, # A, lda
green.data.ptr, parameters, #B, ldb
beta, # beta
data_residuals.data.ptr, samples) #C, ldc
#all done
return data_residuals
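# a dense-algebra sketch of the intended computation (G θ - d), using numpy and
# hypothetical shapes; it is not the cublas call above, only its mathematical content:
#
#   import numpy
#   samples, parameters, observations = 4, 3, 5
#   theta = numpy.random.rand(samples, parameters)
#   green = numpy.random.rand(observations, parameters)
#   d_obs = numpy.random.rand(observations, samples)
#   predictions = theta @ green.T             # (samples x observations)
#   residuals = predictions.T - d_obs         # (observations x samples)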
# private data
ifs = None # the filesystem with the input files
# inputs
G = None # the Green functions
d = None # the vector with the observations
Cd = None # the data covariance matrix
# computed
Cd_inv = None # the inverse of the data covariance matrix
residuals = None # matrix that holds (G θ - d) for each sample
normalization = 1 # the normalization of the L2 norm
# forwardModel method
processor = 'cpu'
# end of file
|
lijun99/altar | altar/altar/data/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# publish the protocol for data observations
from .DataObs import DataObs as data
from .DataL2 import DataL2
# implementations
@altar.foundry(implements=data, tip="the data with L2 norm")
def datal2():
# grab the factory
from .DataL2 import DataL2 as datal2
# attach its docstring
__doc__ = datal2.__doc__
# and return it
return datal2
# end of file
|
lijun99/altar | models/cudalinear/examples/PInversion.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
#! /usr/bin/env python3
import numpy
import h5py
def PseudoInverse(gf_file, data_file):
"""
Use pseudo inverse to solve linear inversion problem
"""
#load green's function
gf = numpy.loadtxt(gf_file)
# load data
d = numpy.loadtxt(data_file)
# pinv
gfinv = numpy.linalg.pinv(gf)
# inversion
theta = numpy.dot(gfinv, d)
# print out the result
print("The parameters given by the pseudoinverse matrix\n", theta)
# if needed, uncomment the following line to save it as well
#numpy.savetxt("theta_by_pinverse.txt", theta)
h5 = h5py.File("results/step_final.h5", "r")
thetab = numpy.array(h5['ParameterSets/pset'])
theta_m = numpy.mean(thetab, axis=0)
theta_std = numpy.std(thetab, axis=0)
print("The parameters given by the Bayesian inversion (mean values)\n", theta_m)
print("The difference between them are\n", theta-theta_m)
print("which are expected to be smaller than the standard deviations from Bayesian simultion\n", theta_std)
# all done
return theta
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = 'Use pseudoInverse matrix for inversion')
parser.add_argument('--green', help="green's function file name", default="input/green.txt")
parser.add_argument('--data', help="data file name", default="input/data.txt")
args = parser.parse_args()
theta = PseudoInverse(args.green, args.data)
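# example invocation, run from the directory that holds the {input} folder
# (assumes an AlTar run has already produced results/step_final.h5):
#
#   python3 PInversion.py --green input/green.txt --data input/data.txt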
|
lijun99/altar | models/seismic/seismic/cuda/cudaKinematicG.py | <filename>models/seismic/seismic/cuda/cudaKinematicG.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# my base
from altar.cuda.models.cudaBayesian import cudaBayesian
# extensions
from altar.cuda import cublas
from altar.cuda import libcuda
from altar.models.seismic.ext import cudaseismic as libcudaseismic
import numpy
# declaration
class cudaKinematicG(cudaBayesian, family="altar.models.seismic.cuda.kinematicg"):
"""
KinematicG model with cuda
"""
# configurable traits
# data observations
dataobs = altar.cuda.data.data()
dataobs.default = altar.cuda.data.datal2()
dataobs.doc = "the observed data"
# the file based inputs
green = altar.properties.path(default="green.txt")
green.doc = "the name of the file with the Green functions"
Nas = altar.properties.int(default=1)
Nas.doc = "number of patches along strike direction"
Ndd = altar.properties.int(default=1)
Ndd.doc = "number of patches along dip direction"
Nmesh = altar.properties.int(default=1)
Nmesh.doc = "number of mesh points for each patch for fastsweeping"
dsp = altar.properties.float(default=10.0)
dsp.doc = "the distance unit for each patch, in km"
Nt = altar.properties.int(default=1)
Nt.doc = "number of time intervals for kinematic process"
Npt = altar.properties.int(default=1)
Npt.doc = "number of mesh points for each time interval for fastsweeping"
dt = altar.properties.float(default=1.0)
dt.doc = "the time unit for each time interval (in s)"
t0s = altar.properties.array(default=None)
t0s.doc = "the start time for each patch"
# public data
cmodel = None
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
super().initialize(application=application)
# get a cublas handle
self.cublas_handle = self.device.get_cublas_handle()
# load the green's function
self.NGbparameters = 2*self.Nas*self.Ndd*self.Nt
self.GF=self.loadFile(filename=self.green, shape=(self.NGbparameters, self.observations))
# prepare the GF in gpu
self.gGF = altar.cuda.matrix(shape=self.GF.shape, dtype=self.precision)
# merge covariance to gf
if not self.forwardonly:
self.mergeCovarianceToGF()
# prepare the residuals matrix
self.gDprediction = altar.cuda.matrix(shape=(self.samples, self.observations), dtype=self.precision)
# prepare the initial arrival time
self.gt0s = altar.cuda.vector(source=numpy.asarray(self.t0s, dtype=self.precision))
# create a cuda/c model object
dtype = self.gGF.dtype.num
self.cmodel = libcudaseismic.kinematicg_alloc(
self.Nas, self.Ndd, self.Nmesh, self.dsp,
self.Nt, self.Npt, self.dt,
self.gt0s.data,
self.samples, self.parameters, self.observations,
self.gidx_map.data, dtype)
# all done
return self
def forwardModelBatched(self, theta, gf, prediction, batch, observation=None):
"""
KinematicG forward model in batch: cast Mb(x,y,t)
:param theta: matrix (samples, parameters), sampling parameters
:param gf: matrix (2*Ndd*Nas*Nt, observations), kinematicG green's function
:param prediction: matrix (samples, observations), the predicted data or residual between predicted and observed data
:param batch: integer, the number of samples to be computed batch<=samples
:param observation: matrix (samples, observations), duplicates of observed data
:return: prediction as predicted data(observation=None) or residual (observation is provided)
"""
if observation is None:
return_residual = False
else:
prediction.copy(other=observation)
return_residual = True
# call cuda/c library
libcudaseismic.kinematicg_forward_batched(self.cublas_handle, self.cmodel,
theta.data, gf.data, prediction.data, theta.shape[1], batch, return_residual)
# all done
return prediction
def forwardModel(self, theta, gf, prediction, observation=None):
"""
KinematicG forward model for single sample: cast Mb(x,y,t)
:param theta: vector (parameters), sampling parameters
:param gf: matrix (2*Ndd*Nas*Nt, observations), kinematicG green's function
:param prediction: vector (observations), the predicted data or residual between predicted and observed data
:param observation: vector (observations), duplicates of observed data
:return: prediction as predicted data(observation=None) or residual (observation is provided)
"""
if observation is None:
return_residual = False
else:
prediction.copy(other=observation)
return_residual = True
parameters = theta.shape
# call cuda/c extension
libcudaseismic.kinematicg_forward(self.cublas_handle, self.cmodel,
theta.data, gf.data, prediction.data, parameters, return_residual)
# all done
return prediction
def castSlipsOfTime(self, theta, Mb=None):
"""
Compute Mb (slips of patches over time) from a given set of parameters
:param theta: a vector arranged in [slip (strike and dip), risetime, ...]
:param Mb:
:return: Mb
"""
# allocate Mb if not provided
Mb = Mb or altar.cuda.vector(shape=self.NGbparameters, dtype=self.precision)
parameters = self.parameters
# call cuda/c extension method
libcudaseismic.kinematicg_castMb(self.cmodel, theta.data, Mb.data, parameters)
# all done
return Mb
def linearGM(self, gf, Mb, prediction=None, observation=None):
"""
Perform prediction = Gb * Mb
:param Gb:
:param Mb:
:param prediction:
:return: prediction
"""
observations = gf.shape[1]
prediction = prediction or altar.cuda.vector(shape=observations, dtype=self.precision)
if observation is None :
return_residual = False
else :
prediction.copy(other=observation)
return_residual = True
# perform matrix vector multiplication
libcudaseismic.kinematicg_linearGM(self.cublas_handle, self.cmodel,
gf.data, Mb.data, prediction.data, return_residual)
# all done
return prediction
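# in dense-algebra terms, with {gf} of shape (2*Ndd*Nas*Nt x observations) and {Mb}
# of length 2*Ndd*Nas*Nt, the call above amounts to prediction = gf^T @ Mb (minus the
# observation when one is supplied); a numpy sketch with hypothetical sizes:
#
#   import numpy
#   gf = numpy.random.rand(12, 5)             # (2*Ndd*Nas*Nt x observations)
#   Mb = numpy.random.rand(12)
#   prediction = gf.T @ Mb                     # length-5 data prediction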
def cuEvalLikelihood(self, theta, likelihood, batch):
"""
Compute the likelihood from my forward problem
"""
# residuals = dataPrediction - dataObservation
residuals = self.gDprediction
# call forward model to calculate the data prediction or its difference between dataobs
self.forwardModelBatched(theta=theta, gf=self.gGF, prediction=residuals, batch=batch,
observation= self.dataobs.gdataObsBatch)
# call data method to calculate the l2 norm
self.dataobs.cuEvalLikelihood(prediction=residuals, likelihood=likelihood,
residual=True, batch=batch)
# return the likelihood
return likelihood
def mergeCovarianceToGF(self):
"""
merge data covariance (cd) with green function
"""
# get references for data covariance
cd_inv = self.dataobs.gcd_inv
# get a reference for green's function
green = self.gGF
# copy from CPU
green.copy_from_host(source=self.GF)
# check whether cd is a constant or a matrix
if isinstance(cd_inv, float):
green *= cd_inv
elif isinstance(cd_inv, altar.cuda.matrix):
# (NGbparameters x obs) x (obs x obs) = (NGbparameters x obs)
cublas.trmm(cd_inv, green, out=green,
side=cublas.SideRight,
uplo=cublas.FillModeUpper,
transa = cublas.OpTrans,
diag=cublas.DiagNonUnit,
alpha=1.0,
handle = self.cublas_handle)
# release gcd_inv from gpu memory
self.dataobs.release_cd()
# all done
return
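# a CPU sketch of the idea behind the merge: fold a Cholesky factor of the inverse
# data covariance into the Green's functions so the later residual norm reduces to a
# plain L2 norm; the toy covariance, shapes, and triangle/transpose convention below
# are hypothetical, not the exact cublas.trmm layout used above:
#
#   import numpy
#   cd_inv = numpy.linalg.inv(numpy.diag([0.1, 0.2, 0.4]))   # toy (obs x obs) inverse covariance
#   L = numpy.linalg.cholesky(cd_inv)                        # its Cholesky factor
#   green = numpy.random.rand(6, 3)                          # (NGbparameters x observations)
#   green = green @ L                                        # fold the data weights into the GF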
@altar.export
def forwardProblem(self, application, theta=None):
"""
Perform the forward modeling with given {theta}
"""
import h5py
# get theta
gtheta = theta or self.loadFileToGPU(filename=self.theta_input,
dataset=self.theta_dataset)
# castBigM from fast sweeping
gMb = self.castSlipsOfTime(theta=gtheta)
# get a reference of green's function
gGF = self.gGF
# copy from CPU
gGF.copy_from_host(source=self.GF)
# get data prediction
gDataPred = self.linearGM(gf=gGF, Mb=gMb)
# save BigM to an h5 file
h5file = h5py.File(name=self.forward_output.path, mode='a')
# if already exists, del the old dataset
if 'kinematic.Mb' in h5file.keys():
del h5file['kinematic.Mb']
if 'kinematic.Data' in h5file.keys():
del h5file['kinematic.Data']
h5file.create_dataset(name='kinematic.Mb', data=gMb.copy_to_host(type='numpy'))
h5file.create_dataset(name='kinematic.Data', data=gDataPred.copy_to_host(type='numpy'))
h5file.close()
# all done
return
# private data
# inputs
GF = None # the Green functions
gGF = None
gDprediction = None
cublas_handle=None
NGbparameters = None
gt0s = None
# end of file
|
lijun99/altar | altar/altar/models/ParameterSet.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# the parameter set protocol
class ParameterSet(altar.protocol, family="altar.models.parameterset"):
"""
The protocol that all AlTar parameter sets must implement
"""
# required state
count = altar.properties.int(default=1)
count.doc = "the number of parameters in this set"
prior = altar.distributions.distribution()
prior.doc = "the prior distribution"
prep = altar.distributions.distribution()
prep.doc = "the distribution to use to initialize this parameter set"
# required behavior
@altar.provides
def initialize(self, model, offset):
"""
Initialize the parameter set given the {model} that owns it
"""
@altar.provides
def initializeSample(self, theta):
"""
Fill {theta} with an initial random sample from my prior distribution.
"""
@altar.provides
def priorLikelihood(self, theta, priorLLK):
"""
Fill {priorLLK} with the likelihoods of the samples in {theta} in my prior distribution
"""
@altar.provides
def verify(self, theta, mask):
"""
Check whether the samples in {theta} are consistent with the model requirements and update
the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
"""
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Supply a default implementation
"""
# there is currently only one option...
from .Contiguous import Contiguous as contiguous
# so publish it
return contiguous
# end of file
|
lijun99/altar | models/seismic/seismic/Moment.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# get the package
import altar
# get the protocol
from altar.distributions import distribution
# and my base class
from altar.distributions.Uniform import Uniform as uniform
# the declaration
class Moment(uniform, family="altar.distributions.moment"):
"""
The probability distribution for displacements (D) conforming to a given Moment magnitude scale
Mw = (log M0 - 9.1)/1.5 (Hiroo Kanamori)
M0 = Mu A D
It inherits uniform distribution for verification and density calculations,
while generates samples for a combined gaussian and dirichlet distributions
"""
# user configurable state
# patches = altar.properties.int(default=1)
# patches.doc = "number of patches"
# The value of patches is provided by parameters
area_patches_file = altar.properties.path(default=None)
area_patches_file.doc = "input file for area of each patch, in unit of km^2"
area = altar.properties.float(default=1.0)
area.doc = "total area in unit of km^2"
Mw_mean = altar.properties.float(default=1.0)
Mw_mean.doc = " the mean moment magnitude scale"
Mw_sigma = altar.properties.float(default=0.5)
Mw_sigma.doc = " the variance of moment magnitude scale"
Mu = altar.properties.float(default = 32)
Mu.doc = "the shear modulus in unit of GPa"
# also include support = (low, high) for parent uniform distribution
# protocol obligations
@altar.export
def initialize(self, rng):
"""
Initialize with the given random number generator
"""
# initialize the parent uniform distribution
super().initialize(rng=rng)
# initialize the area for each patches
self.patches = self.parameters
# by default, assign the constant patch_area to each patch
self.area_patches = altar.vector(shape=self.patches).fill(self.area)
# if a file is provided, load it
if self.area_patches_file is not None:
self.area_patches.load(self.area_patches_file.uri)
# all done
return self
@altar.export
def initializeSample(self, theta):
"""
Fill my portion of {theta} with initial random values from my distribution.
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=theta)
# grab the number of samples (rows of theta)
samples = θ.rows
# grab the number of patches/parameters
parameters = self.patches
# grab the area of patches
area_patches = self.area_patches
# create a gaussian distribution to generate Mw for each sample
gaussian_Mw = altar.pdf.gaussian(mean=self.Mw_mean, sigma=self.Mw_sigma, rng=self.rng)
# create a dirichlet distribution to generate displacements
alpha = altar.vector(shape=parameters).fill(1) # power 0, or (alpha_i = 1)
dirichlet_D = altar.pdf.dirichlet(alpha=alpha, rng=self.rng)
# create a temporary vector for the sample's theta
theta_sample = altar.vector(shape=parameters)
# iterate through samples to initialize samples
for sample in range(samples):
# generate a Mw sample
Mw = gaussian_Mw.sample()
# Pentier = M0/Mu = \sum (A_i D_i)
# the -15 in the exponent accounts for using GPa and km^2 instead of Pa and m^2 (10^9 x 10^6)
Pentier = pow(10, 1.5*Mw + 9.1 - 15)/self.Mu
# generate a dirichlet sample \sum x_i = 1
dirichlet_D.vector(vector=theta_sample)
# D_i = P * x_i /A_i
for parameter in range (parameters):
theta_sample[parameter]*=Pentier/area_patches[parameter]
# set theta
θ.setRow(sample, theta_sample)
# all done and return
return self
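# a worked example of the scaling above, with hypothetical numbers: for Mw = 7,
# M0 = 10^(1.5*7 + 9.1) ≈ 3.98e19 N*m; in GPa*km^2*m units that is
# 10^(1.5*7 + 9.1 - 15) ≈ 3.98e4, and with Mu = 32 GPa the total potency
# sum(A_i D_i) ≈ 1.2e3 km^2*m, which the dirichlet weights then split across the patches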
# use other methods from uniform
# private member variables
area_patches = None
patches = None
# end of file
|
lijun99/altar | models/seismic/seismic/cuda/cudaMoment.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> (<EMAIL>)
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# get the package
import altar
import altar.cuda
# get the protocol
# and my base class
from altar.cuda.distributions.cudaUniform import cudaUniform
# the declaration
class cudaMoment(cudaUniform, family="altar.cuda.distributions.moment"):
"""
The probability distribution for displacements (D) conforming to a given Moment magnitude scale
Mw = (log M0 - 9.1)/1.5 (Hiroo Kanamori)
M0 = Mu A D
It serves to initialize samples only, with combined gaussian and dirichlet distributions.
It inherits uniform distribution for verification and density calculations.
"""
# user configurable state
# patches = altar.properties.int(default=1)
# patches.doc = "number of patches"
# The value of patches is provided by parameters
area_patch_file = altar.properties.path(default=None)
area_patch_file.doc = "input file for area of each patch, in unit of km^2"
area = altar.properties.array(default=[1.0])
area.doc = "area of each patch in unit of km^2, provide one value if the same for all patches"
Mw_mean = altar.properties.float(default=1.0)
Mw_mean.doc = " the mean moment magnitude scale"
Mw_sigma = altar.properties.float(default=0.5)
Mw_sigma.doc = " the variance of moment magnitude scale"
Mu = altar.properties.array(default = [32])
Mu.doc = "the shear modulus for each patch in GPa, provide one value if the same for all patches"
slip_sign = altar.properties.str(default='positive')
slip_sign.validators = altar.constraints.isMember("positive", "negative")
slip_sign.doc = "the sign of slips, all positive or all negative"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize with the given random number generator
"""
# all done
return self
def cuInitialize(self, application):
"""
cuda interface of initialization
"""
# initialize the parent uniform distribution
super().cuInitialize(application=application)
# get the input path
ifs = application.pfs["inputs"]
# assign the rng
self.rng = application.rng.rng
# set the number of patches
self.patches = self.parameters
# initialize the area for each patch
if len(self.area) == 1:
# by default, assign the constant patch_area to each patch
self.area_patches = altar.vector(shape=self.patches).fill(self.area[0])
elif len(self.area) != self.patches:
# if the size doesn't match
channel = self.error
raise channel.log("the size of area doesn't match the number of patches")
else:
#
self.area_patches = self.area
# if a file is provided, load it
if self.area_patch_file is not None:
try:
# get the path to the file
areafile = ifs[self.area_patch_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing area_patch_file: no '{self.area_patch_file}' {ifs.path()}")
# and raise the exception again
raise
# if all goes well
else:
# allocate the vector
self.area_patches = altar.vector(shape=self.patches)
# and load the file contents into memory
self.area_patches.load(areafile.uri)
# initialize the shear modulus for each patch
if len(self.Mu) == 1:
# by default, assign the constant to each patch
self.mu_patches = altar.vector(shape=self.patches).fill(self.Mu[0])
elif len(self.Mu) != self.patches:
# if the size doesn't match
channel = self.error
raise channel.log("the size of Mu doesn't match the number of patches")
else:
#
self.mu_patches = self.Mu
# all done
return self
def cuInitSample(self, theta):
"""
Fill my portion of {theta} with initial random values from my distribution.
"""
# use cpu to generate a batch of samples
samples = theta.shape[0]
parameters = self.parameters
θ = altar.matrix(shape=(samples, parameters))
# grab the references for area/shear modulus
area_patches = self.area_patches
mu_patches = self.mu_patches
# create a gaussian distribution to generate Mw for each sample
gaussian_Mw = altar.pdf.gaussian(mean=self.Mw_mean, sigma=self.Mw_sigma, rng=self.rng)
# create a dirichlet distribution to generate displacements
alpha = altar.vector(shape=parameters).fill(1) # power 0, or (alpha_i = 1)
dirichlet_D = altar.pdf.dirichlet(alpha=alpha, rng=self.rng)
# create a temporary vector for the sample's theta
theta_sample = altar.vector(shape=parameters)
# get the range
low, high = self.support
# iterate through samples to initialize samples
for sample in range(samples):
within_range = False
# iterate until all samples are within support
while within_range is False:
# assume within_range is true in the beginning
within_range = True
# generate a Mw sample
Mw = gaussian_Mw.sample()
# Pentier = M0 = \sum (A_i D_i Mu_i)
# the -15 in the exponent accounts for using GPa and km^2 instead of Pa and m^2 (10^9 x 10^6)
Pentier = pow(10, 1.5*Mw + 9.1 - 15)
# if a negative sign is desired
if self.slip_sign == 'negative':
Pentier = - Pentier
# generate a dirichlet sample \sum x_i = 1
dirichlet_D.vector(vector=theta_sample)
# D_i = P * x_i /A_i
for patch in range(parameters):
theta_sample[patch]*=Pentier/(area_patches[patch]*mu_patches[patch])
# check the range
if(theta_sample[patch]>=high or theta_sample[patch]<=low):
within_range = False
break
# set theta
θ.setRow(sample, theta_sample)
# make a copy to gpu
gθ = altar.cuda.matrix(source=θ, dtype=self.precision)
# insert into theta according to the idx_range
theta.insert(src=gθ, start=(0,self.idx_range[0]))
# and return
return self
# private member variables
area_patches = None
mu_patches = None
patches = None
rng = None
# end of file
|
lijun99/altar | models/seismic/examples/utils/checkDataPrediction.py | #!/usr/bin/env python3
# -*- python -*-
# -*- coding: utf-8 -*-
#
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
import h5py
import numpy
import sys
def checkDataDiff():
"""
Check the difference between data predictions and observations
"""
# file names for data, modify them accordingly
dataPrediction = "forward_prediction.h5"
staticData = "9patch/static.data.h5"
kinematicData = "9patch/kinematicG.data.h5"
# open data prediction file and get predictions for one or both models
h5file = h5py.File(dataPrediction, 'r')
staticDataPrediction = numpy.asarray(h5file.get("static.Data"))
kinematicDataPrediction = numpy.asarray(h5file.get("kinematic.Data"))
h5file.close()
# get data observation for static model
h5file = h5py.File(staticData, 'r')
staticDataObservation = numpy.asarray(h5file.get("static.data"))
h5file.close()
# get data observation for kinematic model
h5file = h5py.File(kinematicData, 'r')
kinematicDataObservation = numpy.asarray(h5file.get("kinematicG.data"))
h5file.close()
# check difference
# max error and relative error
error_max = 1.e-3
error_rel_max = 1.e-1
print("checking static model ...")
# compute the relative difference
diff = staticDataPrediction - staticDataObservation
diff_ratio =diff/staticDataObservation
diff_count = 0
for i in range(diff.size):
if abs(diff[i]) > error_max and abs(diff_ratio[i])> error_rel_max:
print(f"Difference at {i}, with " +
f"pred {staticDataPrediction[i]} " +
f"obs {staticDataObservation[i]}")
diff_count += 1
print(f"There are {diff_count} data points out of {diff.size} with large differences")
print("checking kinematic model ...")
# compute the relative difference
diff = kinematicDataPrediction - kinematicDataObservation
diff_ratio = diff/kinematicDataObservation
diff_count = 0
for i in range(diff.size):
if abs(diff[i]) > error_max and abs(diff_ratio[i])> error_rel_max:
print(f"Difference at {i}, with " +
f"pred {kinematicDataPrediction[i]} " +
f"obs {kinematicDataObservation[i]}")
diff_count += 1
print(f"There are {diff_count} data points out of {diff.size} with large differences")
# all done
return
if __name__ == "__main__":
checkDataDiff()
|
lijun99/altar | cuda/cuda/distributions/cudaPreset.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# get the package
import altar
import altar.cuda
# get the base
from .cudaDistribution import cudaDistribution
# the declaration
class cudaPreset(cudaDistribution, family="altar.cuda.distributions.preset"):
"""
The cuda preset distribution - initialize samples from a file
Note that a preset distribution cannot be used for prior_run.
"""
# user configurable state
input_file = altar.properties.path(default=None)
input_file.doc = "input file in hdf5 format"
dataset = altar.properties.str(default=None)
dataset.doc = "the name of dataset in hdf5"
@altar.export
def initialize(self, application):
"""
Initialize with the given random number generator
"""
# all done
return self
def cuInitialize(self, application):
"""
cuda initialize distribution
:param application:
:return:
"""
# super class process
super().cuInitialize(application=application)
# get information from application
# rank is used for different thread to load different samples
self.rank = application.controller.worker.wid
# convert to desired precision if needed
self.precision = application.job.gpuprecision
# error report
self.error = application.error
# get the input path
self.ifs = application.model.ifs
# all done
return self
def cuInitSample(self, theta):
"""
Fill my portion of {theta} with initial random values from my distribution.
"""
# load from hdf5
self._loadhdf5(theta=theta)
# and return
return self
# local methods
def _loadhdf5(self, theta):
"""
load from hdf5 file
"""
import h5py
import numpy
# grab the error channel
channel = self.error
# grab the input dataspace
ifs = self.ifs
# check the file existence
try:
# get the path to the file
df = ifs[self.input_file]
# if the file doesn't exist
except ifs.NotFoundError:
# complain
channel.log(f"missing preset samples file: no '{self.input_file}' {ifs.path()}")
# and raise the exception again
raise
# if all goes well
# open file
h5file = h5py.File(df.uri.path, 'r')
# get the desired dataset
if self.dataset is None:
raise channel.log(f"missing dataset name e.g. ParameterSets/theta")
dataset = h5file.get(self.dataset)
# get dataset info
dsamples, dparameters = dataset.shape
# decide the range to copy
# users need to check
# 1. there are enough samples to draw
# 2. the numbers of parameters should be the same
samples = theta.shape[0]
sample_start = samples * self.rank
sample_end = sample_start + samples
parameter_start = 0
parameter_end = parameter_start + self.parameters
# read the data out as a ndarray
hmatrix = numpy.asarray(dataset[sample_start:sample_end, parameter_start:parameter_end], dtype=theta.dtype)
# copy data to a cuda matrix
dmatrix = altar.cuda.matrix(source=hmatrix, dtype=hmatrix.dtype)
# copy it to the assigned position (row = 0, column = distribution/pset offset in theta)
theta.insert(src=dmatrix, start=(0, self.offset))
h5file.close()
# all done
return theta
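# a sketch of building an input file that {_loadhdf5} can read, using h5py; the file
# name, dataset name, and sizes below are hypothetical:
#
#   import h5py, numpy
#   samples, parameters = 1024, 18
#   with h5py.File("theta_preset.h5", "w") as h5:
#       h5.create_dataset("ParameterSets/theta", data=numpy.random.rand(samples, parameters))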
# local variables
rank = 0
precision = None
ifs = None
error = None
# end of file
|
lijun99/altar | altar/altar/models/Contiguous.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# the protocol
from .ParameterSet import ParameterSet as parameters
# component
class Contiguous(altar.component,
family="altar.models.parameters.contiguous", implements=parameters):
"""
A contiguous parameter set
"""
# user configurable state
count = altar.properties.int(default=1)
count.doc = "the number of parameters in this set"
prior = altar.distributions.distribution()
prior.doc = "the prior distribution"
prep = altar.distributions.distribution()
prep.doc = "the distribution to use to initialize this parameter set"
# state set by the model
offset = 0 # adjusted by the model after the full set of parameters is known
# interface
@altar.export
def initialize(self, model, offset):
"""
Initialize my state given the {model} that owns me
"""
# set my offset
self.offset = offset
# get my count
count = self.count
# adjust the number of parameters of my distributions
self.prep.parameters = self.prior.parameters = count
# get the random number generator
rng = model.rng
# initialize my distributions
self.prep.initialize(rng=rng)
self.prior.initialize(rng=rng)
# return my parameter count so the next set can be initialized properly
return count
@altar.export
def initializeSample(self, theta):
"""
Fill {theta} with an initial random sample from my prior distribution.
"""
# grab the portion of the sample that belongs to me
θ = self.restrict(theta=theta)
# fill it with random numbers from my {prep} distribution
self.prep.initializeSample(theta=θ)
# all done
return self
@altar.export
def priorLikelihood(self, theta, priorLLK):
"""
Fill {priorLLK} with the log likelihoods of the samples in {theta} in my prior distribution
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=theta)
# delegate
self.prior.priorLikelihood(theta=θ, likelihood=priorLLK)
# all done
return self
@altar.export
def verify(self, theta, mask):
"""
Check whether the samples in {step.theta} are consistent with the model requirements and
update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=theta)
# grab my prior
pdf = self.prior
# ask it to verify my samples
pdf.verify(theta=θ, mask=mask)
# all done; return the rejection map
return mask
# implementation details
def restrict(self, theta):
"""
Return my portion of the sample matrix {theta}
"""
# find out how many samples in the set
samples = theta.rows
# get my parameter count
parameters = self.count
# get my offset in the samples
offset = self.offset
# find where my samples live within the overall sample matrix:
start = 0, offset
# form the shape of the sample matrix that's mine
shape = samples, parameters
# return a view to the portion of the sample that's mine: i own data in all sample
# rows, starting in the column indicated by my {offset}, and the width of my block is
# determined by my parameter count
return theta.view(start=start, shape=shape)
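# for example, with two contiguous sets of 3 and 5 parameters, the first gets offset 0
# and the second offset 3; given a (samples x 8) theta, the second set's {restrict}
# returns the view that starts at column 3 and spans 5 columns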
# end of file
|
lijun99/altar | models/reverso/reverso/libreverso.py | #!/usr/bin/env python3
# The Two-Reservoir Model
# This model assumes an elastic half-space and incompressible magma. The two magma
# reservoirs comprise a deep reservoir connected to a shallow reservoir by an
# hydraulic pipe ["A two-magma chamber model as a source of deformation at Grimvsvotn
# Volcano, Iceland by Reverso etal (2014) Journal of Geophysical Research: Solid Earth
import numpy
from matplotlib import pyplot
def Urmat(Hs, Hd, gammas, gammad, r, G, a_s, a_d, v):
# distance from the shallow reservoir to the surface displacement observation station
Rs = numpy.sqrt(r**2 + Hs**2)
# distance from the deep reservoir to the surface displacement observation station
Rd = numpy.sqrt(r**2 + Hd**2)
if gammas == 1.0:
# setting if shallow reservoir is spherical
alphas = 1.0
else:
# setting if shallow reservoir is sill-like reservoir [Reverso p.9]
alphas = 4.*Hs**2/(numpy.pi*Rs**2)
if gammad == 1.0:
# setting if deep reservoir is spherical
alphad = 1.0
else:
# setting if deep reservoir is sill-like reservoir [Reverso p.9]
alphad = 4.*Hd**2/(numpy.pi*Rd**2)
# eq. (17) for Ur(t): the horizontal surface displacement of a point at
# (cylindrical) radius r from the pipe connecting the reservoirs to the
# observation point. R = (Hs**2 + r**2)**(0.5)
H = ([
[r*a_s**3*alphas*(1-v)/(G*(Hs**2+r**2)**1.5),
r*a_d**3*alphad*(1-v)/(G*(Hd**2+r**2)**1.5)]
])
return H
def Uzmat(Hs, Hd, gammas, gammad, r, G, a_s, a_d, v):
Rs = numpy.sqrt(r**2 + Hs**2)
Rd = numpy.sqrt(r**2 + Hd**2)
if gammas == 1.0:
alphas = 1.0
else:
alphas = 4.*Hs**2/(numpy.pi*Rs**2)
if gammad == 1.0:
alphad = 1.0
else:
alphad = 4.*Hd**2/(numpy.pi*Rd**2)
H = ([
[Hs*a_s**3*alphas*(1-v)/(G*(Hs**2+r**2)**1.5),
Hd*a_d**3*alphad*(1-v)/(G*(Hd**2+r**2)**1.5)]
])
return H
def losmat(Hs, Hd, gammas, gammad, x, y, G, a_s, a_d, v, theta, phi):
r = numpy.sqrt(x**2 + y**2)
Rs = numpy.sqrt(r**2 + Hs**2)
Rd = numpy.sqrt(r**2 + Hd**2)
if gammas == 1.0:
alphas = 1.0
else:
alphas = (4.0*Hs**2)/(numpy.pi*Rs**2)
if gammad == 1.0:
alphad = 1.0
else:
alphad = (4.0*Hd**2)/(numpy.pi*Rd**2)
# Constants
GAMMA = (1.0-v)/G
Ds = alphas * (a_s/Rs)**3
Dd = alphad * (a_d/Rd)**3
H = ([
[GAMMA*Ds*(numpy.sin(theta)*(numpy.sin(phi)*y -
numpy.sin(theta)*numpy.cos(phi)*x) +
numpy.cos(theta)*Hs) ,
GAMMA*Dd*(numpy.sin(theta)*(numpy.sin(phi)*y -
numpy.sin(theta)*numpy.cos(phi)*x) +
numpy.cos(theta)*Hd)
]
]
)
return H
def analytic(t, mu, G, g, Hc, gammas, gammad, k, a_s, ac, dPd0, dPs0, drho, Qin):
# Analytic solution
# Calculate the characteristic time constant: tau = 1/ξ eq. (10)
tau = (8.0*mu*Hc*gammas*gammad*k*a_s**3)/(G*ac**4*(gammas+gammad*k))
A = gammad*k/(gammas + gammad*k)
A *= dPd0 - dPs0 + drho*g*Hc - 8.*gammas*mu*Qin*Hc/(numpy.pi*ac**4*(gammas+gammad*k))
f0 = A*(1. - numpy.exp(-t/tau))
f1 = G*Qin*t/(numpy.pi*a_s**3*(gammas+gammad*k))
dPs_anal = f0 + f1 + dPs0
dPd_anal = -f0*gammas/(gammad*k) + f1 + dPd0
return dPs_anal, dPd_anal
def main(plot=False):
## Physical parameters
# shear modulus, [Pa, kg-m/s**2]
G = 20.0E9
# Poisson's ratio
nu = 0.25
# Viscosity [Pa-s]
mu = 2000.0
# Density difference (ρ_r-ρ_m), [kg/m**3]
drho = 300.0
# Gravitational acceleration [m/s**2]
g = 9.81
# Basal conditions
Qin = 0.6 # Basal magma inflow rate [m**3/s]
# Initial conditions
# Shallow reservoir overpressure at t=0 [Pa]
dPs0 = 0.0
# Deep reservoir overpressure at t=0 [Pa]
dPd0 = 0.0
# Geometry
# radius of the hydraulic pipe
ac = 1.5
# radius of the shallow reservoir
a_s = 2.0e3
# radius of the deep reservoir
a_d = 2.2e3
# ratio of the reservoir volumes
k = (a_d/a_s)**3
# depth of the deep reservoir
Hd = 4.0e3
# depth of the shallow reservoir
Hs = 3.0e3
# length of the hydraulic connection (no vertical extensions of the reservoirs? Fig 6.)
Hc = Hd - Hs
gammas = 8.0*(1.0-nu)/(3.*numpy.pi)
gammad = 8.0*(1.0-nu)/(3.*numpy.pi)
# time-step in seconds (1 day)
dt = 86400.0
# max time (1 year) in seconds
tmax = dt*365.0 *1.0
## differential equation
# the time array in seconds
t = numpy.arange(0, tmax, dt)
# the time array as fraction of total duration
tfrac = t/tmax
nt = len(t)
print("Number of time samples = {}".format(nt))
## Initialization
dPs = numpy.zeros(nt)
dPs[0] = dPs0
dPd = numpy.zeros(nt)
dPd[0] = dPd0
# Simplifying the equations
# Eq. (10): C1 = ξ*γ_d*k/(γ_s+γ_d*k), with ξ = 1/τ as defined above
C1 = (G*ac**4)/(8*mu*Hc*a_s**3*gammas)
# A in eq (11) modified to incorporate initial overpressures
A1 = drho*g*Hc + dPd0 - dPs0
A2 = G*Qin / (gammad*numpy.pi*a_d**3)
C2 = gammas / (gammad*k)
for i in range(1, nt):
dPs[i] = dt*C1*(A1 + dPd[i-1] - dPs[i-1]) + dPs[i-1]
dPd[i] = A2*dt - C2*(dPs[i] - dPs[i-1]) + dPd[i-1]
if plot:
pyplot.plot(tfrac, dPs/1.0e6, label='Differential')
pyplot.legend(loc=2, prop={'size':14}, framealpha=0.5)
pyplot.show()
pyplot.plot(tfrac, dPd/1.e6, label='Differential')
pyplot.legend(loc=2, prop={'size':14}, framealpha=0.5)
pyplot.show()
if 1:
# Analytic solution
dPs_anal, dPd_anal = analytic(t, mu, G, g, Hc, gammas, gammad, k, a_s, ac, dPd0, dPs0, drho, Qin)
else:
# Calculate the characteristic time constant: tau = 1/ξ eq. (10)
tau = (8.0*mu*Hc*gammas*gammad*k*a_s**3)/(G*ac**4*(gammas+gammad*k))
A = gammad*k/(gammas + gammad*k)
A *= dPd0 - dPs0 + drho*g*Hc - 8.*gammas*mu*Qin*Hc/(numpy.pi*ac**4*(gammas+gammad*k))
f0 = A*(1. - numpy.exp(-t/tau))
f1 = G*Qin*t/(numpy.pi*a_s**3*(gammas+gammad*k))
dPs_anal = f0 + f1 + dPs0
dPd_anal = -f0*gammas/(gammad*k) + f1 + dPd0
# Comparing Analytical solution with the differential equation
if plot:
pyplot.plot(tfrac, dPs/1.0e6, label='Differential')
pyplot.plot(tfrac, dPs_anal/1.0e6, ls='--', lw=6, alpha=0.6, label='Analytical')
pyplot.legend(loc=2, prop={'size':14}, framealpha=0.5)
pyplot.title('Shallow Overpressure (MPa)')
pyplot.show()
pyplot.plot(tfrac, dPd/1.0e6, label='Differential')
pyplot.plot(tfrac, dPd_anal/1.0e6, ls='--', lw=6, alpha=0.6, label='Analytical')
pyplot.legend(loc=2, prop={'size':14}, framealpha=0.5)
pyplot.title('Deep Overpressure (MPa)')
pyplot.show()
# Generate r-array of GNSS stations.
# r is the distance from the center of the volcano and the GNSS station (or InSAR points)
rr = numpy.arange(1000, 6000, 1000)
# Number of observations
nObs = len(rr)
print("Number of spacial observations = {}".format(nObs))
# H-matrix for the radial displacement
H_Ur = numpy.squeeze([Urmat(Hs, Hd, gammas, gammad, r, G, a_s, a_d, nu) for i, r in enumerate(rr)])
# H-matrix for the vertical displacement
H_Uz = numpy.squeeze([Uzmat(Hs, Hd, gammas, gammad, r, G, a_s, a_d, nu) for i, r in enumerate(rr)])
# Generate the corresponding displacements
# H-matrix for the radial displacement
Ur = numpy.squeeze([numpy.mat(H_Ur) * numpy.mat([[dPs[i]], [dPd[i]]]) for i in range(nt)])
Uz = numpy.squeeze([numpy.mat(H_Uz) * numpy.mat([[dPs[i]], [dPd[i]]]) for i in range(nt)])
print("Ur = {}".format(Ur))
print("Uz = {}".format(Uz))
if plot:
#Plot radial displacement
for i, label in enumerate(rr):
pyplot.plot(tfrac, Ur[:,i], label='r={}'.format(label/1000.)+' km')
pyplot.legend()
pyplot.title('Radial Displacement (m)')
pyplot.show()
#Plot vertical displacement
for i, label in enumerate(rr):
pyplot.plot(tfrac, Uz[:,i], label='r={}'.format(label/1000.)+' km')
pyplot.legend()
pyplot.title('Vertical Displacement (m)')
pyplot.show()
# Synthetic InSAR dataset
# Incidence angle, theta
theta = numpy.pi * (41./180.)
# Azimuth angle, phi
phi = numpy.pi * (-169./180.)
# create meshgrid
x = numpy.arange(-5000, 5100, 1000)
y = x
X, Y = numpy.meshgrid(x, y)
H_los = [losmat(Hs, Hd, gammas, gammad, x, y, G, a_s, a_d, nu, theta, phi)]
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
plot = True
else:
plot = False
status = main(plot)
raise SystemExit(status)
|
lijun99/altar | altar/altar/bayesian/MPIAnnealing.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# externals
import mpi
import journal
# the framework
import altar
# superclass
from .AnnealingMethod import AnnealingMethod
# declaration
class MPIAnnealing(AnnealingMethod):
"""
A distributed implementation of the annealing method that uses MPI
"""
# interface
def initialize(self, application):
"""
Initialize me and my parts given an {application} context
"""
# chain up
super().initialize(application=application)
# ask the application context for the rng component
rng = application.rng
# make a rank dependent seed
seed = rng.seed + 29*(self.rank+1) + 1
# seed the rng
rng.rng.seed(seed=seed)
# show me
application.info.log(f"mpi annealing: worker {self.wid} out of total {self.workers}, {self.worker}")
# initialize worker
self.worker.wid = self.rank
self.worker.initialize(application=application)
# turn off info channel for non-managers
if self.rank != self.manager:
application.info.active = False
# all done
return self
def start(self, annealer):
"""
Start the annealing process
"""
# chain up
super().start(annealer=annealer)
# everybody has to get ready
self.worker.start(annealer=annealer)
# collect the global state: at the master task, I get the entire state of the problem;
# at the other tasks, I just get a reference to the local state so I have uniform
# access to the annealing temperature
self.step = self.collect()
# all done
return self
def top(self, annealer):
"""
Notification that we are at the beginning of a β update
"""
# if i am the manager
if self.rank == self.manager:
# chain up
return super().top(annealer=annealer)
# otherwise, do nothing
return self
def cool(self, annealer):
"""
Push my state forward along the cooling schedule
"""
# if I am the manager
if self.rank == self.manager:
# i have the global state; cool it
super().cool(annealer=annealer)
# all done
return self
def walk(self, annealer):
"""
Explore configuration space by walking the Markov chains
"""
# partition and synchronize my state
self.partition()
# all workers walk their chains
stats = self.worker.walk(annealer=annealer)
# collect my state
self.step = self.collect()
# return the statistics
return stats
def resample(self, annealer, statistics):
"""
Analyze the acceptance statistics and take the problem state to the end of the
annealing step
"""
# who is the boss?
manager = self.manager
# unpack the acceptance/rejection statistics
accepted, rejected, unlikely = statistics
# add up the acceptance/rejection statistics from all the nodes
accepted = int(self.communicator.sum(accepted))
rejected = int(self.communicator.sum(rejected))
unlikely = int(self.communicator.sum(unlikely))
# chain up
statistics = super().resample(annealer=annealer, statistics=(accepted,rejected,unlikely))
# all done
return statistics
def archive(self, annealer, scaling, stats):
"""
Notify archiver to record annealer information
"""
# if i am the manager
if self.rank == self.manager:
super().archive(annealer=annealer, scaling=scaling, stats=stats)
# otherwise, do nothing
return self
def bottom(self, annealer):
"""
Notification that we are at the end of a β update
"""
# if i am the manager
if self.rank == self.manager:
# chain up
super().bottom(annealer=annealer)
# otherwise, do nothing
return self
def finish(self, annealer):
"""
Shut down the annealing process
"""
# if i am the manager
if self.rank == self.manager:
# chain up
return super().finish(annealer=annealer)
# otherwise, do nothing
return self
# for cuda worker
@property
def device(self):
return self.worker.device
@property
def gstep(self):
return self.worker.gstep
# meta-methods
def __init__(self, annealer, worker, communicator=None, **kwds):
# chain up
super().__init__(annealer=annealer, **kwds)
# make sure i have a valid communicator
comm = communicator or mpi.world
# attach it
self.communicator = comm
# store the number of tasks
self.tasks = comm.size
# and my rank
self.rank = comm.rank
# save the annealing method for each of my tasks
self.worker = worker
# assign them a worker id
self.worker.wid = self.rank
self.wid = self.rank
# compute the total number of workers
workers = comm.sum(item=worker.workers)
# the result is meaningful only on the manager task
self.workers = int(workers)
# all done
return
# implementation details
def collect(self):
"""
Assemble my global state
"""
# get the communicator
communicator = self.communicator
# who's the boss?
manager = self.manager
# ask my worker for its local state
step = self.worker.step
# get the temperature
β = step.beta
# assemble the sample set
θ = altar.matrix.collect(
matrix=step.theta, communicator=communicator, destination=manager)
# the prior
prior = altar.vector.collect(
vector=step.prior, communicator=communicator, destination=manager)
# the data
data = altar.vector.collect(
vector=step.data, communicator=communicator, destination=manager)
# the posterior
posterior = altar.vector.collect(
vector=step.posterior, communicator=communicator, destination=manager)
# if I am not the manager task
if self.rank != self.manager:
# just return the local state
return step
# the manager packs the state of the problem and returns it; everybody has the same
# covariance matrix, so the local copy is good enough
return self.CoolingStep(
beta=β, theta=θ,
likelihoods=(prior,data,posterior), sigma=step.sigma)
def partition(self):
"""
Distribute my global state
"""
# who is the boss
manager = self.manager
# am i the boss?
if self.rank == manager:
# grab my global state
step = self.step
# unpack it
β = step.beta
θ = step.theta
Σ = step.sigma
prior = step.prior
data = step.data
posterior = step.posterior
# the others
else:
# know nothing
β = θ = Σ = prior = data = posterior = None
# cache my communicator
comm = self.communicator
# the partitioning modifies my local state, which is kept on my behalf by the manager of
# my local workers
step = self.worker.step
# everybody gets the temperature
step.beta = comm.bcast(item=β, source=manager)
# it is important not to disturb the memory held by the manager: threaded managers have
# their workers set up views on the local state and we don't want to mess that up
# grab my portion of the sample set
step.theta.excerpt(matrix=θ, source=manager, communicator=comm)
# my portion of the likelihoods
step.prior.excerpt(vector=prior, source=manager, communicator=comm)
step.data.excerpt(vector=data, source=manager, communicator=comm)
step.posterior.excerpt(vector=posterior, source=manager, communicator=comm)
# finally, the covariance matrix
step.sigma.copy(altar.matrix.bcast(matrix=Σ, source=manager, communicator=comm))
# all done
return step
# private data
manager = 0 # the rank responsible for distributing and collecting the workload
worker = None # the annealing method implementation; deduced at start up time
# end of file
|
lijun99/altar | altar/altar/simulations/Archiver.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# the archiver protocol
class Archiver(altar.protocol, family="altar.simulations.archivers"):
"""
The protocol that all AlTar simulation archivers must implement
Archivers persist intermediate simulation state and can be used to restart a simulation
"""
# required behavior
@altar.provides
def initialize(self, application):
"""
Initialize me given an {application} context
"""
@altar.provides
def record(self, step):
"""
Record the final state of the simulation
"""
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Supply a default implementation
"""
# pull the in-memory archiver
from .Recorder import Recorder as default
# and return it
return default
# end of file
|
lijun99/altar | models/seismic/seismic/StaticCp.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
from .Static import Static
# declaration
class StaticCp(Static, family="altar.models.seismic.staticCp"):
"""
Linear Model with prediction uncertainty Cp, in addition to data uncertainty Cd
Cp = Kp Cmu Kp^T
Kp = Kmu * <θ>
Kp.shape = (observations, nCmu)
Cmu.shape = (nCmu, nCmu)
Kmu.shape =(observations, parameters) (same as the green's function)
"""
# implementation notes
# we keep Cd as Cd0, data observation as d0
# after each beta step, we calculate Cp from the mean model
# Cd = Cd0 + Cp
# Cd_inv
# The properties in super class - Linear Model are included
# extra properties for Cp
# mu is named after shear modulus, but is used for general model parameters
nCmu = altar.properties.int(default=0)
nCmu.doc = "the number of model parameter sets"
cmu_file = altar.properties.path(default="cmu.txt")
cmu_file.doc = "the covariance describing the uncertainty of model parameter"
# kmu are a set (nCmu) of derivatives of Green's functions, each with shape=(observations, parameters)
kmu_file = altar.properties.str(default="kmu[n].txt")
kmu_file.doc = "the sensitivity kernel of model parameter: input as kmu1.txt, ..."
# initial model
initialModel_file = altar.properties.str(default="init_model.txt")
initialModel_file.doc = "the initial mean model"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
super().initialize(application=application)
# convert the input filenames into data
self.Kmu, self.Cmu, self.meanModel = self.loadInputsCp()
# set Cp
self.Cp = self.computeCp(theta_mean=self.meanModel)
# all done
return self
def loadInputsCp(self):
"""
Load the additional data (for Cp problem) in the input files into memory
"""
# grab the input dataspace
ifs = self.ifs
# the covariance/uncertainty for model parameter Cmu
try:
# get the path to the file
cmuf = ifs[self.cmu_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing data covariance matrix: no '{self.cmu_file}' in '{self.case}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
cmu = altar.matrix(shape=(self.nCmu, self.nCmu))
# and load the file contents into memory
cmu.load(cmuf.uri)
# the sensitivity kernels, usually named Kmu
nCmu = self.nCmu
prefix, suffix = self.kmu_file.split("[n]")
kmu = []
for i in range(nCmu):
# allocate a fresh matrix for each kernel so that the list entries stay distinct
kmu_i = altar.matrix(shape=(self.observations, self.parameters))
kmufn = prefix+str(i+1)+suffix
try:
kmuf = ifs[kmufn]
except ifs.NotFoundError:
channel = self.error
channel.log(f"missing sensitivity kernel: no '{kmufn}' in '{self.case}'")
raise
else:
kmu_i.load(kmuf.uri)
kmu.append(kmu_i)
# the initial model
try:
# get the path to the file
initModelf = ifs[self.initialModel_file]
# if the file doesn't exist
except ifs.NotFoundError:
channel = self.error
channel.log(f"missing initial model file: no '{self.initialModel_file}' in '{self.case}'")
raise
# if all goes well
else:
# and load the file contents into memory
initModel = altar.vector(shape=self.parameters)
initModel.load(initModelf.uri)
# all done
return kmu, cmu, initModel
#Cp - related functions
def initializeCovariance(self, samples):
"""
initialize data covariance related variables
"""
# make copies of the original Cd, data_obs, green
self.Cd0 = self.Cd.clone()
self.d0 = self.d.clone()
self.G0 = self.G.clone()
# compute the normalization
self.normalization = self.computeNormalization(observations=self.observations, cd=self.Cd)
# compute the inverse of {Cd}
self.Cd_inv = self.computeCovarianceInverse(self.Cd)
# merge Cd to green and d
# G = Cd_inv x G; d = Cd_inv x d
Cd_inv = self.Cd_inv
self.G = altar.blas.dtrmm(Cd_inv.sideLeft, Cd_inv.upperTriangular, Cd_inv.opNoTrans,
Cd_inv.nonUnitDiagonal, 1, Cd_inv, self.G)
self.d = altar.blas.dtrmv( Cd_inv.upperTriangular, Cd_inv.opNoTrans, Cd_inv.nonUnitDiagonal,
Cd_inv, self.d)
# prepare the residuals matrix
self.residuals = self.initializeResiduals(samples=samples, data=self.d)
# all done
return self
def computeCp(self, theta_mean):
"""
Calculate Cp
"""
# grab the samples shape=(samples, parameters)
parameters = self.parameters
observations = self.observations
nCmu = self.nCmu
Cp = altar.matrix(shape=(observations, observations))
# calculate
kv = altar.vector(shape=observations)
cmu = self.Cmu
kmu = self.Kmu
Kp = altar.matrix(shape=(observations, nCmu))
for i in range(nCmu):
# get kmu_i from list, shape=(observations, parameters)
kmu_i = kmu[i]
# kv = Kmu_i * theta_mean
# dgemv y = alpha Op(A) x + beta y
altar.blas.dgemv(kmu_i.opNoTrans, 1.0, kmu_i, theta_mean, 0.0, kv)
Kp.setColumn(i, kv)
# KpC = Kp * Cmu
KpC = altar.matrix(shape=(observations, nCmu))
altar.blas.dsymm(cmu.sideRight, cmu.upperTriangular, 1.0, cmu, Kp, 0.0, KpC)
# Cp = KpC * Kp^T = Kp Cmu Kp^T
altar.blas.dgemm(KpC.opNoTrans, Kp.opTrans, 1.0, KpC, Kp, 0.0, Cp)
# all done
return Cp
@altar.export
def update(self, annealer):
"""
Model update interface
"""
# call super class update
super().update(annealer=annealer)
# get the work samples
step = annealer.worker.step
θ = self.restrict(theta=step.theta)
# calculate the mean model
theta_mean = self.meanModel
for i in range(self.parameters):
param_v = θ.getColumn(i)
param_mean = param_v.mean()
theta_mean[i] = param_mean
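# theta_mean now holds the sample mean of every parameter; Cp is re-evaluated about this mean model below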
# compute Cp from the mean model
self.Cp = self.computeCp(theta_mean = theta_mean)
# update Cd
self.Cd.copy(self.Cd0)
self.Cd += self.Cp
# compute the normalization
self.normalization = self.computeNormalization(observations=self.observations, cd=self.Cd)
# compute the inverse of {Cd}
self.Cd_inv = self.computeCovarianceInverse(self.Cd)
# merge Cd to green and d
# G = Cd_inv x G; d = Cd_inv x d
Cd_inv = self.Cd_inv
self.G.copy(self.G0)
self.G = altar.blas.dtrmm(Cd_inv.sideLeft, Cd_inv.upperTriangular, Cd_inv.opNoTrans,
Cd_inv.nonUnitDiagonal, 1, Cd_inv, self.G)
self.d.copy(self.d0)
self.d = altar.blas.dtrmv( Cd_inv.upperTriangular, Cd_inv.opNoTrans, Cd_inv.nonUnitDiagonal,
Cd_inv, self.d)
# prepare the residuals matrix
samples = step.samples
self.residuals = self.initializeResiduals(samples=samples, data=self.d)
# recalculate densities
# self.densities(annealer=annealer, step=step)
#all done
return self
# inputs
G0 = None # the original Green functions
d0 = None # the vector with the original observations
Cd0 = None # the original data covariance matrix
Cmu = None # the covariance of sensitivity kernel
Kmu = None # the sensitivity kernel
# computed
Cp = None # the covariance matrix associated with model uncertainty
meanModel = None
# end of file
|
lijun99/altar | altar/altar/norms/L2.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# my protocol
from .Norm import Norm
# declaration
class L2(altar.component, family="altar.norms.l2", implements=Norm):
"""
The L2 norm
"""
# interface
@altar.export
def eval(self, v, sigma_inv=None):
"""
Compute the L2 norm of the given vector, with or without a covariance matrix
"""
# if we have a covariance matrix
if sigma_inv is not None:
# use the specialized implementation
return self.withCovariance(v=v, sigma_inv=sigma_inv)
# otherwise, compute the norm and return it
return altar.blas.dnrm2(v)
# implementation details
def withCovariance(self, v, sigma_inv):
"""
Compute the L2 norm of the given vector using the given Cholesky decomposed inverse
covariance matrix
"""
# we assume {sigma_inv} is Cholesky decomposed, so we can pre-multiply the vector by
# the lower triangle, and then just take the norm
# use the lower triangle, no transpose, non-unit diagonal
if isinstance(sigma_inv, altar.matrix):
v = altar.blas.dtrmv(
sigma_inv.lowerTriangular, sigma_inv.opNoTrans, sigma_inv.nonUnitDiagonal,
sigma_inv, v)
elif isinstance(sigma_inv, float):
v *= sigma_inv
else:
raise ValueError("L2 norm, sigma_inv should be a matrix or constant")
# compute the dot product and return it
return altar.blas.dnrm2(v)
# end of file
|
lijun99/altar | models/seismic/seismic/cuda/cudaStaticCp.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
from altar.cuda import cublas
from altar.cuda import libcuda
from .cudaStatic import cudaStatic
import numpy
# declaration
class cudaStaticCp(cudaStatic, family="altar.models.seismic.cuda.staticcp"):
"""
Static inversion with Cp (prediction error due to model parameter uncertainty)
"""
# extra configurable traits for cp
# mu is shear modulus; we use it for any generic model parameter
nCmu = altar.properties.int(default=0)
nCmu.doc = "the number of model parameters with uncertainties (or to be considered)"
cmu_file = altar.properties.path(default="static.Cmu.th5")
cmu_file.doc = "the covariance describing the uncertainty of model parameter, a nCmu x nCmu matrix"
# kmu are a set of sensitivity kernels (derivatives of Green's functions) with shape=(observations, parameters)
kmu_file = altar.properties.path(default="static.kernel.h5")
kmu_file.doc = "the sensitivity kernel of model parameters, a hdf5 file including nCmu kernel data sets"
# initial model
initial_model_file = altar.properties.path(default=None)
initial_model_file.doc = "the initial mean model"
beta_cp_start = altar.properties.float(default=0)
beta_cp_start.doc = "for beta >= beta_cp_start, incorporate Cp into Cd"
beta_use_initial_model = altar.properties.float(default=0)
beta_use_initial_model.doc = "for beta <= beta_use_initial_model, use initial_model instead of mean model"
dtype_cp = altar.properties.str(default='None')
dtype_cp.doc = "single/double precision to compute Cp"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
# static model without Cp
super().initialize(application=application)
# initialize cp-specific parameters
# the dtype_cp trait defaults to the string 'None'; fall back to the data covariance precision in that case
self.dtype_cp = self.dataobs.dtype_cd if self.dtype_cp == 'None' else self.dtype_cp
self.initializeCp()
# all done
return self
def initializeCp(self):
"""
Initialize Cp-related data: Cmu, the optional initial model, the mean-model buffer and the Cp matrix
:return: self
"""
# load Cmu
self.gCmu = self.loadFileToGPU(filename=self.cmu_file, shape=(self.nCmu, self.nCmu), dtype=self.dtype_cp)
# load initial model if provided
if self.initial_model_file is not None:
self.gInitModel = self.loadFileToGPU(filename=self.initial_model_file, shape=self.parameters, dtype=self.dtype_cp)
# allocate a gpu vector to record mean model
self.gMeanModel = altar.cuda.vector(shape=self.parameters, dtype=self.dtype_cp)
# allocate a gpu matrix to record Cp
self.Cp = altar.cuda.matrix(shape=(self.observations, self.observations), dtype=self.dtype_cp)
return self
def updateModel(self, annealer):
"""
Model method called by Sampler before Metropolis sampling for each beta step starts,
employed to compute Cp and merge Cp with data covariance
:param annealer: the annealer for application
:return: True if Cp was recomputed and merged into the data covariance, False if nothing was updated
"""
# check beta and decide whether to incorporate cp
step = annealer.worker.step
beta = step.beta
if beta < self.beta_cp_start:
return False
# get the threads information
wid = annealer.worker.wid
workers = annealer.worker.workers
# calculate cp on master thread only
# only master thread stores the mean_model for all samples (from previous beta step)
if wid == 0:
if beta <= self.beta_use_initial_model:
# use initial(input) model
mean_model = self.gInitModel
else:
# copy the current mean model from all samples
self.gMeanModel.copy_from_host(source=step.mean)
# use the mean model
mean_model = self.gMeanModel
# compute Cp with mean model
self.computeCp(model=mean_model, cp=self.Cp)
# if there is more than one worker, broadcast Cp
if workers > 1:
self.Cp.bcast(communicator=annealer.worker.communicator, source=0)
# recompute covariance = cp + cd,
# and merge covariance with observed data
self.dataobs.updateCovariance(cp=self.Cp)
# merge covariance with green's function
self.mergeCovarianceToGF()
# all done
return True
def computeCp(self, model, cp=None):
"""
Compute Cp with a mean model
:param model: the (gpu) mean model vector used to evaluate the sensitivity kernels
:return: the assembled Cp matrix
"""
# force float64 computation
import h5py
import numpy
# grab the samples shape=(samples, parameters)
parameters = self.parameters
observations = self.observations
nCmu = self.nCmu
# allocate Cp if not pre-allocated
Cp = cp or altar.cuda.matrix(shape=(observations, observations), dtype=self.dtype_cp)
# get cmu, shape=(nCmu, nCmu); kmu are loaded on the fly
Cmu = self.gCmu
# allocate work arrays
kmu = altar.cuda.matrix(shape=self.gGF.shape, dtype=self.dtype_cp)
Kp = altar.cuda.matrix(shape=(nCmu, observations), dtype = self.dtype_cp)
kpv = altar.cuda.vector(shape=observations, dtype=Kp.dtype)
# check the existence of kernel h5 file
h5kernelfile = self.ifs[self.kmu_file]
# open h5 file
h5kernel = h5py.File(h5kernelfile.uri.path, 'r')
# get the keys for datasets (kernels)
h5keys =list(h5kernel.keys())
for i in range(nCmu):
# load kmu_np(cpu) from h5, shape=(observations, parameters)
kmu_np = numpy.asarray(h5kernel.get(h5keys[i]), dtype=self.dtype_cp).reshape(kmu.shape)
# copy it to the gpu
kmu.copy_from_host(source=kmu_np)
# call the forward model
self.forwardModel(theta=model, green=kmu, prediction=kpv)
# copy the vector result to matrix
Kp.set_row(kpv, row=i)
# KpC = Cm Kp (nCmu, obs) = (nCmu, nCmu)x(nCmu, obs)
KpC = cublas.symm(A=Cmu, B=Kp, handle=self.cublas_handle)
# Cp = Kp^T KpC (obs, obs) = (obs, nCmu) x (nCmu, obs)
Cp = cublas.gemm(A=KpC, transa= cublas.OpTrans, B=Kp, transb = cublas.OpNoTrans,
out =Cp, handle = self.cublas_handle)
#libcuda.cublas_gemm(self.cublas_handle,
# 0, 1, # transa, transb
# Kp.shape[1], Cp.shape[1], Kp.shape[0], # m, n, k
# 1.0, # alpha
# Kp.data, Kp.shape[1], # A, lda
# KpC.data, KpC.shape[1], # B, ldb
# 0.0,
# Cp.data, Cp.shape[1])
# close the kernel h5 file
h5kernel.close()
# all done
return Cp
# private data
gCmu = None
gInitModel = None
gMeanModel = None
# end of file
|
IvanAgafonov/get-all-chat-members | bot.py | # importing all required libraries
from boto.s3.connection import S3Connection
import os
from telethon.sync import TelegramClient
from telethon.tl.functions.channels import GetParticipantsRequest
from telethon.tl.types import InputPeerUser, InputPeerChannel, ChannelParticipantsSearch
from telethon import TelegramClient, sync, events
from telethon.tl.functions.messages import ImportChatInviteRequest, CheckChatInviteRequest
import asyncio
from telethon import functions, types
import time
# get your api_id, api_hash, token
# from telegram as described above
api_id = os.environ['api_id']
api_hash = os.environ['api_hash']
# your phone number
phone = os.environ['phone']
# creating a telegram session and assigning
# it to a variable client
client = TelegramClient('session', api_id, api_hash)
# connecting and building the session
client.connect()
# in case of script ran first time it will
# ask either to input token or otp sent to
# number or sent or your telegram id
if not client.is_user_authorized():
client.send_code_request(phone)
# signing in the client
client.sign_in(phone, input('Enter the code: '))
async def get_members(dialog):
offset = 0
limit = 100
all_participants = []
if not dialog.is_channel:
participants = await client.get_participants(dialog, aggressive=True)
all_participants = participants
else:
while True:
participants = await client(GetParticipantsRequest(
dialog, ChannelParticipantsSearch(''), offset, limit, hash=0
))
if not participants.users:
break
all_participants.extend(participants.users)
offset += len(participants.users)
user_names = []
for participant in all_participants:
if participant.username is not None:
user_names.append(participant.username)
message = ""
messages = []
cur_len = 0
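# usernames are batched into chunks of roughly 4000 characters to stay under Telegram's per-message limit (4096 characters)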
for username in user_names:
if username != "GetAllChatMembers":
message += "@" + username + " "
cur_len += len("@" + username + " ")
if cur_len > 4000:
messages.append(message)
message = ""
cur_len = 0
if message != "":
messages.append(message)
return messages
async def get_dialog_by_name(name):
dialogs = await client.get_dialogs()
for dialog in dialogs:
if hasattr(dialog, "entity"):
if hasattr(dialog.entity, "username"):
if dialog.entity.username == name:
return dialog
return None
async def get_dialog_by_id(id):
dialogs = await client.get_dialogs()
for dialog in dialogs:
if hasattr(dialog, "entity"):
if hasattr(dialog.entity, "id"):
if dialog.entity.id == id:
return dialog
return None
@client.on(events.NewMessage)
async def handler(event):
if event.is_private:
message = event.message.message
argv = message.split(" ")
if len(argv) != 1:
return
chat = None
hash = None
if "/joinchat/" in argv[0]:
hash = argv[0].split("/")[-1]
try:
updates = await client(ImportChatInviteRequest(hash))
chat = updates.chats[0]
chat_id = chat.id
except Exception as e:
updates = await client(CheckChatInviteRequest(hash))
chat_id = updates.chat.id
chat = await get_dialog_by_id(chat_id)
if "@" in argv[0]:
chat_name = argv[0][1:]
try:
await client(functions.channels.JoinChannelRequest(
channel=chat_name
))
except Exception as e:
pass
chat = await get_dialog_by_name(chat_name)
res = await get_members(chat)
for mes in res:
await event.reply(mes)
await asyncio.sleep(1)  # non-blocking pause between replies
# leave the chat again, regardless of how it was joined
await client(functions.channels.LeaveChannelRequest(
chat
))
client.loop.run_forever()
|
pogginicolo98/start2impact_python_project | main.py | # Start2Impact: Python project
# Cryptocurrency reporting system
import json
import os
from password import COINMARKETCAP_API_KEY
import requests
import time
class CoinmarketcapHandler:
""" Coinmarketcap APIs handler. Connect and fetch data from Coinmarketcap APIs """
def __init__(self):
self.url = ''
self.parameters = {}
self.headers = {}
def fetch_currencies_data(self):
# Get and return data via Coinmarketcap APIs
response = requests.get(url=self.url, headers=self.headers, params=self.parameters).json()
return response['data']
class CryptoReport(CoinmarketcapHandler):
""" Cryptocurrencies reporting system.
Generate 6 types of reports about cryptocurrencies using Coinmarketcap APIs """
def __init__(self):
super(CryptoReport, self).__init__()
self.url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
self.headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': COINMARKETCAP_API_KEY,
}
self.reports = self.get_reports()
def get_reports(self):
# Return a dict with 6 types of reports about cryptocurrencies
reports = {
'most traded': self.most_traded_currency(),
'best 10': self.best_ten_currencies(),
'worst 10': self.worst_ten_currencies(),
'amount top 20': self.amount_top_twenty_currencies(),
'amount by volumes': self.amount_by_volumes_currencies(),
'gain top 20': self.gain_top_twenty_currencies()
}
return reports
def most_traded_currency(self):
# Return the cryptocurrency with the largest volume (in $) of the last 24 hours
self.parameters = {
'start': 1,
'limit': 1,
'sort': 'volume_24h',
'sort_dir': 'desc',
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
return currencies[0]
def best_ten_currencies(self):
# Return the best 10 cryptocurrencies by percentage increase in the last 24 hours
self.parameters = {
'start': 1,
'limit': 10,
'sort': 'percent_change_24h',
'sort_dir': 'desc',
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
return currencies
def worst_ten_currencies(self):
# Return the worst 10 cryptocurrencies by percentage increase in the last 24 hours
self.parameters = {
'start': 1,
'limit': 10,
'sort': 'percent_change_24h',
'sort_dir': 'asc',
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
return currencies
def amount_top_twenty_currencies(self):
# Return the amount of money required to purchase one unit of each of the top 20 cryptocurrencies in order of capitalization
amount = 0
self.parameters = {
'start': 1,
'limit': 20,
'sort': 'market_cap',
'sort_dir': 'desc',
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
for currency in currencies:
amount += currency['quote']['USD']['price']
return round(amount, 2)
def amount_by_volumes_currencies(self):
# Return the amount of money required to purchase one unit of all cryptocurrencies whose last 24-hour volume exceeds $ 76,000,000
amount = 0
self.parameters = {
'start': 1,
'limit': 100,
'volume_24h_min': 76000000,
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
for currency in currencies:
amount += currency['quote']['USD']['price']
return round(amount, 2)
def gain_top_twenty_currencies(self):
# Return the percentage of gain or loss you would have made if you had bought one unit of each of the top 20 cryptocurrencies the day before
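# worked example: a coin quoted at 110 $ with percent_change_24h = 10 cost 110 / (1 + 10/100) = 100 $ a day earlier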
initial_amount = 0
final_amount = 0
self.parameters = {
'start': 1,
'limit': 20,
'sort': 'market_cap',
'sort_dir': 'desc',
'convert': 'USD'
}
currencies = self.fetch_currencies_data()
for currency in currencies:
old_price = currency['quote']['USD']['price'] / (1 + (currency['quote']['USD']['percent_change_24h'] / 100))
initial_amount += old_price
final_amount += currency['quote']['USD']['price']
gain = round((((final_amount - initial_amount) / initial_amount) * 100), 1)
return gain
def make_json(report):
# Create a json file named with the actual date into the 'Report' directory
file_name = time.strftime('Report_%d_%m_%Y.json', time.localtime())
script_dir = os.path.dirname(os.path.abspath(__file__))
destination_dir = os.path.join(script_dir, 'report')
path = os.path.join(destination_dir, file_name)
# Create the directory if it does not already exist
try:
os.mkdir(destination_dir)
except OSError:
pass # Already exists
with open(path, 'w') as f:
json.dump(report, f)
def main():
seconds = 60
minutes = 60
hours = 24
while True:
report = CryptoReport()
# Display essential reports
print('------------------------------------------------------------')
print('Crypto currencies reports of ' + time.strftime('%d/%m/%Y', time.localtime()))
print('Most traded: ' + str(report.reports['most traded']['symbol']))
print('Best 10:', end=' ')
for currency in report.reports['best 10']:
print(str(currency['symbol']), end=' ')
print('')
print('Worst 10:', end=' ')
for currency in report.reports['worst 10']:
print(str(currency['symbol']), end=' ')
print('')
print('Amount top 20: ' + str(report.reports['amount top 20']) + '$')
print('Amount by volumes: ' + str(report.reports['amount by volumes']) + '$')
print('Gain top 20: ' + str(report.reports['gain top 20']) + '%')
print('------------------------------------------------------------')
make_json(report.reports)
time.sleep(seconds * minutes * hours)
if __name__ == '__main__':
main()
|
maurcz/studying | discord-bot/using_bot.py | import os
import random
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
# interesting - with `Client` you get more flexibility, with `commands.Bot` you're
# bound to the well-known pattern of using prefixes.
# update - there's actually more to it - list of cmds, converting args, validations
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
print(f"{bot.user.name} has connected to Discord!")
@bot.command(name="sepultura", help="Responds with a random excerpt from Sepultura lyrics")
async def sepultura(ctx):
sepultura_lyrics = [
"Look at me, my feelings turn STRONGER THAN HATE",
"Life ends, feeling death... SLAVES OF PAIN!",
("Nonconformity in my inner self," "Only I guide my inner self"),
]
response = random.choice(sepultura_lyrics)
await ctx.send(response)
# Function annotations will force the conversion from `str` to desired type
@bot.command(name="roll_dice", help="Simulates rolling dice.")
async def roll_dice(ctx, number_of_dice: int, number_of_sides: int):
dice = [str(random.choice(range(1, number_of_sides + 1))) for _ in range(number_of_dice)]
await ctx.send(", ".join(dice))
@bot.command(name="create-channel")
@commands.has_role("admin")
async def create_channel(ctx, channel_name):
guild = ctx.guild
existing_channel = discord.utils.get(guild.channels, name=channel_name)
if existing_channel:
await ctx.send(f"Channel {channel_name} already exists.")
return
print(f"Creating channel {channel_name}...")
await guild.create_text_channel(channel_name)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.errors.CheckFailure):
await ctx.send("You do not have the correct role for this command.")
bot.run(TOKEN)
|
maurcz/studying | blockchain/blockchain.py | import hashlib
import json
from time import time
from urllib.parse import urlparse
import requests
class Blockchain(object):
def __init__(self):
self.chain = []
self.current_transactions = []
self.nodes = set() # nodes must be unique
# Genesis block
self.add_block(previous_hash=1, proof=100)
@property
def last_block(self) -> dict:
return self.chain[-1]
@staticmethod
def generate_hash(block: dict) -> str:
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def add_block(self, proof: int, previous_hash=None) -> dict:
block = {
"index": len(self.chain) + 1,
"timestamp": time(),
"transactions": self.current_transactions,
"proof": proof,
"previous_hash": previous_hash or self.generate_hash(self.chain[-1]),
}
self.current_transactions = []
self.chain.append(block)
return block
def add_transaction(self, sender: str, recipient: str, amount: int) -> int:
"""
`current_transactions` go into the next mined Block
"""
self.current_transactions.append(
{
"sender": sender,
"recipient": recipient,
"amount": amount,
}
)
return self.last_block["index"] + 1
def mine(self, last_proof: int) -> int:
"""
Very basic Proof of Work (PoW) algo: generate hashes until it gets one where
the first 4 hex characters of the hash are 0.
Generally, solutions from a Proof of Work algo should be hard to find but easy
to verify. Even in this naive implementation, increasing the number of characters
to find in the string increases runtime significantly while still making it super
easy to validate the "solution".
Using `last_proof` is important not only to potentially increase the difficulty of
finding the solution over time, but also to validate the integrity of the chain.
(see `validate_chain`)
"""
proof = 0
while not self._check_valid_proof(last_proof, proof):
proof += 1
return proof
@staticmethod
def _check_valid_proof(last_proof: int, proof: int) -> bool:
"""
Checks whether the hash of the previous encountered proof combined with the
current one starts with 0000.
"""
guess = f"{last_proof}{proof}".encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
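# rough cost estimate: each hex character of the digest is uniform over 16 values, so finding a
# proof with a "0000" prefix takes about 16**4 = 65536 hash attempts on average, while verifying
# a given proof is a single hash call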
"""
What follows is a simple representation of how nodes can interact with
each other, validating if chains from neighbors are valid and replacing
their own chains if they see any conflicts.
"""
def register_node(self, address: str):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def validate_chain(self, chain: list) -> bool:
"""
Validates the integrity of the chain by verifying its hashes and proofs are
consistent across all sequential pairs of blocks.
"""
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
if block["previous_hash"] != self.generate_hash(last_block):
return False
if not self._check_valid_proof(last_block["proof"], block["proof"]):
return False
last_block = block
current_index += 1
return True
def resolve_conflicts(self) -> bool:
"""
The example in the article is not really resolving conflicts, it just
replaces the current chain with the longer one found in other nodes.
It won't take into account concurrent transactions/mining operations that
might have been made to two or more different nodes - meaning data might be
lost.
"""
neighbors = self.nodes
largest_chain_size = len(self.chain)
new_chain = None
for node in neighbors:
response = requests.get(f"http://{node}/chain") # implies knowledge about the API...
if not response.status_code == 200:
continue
length = response.json()["length"]
chain = response.json()["chain"]
if length > largest_chain_size and self.validate_chain(chain):
largest_chain_size = length
new_chain = chain
if new_chain:
self.chain = new_chain
return True
return False
|
maurcz/studying | blockchain/api.py | from uuid import uuid4
from flask import Flask, jsonify, request
from blockchain import Blockchain
app = Flask(__name__)
node_id = str(uuid4()).replace("-", "")
blockchain = Blockchain()
@app.route("/mine", methods=["GET"])
def mine():
last_block = blockchain.last_block
last_proof = last_block["proof"]
proof = blockchain.mine(last_proof)
blockchain.add_transaction(sender="0", recipient=node_id, amount=1) # means this node has mined a new coin
previous_hash = blockchain.generate_hash(last_block)
block = blockchain.add_block(proof, previous_hash)
response = {
"message": "New Block Forged",
"index": block["index"],
"transactions": block["transactions"],
"proof": block["proof"],
"previous_hash": block["previous_hash"],
}
return jsonify(response), 200
@app.route("/transaction/new", methods=["POST"])
def transaction_new():
values = request.get_json()
required = ["sender", "recipient", "amount"]
if not all(k in values for k in required):
return "Missing values", 400
index = blockchain.add_transaction(values["sender"], values["recipient"], values["amount"])
response = {"message": f"Transaction will be added to Block {index}"}
return jsonify(response), 201
@app.route("/chain", methods=["GET"])
def chain():
response = {"chain": blockchain.chain, "length": len(blockchain.chain)}
return jsonify(response), 200
@app.route("/nodes/register", methods=["POST"])
def register_nodes():
nodes = request.get_json().get("nodes")
if not nodes:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchain.register_node(node)
response = {"message": "New nodes have been added", "total_nodes": list(blockchain.nodes)}
return jsonify(response), 201
@app.route("/nodes/resolve", methods=["GET"])
def consensus():
replaced = blockchain.resolve_conflicts()
msg = "Our chain was replaced" if replaced else "Our chain is authoritative"
response = {"message": msg, "chain": blockchain.chain}
return jsonify(response), 200
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
|
maurcz/studying | discord-bot/using_client.py | import os
import random
import discord
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
GUILD = os.getenv("DISCORD_GUILD")
# -------
# Example - Responding to events with subclasses
# class CustomClient(discord.Client):
# async def on_ready(self):
# print(f"{self.user} has connected to Discord!")
# client = CustomClient()
# client.run(TOKEN)
# -------
# Intents - new since the article came out. Toggles in the dev portal.
intents = discord.Intents.all()
client = discord.Client(intents=intents)
@client.event
async def on_ready():
# queries for info:
# guild = discord.utils.find(lambda g: g.name == GUILD, client.guilds)
# same data with helpers
guild = discord.utils.get(client.guilds, name=GUILD)
print(f"{client.user} has connected to Discord!")
print(f"Currently connected to guild {guild.name}(id: {guild.id}) ")
members = ", ".join([member.name for member in guild.members])
print(f"Guild members: {members}")
@client.event
async def on_member_join(member):
await member.create_dm()
await member.dm_channel.send(f"Hi {member.name}, welcome to my Discord server!")
@client.event
async def on_message(message):
# important to prevent the bot from responding to its own messages
if message.author == client.user:
return
sepultura_lyrics = [
"Look at me, my feelings turn STRONGER THAN HATE",
"Life ends, feeling death... SLAVES OF PAIN!",
("Nonconformity in my inner self," "Only I guide my inner self"),
]
if message.content == "Sepultura!":
response = random.choice(sepultura_lyrics)
await message.channel.send(response)
# way to force an exception to play with error-handling scenarios
elif message.content == "raise-exception":
raise discord.DiscordException
@client.event
async def on_error(event, *args, **kwargs):
with open("error.log", "a") as f:
if event == "on_message":
f.write(f"Unhandled message: {args[0]}\n")
else:
raise
client.run(TOKEN)
|
steveroch-rs/MCRcon | mcrcon.py | import argparse
import getpass
import os
import socket
import ssl
import select
import struct
import time
import platform
if platform.system() != "Windows":
import signal
class MCRconException(Exception):
pass
def timeout_handler(signum, frame):
raise MCRconException("Connection timeout error")
class MCRcon(object):
"""A client for handling Remote Commands (RCON) to a Minecraft server
The recommended way to run this client is using the python 'with' statement.
This ensures that the socket is correctly closed when you are done with it
rather than being left open.
Example:
In [1]: from mcrcon import MCRcon
In [2]: with MCRcon("10.1.1.1", "sekret") as mcr:
...: resp = mcr.command("/whitelist add bob")
...: print(resp)
While you can use it without the 'with' statement, you have to connect
manually, and ideally disconnect:
In [3]: mcr = MCRcon("10.1.1.1", "sekret")
In [4]: mcr.connect()
In [5]: resp = mcr.command("/whitelist add bob")
In [6]: print(resp)
In [7]: mcr.disconnect()
"""
socket = None
def __init__(self, host, password, port=25575, tlsmode=0, timeout=5):
self.host = host
self.password = password
self.port = port
self.tlsmode = tlsmode
self.timeout = timeout
if platform.system() != "Windows":
signal.signal(signal.SIGALRM, timeout_handler)
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, tb):
self.disconnect()
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Enable TLS
if self.tlsmode > 0:
ctx = ssl.create_default_context()
# Disable hostname and certificate verification
if self.tlsmode > 1:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.socket = ctx.wrap_socket(self.socket, server_hostname=self.host)
self.socket.connect((self.host, self.port))
self._send(3, self.password)
def disconnect(self):
if self.socket is not None:
self.socket.close()
self.socket = None
def _read(self, length):
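# SIGALRM-based timeouts are only available on POSIX systems, hence the Windows checks around alarm()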
if platform.system() != "Windows":
signal.alarm(self.timeout)
data = b""
while len(data) < length:
data += self.socket.recv(length - len(data))
if platform.system() != "Windows":
signal.alarm(0)
return data
def _send(self, out_type, out_data):
if self.socket is None:
raise MCRconException("Must connect before sending data")
# Send a request packet
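# packet layout (all little-endian): <i32 length><i32 request id, here 0><i32 packet type><utf-8 payload><two trailing null bytes>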
out_payload = (
struct.pack("<ii", 0, out_type) + out_data.encode("utf8") + b"\x00\x00"
)
out_length = struct.pack("<i", len(out_payload))
self.socket.send(out_length + out_payload)
# Read response packets
in_data = ""
while True:
# Read a packet
(in_length,) = struct.unpack("<i", self._read(4))
in_payload = self._read(in_length)
in_id, in_type = struct.unpack("<ii", in_payload[:8])
in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
# Sanity checks
if in_padding != b"\x00\x00":
raise MCRconException("Incorrect padding")
if in_id == -1:
raise MCRconException("Login failed")
# Record the response
in_data += in_data_partial.decode("utf8")
# If there's nothing more to receive, return the response
if len(select.select([self.socket], [], [], 0)[0]) == 0:
return in_data
def command(self, command):
result = self._send(2, command)
time.sleep(0.003) # MC-72390 workaround
return result
def mcrcon_cli():
try:
parser = argparse.ArgumentParser(
description="connect to and use Minecraft Server remote console protocol"
)
parser.add_argument("host", metavar="HOST", help="the host to connect to")
parser.add_argument(
"--password",
metavar="PASSWORD",
help="the password to connect with, default is a prompt or envvar RCON_PASSWORD.",
)
parser.add_argument(
"-p",
"--port",
metavar="PORT",
dest="port",
type=int,
default=25575,
help="the port to connect to",
)
parser.add_argument(
"-t",
"--tls",
dest="tlsmode",
action="store_true",
help="connect to the server with tls encryption",
)
args = parser.parse_args()
if not args.password and not os.environ.get("RCON_PASSWORD"):
password = getpass.getpass("Password: ")
elif os.environ.get("RCON_PASSWORD"):
password = os.environ.get("RCON_PASSWORD")
else:
password = args.password
try:
with MCRcon(args.host, password, args.port, args.tlsmode) as mcr:
while True:
cmd = input("> ")
if cmd.strip() == "exit":
break
else:
try:
resp = mcr.command(cmd)
print(resp)
except (ConnectionResetError, ConnectionAbortedError):
print(
"The connection was terminated, the server may have been stopped."
)
break
if cmd == "stop":
break
except ConnectionRefusedError:
print("The connection could not be made as the server actively refused it.")
except ConnectionError as e:
print(e)
except KeyboardInterrupt:
pass
|
keystonetowersystems/fuzzyfloat | fuzzyfloat/types.py | from .meta import FuzzyFloatMeta
class rel_fp(metaclass=FuzzyFloatMeta):
pass
class abs_fp(metaclass=FuzzyFloatMeta, rel_tol=0.0, atol=1e-07):
pass
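# rel_fp keeps the metaclass defaults (relative tolerance 1e-09, no absolute tolerance), so e.g.
# rel_fp(0.1) + 0.2 == 0.3 holds even though the plain-float comparison is False; abs_fp instead
# compares with a fixed absolute tolerance of 1e-07 regardless of magnitude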
|
keystonetowersystems/fuzzyfloat | tests/test_fp_rel.py | import pytest
from fuzzyfloat import rel_fp
def expect_fp(value, expected):
assert type(value) == rel_fp
assert value == expected
def test_cmp_eq():
value = rel_fp(100.5)
assert value == 100.5
assert value == 100.5000001
assert value == 100.4999999
value = rel_fp(1e-20)
assert value == 0
def test_cmp_le():
value = rel_fp(100)
assert value <= 500
assert value <= 100
assert value <= 99.9999999
assert not value <= 50
def test_cmp_ge():
value = rel_fp(100)
assert value >= 50
assert value >= 100
assert value >= 99.9999999
assert not value >= 500
def test_add():
value = rel_fp(100)
expect_fp(value + value, 200)
expect_fp(value + 100, 200)
expect_fp(100 + value, 200)
value += 100
expect_fp(value, 200)
def test_sub():
value = rel_fp(100)
expect_fp(value - value, 0)
expect_fp(value - 100, 0)
expect_fp(100 - value, 0)
value -= value
expect_fp(value, 0)
value -= 100
expect_fp(value, -100)
def test_mul():
value = rel_fp(100)
expect_fp(value * value, 100 * 100)
expect_fp(value * 100, 100 * 100)
expect_fp(100 * value, 100 * 100)
value *= 100
expect_fp(value, 100 * 100)
def test_div():
value = rel_fp(100)
expect_fp(value / value, 1.0)
expect_fp(value / 100, 1.0)
expect_fp(100 / value, 1.0)
value /= value
expect_fp(value, 1.0)
def test_floordiv():
value = rel_fp(111)
expect_fp(value // value, 1)
expect_fp(value // 10, 11)
expect_fp(11 // value, 0)
value //= 10
expect_fp(value, 11)
def test_exp():
value = rel_fp(3)
expect_fp(value ** value, 3 ** 3)
expect_fp(value ** 3, 3 ** 3)
expect_fp(3 ** value, 3 ** 3)
def test_divmod():
pass
def test_mod():
pass
def test_abs():
value = rel_fp(-100)
expect_fp(abs(value), 100)
def test_neg():
value = rel_fp(100)
expect_fp(-value, -100)
|
keystonetowersystems/fuzzyfloat | fuzzyfloat/meta.py | def eq_with_tolerances(ftype, rel_tol=1e-09, abs_tol=0.0):
def fp_eq(a, b):
return ftype.__le__(abs(a - b), max(rel_tol * max(abs(a), abs(b), 1), abs_tol))
return fp_eq
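# numeric example: with the default rel_tol=1e-09, 100.5 and 100.5000001 compare equal because
# |a - b| = 1e-07 <= 1e-09 * max(|a|, |b|, 1) ~= 1.005e-07 (see tests/test_fp_rel.py)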
class FuzzyFloatMeta(type):
def __new__(metaclass, name, bases, clsdict, ftype=float, rel_tol=1e-09, atol=0.0):
bases = (*bases, ftype)
fp = type.__new__(metaclass, name, bases, clsdict)
fp.__eq__ = eq_with_tolerances(ftype, rel_tol, atol)
fp.__le__ = lambda self, other: self < other or self == other
fp.__ge__ = lambda self, other: self > other or self == other
fp.__add__ = fp.__iadd__ = fp.__radd__ = lambda self, other: fp(ftype.__add__(self, other))
fp.__mul__ = fp.__imul__ = fp.__rmul__ = lambda self, other: fp(ftype.__mul__(self, other))
fp.__sub__ = fp.__isub__ = lambda self, other: fp(ftype.__sub__(self, other))
fp.__rsub__ = lambda self, other: fp(ftype.__rsub__(self, other))
fp.__truediv__ = fp.__itruediv__ = lambda self, other: fp(ftype.__truediv__(self, other))
fp.__rtruediv__ = lambda self, other: fp(ftype.__rtruediv__(self, other))
fp.__floordiv__ = lambda self, other: fp(ftype.__floordiv__(self, other))
fp.__rfloordiv__ = lambda self, other: fp(ftype.__rfloordiv__(self, other))
fp.__mod__ = lambda self, other: fp(ftype.__mod__(self, other))
fp.__rmod__ = lambda self, other: fp(ftype.__rmod__(self, other))
fp.__pow__ = lambda self, other: fp(ftype.__pow__(self, other))
fp.__rpow__ = lambda self, other: fp(ftype.__rpow__(self, other))
fp.__pos__ = lambda self: fp(ftype.__pos__(self))
fp.__neg__ = lambda self: fp(ftype.__neg__(self))
fp.__abs__ = lambda self: fp(ftype.__abs__(self))
fp.__round__ = lambda self: fp(ftype.__round__(self))
fp.__divmod__ = lambda self, other: tuple(fp(r) for r in ftype.__divmod__(self, other))
fp.__rdivmod__ = lambda self, other: tuple(fp(r) for r in ftype.__rdivmod__(self, other))
fp.__str__ = lambda self: '%s' % ftype.__str__(self)
fp.__repr__ = lambda self: '%s(%s)' % (name, ftype.__repr__(self))
return fp
|
keystonetowersystems/fuzzyfloat | setup.py | from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(
name='fuzzyfloat',
version='1.0.3',
description='Utility library that provides a floating point type with tolerance for equality comparisons',
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/keystonetowersystems/fuzzyfloat',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(exclude=('tests',)),
install_requires=[],
tests_require=[
"pytest",
"pytest-cov"
],
setup_requires=[
'tox>=3',
'coverage>=4'
],
python_requires='>=3',
zip_safe=True,
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
],
)
|
keystonetowersystems/fuzzyfloat | fuzzyfloat/__init__.py | """
"""
from .types import rel_fp, abs_fp
|
damm89/zgw-auth-backend | zgw_auth_backend/apps.py | from django.apps import AppConfig, apps
class ZgwAuthBackendConfig(AppConfig):
name = "zgw_auth_backend"
def ready(self):
register_spectacular_extensions()
def register_spectacular_extensions():
if not apps.is_installed("drf_spectacular"):
return
from .contrib import drf_spectacular # noqa
|
damm89/zgw-auth-backend | zgw_auth_backend/__init__.py | default_app_config = "zgw_auth_backend.apps.ZgwAuthBackendConfig"
|
damm89/zgw-auth-backend | zgw_auth_backend/migrations/0001_initial.py | # Generated by Django 2.2.13 on 2020-12-15 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="ApplicationCredentials",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"client_id",
models.CharField(
help_text="Client ID to identify external API's and applications that access this API.",
max_length=50,
unique=True,
verbose_name="client ID",
),
),
(
"secret",
models.CharField(
help_text="Secret belonging to the client ID.",
max_length=255,
verbose_name="secret",
),
),
],
options={
"verbose_name": "client credential",
"verbose_name_plural": "client credentials",
},
),
]
|
damm89/zgw-auth-backend | zgw_auth_backend/admin.py | from django.contrib import admin
from .models import ApplicationCredentials
@admin.register(ApplicationCredentials)
class ApplicationCredentialsAdmin(admin.ModelAdmin):
list_display = ("client_id",)
search_fields = ("clien_id",)
|
damm89/zgw-auth-backend | zgw_auth_backend/models.py | from django.db import models
from django.utils.translation import gettext_lazy as _
class ApplicationCredentials(models.Model):
client_id = models.CharField(
_("client ID"),
max_length=50,
unique=True,
help_text=_(
"Client ID to identify external API's and applications that access this API."
),
)
secret = models.CharField(
_("secret"), max_length=255, help_text=_("Secret belonging to the client ID.")
)
class Meta:
verbose_name = _("client credential")
verbose_name_plural = _("client credentials")
def __str__(self):
return self.client_id
|
damm89/zgw-auth-backend | zgw_auth_backend/contrib/drf_spectacular.py | from drf_spectacular.extensions import OpenApiAuthenticationExtension
class ZGWAuthenticationScheme(OpenApiAuthenticationExtension):
target_class = "zgw_auth_backend.authentication.ZGWAuthentication"
name = "ZGWAuthentication"
def get_security_definition(self, auto_schema):
return {
"type": "http",
"in": "beader",
"bearerFormat": "JWT",
}
|
damm89/zgw-auth-backend | tests/test_authentication.py | from django.contrib.auth import get_user_model
from django.test import override_settings
from django.urls import path
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework.test import APITestCase
from rest_framework.views import APIView
from zds_client import ClientAuth
from zgw_auth_backend.authentication import ZGWAuthentication
from zgw_auth_backend.models import ApplicationCredentials
User = get_user_model()
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
authentication_classes = (ZGWAuthentication,)
def get(self, request):
return Response({"a": 1})
urlpatterns = [
path("mock", MockView.as_view(), name="test"),
]
@override_settings(ROOT_URLCONF=__name__)
class ZGWAuthTests(APITestCase):
def test_missing_header(self):
response = self.client.get("/mock")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertFalse(User.objects.exists())
def test_invalid_credentials(self):
auth = ClientAuth(client_id="dummy", secret="secret")
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertFalse(User.objects.exists())
def test_missing_claims(self):
ApplicationCredentials.objects.create(client_id="dummy", secret="secret")
auth = ClientAuth(client_id="dummy", secret="secret")
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertFalse(User.objects.exists())
def test_valid_credentials(self):
ApplicationCredentials.objects.create(client_id="dummy", secret="secret")
auth = ClientAuth(
client_id="dummy",
secret="secret",
user_id="some-user",
user_representation="Some User",
email="<EMAIL>",
)
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(User.objects.count(), 1)
user = User.objects.get()
username = getattr(user, User.USERNAME_FIELD)
self.assertEqual(username, "some-user")
email = getattr(user, User.EMAIL_FIELD)
self.assertEqual(email, "<EMAIL>")
def test_no_duplicate_users(self):
ApplicationCredentials.objects.create(client_id="dummy", secret="secret")
User.objects.create(**{User.USERNAME_FIELD: "some-user"})
auth = ClientAuth(
client_id="dummy",
secret="secret",
user_id="some-user",
user_representation="Some User",
)
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(User.objects.count(), 1)
def test_add_email_to_user_with_empty_email_address(self):
ApplicationCredentials.objects.create(client_id="dummy", secret="secret")
user = User.objects.create(**{User.USERNAME_FIELD: "some-user"})
auth = ClientAuth(
client_id="dummy",
secret="secret",
user_id="some-user",
user_representation="Some User",
email="<EMAIL>",
)
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
user = User.objects.get(**{User.USERNAME_FIELD: "some-user"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.email, "<EMAIL>")
def test_change_email_of_user(self):
ApplicationCredentials.objects.create(client_id="dummy", secret="secret")
user = User.objects.create(
**{
User.USERNAME_FIELD: "some-user",
User.EMAIL_FIELD: "<EMAIL>",
}
)
auth = ClientAuth(
client_id="dummy",
secret="secret",
user_id="some-user",
user_representation="Some User",
email="<EMAIL>",
)
response = self.client.get(
"/mock", HTTP_AUTHORIZATION=auth.credentials()["Authorization"]
)
user = User.objects.get(**{User.USERNAME_FIELD: "some-user"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.email, "<EMAIL>")
|
damm89/zgw-auth-backend | zgw_auth_backend/zgw.py | import logging
from typing import Any, Dict, Optional
from django.utils.translation import gettext_lazy as _
import jwt
from rest_framework import exceptions
from .models import ApplicationCredentials
logger = logging.getLogger(__name__)
ALG = "HS256"
class ZGWAuth:
def __init__(self, encoded: str):
self.encoded = encoded
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.payload)
@property
def payload(self) -> Optional[Dict[str, Any]]:
if self.encoded is None:
return None
if not hasattr(self, "_payload"):
# decode the JWT and validate it
# jwt check
try:
payload = jwt.decode(
self.encoded,
options={"verify_signature": False},
algorithms=[ALG],
)
except jwt.DecodeError:
logger.info("Invalid JWT encountered")
raise exceptions.AuthenticationFailed(
_(
"JWT could not be decoded. Possibly you made a copy-paste mistake."
),
code="jwt-decode-error",
)
# get client_id
try:
client_id = payload["client_id"]
except KeyError:
raise exceptions.AuthenticationFailed(
_("`client_id` claim is missing in the JWT."),
code="missing-client-identifier",
)
# find client_id in DB and retrieve its secret
try:
jwt_secret = ApplicationCredentials.objects.exclude(secret="").get(
client_id=client_id
)
except ApplicationCredentials.DoesNotExist:
raise exceptions.AuthenticationFailed(
_("Client identifier does not exist"),
code="invalid-client-identifier",
)
else:
key = jwt_secret.secret
# check signature of the token
try:
payload = jwt.decode(
self.encoded,
key,
algorithms=[ALG],
)
except jwt.InvalidSignatureError:
logger.exception("Invalid signature - possible payload tampering?")
raise exceptions.AuthenticationFailed(
_("Client credentials are invalid."), code="invalid-jwt-signature"
)
self._payload = payload
return self._payload
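# A minimal usage sketch (illustrative only; the "dummy"/"secret" credentials and
# the claims below are hypothetical, and a matching ApplicationCredentials row is
# assumed to already exist in the database):
#
#   token = jwt.encode(
#       {"client_id": "dummy", "user_id": "some-user"}, "secret", algorithm=ALG
#   )
#   auth = ZGWAuth(token)
#   auth.payload["user_id"]  # -> "some-user" once both decode passes succeed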
|
damm89/zgw-auth-backend | zgw_auth_backend/authentication.py | # Mostly taken from https://github.com/VNG-Realisatie/vng-api-common/blob/master/vng_api_common/middleware.py
#
# We can't use vng-api-common at the moment because of the hard pinned dependencies on
# DRF 3.10 and drf-yasg 1.16.0
import logging
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from rest_framework.request import Request
from .zgw import ZGWAuth
logger = logging.getLogger(__name__)
class ZGWAuthentication(BaseAuthentication):
www_authenticate_realm = "api"
def authenticate(self, request: Request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b"bearer":
return None
if len(auth) == 1:
msg = _("Invalid bearer header. No credentials provided.")
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _(
"Invalid bearer header. Credentials string should not contain spaces."
)
raise exceptions.AuthenticationFailed(msg)
auth = ZGWAuth(auth[1].decode("utf-8"))
user_id = auth.payload.get("user_id")
if not user_id:
msg = _("Invalid 'user_id' claim. The 'user_id' should not be empty.")
raise exceptions.AuthenticationFailed(msg)
email = auth.payload.get("email", "")
return self.authenticate_user_id(user_id, email)
def authenticate_user_id(self, username: str, email: str):
UserModel = get_user_model()
fields = {UserModel.USERNAME_FIELD: username}
user, created = UserModel._default_manager.get_or_create(**fields)
if created:
msg = "Created user object for username %s" % username
logger.info(msg)
if email:
email_field = UserModel.get_email_field_name()
email_value = getattr(user, email_field)
if not email_value or email_value != email:
setattr(user, email_field, email)
user.save()
msg = "Set email to %s of user with username %s" % (email, username)
logger.info(msg)
return (user, None)
def authenticate_header(self, request):
return 'Bearer realm="%s"' % self.www_authenticate_realm
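# A minimal configuration sketch (assumed, not taken from this repository's docs):
# the class can be enabled globally through the standard DRF setting in the
# Django project's settings.py:
#
#   REST_FRAMEWORK = {
#       "DEFAULT_AUTHENTICATION_CLASSES": [
#           "zgw_auth_backend.authentication.ZGWAuthentication",
#       ],
#   }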
|
genaforvena/street_art_nn_api | tests/test_street_art_nn.py | #!flask/bin/python
import os
import tempfile
import pytest
from street_art_nn import street_art_nn
@pytest.fixture
def client(request):
db_fd, street_art_nn.app.config['DATABASE'] = tempfile.mkstemp()
street_art_nn.app.config['TESTING'] = True
client = street_art_nn.app.test_client()
with street_art_nn.app.app_context():
street_art_nn.init_db()
def teardown():
os.close(db_fd)
os.unlink(street_art_nn.app.config['DATABASE'])
request.addfinalizer(teardown)
return client
def login(client, username, password):
return client.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(client):
return client.get('/logout', follow_redirects=True)
def test_empty_db(client):
"""Start with a blank database."""
rv = client.get('/')
assert b'No entries here so far' in rv.data
def test_login_logout(client):
"""Make sure login and logout works"""
rv = login(client, street_art_nn.app.config['USERNAME'],
street_art_nn.app.config['PASSWORD'])
assert b'You were logged in' in rv.data
rv = logout(client)
assert b'You were logged out' in rv.data
rv = login(client, street_art_nn.app.config['USERNAME'] + 'x',
street_art_nn.app.config['PASSWORD'])
assert b'Invalid username' in rv.data
rv = login(client, street_art_nn.app.config['USERNAME'],
street_art_nn.app.config['PASSWORD'] + 'x')
assert b'Invalid password' in rv.data
|
genaforvena/street_art_nn_api | street_art_nn/__init__.py | from .street_art_nn import app
|
genaforvena/street_art_nn_api | street_art_nn/street_art_nn.py | #!flask/bin/python
import json
import sqlite3
import os
from geopy.distance import vincenty
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, jsonify
DATABASE = 'art_nn.db'
app = Flask(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, DATABASE),
DEBUG=True,
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='<PASSWORD>'
))
app.config['JSON_AS_ASCII'] = False
app.config.from_envvar('STREET_ART_SETTINGS', silent=True)
def connect_db():
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
print('Starting db init')
init_db()
print('Initialized the database.')
@app.cli.command('import_data')
def import_data():
filename = os.path.join(os.path.dirname(__file__), '../util/data.json')
with open(filename) as f:
data = json.load(f)
db = get_db()
rows = []
for artwork in data:
try:
rows.append([artwork["artist"], artwork["name"], artwork["year"], artwork["image"],
artwork["location"]["address"],
artwork["location"]["lng"],
artwork["location"]["lat"]])
        except (KeyError, TypeError):
# That's probably ok to ignore data errors here
continue
for row in rows:
db.execute('INSERT INTO art (artist, title, year, image, address, lng, lat) values (?, ?, ?, ?, ?, ?, ?)',
row)
db.commit()
@app.cli.command('add_test_data')
def add_test_data():
db = get_db()
db.execute('INSERT INTO art (artist, title, year, image, address, lng, lat) values ("artist", "title", "year", "image", "address", 24.54, 24.54)')
db.commit()
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/artworks/api/v1.0/closest', methods=['GET'])
def get_closest_artworks():
""" Returns closest artworks to given location
Should pass lat, lng and limit params with GET request
If one of them is missing - request will just fail
"""
lat = float(request.args['lat'])
lng = float(request.args['lng'])
limit = int(request.args['limit'])
artworks_json = _get_all_artworks()
def distance_km(artwork_from):
return vincenty((artwork_from["lat"], artwork_from["lng"]), (lat, lng)).km
    # sorted() in Python 3 no longer accepts cmp=; sort directly on the distance key
    sorted_by_distance = sorted(artworks_json, key=distance_km)
return jsonify({'artworks': sorted_by_distance[:limit]})
def _get_all_artworks():
cur = get_db().execute("SELECT * FROM art")
rv = cur.fetchall()
cur.close()
return [dict(x) for x in rv]
@app.route('/artworks/api/v1.0/artworks', methods=['GET'])
def get_artworks():
artworks_json = _get_all_artworks()
return jsonify({'artworks': artworks_json})
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('INSERT INTO art (artist, title, year, image, address, lng, lat) values (?, ?, ?, ?, ?, ?, ?)',
[request.form["artist"], request.form["title"],
request.form["year"],
request.form["image"],
request.form["address"], request.form["lng"], request.form["lat"]])
db.commit()
return redirect(url_for('show_entries'))
@app.route('/delete', methods=['POST'])
def delete_entry():
if not session.get('logged_in'):
        abort(401)
db = get_db()
db.execute('DELETE FROM art WHERE id=?', [request.form["id"]])
db.commit()
return redirect(url_for('show_entries'))
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select id, title, artist, image, address from art order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run(debug=True)
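# A minimal client sketch (illustrative; host, port and coordinates are made up).
# All three query parameters are required by get_closest_artworks, otherwise the
# request simply fails:
#
#   import requests
#   resp = requests.get(
#       "http://localhost:5000/artworks/api/v1.0/closest",
#       params={"lat": 55.75, "lng": 37.62, "limit": 5},
#   )
#   print(resp.json()["artworks"])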
|
jbuenof45401/ProyectoProgramacionCorte120191 | funciones.py | def calcular_precio_producto(coste_producto):
'''
    (num) -> float: selling price of the product
    Compute the product price by adding a 50% commission to its cost.
    >>> calcular_precio_producto(5000)
    7500.0
    >>> calcular_precio_producto(1000)
    1500.0
    :param coste_producto: cost of the product
    :return: selling price of the product
'''
return coste_producto + (coste_producto * 0.5)
def calcular_precio_servicio(cantidad_horas):
'''
    (num) -> num: price for the hours of service worked
    >>> calcular_precio_servicio(5)
    500000
    >>> calcular_precio_servicio(4)
    400000
    :param cantidad_horas: hours of service provided
    :return: value of the hours provided
'''
return cantidad_horas * 100000
def calcular_precio_servicio_extras(cantidad_horas):
'''
(num) -> float
    Takes the overtime hours worked and returns their value (normal rate plus a 25% surcharge).
    >>> calcular_precio_servicio_extras(1)
    125000.0
    >>> calcular_precio_servicio_extras(2)
    250000.0
    :param cantidad_horas: number of overtime hours worked
    :return: value of the overtime hours
'''
precio_hora_normal = calcular_precio_servicio(cantidad_horas)
return precio_hora_normal + (precio_hora_normal*0.25)
def calcular_costo_envio(kilometros):
'''
(num) -> num
    Takes the kilometres travelled and returns the delivery cost.
    >>> calcular_costo_envio(2)
    230
    >>> calcular_costo_envio(3)
    345
    :param kilometros: number of kilometres travelled
    :return: cost for the kilometres travelled
'''
return kilometros * 115
def calcular_precio_producto_fuera(coste_producto,
kilometros):
'''
    (num, num) -> float
    Compute the product price plus the cost of the kilometres travelled.
    >>> calcular_precio_producto_fuera(1000,2)
    1730.0
    >>> calcular_precio_producto_fuera(2000,1)
    3115.0
    :param coste_producto: num: cost of the product for Jose
    :param kilometros: num: kilometres travelled by Jose
    :return: float: product price plus the delivery surcharge
'''
return calcular_precio_producto(coste_producto) + calcular_costo_envio(kilometros)
def calcular_iva_producto(coste_producto, tasa):
'''
    (num, float) -> float
    VAT on the product's selling price.
    >>> calcular_iva_producto(500,0.19)
    142.5
    >>> calcular_iva_producto(1000,0.19)
    285.0
    :param coste_producto: gross cost of the product
    :param tasa: VAT rate to apply
    :return: float: VAT on the product
    '''
    return calcular_precio_producto(coste_producto) * tasa
def calcular_iva_servicio(cantidad_horas, tasa):
'''
(num, float) -> float
    VAT on the hours worked.
    >>> calcular_iva_servicio(2,0.19)
    38000.0
    >>> calcular_iva_servicio(4,0.19)
    76000.0
    :param cantidad_horas: num: number of hours worked
    :param tasa: float: VAT rate to apply
    :return: float: VAT on the hours worked
'''
return calcular_precio_servicio(cantidad_horas)*tasa
def calcular_iva_envio(kilometros, tasa):
'''
(num, float) -> float
    Compute the VAT on the delivery.
    >>> calcular_iva_envio(1,0.19)
    21.85
    >>> calcular_iva_envio(2,0.19)
    43.7
    :param kilometros: num: kilometres travelled
    :param tasa: float: VAT rate to apply
    :return: float: VAT applied to the delivery
'''
return calcular_costo_envio(kilometros) * tasa
def calcular_iva_servicio_extra(cantidad_horas, tasa):
'''
    (num, float) -> float
    Compute the VAT on the overtime hours.
    >>> calcular_iva_servicio_extra(2,0.19)
    47500.0
    >>> calcular_iva_servicio_extra(3,0.19)
    71250.0
    :param cantidad_horas: number of hours worked
    :param tasa: VAT rate to apply
    :return: VAT on the hours worked
'''
return calcular_precio_servicio_extras(cantidad_horas) * tasa
def calcular_recaudo_locales(coste_producto_1,
coste_producto_2,
coste_producto_3):
'''
    (num, num, num) -> float
    Compute the revenue from the sale of 3 products.
    >>> calcular_recaudo_locales(1000,2000,3000)
    9000.0
    >>> calcular_recaudo_locales(2000,3000,4000)
    13500.0
    :param coste_producto_1: cost of product 1
    :param coste_producto_2: cost of product 2
    :param coste_producto_3: cost of product 3
    :return: total revenue from the products
'''
recaudo = 0
recaudo += calcular_precio_producto(coste_producto_1)
recaudo += calcular_precio_producto(coste_producto_2)
recaudo += calcular_precio_producto(coste_producto_3)
return recaudo
def calcular_recaudo_horas_extra(horas_1,
horas_2,
horas_3,
horas_4):
'''
    (num, num, num, num) -> float
    Compute the revenue from the overtime hours of service.
    >>> calcular_recaudo_horas_extra(1,2,3,4)
    1250000.0
    >>> calcular_recaudo_horas_extra(2,3,4,5)
    1750000.0
    :param horas_1: hours worked 1
    :param horas_2: hours worked 2
    :param horas_3: hours worked 3
    :param horas_4: hours worked 4
    :return: revenue from the total hours worked
'''
recaudo=0
recaudo += calcular_precio_servicio_extras(horas_1)
recaudo += calcular_precio_servicio_extras(horas_2)
recaudo += calcular_precio_servicio_extras(horas_3)
recaudo += calcular_precio_servicio_extras(horas_4)
return recaudo
def calcular_recaudo_mixto_local(coste_producto_1,
coste_producto_2,
horas_1,
horas_2):
'''
    (num, num, num, num) -> float
    Compute the revenue from the products sold and the hours of service provided.
    >>> calcular_recaudo_mixto_local(1000,2000,1,2)
    304500.0
    >>> calcular_recaudo_mixto_local(2000,3000,2,3)
    507500.0
    :param coste_producto_1: cost of product 1
    :param coste_producto_2: cost of product 2
    :param horas_1: hours of service 1
    :param horas_2: hours of service 2
    :return: total revenue
'''
recaudo=0
recaudo+=calcular_precio_producto(coste_producto_1)
recaudo+=calcular_precio_producto(coste_producto_2)
recaudo+=calcular_precio_servicio(horas_1)
recaudo+=calcular_precio_servicio(horas_2)
return recaudo |
jbuenof45401/ProyectoProgramacionCorte120191 | pruebas.py | import unittest
import funciones as f
class pruebas(unittest.TestCase):
def test_calcular_precio_producto(self):
self.assertEqual(f.calcular_precio_producto(1000), 1500)
self.assertEqual(f.calcular_precio_producto(0), 0)
def test_calcular_precio_servicio(self):
self.assertEqual(f.calcular_precio_servicio(1000), 100000000)
self.assertNotEqual(f.calcular_precio_servicio(3), 30000)
self.assertEqual(f.calcular_precio_servicio(0), 0)
def test_calcular_precio_servicio_extras(self):
self.assertEqual(f.calcular_precio_servicio_extras(1), 125000.0)
self.assertEqual(f.calcular_precio_servicio_extras(3), 375000.0)
self.assertNotEqual(f.calcular_precio_servicio_extras(0), 0.25)
def test_calcular_costo_envio(self):
self.assertEqual(f.calcular_costo_envio(1), 115)
self.assertNotEqual(f.calcular_costo_envio(3), 315)
self.assertEqual(f.calcular_costo_envio(0), 0)
def test_calcular_precio_producto_fuera(self):
self.assertEqual(f.calcular_precio_producto_fuera(2000,4), 3460)
self.assertEqual(f.calcular_precio_producto_fuera(9000,50),19250)
self.assertNotEqual(f.calcular_precio_producto_fuera(40,1), 615)
def test_calcular_iva_producto(self):
self.assertEqual(f.calcular_iva_producto(800,0.19), 228.0)
self.assertNotEqual(f.calcular_iva_producto(0,0.19), 0.19)
self.assertEqual(f.calcular_iva_producto(589621,0.19), 168041.98500000002)
def test_calcular_iva_servicio(self):
self.assertNotEqual(f.calcular_iva_servicio(0,0.19), 0.19)
        self.assertEqual(f.calcular_iva_servicio(2,0.19), 38000.0)
        self.assertEqual(f.calcular_iva_servicio(4,0.19), 76000.0)
def test_calcular_iva_envio(self):
        self.assertEqual(f.calcular_iva_envio(5,0.19), 109.25)
        self.assertEqual(f.calcular_iva_envio(0,0.19), 0.0)
        self.assertNotEqual(f.calcular_iva_envio(8,0.19), 800000.19)
def test_calcular_iva_servicio_extra(self):
self.assertNotEqual(f.calcular_iva_servicio_extra(8,0.19), 152000.0)
self.assertEqual(f.calcular_iva_servicio_extra(3,0.19), 71250.0)
self.assertEqual(f.calcular_iva_servicio_extra(9,0.19), 213750.0)
def test_calcular_recaudo_locales(self):
self.assertNotEqual(f.calcular_recaudo_locales(8,10,0), 0.0)
self.assertEqual(f.calcular_recaudo_locales(3252,2000,5500), 16128.0)
self.assertEqual(f.calcular_recaudo_locales(46000,0,15000), 91500.0)
def test_calcular_recaudo_horas_extra(self):
self.assertEqual(f.calcular_recaudo_horas_extra(1,9,9,7), 3250000)
self.assertEqual(f.calcular_recaudo_horas_extra(4,0,25,21), 6250000)
self.assertNotEqual(f.calcular_recaudo_horas_extra(2,0,1,9), 0)
def test_calcular_recaudo_mixto_local(self):
        self.assertEqual(f.calcular_recaudo_mixto_local(1000,56900,5,3), 886850)
        self.assertEqual(f.calcular_recaudo_mixto_local(49500,5000,25,9), 3481750)
        self.assertNotEqual(f.calcular_recaudo_mixto_local(2,0,1,9), 0)
if __name__ == '__main__':
unittest.main()
|
crusaderky/pshell | pshell/tests/test_call.py | import io
import os
import sys
import tempfile
import time
import pytest
import pshell as sh
from . import DATADIR, StubError, unix_only, windows_only
if os.name == "nt":
HELLO_CMD = [os.path.join(DATADIR, "hello.bat")]
EXIT1_CMD = [os.path.join(DATADIR, "exit1.bat")]
SLEEP_CMD = [os.path.join(DATADIR, "sleep20.bat")]
else:
HELLO_CMD = ["bash", "-c", "echo Hello world!"]
EXIT1_CMD = ["bash", "-c", "exit 1"]
SLEEP_CMD = ["bash", "-c", "sleep 2"]
def test_real_fh_none():
# Required by call, check_call, check_output
with sh.real_fh(None) as rfh:
assert rfh is None
def test_real_fh_trivial():
# Real POSIX-backed file handle
with tempfile.TemporaryFile() as fh:
with sh.real_fh(fh) as rfh:
assert rfh is fh
def test_real_fh_stringio():
fh = io.StringIO()
with sh.real_fh(fh) as rfh:
assert rfh.fileno() > 2
rfh.write("Hello world")
assert fh.getvalue() == "Hello world"
def test_real_fh_bytesio():
fh = io.BytesIO()
with sh.real_fh(fh) as rfh:
assert rfh.fileno() > 2
rfh.write(b"Hello world")
assert fh.getvalue() == b"Hello world"
def test_real_fh_crash():
# Test that the output copy is wrapped by a `finally` clause,
# so that it is not lost if the wrapped code raises an Exception
fh = io.StringIO()
with pytest.raises(StubError):
with sh.real_fh(fh) as rfh:
rfh.write("Hello world")
raise StubError()
assert fh.getvalue() == "Hello world"
@pytest.mark.skip("no way of testing this with pytest")
def test_real_fh_nosetests(): # pragma: nocover
# sys.stdout and sys.stderr have been monkey-patched by nosetests
# with a custom class (not io.StringIO!)
with sh.real_fh(sys.stdout) as rfh:
assert rfh is not sys.stdout
assert rfh.fileno() > 2
rfh.write("Hello world")
with sh.real_fh(sys.stderr) as rfh:
assert rfh is not sys.stderr
assert rfh.fileno() > 2
rfh.write("Hello world")
def test_real_fh_fullpipe():
# Exceed the typical size of a pipe (64 kbytes on Linux)
# in an attempt to trigger a deadlock if the pipe isn't
# continuously flushed.
fh = io.StringIO()
payload = "x" * int(2 ** 20) # 1MB payload
with sh.real_fh(fh) as rfh:
rfh.write(payload)
assert fh.getvalue() == payload
@unix_only
def test_call():
assert sh.call('echo "Hello world!" > /dev/null') == 0
@unix_only
def test_call_quotes():
assert sh.call("echo 'Hello world!' > /dev/null") == 0
@unix_only
def test_call_errexit():
assert sh.call("notexist.sh") == 127
@unix_only
def test_call_nounset():
assert sh.call("echo $NOT_EXISTING_VARIABLE") == 1
@unix_only
def test_call_pipefail():
assert sh.call("cat NOTEXIST | cat") == 1
@unix_only
def test_call_obfuscate_pwd():
# TODO intercept logging
assert sh.call("echo -P mypass", obfuscate_pwd="<PASSWORD>") == 0
def test_call_noshell1():
assert sh.call(HELLO_CMD, shell=False) == 0
def test_call_noshell2():
with pytest.raises(FileNotFoundError):
sh.call("notexist", shell=False)
@unix_only
def test_call_timeout():
ts_start = time.time()
with pytest.raises(sh.TimeoutExpired):
sh.call("sleep 2", timeout=0.1)
assert time.time() - ts_start < 0.5
@unix_only
def test_call_real_fh_stringio():
stderr = io.StringIO()
stdout = io.StringIO()
assert sh.call("echo hello 1>&2 && echo world", stdout=stdout, stderr=stderr) == 0
assert stderr.getvalue() == "hello\n"
assert stdout.getvalue() == "world\n"
@unix_only
def test_call_real_fh_nosetests():
assert (
sh.call("echo hello 1>&2 && echo world", stdout=sys.stdout, stderr=sys.stderr)
== 0
)
@unix_only
def test_check_call():
sh.check_call('echo "Hello world!" > /dev/null')
@unix_only
def test_check_call_quotes():
sh.check_call("echo 'Hello world!' > /dev/null")
@unix_only
def test_check_call_errexit():
with pytest.raises(sh.CalledProcessError):
sh.check_call("notexist")
@unix_only
def test_check_call_nounset():
with pytest.raises(sh.CalledProcessError):
sh.check_call("echo $NOT_EXISTING_VARIABLE")
@unix_only
def test_check_call_pipefail():
with pytest.raises(sh.CalledProcessError):
sh.check_call("cat NOTEXIST | cat")
@unix_only
def test_check_call_obfuscate_pwd():
# TODO intercept logging
sh.check_call("echo -P mypass", obfuscate_pwd="<PASSWORD>")
def test_check_call_noshell1():
sh.check_call(HELLO_CMD, shell=False)
def test_check_call_noshell2():
with pytest.raises(sh.CalledProcessError):
sh.check_call(EXIT1_CMD, shell=False)
def test_check_call_noshell3():
with pytest.raises(FileNotFoundError):
sh.check_call("notexist", shell=False)
@unix_only
def test_check_call_timeout():
ts_start = time.time()
with pytest.raises(sh.TimeoutExpired):
sh.check_call("sleep 2", timeout=0.1)
assert time.time() - ts_start < 0.5
@unix_only
def test_check_call_real_fh_stringio():
stderr = io.StringIO()
stdout = io.StringIO()
sh.check_call("echo hello 1>&2 && echo world", stdout=stdout, stderr=stderr)
assert stderr.getvalue() == "hello\n"
assert stdout.getvalue() == "world\n"
@unix_only
def test_check_call_real_fh_nosetests():
assert (
sh.call("echo hello 1>&2 && echo world", stdout=sys.stdout, stderr=sys.stderr)
== 0
)
@unix_only
def test_check_output():
assert sh.check_output('echo -n "Hello world"') == "Hello world"
@unix_only
def test_check_output_quotes():
assert sh.check_output("echo 'Hello world!'") == "Hello world!\n"
@unix_only
def test_check_output_nodecode():
assert sh.check_output('echo -n "Hello world"', decode=False) == b"Hello world"
@unix_only
def test_check_output_unicode():
assert sh.check_output(r"printf '\xE2\x98\xA0'") == "☠"
# Test invalid unicode character
assert sh.check_output(r"printf '\x85'") == "�"
assert sh.check_output(r"printf '\x85'", errors="replace") == "�"
assert sh.check_output(r"printf '\x85'", errors="ignore") == ""
with pytest.raises(UnicodeDecodeError):
sh.check_output(r"printf '\x85'", errors="strict")
@unix_only
def test_check_output_errexit():
with pytest.raises(sh.CalledProcessError):
sh.check_output("notexist.sh")
@unix_only
def test_check_output_nounset():
with pytest.raises(sh.CalledProcessError):
sh.check_output("echo $NOT_EXISTING_VARIABLE")
@unix_only
def test_check_output_pipefail():
with pytest.raises(sh.CalledProcessError):
sh.check_output("cat NOTEXIST | cat")
@unix_only
def test_check_output_obfuscate_pwd():
# TODO intercept logging
assert sh.check_output("echo -P mypass", obfuscate_pwd="<PASSWORD>") == "-P <PASSWORD>"
@unix_only
def test_check_output_noshell1_unix():
assert sh.check_output(HELLO_CMD, shell=False) == "Hello world!\n"
@windows_only
def test_check_output_noshell1_win():
assert (
sh.check_output(HELLO_CMD, shell=False).splitlines()[-1].strip()
== "Hello world!"
)
def test_check_output_noshell2():
with pytest.raises(sh.CalledProcessError):
sh.check_output(EXIT1_CMD, shell=False)
def test_check_output_noshell3():
with pytest.raises(FileNotFoundError):
sh.check_output("notexist", shell=False)
# Do not change shell=False to True
# If the timeout expires, the child process will be killed
# and then waited for again. The TimeoutExpired exception will
# be re-raised after the child process has terminated.
# Also the timeout simply does not work in Windows.
@unix_only
def test_check_output_timeout():
ts_start = time.time()
with pytest.raises(sh.TimeoutExpired):
sh.check_output(SLEEP_CMD, timeout=0.1, shell=False)
assert time.time() - ts_start < 0.5
@unix_only
def test_check_output_real_fh_stringio():
stderr = io.StringIO()
sh.check_output("echo hello 1>&2", stderr=stderr)
assert stderr.getvalue() == "hello\n"
@unix_only
def test_check_output_real_fh_nosetests():
sh.check_output("echo hello 1>&2", stderr=sys.stderr)
@unix_only
def test_call_bad_cmd():
with pytest.raises(TypeError):
sh.call(HELLO_CMD, shell=True)
|
crusaderky/pshell | pshell/tests/test_env.py | import os
import pytest
import pshell as sh
from . import DATADIR, StubError, unix_only
@unix_only
def test_source(str_or_path):
os.environ.pop("UNITTEST_DATA_1", None)
os.environ["UNITTEST_DATA_2"] = "old"
# Also test variable name resolution
os.environ["UNITTEST_DATADIR"] = DATADIR
sh.source(str_or_path("$UNITTEST_DATADIR/source.sh"))
assert os.getenv("UNITTEST_DATA_1") == "foo"
assert os.getenv("UNITTEST_DATA_2") == "bar"
def test_resolve_env(str_or_path):
os.environ["UNITTEST_FOO"] = "foo"
os.environ["UNITTEST_BAR"] = "bar"
out = sh.resolve_env(str_or_path("$UNITTEST_FOO.${UNITTEST_BAR}"))
assert str(out) == "foo.bar"
assert isinstance(out, str_or_path)
with pytest.raises(EnvironmentError):
sh.resolve_env("$NOT_EXISTING_VARIABLE")
def test_putenv(str_or_path):
# Base use case
os.environ.pop("landgbashTEST1", None)
sh.putenv("landgbashTEST1", str_or_path("foo"))
assert os.environ["landgbashTEST1"] == "foo"
# Variable value contains another variable that must be resolved
os.environ.pop("landgbashTEST2", None)
sh.putenv("landgbashTEST2", "$landgbashTEST1/bar")
assert os.environ["landgbashTEST2"] == "foo/bar"
# Delete variable when it exists
sh.putenv("landgbashTEST1", None)
assert "landgbashTEST1" not in os.environ
# Delete variable when it does not exist
sh.putenv("landgbashTEST1", None)
assert "landgbashTEST1" not in os.environ
# Set blank variable (not the same as setting None, which deletes it)
sh.putenv("landgbashTEST1", "")
assert os.environ["landgbashTEST1"] == ""
def test_override_env(str_or_path):
os.environ.pop("landgbashTEST3", None)
os.environ["landgbashTEST4"] = "original"
with sh.override_env("landgbashTEST3", str_or_path("foo")):
with sh.override_env("landgbashTEST4", "$landgbashTEST3/bar"):
assert os.getenv("landgbashTEST3") == "foo"
assert os.getenv("landgbashTEST4") == "foo/bar"
assert "landgbashTEST3" not in os.environ
assert os.environ["landgbashTEST4"] == "original"
# Test that the cleanup also happens in case of Exception
with pytest.raises(StubError):
with sh.override_env("landgbashTEST3", "foo"):
assert os.getenv("landgbashTEST3") == "foo"
raise StubError()
assert "landgbashTEST3" not in os.environ
|
crusaderky/pshell | pshell/tests/test_search.py | import os
import pickle
from pathlib import Path
import pytest
import pshell as sh
def test_glob_iglob(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
# Create sample data
results = [
str_or_path(os.path.join(str(tmpdir), f"test{i}.txt")) for i in (1, 2, 3)
]
for fname in results:
with open(fname, "w"):
pass
# There's no guaranteed that glob will return the files in
# alphabetical order
assert sorted(sh.glob(str_or_path("$UNITTEST_BASH/test*.txt"))) == results
assert sorted(sh.iglob(str_or_path("$UNITTEST_BASH/test*.txt"))) == results
assert (
sorted(
sh.glob(
str_or_path("$UNITTEST_BASH/test*.txt"), min_results=3, max_results=3
)
)
== results
)
assert (
sorted(
sh.iglob(
str_or_path("$UNITTEST_BASH/test*.txt"), min_results=3, max_results=3
)
)
== results
)
# glob exceptions
with pytest.raises(sh.FileMatchError) as e:
sh.glob(str_or_path("$UNITTEST_BASH/test*.txt"), min_results=4)
assert (
str(e.value) == "File match '$UNITTEST_BASH/test*.txt' produced "
"3 results; expected at least 4"
)
with pytest.raises(sh.FileMatchError) as e:
sh.glob(str_or_path("$UNITTEST_BASH/test*.txt"), max_results=2)
assert (
str(e.value) == "File match '$UNITTEST_BASH/test*.txt' produced "
"3 results; expected up to 2"
)
with pytest.raises(sh.FileMatchError) as e:
sh.glob(str_or_path("$UNITTEST_BASH/test*.txt"), min_results=1, max_results=2)
assert (
str(e.value) == "File match '$UNITTEST_BASH/test*.txt' produced "
"3 results; expected between 1 and 2"
)
with pytest.raises(sh.FileMatchError) as e:
sh.glob(str_or_path("$UNITTEST_BASH/test*.txt"), min_results=2, max_results=2)
assert (
str(e.value) == "File match '$UNITTEST_BASH/test*.txt' produced "
"3 results; expected exactly 2"
)
# iglob exceptions
it = sh.iglob(str_or_path("$UNITTEST_BASH/test*.txt"), max_results=1)
# Make no assumption about the order
assert next(it) in results
with pytest.raises(sh.FileMatchError) as e:
next(it)
assert (
str(e.value) == "File match '$UNITTEST_BASH/test*.txt' produced at least 2 "
"results; expected up to 1"
)
it = sh.iglob(str_or_path("$UNITTEST_BASH/notfound"), min_results=1)
with pytest.raises(sh.FileMatchError) as e:
next(it)
assert (
str(e.value) == "File match '$UNITTEST_BASH/notfound' produced 0 results; "
"expected at least 1"
)
def test_glob_iglob_recursive(tmpdir):
# Test recursive glob and iglob
# Create sample data
expect = []
tmpdir.mkdir("a").mkdir("b")
tmpdir.mkdir("c")
for d in (os.path.join("a", "b"), "c"):
for i in (1, 2, 3):
fname = os.path.join(str(tmpdir), d, f"test{i}.txt")
expect.append(fname)
with open(fname, "w"):
pass
# Test recursive and non-recursive wildcards
# Make no assumptions about order
assert sorted(sh.glob(f"{tmpdir}/**/*.txt")) == expect
assert sorted(sh.iglob(f"{tmpdir}/**/*.txt")) == expect
assert sorted(sh.glob(f"{tmpdir}/*/*.txt")) == expect[3:]
assert sorted(sh.iglob(f"{tmpdir}/*/*.txt")) == expect[3:]
def test_glob_iglob_bad_args():
with pytest.raises(ValueError):
sh.glob(".", min_results=-1)
with pytest.raises(ValueError):
next(sh.iglob(".", min_results=-1))
with pytest.raises(ValueError):
sh.glob(".", min_results=2, max_results=1)
with pytest.raises(ValueError):
next(sh.iglob(".", min_results=2, max_results=1))
@pytest.mark.parametrize(
"args,s",
[
(
("foo", 1, None, 0),
"File match 'foo' produced 0 results; expected at least 1",
),
(
(Path("foo"), 1, None, 0),
"File match 'foo' produced 0 results; expected at least 1",
),
(("foo", 1, 1, 0), "File match 'foo' produced 0 results; expected exactly 1"),
(
("foo", 2, 3, 0),
"File match 'foo' produced 0 results; expected between 2 and 3",
),
(("foo", 0, 3, 4), "File match 'foo' produced 4 results; expected up to 3"),
(
("foo", 0, 3, 4, True),
"File match 'foo' produced at least 4 results; expected up to 3",
),
],
)
def test_filematcherror(args, s):
e = sh.FileMatchError(*args)
assert str(e) == s
# Exception with required arguments typically fail to unpickle
e2 = pickle.loads(pickle.dumps(e))
assert str(e2) == s
|
crusaderky/pshell | pshell/manipulate.py | """Functions for manipulating files
"""
from pathlib import Path
from typing import Sequence, Union
from . import log
from .open import pshell_open
__all__ = ("concatenate",)
PathLike = Union[str, Path]
def concatenate(
input_fnames: Sequence[PathLike], output_fname: PathLike, mode: str = "w", **kwargs
) -> None:
"""Concatenate files. Python equivalent of
:command:`cat input_fnames[0] input_fnames[1] ... > output_fname`.
:param input_fnames:
sequence of str. Paths to one or more input text files, to be appended
one after the other to the output.
:param output_fname:
Path to output text file, which may or may not already exist.
:param str mode:
Mode for opening the output file e.g. 'w' or 'ab'.
Defaults to text mode unless 'b' is explicitly declared.
:param kwargs:
Passed verbatim to all the underlying :func:`pshell.open` calls.
Among other things, this means that this function can transparently
deal with compressed files by inspecting their extension; different
files can use different compression algorithms as long as you use
``compression='auto'`` (the default).
If the output is opened in text mode, the inputs will be too; if any file
does not terminate with ``\\n``, it will be added. If the output is opened
in binary mode, the inputs will too; no extra bytes will be added between
files.
"""
log.info("Appending files: %s to: %s", input_fnames, output_fname)
if "b" in mode:
_concatenate_binary(input_fnames, output_fname, mode, **kwargs)
else:
_concatenate_text(input_fnames, output_fname, mode, **kwargs)
def _concatenate_binary(
input_fnames: Sequence[PathLike], output_fname: PathLike, mode: str, **kwargs
) -> None:
"""Implementation of concatenate for binary files
"""
with pshell_open(output_fname, mode, **kwargs) as ofh:
for fname in input_fnames:
with pshell_open(fname, "rb", **kwargs) as ifh:
for chunk in iter(lambda: ifh.read(65536), b""):
ofh.write(chunk)
def _concatenate_text(
input_fnames: Sequence[PathLike], output_fname: PathLike, mode: str, **kwargs
) -> None:
"""Implementation of concatenate for text files
"""
prepend_newline = False
if "a" in mode:
# Check if the last line of the first file ends with a \n
try:
# Discard from kwargs all parameters that are only applicable
# to text mode
kwargs_peek = kwargs.copy()
kwargs_peek.pop("newline", None)
kwargs_peek.pop("encoding", None)
kwargs_peek.pop("errors", None)
with pshell_open(output_fname, "rb", **kwargs_peek) as fh:
# Read last character
fh.seek(-1, 2)
# Won't work with \r terminator, which nobody cares about
# anyway. We really only care about \n (Unix and MacOSX)
# and \r\n (Windows).
prepend_newline = fh.read() != b"\n"
except FileNotFoundError as e:
log.info("%s", e)
except OSError:
# Empty file
log.info("Empty file: %s", output_fname)
with pshell_open(output_fname, mode, **kwargs) as ofh:
if prepend_newline:
ofh.write("\n")
for fname in input_fnames:
with pshell_open(fname, "r", **kwargs) as ifh:
for line in ifh:
ofh.write(line.rstrip("\r\n"))
ofh.write("\n")
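# A minimal usage sketch (file names are hypothetical). With the default
# compression='auto' each input's codec is inferred from its extension, so plain
# and gzip-compressed inputs can be mixed into a single uncompressed output:
#
#   import pshell as sh
#   sh.concatenate(["part1.txt", "part2.txt.gz"], "combined.txt")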
|
crusaderky/pshell | pshell/tests/test_open.py | import bz2
import gzip
import io
import lzma
import os
import psutil
import pytest
import pshell as sh
compression_param = pytest.mark.parametrize(
"openfunc,ext,compression",
[
(open, "", "auto"),
(gzip.open, ".gz", "auto"),
(bz2.open, ".bz2", "auto"),
(lzma.open, ".xz", "auto"),
(gzip.open, ".GZ", "auto"),
(bz2.open, ".BZ2", "auto"),
(lzma.open, ".XZ", "auto"),
(open, "", False),
(gzip.open, "", "gzip"),
(bz2.open, "", "bzip2"),
(lzma.open, "", "lzma"),
],
)
def check_fd_was_closed(fname):
fname = os.path.basename(fname)
for tup in psutil.Process().open_files():
assert fname not in tup.path
def test_check_fd_was_closed(tmpdir):
check_fd_was_closed("notexist")
with open(f"{tmpdir}/test_open.123", "w"):
with pytest.raises(AssertionError):
check_fd_was_closed("test_open")
check_fd_was_closed("test_open")
@compression_param
def test_open_context(str_or_path, tmpdir, openfunc, ext, compression):
os.environ["UNITTEST_BASH"] = str(tmpdir)
fname = f"{tmpdir}/test_open{ext}"
fname_env = str_or_path(f"$UNITTEST_BASH/test_open{ext}")
with sh.open(fname_env, "w", compression=compression) as fh:
fh.write("Hello world")
check_fd_was_closed("test_open")
with openfunc(fname, "rt") as fh:
assert fh.read() == "Hello world"
with sh.open(fname_env, "a", compression=compression) as fh:
fh.write(" and universe")
check_fd_was_closed("test_open")
with sh.open(fname_env, "r", compression=compression) as fh:
assert fh.read() == "Hello world and universe"
check_fd_was_closed("test_open")
@compression_param
def test_open_nocontext(str_or_path, tmpdir, openfunc, ext, compression):
fname = str_or_path(f"{tmpdir}/test_open{ext}")
fh = sh.open(fname, "w", compression=compression)
fh.write("Hello world")
fh.close()
check_fd_was_closed("test_open")
with openfunc(fname, "rt") as fh:
assert fh.read() == "Hello world"
@compression_param
def test_open_exclusive_success(str_or_path, tmpdir, openfunc, ext, compression):
fname = str_or_path(f"{tmpdir}/test_open{ext}")
with sh.open(fname, "x", compression=compression) as fh:
fh.write("Hello world")
with openfunc(fname, "rt") as fh:
assert fh.read() == "Hello world"
@compression_param
def test_open_exclusive_failure(tmpdir, openfunc, ext, compression):
fname = f"{tmpdir}/test_open{ext}"
with open(fname, "w"):
pass
with pytest.raises(FileExistsError):
sh.open(fname, "x", compression=compression)
@compression_param
def test_open_binary(str_or_path, tmpdir, openfunc, ext, compression):
fname = str_or_path(f"{tmpdir}/test_open{ext}")
with sh.open(fname, "wb", compression=compression) as fh:
fh.write(b"Hello world")
with openfunc(fname, "rb") as fh:
assert fh.read() == b"Hello world"
with sh.open(fname, "ab", compression=compression) as fh:
fh.write(b" and universe")
with sh.open(fname, "rb", compression=compression) as fh:
assert fh.read() == b"Hello world and universe"
@compression_param
def test_open_encoding(tmpdir, openfunc, ext, compression):
TEXT = "Crème brûlée"
TEXT_REPLACED = "Cr�me br�l�e"
fname_utf8 = f"{tmpdir}/test_utf8{ext}"
fname_latin1 = f"{tmpdir}/test_latin1{ext}"
with openfunc(fname_utf8, "wt", encoding="utf-8") as fh:
fh.write(TEXT)
with openfunc(fname_latin1, "wt", encoding="latin1") as fh:
fh.write(TEXT)
# sh.open must always default to utf-8
with sh.open(fname_utf8, compression=compression) as fh:
assert fh.read() == TEXT
with sh.open(fname_latin1, compression=compression, encoding="latin1") as fh:
assert fh.read() == TEXT
# sh.open must always default to replace unrecognized characters with ?
with sh.open(fname_latin1, compression=compression) as fh:
assert fh.read() == TEXT_REPLACED
with pytest.raises(UnicodeDecodeError):
with sh.open(fname_latin1, errors="strict", compression=compression) as fh:
fh.read()
@compression_param
@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"])
def test_open_kwargs(tmpdir, openfunc, ext, compression, newline):
# **kwargs are passed verbatim to the underlying function
fname = f"{tmpdir}/test_open{ext}"
with sh.open(fname, "w", compression=compression, newline=newline) as fh:
fh.write("Hello\nworld")
with openfunc(fname, "rb") as fh:
assert fh.read() == b"Hello" + newline.encode("utf8") + b"world"
# no compression support
def test_open_fd():
r, w = os.pipe()
with sh.open(r, "rb", buffering=0) as fh_r:
with sh.open(w, "wb", buffering=0) as fh_w:
fh_w.write(b"hello world\n")
assert fh_r.readline() == b"hello world\n"
def test_open_invalid_compression():
with pytest.raises(ValueError):
sh.open("foo", compression="unk")
def test_open_fd_invalid_compression():
r, _ = os.pipe()
with pytest.raises(TypeError):
sh.open(r, "rb", compression="gzip")
@pytest.mark.parametrize(
"decompress,compression",
[(gzip.decompress, "gzip"), (bz2.decompress, "bzip2"), (lzma.decompress, "lzma")],
)
def test_open_fh_compression(decompress, compression):
buf = io.BytesIO()
with sh.open(buf, "w", compression=compression) as fh:
fh.write("hello world")
assert decompress(buf.getvalue()) == b"hello world"
buf.seek(0)
with sh.open(buf, "r", compression=compression) as fh:
assert fh.read() == "hello world"
@pytest.mark.parametrize("compression", [False, "auto"])
def test_open_fh_no_compression(compression):
buf = io.BytesIO()
with pytest.raises(TypeError):
sh.open(buf, compression=compression)
|
crusaderky/pshell | pshell/search.py | """Search and file system traversal functions
"""
import glob as _glob
from pathlib import Path
from typing import Iterator, List, Optional, Union, overload
from . import log
from .env import resolve_env
__all__ = ("FileMatchError", "glob", "iglob")
class FileMatchError(Exception):
""":func:`glob` or :func:`iglob` returned not enough or too many matches
"""
@property
def pathname(self) -> Union[str, Path]:
return self.args[0]
@property
def min_results(self) -> int:
return self.args[1]
@property
def max_results(self) -> Optional[int]:
return self.args[2]
@property
def got_results(self) -> int:
return self.args[3]
@property
def maybe_extra_results(self) -> bool:
try:
return self.args[4]
except IndexError:
return False
def __str__(self) -> str:
msg = f"File match '{self.pathname}' produced "
if self.maybe_extra_results:
msg += "at least "
msg += f"{self.got_results} results; expected"
if self.max_results is None:
return f"{msg} at least {self.min_results}"
elif self.max_results == self.min_results:
return f"{msg} exactly {self.min_results}"
elif self.min_results > 0:
return f"{msg} between {self.min_results} and {self.max_results}"
else:
return f"{msg} up to {self.max_results}"
@overload
def glob(pathname: str, *, min_results: int = 0, max_results: int = None) -> List[str]:
... # pragma: nocover
@overload
def glob(
pathname: Path, *, min_results: int = 0, max_results: int = None
) -> List[Path]:
... # pragma: nocover
def glob(pathname, *, min_results=0, max_results=None):
"""Like :func:`glob.glob`, but in addition it supports environment
variables in pathname, logs the number of results, and incorporates
protection from non-existing paths.
:param pathname:
Bash-like wildcard expression. Can be a string or a :class:`pathlib.Path`.
:param int min_results:
Minimum number of expected results
:param int max_results:
Maximum number of expected results. Omit for no maximum.
:raises FileMatchError:
If found less results than min_results or more than max_results
:returns:
List of matching files or directories.
The return type of the outputs matches the type of pathname.
"""
if min_results < 0:
        raise ValueError("min_results must not be negative")
if max_results is not None and max_results < min_results:
raise ValueError("max_results must be greater or equal to min_results")
results = _glob.glob(resolve_env(str(pathname)), recursive=True)
if len(results) < min_results or (
max_results is not None and len(results) > max_results
):
raise FileMatchError(pathname, min_results, max_results, len(results))
log.info("File match %s produced %d results", pathname, len(results))
return [Path(r) for r in results] if isinstance(pathname, Path) else results
@overload
def iglob(
pathname: str, *, min_results: int = 0, max_results: int = None
) -> Iterator[str]:
... # pragma: nocover
@overload
def iglob(
pathname: Path, *, min_results: int = 0, max_results: int = None
) -> Iterator[Path]:
... # pragma: nocover
def iglob(pathname, *, min_results=0, max_results=None):
"""Like :func:`glob`, but returns an iterator instead.
Notice that, unlike with glob, you may have time to process some of the
results before :class:`FileMatchError` is raised.
In case ``max_results`` is exceeded, the iteration will stop
immediately - which will save time and memory.
Example::
>>> for fname in glob("test*.txt", max_results=2):
>>> print(fname)
        FileMatchError: File match 'test*.txt' produced 4 results;
        expected up to 2
>>> for fname in iglob("test*.txt", max_results=2):
>>> print(fname)
test1.txt
test2.txt
        FileMatchError: File match 'test*.txt' produced at least 3 results;
        expected up to 2
"""
if min_results < 0:
        raise ValueError("min_results must not be negative")
if max_results is not None and max_results < min_results:
raise ValueError("max_results must be greater or equal to min_results")
count = 0
for result in _glob.iglob(resolve_env(str(pathname)), recursive=True):
count += 1
if max_results is not None and count > max_results:
raise FileMatchError(pathname, min_results, max_results, count, True)
yield Path(result) if isinstance(pathname, Path) else result
if count < min_results:
raise FileMatchError(pathname, min_results, max_results, count)
log.info("File match %s produced %d results", pathname, count)
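# A minimal usage sketch (the environment variable and pattern are hypothetical):
# resolve $DATA_DIR and require between 1 and 10 matching files, raising
# FileMatchError otherwise:
#
#   import pshell as sh
#   csv_files = sh.glob("$DATA_DIR/*.csv", min_results=1, max_results=10)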
|
crusaderky/pshell | pshell/tests/data/sleep20_sigterm_ignore.py | #!/usr/bin/env python
"""Simple script that executes for 20s and ignores SIGTERM.
"""
import os
import signal
import time
def _handler(signum, _frame):
"""Print the incoming signal, then do nothing."""
print("Receive signal {signum}".format(signum=signum))
def main():
"""Register signal handler then sleep 1s for 20 times."""
pid = os.getpid()
signal.signal(signal.SIGTERM, _handler)
for i in range(1, 20 + 1):
time.sleep(1)
print("{pid}: count {i}".format(pid=pid, i=i))
if __name__ == "__main__":
main()
|
crusaderky/pshell | pshell/tests/test_procs.py | import getpass
import multiprocessing
import os
import socket
import subprocess
import sys
import time
import psutil
import pytest
import pshell as sh
from . import DATADIR
def spawn_test_proc():
"""Start a long-running process
"""
if os.name == "nt":
cmd = [os.path.join(DATADIR, "sleep20.bat")]
else:
cmd = ["bash", os.path.join(DATADIR, "sleep20.sh")]
popen = subprocess.Popen(cmd)
return psutil.Process(popen.pid)
def get_other_users_proc():
"""Find a process belonging to another user, which is not a parent of the
current process
"""
current = psutil.Process()
for proc in psutil.process_iter():
try:
if proc.username() == getpass.getuser():
continue # pragma: nocover
if all(c != current for c in proc.children(recursive=True)):
return proc
except psutil.AccessDenied: # pragma: nocover
continue
raise EnvironmentError(
"All processes belong to the current user"
) # pragma: nocover
def test_find_kill_procs(str_or_path):
"""Test pshell.find_procs_by_cmdline and pshell.kill
"""
os.environ["TEST_DATADIR"] = DATADIR
assert sh.find_procs_by_cmdline("this won't match anything") == []
assert sh.find_procs_by_cmdline("$TEST_DATADIR") == []
assert sh.find_procs_by_cmdline(str_or_path("$TEST_DATADIR")) == []
test_proc = spawn_test_proc()
after = sh.find_procs_by_cmdline(str_or_path("$TEST_DATADIR"))
# Both the bash and cmd variants of the test process spawn short-lived
# subprocesses. Testing for an exact match of 1 result causes instability
# in the unit tests.
assert test_proc in after
# Test substrings and OR'ed matches
after2 = sh.find_procs_by_cmdline("this won't match anything", DATADIR)
assert test_proc in after2
t1 = time.time()
sh.kill(test_proc)
t2 = time.time()
# Test that kill() did not wait the full 10 seconds since the process
# graciously responded to SIGTERM
assert t2 - t1 < 2
with pytest.raises(psutil.NoSuchProcess):
test_proc.status()
assert sh.find_procs_by_cmdline(str_or_path("$TEST_DATADIR")) == []
def test_killall(str_or_path):
spawn_test_proc()
# Test for 1+ processes.
# Don't test for exactly 1 process (see comment above)
assert sh.find_procs_by_cmdline(str_or_path(DATADIR))
sh.killall(str_or_path(DATADIR))
assert not sh.find_procs_by_cmdline(str_or_path(DATADIR))
def test_kill2():
"""Test pshell.kill:
- procs expressed as int PIDs
- silently skip processes not owned by current user
- silently skip non-existing processes
- silently skip None
- silently skip current process and ancestors of current process
- raise TypeError on unknown parameters
"""
current = psutil.Process()
sh.kill(
70000,
None,
current,
current.parent(),
current.parent().parent(),
get_other_users_proc(),
)
with pytest.raises(TypeError):
sh.kill("foo")
@pytest.mark.slow
@pytest.mark.skipif(
os.name == "nt",
reason="On Windows, os.kill() and psutil.kill() calls TerminateProcess "
"API which does not process signals (such as SIGTERM, SIGKILL "
"etc..) as ANSI/POSIX prescribed. The TerminateProcess API "
"unconditionally terminates the target process.",
)
def test_sigkill_sigterm_delay5():
"""Test that kill() will send a SIGTERM to kill the target first. Process
    that shuts itself down upon receiving SIGTERM will be able to do so
gracefully.
"""
cmd = [sys.executable, os.path.join(DATADIR, "sleep20_sigterm_delay5.py")]
subprocess.Popen(cmd)
time.sleep(1) # to allow enough time for python to start
procs = sh.find_procs_by_cmdline(DATADIR)
assert len(procs) == 1
t1 = time.time()
sh.kill(procs[0])
t2 = time.time()
duration_of_kill = t2 - t1
assert not sh.find_procs_by_cmdline(DATADIR)
assert duration_of_kill > 5 # target process SIGTERM handler delay is 5s
assert duration_of_kill < 10 # sh.kill() will retry SIGKILL in 10s
@pytest.mark.slow
@pytest.mark.skipif(
os.name == "nt",
reason="On Windows, os.kill() and psutil.kill() calls TerminateProcess "
"API which does not process signals (such as SIGTERM, SIGKILL "
"etc..) as ANSI/POSIX prescribed. The TerminateProcess API "
"unconditionally terminates the target process.",
)
@pytest.mark.parametrize(
"kwargs,min_elapsed,max_elapsed",
[({}, 10, 12), ({"term_timeout": 3}, 3, 5), ({"term_timeout": 0}, 0, 2)],
)
def test_sigkill_sigterm_ignore(kwargs, min_elapsed, max_elapsed):
"""Test terminating processes resilient to SIGTERM, which would ignore the
initial SIGTERM it receives. The kill() will attempt to shut the process
again later forcefully.
"""
cmd = [sys.executable, os.path.join(DATADIR, "sleep20_sigterm_ignore.py")]
subprocess.Popen(cmd)
time.sleep(1) # to allow enough time for python to start
procs = sh.find_procs_by_cmdline(DATADIR)
assert len(procs) == 1
t1 = time.time()
sh.kill(procs[0], **kwargs)
t2 = time.time()
elapsed = t2 - t1
assert not sh.find_procs_by_cmdline(DATADIR)
if min_elapsed > 0:
assert min_elapsed < elapsed
assert elapsed < max_elapsed
class ListenProcess(multiprocessing.Process):
    """Context manager that starts a subprocess that listens on one or more ports.
If sleep is set, it waits <sleep> seconds before listening on each port.
e.g.::
with ListenProcess(2000, 2001, sleep=1) as proc:
# start; not listening on any ports
# sleep 1 second
# open port 2000
# sleep 1 second
# open port 2001
# terminate on context exit
"""
def __init__(self, *ports: int, sleep: float = 0):
self.ports = ports
self.sleep = sleep
super().__init__()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
self.join()
def run(self): # pragma: nocover
sockets = []
for port in self.ports:
time.sleep(self.sleep)
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sk.bind(("localhost", port))
sk.listen(10)
sockets.append(sk)
while True:
time.sleep(1)
def test_wait_for_server():
# Test with PID; the process is not listening straight away
with ListenProcess(9123, sleep=0.1) as proc:
port = sh.wait_for_server(proc.pid)
assert port == 9123
# Test with psutil.Process; the process is already listening
psproc = psutil.Process(proc.pid)
port = sh.wait_for_server(psproc)
assert port == 9123
# Test dead process
with pytest.raises(psutil.NoSuchProcess):
sh.wait_for_server(psproc)
def test_wait_for_server_timeout():
with ListenProcess(9123, sleep=0.4) as proc:
with pytest.raises(TimeoutError):
sh.wait_for_server(proc.pid, timeout=0.05)
port = sh.wait_for_server(proc.pid, timeout=1)
assert port == 9123
def test_wait_for_server_multiport_whitelist():
with ListenProcess(9123, 9124, sleep=0.2) as proc:
port = sh.wait_for_server(proc.pid)
assert port == 9123
port = sh.wait_for_server(proc.pid, 9124)
assert port == 9124
def test_wait_for_server_multiport_blacklist():
with ListenProcess(9123, 9124, sleep=0.2) as proc:
port = sh.wait_for_server(proc.pid, ignore_ports=[9123])
assert port == 9124
|
crusaderky/pshell | pshell/file.py | """Functions for handling files and directories
"""
import datetime
import errno
import os
import shutil
import stat
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, Optional, Union, overload
from . import log
from .env import resolve_env
__all__ = (
"remove",
"chdir",
"pushd",
"move",
"copy",
"backup",
"symlink",
"exists",
"lexists",
"mkdir",
"owner",
)
PathLike = Union[str, Path]
def _unix_only() -> None:
"""Crash if running on Windows
"""
if os.name == "nt":
raise EnvironmentError("Not supported on Windows")
def remove(
path: PathLike,
*,
recursive: bool = False,
force: bool = True,
ignore_readonly: bool = False,
rename_on_fail: bool = False,
):
"""Remove file or directory
:param path:
Target file or directory
:param bool recursive:
If True, recursively delete tree starting at path
:param bool force:
If True, don't raise OSError if path doesn't exist
:param bool ignore_readonly:
If True, also delete files and directories with the read-only flag
:param bool rename_on_fail:
If True, don't raise OSError if deletion fails.
This typically happens if the user does not have enough permissions
to delete the file or directory, or in case of NFS locks.
In this case, rename the file to <path>.DELETEME.<timestamp>.
If the rename also fails, then raise OSError.
:raise FileNotFoundError:
If ``force==False`` and path doesn't exist
:raise OSError:
- if ``rename_on_fail==False`` and path can't be deleted
- if ``rename_on_fail==True`` and path can be neither deleted nor
renamed
"""
realpath = resolve_env(path)
log.info("Deleting %s", path)
try:
if os.path.islink(realpath):
os.remove(realpath)
elif recursive and os.path.isdir(realpath):
if ignore_readonly:
# Potentially perform a two-pass deletion
# On the first round, every time there is a failure deleting
# something do chmod u+w on the failed path and continue
has_errors = False
def onerror(function, path, excinfo):
# Do not act only on PermissionError.
# It could also be OSError('Directory not empty').
nonlocal has_errors
has_errors = True
try:
# chmod u+w
mode = os.stat(path).st_mode
os.chmod(path, mode | stat.S_IWUSR)
except OSError:
pass
shutil.rmtree(realpath, onerror=onerror)
# If there were any errors on the first round, perform a second
# deletion pass this time with no error control. At this point,
# if the only problems were caused by read-only files owned by
# the current user they should not crop up anymore. Raise
# exception in eny other case (e.g. directory owned by another
# user, file not found)
if has_errors:
shutil.rmtree(realpath)
else: # not ignore_readonly
# Directly perform recursive deletion with no error control.
# Raise exception in case of read-only files.
shutil.rmtree(realpath)
elif os.path.isdir(realpath):
os.rmdir(realpath)
else:
os.remove(realpath)
except OSError as e:
if force and e.errno == errno.ENOENT:
# Graciously do nothing if the file does not exist to begin with.
# Note: this is different than testing for existence and then
# deleting, as it prevents race conditions when the same path is
# being deleted from multiple scripts in parallel.
log.info("%s", e)
elif rename_on_fail and e.errno != errno.ENOENT:
log.warning("%s", e)
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
backup(path, suffix="DELETEME." + timestamp, action="move")
else:
raise
def chdir(path: PathLike) -> None:
"""Move the present-working directory (pwd) into the target directory.
"""
path = Path(path)
log.info("chdir %s", path)
os.chdir(resolve_env(path))
@contextmanager
def pushd(path: PathLike) -> Iterator[None]:
"""Context manager that moves the pwd into target directory. When leaving
the context, the pwd is changed back to what it originally was.
Usage::
with pushd("mydir"):
...
Is equivalent to the bash commands::
pushd mydir
...
popd
"""
path = Path(path)
cwd = os.getcwd()
log.info("pushd %s", path)
os.chdir(resolve_env(path))
try:
yield
finally:
log.info("popd")
os.chdir(cwd)
def move(src: PathLike, dst: PathLike) -> None:
"""Recursively move a file or directory (src) to another location (dst).
If the destination is a directory or a symlink to a directory, then src is
moved inside that directory. The destination directory must not already
exist. If the destination already exists but is not a directory, it may be
overwritten depending on :func:`os.rename` semantics.
"""
log.info("Moving %s to %s", src, dst)
shutil.move(resolve_env(str(src)), resolve_env(str(dst)))
def copy(src: PathLike, dst: PathLike, *, ignore=None) -> None:
"""Recursively copy a file or directory. If src is a regular file and dst
is a directory, a file with the same basename as src is created (or
overwritten) in the directory specified. Permission bits and last modified
dates are copied. Symlinks are preserved. Users and groups are discarded.
.. note::
This function behaves slightly differently from bash when src is a
directory. bash alters its behaviour if dst exists or not, e.g.::
$ mkdir foo
$ touch foo/hello.txt
$ cp -r foo bar # First copy; bar does not exist
$ cp -r foo bar # Identical command as before;
# but it will behave differently!
$ find
./bar
./bar/hello.txt
./bar/foo
./bar/foo/hello.txt
./foo
./foo/hello.txt
This function instead always requires the full destination path; the
second invocation of ``copy('foo', 'bar')`` will raise
:class:`FileExistsError` because ``bar`` already exists.
:param ignore:
Only effective when copying a directory. See :func:`shutil.copytree`.
"""
log.info("Copying %s to %s", src, dst)
src = resolve_env(src)
dst = resolve_env(dst)
if os.path.isdir(src):
if os.path.exists(dst):
raise FileExistsError(errno.EEXIST, "File exists", dst)
shutil.copytree(src, dst, symlinks=True, ignore=ignore)
else:
shutil.copy2(src, dst)
@overload
def backup(
path: str, *, suffix: str = None, force: bool = False, action: str = "copy"
) -> Optional[str]:
... # pragma: nocover
@overload
def backup(
path: Path, *, suffix: str = None, force: bool = False, action: str = "copy"
) -> Optional[Path]:
... # pragma: nocover
def backup(path, *, suffix=None, force=False, action="copy"):
"""Recursively copy or move a file of directory from <path> to
<path>.<suffix>.
:param path:
File or directory to back up. Can be a string or a :class:`pathlib.Path`.
:param str suffix:
suffix for the backup file. Default: .YYYYMMDD-HHMMSS
:param bool force:
if True, silently do nothing if file doesn't exist.
:param str action:
copy|move
:raise FileNotFoundError:
if path does not exist and force=False
:returns:
renamed path, or None if no backup was performed.
If path is a :class:`~pathlib.Path`, then the return value is also a
:class:`~pathlib.Path`.
"""
assert action in ("copy", "move")
if force and not os.path.lexists(resolve_env(path)):
# Do nothing
log.info("%s does not exist, skipping backup", path)
return None
if suffix is None:
suffix = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
path_bak = f"{path}.{suffix}"
    # In case of collision, name the subsequent backups .2, .3, etc.
i = 2
while os.path.lexists(resolve_env(path_bak)):
log.info("%s already exists, generating a unique name")
path_bak = f"{path}.{suffix}.{i}"
i += 1
if action == "copy":
copy(path, path_bak)
else:
move(path, path_bak)
return type(path)(path_bak)
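# A minimal, hypothetical usage sketch of backup() (not part of the original
# module); the file name "config.yaml" and the pre-existing file are assumptions.
def _example_backup_usage():  # illustrative only, never called by the library
    # First call produces "config.yaml.bak"; the second detects the collision
    # and falls back to "config.yaml.bak.2".
    first = backup("config.yaml", suffix="bak", action="copy")
    second = backup("config.yaml", suffix="bak", action="copy")
    return first, second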
def symlink(
src: PathLike, dst: PathLike, *, force: bool = False, abspath: bool = False
) -> None:
"""Create a symbolic link pointing to src named dst.
This exclusively works in Unix, on POSIX-compatible filesystems.
:param bool force:
if True, remove previous dst if it exists and it's a different symlink.
If it's the same symlink, do not replace it in order to prevent race
conditions.
:param bool abspath:
if False, build the shortest possible relative link. If True, generate
a link using absolute paths. This is regardless of src and dst being
absolute or relative paths, and regardless of the current working
directory (cwd).
Examples::
>>> symlink('/common/foo', '/common/bar')
/common/foo => bar
>>> symlink('/common/foo', '/common/bar', abspath=True)
/common/foo => /common/bar
>>> chdir('/common')
>>> symlink('foo', 'bar')
/common/foo => bar
>>> chdir('/common')
>>> symlink('foo', 'bar', abspath=True)
/common/foo => /common/bar
"""
_unix_only()
real_src = os.path.abspath(resolve_env(src))
real_dst = os.path.abspath(resolve_env(dst))
if force and os.path.islink(real_dst):
if os.path.abspath(os.path.realpath(real_dst)) == real_src:
log.info("Symlink %s => %s already exists", src, dst)
return
remove(dst)
log.info("Creating symlink %s => %s", src, dst)
if abspath:
os.symlink(real_src, real_dst)
else:
cwd_backup = os.getcwd()
os.chdir(os.path.realpath(os.path.dirname(real_dst)))
try:
# Generate shortest possible relative path
real_src = os.path.relpath(os.path.realpath(real_src))
real_dst = os.path.relpath(os.path.realpath(real_dst))
os.symlink(real_src, real_dst)
finally:
os.chdir(cwd_backup)
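# Hedged illustration of the abspath flag (assumed paths, not part of the
# original module): with abspath=False the link target is the shortest relative
# path, with abspath=True it is absolute.
def _example_symlink_usage():  # illustrative only, never called by the library
    symlink("/data/current/app.cfg", "/data/app.cfg")  # app.cfg -> current/app.cfg
    symlink("/data/current/app.cfg", "/data/app_abs.cfg", abspath=True)  # -> /data/current/app.cfg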
def exists(path: PathLike) -> bool:
"""Wrapper around :func:`os.path.exists`, with automated resolution of
environment variables and logging.
"""
respath = resolve_env(path)
if os.path.exists(respath):
log.debug("File exists: %s", path)
return True
log.debug("File does not exist or is a broken symlink: %s", path)
return False
def lexists(path: PathLike) -> bool:
"""Wrapper around :func:`os.path.lexists`, with automated resolution of
environment variables and logging.
"""
respath = resolve_env(path)
if os.path.lexists(respath):
log.debug("File exists: %s", path)
return True
log.debug("File does not exist: %s", path)
return False
def mkdir(path: PathLike, *, parents: bool = True, force: bool = True) -> None:
"""Create target directory.
This function is safe for use in concurrent environments, where multiple
actors try to simultaneously create the same directory.
:param path:
directory to be created
:param bool parents:
if True, also create parent directories if necessary.
:param bool force:
if True, do nothing if <path> already exists.
"""
respath = resolve_env(path)
log.info("Creating directory %s", path)
try:
if parents:
os.makedirs(respath)
else:
os.mkdir(respath)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if force and os.path.isdir(respath):
log.info("Directory %s already exists", path)
else:
raise
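# Sketch of the concurrency-safe pattern mkdir() is designed for (hypothetical
# path and environment variable): several workers may race to create the same
# directory, and with force=True none of them fails.
def _example_mkdir_usage():  # illustrative only, never called by the library
    mkdir("$WORKDIR/cache/2021", parents=True, force=True)
    mkdir("$WORKDIR/cache/2021", parents=True, force=True)  # second call is a no-op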
def owner(fname: PathLike) -> str:
"""Return the username of the user owning a file.
This function is not available on Windows.
"""
_unix_only()
# Unix-only module
import pwd
fname = resolve_env(fname)
numeric_uid = os.stat(fname).st_uid
return pwd.getpwuid(numeric_uid).pw_name
|
crusaderky/pshell | pshell/tests/test_log.py | import logging
import pshell as sh
from pshell import log
def test_log(caplog):
caplog.set_level(10)
log.debug("%d", 1)
assert sh.get_logger().name == "pshell"
sh.set_global_logger(logging.getLogger("g1"))
log.info("%d", 2)
assert sh.get_logger().name == "g1"
sh.set_global_logger("g2")
log.warning("%d", 3)
assert sh.get_logger().name == "g2"
tok = sh.context_logger.set(logging.getLogger("c"))
log.error("%d", 4)
assert sh.get_logger().name == "c"
sh.context_logger.reset(tok)
log.critical("%d", 5)
assert sh.get_logger().name == "g2"
sh.set_global_logger(None)
log.info("%d", 6)
assert sh.get_logger().name == "pshell"
assert caplog.record_tuples == [
("pshell", 10, "1"),
("g1", 20, "2"),
("g2", 30, "3"),
("c", 40, "4"),
("g2", 50, "5"),
("pshell", 20, "6"),
]
|
crusaderky/pshell | pshell/log.py | """Functions and global variables related to logging
"""
from contextvars import ContextVar
from logging import Logger, getLogger
from typing import Optional, Union
_global_logger: Optional[Logger] = None
context_logger: ContextVar[Optional[Logger]] = ContextVar(
"context_logger", default=None
)
def set_global_logger(logger: Union[Logger, str, None]) -> Optional[Logger]:
"""Set the pshell global logger. This logger will be used by all pshell functions
unless ``context_logger`` is defined.
:returns:
Previous global logger
"""
global _global_logger
prev = _global_logger
if isinstance(logger, str):
logger = getLogger(logger)
_global_logger = logger
return prev
def get_logger() -> Logger:
"""
#. If ``context_logger`` is set, return it.
#. Otherwise, if :func:`set_global_logger` was called, return the global logger.
#. Otherwise, return the **pshell** logger.
"""
ctx = context_logger.get()
if ctx:
return ctx
elif _global_logger:
return _global_logger
else:
return getLogger("pshell")
def debug(msg, *args, **kwargs) -> None:
"""Wrapper around :meth:`logging.Logger.debug` which uses the logger returned by
:func:`~pshell.get_logger`.
"""
get_logger().debug(msg, *args, **kwargs)
def info(msg, *args, **kwargs) -> None:
"""Wrapper around :meth:`logging.Logger.info` which uses the logger returned by
:func:`~pshell.get_logger`.
"""
get_logger().info(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""Wrapper around :meth:`logging.Logger.warning` which uses the logger returned by
:func:`~pshell.get_logger`.
"""
get_logger().warning(msg, *args, **kwargs)
def error(msg, *args, **kwargs) -> None:
"""Wrapper around :meth:`logging.Logger.error` which uses the logger returned by
:func:`~pshell.get_logger`.
"""
get_logger().error(msg, *args, **kwargs)
def critical(msg, *args, **kwargs) -> None:
"""Wrapper around :meth:`logging.Logger.critical` which uses the logger returned by
:func:`~pshell.get_logger`.
"""
get_logger().critical(msg, *args, **kwargs)
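# A minimal sketch (not part of the original module) of how the three logger
# levels interact; the logger names are assumptions.
def _example_logger_resolution():  # illustrative only, never called by the library
    set_global_logger("myapp")  # used whenever no context logger is set
    token = context_logger.set(getLogger("myapp.worker"))
    try:
        info("handled by the 'myapp.worker' logger")  # context logger wins
    finally:
        context_logger.reset(token)
    info("handled by the 'myapp' logger")  # falls back to the global logger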
|
crusaderky/pshell | pshell/__init__.py | """Convenience aggregator for all submodules
"""
from subprocess import CalledProcessError, TimeoutExpired # noqa: F401
import pkg_resources
from .call import call, check_call, check_output, real_fh # noqa: F401
from .env import override_env, putenv, resolve_env, source # noqa: F401
from .file import ( # noqa: F401
backup,
chdir,
copy,
exists,
lexists,
mkdir,
move,
owner,
pushd,
remove,
symlink,
)
from .log import context_logger, get_logger, set_global_logger # noqa: F401
from .manipulate import concatenate # noqa: F401
from .open import pshell_open as open # noqa: F401
from .procs import find_procs_by_cmdline, kill, killall, wait_for_server # noqa: F401
from .search import FileMatchError, glob, iglob # noqa: F401
try:
__version__ = pkg_resources.get_distribution("pshell").version
except Exception: # pragma: nocover
# Local copy, not installed with setuptools
__version__ = "999"
|
crusaderky/pshell | pshell/tests/test_file.py | import getpass
import glob
import os
import subprocess
import pytest
import pshell as sh
from . import StubError, unix_only
def test_remove(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
testpath = str_or_path(f"{tmpdir}/test_remove")
testpath_env = str_or_path("$UNITTEST_BASH/test_remove")
# remove file
with open(testpath, "w"):
pass
assert os.path.exists(testpath)
sh.remove(testpath_env)
assert not os.path.exists(testpath)
# remove dir
os.mkdir(testpath)
assert os.path.exists(testpath)
sh.remove(testpath_env)
assert not os.path.exists(testpath)
# recursive
os.mkdir(testpath)
os.mkdir(f"{testpath}/dir2")
sh.remove(testpath_env, recursive=True)
assert not os.path.exists(testpath)
# recursive must also work on a file
with open(testpath, "w"):
pass
assert os.path.exists(testpath)
sh.remove(testpath_env, recursive=True)
assert not os.path.exists(testpath)
@unix_only
def test_remove_symlinks(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
testpath = f"{tmpdir}/test_remove"
testpath_env = str_or_path("$UNITTEST_BASH/test_remove")
# remove dir and symlink to dir
os.mkdir(testpath)
os.symlink(testpath, testpath + ".lnk")
assert os.path.exists(testpath)
assert os.path.exists(testpath + ".lnk")
sh.remove(f"{testpath_env}.lnk")
sh.remove(testpath_env)
assert not os.path.exists(testpath)
assert not os.path.exists(testpath + ".lnk")
# recursive on a symlink to dir must delete the symlink
os.mkdir(testpath)
with open(f"{testpath}/donttouch", "w"):
pass
os.symlink(testpath, testpath + ".lnk")
sh.remove(f"{testpath_env}.lnk", recursive=True)
assert not os.path.exists(testpath + ".lnk")
assert os.path.exists(f"{testpath}/donttouch")
os.remove(f"{testpath}/donttouch")
os.rmdir(testpath)
def test_remove_force1():
with pytest.raises(FileNotFoundError):
sh.remove("NOTEXIST.txt", force=False)
def test_remove_force2():
sh.remove("NOTEXIST.txt", force=True)
def test_remove_noperm(tmpdir):
testpath = "%s/test_remove_noperm" % tmpdir
os.makedirs(testpath + "/foo/bar")
os.chmod(testpath + "/foo/bar", 0)
with pytest.raises(PermissionError):
sh.remove(testpath + "/foo", recursive=True)
sh.remove(testpath + "/foo", recursive=True, rename_on_fail=True)
assert not os.path.exists(testpath + "/foo")
assert len(glob.glob(testpath + "/foo.DELETEME.*")) == 1
def test_ignore_readonly1(tmpdir):
"""Test the ignore_readonly=True flag
"""
os.makedirs(f"{tmpdir}/foo/bar/baz")
os.chmod(f"{tmpdir}/foo/bar/baz", 0o500)
os.chmod(f"{tmpdir}/foo/bar", 0o500)
os.chmod(f"{tmpdir}/foo", 0o500)
with pytest.raises(PermissionError):
sh.remove(f"{tmpdir}/foo", recursive=True)
assert os.path.exists(f"{tmpdir}/foo/bar/baz")
sh.remove(f"{tmpdir}/foo", force=False, recursive=True, ignore_readonly=True)
assert not os.path.exists(f"{tmpdir}/foo")
def test_ignore_readonly2(tmpdir):
"""Test the case where there was no permission issue to begin with,
so a double call to shutil.rmtree would raise FileNotFoundError
"""
os.makedirs(f"{tmpdir}/foo/bar")
sh.remove(f"{tmpdir}/foo", force=False, recursive=True, ignore_readonly=True)
assert not os.path.exists(f"{tmpdir}/foo")
def test_chdir(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
assert os.getcwd() != str(tmpdir)
sh.chdir(str_or_path("$UNITTEST_BASH"))
assert os.getcwd() == str(tmpdir)
@pytest.mark.parametrize("use_env", [False, True])
def test_pushd(str_or_path, use_env, tmpdir):
d0 = os.getcwd()
assert d0 != str(tmpdir)
if use_env:
os.environ["UNITTEST_BASH"] = str(tmpdir)
dir_to = str_or_path("$UNITTEST_BASH")
else:
dir_to = str_or_path(tmpdir)
with sh.pushd(dir_to):
assert os.getcwd() == str(tmpdir)
# test that context manager is reentrant
tmpdir.mkdir("d1")
with sh.pushd(str_or_path("d1")):
assert os.getcwd() == os.path.join(str(tmpdir), "d1")
assert os.getcwd() == str(tmpdir)
assert os.getcwd() == d0
# Test that the cleanup also happens in case of Exception
with pytest.raises(StubError):
with sh.pushd(dir_to):
assert os.getcwd() == str(tmpdir)
raise StubError()
assert os.getcwd() == d0
def test_move(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
tmpdir.mkdir("test_move1")
sh.move(
str_or_path("$UNITTEST_BASH/test_move1"),
str_or_path("$UNITTEST_BASH/test_move2"),
)
assert not os.path.exists(f"{tmpdir}/test_move1")
assert os.path.exists(f"{tmpdir}/test_move2")
def test_copy(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
# single file - copy to file
with open(f"{tmpdir}/test_cp1", "w"):
pass
sh.copy(
str_or_path("$UNITTEST_BASH/test_cp1"), str_or_path("$UNITTEST_BASH/test_cp2")
)
assert os.path.exists(f"{tmpdir}/test_cp1")
assert os.path.exists(f"{tmpdir}/test_cp2")
# single file - copy to directory
tmpdir.mkdir("test_cp3")
sh.copy(
str_or_path("$UNITTEST_BASH/test_cp1"), str_or_path("$UNITTEST_BASH/test_cp3")
)
assert os.path.exists(f"{tmpdir}/test_cp1")
assert os.path.exists(f"{tmpdir}/test_cp3/test_cp1")
# recursive
tmpdir.mkdir("test_cp4")
tmpdir.mkdir("test_cp4/dir2")
sh.copy(
str_or_path("$UNITTEST_BASH/test_cp4"), str_or_path("$UNITTEST_BASH/test_cp5")
)
assert os.path.exists(f"{tmpdir}/test_cp4/dir2")
assert os.path.exists(f"{tmpdir}/test_cp5/dir2")
# input does not exist
def test_copy_err_input_not_found():
with pytest.raises(FileNotFoundError):
sh.copy("/does/not/exist", "$UNITTEST_BASH/")
# single file to non-existing directory
def test_copy_err_target_dir_not_found(tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
with open(f"{tmpdir}/test_cp_err2", "w"):
pass
with pytest.raises(FileNotFoundError):
sh.copy("$UNITTEST_BASH/test_cp_err2", "$UNITTEST_BASH/does/not/exist")
# directory to non-existing parent directory automatically creates parents
def test_copy_dir_to_missing_parent(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
tmpdir.mkdir("test_cpdir")
sh.copy(
str_or_path("$UNITTEST_BASH/test_cpdir"),
str_or_path("$UNITTEST_BASH/does/not/exist"),
)
assert os.path.exists(f"{tmpdir}/does/not/exist")
# directory to already existing target
def test_copy_err_fileexist(tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
tmpdir.mkdir("test_cp_err4a")
tmpdir.mkdir("test_cp_err4b")
with pytest.raises(FileExistsError):
sh.copy("$UNITTEST_BASH/test_cp_err4a", "$UNITTEST_BASH/test_cp_err4b")
def test_backup(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
fname = f"{tmpdir}/test"
fname_env = str_or_path("$UNITTEST_BASH/test")
with open(fname, "w"):
pass
# Auto extension
new_fname = sh.backup(fname_env, action="copy")
assert os.path.exists(fname)
assert os.path.exists(sh.resolve_env(new_fname))
# Manual extension
new_fname = sh.backup(fname_env, suffix="bak", action="copy")
assert os.path.exists(f"{tmpdir}/test.bak")
assert str(new_fname) == "$UNITTEST_BASH/test.bak"
assert isinstance(new_fname, str_or_path)
# Collisions in the backup name will generate a unique new name
new_fname = sh.backup(fname_env, suffix="bak", action="copy")
assert os.path.exists(f"{tmpdir}/test.bak.2")
assert str(new_fname) == "$UNITTEST_BASH/test.bak.2"
assert isinstance(new_fname, str_or_path)
# action='move'
new_fname = sh.backup(fname_env, action="move")
assert not os.path.exists(fname)
assert os.path.exists(sh.resolve_env(new_fname))
assert isinstance(new_fname, str_or_path)
def test_backup_notexist():
with pytest.raises(FileNotFoundError):
sh.backup("notexist.txt")
def test_backup_notexist_force():
assert sh.backup("notexist.txt", force=True) is None
@unix_only
def test_symlink(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
os.chdir("/")
with open(f"{tmpdir}/test_ln1", "w"):
pass
with open(f"{tmpdir}/test_ln2", "w"):
pass
# abspath = False
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln1"),
str_or_path("$UNITTEST_BASH/test_ln3"),
abspath=False,
)
assert (
subprocess.check_output(
"ls -l %s/test_ln3 | awk '{print $NF}'" % tmpdir, shell=True
)
== b"test_ln1\n"
)
os.remove(f"{tmpdir}/test_ln3")
# abspath = True
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln1"),
str_or_path("$UNITTEST_BASH/test_ln3"),
abspath=True,
)
assert (
subprocess.check_output(
"ls -l %s/test_ln3 | awk '{print $NF}'" % tmpdir, shell=True
).decode("utf-8")
== f"{tmpdir}/test_ln1\n"
)
# no force
with pytest.raises(FileExistsError):
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln2"),
str_or_path("$UNITTEST_BASH/test_ln3"),
force=False,
)
# force must work only to override another symlink, NOT another regular file
with pytest.raises(FileExistsError):
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln1"),
str_or_path("$UNITTEST_BASH/test_ln2"),
force=True,
)
# force; old symlink is different
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln2"),
str_or_path("$UNITTEST_BASH/test_ln3"),
force=True,
)
assert (
subprocess.check_output(
"ls -l %s/test_ln3 | awk '{print $NF}'" % tmpdir, shell=True
)
== b"test_ln2\n"
)
# force; old symlink is identical
sh.symlink(
str_or_path("$UNITTEST_BASH/test_ln2"),
str_or_path("$UNITTEST_BASH/test_ln3"),
force=True,
)
assert (
subprocess.check_output(
"ls -l %s/test_ln3 | awk '{print $NF}'" % tmpdir, shell=True
)
== b"test_ln2\n"
)
# Test that chdir didn't change
assert os.getcwd() == "/"
def test_exists(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
assert not sh.exists(str_or_path("$UNITTEST_BASH/test_exists"))
assert not sh.lexists(str_or_path("$UNITTEST_BASH/test_exists"))
with open(f"{tmpdir}/test_exists", "w"):
pass
assert sh.exists(str_or_path("$UNITTEST_BASH/test_exists"))
assert sh.lexists(str_or_path("$UNITTEST_BASH/test_exists"))
@unix_only
def test_exists_symlink(str_or_path, tmpdir):
os.symlink(f"{tmpdir}/a", f"{tmpdir}/b")
assert not sh.exists(str_or_path(f"{tmpdir}/b"))
assert sh.lexists(str_or_path(f"{tmpdir}/b"))
with open(f"{tmpdir}/a", "w"):
pass
assert sh.exists(f"{tmpdir}/b")
assert sh.lexists(f"{tmpdir}/b")
def test_mkdir(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
sh.mkdir(str_or_path("$UNITTEST_BASH/test_mkdir"), force=False, parents=False)
assert os.path.isdir(f"{tmpdir}/test_mkdir")
# Already existing
with pytest.raises(FileExistsError):
sh.mkdir(str_or_path("$UNITTEST_BASH/test_mkdir"), force=False, parents=False)
sh.mkdir(str_or_path("$UNITTEST_BASH/test_mkdir"), force=True, parents=False)
assert os.path.isdir(f"{tmpdir}/test_mkdir")
# Accidentally overwrite a non-directory
with open(f"{tmpdir}/test_mkdir_file", "w"):
pass
with pytest.raises(FileExistsError):
sh.mkdir(
str_or_path("$UNITTEST_BASH/test_mkdir_file"), force=True, parents=False
)
# Missing middle path
with pytest.raises(FileNotFoundError):
sh.mkdir(
str_or_path("$UNITTEST_BASH/middle/test_mkdir"), parents=False, force=False
)
sh.mkdir(str_or_path("$UNITTEST_BASH/middle/test_mkdir"), parents=True, force=False)
assert os.path.isdir(f"{tmpdir}/middle/test_mkdir")
@unix_only
def test_owner(str_or_path, tmpdir):
os.environ["UNITTEST_BASH"] = str(tmpdir)
with open(f"{tmpdir}/test_owner", "w"):
pass
assert sh.owner(str_or_path("$UNITTEST_BASH/test_owner")) == getpass.getuser()
|
crusaderky/pshell | pshell/tests/__init__.py | import os
import pytest
DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
class StubError(Exception):
"""Phony exception used to test that the cleanup in context managers always
happens
"""
pass
unix_only = pytest.mark.skipif(os.name == "nt", reason="Unix only")
windows_only = pytest.mark.skipif(os.name != "nt", reason="Windows only")
|
crusaderky/pshell | pshell/procs.py | """Utilities to manage running processes
"""
import getpass
import os
import time
from pathlib import Path
from typing import Collection, List, Union
import psutil
from . import log
from .env import resolve_env
__all__ = ("find_procs_by_cmdline", "kill")
def find_procs_by_cmdline(*cmdlines: Union[str, Path]) -> List[psutil.Process]:
"""Search all processes that have a partial match for at least one of the
given command lines. Command lines are parsed through :func:`resolve_env`.
For example, the command::
find_procs_by_cmdline('$MYROOT')
will return a match for the following processes:
- ``$MYROOT/static/scripts/something.sh``
- ``tail -f $LOGDIR/mylog.log``
- ``myservice.sh -c /algodata/someuser/root/cfg/myservice.cfg``
where:
- ``MYROOT=/algodata/someuser/root``
- ``LOGDIR=/algodata/someuser/root/log``
This method will only return processes for the current user.
.. warning::
Invoking this with relative paths can give erroneous results.
        For example, invoking it with 'foo' will also match
        'foo.pl', 'find_foos.sh', and 'vim foobar.cfg'.
.. warning::
This command can't match commands invoked with a relative path
if the search parameter is an absolute path.
e.g. ``find_procs_by_cmdline('$MYROOT')`` won't be able to match
``cd $MYROOT/bin && ./myscript``.
:param cmdlines:
        one or more paths or command lines to search for
:returns:
list of :class:`psutil.Process` objects
"""
matches = [resolve_env(str(x)) for x in cmdlines]
log.debug(
"Finding processes that match command lines:\n - %s", "\n - ".join(matches)
)
procs = []
for proc in psutil.process_iter():
try:
# On Windows, proc.username() ALWAYS fails
if os.name != "nt" and proc.username() != getpass.getuser():
continue
cmdline = " ".join(proc.cmdline())
for match in matches:
if cmdline.find(match) != -1:
log.debug("Process %d matches: %s", proc.pid, cmdline)
procs.append(proc)
break
except (psutil.NoSuchProcess, psutil.ZombieProcess):
# Process already died
pass
except psutil.AccessDenied:
# Windows-specific exception that makes psutil.Process.cmdline()
# fail for processes belonging to other users
pass
return procs
def kill(
*procs: Union[int, psutil.Process], term_timeout: Union[int, float] = 10
) -> None:
"""Send SIGTERM to one or more processes. After ``term_timeout`` seconds,
send SIGKILL to the surviving processes.
This function will return before ``term_timeout`` if all processes close
themselves following SIGTERM.
    This function gracefully skips processes that do not exist or for which the
user doesn't have enough permissions. It also automatically skips the
current process and its parents.
:param procs:
one or more PIDs (int) or :class:`psutil.Process` objects, e.g. as
returned by :func:`find_procs_by_cmdline`.
:param float term_timeout:
seconds to wait between SIGTERM and SIGKILL.
If ``term_timeout==0``, immediately send SIGKILL.
"""
# Strip list from current process and its parents
psutil_procs: List[psutil.Process] = []
my_pid = os.getpid()
for proc in procs:
# Convert any int PIDs to psutil.Process
if isinstance(proc, int):
try:
proc = psutil.Process(proc)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
log.debug(f"PID {proc} does not exist")
continue
elif proc is None:
# Silently skip - useful as e.g. psutil.Process.parent() can
# return None
continue
elif not isinstance(proc, psutil.Process):
raise TypeError(f"Expected int or psutil.Process; got {type(proc)}")
try:
if proc.pid == my_pid:
log.debug(
f"Not terminating PID {proc.pid} as it is the current process"
)
continue
children = (child.pid for child in proc.children(recursive=True))
if my_pid in children:
log.debug(
f"Not terminating PID {proc.pid} as it is a parent of "
"the current process",
)
continue
except (psutil.NoSuchProcess, psutil.ZombieProcess):
log.debug(f"PID {proc.pid} does not exist")
continue
psutil_procs.append(proc)
if not psutil_procs:
log.info("No processes terminated")
return
if term_timeout == 0:
kill_procs = psutil_procs
else:
kill_procs = []
log.info(
"Sending SIGTERM to PIDs %s",
",".join(str(proc.pid) for proc in psutil_procs),
)
for proc in psutil_procs:
try:
proc.terminate()
kill_procs.append(proc)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
# Process already died
pass
except psutil.AccessDenied:
log.info(f"Failed to send SIGTERM to PID {proc.pid}: access denied")
# Wait up to <term_timeout> seconds for SIGTERM to be received
_, kill_procs = psutil.wait_procs(kill_procs, term_timeout)
if kill_procs:
log.info(
"Sending SIGKILL to PIDs %s", ",".join(str(proc.pid) for proc in kill_procs)
)
for proc in kill_procs:
try:
proc.kill()
except (psutil.NoSuchProcess, psutil.ZombieProcess):
# Process already died
pass
except psutil.AccessDenied:
log.info(f"Failed to send SIGKILL to PID {proc.pid}: access denied")
log.info("All processes terminated")
def killall(*cmdlines: Union[str, Path], term_timeout: Union[int, float] = 10) -> None:
"""Find all processes with the target command line(s), send SIGTERM, and
then send SIGKILL to the survivors.
See :func:`find_procs_by_cmdline` and :func:`kill`.
"""
kill(*find_procs_by_cmdline(*cmdlines), term_timeout=term_timeout)
def wait_for_server(
proc: Union[int, psutil.Process],
port: int = None,
*,
ignore_ports: Collection[int] = None,
timeout: Union[int, float] = None,
) -> int:
"""Wait until either the process starts listening on the given port, or
it crashes because the port is occupied by something else.
:param proc:
psutil.Process or Process ID to observe
:param int port:
Port that needs to be opened in listening mode. If omitted, return when any one
port is opened.
:param ignore_ports:
List or set of ports to ignore (only meaningful when port is None).
:param int timeout:
Number of seconds to wait before giving up; omit for no timeout
:returns:
Opened port number
:raises psutil.NoSuchProcess:
If the process dies while waiting
:raises TimeoutError:
Timeout expired
Example:
.. code-block:: python
import subprocess
import pshell
proc = subprocess.Popen(["redis-server"])
port = pshell.wait_for_server(proc.pid)
assert port == 6379
This can also be used to start a server on port 0, which makes it
atomically pick up a random free port, and then retrieve said port.
"""
if isinstance(proc, int):
proc = psutil.Process(proc)
ignore_ports = set(ignore_ports) if ignore_ports else set()
if timeout is not None:
t0 = time.time()
while True:
# proc.connections() will raise Exception if the process dies
open_ports = {
conn.laddr.port for conn in proc.connections() if conn.status == "LISTEN"
}
open_ports -= ignore_ports
if port is None and open_ports:
return open_ports.pop()
if port is not None and port in open_ports:
return port
if timeout is not None and time.time() - t0 > timeout:
raise TimeoutError("Timeout expired while waiting for port to open")
time.sleep(0.01)
|
crusaderky/pshell | pshell/env.py | """Functions related to environment variables
"""
import os
import string
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Iterator, Union, overload
from . import log
from .call import check_output
__all__ = ("source", "putenv", "override_env", "resolve_env")
def source(bash_file: Union[str, Path], *, stderr: IO = None) -> None:
"""Emulate the bash command ``source <bash_file>``.
The stdout of the command, if any, will be redirected to stderr.
    The acquired variables are injected into ``os.environ`` and are
exposed to any subprocess invoked afterwards.
.. note::
This function is not available on Windows.
The script is always executed with bash. This includes when running in
Ubuntu and derivatives, where /bin/sh is actually dash.
The script is run with errexit, pipefail, nounset.
:param bash_file:
Path to the bash file. It can contain environment variables.
:param stderr:
standard error file handle. Omit for sys.stderr.
Unlike the same parameter for :func:`subprocess.call`, which must be
        backed by an OS-level file descriptor, this can be a
pseudo-stream like e.g. :class:`io.StringIO`.
:raise CalledProcessError:
if the command returns with non-zero exit status
"""
log.info("Sourcing environment variables from %s", bash_file)
stdout = check_output(f'source "{bash_file}" 1>&2 && env', stderr=stderr)
for line in stdout.splitlines():
(key, _, value) = line.partition("=")
if key not in ("_", "", "SHLVL") and os.getenv(key) != value:
log.debug("Setting environment variable: %s=%s", key, value)
os.environ[key] = value
def putenv(key: str, value: Union[str, Path, None]) -> None:
"""Set environment variable. The new variable will be visible to the
current process and all subprocesses forked from it.
Unlike :func:`os.putenv`, this method resolves environment variables in the
value, and it is immediately visible to the current process.
:param key:
Variable name
:param value:
Variable value. String to set a value, or None to delete the variable.
        It can reference other variables, e.g. ``${FOO}.${BAR}``.
:class:`~pathlib.Path` objects are transparently converted to strings.
"""
if value is None:
log.info("Deleting environment variable %s", key)
os.environ.pop(key, None)
else:
log.info("Setting environment variable %s=%s", key, value)
# Do NOT use os.putenv() - see python documentation
os.environ[key] = resolve_env(str(value))
@contextmanager
def override_env(key: str, value: Union[str, Path, None]) -> Iterator[None]:
"""Context manager that overrides an environment variable, returns control,
and then restores it to its original value (or deletes it if it did not
exist before).
:param key:
Variable name
:param value:
Variable value. String to set a value, or None to delete the variable.
        It can reference other variables, e.g. ``${FOO}.${BAR}``.
:class:`~pathlib.Path` objects are transparently converted to strings.
Example::
>>> print(os.environ['X'])
foo
>>> with override_env('X', 'bar'):
... print(os.environ['X'])
bar
>>> print(os.environ['X'])
foo
"""
orig = os.getenv(key)
putenv(key, value)
try:
yield
finally:
putenv(key, orig)
@overload
def resolve_env(s: str) -> str:
... # pragma: nocover
@overload
def resolve_env(s: Path) -> Path:
... # pragma: nocover
def resolve_env(s):
"""Resolve all environment variables in target string or :class:`~pathlib.Path`.
This command always uses the bash syntax ``$VARIABLE`` or ``${VARIABLE}``.
    This also applies on Windows. The Windows-native syntax ``%VARIABLE%`` is not
supported.
Unlike in :func:`os.path.expandvars`, undefined variables raise an
exception instead of being silently replaced by an empty string.
:param s:
string or :class:`~pathlib.Path` potentially containing environment variables
:returns:
resolved string, or :class:`~pathlib.Path` if the input is a
:class:`~pathlib.Path`
:raise EnvironmentError:
in case of missing environment variable
"""
try:
return type(s)(string.Template(str(s)).substitute(os.environ))
except KeyError as e:
raise EnvironmentError(f"Environment variable {e} not found")
|
crusaderky/pshell | pshell/tests/test_manipulate.py | import pytest
import pshell as sh
@pytest.mark.parametrize("newline", ["\n", "\r\n"])
def test_concatenate_t1(str_or_path, tmpdir, newline):
# Output file already exists and is non-empty. Files end without a newline.
# Test compression.
out = str_or_path(f"{tmpdir}/out.gz")
in1 = str_or_path(f"{tmpdir}/in1")
in2 = str_or_path(f"{tmpdir}/in2.bz2")
with sh.open(out, "w") as fh:
fh.write("1")
with sh.open(in1, "w") as fh:
fh.write("2\n3")
with sh.open(in2, "w") as fh:
fh.write("4")
n = newline.encode("utf-8")
sh.concatenate([in1, in2], out, "a", newline=newline)
with sh.open(out, "rb") as fh:
assert fh.read() == b"1" + n + b"2" + n + b"3" + n + b"4" + n
# Defaults to mode='w'
sh.concatenate([in1, in2], out, newline=newline)
with sh.open(out, "rb") as fh:
assert fh.read() == b"2" + n + b"3" + n + b"4" + n
@pytest.mark.parametrize("newline", ["\n", "\r\n"])
def test_concatenate_t2(str_or_path, tmpdir, newline):
# Output file already exists and is non-empty. Files end with a newline.
out = str_or_path(f"{tmpdir}/out")
in1 = str_or_path(f"{tmpdir}/in1")
in2 = str_or_path(f"{tmpdir}/in2")
with sh.open(out, "w", newline=newline) as fh:
fh.write("1\n")
with sh.open(in1, "w", newline=newline) as fh:
fh.write("2\n3\n")
with sh.open(in2, "w", newline=newline) as fh:
fh.write("4\n")
n = newline.encode("utf-8")
sh.concatenate([in1, in2], out, "a", newline=newline)
with sh.open(out, "rb") as fh:
assert fh.read() == b"1" + n + b"2" + n + b"3" + n + b"4" + n
sh.concatenate([in1, in2], out, newline=newline)
with sh.open(out, "rb") as fh:
assert fh.read() == b"2" + n + b"3" + n + b"4" + n
def test_concatenate_t3(str_or_path, tmpdir):
# Output file already exists and it is empty
out = str_or_path(f"{tmpdir}/out")
in1 = str_or_path(f"{tmpdir}/in1")
in2 = str_or_path(f"{tmpdir}/in2")
with sh.open(out, "w") as fh:
pass
with sh.open(in1, "w") as fh:
fh.write("2\n")
with sh.open(in2, "w") as fh:
fh.write("3\n")
sh.concatenate([in1, in2], out, "a")
with sh.open(out) as fh:
assert fh.read() == "2\n3\n"
sh.concatenate([in1, in2], out)
with sh.open(out) as fh:
assert fh.read() == "2\n3\n"
def test_concatenate_t4(str_or_path, tmpdir):
# Output file does not already exist
out = str_or_path(f"{tmpdir}/out")
in1 = str_or_path(f"{tmpdir}/in1")
in2 = str_or_path(f"{tmpdir}/in2")
with sh.open(in1, "w") as fh:
fh.write("2")
with sh.open(in2, "w") as fh:
fh.write("3")
sh.concatenate([in1, in2], out, "a")
with sh.open(out) as fh:
assert fh.read() == "2\n3\n"
sh.concatenate([in1, in2], out)
with sh.open(out) as fh:
assert fh.read() == "2\n3\n"
def test_concatenate_b(str_or_path, tmpdir):
# Binary mode
out = str_or_path(f"{tmpdir}/out")
in1 = str_or_path(f"{tmpdir}/in1")
in2 = str_or_path(f"{tmpdir}/in2")
with sh.open(out, "wb") as fh:
fh.write(b"1")
with sh.open(in1, "wb") as fh:
fh.write(b"2")
with sh.open(in2, "wb") as fh:
fh.write(b"3")
sh.concatenate([in1, in2], out, "ab")
with sh.open(out, "rb") as fh:
assert fh.read() == b"123"
sh.concatenate([in1, in2], out, "wb")
with sh.open(out, "rb") as fh:
assert fh.read() == b"23"
|
crusaderky/pshell | pshell/tests/conftest.py | from pathlib import Path
import pytest
@pytest.fixture(params=[str, Path])
def str_or_path(request):
"""Run a test that uses this fixture twice, with and without pathlib
Usage::
def test_open(str_or_path):
sh.open(str_or_path("foo/bar/baz"))
"""
return request.param
|
crusaderky/pshell | pshell/call.py | """Functions to execute shell commands in a subprocess
"""
import io
import os
import subprocess
import threading
from contextlib import contextmanager
from typing import IO, List, Optional, Tuple, Union
from . import log
__all__ = ("real_fh", "call", "check_call", "check_output")
_BASH_INIT = "set -o errexit -o pipefail -o nounset && "
"""Sane initialization string for new bash instances.
Set errexit, pipefail, and nounset.
"""
@contextmanager
def real_fh(fh: Optional[IO]):
"""The :mod:`io` module offers file-like objects which can be used to spoof
a file handle. Among other things, they are extensively used by nosetests
and py.test to capture stdout/stderr.
In most cases, this is transparent; however there are exceptions, like
the :mod:`subprocess` module, which require a file handle with a real
file descriptor underlying it - that is, an object which defines the
``fileno()`` method.
This context manager transparently detects these cases and automatically
converts pseudo file handlers from the :mod:`io` module into real
POSIX-based file handles.
:param fh:
Any of:
- A file handle opened for write and backed by a POSIX file descriptor,
e.g. as returned by :func:`open` or the default value of `sys.stdout`
or `sys.stderr`.
- A pseudo file handle such as :class:`io.StringIO`,
:class:`io.BytesIO`, or the stub used by nosetests to mock
`sys.stdout` and `sys.stderr`.
- None (default for most subprocess functions)
Usage::
buf = io.StringIO()
with real_fh(buf) as real_buf:
subprocess.check_call(cmd, stderr=real_buf)
All pshell functions that wrap around :mod:`subprocess` internally use this
context manager. You don't need to use it explicitly::
buf = io.StringIO()
pshell.check_call(cmd, stderr=buf)
"""
if fh is None:
yield fh
return
try:
fh.fileno()
except (AttributeError, OSError, io.UnsupportedOperation):
pass
else:
# File handle is backed by POSIX file descriptor
yield fh
return
# File handle isn't backed by POSIX fd
# Detect if it's a text or binary file handle
try:
fh.write("")
bin_flag = ""
except TypeError:
fh.write(b"")
bin_flag = "b"
# 1. Create a pipe
# 2. pass its write end to the context
# 3. read from its read end
# 4. dump contents into the pseudo file handle
fd_in, fd_out = os.pipe()
real_fh_in = open(fd_in, "r" + bin_flag, closefd=True)
real_fh_out = open(fd_out, "w" + bin_flag, closefd=True)
# The size of a pipe is 64 kbytes on Linux.
# If you try writing more than that without reading from the other
# side, the write will lock indefinitely, resulting in a deadlock.
# It's very easy to exceed this limit, e.g. when calling sh.check_call().
    # Use a thread to continuously move data between file handles.
def flush():
while True:
data = real_fh_in.read(4096)
if not data:
# real_fh_out has been closed and the pipe is empty
break
fh.write(data)
flusher = threading.Thread(target=flush)
flusher.start()
try:
yield real_fh_out
finally:
# Cause real_fh_in.read(4096) to return '' inside the flush
# thread, instead of blocking and waiting for more data
real_fh_out.close()
        # Wait for the pipe to be completely emptied
flusher.join()
real_fh_in.close()
def _call_cmd(
cmd: Union[str, List[str]], obfuscate_pwd: Optional[str], shell: bool
) -> Tuple[Union[str, List[str]], bool]:
"""Common internal helper of check_call, call, and check_output
that pre-processes the command to be executed
"""
log_cmd = cmd
if not isinstance(log_cmd, str):
log_cmd = '"' + '" "'.join(log_cmd) + '"'
if obfuscate_pwd:
log_cmd = log_cmd.replace(obfuscate_pwd, "XXXX")
if shell:
if not isinstance(cmd, str):
raise TypeError("cmd must be a string when shell=True")
if os.name != "nt":
cmd = [
"bash",
"-c",
"set -o errexit; set -o nounset; set -o pipefail; " + cmd,
]
shell = False
log.info("Executing: %s", log_cmd)
return cmd, shell
def call(
cmd: Union[str, List[str]],
*,
stdout: IO = None,
stdin: IO = None,
stderr: IO = None,
obfuscate_pwd: str = None,
shell: bool = True,
timeout: Union[int, float] = None,
) -> int:
"""Run another program in a subprocess and wait for it to terminate.
:param cmd:
Command to be executed (str or list). If shell=True, it must be a str.
:param stdout:
standard output file handle. Omit for sys.stdout.
Unlike the same parameter for :func:`subprocess.call`, which must be
        backed by an OS-level file descriptor, this can be a
pseudo-stream like e.g. :class:`io.StringIO`.
:param stdin:
standard input file handle. Omit for no input.
:param stderr:
standard error file handle. Omit for sys.stderr.
Unlike the same parameter for :func:`subprocess.call`, which must be
        backed by an OS-level file descriptor, this can be a
pseudo-stream like e.g. :class:`io.StringIO`.
:param str obfuscate_pwd:
if set, search for the target password and replace it with XXXX
before logging it.
:param bool shell:
        Invoke inside the shell. This differs from the same parameter of
        :func:`subprocess.call` in several ways:
        - It is True by default instead of False
        - On Linux and macOS, it sets some sane settings:
          errexit, nounset, pipefail
        - On Linux and macOS, it is always guaranteed to be bash.
          This differs from :func:`subprocess.call`, which invokes dash on
          Ubuntu and bash on RedHat.
          On Windows it is CMD.
:param float timeout:
        kill the command if it doesn't return within the timeout limit
:returns:
command exit code
:rtype:
int
"""
cmd, shell = _call_cmd(cmd, obfuscate_pwd, shell)
with real_fh(stdout) as rstdout, real_fh(stderr) as rstderr:
return subprocess.call(
cmd,
stdin=stdin,
stdout=rstdout,
stderr=rstderr,
timeout=timeout,
shell=shell,
)
def check_call(
cmd: Union[str, List[str]],
*,
stdout: IO = None,
stdin: IO = None,
stderr: IO = None,
obfuscate_pwd: str = None,
shell: bool = True,
timeout: Union[int, float] = None,
) -> None:
"""Run another program in a subprocess and wait for it to terminate; raise
exception in case of non-zero exit code.
See :func:`call` for parameters documentation.
:returns:
None
:raise CalledProcessError:
if the command returns a non-zero exit code
"""
cmd, shell = _call_cmd(cmd, obfuscate_pwd, shell)
with real_fh(stdout) as rstdout, real_fh(stderr) as rstderr:
subprocess.check_call(
cmd,
stdin=stdin,
stdout=rstdout,
stderr=rstderr,
timeout=timeout,
shell=shell,
)
def check_output(
cmd: Union[str, List[str]],
*,
stdin: IO = None,
stderr: IO = None,
obfuscate_pwd: str = None,
shell: bool = True,
timeout: Union[int, float] = None,
decode: bool = True,
encoding: str = "utf-8",
errors: str = "replace",
):
"""Run another program in a subprocess and wait for it to terminate; return
its stdout. Raise exception in case of non-zero exit code.
See :func:`call` for parameters documentation.
:param bool decode:
If True, decode the raw output to UTF-8 and return a str object.
If False, return the raw bytes object.
The default is to decode to UTF-8. This differs
from :func:`subprocess.check_output`, which always returns the raw
output.
:param str encoding:
Encoding of the raw bytes output. Ignored if decode=False.
:param str errors:
'replace', 'ignore', or 'strict'. See :meth:`bytes.decode`.
Ignored if decode=False. Note that the default value is ``replace``,
whereas the default in :meth:`bytes.decode` is ``strict``.
:returns:
command stdout
:rtype:
str or bytes (see ``decode`` parameter)
:raise CalledProcessError:
if the command returns a non-zero exit code
"""
cmd, shell = _call_cmd(cmd, obfuscate_pwd, shell)
with real_fh(stderr) as rstderr:
raw_output = subprocess.check_output(
cmd, stdin=stdin, stderr=rstderr, timeout=timeout, shell=shell
)
if decode:
return raw_output.decode(encoding=encoding, errors=errors)
return raw_output
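# Hedged sketch of check_output() with password obfuscation (command and
# password are made up for illustration): the secret is replaced with XXXX in
# the log line but passed verbatim to the subprocess.
def _example_check_output():  # illustrative only, never called by the library
    out = check_output("echo connecting with hunter2", obfuscate_pwd="hunter2")
    return out  # "connecting with hunter2\n", decoded to str by default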
|
crusaderky/pshell | pshell/open.py | """Functions to open file descriptors
"""
import os.path
from pathlib import Path
from typing import IO, BinaryIO, Callable, Union
from . import log
from .env import resolve_env
__all__ = ("pshell_open",)
# When importing in __init__, we're going to rename pshell_open to just open
def pshell_open(
file: Union[str, Path, int, BinaryIO],
mode: str = "r",
*,
encoding: str = None,
errors: str = None,
compression: Union[str, bool] = "auto",
**kwargs,
) -> IO:
"""Open a file handle to target file name or file descriptor.
Unlike the builtin function, this wrapper:
- performs automatic environment variable resolution in the file name
- logs the file access
- supports transparent compression
:param file:
Path to the file to be opened or file descriptor to be wrapped.
If compression is set to 'gzip', 'bzip2' or 'lzma', file can also be a binary
file handle.
:param str mode:
As in the builtin :func:`open` function. It always defaults to text
mode unless 'b' is explicitly specified; this is unlike
:func:`gzip.open`, :func:`bz2.open`, and :func:`lzma.open` which
instead default to binary mode.
:param str encoding:
Character encoding when in text mode. Unlike the builtin :func:`open`
function, it always defaults to utf-8 instead of being
platform-specific.
:param str errors:
As in the builtin :func:`open` function, but it defaults to ``replace``
instead of ``strict``.
:param compression:
One of:
False
No compression (use builtin :func:`open`)
'gzip'
gzip compression (use :func:`gzip.open`)
'bzip2':
bzip2 compression (use :func:`bz2.open`)
'lzma':
lzma compression (use :func:`lzma.open`)
'auto':
Automatically set compression if the file extension is ``.gz``,
``.bz2``, or ``.xz`` (case insensitive)
:param kwargs:
Passed verbatim to the underlying open function
"""
# Build log message and override default mode, encoding and errors
if "b" in mode:
mode_label = "binary "
else:
# Default to text mode if the user doesn't specify text or binary. This
# overrides gzip.open, bz2.open, lzma.open which default to binary.
if "t" not in mode:
mode += "t"
mode_label = ""
if encoding is None:
encoding = "utf-8"
if errors is None:
errors = "replace"
if "w" in mode:
mode_label += "write"
elif "x" in mode:
mode_label += "exclusive create"
elif "a" in mode:
mode_label += "append"
else:
mode_label += "read"
# Parse compression
if compression == "auto":
if isinstance(file, (str, Path)):
_, ext = os.path.splitext(str(file))
ext = ext.lower()
if ext == ".gz":
compression = "gzip"
elif ext == ".bz2":
compression = "bzip2"
elif ext == ".xz":
compression = "lzma"
else:
compression = False
else:
compression = False
if compression:
compress_label = " (%s compression)" % compression
else:
compress_label = ""
# resolve env variables and write log message.
if isinstance(file, (str, Path)):
log.info("Opening '%s' for %s%s", file, mode_label, compress_label)
file = resolve_env(file)
elif isinstance(file, int):
if compression:
raise TypeError("compression not supported when opening a file descriptor")
log.info("Opening file descriptor %d for %s", file, mode_label)
else:
log.info("Opening file handle for %s%s%s", file, mode_label, compress_label)
open_func: Callable[..., IO]
if compression is False:
open_func = open
elif compression == "gzip":
import gzip
open_func = gzip.open
elif compression == "bzip2":
import bz2
open_func = bz2.open
elif compression == "lzma":
import lzma
open_func = lzma.open
else:
raise ValueError(
"compression must be False, 'auto', 'gzip', 'bzip2', or 'lzma'"
)
return open_func(file, mode, encoding=encoding, errors=errors, **kwargs)
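# Minimal sketch of the 'auto' compression rule (the file name is an assumption):
# the extension alone selects the opener, and text mode with utf-8 is the default.
def _example_pshell_open():  # illustrative only, never called by the library
    with pshell_open("report.txt.gz", "w") as fh:  # gzip.open under the hood
        fh.write("hello\n")
    with pshell_open("report.txt.gz") as fh:  # transparently decompressed
        return fh.read()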
|
OverEuro/swarmtools | src/SwarmTools.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 18:46:11 2019
@author: EuroBrother
"""
import numpy as np
def func(x):
y = np.sum(x**2)
return y
class BasicPSO:
def __init__(self, num_params,
lbound,
ubound,
w = 0.5,
c1 = 1,
c2 = 2,
popsize = 15,
ada_w = False,
ada_c = False,
dire = 0,
bound_check = False):
self.num_params = num_params
self.lbound = lbound # array-like and size = num_params
self.ubound = ubound # same above
self.w = w
self.c1 = c1
self.c2 = c2
self.popsize = popsize
self.ada_w = ada_w
self.ada_c = ada_c
self.dire = dire # 0: descent; 1: ascent
self.bound_check = bound_check # True or False
self.lbounds = np.empty((self.popsize, self.num_params))
self.ubounds = np.empty((self.popsize, self.num_params))
self.solutions = np.empty((self.popsize, self.num_params))
self.velocitys = np.empty((self.popsize, self.num_params))
if self.dire == 0:
self.g_fit = np.inf
self.p_fits = np.ones(self.popsize) * np.inf
else:
self.g_fit = -np.inf
self.p_fits = -np.ones(self.popsize) * np.inf
self.g_pop = np.empty(self.num_params)
self.p_pops = np.empty((self.popsize, self.num_params))
def start(self):
'''initialize particles and velocity'''
self.lbounds = np.tile(self.lbound, (self.popsize, 1))
self.ubounds = np.tile(self.ubound, (self.popsize, 1))
self.solutions = self.lbounds + np.random.rand(self.popsize, self.num_params) * \
(self.ubounds - self.lbounds)
self.velocitys = (self.lbounds - self.ubounds) + np.random.rand(self.popsize, self.num_params) * \
(self.ubounds - self.lbounds) * 2
return self.solutions
def ask(self):
'''update all particles based on the basic PSO rule'''
g_pops = np.tile(self.g_pop, (self.popsize, 1))
R1 = np.random.rand(self.popsize, self.num_params)
R2 = np.random.rand(self.popsize, self.num_params)
self.velocitys = self.w*self.velocitys + self.c1*R1*(self.p_pops-self.solutions) + \
self.c2*R2*(g_pops-self.solutions)
self.solutions += self.velocitys
# bound check
if self.bound_check:
            # Reflect out-of-bound particles back across the violated boundary:
            # x' = 2*lb - x below the lower bound, x' = 2*ub - x above the upper.
            idb = np.where(self.solutions < self.lbounds)
            self.solutions[idb[0], idb[1]] = 2*self.lbounds[idb[0], idb[1]] - self.solutions[idb[0], idb[1]]
            idu = np.where(self.solutions > self.ubounds)
            self.solutions[idu[0], idu[1]] = 2*self.ubounds[idu[0], idu[1]] - self.solutions[idu[0], idu[1]]
return self.solutions
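    # For reference, ask() above implements the canonical PSO update (a
    # restatement of the code, not new behaviour):
    #   v_i <- w*v_i + c1*r1*(p_i - x_i) + c2*r2*(g - x_i)
    #   x_i <- x_i + v_i
    # where p_i is particle i's personal best and g is the swarm's global best.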
def tell(self, fit_array):
'''update p_best and g_best'''
if self.dire == 0:
idx = np.where(fit_array < self.p_fits)[0]
self.p_fits[idx] = fit_array[idx]
self.p_pops[idx, :] = np.copy(self.solutions[idx, :])
idb = np.argmin(fit_array)
if fit_array[idb] < self.g_fit:
self.g_fit = fit_array[idb]
self.g_pop = np.copy(self.solutions[idb, :])
else:
idx = np.where(fit_array > self.p_fits)[0]
self.p_fits[idx] = fit_array[idx]
self.p_pops[idx, :] = np.copy(self.solutions[idx, :])
idb = np.argmax(fit_array)
if fit_array[idb] > self.g_fit:
self.g_fit = fit_array[idb]
self.g_pop = np.copy(self.solutions[idb, :])
def current_best(self):
'''get best params and cost function value'''
best_params = np.copy(self.g_pop)
best_fit = np.copy(self.g_fit)
return (best_params, best_fit)
def step(self, step_size=0.001):
        '''Linearly decrease the inertia weight'''
        assert self.ada_w, 'Please set ada_w=True if you want to use adaptive weight'
self.w -= step_size
if self.w <= 0:
self.w = step_size
if __name__=="__main__":
dim = 2
lb = np.array([-10, -10])
ub = np.array([10, 10])
PSO = BasicPSO(dim, lb, ub, popsize=15, ada_w=True, dire=0, bound_check=True)
solutions = PSO.start() # initial
fit_array = np.empty(PSO.popsize)
for i in range(500):
for j in range(PSO.popsize):
fit_array[j] = func(solutions[j, :])
PSO.tell(fit_array)
solutions = PSO.ask()
res = PSO.current_best()
PSO.step()
print('Iter:', i, ' bestv:', res[1])
|
OverEuro/swarmtools | src/exp_training_nn4rl/swarmtools.py | import numpy as np
import matplotlib.pyplot as plt
def func(x):
# y = np.sum(x**2)
y = 100 * np.sum((x[:-1]**2-x[1:])**2) + np.sum((x[:-1]-1)**2)
return y
'''Optimizers Class'''
class Optimizer(object):
def __init__(self, obj, epsilon=1e-08):
self.obj = obj
self.dim = obj.dim
self.eps = epsilon
self.t = 0
def update(self, der):
self.t += 1
dir_ = self._compute_step(der)
the = self.obj.mu
ratio = np.linalg.norm(dir_) / (np.linalg.norm(the) + self.eps)
self.obj.mu = the + dir_
return ratio
def _compute_step(self, der):
raise NotImplementedError
class Adam(Optimizer):
def __init__(self, obj, stepsize, beta1=0.99, beta2=0.999):
Optimizer.__init__(self, obj)
self.stepsize = stepsize
self.beta1 = beta1
self.beta2 = beta2
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
def _compute_step(self, der):
w = self.stepsize * np.sqrt(1-self.beta2**self.t)/(1-self.beta1**self.t)
self.m = self.beta1 * self.m + (1-self.beta1) * der
self.v = self.beta2 * self.v + (1-self.beta2) * (der * der)
dir_ = w * self.m / (np.sqrt(self.v) + self.eps)
return dir_
class BasicSGD(Optimizer):
def __init__(self, obj, stepsize):
Optimizer.__init__(self, obj)
self.stepsize = stepsize
def _compute_step(self, der):
dir_ = self.stepsize * der
return dir_
class SGD(Optimizer):
def __init__(self, obj, stepsize, momentum=0.9):
Optimizer.__init__(self, obj)
self.v = np.zeros(self.dim, dtype=np.float32)
self.stepsize = stepsize
self.m = momentum
def _compute_step(self, der):
self.v = self.m * self.v + self.stepsize*der
dir_ = self.v
return dir_
class StepLR:
def __init__(self, optimizer, step=10, gamma=.1):
self.step = step
self.gamma = gamma
self.optimizer = optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
def update_lr(self):
if self.optimizer.t % self.step == 0:
self.optimizer.stepsize *= self.gamma
class BasicPSO:
def __init__(self, num_params,
low_bound,
up_bound,
w=0.9,
c1=2,
c2=2,
popsize=15,
ada_w=False,
ada_c=False,
dire=0,
bound_check=False):
self.num_params = num_params
self.w = w
self.c1 = c1
self.c2 = c2
self.popsize = popsize
self.ada_w = ada_w
self.ada_c = ada_c
self.dire = dire # 0: descent; 1: ascent
self.bound_check = bound_check # True or False
self.lbound = low_bound # array-like and size = num_params
self.ubound = up_bound # same above
self.solutions = np.empty((self.popsize, self.num_params))
self.velocity = np.empty((self.popsize, self.num_params))
if self.ada_w:
self.step_size = np.empty(1)
if self.dire == 0:
self.g_fit = np.inf
self.p_fits = np.ones(self.popsize) * np.inf
else:
self.g_fit = -np.inf
self.p_fits = -np.ones(self.popsize) * np.inf
self.g_pop = np.empty(self.num_params)
self.p_pops = np.empty((self.popsize, self.num_params))
def start(self):
# initialize particles and velocity
self.solutions = self.lbound + np.random.rand(self.popsize, self.num_params) * \
(self.ubound - self.lbound)
self.velocity = (self.lbound - self.ubound)/2 + np.random.rand(self.popsize, self.num_params) * \
(self.ubound - self.lbound)
return self.solutions
def ask(self, check_type=None):
# update all particles based on the basic PSO rule
r1 = np.random.rand(self.popsize, self.num_params)
r2 = np.random.rand(self.popsize, self.num_params)
self.velocity = self.w*self.velocity + self.c1*r1*(self.p_pops-self.solutions) + \
self.c2*r2*(self.g_pop-self.solutions)
self.solutions += self.velocity
# bound check
if self.bound_check:
            # clip to the box or re-sample out-of-bound particles, depending on check_type
if check_type == 'box':
self.solutions = np.clip(self.solutions, self.lbound, self.ubound)
if check_type == 'restart':
rand_mat = self.lbound + np.random.rand(self.popsize, self.num_params) * \
(self.ubound - self.lbound)
idb = np.where(self.solutions < self.lbound)
self.solutions[idb[0], idb[1]] = rand_mat[idb[0], idb[1]]
idu = np.where(self.solutions > self.ubound)
self.solutions[idu[0], idu[1]] = rand_mat[idu[0], idu[1]]
return self.solutions
def tell(self, fit_array):
# update p_best and g_best
if self.dire == 0:
idx = np.where(fit_array < self.p_fits)[0]
self.p_fits[idx] = fit_array[idx]
self.p_pops[idx, :] = np.copy(self.solutions[idx, :])
idb = np.argmin(fit_array)
if fit_array[idb] < self.g_fit:
self.g_fit = fit_array[idb]
self.g_pop = np.copy(self.solutions[idb, :])
else:
idx = np.where(fit_array > self.p_fits)[0]
self.p_fits[idx] = fit_array[idx]
self.p_pops[idx, :] = np.copy(self.solutions[idx, :])
idb = np.argmax(fit_array)
if fit_array[idb] > self.g_fit:
self.g_fit = fit_array[idb]
self.g_pop = np.copy(self.solutions[idb, :])
def current_best(self):
# get best params and cost function value
best_params = np.copy(self.g_pop)
best_fit = np.copy(self.g_fit)
return [best_params, best_fit]
def step(self, epoch_tol, epoch, end_w):
        # Linearly decrease the inertia weight
assert self.ada_w, 'Please set ada_w=True if you want to use adaptive weight'
if epoch == 0:
self.step_size = (self.w - end_w) / epoch_tol
self.w -= self.step_size
if self.w <= 0:
self.w = self.step_size
class BasicNES:
def __init__(self, num_params,
lbound,
ubound,
mu,
mu_lr=0.5,
sigma_init=1.0,
sigma_lr=0.2,
sigma_decay=0.999,
sigma_db=0.01,
popsize=30,
elite_rt=1.0,
optim='SGD',
bound_check=False,
mirror_sample=True,
step=100,
mu_decay=0.1):
self.dim = num_params
self.popsize = popsize
self.lbounds = np.tile(lbound, (self.popsize, 1))
self.ubounds = np.tile(ubound, (self.popsize, 1))
self.mu = mu
self.mu_lr = mu_lr
self.sigma = np.ones(self.dim) * sigma_init
self.sigma_lr = sigma_lr
self.sigma_decay = sigma_decay
self.sigma_db = sigma_db
self.elite_rt = elite_rt
self.bound_check = bound_check
self.solutions = np.empty((self.popsize, self.dim))
if optim == 'Adam':
self.optimizer = Adam(self, mu_lr, beta1=0.99, beta2=0.999)
elif optim == 'BasicSGD':
self.optimizer = BasicSGD(self, mu_lr)
elif optim == 'SGD':
self.optimizer = SGD(self, mu_lr, momentum=0.5)
self.best = np.inf
self.shapevec = np.linspace(0.5, -0.5, int(self.popsize*self.elite_rt))
if step > 0 and 0 < mu_decay < 1:
self.schlr = StepLR(self.optimizer, step=step, gamma=mu_decay)
self.best_mu = np.zeros(self.dim)
self.mirror_sample = mirror_sample # If mirror_sample=True, the popsize must be even
self.step = step
self.mu_decay = mu_decay
def ask(self, check_type=None):
if self.mirror_sample:
assert self.popsize % 2 == 0, "If mirror_sample=True, the popsize must be even"
half_popsize = int(self.popsize / 2)
epsilon_half = np.random.randn(half_popsize, self.dim)*self.sigma
self.epsilon = np.concatenate([epsilon_half, -epsilon_half])
else:
self.epsilon = np.random.randn(self.popsize, self.dim)*self.sigma
self.solutions = self.mu + self.epsilon
if self.bound_check:
# check type
if check_type == "box":
idb = np.where(self.solutions < self.lbounds)
self.solutions[idb[0], idb[1]] = self.lbounds[idb[0], idb[1]]
idu = np.where(self.solutions > self.ubounds)
self.solutions[idu[0], idu[1]] = self.ubounds[idu[0], idu[1]]
if check_type == "restart":
randmaxt = self.lbounds + np.random.rand(self.popsize, self.dim) * \
(self.ubounds - self.lbounds)
idb = np.where(self.solutions < self.lbounds)
self.solutions[idb[0], idb[1]] = randmaxt[idb[0], idb[1]]
idu = np.where(self.solutions > self.ubounds)
self.solutions[idu[0], idu[1]] = randmaxt[idu[0], idu[1]]
return self.solutions
def tell(self, fit_array):
if self.step > 0 and 0 < self.mu_decay < 1:
self.schlr.update_lr()
index = np.argsort(fit_array)
eps_cut = self.epsilon[index[0:int(self.popsize*self.elite_rt)], :]
fit_cut = self.shapevec
# update mean
gol_mu = np.sum(eps_cut*fit_cut.reshape(len(fit_cut), 1), axis=0)
self.update_ratio = self.optimizer.update(gol_mu)
# update sigma
gol_sg = np.sum(fit_cut.reshape(len(fit_cut), 1)*(eps_cut**2-self.sigma**2)/self.sigma, axis=0) / (self.popsize*self.elite_rt)
self.sigma += self.sigma_lr*gol_sg
if fit_array[index[0]] < self.best:
self.best = fit_array[index[0]]
self.best_mu = np.copy(self.solutions[index[0], :])
def current_best(self):
return (self.best, self.best_mu, self.update_ratio)
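# --- Illustrative sketch, not part of the original library ---
# tell() above estimates a search direction for the mean: perturbations are
# ranked by fitness, the raw fitness values are replaced by the fixed shaping
# vector (0.5 ... -0.5), and the shaped-weighted sum of perturbations is handed
# to an optimizer. A self-contained toy version on a 1-D sphere function; the
# plain "+ 0.5 * grad" step stands in for the optimizer, whose exact update
# rule is not shown here.
def _demo_nes_mean_update(seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    mu, sigma, popsize = np.array([3.0]), np.array([1.0]), 10
    eps_half = rng.randn(popsize // 2, 1) * sigma        # mirrored sampling
    eps = np.concatenate([eps_half, -eps_half])
    fits = np.sum((mu + eps) ** 2, axis=1)               # lower is better
    shape = np.linspace(0.5, -0.5, popsize)
    order = np.argsort(fits)
    grad = np.sum(eps[order] * shape.reshape(-1, 1), axis=0)
    return mu + 0.5 * grad                               # typically moves mu towards the optimum at 0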
if __name__=="__main__":
dim = 30
epochs = 10000
lb = np.ones(dim) * -30
ub = np.ones(dim) * 30
mu = np.ones(dim) * -30
NES = BasicNES(dim, lb, ub, mu, mu_lr=0.5, popsize=50, elite_rt=0.8, optim='SGD', mirror_sample=True,
step=int(epochs/3), mu_decay=0.9)
fit_arr = np.empty(NES.popsize)
res_cur = []
rat_cur = []
sig_cur = []
for i in range(epochs):
solutions = NES.ask()
for j in range(NES.popsize):
fit_arr[j] = func(solutions[j, :])
NES.tell(fit_arr)
res = NES.current_best()
# print('Iter:', i, ' bestv:', res[0])
res_cur.append(res[0])
rat_cur.append(res[2])
sig_cur.append(np.linalg.norm(NES.sigma))
# print(best)
plt.figure()
plt.plot(res_cur)
plt.yscale('log')
plt.show()
plt.figure()
plt.plot(rat_cur)
plt.yscale('log')
plt.show()
plt.figure()
plt.plot(sig_cur)
plt.yscale('log')
plt.show()
|
OverEuro/swarmtools | src/training.py | <reponame>OverEuro/swarmtools
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
class con_bandit():
'''stationary bandit problem'''
def __init__(self):
self.state = 0
self.bandits = np.array([[.9,.9,.6,-5],[.7,-5,1,.8],[-5,.9,.6,.8],
[.5,.6,1,-5],[.4,.5,-5,.8],[.4,-5,.7,.8]])
self.num_bandits = self.bandits.shape[0]
self.num_actions = self.bandits.shape[1]
def getState(self):
self.state = np.random.randint(self.num_bandits)
return self.state
def pullArm(self, action):
bandit = self.bandits[self.state, action]
result = np.random.rand()
if result > bandit:
# return a positive reward
return 1
else:
# return a negative reward
return -1
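# --- Illustrative note, not part of the original script ---
# pullArm() returns +1 when a uniform draw exceeds the chosen arm's threshold,
# so an arm's chance of a positive reward is roughly 1 - threshold (and ~1 for
# the -5 entries). A quick empirical check of that claim for a single arm:
def _demo_arm_reward_probability(threshold=0.2, trials=10000, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    rewards = np.where(rng.rand(trials) > threshold, 1, -1)
    return (rewards == 1).mean()   # close to 1 - threshold for thresholds in [0, 1]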
class agent(nn.Module):
def __init__(self, input_s, output_s):
super(agent, self).__init__()
self.net = nn.Sequential(
nn.Linear(input_s, output_s),
nn.Sigmoid())
def forward(self, x):
return self.net(x)
class loss(nn.Module):
def __init__(self):
super(loss, self).__init__()
def forward(self, output, action, reward):
loss = -(th.log(output[0, action])*reward)
return loss
''' training loop '''
env = con_bandit()
learner = agent(env.num_bandits, env.num_actions).cuda()
learner.eval()
loss_fun = loss()
one_hot = F.one_hot(th.arange(0, env.num_bandits)).float().cuda()
epochs = 30000
e = 0.5 # epsilon for exploration
#optimizer = th.optim.SGD(learner.parameters(), lr = 0.001, momentum=0.9)
#optimizer = th.optim.RMSprop(learner.parameters(), lr = 0.001, momentum=0.8)
optimizer = th.optim.Adam(learner.parameters(), lr = 0.001)
#lr_sche = th.optim.lr_scheduler.MultiStepLR(optimizer, [6000, 8000], gamma=0.1)
total_reward = np.zeros([env.num_bandits,env.num_actions])
rew_curs = np.empty((epochs, env.num_bandits))
sum_cur = []
for epoch in range(epochs):
s = env.getState() #Get a state from the environment.
#Choose either a random action or one from our network.
if np.random.rand() < e:
pro_list = learner.forward(one_hot[s,:].unsqueeze(0))
action = np.random.randint(env.num_actions)
else:
pro_list = learner.forward(one_hot[s,:].unsqueeze(0))
action = th.argmax(pro_list)
reward = env.pullArm(action) #reward for taking an action given a bandit.
#Update the network.
loss = loss_fun(pro_list, action, reward)
# loss_cur.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
# lr_sche.step()
total_reward[s, action] += reward
if epoch % 500 == 0:
print("Mean reward for each of the " + str(env.num_bandits) +
" bandits: " + str(np.mean(total_reward,axis=1)))
rew_curs[epoch, :] = np.mean(total_reward,axis=1)
output = learner.forward(one_hot).cpu().detach().numpy()
for a in range(env.num_bandits):
print("The agent thinks action " + str(np.argmax(output[a,:])+1) + " for bandit " + str(a+1) + " is the most promising")
if np.argmax(output[a,:]) == np.argmin(env.bandits[a,:]):
print("and it was right!")
else:
print("and it was wrong!")
plt.figure()
for i in range(env.num_bandits):
plt.plot(rew_curs[:, i], label='No.'+str(i+1))
plt.legend()
#plt.savefig('res.png',dpi=600)
plt.show()
|
OverEuro/swarmtools | src/ver03.py | import numpy as np
import matplotlib.pyplot as plt
def testfun(x):
# loss = np.sum((x - np.ones(len(x))*10)**2)
loss = 100 * np.sum((x[:-1]**2-x[1:])**2) + np.sum((x[:-1]-1)**2)
return loss
def GetExp(pop, popf, xnes, sigma, dim):
sp = np.ones((len(popf), dim))
for i in range(len(popf)):
sp[i,:] = popf[i]*((1/np.sqrt(2*np.pi*sigma**2))*np.exp(-(pop[i,:]-xnes)**2/(2*sigma**2)))
return np.sum(sp) / (len(popf)*dim)
def SimpleGauss(gen, sig, num, dim):
xsga = -np.ones(dim) * 30
res = []
best = 1e+10
for i in range(gen):
if (i+1)%200 == 0:
sig /= 10
for j in range(num):
tril = xsga + np.random.randn(dim) * sig
fit = testfun(tril)
if fit < best:
best = fit
xsga = tril
res.append(best)
return xsga, res
def NES(gen, sig, num, dim):
xnes = -np.ones(dim) * 30
res = []
exp_1 = []
exp_2 = []
best = 1e+10
pop = np.ones((num, dim))
popf = np.ones(num)
sigma = np.ones(dim) * sig
shapevec = np.linspace(0.5, -0.5, num)
delta_m = np.zeros(dim)
delta_s = np.zeros(dim)
for i in range(gen):
for j in range(num):
tril = xnes + np.random.randn(dim) * sigma
pop[j, :] = tril
fit = testfun(tril)
popf[j] = fit
if fit < best:
best = fit
# xnes = tril
res.append(best)
index = np.argsort(popf)
popf[index] = shapevec
exp_1.append(GetExp(pop, popf, xnes, sigma, dim))
# Update the mean (xnes) and sigma
sum_m = 0
sum_s = 0
for q in range(num):
sum_m = sum_m + popf[q] * (pop[q, :] - xnes)
sum_s = sum_s + popf[q] * ((pop[q, :] - xnes)**2 - sigma**2) / (sigma)
delta_m = 0.5 * delta_m + sum_m
delta_s = 0.5 * delta_s + sum_s/num
xnes += 0.5 * delta_m
sigma += 0.2 * delta_s
# print(sigma)
exp_2.append(GetExp(pop, popf, xnes, sigma, dim))
return xnes, res, exp_1, exp_2
def NESelite(gen, sig, num, dim):
xnes = -np.ones(dim) * 30
res = []
exp_1 = []
exp_2 = []
best = 1e+10
pop = np.ones((num, dim))
popf = np.ones(num)
sigma = np.ones(dim) * sig
elites = int(np.ceil(num*0.8))
shapevec = np.linspace(0.5, -0.5, elites)
delta_m = np.zeros(dim)
delta_s = np.zeros(dim)
for i in range(gen):
for j in range(num):
tril = xnes + np.random.randn(dim) * sigma
pop[j, :] = tril
fit = testfun(tril)
popf[j] = fit
if fit < best:
best = fit
# xnes = tril
res.append(best)
index = np.argsort(popf)
popn = pop[index[0:elites], :]
popfn = popf[index[0:elites]]
popfn = shapevec
exp_1.append(GetExp(popn, popfn, xnes, sigma, dim))
# Update the mean (xnes) and sigma
sum_m = 0
sum_s = 0
for q in range(elites):
sum_m = sum_m + popfn[q] * (popn[q, :] - xnes)
sum_s = sum_s + popfn[q] * ((popn[q, :] - xnes)**2 - sigma**2) / (sigma)
delta_m = delta_m * 0.5 + sum_m
delta_s = delta_s * 0.5 + sum_s/num
xnes += 0.5 * delta_m
sigma += 0.2 * delta_s
# print(sigma)
exp_2.append(GetExp(popn, popfn, xnes, sigma, dim))
return xnes, res, exp_1, exp_2
if __name__ == "__main__":
xsga, sga = SimpleGauss(6000, 1, 30, 10)
xnes, nes, exp_1, exp_2 = NES(6000, 1, 30, 10)
xeli, eli, exp_3, exp_4 = NESelite(6000, 1, 30, 10)
plt.figure()
plt.plot(sga, label='SimpleGauss')
plt.plot(nes, label='NES')
plt.plot(eli, label='NESelite')
plt.yscale('log')
plt.legend()
plt.grid()
# plt.savefig('com_1.png', dpi=600)
plt.show()
plt.figure()
plt.plot(exp_3, label='1')
plt.plot(exp_4, label='2')
# plt.yscale('log')
plt.legend()
plt.grid()
# plt.savefig('com_2.png', dpi=600)
plt.show() |
OverEuro/swarmtools | src/exp_training_nn4rl/training_nes.py | <reponame>OverEuro/swarmtools
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import swarmtools as sts
class con_bandit():
'''stationary bandit problem'''
def __init__(self):
self.state = 0
self.bandits = np.array([[.9,.9,.6,.2],[.7,.2,1,.8],[.2,.9,.6,.8],
[.5,.6,1,.2],[.4,.5,.2,.8],[.4,.2,.7,.8],
[.5,.6,1,.2],[.4,.5,.2,.8],[.4,.2,.7,.8]])
self.num_bandits = self.bandits.shape[0]
self.num_actions = self.bandits.shape[1]
def getState(self):
self.state = np.random.randint(self.num_bandits)
return self.state
def pullArm(self, action):
bandit = self.bandits[self.state, action]
result = np.random.rand()
if result > bandit:
# return a positive reward
return 1
else:
# return a negative reward
return -1
class agent(nn.Module):
def __init__(self, input_s, output_s):
super(agent, self).__init__()
self.net = nn.Sequential(
nn.Linear(input_s, output_s),
nn.Sigmoid())
def forward(self, x):
return self.net(x)
def update_model(flat_param, model, model_shapes):
idx = 0
i = 0
for param in model.parameters():
delta = np.product(model_shapes[i])
block = flat_param[idx:idx+delta]
block = np.reshape(block, model_shapes[i])
i += 1
idx += delta
block_data = th.from_numpy(block).float()
block_data = block_data.cuda()
param.data = block_data
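# --- Illustrative sketch, not part of the original script ---
# update_model() above writes a flat parameter vector back into the network,
# reshaping it block by block. A small round-trip check on a tiny agent; like
# the rest of this script, it assumes a CUDA device is available.
def _demo_update_model_roundtrip():
    tiny = agent(3, 2).cuda()
    shapes = [p.data.cpu().numpy().shape for p in tiny.parameters()]
    flat = np.concatenate([p.data.cpu().numpy().flatten() for p in tiny.parameters()])
    update_model(flat, tiny, shapes)
    restored = np.concatenate([p.data.cpu().numpy().flatten() for p in tiny.parameters()])
    return np.allclose(flat, restored)   # True: the vector maps back onto the model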
'''NES training loop'''
env = con_bandit()
learner = agent(env.num_bandits, env.num_actions).cuda()
orig_params = []
model_shapes = []
for param in learner.parameters():
p = param.data.cpu().detach().numpy()
model_shapes.append(p.shape)
orig_params.append(p.flatten())
orig_params_flat = np.concatenate(orig_params)
NPARAMS = len(orig_params_flat)
print("The number of NN's params =", NPARAMS)
learner.eval()
one_hot = F.one_hot(th.arange(0, env.num_bandits)).float().cuda()
eval_num = 300000 # the number of samples
lb = np.ones(NPARAMS) * -1
ub = np.ones(NPARAMS) * 1
mu = np.zeros(NPARAMS)
optimizer = sts.BasicNES(NPARAMS, lb, ub, mu, mu_lr=0.1, popsize=30, elite_rt=0.8, optim='SGD', mirror_sample=True,
step=10, mu_decay=0.9)
# solutions = optimizer.start(lbound, ubound)
fits = np.empty(optimizer.popsize)
evals = 0
batch_size = env.num_bandits * 15
epoch = 0
best_f = []
epochs = int(eval_num / (optimizer.popsize * batch_size))
while evals < eval_num:
solutions = optimizer.ask()
# compute all particles' fitness:
for i in range(optimizer.popsize):
update_model(solutions[i, :], learner, model_shapes)
sum_r = 0
for j in range(batch_size):
s = env.getState() # get a random state from the env
pro_list = learner.forward(one_hot[s, :].unsqueeze(0))
action = th.argmax(pro_list)
reward = env.pullArm(action)
sum_r += reward
fits[i] = -sum_r # negate the batch reward so the NES optimizer can minimize fitness
optimizer.tell(fits)
# optimizer.step(epochs, epoch, end_w=0.1)
# print evolution process
best_f.append(optimizer.current_best()[0])
print('EPOCH:', epoch, 'Fitness:', optimizer.current_best()[0])
epoch += 1
# update evals
evals += optimizer.popsize * batch_size
best_params = optimizer.current_best()[1]
update_model(best_params, learner, model_shapes)
output = learner.forward(one_hot).cpu().detach().numpy()
for a in range(env.num_bandits):
print("The agent thinks action " + str(np.argmax(output[a,:])+1) + " for bandit " + str(a+1) + " is the most promising")
if np.argmax(output[a,:]) == np.argmin(env.bandits[a,:]):
print("and it was right!")
else:
print("and it was wrong!")
plt.figure()
plt.plot(best_f)
plt.xlabel('Epochs')
plt.ylabel('Fitness')
#plt.savefig('res_nes.png',dpi=600)
plt.show()
|
ZHUchichi/TSP_GA | TSP_GA.py | <gh_stars>1-10
# ^(* ̄(oo) ̄)^
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
import operator
import csv
N_CITIES = 48 # number of cities
POP_SIZE = 200 # number of individuals per generation
ELITE_SIZE = 100 # number of elite individuals kept each generation
MUTATE_RATE = 0.002 # per-gene mutation probability
N_GENERATIONS = 1000 # number of generations to evolve
# Euclidean distance between two cities (Pythagorean theorem)
def distance(city1, city2):
x = abs(city1[0] - city2[0])
y = abs(city1[1] - city2[1])
distance = np.sqrt((x ** 2) + (y ** 2))
return distance
# Fitness function, defined as the reciprocal of the total route distance
def fitness(route):
fitness= 0.0
path_distance = 0
for i in range(0, len(route)):
city_from = route[i]
city_to = None
if i + 1 < len(route):
city_to = route[i + 1]
# For the last city, compute the distance back to the first city
else:
city_to = route[0]
dist = distance(city_from, city_to)
path_distance += dist
fitness = 1 / float(path_distance)
return fitness
# Create a random visiting order of the cities ---> an individual
def create_routes(citylist):
route = random.sample(citylist, len(citylist))
return route
# Create enough individuals ---> the initial population
def initial_population(population_size, citylist):
population = []
for i in range(0, population_size):
population.append(create_routes(citylist))
return population
# Compute the fitness of every individual and sort them in descending order; each individual is represented by its index in the population
def rank_routes(population):
fitness_results = {}
for i in range(0,len(population)):
fitness_results[i] = fitness(population[i])
return sorted(fitness_results.items(), key = operator.itemgetter(1), reverse = True)
# Select individuals
def select_routes(rank_result, elite_size):
select_result = []
# Convert the ranked individuals into a DataFrame, compute the running total cum_sum in order, then the cumulative share of total fitness cum_perc
# Example layout:
#        Index    Fitness    cum_sum    cum_perc
#    0   1        0.5        0.5        50.0
#    1   0        0.3        0.8        80.0
#    2   2        0.2        1.0        100.0
df = pd.DataFrame(np.array(rank_result), columns=["Index","Fitness"])
df['cum_sum'] = df.Fitness.cumsum()
df['cum_perc'] = 100 * df.cum_sum / df.Fitness.sum()
# Keep the elite individuals first, then pick the remaining ones by roulette-wheel selection
for i in range(0, elite_size):
select_result.append(rank_result[i][0])
for i in range(0, len(rank_result) - elite_size):
pick = 100*random.random()
for i in range(0, len(rank_result)):
if pick <= df.iat[i,3]:
select_result.append(rank_result[i][0])
break
return select_result
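# Illustrative sketch, not part of the original script: a minimal roulette-wheel
# draw over cumulative fitness percentages, mirroring the selection loop above;
# the three fitness values are made up for the example.
def _demo_roulette_pick(sorted_fitness=(0.5, 0.3, 0.2), seed=1):
    pick = 100 * random.Random(seed).random()
    total = sum(sorted_fitness)
    cum_perc = 0.0
    for index, fit in enumerate(sorted_fitness):
        cum_perc += 100 * fit / total
        if pick <= cum_perc:
            return index   # index of the selected (already ranked) individual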
# Turn the selected indices back into concrete routes ---> build the mating pool
def matingPool(population, select_result):
matingpool = []
for i in range(0, len(select_result)):
index = select_result[i]
matingpool.append(population[index])
return matingpool
# Produce a child by crossover between two individuals
def crossover(parent1, parent2):
child = []
p1 = []
p2 = []
# Randomly take a slice of cities from the first parent, then append the cities of the second parent that are not in that slice to form the new individual
geneA = int(random.random() * len(parent1))
geneB = int(random.random() * len(parent1))
start = min(geneA, geneB)
end = max(geneA, geneB)
for i in range(start, end):
p1.append(parent1[i])
p2 = [item for item in parent2 if item not in p1]
child = p1 + p2
return child
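# Illustrative sketch, not part of the original script: the ordered crossover
# above applied to two tiny example routes, with a fixed slice instead of a
# random one so the result is reproducible.
def _demo_ordered_crossover():
    parent1 = ['A', 'B', 'C', 'D', 'E']
    parent2 = ['D', 'A', 'E', 'B', 'C']
    start, end = 1, 3                        # keep parent1[1:3] == ['B', 'C']
    p1 = parent1[start:end]
    p2 = [city for city in parent2 if city not in p1]
    return p1 + p2                           # ['B', 'C', 'D', 'A', 'E']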
# Over the whole mating pool, keep the elites first, then cross pairs of individuals until enough children are produced
def crossover_population(matingpool, elite_size):
children = []
length = len(matingpool) - elite_size
pool = random.sample(matingpool, len(matingpool))
for i in range(0,elite_size):
children.append(matingpool[i])
for i in range(0, length):
child = crossover(pool[i], pool[len(matingpool)-i-1])
children.append(child)
return children
# Each city in an individual may swap positions with another city ---> mutation
def mutate(route, mutation_rate):
for swap1 in range(len(route)):
if(random.random() < mutation_rate):
swap2 = int(random.random() * len(route))
city1 = route[swap1]
city2 = route[swap2]
route[swap1] = city2
route[swap2] = city1
return route
# Every individual in the population gets a chance to mutate
def mutate_population(population, mutation_rate):
mutate_population = []
for route in range(0, len(population)):
mutate_route = mutate(population[route], mutation_rate)
mutate_population.append(mutate_route)
return mutate_population
# Reproduction: parent population => rank -> select -> mating pool -> crossover -> mutate => offspring population
def multiply(current_generation, elite_size, mutation_rate):
rank = rank_routes(current_generation)
select = select_routes(rank, elite_size)
mate = matingPool(current_generation, select)
children = crossover_population(mate, elite_size)
next_generation = mutate_population(children, mutation_rate)
return next_generation
# Plot the current best route
def plotting(route, best_route, show_time):
x = []
y = []
plt.cla()
for i in range(0, len(route)):
x.append(route[i][0])
y.append(route[i][1])
x.append(route[0][0])
y.append(route[0][1])
plt.plot(x, y, 'r-s', markerfacecolor = 'y')
plt.text(0, 5500, "Total distance = %.3f" % best_route, fontdict={'size': 20, 'color': 'blue'})
plt.pause(show_time)
# Read the city coordinates from a CSV file
def read_city_file(citylist, N_CITIES):
csv_file = csv.reader(open('city-' + str(N_CITIES) + '.csv'))
for row in csv_file:
citylist.append(row)
for i in range(0, len(citylist)):
for j in range(0, 2):
citylist[i][j] = int(citylist[i][j])
return citylist
# Write the result to a file
def output_city_path(route, best_route, N_CITIES):
output = open('result-' + str(N_CITIES) + '.txt' ,'w')
output.write('----------Distance----------\n\n')
output.write(str(best_route) + '\n\n')
output.write('---------Circle Path--------\n\n')
for i in route:
output.write(str(i) + '\n')
output.write(str(route[0]))
# Evolve one generation
def multiply_of_each_generation(generation, population, elite_size, mutation_rate):
population = multiply(population, elite_size, mutation_rate)
best = 1 / rank_routes(population)[0][1]
print('Gen:', generation,'|| best fit:', best)
# Comment out the next line to plot only the final result
plotting(population[0], best, 0.01)
return population
# Evolve the last generation
def multiply_of_last_generation(generation, population, city_size, elite_size, mutation_rate):
population = multiply(population, elite_size, mutation_rate)
best = 1 / rank_routes(population)[0][1]
print('Gen:', generation,'|| best fit:', best)
# The final plot stays for two seconds before closing
plotting(population[0], best, 2)
print("Final Distance = %.3f" % best)
output_city_path(population[0], best, city_size)
def GA(city_size, pop_size, elite_size, mutation_rate, generation):
citylist = []
read_city_file(citylist, city_size)
population = initial_population(pop_size, citylist)
for gen in range(1, generation):
population = multiply_of_each_generation(gen, population, elite_size, mutation_rate)
multiply_of_last_generation(generation, population, city_size, elite_size, mutation_rate)
#-----------------------------------------------------------------------------------------------#
GA(N_CITIES, POP_SIZE, ELITE_SIZE, MUTATE_RATE, N_GENERATIONS)
#-----------------------------------------------------------------------------------------------#
|
pouyatafti/gmailcli | gmailcli.py | import argparse
import sys
import csv
import os
from gmail import gmail
def parse_args():
parser = argparse.ArgumentParser(description="gmailcli.py")
parser.add_argument("-c", "-cred", dest="cred_fn", required=True, help="credential file")
parser.add_argument("-t", "-tok", dest="tok_fn", required=True, help="token file")
parser.add_argument("-q", "-query", dest="q", required=True, help="query")
parser.add_argument("-o", "-outdir", dest="outdir", required=False, default=".", help="output directory")
parser.add_argument(dest="action", nargs=1, help="action (print_info, save_raw, save_attachments)")
return parser.parse_args()
ctxt = parse_args()
gm = gmail.Gmail(ctxt.cred_fn, ctxt.tok_fn)
csv_writer = csv.writer(sys.stdout)
if not os.path.exists(ctxt.outdir):
os.makedirs(ctxt.outdir)
act = {
"print_info": lambda mid, info: None,
"save_raw": lambda mid, info: gm.save_message_by_id(mid, filename=os.path.join(ctxt.outdir, mid+".eml")),
"save_attachments": lambda mid, info: gm.save_attachments_by_id(mid, savedir=os.path.join(ctxt.outdir, mid))
}
message_ids = gm.get_message_ids_by_query(ctxt.q)
for mid in message_ids:
info = gm.get_info_by_id(mid)
csv_writer.writerow([info["id"], info["Date"], info["From"], info["To"], info["Subject"]])
act[ctxt.action[0]](mid, info)
|
pouyatafti/gmailcli | gmail/gmail.py | <filename>gmail/gmail.py
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from collections import namedtuple
import base64
import email
import os
def mime_header_item(msg, item):
hi = msg.get(item)
hid = email.header.decode_header(hi)[0]
if isinstance(hid[0], bytes):
hidec = hid[0].decode(hid[1])
else:
hidec = hid[0]
return hidec
def mime_header(msg):
items = {"From", "To", "Date", "Subject"}
header = {item: mime_header_item(msg, item) for item in items}
header["Date"] = email.utils.parsedate_to_datetime(header["Date"])
header["From"] = email.utils.parseaddr(header["From"])
header["To"] = email.utils.parseaddr(header["To"])
return header
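# Illustrative sketch, not part of the original module: how the two helpers
# above decode an RFC 2047 encoded Subject header, using only the standard
# library (the addresses and date below are made up).
def _demo_decode_subject():
    msg = email.message_from_string(
        "From: a@example.com\r\nTo: b@example.com\r\n"
        "Date: Mon, 01 Jan 2018 00:00:00 +0000\r\n"
        "Subject: =?utf-8?q?caf=C3=A9?=\r\n\r\nbody")
    return mime_header(msg)["Subject"]   # 'café'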
class Gmail:
def __init__(self, creds_file="credentials.json", token_file="token.json"):
self._SCOPES = "https://www.googleapis.com/auth/gmail.readonly"
self._store = None
self._creds = None
self._service = None
self.auth(creds_file, token_file)
def auth(self, creds_file, token_file):
self._store = file.Storage(token_file)
self._creds = self._store.get()
if not self._creds or self._creds.invalid:
flow = client.flow_from_clientsecrets(creds_file, self._SCOPES)
flags = tools.argparser.parse_args([])
self._creds = tools.run_flow(flow, self._store, flags)
self._service = build('gmail', 'v1', http=self._creds.authorize(Http()))
def get_message_ids_by_query(self, q, uid="me"):
response = self._service.users().messages().list(userId=uid, q=q).execute()
messages = []
if "messages" in response:
messages.extend(response["messages"])
while "nextPageToken" in response:
page_token = response["nextPageToken"]
response = self._service.users().messages().list(userId=uid, q=q, pageToken=page_token).execute()
messages.extend(response["messages"])
return [m["id"] for m in messages]
def get_info_by_id(self, mid, uid="me"):
message = self._service.users().messages().get(userId=uid, id=mid, format="metadata").execute()
info = dict()
info["id"] = mid
info["snippet"] = message["snippet"]
for hdr in message["payload"]["headers"]:
info[hdr["name"]] = hdr["value"]
return info
def get_message_by_id(self, mid, uid="me"):
message = self._service.users().messages().get(userId=uid, id=mid, format="raw").execute()
msg_bytes = base64.urlsafe_b64decode(message["raw"].encode("ASCII"))
msg_mime = email.message_from_bytes(msg_bytes)
return msg_mime
def save_message_by_id(self, mid, uid="me", filename=None):
if filename is None:
filename = mid + ".eml"
message = self._service.users().messages().get(userId=uid, id=mid, format="raw").execute()
msg_bytes = base64.urlsafe_b64decode(message["raw"].encode("ASCII"))
with open(filename, "wb") as f:
f.write(msg_bytes)
def save_attachments_by_id(self, mid, uid="me", savedir="."):
message = self._service.users().messages().get(userId=uid, id=mid).execute()
for part in message["payload"]["parts"]:
if part["filename"] and len(part["filename"]) > 0:
if "data" in part["body"]:
data=part["body"]["data"]
else:
aid=part["body"]["attachmentId"]
att=self._service.users().messages().attachments().get(userId=uid, messageId=mid,id=aid).execute()
data=att["data"]
data = base64.urlsafe_b64decode(data.encode("UTF-8"))
if not os.path.exists(savedir):
os.makedirs(savedir)
with open(os.path.join(savedir, part["filename"]), "wb") as f:
f.write(data)
|
parnedo/medicaments.france | code/pharmacien_parser.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pharmacien import pharmacien
import xml.etree.ElementTree as ET
from HTMLParser import HTMLParser
from BeautifulSoup import BeautifulSoup
import HTMLParser
import re
class pharmacien_parser:
def __init__(self, data):
self.removeHTMLTags = re.compile(r'<.*?>')
self.mTree = BeautifulSoup (data, convertEntities=BeautifulSoup.HTML_ENTITIES)
self.mHtml_parser = HTMLParser.HTMLParser()
def procLine(self, line):
l = str(line.encode('utf-8').strip())
return ' '.join(l.split())
def parse(self):
p = pharmacien()
for s in self.mTree.findAll('strong'):
if s is not None:
print s, [s.nextSibling]
if "Prénom" in str(s):
try:
p.name = s.nextSibling
except (AttributeError, TypeError):
pass
if "Nom" in str(s):
try:
p.familyname = s.nextSibling
except (AttributeError, TypeError):
pass
if "Titre" in str(s):
try:
p.title = s.nextSibling.replace('\n','')
except (AttributeError, TypeError):
pass
if "Numéro RPPS" in str(s):
try:
p.nRPPS = s.nextSibling.replace('\n','')
except (AttributeError, TypeError):
pass
#try:
# m.mGenericGroup = self.procLine(self.mTree.find('a',attrs={'title':unicode(u'Ouvrir la page de détail sur ce groupe générique')}).text)
#except (AttributeError, TypeError):
# pass
#try:
# m.mTherapy = self.procLine(self.mTree.find('p', attrs={'class':'AmmCorpsTexte'}).text)
# m.mTherapy += ' '.join([self.procLine(a.text) for a in self.mTree.findAll('p', attrs={'class':re.compile("^AmmListePuces")})])
#except (AttributeError, TypeError):
# pass
#try:
# m.mComposition = self.procLine(self.mTree.find('li', attrs={'class':'element'}).text)
# m.mComposition += self.procLine(self.mTree.find('li', attrs={'class':'composant'}).text)
#except (AttributeError, TypeError):
# pass
#try:
# m.mPresentation = self.mTree.find('h2', attrs={'class':'titrePresentation'}).text.encode('utf-8').strip()
#except (AttributeError, TypeError):
# pass
#for br in self.mTree.findAll('br'):
# if br.previousSibling is not None:
# if "CIP" in br.previousSibling:
# try:
# m.mCodeCIP13 = re.search('Code CIP : .*? ou (.*)', str(br.previousSibling)).groups()[0].replace(' ','')
# except:
# m.mCodeCIP13 = re.search('Code CIP : (.*)', str(br.previousSibling)).groups()[0].replace(' ','')
#try:
# htmlSMR = self.mTree.find('table', attrs={'summary':unicode(u'Liste des avis de SMR rendus par la commission de la transparence')})
# m.mSMR = ' '.join([a.text.encode('utf-8').strip() for a in htmlSMR.findAll('td', attrs={'class':re.compile("^ligne")})])
#except (AttributeError, TypeError):
# pass
#try:
# htmlASMR = self.mTree.find('table', attrs={'summary':unicode(u'Liste des avis d\'ASMR rendus par la commission de la transparence')})
# m.mASMR = ' '.join([self.procLine(a.text) for a in htmlASMR.findAll('td', attrs={'class':re.compile("^ligne")})])
#except (AttributeError, TypeError):
# pass
#try:
# htmlOther = self.mTree.find('div', attrs={'id':'autreInfo'})
# m.mOther = ' '.join([self.procLine(a.text) for a in htmlOther.findAll('li')])
#except (AttributeError, TypeError):
# pass
return p
if __name__ == "__main__":
import sys
with open (sys.argv[1], "r") as myfile:
data=myfile.read()
print pharmacien_parser(data).parse()
|
parnedo/medicaments.france | code/pharmacy.py | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
class pharmacy:
def __init__(self):
self.idx =''
self.commercial=''
self.social =''
self.addresse =''
self.postalcode=''
self.city =''
self.telephone =''
self.fax =''
def clean(self):
import re
self.commercial= re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.commercial)))))
self.social = re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.social)))))
self.addresse = re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.addresse )))))
self.postalcode= re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.postalcode)))))
self.city = re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.city )))))
self.telephone = re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.telephone )))))
self.fax = re.sub(' +',' ',re.sub('\n*','',re.sub('\s*$','',re.sub('^\s*','',str(self.fax )))))
def __str__(self):
self.clean()
return str(self.idx ) + "|" +\
str(self.social ) + "|" +\
str(self.commercial) + "|" +\
str(self.addresse ) + "|" +\
str(self.postalcode) + "|" +\
str(self.city ) + "|" +\
str(self.telephone ) + "|" +\
str(self.fax )
def __repr__(self):
return self.__str__()
def escape (self, data):
import MySQLdb
return MySQLdb.escape_string(str(data))
@staticmethod
def fromCsv(line):
res = pharmacy()
csv = line.split('|')
res.idx = csv[0].replace('None','')
res.social = csv[1].replace('None','')
res.commercial= csv[2].replace('None','')
res.addresse = csv[3].replace('None','')
res.postalcode= csv[4].replace('None','')
res.city = csv[5].replace('None','')
res.telephone = csv[6].replace('None','')
res.fax = csv[7].replace('None','')
return res
def sql(self):
self.clean()
res = "INSERT INTO `Pharma` ("\
+ "`pharma_id`, "\
+ "`pharma_title`, "\
+ "`pharma_empl`, "\
+ "`pharma_desc`, "\
+ "`pharma_hours`, "\
+ "`pharma_adress`, "\
+ "`pharma_lat`, "\
+ "`pharma_long`, "\
+ "`pharma_contact_mail`, "\
+ "`pharma_contact_tel`, "\
+ "`pharma_validated`"\
+") VALUES ("\
+ str(self.idx) +","\
+ "'" +self.escape(self.social )+ "',"\
+ "'" +self.escape(self.commercial)+ "',"\
+ "'',"\
+ "'',"\
+ "'" +self.escape(self.addresse + ", " + self.postalcode + ", " + self.city) + "',"\
+ "0,"\
+ "0,"\
+ "'',"\
+ "'" +self.escape(self.telephone) + "',"\
+ "0"\
+ ");"
return res
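# Illustrative sketch, not part of the original module: turning one
# pipe-separated line into an INSERT statement via fromCsv() and sql();
# every value below is made up, and MySQLdb must be installed for escape().
def _demo_pharmacy_sql():
    line = "42|SARL Exemple|Pharmacie Centrale|1 rue de la Paix|75001|PARIS|0102030405|0102030406"
    return pharmacy.fromCsv(line).sql()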
|
parnedo/medicaments.france | code/medicament_main.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
class main:
def __init__(self):
self.mMedicament_list = []
self.mBaseUrl = 'base-donnees-publique.medicaments.gouv.fr'
def fetchMedicaments(self, filename):
from medicament_parser import medicament_parser
for upperCase in map(chr, range(ord('a'), ord('z')+1)):
for med in self.getMedURL_List(self.fetchURL(upperCase)):
medData = self.getMedData(med)
medicament = medicament_parser(medData).parse()
medicament.mCodeCIS = med[19:]
print med, medicament.mTitle
self.mMedicament_list += [medicament]
f = open(filename, 'a')
f.write(str(medicament)+'\n')
f.close()
print self.mMedicament_list
def getMedData(self, url):
import urllib2
return urllib2.urlopen('http://'+self.mBaseUrl+'/'+url).read()
def fetchURL(self, key):
import httplib, urllib
params = urllib.urlencode({
'page':'1',
'affliste':'0',
'affNumero':'0',
'isAlphabet':'1',
'inClauseSubst':'0',
'nomSubstances':'',
'typeRecherche':'0',
'choixRecherche':'medicament',
'txtCaracteres':key,
'radLibelle':'2',
'txtCaracteresSub':'',
'radLibelleSub':'4'
})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = httplib.HTTPConnection(self.mBaseUrl + ":80")
conn.request("POST", "/index.php", params, headers)
response = conn.getresponse()
print key, response.status, response.reason
data = response.read()
conn.close()
return data
def getMedURL_List(self, html):
from BeautifulSoup import BeautifulSoup
import re
site = BeautifulSoup(html)
return [x.attrMap['href'] for x in site.findAll('a',attrs={'href':re.compile ("^extrait.php")})]
if __name__ == "__main__":
main().fetchMedicaments("med.txt")
|
parnedo/medicaments.france | code/csv2sql.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pharmacy import pharmacy
if __name__ == "__main__":
import sys
for l in open (sys.argv[1], "r").readlines():
print pharmacy.fromCsv(l).sql()
|
parnedo/medicaments.france | code/pharmacy_parser.py | <reponame>parnedo/medicaments.france
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pharmacy import pharmacy
import xml.etree.ElementTree as ET
from HTMLParser import HTMLParser
from BeautifulSoup import BeautifulSoup
import HTMLParser
import re
class pharmacy_parser:
def __init__(self, data):
self.removeHTMLTags = re.compile(r'<.*?>')
self.mTree = BeautifulSoup (data, convertEntities=BeautifulSoup.HTML_ENTITIES)
self.mHtml_parser = HTMLParser.HTMLParser()
def parse(self):
p = pharmacy()
for s in self.mTree.findAll('strong'):
if s is not None:
#print s, [s.nextSibling]
if "Dén. commerciale" in str(s):
try:
p.commercial = s.nextSibling
except (AttributeError, TypeError):
pass
if "Raison sociale" in str(s):
try:
p.social= s.nextSibling
except (AttributeError, TypeError):
pass
if "Adresse" in str(s):
try:
p.addresse = s.nextSibling
except (AttributeError, TypeError):
pass
if "Code postal - ville :" in str(s):
try:
(p.postalcode,p.city) = re.search('(\d+)[\n*](.*)', str(s.nextSibling)).groups()
except (AttributeError, TypeError):
pass
if "Téléphone" in str(s):
try:
(p.telephone,)= re.search('(\d+)', str(s.nextSibling)).groups()
except (AttributeError, TypeError):
pass
if "Télécopie" in str(s):
try:
(p.fax,)= re.search('(\d+)', str(s.nextSibling)).groups()
except (AttributeError, TypeError):
pass
return p
if __name__ == "__main__":
import sys
with open (sys.argv[1], "r") as myfile:
data=myfile.read()
print [pharmacy_parser(data).parse()]
p = pharmacy_parser(data).parse()
p.idx = 62971
print p.sql()
|
parnedo/medicaments.france | code/medicament.py | <reponame>parnedo/medicaments.france
#!/usr/bin/python
# -*- coding: utf-8 -*-
class medicament :
def __init__(self):
self.mTitle = None
self.mCodeCIS = None
self.mCodeCIP13 = None
self.mTherapy = None
self.mGenericGroup = None
self.mComposition = None
self.mPresentation = None
self.mSMR = None # Service Medical rendu
self.mASMR = None
self.mOther = None
def __str__(self):
return str(self.mTitle ) + "|" +\
str(self.mCodeCIS ) + "|" +\
str(self.mCodeCIP13 ) + "|" +\
str(self.getType() ) + "|" +\
str(self.mTherapy ) + "|" +\
str(self.mGenericGroup) + "|" +\
str(self.mComposition ) + "|" +\
str(self.mPresentation) + "|" +\
str(self.mSMR ) + "|" +\
str(self.mASMR ) + "|" +\
str(self.mOther )
def __repr__(self):
return self.__str__()
def getType(self):
# note: the first matching keyword wins, so keywords repeated further down this chain are never reached
if self.mComposition is None: return 0
if "Compartiment" in self.mComposition: return 10
if "émulsion" in self.mComposition: return 10
if "Liquide" in self.mComposition: return 10
if "Petit" in self.mComposition: return 10
if "Poudre" in self.mComposition: return 10
if "Sirop" in self.mComposition: return 10
if "Solution" in self.mComposition: return 10
if "Suspension" in self.mComposition: return 10
if "Gomme" in self.mComposition: return 12
if "Crème" in self.mComposition: return 13
if "Gel" in self.mComposition: return 13
if "Granules" in self.mComposition: return 13
if "Lotion" in self.mComposition: return 13
if "Pâte" in self.mComposition: return 13
if "Pommade" in self.mComposition: return 13
if "Bain" in self.mComposition: return 14
if "Bâton" in self.mComposition: return 14
if "Capsule" in self.mComposition: return 14
if "Cartouche" in self.mComposition: return 14
if "Compartiment" in self.mComposition: return 14
if "Compresse" in self.mComposition: return 14
if "Dispersion" in self.mComposition: return 14
if "Dispositif" in self.mComposition: return 14
if "éluat" in self.mComposition: return 14
if "Emplâtre" in self.mComposition: return 14
if "émulsion" in self.mComposition: return 14
if "éponge" in self.mComposition: return 14
if "Film" in self.mComposition: return 14
if "Gaz" in self.mComposition: return 14
if "Gelée" in self.mComposition: return 14
if "Graines" in self.mComposition: return 14
if "Implant" in self.mComposition: return 14
if "Insert" in self.mComposition: return 14
if "Lyophilisat" in self.mComposition: return 14
if "Mélange" in self.mComposition: return 14
if "Microsphère" in self.mComposition: return 14
if "Mousse" in self.mComposition: return 14
if "Ovule" in self.mComposition: return 14
if "Pansement" in self.mComposition: return 14
if "Pastille" in self.mComposition: return 14
if "Plante" in self.mComposition: return 14
if "Poudre" in self.mComposition: return 14
if "Shampooing" in self.mComposition: return 14
if "Solide" in self.mComposition: return 14
if "Solution" in self.mComposition: return 14
if "Solvant" in self.mComposition: return 14
if "Suppositoire" in self.mComposition: return 14
if "Système" in self.mComposition: return 14
if "Tampon" in self.mComposition: return 14
if "Vernis" in self.mComposition: return 14
if "Collutoire" in self.mComposition: return 1
if "Collyre" in self.mComposition: return 3
if "Gélule" in self.mComposition: return 5
if "Comprimé" in self.mComposition: return 7
if "Pilule" in self.mComposition: return 7
if "Comprimé" in self.mComposition: return 9
if "Flacon" in self.mComposition: return 9
if "Gélule" in self.mComposition: return 9
if "Granules" in self.mComposition: return 9
if "Granulés" in self.mComposition: return 9
if "Lyophilisat" in self.mComposition: return 9
if "Microgranule" in self.mComposition: return 9
if "Poche" in self.mComposition: return 9
if "Poudre" in self.mComposition: return 9
if "Sachet" in self.mComposition: return 9
if "Solvant" in self.mComposition: return 9
|
parnedo/medicaments.france | code/pharmacien.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
class pharmacien:
def __init__(self):
self.name = None
self.familyname= None
self.title = None
self.nRPPS = None
def __str__(self):
return str(self.name) + "|" +\
str(self.familyname) + "|" +\
str(self.title) + "|" +\
str(self.nRPPS)
def __repr__(self):
return self.__str__()
|
parnedo/medicaments.france | code/pharmacy_main.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
class main:
def __init__(self):
self.mPharmacy_list = []
self.mBaseUrl = 'http://www.ordre.pharmacien.fr/annuaire/etablissement'
def fetchFarmacies(self, filename):
from pharmacy_parser import pharmacy_parser
import urlparse
for page in range(1,15000):
for farmacy in self.getFarmacyList(page):
r = requests.get(self.mBaseUrl+'/'+farmacy)
pharmacy = pharmacy_parser(r.text).parse()
pharmacy.idx = urlparse.parse_qs(farmacy)['sid'][0]
print pharmacy
self.mPharmacy_list += [pharmacy]
f = open(filename, 'a')
f.write(str(pharmacy)+'\n')
f.close()
#print self.mPharmacy_list
def getFarmacyList(self, page):
payload = {'type': 'P', 'page': str(page)}
r = requests.get(self.mBaseUrl, params=payload)
from BeautifulSoup import BeautifulSoup
import re
site = BeautifulSoup(r.text)
return [x.attrMap['href'] for x in site.findAll('a',attrs={'href':re.compile (".*type.*sid.*")})]
if __name__ == "__main__":
main().fetchFarmacies("pharma.txt")
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/tests/test_event.py | <filename>ramp-database/ramp_database/model/tests/test_event.py
import datetime
import shutil
import pytest
from ramp_utils import read_config
from ramp_utils.utils import encode_string
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from rampwf.prediction_types.base import BasePrediction
from rampwf.score_types.accuracy import Accuracy
from ramp_database.model.base import set_query_property
from ramp_database.model import CVFold
from ramp_database.model import EventAdmin
from ramp_database.model import EventScoreType
from ramp_database.model import EventTeam
from ramp_database.model import Model
from ramp_database.model import Submission
from ramp_database.model import SubmissionScore
from ramp_database.model import Workflow
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.testing import create_toy_db
from ramp_database.tools.event import get_event
from ramp_database.tools.user import get_team_by_name
@pytest.fixture(scope='module')
def session_scope_module():
database_config = read_config(database_config_template())
ramp_config = read_config(ramp_config_template())
try:
create_toy_db(database_config, ramp_config)
with session_scope(database_config['sqlalchemy']) as session:
yield session
finally:
shutil.rmtree(
ramp_config['ramp']['deployment_dir'], ignore_errors=True
)
db, _ = setup_db(database_config['sqlalchemy'])
Model.metadata.drop_all(db)
def test_event_model_property(session_scope_module):
event = get_event(session_scope_module, 'iris_test')
assert repr(event) == 'Event(iris_test)'
assert issubclass(event.Predictions, BasePrediction)
assert isinstance(event.workflow, Workflow)
assert event.workflow.name == 'Classifier'
assert event.n_participants == 2
assert event.n_jobs == 2
@pytest.mark.parametrize(
"opening, public_opening, closure, properties, expected_values",
[(None, None, None, ['is_open'], [True]),
(None, None, datetime.datetime.utcnow(), ['is_open', 'is_closed'],
[False, True]),
(datetime.datetime.utcnow() + datetime.timedelta(days=1), None, None,
['is_open', 'is_closed'], [False, False]),
(None, None, datetime.datetime.utcnow(), ['is_public_open', 'is_closed'],
[False, True]),
(None, datetime.datetime.utcnow() + datetime.timedelta(days=1), None,
['is_public_open', 'is_closed'], [False, False])]
)
def test_event_model_timestamp(session_scope_module, opening, public_opening,
closure, properties, expected_values):
# check the property linked to the opening/closure of the event.
event = get_event(session_scope_module, 'iris_test')
# store the original timestamp before to force them
init_opening = event.opening_timestamp
init_public_opening = event.public_opening_timestamp
init_closure = event.closing_timestamp
# set to non-default values the date if necessary
event.opening_timestamp = opening if opening is not None else init_opening
event.public_opening_timestamp = (public_opening
if public_opening is not None
else init_public_opening)
event.closing_timestamp = closure if closure is not None else init_closure
for prop, exp_val in zip(properties, expected_values):
assert getattr(event, prop) is exp_val
# reset the event since we are sharing the dataset across all the
# module tests.
event.opening_timestamp = init_opening
event.public_opening_timestamp = init_public_opening
event.closing_timestamp = init_closure
def test_event_model_score(session_scope_module):
# Make Model usable in declarative mode
set_query_property(Model, session_scope_module)
event = get_event(session_scope_module, 'iris_test')
assert repr(event) == 'Event(iris_test)'
assert issubclass(event.Predictions, BasePrediction)
assert isinstance(event.workflow, Workflow)
assert event.workflow.name == 'Classifier'
event_type_score = event.official_score_type
assert event_type_score.name == 'acc'
event_type_score = event.get_official_score_type(session_scope_module)
assert event_type_score.name == 'acc'
assert event.combined_combined_valid_score_str is None
assert event.combined_combined_test_score_str is None
assert event.combined_foldwise_valid_score_str is None
assert event.combined_foldwise_test_score_str is None
event.combined_combined_valid_score = 0.1
event.combined_combined_test_score = 0.2
event.combined_foldwise_valid_score = 0.3
event.combined_foldwise_test_score = 0.4
assert event.combined_combined_valid_score_str == '0.1'
assert event.combined_combined_test_score_str == '0.2'
assert event.combined_foldwise_valid_score_str == '0.3'
assert event.combined_foldwise_test_score_str == '0.4'
@pytest.mark.parametrize(
'backref, expected_type',
[('score_types', EventScoreType),
('event_admins', EventAdmin),
('event_teams', EventTeam),
('cv_folds', CVFold)]
)
def test_event_model_backref(session_scope_module, backref, expected_type):
event = get_event(session_scope_module, 'iris_test')
backref_attr = getattr(event, backref)
assert isinstance(backref_attr, list)
# only check if the list is not empty
if backref_attr:
assert isinstance(backref_attr[0], expected_type)
def test_event_score_type_model_property(session_scope_module):
event = get_event(session_scope_module, 'iris_test')
# get only the accuracy score
event_type_score = \
(session_scope_module.query(EventScoreType)
.filter(EventScoreType.event_id == event.id)
.filter(EventScoreType.name == 'acc')
.one())
assert repr(event_type_score) == "acc: Event(iris_test)"
assert isinstance(event_type_score.score_type_object, Accuracy)
assert event_type_score.is_lower_the_better is False
assert event_type_score.minimum == pytest.approx(0)
assert event_type_score.maximum == pytest.approx(1)
assert event_type_score.worst == pytest.approx(0)
assert callable(event_type_score.score_type_object.score_function)
@pytest.mark.parametrize(
'backref, expected_type',
[('submissions', SubmissionScore)]
)
def test_event_score_type_model_backref(session_scope_module, backref,
expected_type):
event = get_event(session_scope_module, 'iris_test')
# get only the accuracy score
event_type_score = \
(session_scope_module.query(EventScoreType)
.filter(EventScoreType.event_id == event.id)
.filter(EventScoreType.name == 'acc')
.one())
backref_attr = getattr(event_type_score, backref)
assert isinstance(backref_attr, list)
# only check if the list is not empty
if backref_attr:
assert isinstance(backref_attr[0], expected_type)
def test_event_team_model(session_scope_module):
event = get_event(session_scope_module, 'iris_test')
team = get_team_by_name(session_scope_module, 'test_user')
event_team = (session_scope_module.query(EventTeam)
.filter(EventTeam.event_id == event.id)
.filter(EventTeam.team_id == team.id)
.one())
assert repr(event_team) == "Event(iris_test)/Team({})".format(
encode_string('test_user'))
@pytest.mark.parametrize(
'backref, expected_type',
[('submissions', Submission)]
)
def test_event_team_model_backref(session_scope_module, backref, expected_type):
event = get_event(session_scope_module, 'iris_test')
team = get_team_by_name(session_scope_module, 'test_user')
event_team = (session_scope_module.query(EventTeam)
.filter(EventTeam.event_id == event.id)
.filter(EventTeam.team_id == team.id)
.one())
backref_attr = getattr(event_team, backref)
assert isinstance(backref_attr, list)
# only check if the list is not empty
if backref_attr:
assert isinstance(backref_attr[0], expected_type) |
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_ramp.py | import os
import pytest
from ramp_utils.testing import ramp_config_template
from ramp_utils import read_config
from ramp_utils import generate_ramp_config
@pytest.mark.parametrize(
"config",
[ramp_config_template(),
read_config(ramp_config_template()),
read_config(ramp_config_template(), filter_section='ramp')]
)
def test_generate_ramp_config(config):
ramp_config = generate_ramp_config(config)
expected_config = {
'event': 'iris',
'event_name': 'iris_test',
'event_title': 'Iris event',
'event_is_public': True,
'sandbox_name': 'starting_kit',
'deployment_dir': '/tmp/databoard_test',
'ramp_kits_dir': os.path.join('/tmp/databoard_test', 'ramp-kits'),
'ramp_data_dir': os.path.join('/tmp/databoard_test', 'ramp-data'),
'ramp_kit_submissions_dir': os.path.join('/tmp/databoard_test',
'ramp-kits', 'iris',
'submissions'),
'ramp_submissions_dir': os.path.join('/tmp/databoard_test',
'submissions'),
'ramp_sandbox_dir': os.path.join('/tmp/databoard_test', 'ramp-kits',
'iris', 'submissions', 'starting_kit')
}
assert ramp_config == expected_config
|