player1537-forks/spack
|
var/spack/repos/builtin/packages/dpcpp/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Dpcpp(CMakePackage):
"""Data Parallel C++ compiler: Intel's implementation of SYCL programming model"""
homepage = 'https://intel.github.io/llvm-docs/'
git = 'https://github.com/intel/llvm.git'
version('develop', branch='sycl')
version('2021.09', commit='bd68232bb96386bf7649345c0557ba520e73c02d')
maintainers = ['ravil-mobile']
variant('cuda', default=False, description='switch from OpenCL to CUDA')
variant('rocm', default=False, description='switch from OpenCL to ROCm')
variant('rocm-platform', default='AMD', values=('AMD', 'NVIDIA'), multi=False, description='choose ROCm backend')
variant('openmp', default=False, description='build with OpenMP without target offloading')
variant('esimd-cpu', default=False, description='build with ESIMD_CPU support')
variant('assertions', default=False, description='build with assertions')
variant('docs', default=False, description='build Doxygen documentation')
variant('werror', default=False, description='treat warnings as errors')
variant('shared', default=False, description='build shared libraries')
variant('remangle_libclc', default=True, description='remangle libclc gen. variants')
variant('lld', default=False, description='use LLD linker for build')
depends_on('cmake@3.16.2:', type='build')
depends_on('ninja@1.10.0:', type='build')
depends_on('cuda@10.2.0:11.4.999', when='+cuda')
# NOTE: AMD HIP needs to be tested; it will be done in the next update
# depends_on('cuda@10.2.0:10.2.999', when='rocm-platform=NVIDIA', type='build')
# depends_on('hip@4.0.0:', when='+rocm', type='build')
root_cmakelists_dir = 'llvm'
def cmake_args(self):
llvm_external_projects = 'sycl;llvm-spirv;opencl;libdevice;xpti;xptifw'
if '+openmp' in self.spec:
llvm_external_projects += ';openmp'
sycl_dir = os.path.join(self.stage.source_path, 'sycl')
spirv_dir = os.path.join(self.stage.source_path, 'llvm-spirv')
xpti_dir = os.path.join(self.stage.source_path, 'xpti')
xptifw_dir = os.path.join(self.stage.source_path, 'xptifw')
libdevice_dir = os.path.join(self.stage.source_path, 'libdevice')
llvm_enable_projects = 'clang;' + llvm_external_projects
libclc_targets_to_build = ''
sycl_build_pi_rocm_platform = self.spec.variants['rocm-platform'].value
if self.spec.satisfies('target=x86_64:'):
llvm_targets_to_build = 'X86'
elif self.spec.satisfies('target=aarch64:'):
llvm_targets_to_build = 'ARM;AArch64'
else:
raise InstallError('target is not supported. '
'This package only works on x86_64 or aarch64')
is_cuda = '+cuda' in self.spec
is_rocm = '+rocm' in self.spec
if is_cuda or is_rocm:
llvm_enable_projects += ';libclc'
if is_cuda:
llvm_targets_to_build += ';NVPTX'
libclc_targets_to_build = 'nvptx64--;nvptx64--nvidiacl'
if is_rocm:
if sycl_build_pi_rocm_platform == 'AMD':
llvm_targets_to_build += ';AMDGPU'
libclc_targets_to_build += ';amdgcn--;amdgcn--amdhsa'
elif sycl_build_pi_rocm_platform and not is_cuda:
llvm_targets_to_build += ';NVPTX'
libclc_targets_to_build += ';nvptx64--;nvptx64--nvidiacl'
args = [
self.define_from_variant('LLVM_ENABLE_ASSERTIONS', 'assertions'),
self.define('LLVM_TARGETS_TO_BUILD', llvm_targets_to_build),
self.define('LLVM_EXTERNAL_PROJECTS', llvm_external_projects),
self.define('LLVM_EXTERNAL_SYCL_SOURCE_DIR', sycl_dir),
self.define('LLVM_EXTERNAL_LLVM_SPIRV_SOURCE_DIR', spirv_dir),
self.define('LLVM_EXTERNAL_XPTI_SOURCE_DIR', xpti_dir),
self.define('XPTI_SOURCE_DIR', xpti_dir),
self.define('LLVM_EXTERNAL_XPTIFW_SOURCE_DIR', xptifw_dir),
self.define('LLVM_EXTERNAL_LIBDEVICE_SOURCE_DIR', libdevice_dir),
self.define('LLVM_ENABLE_PROJECTS', llvm_enable_projects),
self.define('LIBCLC_TARGETS_TO_BUILD', libclc_targets_to_build),
self.define_from_variant('SYCL_BUILD_PI_CUDA', 'cuda'),
self.define_from_variant('SYCL_BUILD_PI_ROCM', 'rocm'),
self.define('SYCL_BUILD_PI_ROCM_PLATFORM', sycl_build_pi_rocm_platform),
self.define('LLVM_BUILD_TOOLS', True),
self.define_from_variant('SYCL_ENABLE_WERROR', 'werror'),
self.define('SYCL_INCLUDE_TESTS', True),
self.define_from_variant('LIBCLC_GENERATE_REMANGLED_VARIANTS',
'remangle_libclc'),
self.define_from_variant('LLVM_ENABLE_DOXYGEN', 'docs'),
self.define_from_variant('LLVM_ENABLE_SPHINX', 'docs'),
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
self.define('SYCL_ENABLE_XPTI_TRACING', 'ON'),
self.define_from_variant('LLVM_ENABLE_LLD', 'lld'),
self.define_from_variant('SYCL_BUILD_PI_ESIMD_CPU', 'esimd-cpu'),
]
if is_cuda or (is_rocm and sycl_build_pi_rocm_platform == 'NVIDIA'):
args.append(
self.define('CUDA_TOOLKIT_ROOT_DIR', self.spec['cuda'].prefix)
)
if '+openmp' in self.spec:
omp_dir = os.path.join(self.stage.source_path, 'openmp')
args.extend([
self.define('LLVM_EXTERNAL_OPENMP_SOURCE_DIR', omp_dir),
self.define('OPENMP_ENABLE_LIBOMPTARGET', False),
])
if self.compiler.name == 'gcc':
gcc_prefix = ancestor(self.compiler.cc, 2)
args.append(self.define('GCC_INSTALL_PREFIX', gcc_prefix))
return args
def setup_build_environment(self, env):
if '+cuda' in self.spec:
env.set('CUDA_LIB_PATH', '{0}/lib64/stubs'.format(self.spec['cuda'].prefix))
@run_after("install")
def post_install(self):
clang_cpp_path = os.path.join(self.spec.prefix.bin, 'clang++')
dpcpp_path = os.path.join(self.spec.prefix.bin, 'dpcpp')
real_clang_cpp_path = os.path.realpath(clang_cpp_path)
os.symlink(real_clang_cpp_path, dpcpp_path)
def setup_run_environment(self, env):
bin_path = self.spec.prefix.bin
for env_var_name, compiler in zip(['CC', 'CXX'], ['clang', 'clang++']):
env.set(env_var_name, os.path.join(bin_path, compiler))
include_env_vars = ['C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH', 'INCLUDE']
for var in include_env_vars:
env.prepend_path(var, self.prefix.include)
env.prepend_path(var, self.prefix.include.sycl)
sycl_build_pi_rocm_platform = self.spec.variants['rocm-platform'].value
if '+cuda' in self.spec or sycl_build_pi_rocm_platform == 'NVIDIA':
env.prepend_path('PATH', self.spec['cuda'].prefix.bin)
env.set('CUDA_TOOLKIT_ROOT_DIR', self.spec['cuda'].prefix)
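# Usage sketch (hypothetical spec, for illustration): `spack install dpcpp +cuda`
# builds the SYCL toolchain with the CUDA backend, and `spack load dpcpp` then
# exposes the installed clang/clang++ via CC/CXX as set in setup_run_environment.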
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/butter/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Butter(Package):
"""butter: Bowtie UTilizing iTerative placEment of Repetitive small rnas.
A wrapper for bowtie to produce small RNA-seq alignments where
multimapped small RNAs tend to be placed near regions of confidently
high density."""
homepage = "https://github.com/MikeAxtell/butter"
url = "https://github.com/MikeAxtell/butter/archive/v0.3.3.tar.gz"
version('0.3.3', sha256='862cbe06d51fdefca18e5fe4cfa8e4df6ce92686ccbc389affe05b199ea9243b')
depends_on('perl', type=('build', 'run'))
depends_on('samtools')
depends_on('bowtie')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('butter', prefix.bin)
install('bam2wig', prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/xsdk/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from copy import deepcopy
from spack import *
def xsdk_depends_on_accl(accl_name, accl_var, *args, **kwargs):
if accl_name == 'cuda':
accl_arch_name = 'cuda_arch'
accl_arch_values = list(deepcopy(CudaPackage.cuda_arch_values))
elif accl_name == 'rocm':
accl_arch_name = 'amdgpu_target'
accl_arch_values = list(deepcopy(ROCmPackage.amdgpu_targets))
# require ~cuda when xsdk~cuda (and '?cuda' not used)
usedep = 1
args_new = list(deepcopy(args))
if not isinstance(accl_var, list):
accl_var = [accl_var]
for idx, var in enumerate(accl_var):
# skip variants starting with '?' so that
# they are left unspecified by xsdk
if not var.startswith('?'):
args_new[0] += ' ~%s' % var
else:
accl_var[idx] = var.replace('?', '')
# if '?cuda' skip adding '~cuda' dep
if var == '?' + accl_name:
usedep = 0
kwargs_new = deepcopy(kwargs)
if 'when' in kwargs_new:
kwargs_new['when'] += ' ~' + accl_name
else:
kwargs_new['when'] = '~' + accl_name
if usedep:
depends_on(*args_new, **kwargs_new)
# require +cuda when xsdk+cuda, and match the arch
for arch in accl_arch_values:
args_new = list(deepcopy(args))
kwargs_new = deepcopy(kwargs)
args_new[0] += '+%s %s=%s' % ('+'.join(accl_var), accl_arch_name, str(arch))
if 'when' in kwargs_new:
kwargs_new['when'] += ' +%s %s=%s' % (accl_name, accl_arch_name, str(arch))
else:
kwargs_new['when'] = '+%s %s=%s' % (accl_name, accl_arch_name, str(arch))
depends_on(*args_new, **kwargs_new)
def xsdk_depends_on(spec, cuda_var='', rocm_var='', **kwargs):
"""
Wrapper for depends_on which can handle propagating cuda and rocm
variants.
Currently, it propagates +cuda_var when xsdk+cuda and +rocm_var
when xsdk+rocm. When xsdk~[cuda|rocm], then ~[cuda|rocm]_var is
selected unless the variant string is prefixed with a '?'
(see the tasmanian use below). When '?' prefix is used, then
the variant is left unspecified.
[cuda|rocm]_var can be an array of variant strings or just a single
variant string. The spack '+' and '~' symbols should not appear
in the strings.
"""
if bool(cuda_var):
xsdk_depends_on_accl('cuda', cuda_var, spec, **kwargs)
if bool(rocm_var):
xsdk_depends_on_accl('rocm', rocm_var, spec, **kwargs)
if not bool(cuda_var) and not bool(rocm_var):
depends_on(spec, **kwargs)
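# Illustrative sketch of what the wrapper above generates: a call such as
#     xsdk_depends_on('sundials@5.8.0', when='@0.7.0', cuda_var='cuda')
# expands into roughly
#     depends_on('sundials@5.8.0 ~cuda', when='@0.7.0 ~cuda')
#     depends_on('sundials@5.8.0+cuda cuda_arch=<arch>',
#                when='@0.7.0 +cuda cuda_arch=<arch>')  # one line per CUDA arch
# A '?'-prefixed variant is not forced to '~' on the non-accelerator side, and
# if the accelerator name itself is prefixed (cuda_var='?cuda') the '~cuda'
# dependency line is skipped entirely.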
class Xsdk(BundlePackage, CudaPackage, ROCmPackage):
"""Xsdk is a suite of Department of Energy (DOE) packages for numerical
simulation. This is a Spack bundle package that installs the xSDK
packages
"""
homepage = "https://xsdk.info"
maintainers = ['balay', 'luszczek', 'balos1']
version('develop')
version('0.7.0')
version('0.6.0')
version('0.5.0', deprecated=True)
version('0.4.0', deprecated=True)
version('0.3.0', deprecated=True)
variant('trilinos', default=True, description='Enable trilinos package build')
variant('datatransferkit', default=True, description='Enable datatransferkit package build')
variant('omega-h', default=True, description='Enable omega-h package build')
variant('strumpack', default=True, description='Enable strumpack package build')
variant('dealii', default=True, description='Enable dealii package build')
variant('alquimia', default=True, description='Enable alquimia package build')
variant('phist', default=True, description='Enable phist package build')
variant('ginkgo', default=True, description='Enable ginkgo package build')
variant('libensemble', default=True, description='Enable py-libensemble package build')
variant('precice', default=(sys.platform != 'darwin'),
description='Enable precice package build')
variant('butterflypack', default=True, description='Enable butterflypack package build')
variant('heffte', default=True, description='Enable heffte package build')
variant('slate', default=True, description='Enable slate package build')
variant('arborx', default=True, description='Enable ArborX build')
xsdk_depends_on('hypre@develop+superlu-dist+shared', when='@develop',
cuda_var='cuda')
xsdk_depends_on('hypre@2.23.0+superlu-dist+shared', when='@0.7.0',
cuda_var='cuda')
xsdk_depends_on('hypre@2.20.0+superlu-dist+shared', when='@0.6.0')
xsdk_depends_on('hypre@2.18.2+superlu-dist+shared', when='@0.5.0')
xsdk_depends_on('hypre@2.15.1~internal-superlu', when='@0.4.0')
xsdk_depends_on('hypre@2.12.1~internal-superlu', when='@0.3.0')
xsdk_depends_on('mfem@develop+mpi+superlu-dist+petsc+sundials+examples+miniapps',
when='@develop', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('mfem@4.3.0+mpi+superlu-dist+petsc+sundials+examples+miniapps',
when='@0.7.0', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('mfem@4.2.0+mpi+superlu-dist+petsc+sundials+examples+miniapps',
when='@0.6.0', cuda_var='cuda')
xsdk_depends_on('mfem@4.0.1-xsdk+mpi~superlu-dist+petsc+sundials+examples+miniapps',
when='@0.5.0')
xsdk_depends_on('mfem@3.4.0+mpi+superlu-dist+petsc+sundials+examples+miniapps',
when='@0.4.0')
xsdk_depends_on('mfem@3.3.2+mpi+superlu-dist+petsc+sundials+examples+miniapps',
when='@0.3.0')
xsdk_depends_on('superlu-dist@develop', when='@develop')
xsdk_depends_on('superlu-dist@7.1.1', when='@0.7.0')
xsdk_depends_on('superlu-dist@6.4.0', when='@0.6.0')
xsdk_depends_on('superlu-dist@6.1.1', when='@0.5.0')
xsdk_depends_on('superlu-dist@6.1.0', when='@0.4.0')
xsdk_depends_on('superlu-dist@5.2.2', when='@0.3.0')
xsdk_depends_on('trilinos@develop+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse+tpetra+nox+ifpack2+zoltan+zoltan2+amesos2' +
'~exodus~dtk+intrepid2+shards+stratimikos gotype=int' +
' cxxstd=14', when='@develop +trilinos')
xsdk_depends_on('trilinos@13.2.0+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse+tpetra+nox+ifpack2+zoltan+zoltan2+amesos2' +
'~exodus~dtk+intrepid2+shards+stratimikos gotype=int' +
' cxxstd=14', when='@0.7.0 +trilinos')
xsdk_depends_on('trilinos@13.0.1+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse+tpetra+nox+ifpack2+zoltan+zoltan2+amesos2' +
'~exodus~dtk+intrepid2+shards gotype=int' +
' cxxstd=11', when='@0.6.0 +trilinos')
xsdk_depends_on('trilinos@12.18.1+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse+tpetra+nox+ifpack2+zoltan+zoltan2+amesos2' +
'~exodus+dtk+intrepid2+shards', when='@0.5.0 +trilinos')
xsdk_depends_on('trilinos@12.14.1+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse+tpetra+nox+ifpack2+zoltan+zoltan2+amesos2' +
'~exodus+dtk+intrepid2+shards', when='@0.4.0 +trilinos')
xsdk_depends_on('trilinos@12.12.1+hypre+superlu-dist+hdf5~mumps+boost' +
'~suite-sparse~tpetra~ifpack2~zoltan~zoltan2~amesos2'
'~exodus', when='@0.3.0 +trilinos')
xsdk_depends_on('datatransferkit@master',
when='@develop +trilinos +datatransferkit')
dtk7ver = '3.1-rc2' if sys.platform == 'darwin' else '3.1-rc3'
xsdk_depends_on('datatransferkit@' + dtk7ver,
when='@0.7.0 +trilinos +datatransferkit')
xsdk_depends_on('datatransferkit@3.1-rc2',
when='@0.6.0 +trilinos +datatransferkit')
xsdk_depends_on('petsc +trilinos', when='+trilinos @:0.6.0')
xsdk_depends_on('petsc +batch', when='platform=cray @0.5.0:')
xsdk_depends_on('petsc@main+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@develop', cuda_var='cuda')
xsdk_depends_on('petsc@3.16.1+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.7.0', cuda_var='cuda')
xsdk_depends_on('petsc@3.14.1+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.6.0', cuda_var='cuda')
xsdk_depends_on('petsc@3.12.1+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.5.0')
xsdk_depends_on('petsc@3.10.3+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.4.0')
xsdk_depends_on('petsc@3.8.2+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.3.0')
xsdk_depends_on('dealii +trilinos~adol-c', when='+trilinos +dealii')
xsdk_depends_on('dealii ~trilinos', when='~trilinos +dealii')
xsdk_depends_on('dealii@master~assimp~python~doc~gmsh+petsc+slepc+mpi~int64+hdf5' +
'~netcdf+metis~sundials~ginkgo~symengine~nanoflann~simplex~arborx',
when='@develop +dealii')
xsdk_depends_on('dealii@9.3.2~assimp~python~doc~gmsh+petsc+slepc+mpi~int64+hdf5' +
'~netcdf+metis~sundials~ginkgo~symengine~simplex~arborx',
when='@0.7.0 +dealii')
xsdk_depends_on('dealii@9.2.0~assimp~python~doc~gmsh+petsc+slepc+mpi~int64+hdf5' +
'~netcdf+metis~sundials~ginkgo~symengine~simplex~arborx',
when='@0.6.0 +dealii')
xsdk_depends_on('dealii@9.1.1~assimp~python~doc~gmsh+petsc+slepc+mpi~int64+hdf5' +
'~netcdf+metis~sundials~ginkgo~symengine',
when='@0.5.0 +dealii')
xsdk_depends_on('dealii@9.0.1~assimp~python~doc~gmsh+petsc~slepc+mpi~int64+hdf5' +
'~netcdf+metis~ginkgo~symengine',
when='@0.4.0 +dealii')
xsdk_depends_on('pflotran@develop', when='@develop')
xsdk_depends_on('pflotran@3.0.2', when='@0.7.0')
xsdk_depends_on('pflotran@xsdk-0.6.0', when='@0.6.0')
xsdk_depends_on('pflotran@xsdk-0.5.0', when='@0.5.0')
xsdk_depends_on('pflotran@xsdk-0.4.0', when='@0.4.0')
xsdk_depends_on('pflotran@xsdk-0.3.0', when='@0.3.0')
xsdk_depends_on('alquimia@develop', when='@develop +alquimia')
xsdk_depends_on('alquimia@1.0.9', when='@0.7.0 +alquimia')
xsdk_depends_on('alquimia@xsdk-0.6.0', when='@0.6.0 +alquimia')
xsdk_depends_on('alquimia@xsdk-0.5.0', when='@0.5.0 +alquimia ')
xsdk_depends_on('alquimia@xsdk-0.4.0', when='@0.4.0 +alquimia')
xsdk_depends_on('alquimia@xsdk-0.3.0', when='@0.3.0 +alquimia')
xsdk_depends_on('sundials +trilinos', when='+trilinos @0.6.0:')
xsdk_depends_on('sundials@develop~int64+hypre+petsc+superlu-dist',
when='@develop', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('sundials@5.8.0~int64+hypre+petsc+superlu-dist',
when='@0.7.0', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('sundials@5.5.0~int64+hypre+petsc+superlu-dist',
when='@0.6.0', cuda_var='cuda')
xsdk_depends_on('sundials@5.0.0~int64+hypre+petsc+superlu-dist', when='@0.5.0')
xsdk_depends_on('sundials@3.2.1~int64+hypre', when='@0.4.0')
xsdk_depends_on('sundials@3.1.0~int64+hypre', when='@0.3.0')
xsdk_depends_on('plasma@develop:', when='@develop %gcc@6.0:')
xsdk_depends_on('plasma@21.8.29:', when='@0.7.0 %gcc@6.0:')
xsdk_depends_on('plasma@20.9.20:', when='@0.6.0 %gcc@6.0:')
xsdk_depends_on('plasma@19.8.1:', when='@0.5.0 %gcc@6.0:')
xsdk_depends_on('plasma@18.11.1:', when='@0.4.0 %gcc@6.0:')
xsdk_depends_on('magma@master', when='@develop', cuda_var='?cuda', rocm_var='?rocm')
xsdk_depends_on('magma@2.6.1', when='@0.7.0', cuda_var='?cuda', rocm_var='?rocm')
xsdk_depends_on('magma@2.5.4', when='@0.6.0', cuda_var='?cuda')
xsdk_depends_on('magma@2.5.1', when='@0.5.0', cuda_var='?cuda')
xsdk_depends_on('magma@2.4.0', when='@0.4.0', cuda_var='?cuda')
xsdk_depends_on('magma@2.2.0', when='@0.3.0', cuda_var='?cuda')
xsdk_depends_on('amrex@develop+sundials',
when='@develop %intel', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@develop+sundials',
when='@develop %gcc', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@develop+sundials',
when='@develop %cce', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@21.10+sundials',
when='@0.7.0 %intel', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@21.10+sundials',
when='@0.7.0 %gcc', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@21.10+sundials',
when='@0.7.0 %cce', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('amrex@20.10', when='@0.6.0 %intel')
xsdk_depends_on('amrex@20.10', when='@0.6.0 %gcc')
xsdk_depends_on('amrex@19.08', when='@0.5.0 %intel')
xsdk_depends_on('amrex@19.08', when='@0.5.0 %gcc')
xsdk_depends_on('amrex@18.10.1', when='@0.4.0 %intel')
xsdk_depends_on('amrex@18.10.1', when='@0.4.0 %gcc')
xsdk_depends_on('slepc@main', when='@develop')
xsdk_depends_on('slepc@3.16.0', when='@0.7.0')
xsdk_depends_on('slepc@3.14.0', when='@0.6.0')
xsdk_depends_on('slepc@3.12.0', when='@0.5.0')
xsdk_depends_on('slepc@3.10.1', when='@0.4.0')
xsdk_depends_on('omega-h +trilinos', when='+trilinos +omega-h')
xsdk_depends_on('omega-h ~trilinos', when='~trilinos +omega-h')
xsdk_depends_on('omega-h@main', when='@develop +omega-h')
xsdk_depends_on('omega-h@9.34.1', when='@0.7.0 +omega-h')
xsdk_depends_on('omega-h@9.32.5', when='@0.6.0 +omega-h')
xsdk_depends_on('omega-h@9.29.0', when='@0.5.0 +omega-h')
xsdk_depends_on('omega-h@9.19.1', when='@0.4.0 +omega-h')
xsdk_depends_on('strumpack ~cuda', when='~cuda @0.6.0: +strumpack')
xsdk_depends_on('strumpack@master~slate~openmp', when='@develop +strumpack')
xsdk_depends_on('strumpack@6.1.0~slate~openmp', when='@0.7.0 +strumpack')
xsdk_depends_on('strumpack@5.0.0~slate~openmp', when='@0.6.0 +strumpack')
xsdk_depends_on('strumpack@3.3.0~slate~openmp', when='@0.5.0 +strumpack')
xsdk_depends_on('strumpack@3.1.1~slate~openmp', when='@0.4.0 +strumpack')
xsdk_depends_on('pumi@master', when='@develop')
xsdk_depends_on('pumi@2.2.6', when='@0.7.0')
xsdk_depends_on('pumi@2.2.5', when='@0.6.0')
xsdk_depends_on('pumi@2.2.1', when='@0.5.0')
xsdk_depends_on('pumi@2.2.0', when='@0.4.0')
tasmanian_openmp = '~openmp' if sys.platform == 'darwin' else '+openmp'
xsdk_depends_on('tasmanian@develop+xsdkflags+blas' + tasmanian_openmp,
when='@develop',
cuda_var=['cuda', '?magma'], rocm_var=['rocm', '?magma'])
xsdk_depends_on('tasmanian@7.7+xsdkflags+mpi+blas' + tasmanian_openmp,
when='@0.7.0', cuda_var=['cuda', '?magma'])
xsdk_depends_on('tasmanian@7.3+xsdkflags+mpi+blas' + tasmanian_openmp,
when='@0.6.0', cuda_var=['cuda', '?magma'])
xsdk_depends_on('tasmanian@7.0+xsdkflags+mpi+blas' + tasmanian_openmp,
when='@0.5.0', cuda_var=['cuda', '?magma'])
xsdk_depends_on('tasmanian@6.0+xsdkflags+blas~openmp', when='@0.4.0',
cuda_var=['cuda', '?magma'])
xsdk_depends_on('arborx@master', when='@develop +arborx')
xsdk_depends_on('arborx@1.1', when='@0.7.0 +arborx')
# the Fortran 2003 bindings of phist require python@3:, but this
# creates a conflict with other packages like petsc@main. Actually
# these are type='build' dependencies, but spack reports a conflict anyway.
# This will be fixed once the new concretizer becomes available
# (says @adamjstewart)
xsdk_depends_on('phist kernel_lib=tpetra', when='+trilinos +phist')
xsdk_depends_on('phist kernel_lib=petsc', when='~trilinos +phist')
xsdk_depends_on('phist@develop ~fortran ~scamac ~openmp ~host ~int64',
when='@develop +phist')
xsdk_depends_on('phist@1.9.5 ~fortran ~scamac ~openmp ~host ~int64',
when='@0.7.0 +phist')
xsdk_depends_on('phist@1.9.3 ~fortran ~scamac ~openmp ~host ~int64',
when='@0.6.0 +phist')
xsdk_depends_on('phist@1.8.0 ~fortran ~scamac ~openmp ~host ~int64',
when='@0.5.0 +phist')
xsdk_depends_on('phist@1.7.5 ~fortran ~scamac ~openmp ~host ~int64',
when='@0.4.0 +phist')
xsdk_depends_on('ginkgo@develop ~openmp', when='@develop +ginkgo',
cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('ginkgo@1.4.0 ~openmp',
when='@0.7.0 +ginkgo', cuda_var='cuda', rocm_var='rocm')
xsdk_depends_on('ginkgo@1.3.0 ~openmp',
when='@0.6.0 +ginkgo', cuda_var='cuda')
xsdk_depends_on('ginkgo@1.1.0 ~openmp', when='@0.5.0 +ginkgo')
xsdk_depends_on('py-libensemble@develop+petsc4py', when='@develop +libensemble')
xsdk_depends_on('py-petsc4py@main', when='@develop +libensemble')
xsdk_depends_on('py-libensemble@0.8.0+petsc4py', when='@0.7.0 +libensemble')
xsdk_depends_on('py-petsc4py@3.16.1', when='@0.7.0 +libensemble')
xsdk_depends_on('py-libensemble@0.7.1+petsc4py', when='@0.6.0 +libensemble')
xsdk_depends_on('py-petsc4py@3.14.0', when='@0.6.0 +libensemble')
xsdk_depends_on('py-libensemble@0.5.2+petsc4py', when='@0.5.0 +libensemble')
xsdk_depends_on('py-petsc4py@3.12.0', when='@0.5.0 +libensemble')
xsdk_depends_on('precice ~petsc', when='platform=cray +precice')
xsdk_depends_on('precice@develop', when='@develop +precice')
xsdk_depends_on('precice@2.3.0', when='@0.7.0 +precice')
xsdk_depends_on('precice@2.1.1', when='@0.6.0 +precice')
xsdk_depends_on('precice@1.6.1', when='@0.5.0 +precice')
xsdk_depends_on('butterflypack@master', when='@develop +butterflypack')
xsdk_depends_on('butterflypack@2.0.0', when='@0.7.0 +butterflypack')
xsdk_depends_on('butterflypack@1.2.1', when='@0.6.0 +butterflypack')
xsdk_depends_on('butterflypack@1.1.0', when='@0.5.0 +butterflypack')
xsdk_depends_on('heffte@develop+fftw', when='@develop +heffte',
cuda_var=['cuda', '?magma'], rocm_var=['rocm', '?magma'])
xsdk_depends_on('heffte@2.2.0+fftw', when='@0.7.0 +heffte',
cuda_var=['cuda', '?magma'], rocm_var=['rocm', '?magma'])
xsdk_depends_on('heffte@2.0.0+fftw', when='@0.6.0 +heffte',
cuda_var=['cuda', '?magma'])
xsdk_depends_on('slate@master', when='@develop +slate %gcc@6.0:', cuda_var='cuda')
xsdk_depends_on('slate@2021.05.02', when='@0.7.0 +slate %gcc@6.0:', cuda_var='cuda')
xsdk_depends_on('slate@2020.10.00', when='@0.6.0 +slate %gcc@6.0:', cuda_var='cuda')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-pmw-patched/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPmwPatched(PythonPackage):
"""Schrodinger's Fork of Python megawidgets with essential patches applied.
Pmw is a toolkit for building high-level compound widgets, or
megawidgets, constructed using other widgets as component parts."""
homepage = "https://github.com/schrodinger/pmw-patched"
git = "https://github.com/schrodinger/pmw-patched"
version('02-10-2020', commit='<PASSWORD>')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-cli/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCli(RPackage):
"""Helpers for Developing Command Line Interfaces.
A suite of tools to build attractive command line interfaces ('CLIs'), from
semantic elements: headings, lists, alerts, paragraphs, etc. Supports
custom themes via a 'CSS'-like language. It also contains a number of lower
level 'CLI' elements: rules, boxes, trees, and 'Unicode' symbols with
'ASCII' alternatives. It integrates with the 'crayon' package to support
'ANSI' terminal colors."""
cran = "cli"
version('3.2.0', sha256='cd5a1b754d09de33f088f25ecdb0494100f9a42bc0a66622bfd7d8ec5498e862')
version('3.1.1', sha256='c8b3e6014ad60593ba21897255acfe90c0e3f98bd4f7e22c1f3acb2644cf54cf')
version('3.1.0', sha256='c70a61830bf706a84c59eb74a809978846cee93742198ab4192742a5df1ace11')
version('3.0.1', sha256='d89a25b6cd760e157605676e104ce65473a7d8d64c289efdd9640e949968b4fd')
version('2.2.0', sha256='39a77af61724f8cc1f5117011e17bb2a488cbac61a7c112db078a675d3ac40b8')
version('2.0.2', sha256='490834e5b80eb036befa0e150996bcab1c4d5d168c3d45209926e52d0d5413b6')
version('1.1.0', sha256='4fc00fcdf4fdbdf9b5792faee8c7cf1ed5c4f45b1221d961332cda82dbe60d0a')
version('1.0.1', sha256='ef80fbcde15760fd55abbf9413b306e3971b2a7034ab8c415fb52dc0088c5ee4')
version('1.0.0', sha256='8fa3dbfc954ca61b8510f767ede9e8a365dac2ef95fe87c715a0f37d721b5a1d')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-glue', type=('build', 'run'), when='@2:')
depends_on('r-assertthat', type=('build', 'run'), when='@:2.3')
depends_on('r-crayon@1.3.4:', type=('build', 'run'), when='@:2.2')
depends_on('r-fansi', type=('build', 'run'), when='@2:2.2')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-vipor/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RVipor(RPackage):
"""Plot Categorical Data Using Quasirandom Noise and Density Estimates.
Generate a violin point plot, a combination of a violin/histogram plot and
a scatter plot by offsetting points within a category based on their
density using quasirandom noise."""
cran = "vipor"
version('0.4.5', sha256='7d19251ac37639d6a0fed2d30f1af4e578785677df5e53dcdb2a22771a604f84')
version('0.4.4', sha256='5abfd7869dae42ae2e4f52206c23433a43b485b1220685e445877ee5864a3f5c')
depends_on('r@3.0.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-geonames/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGeonames(RPackage):
"""Interface to the "Geonames" Spatial Query Web Service.
The web service at <https://www.geonames.org/> provides a number of spatial
data queries, including administrative area hierarchies, city locations and
some country postal code queries. A (free) username is required and rate
limits exist."""
cran = "geonames"
version('0.999', sha256='1dd7bbd82d9425d14eb36f8e5bf431feaccfe3b0c4e70bf38f44f13dfc59e17b')
depends_on('r@2.2.0:', type=('build', 'run'))
depends_on('r-rjson', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-rgexf/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRgexf(RPackage):
"""Build, Import and Export GEXF Graph Files.
Create, read and write GEXF (Graph Exchange XML Format) graph files (used
in Gephi and others). Using the XML package, it allows the user to easily
build/read graph files including attributes, GEXF viz attributes (such as
color, size, and position), network dynamics (for both edges and nodes) and
edge weighting. Users can build/handle graphs element-by-element or
massively through data-frames, visualize the graph on a web browser through
"sigmajs" (a javascript library) and interact with the igraph package."""
cran = "rgexf"
version('0.16.2', sha256='6ee052b0de99d0c7492366b991d345a51b3d0cc890d10a68b8670e1bd4fc8201')
version('0.16.0', sha256='2a671df9ac70cfefd4092754317cb28e32a33df345b80e1975bf838e838245ee')
version('0.15.3', sha256='2e8a7978d1fb977318e6310ba65b70a9c8890185c819a7951ac23425c6dc8147')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-igraph', type=('build', 'run'))
depends_on('r-servr', type=('build', 'run'), when='@0.16.0:')
depends_on('r-rook', type=('build', 'run'), when='@:0.15.3')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/mpilander/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpilander(CMakePackage):
"""There can only be one (MPI process)!"""
homepage = "https://github.com/MPILander/MPILander"
git = "https://github.com/MPILander/MPILander.git"
maintainers = ['ax3l']
version('develop', branch='master')
# variant('cuda', default=False, description='Enable CUDA support')
# variant(
# 'schedulers',
# description='List of supported schedulers',
# values=('alps', 'lsf', 'tm', 'slurm', 'sge', 'loadleveler'),
# multi=True
# )
depends_on('cmake@3.9.2:', type='build')
provides('mpi@:3.1')
# compiler support
conflicts('%gcc@:4.7')
conflicts('%clang@:3.8')
conflicts('%apple-clang@:7.4')
conflicts('%intel@:16')
def cmake_args(self):
args = [
# tests and examples
self.define('BUILD_TESTING', self.run_tests),
self.define('BUILD_EXAMPLES', self.run_tests),
]
return args
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/dysco/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dysco(CMakePackage):
"""Dysco is a compressing storage manager for Casacore mearement sets."""
homepage = "https://github.com/aroffringa/dysco"
url = "https://github.com/aroffringa/dysco/archive/v1.2.tar.gz"
version('1.2', sha256='dd992c5a13df67173aa1d3f6dc5df9b51b0bea2fe77bc08f5be7a839be741269')
depends_on('casacore')
depends_on('gsl')
depends_on('boost+date_time+python')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/cppad/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cppad(CMakePackage):
"""A Package for Differentiation of C++ Algorithms."""
homepage = "https://www.coin-or.org/CppAD/"
url = "http://www.coin-or.org/download/source/CppAD/cppad-20170114.gpl.tgz"
git = "https://github.com/coin-or/CppAD.git"
version('develop', branch='master')
version('20170114', sha256='fa3980a882be2a668a7522146273a1b4f1d8dabe66ad4aafa8964c8c1fd6f957')
def cmake_args(self):
# This package does not obey CMAKE_INSTALL_PREFIX
args = [
"-Dcppad_prefix=%s" % (self.prefix),
"-Dcmake_install_docdir=share/cppad/doc"
]
return args
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/distcc/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Distcc(AutotoolsPackage):
"""distcc is a program to distribute compilation of C or C++
code across several machines on a network."""
homepage = "https://github.com/distcc/distcc"
url = "https://github.com/distcc/distcc/archive/v3.3.3.tar.gz"
version('3.3.3', sha256='b7f37d314704fbaf006d747514ff6e4d0d722102ef7d2aea132f97cf170f5169')
depends_on('popt')
depends_on('libiberty')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-pyface/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyface(PythonPackage):
"""The pyface project contains a toolkit-independent GUI abstraction layer,
which is used to support the "visualization" features of the Traits
package. Thus, you can write code in terms of the Traits API (views, items,
editors, etc.), and let pyface and your selected toolkit and back-end take
care of the details of displaying them."""
homepage = "https://docs.enthought.com/pyface"
pypi = "pyface/pyface-6.1.2.tar.gz"
version('7.3.0', sha256='a7031ec4cfff034affc822e47ff5e6c1a0272e576d79465cdbbe25f721740322')
version('6.1.2', sha256='7c2ac3d5cbec85e8504b3b0b63e9307be12c6d710b46bae372ce6562d41f4fbc')
variant('backend', default='pyqt5', description='Default backend',
values=('wx', 'pyqt', 'pyqt5', 'pyside', 'pyside2'), multi=False)
depends_on('py-setuptools', type='build')
depends_on('py-importlib-metadata', when='@7.2:', type=('build', 'run'))
depends_on('py-importlib-resources@1.1:', when='@7.2:', type=('build', 'run'))
depends_on('py-traits@6.2:', when='@7.3:', type=('build', 'run'))
depends_on('py-traits@6:', when='@7:', type=('build', 'run'))
depends_on('py-traits', type=('build', 'run'))
conflicts('backend=pyside', when='@7.3:')
conflicts('backend=pyside2', when='@:6')
# Backends
with when('backend=wx'):
depends_on('py-wxpython@4:', when='@7:', type=('build', 'run'))
depends_on('py-wxpython@2.8.10:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
with when('backend=pyqt'):
depends_on('py-pyqt4@4.10:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
with when('backend=pyqt5'):
depends_on('py-pyqt5@5:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
with when('backend=pyside'):
depends_on('py-pyside@1.2:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
with when('backend=pyside2'):
depends_on('py-pyside2', type=('build', 'run'))
depends_on('py-shiboken2', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/openipmi/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openipmi(AutotoolsPackage):
"""The Open IPMI project aims to develop an open code base
to allow access to platform information using Intelligent
Platform Management Interface (IPMI)."""
homepage = "https://sourceforge.net/projects/openipmi/"
url = "https://sourceforge.net/projects/openipmi/files/OpenIPMI%202.0%20Library/OpenIPMI-2.0.29.tar.gz"
version('2.0.29', sha256='2244124579afb14e569f34393e9ac61e658a28b6ffa8e5c0d2c1c12a8ce695cd')
version('2.0.28', sha256='8e8b1de2a9a041b419133ecb21f956e999841cf2e759e973eeba9a36f8b40996')
version('2.0.27', sha256='f3b1fafaaec2e2bac32fec5a86941ad8b8cb64543470bd6d819d7b166713d20b')
depends_on('popt', type='link')
depends_on('python', type=('build', 'link', 'run'))
depends_on('perl', type=('build', 'link', 'run'))
depends_on('termcap', type='link')
depends_on('ncurses', type='link')
depends_on('readline', type='link')
patch('readline.patch', when='@2.0.27')
def configure_args(self):
args = ['LIBS=' + self.spec['ncurses'].libs.link_flags]
return args
def install(self, spec, prefix):
make('install', parallel=False)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-reticulate/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReticulate(RPackage):
"""Interface to 'Python'.
Interface to 'Python' modules, classes, and functions. When calling into
'Python', R data types are automatically converted to their equivalent
'Python' types. When values are returned from 'Python' to R they are
converted back to R types. Compatible with all versions of 'Python' >=
2.7."""
cran = "reticulate"
version('1.24', sha256='b918c5204916601f757ad0fc629b2ae1eabab7cdf7f6aa2e219d26e506d916cc')
version('1.23', sha256='fea04a3ff33c71f1910d65000a93c6882180ca03f8657ee118ea9e79786c36d6')
version('1.18', sha256='b33f855a58f446eefbe38df8a1a4865390f5d4ebd64b2c72266baaee64628513')
version('1.15', sha256='47db3e9c9424263ade15287da8e74f6ba261a936b644b197dba6772853b7b50d')
version('1.13', sha256='adbe41d556b667c4419d563680f8608a56b0f792b8bc427b3bf4c584ff819de3')
depends_on('r@3.0:', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-rcpp@0.12.7:', type=('build', 'run', 'link'))
depends_on('r-rcpptoml', type=('build', 'run', 'link'), when='@1.23:')
depends_on('r-here', type=('build', 'run', 'link'), when='@1.23:')
depends_on('r-jsonlite', type=('build', 'run'))
depends_on('r-png', type=('build', 'run', 'link'), when='@1.23:')
depends_on('r-rappdirs', type=('build', 'run'), when='@1.15:')
depends_on('r-withr', type=('build', 'run', 'link'), when='@1.23:')
depends_on('python@2.7.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-meldmd/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMeldmd(PythonPackage, CudaPackage):
"""MELD is a tool for inferring the structure of
biomolecules from sparse, ambiguous, or noisy data."""
homepage = "http://meldmd.org/"
url = "https://github.com/maccallumlab/meld/archive/refs/tags/0.4.20.tar.gz"
version('0.4.20', sha256='8c8d2b713f8dc0ecc137d19945b3957e12063c8dda569696e47c8820eeac6c92')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('amber')
depends_on('openmm')
depends_on('py-netcdf4', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('py-parmed', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-mpi4py', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/hpcx-mpi/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class HpcxMpi(Package):
"""The HPC-X MPI implementation from NVIDIA/Mellanox based on OpenMPI.
This package is for external specs only."""
homepage = "https://developer.nvidia.com/networking/hpc-x"
maintainers = ['mwkrentel']
has_code = False
provides('mpi')
def install(self, spec, prefix):
raise InstallError('HPC-X MPI is not buildable, it is for external '
'specs only.')
def setup_dependent_package(self, module, dependent_spec):
# This works for AOCC (AMD), Intel and GNU.
self.spec.mpicc = os.path.join(self.prefix.bin, 'mpicc')
self.spec.mpicxx = os.path.join(self.prefix.bin, 'mpicxx')
self.spec.mpif77 = os.path.join(self.prefix.bin, 'mpif77')
self.spec.mpifc = os.path.join(self.prefix.bin, 'mpif90')
def make_base_environment(self, prefix, env):
env.set('MPICC', os.path.join(prefix.bin, 'mpicc'))
env.set('MPICXX', os.path.join(prefix.bin, 'mpicxx'))
env.set('MPIF77', os.path.join(prefix.bin, 'mpif77'))
env.set('MPIF90', os.path.join(prefix.bin, 'mpif90'))
env.prepend_path('LD_LIBRARY_PATH', prefix.lib)
env.set('OPAL_PREFIX', prefix)
def setup_dependent_build_environment(self, env, dependent_spec):
self.make_base_environment(self.prefix, env)
def setup_run_environment(self, env):
self.make_base_environment(self.prefix, env)
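# Configuration sketch (illustrative values only): since this package is not
# buildable, HPC-X is registered as an external spec in packages.yaml, e.g.
#
#     packages:
#       hpcx-mpi:
#         externals:
#         - spec: hpcx-mpi@2.9.0
#           prefix: /opt/hpcx/ompi
#         buildable: false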
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/ppopen-math-vis/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PpopenMathVis(MakefilePackage):
"""
ppOpen-MATH/VIS is a set of libraries for parallel visualization.
Capabilities of ppOpen-MATH/VIS (ver.0.2.0) are as follows:
Using background voxels with adaptive mesh refinement (AMR).
Single UCD file.
Flat MPI parallel programming models.
(OpenMP/MPI hybrid will be supported in the future).
Can be called from programs written in both Fortran 90 and C.
Only FDM-type structured meshes are supported.
"""
homepage = "http://ppopenhpc.cc.u-tokyo.ac.jp/ppopenhpc/"
git = "https://github.com/Post-Peta-Crest/ppOpenHPC.git"
version('master', branch='MATH/VIS')
depends_on('mpi')
def edit(self, spec, prefix):
makefile_in = FileFilter('Makefile.in')
makefile_in.filter('mpifccpx', spec['mpi'].mpicc)
makefile_in.filter('mpiFCCpx', spec['mpi'].mpicxx)
makefile_in.filter('mpifrtpx', spec['mpi'].mpifc)
makefile_in.filter('-Kfast', '-O3')
makefile_in.filter(r'~/ppOpen-HPC/.*', prefix)
mkdirp('include')
mkdirp('lib')
def install(self, spec, prefix):
make('install')
mkdir(join_path(prefix, 'examples'))
copy_tree('examples', join_path(prefix, 'examples'))
mkdir(join_path(prefix, 'doc'))
copy_tree('doc', join_path(prefix, 'doc'))
@property
def libs(self):
return find_libraries(
['libfppohvisfdm3d', 'libppohvisfdm3d'],
root=self.prefix, shared=False, recursive=True)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/batctl/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Batctl(MakefilePackage):
"""B.A.T.M.A.N. advanced control and management tool"""
homepage = "https://github.com/open-mesh-mirror/batctl"
url = "https://github.com/open-mesh-mirror/batctl/archive/v2019.5.tar.gz"
version('2019.5', sha256='ffe5857a33068ec174140c154610d76d833524d840a2fc2d1a15e16686213cad')
version('2019.4', sha256='a3564eb9727335352dc0cfa2f2b29474c2c837384689ac5fcb387784a56e7685')
version('2019.3', sha256='2bd93fa14925a8dc63a67e64266c8ccd2fa3ac44b10253d93e6f8a630350070c')
version('2019.2', sha256='fb656208ff7d4cd8b1b422f60c9e6d8747302a347cbf6c199d7afa9b80f80ea3')
depends_on('libnl')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('batctl', prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-affyrnadegradation/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffyrnadegradation(RPackage):
"""Analyze and correct probe positional bias in microarray data due to RNA
degradation.
The package helps with the assessment and correction of RNA degradation
effects in Affymetrix 3' expression arrays. The parameter d gives a
robust and accurate measure of RNA integrity. The correction removes the
probe positional bias, and thus improves comparability of samples that
are affected by RNA degradation."""
bioc = "AffyRNADegradation"
version('1.40.0', commit='8539a91ee464d692a267bb17c91dc1ef9a231f41')
version('1.36.0', commit='<KEY>')
version('1.30.0', commit='620c464fb09248e1c7a122828eab59a4fb778cc1')
version('1.28.0', commit='<KEY>')
version('1.26.0', commit='6ab03ad624701464280bf7dfe345d200e846298a')
version('1.24.0', commit='1f85f3da4720cef94623828713eb84d8accbcf8a')
version('1.22.0', commit='0fa78f8286494711a239ded0ba587b0de47c15d3')
depends_on('r@2.9.0:', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
|
player1537-forks/spack
|
lib/spack/spack/build_systems/perl.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from llnl.util.filesystem import filter_file
from spack.directives import extends
from spack.package import PackageBase, run_after
from spack.util.executable import Executable
class PerlPackage(PackageBase):
"""Specialized class for packages that are built using Perl.
This class provides four phases that can be overridden if required:
1. :py:meth:`~.PerlPackage.configure`
2. :py:meth:`~.PerlPackage.build`
3. :py:meth:`~.PerlPackage.check`
4. :py:meth:`~.PerlPackage.install`
The default methods use, in order of preference:
(1) Makefile.PL,
(2) Build.PL.
Some packages may need to override
:py:meth:`~.PerlPackage.configure_args`,
which produces a list of arguments for
:py:meth:`~.PerlPackage.configure`.
Arguments should not include the installation base directory.
"""
#: Phases of a Perl package
phases = ['configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'PerlPackage'
#: Callback names for build-time test
build_time_test_callbacks = ['check']
extends('perl')
def configure_args(self):
"""Produces a list containing the arguments that must be passed to
:py:meth:`~.PerlPackage.configure`. Arguments should not include
the installation base directory, which is prepended automatically.
:return: list of arguments for Makefile.PL or Build.PL
"""
return []
def configure(self, spec, prefix):
"""Runs Makefile.PL or Build.PL with arguments consisting of
an appropriate installation base directory followed by the
list returned by :py:meth:`~.PerlPackage.configure_args`.
:raise RuntimeError: if neither Makefile.PL nor Build.PL exists
"""
if os.path.isfile('Makefile.PL'):
self.build_method = 'Makefile.PL'
self.build_executable = inspect.getmodule(self).make
elif os.path.isfile('Build.PL'):
self.build_method = 'Build.PL'
self.build_executable = Executable(
os.path.join(self.stage.source_path, 'Build'))
else:
raise RuntimeError('Unknown build_method for perl package')
if self.build_method == 'Makefile.PL':
options = ['Makefile.PL', 'INSTALL_BASE={0}'.format(prefix)]
elif self.build_method == 'Build.PL':
options = ['Build.PL', '--install_base', prefix]
options += self.configure_args()
inspect.getmodule(self).perl(*options)
# It is possible that the shebang in the Build script that is created from
# Build.PL may be too long causing the build to fail. Patching the shebang
# does not happen until after install so set '/usr/bin/env perl' here in
# the Build script.
@run_after('configure')
def fix_shebang(self):
if self.build_method == 'Build.PL':
pattern = '#!{0}'.format(self.spec['perl'].command.path)
repl = '#!/usr/bin/env perl'
filter_file(pattern, repl, 'Build', backup=False)
def build(self, spec, prefix):
"""Builds a Perl package."""
self.build_executable()
# Ensure that tests run after build (if requested):
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Runs built-in tests of a Perl package."""
self.build_executable('test')
def install(self, spec, prefix):
"""Installs a Perl package."""
self.build_executable('install')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
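# Minimal usage sketch (hypothetical package, illustrative names only): a Perl
# package normally just subclasses PerlPackage and, if needed, overrides
# configure_args(); the configure/build/install phases defined above then drive
# Makefile.PL or Build.PL.
#
#     from spack import *
#
#     class PerlFooBar(PerlPackage):
#         """Hypothetical CPAN distribution Foo::Bar."""
#         homepage = "https://metacpan.org/pod/Foo::Bar"
#         url = "https://cpan.metacpan.org/authors/id/X/XX/XXX/Foo-Bar-1.0.tar.gz"
#
#         version('1.0', sha256='0000000000000000000000000000000000000000000000000000000000000000')
#
#         def configure_args(self):
#             # Extra args for Makefile.PL / Build.PL; the install base is
#             # prepended automatically by PerlPackage.configure().
#             return []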
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/libp11/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libp11(AutotoolsPackage):
"""The PKCS#11 API is an abstract API to perform operations on
cryptographic objects such as private keys, without requiring
access to the objects themselves. That is, it provides a logical
separation of the keys from the operations. The PKCS #11 API is
mainly used to access objects in smart cards and Hardware or
Software Security Modules (HSMs). That is because in these modules
the cryptographic keys are isolated in hardware or software and
are not made available to the applications using them."""
homepage = "https://github.com/OpenSC/libp11/wiki"
url = "https://github.com/OpenSC/libp11/archive/libp11-0.4.10.tar.gz"
version('0.4.11', sha256='56d6149879bda379613d89adfd3486ce5a3c20af6c1e3f9e83d15d900ab9e4bc')
version('0.4.10', sha256='123c1525fa7ce7a34060f9a4148a30717482c517a378f428b704459820c1bf35')
version('0.4.9', sha256='9d1c76d74c21ca224f96204982097ebc6b956f645b2b0b5f9c502a20e9ffcfd8')
version('0.4.8', sha256='acccd56b736942dfcc490d102d2cb2b6afa6b2e448dd1dc5a1b773eadb98f83d')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('pkgconfig', type='build')
depends_on('openssl')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./bootstrap')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-pygpu/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPygpu(PythonPackage):
"""Python packge for the libgpuarray C library."""
homepage = "https://github.com/Theano/libgpuarray"
url = "https://github.com/Theano/libgpuarray/archive/v0.6.1.tar.gz"
version('0.7.6', sha256='ad1c00dd47c3d36ee1708e5167377edbfcdb7226e837ef9c68b841afbb4a4f6a')
version('0.7.5', sha256='39c4d2e743848be43c8819c736e089ae51b11aa446cc6ee05af945c2dfd63420')
version('0.7.2', sha256='ef11ee6f8d62d53831277fd3dcab662aa770a5b5de2d30fe3018c4af959204da')
version('0.7.1', sha256='4d0f9dd63b0595a8c04d8cee91b2619847c033b011c71d776caa784322382ed6')
version('0.7.0', sha256='afe7907435dcbf78b3ea9b9f6c97e5a0d4a219a7170f5025ca0db1c289bb88df')
version('0.6.9', sha256='689716feecb4e495f4d383ec1518cf3ba70a2a642a903cc445b6b6ffc119bc25')
version('0.6.2', sha256='04756c6270c0ce3b91a9bf01be38c4fc743f5356acc18d9f807198021677bcc8')
version('0.6.1', sha256='b2466311e0e3bacdf7a586bba0263f6d232bf9f8d785e91ddb447653741e6ea5')
version('0.6.0', sha256='a58a0624e894475a4955aaea25e82261c69b4d22c8f15ec07041a4ba176d35af')
depends_on('python', type=('build', 'link', 'run'))
depends_on('libgpuarray@0.7.6', when='@0.7.6')
depends_on('libgpuarray@0.7.5', when='@0.7.5')
depends_on('libgpuarray')
# not just build-time, requires pkg_resources
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-cython@0.25:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'link', 'run'))
depends_on('py-mako@0.7:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/mscgen/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mscgen(AutotoolsPackage):
"""Mscgen is a small program that parses Message Sequence Chart descriptions
and produces PNG, SVG, EPS or server side image maps (ismaps) as the
output."""
homepage = "https://www.mcternan.me.uk/mscgen/"
url = "https://www.mcternan.me.uk/mscgen/software/mscgen-src-0.20.tar.gz"
version('0.20', sha256='3c3481ae0599e1c2d30b7ed54ab45249127533ab2f20e768a0ae58d8551ddc23')
depends_on('flex')
depends_on('bison')
depends_on('pkgconfig')
depends_on('libgd')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-mco/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMco(RPackage):
"""Multiple Criteria Optimization Algorithms and Related Functions.
A collection of functions to solve multiple criteria optimization problems
using genetic algorithms (NSGA-II). Also included is a collection of test
functions."""
cran = "mco"
version('1.15.6', sha256='17ebe279cb9c89b7cd8054ac50d3b657d2b10dadbc584b88da7e79c3a9680582')
version('1.0-15.1', sha256='3c13ebc8c1f1bfa18f3f95b3998c57fde5259876e92456b6c6d4c59bef07c193')
version('1.0-15', sha256='a25e3effbb6dcae735fdbd6c0bfc775e9fbbcc00dc00076b69c53fe250627055')
depends_on('r@3.0.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-nvidia-ml-py/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNvidiaMlPy(PythonPackage):
"""Python Bindings for the NVIDIA Management Library."""
homepage = "https://www.nvidia.com/"
pypi = "nvidia-ml-py/nvidia-ml-py-11.450.51.tar.gz"
version('11.450.51', sha256='5aa6dd23a140b1ef2314eee5ca154a45397b03e68fd9ebc4f72005979f511c73')
# pip silently replaces distutils with setuptools
depends_on('py-setuptools', type='build')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r3d/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class R3d(CMakePackage):
"""Fast, robust polyhedral intersections, analytic integration, and
conservative voxelization."""
homepage = "https://github.com/devonmpowell/r3d"
git = "https://github.com/devonmpowell/r3d.git"
maintainers = ['raovgarimella', 'gaber']
version('master', branch='master')
version('2021-03-16', commit='<PASSWORD>')
version('2019-04-24', commit='<PASSWORD>', deprecated=True)
version('2018-12-19', commit='<PASSWORD>', deprecated=True)
version('2018-01-07', commit='<PASSWORD>', deprecated=True)
variant("r3d_max_verts", default='0', description="Maximum number of vertices allowed in a polyhedron (versions 2021-03-10 or later)")
# Bypass CMake for older builds
variant("test", default=False, description="Build R3D regression tests (versions 2019-04-24 or earlier)")
@when('@:2019-04-24')
def cmake(self, spec, prefix):
pass
@when('@:2019-04-24')
def build(self, spec, prefix):
make_args = [
'CC={0}'.format(spack_cc),
]
make('libr3d.a', *make_args)
if '+test' in spec:
with working_dir('tests'):
make('all', *make_args)
@when('@:2019-04-24')
def install(self, spec, prefix):
# R3D does not have an install target so create our own here.
mkdirp(prefix.include)
my_headers = find('.', '*.h', recursive=False)
for my_header in my_headers:
install(my_header, prefix.include)
mkdirp(prefix.lib)
install('libr3d.a', prefix.lib)
if '+test' in spec:
with working_dir('tests'):
# R3D does not have an install target so create our own here.
mkdirp(prefix.test)
install('r2d_unit_tests', prefix.test)
install('r3d_unit_tests', prefix.test)
install('rNd_unit_tests', prefix.test)
# CMake support was added in 2021-03-10
@when('@2021-03-10:')
def cmake_args(self):
options = []
r3d_max_verts = self.spec.variants['r3d_max_verts'].value
if (r3d_max_verts != '0'):
options.append('-DR3D_MAX_VERTS=' + r3d_max_verts)
if self.run_tests:
options.append('-DENABLE_UNIT_TESTS=ON')
else:
options.append('-DENABLE_UNIT_TESTS=OFF')
return options
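# Note: the @when decorators above select the build path by version. Releases up
# to 2019-04-24 use the plain Makefile flow (cmake/build/install overrides),
# while 2021-03-10 and later use the CMake flow via cmake_args().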
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/opencv/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Opencv(CMakePackage, CudaPackage):
"""OpenCV (Open Source Computer Vision Library) is an open source computer
vision and machine learning software library."""
homepage = "https://opencv.org/"
url = "https://github.com/opencv/opencv/archive/4.5.0.tar.gz"
git = "https://github.com/opencv/opencv.git"
maintainers = ["bvanessen", "adamjstewart", "glennpj"]
version("master", branch="master")
version(
"4.5.4",
sha256="c20bb83dd790fc69df9f105477e24267706715a9d3c705ca1e7f613c7b3bad3d",
)
version(
"4.5.2",
sha256="ae258ed50aa039279c3d36afdea5c6ecf762515836b27871a8957c610d0424f8",
)
version(
"4.5.1",
sha256="e27fe5b168918ab60d58d7ace2bd82dd14a4d0bd1d3ae182952c2113f5637513",
)
version(
"4.5.0",
sha256="dde4bf8d6639a5d3fe34d5515eab4a15669ded609a1d622350c7ff20dace1907",
)
version(
"4.2.0",
sha256="9ccb2192d7e8c03c58fee07051364d94ed7599363f3b0dce1c5e6cc11c1bb0ec",
)
version(
"4.1.2",
sha256="385dd0a9c25e67ef0dd60e022d2a2d7b17e2f36819cf3cb46aa8cdff5c5282c9",
)
version(
"4.1.1",
sha256="5de5d96bdfb9dad6e6061d70f47a0a91cee96bb35afb9afb9ecb3d43e243d217",
)
version(
"4.1.0",
sha256="8f6e4ab393d81d72caae6e78bd0fd6956117ec9f006fba55fcdb88caf62989b7",
)
version(
"4.0.1",
sha256="7b86a0ee804244e0c407321f895b15e4a7162e9c5c0d2efc85f1cadec4011af4",
)
version(
"4.0.0",
sha256="3787b3cc7b21bba1441819cb00c636911a846c0392ddf6211d398040a1e4886c",
)
version(
"3.4.12",
sha256="c8919dfb5ead6be67534bf794cb0925534311f1cd5c6680f8164ad1813c88d13",
)
version(
"3.4.6",
sha256="e7d311ff97f376b8ee85112e2b536dbf4bdf1233673500175ed7cf21a0089f6d",
)
version(
"3.4.5",
sha256="0c57d9dd6d30cbffe68a09b03f4bebe773ee44dc8ff5cd6eaeb7f4d5ef3b428e",
)
version(
"3.4.4",
sha256="a35b00a71d77b484f73ec485c65fe56c7a6fa48acd5ce55c197aef2e13c78746",
)
version(
"3.4.3",
sha256="4eef85759d5450b183459ff216b4c0fa43e87a4f6aa92c8af649f89336f002ec",
)
version(
"3.4.1",
sha256="f1b87684d75496a1054405ae3ee0b6573acaf3dad39eaf4f1d66fdd7e03dc852",
)
version(
"3.4.0",
sha256="678cc3d2d1b3464b512b084a8cca1fad7de207c7abdf2caa1fed636c13e916da",
)
version(
"3.3.1",
sha256="5dca3bb0d661af311e25a72b04a7e4c22c47c1aa86eb73e70063cd378a2aa6ee",
)
version(
"3.3.0",
sha256="<KEY>",
)
contrib_vers = [
"3.3.0",
"3.3.1",
"3.4.0",
"3.4.1",
"3.4.3",
"3.4.4",
"3.4.5",
"3.4.6",
"3.4.12",
"4.0.0",
"4.0.1",
"4.1.0",
"4.1.1",
"4.1.2",
"4.2.0",
"4.5.0",
"4.5.1",
"4.5.2",
"4.5.4",
]
for cv in contrib_vers:
resource(
name="contrib",
git="https://github.com/opencv/opencv_contrib.git",
tag="{0}".format(cv),
when="@{0}".format(cv),
)
# Patch to fix conflict between CUDA and OpenCV (reproduced with 3.3.0
    # and 3.4.1) header files that have the same name. The problem is fixed in
# the current development branch of OpenCV. See #8461 for more information.
patch("dnn_cuda.patch", when="@3.3.0:3.4.1+cuda+dnn")
patch("opencv3.2_cmake.patch", when="@3.2:3.4.1")
# do not prepend system paths
patch("cmake_no-system-paths.patch")
patch("opencv4.1.1_clp_cmake.patch", when="@4.1.1:")
patch("opencv4.0.0_clp_cmake.patch", when="@4.0.0:4.1.0")
patch("opencv3.4.12_clp_cmake.patch", when="@3.4.12")
patch("opencv3.3_clp_cmake.patch", when="@:3.4.6")
patch("opencv3.4.4_cvv_cmake.patch", when="@3.4.4:")
patch("opencv3.3_cvv_cmake.patch", when="@:3.4.3")
# OpenCV prebuilt apps (variants)
# Defined in `apps/*/CMakeLists.txt` using
# `ocv_add_application(...)`
apps = [
"annotation",
"createsamples",
"interactive-calibration",
"model-diagnostics",
"traincascade",
"version",
"visualisation",
]
# app variants
for app in apps:
variant(app, default=False, description="Install {0} app".format(app))
# app conflicts
with when("+annotation"):
conflicts("~highgui")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~videoio")
with when("+createsamples"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~highgui")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~objdetect")
conflicts("~videoio")
with when("+interactive-calibration"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~highgui")
conflicts("~imgproc")
conflicts("~videoio")
with when("+model-diagnostics"):
conflicts("~dnn")
with when("+traincascade"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~highgui")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~objdetect")
with when("+visualisation"):
conflicts("~highgui")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~videoio")
# OpenCV modules (variants)
# Defined in `modules/*/CMakeLists.txt` using
# `ocv_add_module(...)` and `ocv_define_module(...)`
modules = [
"calib3d",
"dnn",
"features2d",
"flann",
"gapi",
"highgui",
"imgcodecs",
"imgproc",
"java",
"java_bindings_generator",
"ml",
"objc",
"objc_bindings_generator",
"objdetect",
"photo",
"python2",
"python3",
"python_bindings_generator",
"python_tests",
"stitching",
"ts",
"video",
"videoio",
"world",
]
# These need additional spack packages
# js needs Emscripten
modules_pending = [
"js",
"js_bindings_generator",
]
# module variants
for mod in modules:
# At least one of these modules must be enabled to build OpenCV
variant(mod, default=False, description="Include opencv_{0} module".format(mod))
# module conflicts and dependencies
with when("+calib3d"):
conflicts("~features2d")
conflicts("~flann")
conflicts("~imgproc")
with when("+dnn"):
conflicts("~imgproc")
conflicts("~protobuf")
with when("+features2d"):
conflicts("~imgproc")
with when("+gapi"):
conflicts("~ade")
conflicts("~imgproc")
with when("+highgui"):
conflicts("~imgcodecs")
conflicts("~imgproc")
with when("+imgcodecs"):
conflicts("~imgproc")
with when("+java"):
conflicts("~imgproc")
conflicts("~java_bindings_generator")
conflicts("~python2~python3")
with when("+java_bindings_generator"):
depends_on("java")
depends_on("ant")
with when("+objc"):
conflicts("~imgproc")
conflicts("~objc_bindings_generator")
with when("+objc_bindings_generator"):
conflicts("~imgproc")
with when("+objdetect"):
conflicts("~calib3d")
conflicts("~dnn")
conflicts("~imgproc")
with when("+photo"):
conflicts("~imgproc")
with when("+python2"):
conflicts("+python3")
conflicts("~python_bindings_generator")
depends_on("python@2.7:2.8", type=("build", "link", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-numpy", type=("build", "run"))
extends("python", when="+python2")
with when("+python3"):
conflicts("+python2")
conflicts("~python_bindings_generator")
depends_on("python@3.2:", type=("build", "link", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-numpy", type=("build", "run"))
extends("python", when="+python3")
with when("+stitching"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~flann")
conflicts("~imgproc")
with when("+ts"):
conflicts("~highgui")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~videoio")
with when("+video"):
conflicts("~imgproc")
with when("+videoio"):
conflicts("~ffmpeg")
conflicts("~imgcodecs")
conflicts("~imgproc")
# OpenCV contrib modules (variants)
contrib_modules = [
"alphamat",
"aruco",
"barcode",
"bgsegm",
"bioinspired",
"ccalib",
"cudaarithm",
"cudabgsegm",
"cudacodec",
"cudafeatures2d",
"cudafilters",
"cudaimgproc",
"cudalegacy",
"cudaobjdetect",
"cudaoptflow",
"cudastereo",
"cudawarping",
"cudev",
"cvv",
"datasets",
"dnn_objdetect",
"dnn_superres",
"dpm",
"face",
"freetype",
"fuzzy",
"hdf",
"hfs",
"img_hash",
"intensity_transform",
"line_descriptor",
"matlab",
"mcc",
"optflow",
"phase_unwrapping",
"plot",
"quality",
"rapid",
"reg",
"rgbd",
"saliency",
"sfm",
"shape",
"stereo",
"structured_light",
"superres",
"surface_matching",
"text",
"tracking",
"videostab",
"viz",
"wechat_qrcode",
"xfeatures2d",
"ximgproc",
"xobjdetect",
"xphoto",
]
contrib_modules_pending = [
"julia", # need a way to manage the installation prefix
"ovis", # need ogre
]
for mod in contrib_modules:
variant(
mod,
default=False,
description="Include opencv_{0} contrib module".format(mod),
)
# contrib module conflicts and dependencies
with when("+alphamat"):
conflicts("~eigen")
conflicts("~imgproc")
with when("+aruco"):
conflicts("~calib3d")
conflicts("~imgproc")
with when("+barcode"):
conflicts("~dnn")
conflicts("~imgproc")
with when("+bgsegm"):
conflicts("~calib3d")
conflicts("~imgproc")
conflicts("~video")
with when("+ccalib"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~highgui")
conflicts("~imgproc")
with when("+cublas"):
conflicts("~cuda")
conflicts("~cudev")
with when("+cuda"):
conflicts("~cudev")
with when("+cudaarithm"):
conflicts("~cuda")
conflicts("~cublas")
conflicts("~cudev")
conflicts("~cufft")
with when("+cudabgsegm"):
conflicts("~cuda")
conflicts("~cudev")
conflicts("~video")
with when("+cudacodec"):
conflicts("~cudev")
conflicts("~videoio")
with when("+cudafeatures2d"):
conflicts("~cuda")
conflicts("~cudafilters")
conflicts("~cudawarping")
conflicts("~cudev")
conflicts("~features2d")
with when("+cudafilters"):
conflicts("~cuda")
conflicts("~cudaarithm")
conflicts("~cudev")
conflicts("~imgproc")
with when("+cudaimgproc"):
conflicts("~cuda")
conflicts("~cudev")
conflicts("~imgproc")
with when("+cudalegacy"):
conflicts("~cuda")
conflicts("~cudev")
conflicts("~video")
with when("+cudaobjdetect"):
conflicts("~cuda")
conflicts("~cudaarithm")
conflicts("~cudawarping")
conflicts("~cudev")
conflicts("~objdetect")
with when("+cudaoptflow"):
conflicts("~cuda")
conflicts("~cudaarithm")
conflicts("~cudaimgproc")
conflicts("~cudawarping")
conflicts("~cudev")
conflicts("~optflow")
conflicts("~video")
with when("+cudastereo"):
conflicts("~calib3d")
conflicts("~cuda")
conflicts("~cudev")
with when("+cudawarping"):
conflicts("~cuda")
conflicts("~cudev")
conflicts("~imgproc")
with when("+cudev"):
conflicts("~cuda")
with when("+cvv"):
conflicts("~features2d")
conflicts("~imgproc")
conflicts("~qt")
with when("+datasets"):
conflicts("~flann")
conflicts("~imgcodecs")
conflicts("~ml")
with when("+dnn_objdetect"):
conflicts("~dnn")
conflicts("~imgproc")
with when("+dnn_superres"):
conflicts("~dnn")
conflicts("~imgproc")
with when("+dpm"):
conflicts("~imgproc")
conflicts("~objdetect")
with when("+face"):
conflicts("~calib3d")
conflicts("~imgproc")
conflicts("~objdetect")
conflicts("~photo")
with when("+fuzzy"):
conflicts("~imgproc")
with when("+freetype"):
conflicts("~imgproc")
depends_on("freetype")
depends_on("harfbuzz")
with when("+hdf"):
depends_on("hdf5")
with when("+hfs"):
with when("+cuda"):
conflicts("~cudev")
conflicts("~imgproc")
with when("+img_hash"):
conflicts("~imgproc")
with when("+intensity_transform"):
conflicts("~imgproc")
with when("+line_descriptor"):
conflicts("~imgproc")
with when("+matlab"):
conflicts("~python2~python3")
depends_on("matlab")
depends_on("py-jinja2")
with when("+mcc"):
conflicts("~calib3d")
conflicts("~dnn")
conflicts("~imgproc")
with when("+optflow"):
conflicts("~calib3d")
conflicts("~flann")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~video")
conflicts("~ximgproc")
with when("+phase_unwrapping"):
conflicts("~imgproc")
with when("+plot"):
conflicts("~imgproc")
with when("+quality"):
conflicts("~imgproc")
conflicts("~ml")
with when("+rapid"):
conflicts("~calib3d")
conflicts("~imgproc")
with when("+reg"):
conflicts("~imgproc")
with when("+rgbd"):
conflicts("~calib3d")
conflicts("~eigen")
conflicts("~imgproc")
with when("+saliency"):
conflicts("%intel")
conflicts("~features2d")
conflicts("~imgproc")
with when("+sfm"):
conflicts("~calib3d")
conflicts("~eigen")
conflicts("~features2d")
conflicts("~imgcodecs")
conflicts("~xfeatures2d")
depends_on("ceres-solver")
depends_on("gflags")
depends_on("glog")
with when("+shape"):
conflicts("~calib3d")
conflicts("~imgproc")
with when("+stereo"):
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~imgproc")
conflicts("~tracking")
with when("+structured_light"):
conflicts("~calib3d")
conflicts("~imgproc")
conflicts("~phase_unwrapping")
with when("+superres"):
with when("+cuda"):
conflicts("~cudev")
conflicts("~imgproc")
conflicts("~optflow")
conflicts("~video")
with when("+surface_matching"):
conflicts("~flann")
with when("+text"):
conflicts("~dnn")
conflicts("~features2d")
conflicts("~imgproc")
conflicts("~ml")
with when("+tracking"):
conflicts("~imgproc")
conflicts("~plot")
conflicts("~video")
with when("+videostab"):
with when("+cuda"):
conflicts("~cudev")
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~imgproc")
conflicts("~photo")
conflicts("~video")
with when("+viz"):
conflicts("~vtk")
with when("+wechat_qrcode"):
conflicts("~dnn")
conflicts("~imgproc")
depends_on("libiconv")
with when("+xfeatures2d"):
with when("+cuda"):
conflicts("~cudev")
conflicts("~calib3d")
conflicts("~features2d")
conflicts("~imgproc")
with when("+ximgproc"):
conflicts("~calib3d")
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~video")
with when("+xobjdetect"):
conflicts("~imgcodecs")
conflicts("~imgproc")
conflicts("~objdetect")
with when("+xphoto"):
conflicts("~imgproc")
conflicts("~photo")
# Optional 3rd party components (variants)
# Defined in `CMakeLists.txt` and `modules/gapi/cmake/init.cmake`
# using `OCV_OPTION(WITH_* ...)`
components = [
"1394",
"ade",
"android_mediandk",
"android_native_camera",
"avfoundation",
"cap_ios",
"carotene",
"clp",
"cpufeatures",
"cublas",
"cuda",
"cudnn",
"cufft",
"directx",
"dshow",
"eigen",
"ffmpeg",
"gdal",
"gtk",
"hpx",
"imgcodec_hdr",
"imgcodec_pfm",
"imgcodec_pxm",
"imgcodec_sunraster",
"ipp",
"itt",
"jasper",
"jpeg",
"lapack",
"msmf",
"msmf_dxva",
"onnx",
"opencl",
"opencl_d3d11_nv",
"openexr",
"opengl",
"openjpeg",
"openmp",
"plaidml",
"png",
"protobuf",
"pthreads_pf",
"qt",
"quirc",
"tbb",
"tengine",
"tesseract",
"tiff",
"v4l",
"vtk",
"vulcan",
"webp",
"win32ui",
]
# These likely need additional spack packages
components_pending = [
"aravis",
"gdcm",
"gphoto2",
"gstreamer",
"gtk_2_x", # deprecated in spack
"halide",
"inf_engine",
"librealsense",
"mfx",
"ngraph",
"nvcuvid", # disabled, details: https://github.com/opencv/opencv/issues/14850
"opencl_svm",
"openclamdblas",
"openclamdfft",
"openni",
"openni2",
"openvx",
"pvapi",
"ueye",
"va",
"va_intel",
"ximea",
"xine",
]
# components and modules with the same name
# used in `def cmake_args(self)`
component_and_module = ["freetype", "julia", "matlab"]
for component in components:
variant(
component,
default=False,
description="Include {0} support".format(component),
)
# Other (variants)
variant("shared", default=True, description="Enables the build of shared libraries")
variant("powerpc", default=False, description="Enable PowerPC for GCC")
variant(
"fast-math",
default=False,
description="Enable -ffast-math (not recommended for GCC 4.6.x)",
)
variant("nonfree", default=False, description="Enable non-free algorithms")
# Required (dependencies)
depends_on("cmake@3.5.1:", type="build")
depends_on("python@2.7:2.8,3.2:", type="build")
depends_on("java", type="build")
depends_on("zlib@1.2.3:")
# Optional 3rd party components (dependencies)
depends_on("clp", when="+clp")
depends_on("cuda@6.5:", when="+cuda")
depends_on("cuda@:10.2", when="@4.0:4.2+cuda")
depends_on("cuda@:9.0", when="@3.3.1:3.4+cuda")
depends_on("cuda@:8", when="@:3.3.0+cuda")
depends_on("cudnn", when="+cudnn")
depends_on("cudnn@:7.6", when="@4.0:4.2+cudnn")
depends_on("cudnn@:7.3", when="@3.3.1:3.4+cudnn")
depends_on("cudnn@:6", when="@:3.3.0+cudnn")
depends_on("eigen", when="+eigen")
depends_on("ffmpeg+avresample", when="+ffmpeg")
depends_on("gdal", when="+gdal")
depends_on("gtkplus", when="+gtk")
depends_on("hpx", when="+hpx")
depends_on("ipp", when="+ipp")
depends_on("jasper", when="+jasper")
depends_on("jpeg", when="+jpeg")
depends_on("lapack", when="+lapack")
depends_on("onnx", when="+onnx")
depends_on("opencl", when="+opencl")
depends_on("openexr", when="+openexr")
depends_on("gl", when="+opengl")
depends_on("openjpeg@2:", when="+openjpeg")
depends_on("libpng", when="+png")
depends_on("protobuf@3.5.0:", when="@3.4.1: +protobuf")
depends_on("protobuf@3.1.0", when="@3.3.0:3.4.0 +protobuf")
depends_on("qt@5:", when="+qt")
depends_on("qt@5:+opengl", when="+qt+opengl")
depends_on("tbb", when="+tbb")
depends_on("libtiff+jpeg+libdeflate+lzma+zlib", when="+tiff")
depends_on("vtk", when="+vtk")
depends_on("libwebp", when="+webp")
depends_on("tesseract", when="+tesseract")
depends_on("leptonica", when="+tesseract")
depends_on("libdc1394", when="+1394")
# Optional 3rd party components (conflicts)
# Defined in `CMakeLists.txt` and `modules/gapi/cmake/init.cmake`
# using `OCV_OPTION(WITH_* ...)`
conflicts("+android_mediandk", when="platform=darwin", msg="Android only")
conflicts("+android_mediandk", when="platform=linux", msg="Android only")
conflicts("+android_mediandk", when="platform=cray", msg="Android only")
conflicts("+android_native_camera", when="platform=darwin", msg="Android only")
conflicts("+android_native_camera", when="platform=linux", msg="Android only")
conflicts("+android_native_camera", when="platform=cray", msg="Android only")
conflicts("+avfoundation", when="platform=linux", msg="iOS/macOS only")
conflicts("+avfoundation", when="platform=cray", msg="iOS/macOS only")
conflicts("+cap_ios", when="platform=darwin", msg="iOS only")
conflicts("+cap_ios", when="platform=linux", msg="iOS only")
conflicts("+cap_ios", when="platform=cray", msg="iOS only")
conflicts("+carotene", when="target=x86:", msg="ARM/AARCH64 only")
conflicts("+carotene", when="target=x86_64:", msg="ARM/AARCH64 only")
conflicts("+cpufeatures", when="platform=darwin", msg="Android only")
conflicts("+cpufeatures", when="platform=linux", msg="Android only")
conflicts("+cpufeatures", when="platform=cray", msg="Android only")
conflicts("+cublas", when="~cuda")
conflicts("+cudnn", when="~cuda")
conflicts("+cufft", when="~cuda")
conflicts("+directx", when="platform=darwin", msg="Windows only")
conflicts("+directx", when="platform=linux", msg="Windows only")
conflicts("+directx", when="platform=cray", msg="Windows only")
conflicts("+dshow", when="platform=darwin", msg="Windows only")
conflicts("+dshow", when="platform=linux", msg="Windows only")
conflicts("+dshow", when="platform=cray", msg="Windows only")
conflicts("+gtk", when="platform=darwin", msg="Linux only")
conflicts("+ipp", when="target=aarch64:", msg="x86 or x86_64 only")
conflicts("+jasper", when="+openjpeg")
conflicts("+msmf", when="platform=darwin", msg="Windows only")
conflicts("+msmf", when="platform=linux", msg="Windows only")
conflicts("+msmf", when="platform=cray", msg="Windows only")
conflicts("+msmf_dxva", when="platform=darwin", msg="Windows only")
conflicts("+msmf_dxva", when="platform=linux", msg="Windows only")
conflicts("+msmf_dxva", when="platform=cray", msg="Windows only")
conflicts("+opencl_d3d11_nv", when="platform=darwin", msg="Windows only")
conflicts("+opencl_d3d11_nv", when="platform=linux", msg="Windows only")
conflicts("+opencl_d3d11_nv", when="platform=cray", msg="Windows only")
conflicts("+opengl", when="~qt")
conflicts("+tengine", when="platform=darwin", msg="Linux only")
conflicts("+tengine", when="target=x86:", msg="ARM/AARCH64 only")
conflicts("+tengine", when="target=x86_64:", msg="ARM/AARCH64 only")
conflicts("+v4l", when="platform=darwin", msg="Linux only")
conflicts("+win32ui", when="platform=darwin", msg="Windows only")
conflicts("+win32ui", when="platform=linux", msg="Windows only")
conflicts("+win32ui", when="platform=cray", msg="Windows only")
def cmake_args(self):
spec = self.spec
args = [
self.define(
"OPENCV_EXTRA_MODULES_PATH",
join_path(self.stage.source_path, "opencv_contrib/modules"),
),
self.define("BUILD_opencv_core", "on"),
]
# OpenCV pre-built apps
apps_list = []
for app in self.apps:
if "+{0}".format(app) in spec:
apps_list.append(app)
if apps_list:
args.append(self.define("BUILD_opencv_apps", "on"))
args.append(self.define("OPENCV_INSTALL_APPS_LIST", ",".join(apps_list)))
else:
args.append(self.define("BUILD_opencv_apps", "off"))
# OpenCV modules
for mod in self.modules:
args.append(self.define_from_variant("BUILD_opencv_" + mod, mod))
if mod in self.component_and_module:
args.append(self.define_from_variant("WITH_" + mod.upper(), mod))
for mod in self.modules_pending:
args.append(self.define("BUILD_opencv_" + mod, "off"))
if mod in self.component_and_module:
args.append(self.define("WITH_" + mod.upper(), "off"))
# OpenCV contrib modules
for mod in self.contrib_modules:
args.append(self.define_from_variant("BUILD_opencv_" + mod, mod))
if mod in self.component_and_module:
args.append(self.define_from_variant("WITH_" + mod.upper(), mod))
for mod in self.contrib_modules_pending:
args.append(self.define("BUILD_opencv_" + mod, "off"))
if mod in self.component_and_module:
args.append(self.define("WITH_" + mod.upper(), "off"))
# Optional 3rd party components
for component in self.components:
args.append(
self.define_from_variant("WITH_" + component.upper(), component)
)
for component in self.components_pending:
args.append(self.define("WITH_" + component.upper(), "off"))
# Other
args.extend(
[
self.define("ENABLE_CONFIG_VERIFICATION", True),
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
self.define("ENABLE_PRECOMPILED_HEADERS", False),
self.define_from_variant("WITH_LAPACK", "lapack"),
self.define_from_variant("ENABLE_POWERPC", "powerpc"),
self.define_from_variant("ENABLE_FAST_MATH", "fast-math"),
self.define_from_variant("OPENCV_ENABLE_NONFREE", "nonfree"),
]
)
if "+cuda" in spec:
if spec.variants["cuda_arch"].value[0] != "none":
cuda_arch = spec.variants["cuda_arch"].value
args.append(self.define("CUDA_ARCH_BIN", " ".join(cuda_arch)))
# TODO: this CMake flag is deprecated
if spec.target.family == "ppc64le":
args.append(self.define("ENABLE_VSX", True))
# Media I/O
zlib = spec["zlib"]
args.extend(
[
self.define("BUILD_ZLIB", False),
self.define("ZLIB_LIBRARY", zlib.libs[0]),
self.define("ZLIB_INCLUDE_DIR", zlib.headers.directories[0]),
]
)
if "+png" in spec:
libpng = spec["libpng"]
args.extend(
[
self.define("BUILD_PNG", False),
self.define("PNG_LIBRARY", libpng.libs[0]),
self.define("PNG_INCLUDE_DIR", libpng.headers.directories[0]),
]
)
if "+jpeg" in spec:
libjpeg = spec["jpeg"]
args.extend(
[
self.define("BUILD_JPEG", False),
self.define("JPEG_LIBRARY", libjpeg.libs[0]),
self.define("JPEG_INCLUDE_DIR", libjpeg.headers.directories[0]),
]
)
if "+tiff" in spec:
libtiff = spec["libtiff"]
args.extend(
[
self.define("BUILD_TIFF", False),
self.define("TIFF_LIBRARY", libtiff.libs[0]),
self.define("TIFF_INCLUDE_DIR", libtiff.headers.directories[0]),
]
)
if "+jasper" in spec:
jasper = spec["jasper"]
args.extend(
[
self.define("BUILD_JASPER", False),
self.define("JASPER_LIBRARY", jasper.libs[0]),
self.define("JASPER_INCLUDE_DIR", jasper.headers.directories[0]),
]
)
if "+clp" in spec:
clp = spec["clp"]
args.extend(
[
self.define("BUILD_CLP", False),
self.define("CLP_LIBRARIES", clp.prefix.lib),
self.define("CLP_INCLUDE_DIR", clp.headers.directories[0]),
]
)
if "+onnx" in spec:
onnx = spec["onnx"]
args.extend(
[
self.define("BUILD_ONNX", False),
self.define("ORT_LIB", onnx.libs[0]),
self.define("ORT_INCLUDE", onnx.headers.directories[0]),
]
)
if "+tesseract" in spec:
tesseract = spec["tesseract"]
leptonica = spec["leptonica"]
args.extend(
[
self.define("Lept_LIBRARY", leptonica.libs[0]),
self.define("Tesseract_LIBRARY", tesseract.libs[0]),
self.define(
"Tesseract_INCLUDE_DIR", tesseract.headers.directories[0]
),
]
)
# Python
python_exe = spec["python"].command.path
python_lib = spec["python"].libs[0]
python_include_dir = spec["python"].headers.directories[0]
if "+python2" in spec:
args.extend(
[
self.define("PYTHON2_EXECUTABLE", python_exe),
self.define("PYTHON2_LIBRARY", python_lib),
self.define("PYTHON2_INCLUDE_DIR", python_include_dir),
self.define("PYTHON3_EXECUTABLE", ""),
]
)
elif "+python3" in spec:
args.extend(
[
self.define("PYTHON3_EXECUTABLE", python_exe),
self.define("PYTHON3_LIBRARY", python_lib),
self.define("PYTHON3_INCLUDE_DIR", python_include_dir),
self.define("PYTHON2_EXECUTABLE", ""),
]
)
else:
args.extend(
[
self.define("PYTHON2_EXECUTABLE", ""),
self.define("PYTHON3_EXECUTABLE", ""),
]
)
return args
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries(
"libopencv_*", root=self.prefix, shared=shared, recursive=True
)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-lazyarray/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLazyarray(PythonPackage):
"""a Python package that provides a lazily-evaluated numerical array class,
larray, based on and compatible with NumPy arrays."""
homepage = "https://lazyarray.readthedocs.io/en/latest/"
pypi = "lazyarray/lazyarray-0.2.8.tar.gz"
version('0.5.2', sha256='fe31804d82115ed7c382840a1708f498419ec1455cac084707ece9908310c7d1')
version('0.5.1', sha256='76964dd1384a6d020ae0e70806983d15d8fcd731734063f716696ebe300ab0af')
version('0.5.0', sha256='4cc4b54940def52fd96818a1c10528c4b7ecca77aa617d9e4fecfb42b51e73cf')
version('0.4.0', sha256='837cfe001840be43339d4c10d0028a70a8b3c22be08b75429a38472cbf327976')
version('0.3.4', sha256='357e80db7472c940ed3cab873544f2b7028f6ade8737adde2c91f91aeab2835a')
version('0.3.3', sha256='c9df003af5e1007a28c4ec45f995662fd195590d5694ef7d4cfb028bc508f6ed')
version('0.3.2', sha256='be980534c5950a976709085570f69be9534bdf0f3e5c21a9113de3ee2052683e')
version('0.2.10', sha256='7a53f81b5f3a098c04003d2ad179fc197451fd96bc921510f8534c6af8cc8e19')
version('0.2.8', sha256='aaee4e18117cc512de7a4e64522f37bc6f4bf125ecffdbdbf4e4e390fbdd9ba2')
# Required versions come from doc/installation.txt or:
# https://lazyarray.readthedocs.io/en/latest/installation.html#dependencies
depends_on('python@2.7:3.9', type=('build', 'run'), when='@0.3:0.3.4')
depends_on('python@3.4:3.9', type=('build', 'run'), when='@0.4:0.5.1')
depends_on('python@3.6:', type=('build', 'run'), when='@0.5.2:')
depends_on('py-numpy@1.3:', type=('build', 'run'), when='@:0.2.10^python@:2')
depends_on('py-numpy@1.5:', type=('build', 'run'), when='@:0.2.10^python@3:')
depends_on('py-numpy@1.8:', type=('build', 'run'), when='@0.3:0.3.4^python@:2')
depends_on('py-numpy@1.12:', type=('build', 'run'), when='@0.3:0.5.1^python@3:')
depends_on('py-numpy@1.13:', type=('build', 'run'), when='@0.5.2:')
depends_on('py-setuptools', type='build')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-matrix/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMatrix(RPackage):
"""Sparse and Dense Matrix Classes and Methods.
A rich hierarchy of matrix classes, including triangular, symmetric, and
diagonal matrices, both dense and sparse and with pattern, logical and
numeric entries. Numerous methods for and operations on these matrices,
using 'LAPACK' and 'SuiteSparse' libraries."""
cran = "Matrix"
version('1.4-0', sha256='c2b463702e4051b621f5e2b091a33f883f1caa97703d65f7a52b78caf81206f6')
version('1.3-4', sha256='ab42179d44545e99bbdf44bb6d04cab051dd2aba552b1f6edd51ed71b55f6c39')
version('1.3-3', sha256='f77ec8de43ae7bfa19dfdc7e76bfefbb21b3223dbc174423fcde70b44cf36a3b')
version('1.3-2', sha256='950ba5d91018e711fd2743b3486a50dc47ae9c271389fce587792f0a9aab9531')
version('1.2-17', sha256='db43e6f0196fd5dfd05a7e88cac193877352c60d771d4ec8772763e645723fcc')
version('1.2-14', sha256='49a6403547b66675cb44c1afb04bb87130c054510cb2b94971435a826ab41396')
version('1.2-11', sha256='ba8cd6565612552fe397e909721817b6cc0604a91299d56d118208006888dc0b')
version('1.2-8', sha256='3cd2a187c45fc18a0766dc148b7f83dbf6f2163c256e887c41cbaa7c9a20dbb7')
version('1.2-6', sha256='4b49b639b7bf612fa3d1c1b1c68125ec7859c8cdadae0c13f499f24099fd5f20')
depends_on('r@3.0.1:', type=('build', 'run'))
depends_on('r@3.2.0:', type=('build', 'run'), when='@1.2.13:')
depends_on('r@3.6.0:', type=('build', 'run'), when='@1.3-2:')
depends_on('r@3.5.0:', type=('build', 'run'), when='@1.3-3:')
depends_on('r-lattice', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/julia/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
from spack.version import ver
def get_best_target(microarch, compiler_name, compiler_version):
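    # Returns the compiler-specific name for this microarchitecture: the first
    # entry of microarch.compilers[compiler_name] whose version range matches
    # compiler_version, falling back to the generic microarch name.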
for compiler_entry in microarch.compilers[compiler_name]:
if compiler_version.satisfies(ver(compiler_entry["versions"])):
return compiler_entry.get("name", microarch.name)
raise InstallError("Could not find a target architecture")
class Julia(MakefilePackage):
"""The Julia Language: A fresh approach to technical computing"""
homepage = "https://julialang.org"
url = "https://github.com/JuliaLang/julia/releases/download/v1.7.0/julia-1.7.0.tar.gz"
git = "https://github.com/JuliaLang/julia.git"
maintainers = ['glennpj', 'vchuravy', 'haampie']
version('master', branch='master')
version('1.7.2', sha256='0847943dd65001f3322b00c7dc4e12f56e70e98c6b798ccbd4f02d27ce161fef')
version('1.7.1', sha256='17d298e50e4e3dd897246ccebd9f40ce5b89077fa36217860efaec4576aa718e')
version('1.7.0', sha256='8e870dbef71bc72469933317a1a18214fd1b4b12f1080784af7b2c56177efcb4')
version('1.6.5', sha256='b70ae299ff6b63a9e9cbf697147a48a31b4639476d1947cb52e4201e444f23cb')
version('1.6.4', sha256='a4aa921030250f58015201e28204bff604a007defc5a379a608723e6bb1808d4')
# We've deprecated these versions, so that we can remove them in Spack 0.18
# They are still available in Spack 0.17. Julia 0.17.0 is the first version that
    # can be built entirely from Spack packages, without a network connection during
# the build.
for v in [
'1.6.3', '1.6.2', '1.6.1', '1.6.0', '1.5.4', '1.5.3', '1.5.2', '1.5.1', '1.5.0',
'1.4.2', '1.4.1', '1.4.0', '1.3.1', '1.2.0', '1.1.1', '1.0.0', '0.6.2', '0.5.2',
'0.5.1', '0.5.0', '0.4.7', '0.4.6', '0.4.5', '0.4.3'
]:
version(v, deprecated=True)
variant('precompile', default=True, description='Improve julia startup time')
variant('openlibm', default=True, description='Use openlibm instead of libm')
# Note, we just use link_llvm_dylib so that we not only get a libLLVM,
# but also so that llvm-config --libfiles gives only the dylib. Without
    # it, it also gives static libraries, and breaks Julia's build.
depends_on('llvm targets=amdgpu,bpf,nvptx,webassembly version_suffix=jl +link_llvm_dylib ~internal_unwind')
depends_on('libuv')
with when('@1.7.0:1.7'):
# libssh2.so.1, libpcre2-8.so.0, mbedtls.so.13, mbedcrypto.so.5, mbedx509.so.1
# openlibm.so.3, (todo: complete this list for upperbounds...)
depends_on('llvm@12.0.1')
depends_on('libuv@1.42.0')
depends_on('mbedtls@2.24.0:2.24')
depends_on('openlibm@0.7.0:0.7', when='+openlibm')
depends_on('libblastrampoline@3.0.0:3')
with when('@1.6.0:1.6'):
# libssh2.so.1, libpcre2-8.so.0, mbedtls.so.13, mbedcrypto.so.5, mbedx509.so.1
# openlibm.so.3, (todo: complete this list for upperbounds...)
depends_on('llvm@11.0.1')
depends_on('libuv@1.39.0')
depends_on('mbedtls@2.24.0:2.24')
depends_on('openlibm@0.7.0:0.7', when='+openlibm')
# Patches for llvm
depends_on('llvm', patches='llvm7-symver-jlprefix.patch')
depends_on('llvm', when='^llvm@11.0.1', patches=patch(
'https://raw.githubusercontent.com/spack/patches/0b543955683a903d711a3e95ff29a4ce3951ca13/julia/llvm-11.0.1-julia-1.6.patch',
sha256='8866ee0595272b826b72d173301a2e625855e80680a84af837f1ed6db4657f42'))
depends_on('llvm', when='^llvm@12.0.1', patches=patch(
'https://github.com/JuliaLang/llvm-project/compare/fed41342a82f5a3a9201819a82bf7a48313e296b...980d2f60a8524c5546397db9e8bbb7d6ea56c1b7.patch',
sha256='10cb42f80c2eaad3e9c87cb818b6676f1be26737bdf972c77392d71707386aa4'))
depends_on('llvm', when='^llvm@13.0.0', patches=patch(
'https://github.com/JuliaLang/llvm-project/compare/d7b669b3a30345cfcdb2fde2af6f48aa4b94845d...6ced34d2b63487a88184c3c468ceda166d10abba.patch',
sha256='92f022176ab85ded517a9b7aa04df47e19a5def88f291e0c31100128823166c1'))
# Patches for libuv
depends_on('libuv', when='^libuv@1.39.0', patches=patch(
'https://raw.githubusercontent.com/spack/patches/b59ca193423c4c388254f528afabb906b5373162/julia/libuv-1.39.0.patch',
sha256='f7c1e7341e89dc35dfd85435ba35833beaef575b997c3f978c27d0dbf805149b'))
depends_on('libuv', when='^libuv@1.42.0', patches=patch(
'https://raw.githubusercontent.com/spack/patches/89b6d14eb1f3c3d458a06f1e06f7dda3ab67bd38/julia/libuv-1.42.0.patch',
sha256='d9252fbe67ac8f15e15653f0f6b00dffa07ae1a42f013d4329d17d8b492b7cdb'))
# patchelf 0.13 is required because the rpath patch uses --add-rpath
depends_on('patchelf@0.13:', type='build')
depends_on('perl', type='build')
depends_on('libwhich', type='build')
depends_on('blas') # note: for now openblas is fixed...
depends_on('curl tls=mbedtls +nghttp2 +libssh2')
depends_on('dsfmt@2.2.4:') # apparently 2.2.3->2.2.4 breaks API
depends_on('gmp')
depends_on('lapack') # note: for now openblas is fixed...
depends_on('libblastrampoline', when='@1.7.0:')
depends_on('libgit2')
depends_on('libssh2 crypto=mbedtls')
depends_on('mbedtls libs=shared')
depends_on('mpfr')
depends_on('nghttp2')
depends_on('openblas +ilp64 symbol_suffix=64_')
depends_on('openlibm', when='+openlibm')
depends_on('p7zip')
depends_on('pcre2')
depends_on('suite-sparse +pic')
depends_on('unwind')
depends_on('utf8proc')
depends_on('zlib +shared +pic +optimize')
# Patches for julia
patch('julia-1.6-system-libwhich-and-p7zip-symlink.patch', when='@1.6.0:1.6')
patch('use-add-rpath.patch')
# Fix gfortran abi detection https://github.com/JuliaLang/julia/pull/44026
patch('fix-gfortran.patch', when='@1.7.0:1.7.1')
def patch(self):
# The system-libwhich-libblastrampoline.patch causes a rebuild of docs as it
# touches the main Makefile, so we reset the a/m-time to doc/_build's.
f = os.path.join("doc", "_build", "html", "en", "index.html")
if os.path.exists(f):
time = (os.path.getatime(f), os.path.getmtime(f))
os.utime(os.path.join("base", "Makefile"), time)
def setup_build_environment(self, env):
# this is a bit ridiculous, but we are setting runtime linker paths to
# dependencies so that libwhich can locate them.
if (
self.spec.satisfies('platform=linux') or
self.spec.satisfies('platform=cray')
):
linker_var = 'LD_LIBRARY_PATH'
elif self.spec.satisfies('platform=darwin'):
linker_var = 'DYLD_FALLBACK_LIBRARY_PATH'
else:
return
pkgs = [
'curl', 'dsfmt', 'gmp', 'libgit2', 'libssh2', 'libunwind', 'mbedtls',
'mpfr', 'nghttp2', 'openblas', 'openlibm', 'pcre2', 'suite-sparse',
'utf8proc', 'zlib'
]
if self.spec.satisfies('@1.7.0:'):
pkgs.append('libblastrampoline')
for pkg in pkgs:
for dir in self.spec[pkg].libs.directories:
env.prepend_path(linker_var, dir)
def edit(self, spec, prefix):
# TODO: use a search query for blas / lapack?
libblas = os.path.splitext(spec['blas'].libs.basenames[0])[0]
liblapack = os.path.splitext(spec['lapack'].libs.basenames[0])[0]
# Host compiler target name
march = get_best_target(spec.target, spec.compiler.name, spec.compiler.version)
# LLVM compatible name for the JIT
julia_cpu_target = get_best_target(spec.target, 'clang', spec['llvm'].version)
options = [
'prefix:={0}'.format(prefix),
'MARCH:={0}'.format(march),
'JULIA_CPU_TARGET:={0}'.format(julia_cpu_target),
'USE_BINARYBUILDER:=0',
'VERBOSE:=1',
# Spack managed dependencies
'USE_SYSTEM_BLAS:=1',
'USE_SYSTEM_CSL:=1',
'USE_SYSTEM_CURL:=1',
'USE_SYSTEM_DSFMT:=1',
'USE_SYSTEM_GMP:=1',
'USE_SYSTEM_LAPACK:=1',
'USE_SYSTEM_LIBBLASTRAMPOLINE:=1',
'USE_SYSTEM_LIBGIT2:=1',
'USE_SYSTEM_LIBSSH2:=1',
'USE_SYSTEM_LIBSUITESPARSE:=1', # @1.7:
'USE_SYSTEM_SUITESPARSE:=1', # @:1.6
'USE_SYSTEM_LIBUNWIND:=1',
'USE_SYSTEM_LIBUV:=1',
'USE_SYSTEM_LIBWHICH:=1',
'USE_SYSTEM_LLVM:=1',
'USE_SYSTEM_MBEDTLS:=1',
'USE_SYSTEM_MPFR:=1',
'USE_SYSTEM_P7ZIP:=1',
'USE_SYSTEM_PATCHELF:=1',
'USE_SYSTEM_PCRE:=1',
'USE_SYSTEM_UTF8PROC:=1',
'USE_SYSTEM_ZLIB:=1',
# todo: ilp depends on arch
'USE_BLAS64:=1',
'LIBBLASNAME:={0}'.format(libblas),
'LIBLAPACKNAME:={0}'.format(liblapack),
'override LIBUV:={0}'.format(spec['libuv'].libs.libraries[0]),
'override LIBUV_INC:={0}'.format(spec['libuv'].headers.directories[0]),
'override USE_LLVM_SHLIB:=1',
# make rebuilds a bit faster for now, not sure if this should be kept
'JULIA_PRECOMPILE:={0}'.format(
'1' if spec.variants['precompile'].value else '0'),
]
# libm or openlibm?
if spec.variants['openlibm'].value:
options.append('USE_SYSTEM_LIBM=0')
options.append('USE_SYSTEM_OPENLIBM=1')
else:
options.append('USE_SYSTEM_LIBM=1')
options.append('USE_SYSTEM_OPENLIBM=0')
with open('Make.user', 'w') as f:
f.write('\n'.join(options) + '\n')
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/scheduler.py
|
<reponame>Brain-in-Vat/Aletheia
"""to be down
"""
from mesa.time import BaseScheduler
class Scheduler(object):
def __init__(self):
self.agents = {}
    def step(self):
for agent in self.agents.values():
agent.step()
def add(self, agent):
# self.agents.add(agent)
self.agents[agent._id] = agent
class RandomActivation(BaseScheduler):
"""A scheduler which activates each agent once per step, in random order,
with the order reshuffled every step.
This is equivalent to the NetLogo 'ask agents...' and is generally the
default behavior for an ABM.
Assumes that all agents have a step(model) method.
"""
def step(self) -> None:
"""Executes the step of all agents, one at a time, in
random order.
"""
for agent in self.agent_buffer(shuffled=True):
agent.step()
self.steps += 1
self.time += 1
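

# Illustrative usage sketch: the plain Scheduler above only requires agents
# exposing an `_id` attribute and a `step()` method. `DemoAgent` is a
# hypothetical stand-in used only for this demo.
if __name__ == "__main__":
    class DemoAgent:
        def __init__(self, _id):
            self._id = _id

        def step(self):
            print("agent {} stepped".format(self._id))

    sched = Scheduler()
    for i in range(3):
        sched.add(DemoAgent(i))
    sched.step()  # activates each registered agent once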
|
Brain-in-Vat/Aletheia
|
aletheia/scenario_search/ga.py
|
<filename>aletheia/scenario_search/ga.py
from operator import itemgetter
from random import randint
# from runlocal import evaluate as run_evaluate
import random
from aletheia.settings import BASE_DIR
import json
import os
import shutil
import math
class Gene:
def __init__(self, **data):
self.__dict__.update(data)
self.size = len(data['data'])
class GA:
def __init__(self, **parameter) -> None:
'''
        bound: {(start, end): (low, high)}
        example:
            ga = GA(popsize=10, bound={(1, 11): (1, 2)}, evaluate=evaluate_func,
                    result_path=result_path, NGEN=NGEN, CXPB=CXPB, MUTPB=MUTPB)
'''
self.popsize = parameter['popsize']
self.bound = parameter['bound']
tmp = {}
for key, value in self.bound.items():
if isinstance(key, tuple):
for i in range(key[0], key[1]):
tmp[i] = value
elif isinstance(key, int):
tmp[key] = value
self.bound = tmp
self.evaluate = parameter['evaluate']
self.result_path = parameter['result_path']
self.NGEN = parameter['NGEN']
self.CXPB = parameter['CXPB']
self.MUTPB = parameter['MUTPB']
self.init_the_group()
def init_the_group(self):
pop = []
for i in range(self.popsize):
geneinfo = [
randint(self.bound[i][0], self.bound[i][1]) for i in range(len(self.bound))
]
fitness, measure = self.evaluate(geneinfo)
pop.append({'Gene': Gene(data=geneinfo),
'fitness': fitness, 'measure': measure})
self.pop = pop
self.bestindividual = self.selectBest(self.pop)
if os.path.exists(self.result_path):
if os.path.isfile(self.result_path):
os.remove(self.result_path)
            elif os.path.isdir(self.result_path):
shutil.rmtree(self.result_path)
def selectBest(self, pop):
s_inds = sorted(pop, key=itemgetter('fitness'), reverse=True)
return s_inds[0]
def selection(self, individuals, k):
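        # Fitness-proportional (roulette-wheel) selection: k individuals are drawn,
        # each with probability proportional to |fitness|, and the chosen set is
        # returned sorted by fitness in descending order.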
s_inds = sorted(individuals, key=itemgetter('fitness'), reverse=True)
sum_fits = sum(abs(ind['fitness']) for ind in individuals)
chosen = []
for i in range(k):
u = random.random() * sum_fits
sum_ = 0
for ind in s_inds:
sum_ += abs(ind['fitness'])
if sum_ >= u:
chosen.append(ind)
break
chosen = sorted(chosen, key=itemgetter('fitness'), reverse=True)
return chosen
def crossoperate(self, offspring):
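        # Two-point crossover: two cut positions are drawn at random and each child
        # keeps one parent's genes inside the cut window and the other parent's
        # genes outside it.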
dim = len(offspring[0]['Gene'].data)
# Gene's data of first offspring chosen from the selected pop
geninfo1 = offspring[0]['Gene'].data
# Gene's data of second offspring chosen from the selected pop
geninfo2 = offspring[1]['Gene'].data
if dim == 1:
pos1 = 1
pos2 = 1
else:
# select a position in the range from 0 to dim-1,
pos1 = random.randrange(1, dim)
pos2 = random.randrange(1, dim)
newoff1 = Gene(data=[]) # offspring1 produced by cross operation
newoff2 = Gene(data=[]) # offspring2 produced by cross operation
temp1 = []
temp2 = []
for i in range(dim):
if min(pos1, pos2) <= i < max(pos1, pos2):
temp2.append(geninfo2[i])
temp1.append(geninfo1[i])
else:
temp2.append(geninfo1[i])
temp1.append(geninfo2[i])
newoff1.data = temp1
newoff2.data = temp2
return newoff1, newoff2
def mutation(self, crossoff, bound):
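        # Single-point mutation: one gene position is picked at random and
        # re-sampled uniformly within its bound.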
dim = len(crossoff.data)
if dim == 1:
pos = 0
else:
pos = random.randrange(0, dim)
crossoff.data[pos] = random.randint(bound[pos][0], bound[pos][1])
return crossoff
def save_gen(self, gen):
with open(self.result_path, 'a', encoding='utf-8') as f:
datas = {
'gen': gen,
# 'pop': [data.]
'pop': [
{'Gene': x['Gene'].data, 'fitness': x['fitness'], 'measure':x['measure']} for x in self.pop
],
'best': {'Gene': self.bestindividual['Gene'].data, 'fitness': self.bestindividual['fitness'], 'measure': self.bestindividual['measure']}
}
datas = json.dumps(datas, ensure_ascii=False)
f.write(datas + "\n")
def GA_main(self):
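        # Main evolutionary loop: each generation is logged via save_gen, parents are
        # chosen by roulette-wheel selection, offspring are produced by crossover
        # (probability CXPB) and mutation (probability MUTPB), and the best
        # individual seen so far is tracked.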
popsize = self.popsize
print('Start of evolution')
NGEN = self.NGEN
CXPB = self.CXPB
MUTPB = self.MUTPB
for g in range(NGEN):
print('############ Generation {} ############'.format(g))
self.save_gen(g)
selectpop = self.selection(self.pop, popsize)
nextoff = []
while len(nextoff) != popsize:
                if len(selectpop) < 2:
                    # not enough parents left to pair up; end this generation early
                    break
offspring = [selectpop.pop() for _ in range(2)]
if random.random() < CXPB:
crossoff1, crossoff2 = self.crossoperate(offspring)
if random.random() < MUTPB: # mutate an individual with probability MUTPB
muteoff1 = self.mutation(crossoff1, self.bound)
muteoff2 = self.mutation(crossoff2, self.bound)
# Evaluate the individuals
fit_muteoff1, measure = self.evaluate(
muteoff1.data)
# Evaluate the individuals
fit_muteoff2, measure = self.evaluate(
muteoff2.data)
nextoff.append(
{'Gene': muteoff1, 'fitness': fit_muteoff1, 'measure': measure})
nextoff.append(
{'Gene': muteoff2, 'fitness': fit_muteoff2, 'measure': measure})
else:
fit_crossoff1, measure = self.evaluate(
crossoff1.data) # Evaluate the individuals
fit_crossoff2, measure = self.evaluate(
crossoff2.data)
nextoff.append(
{'Gene': crossoff1, 'fitness': fit_crossoff1, 'measure': measure})
nextoff.append(
{'Gene': crossoff2, 'fitness': fit_crossoff2, 'measure': measure})
else:
nextoff.extend(offspring)
self.pop = nextoff
fits = [ind['fitness'] for ind in self.pop]
best_ind = self.selectBest(self.pop)
if best_ind['fitness'] > self.bestindividual['fitness']:
self.bestindividual = best_ind
print("Best individual found is {}, {}".format(self.bestindividual['Gene'].data,
self.bestindividual['fitness']))
print(" Max fitness of current pop: {}".format(max(fits)))
if __name__ == '__main__':
    CXPB, MUTPB, NGEN, popsize = 0.8, 0.4, 1000, 100  # popsize must be an even number
    def evaluate(geneinfo):
        # placeholder fitness function; replace with a real evaluator
        return sum(geneinfo), {}
    run = GA(popsize=popsize, bound={(0, 116): (1, 2)}, evaluate=evaluate,
             result_path=os.path.join(BASE_DIR, 'tmp', 'ga_result.json'),
             NGEN=NGEN, CXPB=CXPB, MUTPB=MUTPB)
    run.GA_main()
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/predict_markets/fpmm.py
|
<reponame>Brain-in-Vat/Aletheia
from . import PredictMarket
import math
from enum import Enum
class TokenType(Enum):
yes_token = 'yes_token'
no_token = 'no_token'
class FPMM(PredictMarket):
def __init__(self, fee=0.02, constant=100) -> None:
super().__init__()
self.proposal_map = {}
self.fee = fee
self.constant = constant
def submit(self, agent, proposal):
self.proposals.append(proposal)
self.proposal_map[proposal._id] = proposal
def buy(self, proposal_id, agent, token_type, amount):
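        # The trading fee is deducted from the incoming amount; the remainder is
        # priced through the pool and credited to the proposal's yes/no reserve.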
proposal = self.proposal_map[proposal_id]
token_type = TokenType(token_type)
val = 0
amount = (1 - self.fee) * amount
if token_type == TokenType.yes_token:
val = self.calc_price(proposal._id, amount, 0)
proposal.yes_token += val
elif token_type == TokenType.no_token:
val = self.calc_price(proposal._id, 0, amount)
proposal.no_token += val
else:
raise Exception('unknown token type')
return amount
def sell(self, proposal_id, agent, token_type, amount):
proposal = self.proposal_map[proposal_id]
token_type = TokenType(token_type)
val = 0
if token_type == TokenType.yes_token:
if amount > proposal.yes_token:
return 0
val = (1 - self.fee)*self.calc_price(proposal._id, -amount, 0)
proposal.yes_token -= amount
elif token_type == TokenType.no_token:
if amount > proposal.no_token:
return 0
val = (1 - self.fee)*self.calc_price(proposal._id, 0, -amount)
proposal.no_token -= amount
else:
raise Exception('unknown token type')
return -val
def calc_price(self, proposal_id, yes_token, no_token):
"""compute the price of current proposal
"""
proposal = self.proposal_map[proposal_id]
p_w = proposal.yes_token
p_l = proposal.no_token
if proposal.state == 2:
return yes_token
elif proposal.state == 3:
return no_token
val = 0
# alpha = yes_token / p_w
# belta = no_token / p_l
if yes_token:
delta_x = yes_token
x_1 = p_w + delta_x
delta_y = self.constant/x_1 - p_l
val = delta_x + delta_y
elif no_token:
delta_x = no_token
x_1 = p_l + delta_x
delta_y = self.constant/x_1 - p_w
val = delta_y + delta_x
return val
def calc_current_price(self, proposal_id, token_type):
proposal = self.proposal_map[proposal_id]
p_w = proposal.yes_token
p_l = proposal.no_token
# if proposal.state == 2 and token_type == 'yes':
# return 1
# elif proposal.state == 2 and token_type == 'no':
# return 0
# if proposal.state == 3 and token_type == 'yes':
# return 0
# elif proposal.state == 3 and token_type == 'no':
# return 1
# yes_part = math.exp(p_w/b)
# no_part = math.exp(p_l/b)
if token_type == 'yes':
b = 1 - (p_w + p_l)
delta_x = (b + math.sqrt(b*b + 4*p_l))
return 1 - delta_x/2
else:
b = 1 - (p_w + p_l)
delta_x = (b + math.sqrt(b*b + 4*p_w))
return 1 - delta_x/2
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/predict_markets/__init__.py
|
class PredictMarket(object):
def __init__(self) -> None:
super().__init__()
self.proposals = []
def buy(self, agent):
pass
def sell(self, agent):
pass
def observe(self, agent):
pass
|
Brain-in-Vat/Aletheia
|
setup.py
|
<reponame>Brain-in-Vat/Aletheia
import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(name='aletheia',
      version='0.1', description='Compute governance tools', author='<NAME>', long_description=README,
long_description_content_type='text/markdown',
url='https://github.com/Brain-in-Vat/Aletheia',
packages=['aletheia'], author_email="<EMAIL>", include_package_data=True, install_requires=['mesa', 'owlready2'],
entry_points={
"console_scripts": [
"aletheia=aletheiacli.__main__:main"
]
}
)
|
Brain-in-Vat/Aletheia
|
aletheia/scenario_search/losspool.py
|
<gh_stars>0
import numpy as np
from aletheia.scenario_search.loss import belief_loss, donate_loss, token_loss
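# futarchy_loss combines the belief loss with an alpha-weighted token loss;
# qf_loss is the donation (quadratic-funding) loss alone.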
def futarchy_loss(expect, actual, origin, after, truth_index, false_index, alpha=0.05):
return belief_loss(expect, actual) + alpha * token_loss(origin, after, truth_index, false_index)
def qf_loss(expect, actual):
return donate_loss(expect, actual)
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/qf_futarchy/__init__.py
|
'''
Predict, via futarchy, which projects come out in the next round.
'''
from aletheia.mechanism_engine.predict_markets.lmsr import LMSRMarket
from aletheia.mechanism_engine.qf import QuadraticFunding
class QFFutarchy:
def __init__(self, pass_ratio=0.8, projects=[0, 1]):
self.projects = projects
self.lmsr = LMSRMarket(self.projects)
self.fee = 0.03
self.fee_pool = 0
self.round = 0
self.clr = QuadraticFunding()
self.grant_history = []
self.history_market = []
self.history_clr = []
self.award_list = []
self.pass_ratio = pass_ratio
self.pool = 0
def finish_round(self):
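        # Settle the round: rank proposals by LMSR price, mark the top pass_ratio
        # fraction as winners, pay out prediction and vote rewards, convert the
        # round's trades into quadratic-funding grants, then reset the market and
        # the QF state for the next round.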
result = {}
prices = self.lmsr.price_calcs()
prices = sorted(prices, key=lambda x: x[1], reverse=True)
choice = [price[0] for price in prices]
pass_indice = int(self.pass_ratio * len(self.projects))
self.lmsr.set_answer(choice[:pass_indice])
if self.history_market:
last_market = self.history_market[-1]
last_market.set_answer(choice)
result['award_predict_winner'] = self.award_predict_winner()
result['award_vote_winner'] = self.award_vote_winner()
self.history_market.append(self.lmsr)
self.compute_grants_from_market()
self.lmsr = LMSRMarket(self.projects)
# self.clr.grants = {
# key:value for key,value in self.clr.grants.items() if key in choice
# }
self.grant_history.append(self.clr.clr_calcs())
result['cls_grants'] = self.clr.clr_calcs()
self.history_clr.append(self.clr)
self.clr = QuadraticFunding()
return result
def award_predict_winner(self):
if len(self.award_list) == len(self.history_market):
return {}
last_market = self.history_market[-1]
trades = last_market.trades
answer = last_market.answer
trades = {k: v for k, v in trades.items() if k in answer}
users = {}
for k, v in trades.items():
trades = v['trades']
for trade in trades:
if trade['id'] in users:
users[trade['id']] += trade['amount']
else:
users[trade['id']] = trade['amount']
self.award_list.append(1)
return users
    def compute_grants_from_market(self):
        # Register each user's traded amount on a winning proposal as a
        # quadratic-funding contribution to that proposal.
        last_market = self.lmsr
        answer = last_market.answer
        trades = {k: v for k, v in last_market.trades.items() if k in answer}
        for k, v in trades.items():
            users = {}
            for trade in v['trades']:
                users[trade['id']] = users.get(trade['id'], 0) + trade['amount']
            for user_id, amount in users.items():
                self.clr.grant(k, user_id, amount)
def award_vote_winner(self):
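        # Split the market pool among users who traded on winning proposals,
        # proportionally to the amount each user traded.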
last_market = self.lmsr
trades = last_market.trades
answer = last_market.answer
pool = last_market.pool
trades = {k: v for k, v in trades.items() if k in answer}
users = {}
for k, v in trades.items():
trades = v['trades']
for trade in trades:
if trade['id'] in users:
users[trade['id']] += trade['amount']
else:
users[trade['id']] = trade['amount']
total = sum(list(users.values()))
users = {
k: pool * (v / total) for k, v in users.items()
}
return users
|
Brain-in-Vat/Aletheia
|
aletheia/algorithm_analyze/futarchyqa.py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from aletheia.settings import BASE_DIR
import json
import os
import statistics
import re
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
result_path = os.path.join(BASE_DIR, 'tmp', 'result2.json')
datas = []
with open(result_path, 'r') as f:
# item = f.readline()
for item in f.readlines():
one_data = json.loads(item)
datas.append(one_data)
total_df_dict = {
'Gen': [],
'Governance Loss': [],
'data_type': []
}
for data in datas:
gen = data['gen']
best = data['best']['fitness']
avg = [x['fitness'] for x in data['pop']]
avg = statistics.mean(avg)
total_df_dict['Gen'].append(gen)
total_df_dict['Governance Loss'].append(avg)
total_df_dict['data_type'].append('avg')
total_df_dict['Gen'].append(gen)
total_df_dict['Governance Loss'].append(best)
total_df_dict['data_type'].append('best')
total_df = pd.DataFrame(data=total_df_dict)
total_fig = px.line(total_df, x='Gen', y='Governance Loss', color='data_type',
title='The Governance Loss Changing Along the Iteration')
best_data = datas[-1]['best']
best_data_loss_metric = best_data['measure']['loss_metric']
links = []
for data in datas:
gen = data['gen']
best = data['best']['fitness']
avg = [x['fitness'] for x in data['pop']]
avg = statistics.mean(avg)
item = {
'name': 'Gen {}'.format(gen),
'href': '/gen_{}'.format(gen),
'best': best,
'avg': avg
}
links.append(item)
new_links = []
for link in links:
new_links.append(html.Br())
new_links.append(dcc.Link(link['name'], href=link['href']))
app.layout = html.Div(
children=[
dcc.Location(id='url', refresh=False),
dbc.Row(dbc.Col(html.H1(children='Algorithm Analyze',
style={'textAlign': 'center'}))),
dbc.Row(dbc.Col(html.Div(children='''
Analyze and visualize the result of Aletheia.
''', style={'textAlign': 'center'}))),
dbc.Row(
[
dbc.Col(
dcc.Graph(
                    id='example-graph',
figure=total_fig),
),
dbc.Col(
html.Div(
[
dbc.Card(
[
dbc.CardBody(
[
html.H4("The Biggest Loss Situation",
className="card-title"),
html.P(
"Gonernance Loss : {}".format(
best_data['fitness']),
className="card-text",
),
html.P("Grant Loss : {}".format(
best_data_loss_metric['token_loss']),
className='card-text'),
html.P("Token Loss : {}".format(
best_data_loss_metric['grant_loss']),
className='card-text'),
dcc.Link(
"details", "best-button", href="/best")
]
),
],
style={"width": "30rem"},
)
]
),
)
], align='center'),
dbc.Row(dbc.Col(html.Div(id='page-content', children=new_links)))
]
)
index_page = html.Div(new_links)
def get_index_page(page, size=100):
start = (page - 1) * size
end = page * size
tmp_links = links[start:end]
card_links = []
for link in tmp_links:
card_content = [
dbc.CardHeader(link['name']),
dbc.CardBody([
html.H5("Governance Loss", className="card-title"),
html.P(
"Biggest Loss : {}".format(link['best']),
className="card-text",
),
html.P(
"Avg Loss : {}".format(link['avg']),
className='card-text'
),
dcc.Link('details', href=link['href'])
])
]
card_links.append(card_content)
link_number = len(tmp_links)
result = []
for i in range(1, 20):
i_start = (i - 1) * 5
i_end = i * 5
if i_start >= link_number:
break
tmp_row = dbc.Row(
[
dbc.Col(x) for x in card_links[i_start: i_end]
]
)
result.append(tmp_row)
next_page = page + 1
result.append(dbc.Row([
dbc.Col(
[dcc.Link('Next Page', href='/top/page_{}'.format(next_page))], align='Right'
)
]))
page = html.Div(result)
return page
def create_layout(gen, data, page, size=100):
start = (page - 1) * size
end = page * size
pop = data['pop']
pop = pop[start:end]
tmp_links = []
for index, item in enumerate(pop):
tmp_links.append({
'name': 'gen_{}_item_{}'.format(gen, index),
'href': '/gen_{}/item_{}'.format(gen, index),
'loss': item['fitness'],
'grant_loss': item['measure']['loss_metric']['grant_loss'],
'token_loss': item['measure']['loss_metric']['token_loss']
})
card_links = []
for link in tmp_links:
card_content = [
dbc.CardHeader(link['name']),
dbc.CardBody([
html.H5('Governance Loss'),
html.P('Loss : {}'.format(
link['loss']), className='card-text'),
html.P('Grant Loss : {}'.format(
link['grant_loss']), className='card-text'),
html.P('Token Loss : {}'.format(
link['token_loss']), className='card-text'),
dcc.Link('details', href=link['href'])
])
]
card_links.append(card_content)
link_number = len(tmp_links)
result = []
for i in range(1, 20):
i_start = (i - 1) * 5
i_end = i * 5
if i_start >= link_number:
break
tmp_row = dbc.Row(
[
dbc.Col(x) for x in card_links[i_start: i_end]
]
)
result.append(tmp_row)
# tmp_links.append(html.Br())
# tmp_links.append(dcc.Link('gen_{}_item_{}'.format(
# gen, index), href='/gen_{}/item_{}'.format(gen, index)))
next_page = page + 1
result.append(dbc.Row([
dbc.Col(
[dcc.Link('Next Page', href='/gen_{}/page_{}'.format(gen, next_page))], align='Right'
)
]))
return html.Div(result)
def create_detail_fig(data):
gene = data['Gene']
    agent_number = int(len(gene) / 2)
    df_dict = {
        'belief': [x for x in gene[:agent_number]],
        'tokens': [x for x in gene[agent_number: agent_number * 2]],
        'id': [x for x in range(agent_number)]
    }
    data_df = pd.DataFrame(data=df_dict)
    # data_fig = px.scatter(data_df, x='belief', y='tokens')
    data_fig = px.scatter(data_df, x='id', y='belief',
                          size='tokens', size_max=60, title='Distribution of Belief and Tokens')
clr_amount = {}
data_results = data['measure']['results']
for data_result in data_results:
cls_grants = data_result['cls_grants']
for cls_grant in cls_grants:
index = cls_grant['id']
if index in clr_amount:
# clr_amount[cls_grant['id']] += cls_grant['clr_amount']
clr_amount[index]['clr_amount'] += cls_grant['clr_amount']
clr_amount[index]['number_contributions'] += cls_grant['number_contributions']
clr_amount[index]['contribution_amount'] += cls_grant['contribution_amount']
else:
clr_amount[index] = {
'clr_amount': cls_grant['clr_amount'],
'number_contributions': cls_grant['number_contributions'],
'contribution_amount': cls_grant['contribution_amount']
}
clr_length = len(data_results)
# clr_amount = {k: v/clr_length for k, v in clr_amount.items()}
clr_amounts = [
{
'id': k,
'clr_amount': v['clr_amount']/clr_length,
'number_contributions': v['number_contributions'] / clr_length,
'contribution_amount': v['contribution_amount'] / clr_length
} for k, v in clr_amount.items()
]
clr_df_dict = {
'id': [],
'clr_amount': [],
'number_contributions': [],
'contribution_amount': []
}
for clr_amount in clr_amounts:
clr_df_dict['id'].append(clr_amount['id'])
clr_df_dict['clr_amount'].append(clr_amount['clr_amount'])
clr_df_dict['number_contributions'].append(
clr_amount['number_contributions'])
clr_df_dict['contribution_amount'].append(
clr_amount['contribution_amount'])
clr_df = pd.DataFrame(data=clr_df_dict)
# clr_fig = px.bar(clr_df, x='id', y='clr_amount')
clr_fig = px.bar(clr_df, x='id', y=[
'clr_amount', 'contribution_amount'], title='Distribution of Grant Amount and Contribution Amount')
ctr_number_dict = {
'id': [],
'number_contributions': []
}
for clr_amount in clr_amounts:
ctr_number_dict['id'].append(clr_amount['id'])
ctr_number_dict['number_contributions'].append(
clr_amount['number_contributions'])
ctr_number_df = pd.DataFrame(data=ctr_number_dict)
ctr_number_fig = px.bar(ctr_number_df, x='id', y='number_contributions',
title='Distribution of Contribution Number')
token_changed = data['measure']['token_changed']
token_changed_data = {
'id': [],
'value': []
}
for index, value in enumerate(token_changed):
# token_changed_data[index] = value
token_changed_data['id'].append(index)
token_changed_data['value'].append(value)
token_changed_df = pd.DataFrame(data=token_changed_data)
    token_changed_fig = px.scatter(
        token_changed_df, x='id', y='value', title='Distribution of Token Benefit')
page_layout = html.Div([
# html.H1('Agent Distribute'),
        dbc.Row(dbc.Col(html.H1('Agent Distribution'))),
dbc.Row(
[
dbc.Col(
dcc.Graph(
id='agents',
figure=data_fig,
style={'width': '100%'}
)
),
dbc.Col(
dcc.Graph(
id='qf_amount',
figure=clr_fig,
style={'width': '100%'}
)
),
]
),
dbc.Row(
[
dbc.Col(
dcc.Graph(
id='token_changed',
figure=token_changed_fig,
style={'width': '100%'}
)
),
dbc.Col(
dcc.Graph(
id='contribute_number',
figure=ctr_number_fig,
style={'width': '100%'}
)
)
])
], style={'display': 'inline-block', 'width': '100%'})
return page_layout
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
# if pathname in []:
# if re.match(pathname, 'gen_\d')
    if re.match(r'^/gen_\d+$', pathname):
        gen_id = re.findall(r'\d+', pathname)
gen_id = int(gen_id[0])
data = datas[gen_id]
return create_layout(gen_id, data, 1, 100)
    elif re.match(r'^/gen_\d+/item_\d+$', pathname):
        gen_ids = re.findall(r'\d+', pathname)
gen_id = int(gen_ids[0])
data_id = int(gen_ids[1])
data = datas[gen_id]
data = data['pop'][data_id]
return create_detail_fig(data)
elif pathname == '/best':
return create_detail_fig(best_data)
    elif re.match(r'^/gen_\d+/page_\d+', pathname):
        gen_ids = re.findall(r'\d+', pathname)
gen_id = int(gen_ids[0])
page = int(gen_ids[1])
data = datas[gen_id]
return create_layout(gen_id, data, page, 100)
    elif re.match(r'^/top/page_\d+', pathname):
        gen_id = re.findall(r'\d+', pathname)
page = int(gen_id[0])
return get_index_page(page)
else:
return get_index_page(1)
if __name__ == '__main__':
app.run_server(debug=False)
|
Brain-in-Vat/Aletheia
|
aletheia/examples/simpleqf.py
|
<reponame>Brain-in-Vat/Aletheia
from mesa import Model
from aletheia.mechanism_engine.scheduler import RandomActivation
from mesa.datacollection import DataCollector
from mesa.space import Grid
from aletheia.mechanism_engine.predict_markets.lmsr import LMSRMarket
from aletheia.mechanism_engine.qf import QuadraticFunding
# from .agents.randomagent import RandomAgent
from aletheia.examples.agents.randomagent import FixZLAgent, RandomAgent
class QFModel(Model):
def __init__(self, project_num = 10, beliefs=[], tokens=[], pass_ratio=0.9):
self.qf = QuadraticFunding()
self.schedule = RandomActivation(self)
agent_ids = range(len(beliefs))
# todo add agents
for agent_id, belief, token in zip(agent_ids, beliefs, tokens):
agent = FixZLAgent(agent_id, self, token, belief)
self.schedule.add(agent)
# for i in range(10):
# agent = RandomAgent(i, self, 10)
# self.schedule.add(agent)
self.projects = range(0, project_num)
self.dead_line = 13
self.count = 0
self.running = True
def step(self):
self.schedule.step()
if self.count >= self.dead_line:
self.running = False
self.count += 1
self.show()
def show(self):
result = self.qf.clr_calcs()
print(result)
def evaluate(code, agent_number=10, project_number=10):
    '''
    code = [belief_0, ..., belief_{agent_number-1}, token_0, ..., token_{agent_number-1}],
    e.g. [1, 2, 10, ...]
    '''
    origin_beliefs = code[0:agent_number]
beliefs = origin_beliefs
tmp = []
for belief in beliefs:
tmp_belief = {i:0 for i in range(project_number)}
tmp_belief[belief] = 1
tmp.append(tmp_belief)
tokens = code[agent_number : agent_number * 2]
    qfmodel = QFModel(project_number, tmp, tokens)  # pass the decoded beliefs and tokens into the model
while qfmodel.running:
qfmodel.step()
if __name__ == '__main__':
qfmodel = QFModel()
while qfmodel.running:
qfmodel.step()
# qfmodel.show()
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/futarchy/__init__.py
|
<reponame>Brain-in-Vat/Aletheia
from enum import Enum
class BaseSystem(object):
def __init__(self, states, governance):
self.states = states
self.governance = governance
def update(self):
pass
class FutarchySystem(BaseSystem):
"""the origin futarchy system
"""
def __init__(self, forum=None):
self.failed_proposals = []
self.activate_proposals = []
self.passed_proposals = []
self.forum = forum
def update(self):
"""
update the system time to t + 1
"""
pass
def observe(self, agent_id):
"""
observe from the agent
"""
pass
def propose(self, proposal, agent):
pass
class SimpleFutarchySystem(FutarchySystem):
def __init__(self, forum=None, predict_market=None, duration=14):
super().__init__(forum)
self.predict_market = predict_market
self.orders = []
self.time_stick = 0
self.day = 0
self.duration = duration
# self.vote_actions = {
# 'yes': [],
# 'no': []
# }
self.vote_actions = {}
self.voted_agents = set()
def update(self):
self.time_stick += 1
def propose(self, proposal):
proposal.time = self.day
self.activate_proposals.append(proposal)
self.predict_market.submit(-1, proposal)
self.vote_actions[proposal._id] = {
'yes': [],
'no': []
}
def vote(self, agent, proposal_id, vote_type):
if agent.unique_id in self.voted_agents or agent.token < 1:
return
vote_action = self.vote_actions[proposal_id]
if vote_type == 'yes':
vote_action['yes'].append((agent.unique_id, agent.token))
elif vote_type == 'no':
vote_action['no'].append((agent.unique_id, agent.token))
self.voted_agents.add(agent.unique_id)
def step(self):
# self.time_stick += 1
# self.day = int(self.time_stick/24)
self.day += 1
remove_list = []
for proposal in self.activate_proposals:
if self.day - proposal.time >= self.duration:
                remove_list.append(proposal)
                vote_action = self.vote_actions[proposal._id]
yes_amount = sum(x[1] for x in vote_action['yes'])
no_amount = sum(x[1] for x in vote_action['no'])
if yes_amount > no_amount:
proposal.state = 2
self.passed_proposals.append(proposal)
else:
proposal.state = 3
self.failed_proposals.append(proposal)
for proposal in remove_list:
self.activate_proposals.remove(proposal)
def observe(self, agent):
return self.activate_proposals
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/qf/__init__.py
|
<filename>aletheia/mechanism_engine/qf/__init__.py
from aletheia.mechanism_engine.qf.clr import run_clr_calcs
class QuadraticFunding(object):
def __init__(self, threshold=25, total_pot=5000):
self.grants = {
}
self.threshold = threshold
self.total_pot = total_pot
def grant(self, project_id, user_id, contribution_amount):
tmp = self.grants.get(project_id)
if not tmp:
self.grants[project_id] = {
'id': project_id,
'contributions': []
}
tmp = self.grants[project_id]
tmp['contributions'].append({user_id: contribution_amount})
def clr_calcs(self):
grants = list(self.grants.values())
return run_clr_calcs(grants, self.threshold, self.total_pot)
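# Usage sketch added for illustration (not part of the original module). The project ids,
# user ids and contribution amounts below are assumptions; the structure of the value
# returned by clr_calcs() is defined by run_clr_calcs in qf/clr.py.
if __name__ == '__main__':
    qf = QuadraticFunding(threshold=25, total_pot=5000)
    qf.grant(project_id=0, user_id='user_a', contribution_amount=10)
    qf.grant(project_id=0, user_id='user_b', contribution_amount=5)
    qf.grant(project_id=1, user_id='user_c', contribution_amount=20)
    print(qf.clr_calcs())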
|
Brain-in-Vat/Aletheia
|
aletheia/resources/zlagent.py
|
'''
This is the Python code part of the zero-intelligence agent.
Prerequisites:
owlready2
mesa
'''
from mesa.agent import Agent
from owlready2 import set_datatype_iri, World, sync_reasoner_pellet
agent_base = 'zlagent.owl'
set_datatype_iri(float, 'http://www.w3.org/2001/XMLSchema#float')
class ZLAgent(Agent):
def __init__(self, kb_path, unique_id, model):
super().__init__(unique_id, model)
self.kb_path = kb_path
if not self.kb_path:
self.kb_path = agent_base
self.world = World()
self.onto = self.world.get_ontology(self.kb_path)
self.onto.load()
self.unique_id = unique_id
self.model = model
def think(self):
"""reason in kb using predefined rulers
reason process dont introduce any new individuals
"""
try:
with self.onto:
sync_reasoner_pellet(self.world, infer_property_values=True,
infer_data_property_values=True, debug=2)
except Exception as e:
print(e)
self._replace_ready_property()
def _replace_ready_property(self):
# update goal
individuals = self.onto.individuals()
for individual in individuals:
property_names = [
property._name for property in individual.get_properties()]
update_paris = []
for property_name in property_names:
if property_name.endswith('Ready'):
update_paris.append((property_name, property_name[:-5]))
if update_paris:
for property_ready, property in update_paris:
ready_value = eval('individual.' + property_ready)
now_value = eval('individual.' + property)
if not ready_value:
continue
if isinstance(ready_value, list):
individual.__dict__[property_ready] = []
if isinstance(now_value, list):
individual.__dict__[property] = ready_value
else:
individual.__dict__[property] = ready_value[0]
else:
individual.__dict__[property_ready] = None
if isinstance(now_value, list):
individual.__dict__[property] = [ready_value]
else:
individual.__dict__[property] = ready_value
def observe(self):
""" here update the knowlege of agent
the zlagent focus on the trade price when they make descisions
"""
self.onto.noToken.currentPrice = 0.5
self.onto.yesToken.currentPrice = 0.5
self.onto.GNO.myBanlance = 10
self.onto.mySelf.myWealth = 10
def execute(self):
"""here u write the execute logical
"""
print('this agent is executing')
pass
def step(self):
self.observe()
self.think()
self.execute()
    def print_belief(self):
"""
"""
pass
def add_belief(self):
pass
if __name__ == '__main__':
agent = ZLAgent(None, None, None)
agent.step()
|
Brain-in-Vat/Aletheia
|
aletheia/examples/futarchyqf.py
|
<filename>aletheia/examples/futarchyqf.py<gh_stars>0
from aletheia.scenario_search.loss import belief_loss, token_loss
from mesa import Model
from aletheia.mechanism_engine.scheduler import RandomActivation
from mesa.datacollection import DataCollector
from mesa.space import Grid
from aletheia.mechanism_engine.predict_markets.lmsr import LMSRMarket
from aletheia.mechanism_engine.qf import QuadraticFunding
from aletheia.mechanism_engine.qf_futarchy import QFFutarchy
from aletheia.examples.agents.randomagent import RandomAgent, FixZLAgent
from aletheia.scenario_search.ga import GA
from aletheia.scenario_search.losspool import qf_loss, futarchy_loss
from aletheia.settings import BASE_DIR
import os
class QFFutarchyModel(Model):
def __init__(self, projects_num=10, beliefs=[], tokens=[], pass_ratio=0.8):
# self.qf = QuadraticFunding()
self.schedule = RandomActivation(self)
# todo add agents
agent_ids = range(len(beliefs))
for agent_id, belief, token in zip(agent_ids, beliefs, tokens):
# agent = RandomAgent(i, self, 10)
agent = FixZLAgent(agent_id, self, token, belief)
self.schedule.add(agent)
self.projects = range(0, projects_num)
self.dead_line = 13
self.count = 0
self.running = True
self.qf_futarchy = QFFutarchy(
pass_ratio=pass_ratio, projects=self.projects)
self.results = []
def step(self):
for i in range(14):
self.schedule.step()
result = self.qf_futarchy.finish_round()
self.results.append(result)
award_vote_winner = result.get('award_vote_winner')
if award_vote_winner:
for k, v in award_vote_winner.items():
self.schedule.agents[k].award_token(v)
award_predict_winner = result.get('award_predict_winner')
if award_predict_winner:
for k, v in award_predict_winner.items():
self.schedule.agents[k].award_brain_token(v)
if self.count >= self.dead_line:
self.running = False
self.count += 1
def evaluate(code, agent_number=10, project_number=5):
    '''
    code = [belief_0, ..., belief_{agent_number-1}, token_0, ..., token_{agent_number-1}],
    e.g. [1, 2, 10, ...]
    '''
# project_number = max(code[:agent_number])
origin_beliefs = code[0: agent_number]
beliefs = origin_beliefs
tmp = []
for belief in beliefs:
tmp_belief = {i: 0 for i in range(project_number)}
tmp_belief[belief] = 1
tmp.append(tmp_belief)
tokens = code[agent_number: agent_number * 2]
qffutarchyModel = QFFutarchyModel(project_number, tmp, tokens)
while qffutarchyModel.running:
qffutarchyModel.step()
# compute loss
expect = []
tmp = {}
for belief in beliefs:
if belief not in tmp.keys():
tmp[belief] = 1
else:
tmp[belief] += 1
beliefs = [(k, v) for k, v in tmp.items()]
beliefs = sorted(beliefs, key=lambda x: x[1], reverse=True)
beliefs = beliefs[: int((project_number + 1) * 0.9)]
expect = [belief[0] for belief in beliefs]
results = qffutarchyModel.results
# print(results)
avg_qf_loss = 0
for result in results:
qf_result = result['cls_grants']
qf_result = [x['id'] for x in qf_result]
qf_loss = belief_loss(expect, qf_result)
avg_qf_loss += qf_loss
avg_qf_loss = avg_qf_loss/len(results)
truth_index = []
false_index = []
for index, belief in enumerate(origin_beliefs):
if belief in expect:
truth_index.append(index)
else:
false_index.append(index)
agents = qffutarchyModel.schedule.agents
origin = tokens
after = [
a.token + a.brain_token for a in agents
]
token_changed = [t2 - t1 for t1, t2 in zip(origin, after)]
value2 = token_loss(origin, after, truth_index, false_index)
loss_metric = {
'grant_loss': avg_qf_loss,
'token_loss': value2
}
return avg_qf_loss + 0.05 * value2, {'results': results, 'token_changed': token_changed, 'loss_metric': loss_metric}
def compute_experiment(result_path='result.json'):
agent_number = 10
project_number = 5
run = GA(
popsize=100,
bound={(0, agent_number): (0, project_number - 1),
(agent_number, 2*agent_number): (0, 200)},
evaluate=evaluate,
result_path=os.path.join(BASE_DIR, 'tmp', result_path),
NGEN=100,
CXPB=0.8,
MUTPB=0.4
)
run.GA_main()
if __name__ == '__main__':
compute_experiment('result2.json')
|
Brain-in-Vat/Aletheia
|
aletheia/settings.py
|
<filename>aletheia/settings.py
import os
BASE_DIR = os.path.dirname(__file__)
# DOWNLOAD_URL = 'https://github.com/Brain-in-Vat/Aletheia/blob/main/resources'
# DOWNLOAD_URL = 'https://raw.githubusercontent.com/Brain-in-Vat/Aletheia/blob/main/resources'
# DOWNLOAD_URL = 'https://raw.githubusercontent.com/Brain-in-Vat/Aletheia/main/resources'
DOWNLOAD_URL = 'https://raw.githubusercontent.com/Brain-in-Vat/Aletheia/main/aletheia/resources'
# RULE_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'rules.json')
# KNOWLEGE_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'AgentKnowlege.turtle')
# GLOBAL_KNOWLEGE_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'GolbalKnowlege.owl')
# INDIVIDUAL_KNOWLEGE_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'IndividualKnowlege')
# AGENT_LIST_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'agents.json')
# GLOBAL_KNOWLEGE_PATH = os.path.join(BASE_DIR, 'voting', 'resources', 'GolbalKnowlege.nt')
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/futarchy/proposal.py
|
from enum import Enum
import math
class State(Enum):
Init = 1
Pass = 2
Failed = 3
pass
class Proposal(object):
def __init__(self, _meta, _id, pos):
self._meta = _meta
self.state = 1
self._id = _id
self.time = 0
self.passed = -1
self.pos = pos
def update(self, state):
self.state = state
class FutarchyProposal(Proposal):
def __init__(self, _meta, _id, pos, b_number):
super().__init__(_meta, _id, pos)
self.historys = {'1': {'yes_token': 0, 'no_token': 0}}
self.prices = {
'Yes': 1,
'No': 1
}
self.yes_token = 0
self.no_token = 0
self.b_number = b_number
class FPMMProposal(Proposal):
def __init__(self, _meta, _id, pos, constant):
super().__init__(_meta, _id, pos)
self.historys = {'1': {'yes_token': 0, 'no_token': 0}}
self.prices = {
'Yes': 1,
'No': 1
}
self.yes_token = math.sqrt(constant)
self.no_token = math.sqrt(constant)
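# Usage sketch added for illustration (not part of the original module). It shows that
# FPMMProposal seeds both outcome pools at sqrt(constant), so their product equals the
# constant-product invariant; the ids and positions used here are assumptions.
if __name__ == '__main__':
    fpmm_proposal = FPMMProposal(_meta=None, _id=0, pos=[0, 0], constant=100)
    print(fpmm_proposal.yes_token * fpmm_proposal.no_token)   # 100.0, the FPMM invariant
    futarchy_proposal = FutarchyProposal(_meta=None, _id=1, pos=[0, 0], b_number=300)
    print(futarchy_proposal.prices)                           # initial {'Yes': 1, 'No': 1}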
|
Brain-in-Vat/Aletheia
|
aletheia/scenario_search/loss.py
|
<filename>aletheia/scenario_search/loss.py
import numpy as np
def belief_loss(expect, actual):
expect = set(expect)
actual = set(actual)
inter_set = expect & actual
or_set = expect | actual
return float(len(inter_set)/len(or_set))
def donate_loss(expect, actual):
dist = np.sqrt(np.sum(np.square(expect - actual)))
return dist
def token_loss(origin, after, truth_index, false_index):
truth_wealth = [[origin[index], after[index]] for index in truth_index]
false_wealth = [[origin[index], after[index]] for index in false_index]
if len(truth_wealth) == 0:
value1 = 0
else:
value1 = sum([x[0] - x[1] for x in truth_wealth])/len(truth_wealth)
if len(false_wealth) == 0:
value2 = 0
else:
value2 = sum([x[0] - x[1] for x in false_wealth])/len(false_wealth)
return value1 - value2
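# Usage sketch added for illustration (not part of the original module); the inputs are
# made-up assumptions. belief_loss is the Jaccard overlap of the expected and actual
# project sets, donate_loss is a Euclidean distance, and token_loss compares the average
# wealth change of "truth" agents against "false" agents.
if __name__ == '__main__':
    print(belief_loss([0, 1, 2], [1, 2, 3]))                         # 2/4 = 0.5
    print(donate_loss(np.array([1.0, 2.0]), np.array([2.0, 4.0])))   # sqrt(5)
    origin = [10, 10, 10, 10]
    after = [12, 9, 8, 11]
    print(token_loss(origin, after, truth_index=[0, 3], false_index=[1, 2]))  # -3.0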
|
Brain-in-Vat/Aletheia
|
aletheia/agents/ontoagent.py
|
from owlready2 import get_ontology, default_world, sync_reasoner_pellet, World, set_datatype_iri
import os
from aletheia.settings import BASE_DIR
from mesa import Agent, Model
import random
agent_base = os.path.join(BASE_DIR, 'resource', 'zlagent.owl')
set_datatype_iri(float, "http://www.w3.org/2001/XMLSchema#float")
class BaseAgent(Agent):
def __init__(self, kb_path, unique_id: int, model: Model):
super().__init__(unique_id, model)
self.kb_path = kb_path
if not self.kb_path:
self.kb_path = agent_base
self.world = World()
self.onto = self.world.get_ontology(self.kb_path)
self.onto.load()
class OntoAgent(Agent):
def __init__(self, kb_path, unique_id: int, model: Model, market, system, pos, wealth=10, token=10, strategy=1) -> None:
super().__init__(unique_id, model)
self.kb_path = kb_path
if not self.kb_path:
self.kb_path = agent_base
self.world = World()
self.onto = self.world.get_ontology(self.kb_path)
self.onto.load()
self.unique_id = unique_id
self.model = model
self.market = market
self.system = system
self.pos = pos
self.wealth = wealth
self.token = token
self.strategy = strategy
self.yes_token = 0
self.no_token = 0
self.voted = False
self.init_myself()
def set_proposal(self, proposal_id, expect):
"""init the proposal knowlege in one proposal simulation
"""
self.onto.proposal1.proposalId = proposal_id
self.onto.proposal1.myExpectYes = float(expect)
self.onto.proposal1.myExpectNo = float(1 - expect)
self.onto.mySelf.currentProposal = self.onto.proposal1
def init_myself(self):
self.onto.GNO.myBanlance = float(self.token)
self.onto.mySelf.wealth = float(self.wealth)
self.onto.spatialAddress.hasX = float(self.pos[0])
self.onto.spatialAddress.hasY = float(self.pos[1])
if self.strategy == 0:
self.onto.mySelf.myStrategy = self.onto.onlyBuy
else:
self.onto.mySelf.myStrategy = self.onto.buyAndSell
self.onto.marketSystem.hasFee = float(self.market.fee)
def think(self):
"""reason in kb using predefined rulers
reason process dont introduce any new individuals
"""
try:
with self.onto:
sync_reasoner_pellet(self.world, infer_property_values=True,
infer_data_property_values=True, debug=2)
except Exception as e:
print(e)
self._replace_ready_property()
def _replace_ready_property(self):
# update goal
individuals = self.onto.individuals()
for individual in individuals:
property_names = [
property._name for property in individual.get_properties()]
update_paris = []
for property_name in property_names:
if property_name.endswith('Ready'):
update_paris.append((property_name, property_name[:-5]))
if update_paris:
for property_ready, property in update_paris:
ready_value = eval('individual.' + property_ready)
now_value = eval('individual.' + property)
if not ready_value:
continue
if isinstance(ready_value, list):
individual.__dict__[property_ready] = []
if isinstance(now_value, list):
individual.__dict__[property] = ready_value
else:
individual.__dict__[property] = ready_value[0]
else:
individual.__dict__[property_ready] = None
if isinstance(now_value, list):
individual.__dict__[property] = [ready_value]
else:
individual.__dict__[property] = ready_value
def observe(self):
proposals = self.system.observe(self)
if proposals:
target = proposals[0]
else:
target = None
if not target:
self.onto.mySelf.currentProposal = None
return
yes_prices = self.market.calc_price(target._id, 1, 0)
no_prices = self.market.calc_price(target._id, 0, 1)
self.onto.noToken.currentPrice = float(no_prices)
self.onto.yesToken.currentPrice = float(yes_prices)
self.onto.GNO.myBanlance = float(self.token)
self.onto.mySelf.myWealth = float(self.wealth)
def execute(self):
def buy_yes_token(_id, amount):
pay = self.market.calc_price(_id, amount, 0)
if pay <= self.token:
val = self.market.buy(_id, self.unique_id, 'yes_token', amount)
self.yes_token += amount
self.token -= val
def buy_no_token(_id, amount):
price = self.market.calc_price(_id, 0, amount)
if price <= self.token:
val = self.market.buy(_id, self.unique_id, 'no_token', amount)
self.no_token += amount
self.token -= val
def sell_yes_token(_id, amount):
if self.yes_token >= amount:
val = self.market.sell(
_id, self.unique_id, 'yes_token', amount)
self.yes_token -= amount
self.token -= val
def sell_no_token(_id, amount):
if self.no_token >= amount:
val = self.market.sell(_id, self.unique_id, 'no_token', amount)
self.no_token -= amount
self.token -= val
def vote_yes(_id, amount=None):
self.system.vote(self, _id, 'yes')
self.voted = True
def vote_no(_id, amount=None):
self.system.vote(self, _id, 'no')
self.voted = True
plan = self.onto.myPlan
actions = plan.hasAction
while actions:
action = actions.pop(0)
token = action.targetToken
if action._name == 'buyYesToken':
proposal = token.yesTokenOf
_id = proposal.proposalId
amount = action.buyAmount
_func = buy_yes_token
elif action._name == 'sellYesToken':
proposal = token.yesTokenOf
_id = proposal.proposalId
amount = action.sellAmount
_func = sell_yes_token
elif action._name == 'buyNoToken':
proposal = token.noTokenOf
_id = proposal.proposalId
amount = action.buyAmount
_func = buy_no_token
elif action._name == 'sellNoToken':
proposal = token.noTokenOf
_id = proposal.proposalId
amount = action.sellAmount
_func = sell_no_token
            elif action._name == 'voteYes':
proposal = action.targetProposal
_id = proposal.proposalId
amount = 1
_func = vote_yes
            elif action._name == 'voteNo':
proposal = action.targetProposal
_id = proposal.proposalId
amount = 1
_func = vote_no
else:
continue
_func(_id, amount)
self.wealth = self.token + self.no_token * \
self.market.calc_current_price(_id, 'no') + self.yes_token * \
self.market.calc_current_price(_id, 'yes')
def step(self):
print("agent {} started to thinking".format(self.unique_id))
self.observe()
self.think()
self.execute()
if __name__ == '__main__':
agent = OntoAgent(agent_base, 0, None, None, None, [0, 0])
agent.step()
# agent2.step()
|
Brain-in-Vat/Aletheia
|
aletheia/examples/simplefutarchy.py
|
from mesa import Model
from aletheia.mechanism_engine.scheduler import RandomActivation
from mesa.datacollection import DataCollector
from mesa.space import Grid
from aletheia.mechanism_engine.predict_markets.lmsr import LMSR
from aletheia.mechanism_engine.predict_markets.fpmm import FPMM
from aletheia.mechanism_engine.futarchy.proposal import Proposal, FutarchyProposal, FPMMProposal
from aletheia.mechanism_engine.futarchy import SimpleFutarchySystem
from aletheia.agents.simpleagent import FutarchyBuyAgent, FutarchyBuyAndSellAgent, FutarchyRandomAgent
import random
class Futarchy(Model):
"""
"""
def __init__(self, b_number, agent_number, kind1, kind2, kind3, times_per_day, fee=0.2, belief_rate=0.6, amm='lmsr') -> None:
"""
"""
kinds = [kind1, kind2, kind3]
# proposal = FutarchyProposal(None, 0, [], b_number)
self.schedule = RandomActivation(self)
if amm == 'lmsr':
self.market = LMSR(fee=fee)
proposal = FutarchyProposal(None, 0, [], b_number)
else:
self.market = FPMM(fee=fee, constant=100)
proposal = FPMMProposal(None, 0, [], constant=100)
self.current_proposal = proposal
self.system = SimpleFutarchySystem(
forum=None, predict_market=self.market)
self.system.propose(proposal)
self.grid = Grid(10, 10, torus=False)
self.count = 0
        self.agent_number = sum(kinds)
self.times_per_day = times_per_day
for i in range(0, 10):
for j in range(0, 10):
if j + i*10 < kinds[0]:
# agent = FutarchyRandomAgent(j+i*10, self, self.market, self.system, (i, j))
# agent = OntoAgent(None, j+i*10, self,
# self.market, self.system, (i, j), strategy=0)
if j+i*10 < kinds[0]*belief_rate:
agent = FutarchyBuyAgent(
j+i*10, self, self.market, self.system, (i, j), 1, token=2000)
else:
agent = FutarchyBuyAgent(
j+i*10, self, self.market, self.system, (i, j), 0.0, token=2000)
self.grid.place_agent(agent, (i, j))
self.schedule.add(agent)
# agent.set_proposal(proposal._id, 1.0)
elif j + i*10 < kinds[0] + kinds[1]:
# agent = OntoAgent(None, j+i*10, self,
# self.market, self.system, (i, j), strategy=1)
if j + i*10 < kinds[0] + kinds[1]/3 + 1:
agent = FutarchyBuyAndSellAgent(
j+i*10, self, self.market, self.system, (i, j), 0.5, token=2000)
else:
agent = FutarchyBuyAndSellAgent(
j+i*10, self, self.market, self.system, (i, j), 0.5, token=2000)
self.grid.place_agent(agent, (i, j))
self.schedule.add(agent)
# agent.set_proposal(proposal._id, 0.0)
elif j + i*10 < kinds[0] + kinds[1] + kinds[2]:
agent = FutarchyRandomAgent(
j+i*10, self, self.market, self.system, (i, j), token=2000)
self.grid.place_agent(agent, (i, j))
self.schedule.add(agent)
else:
continue
self.running = True
self.datacollector = DataCollector(
{
"yes_token_price": lambda m: self.count_type(m, m.market, 'yes'),
"no_token_price": lambda m: self.count_type(m, m.market, 'no')
}
)
self.datacollector.collect(self)
self.countcollector = DataCollector(
{
"vote_yes": lambda m: self.count_token(m, 'yes'),
"vote_no": lambda m: self.count_token(m, 'no')
}
)
self.countcollector.collect(self)
def step(self):
# self.schedule.agents
# index = self.count % self.agent_numer
for i in range(self.times_per_day):
            index = random.randint(0, self.agent_number - 1)
current_agent = self.schedule.agents[index]
current_agent.step()
# self.schedule.step()
self.datacollector.collect(self)
self.countcollector.collect(self)
# if self.count % (12*self.agent_numer) == 0 and self.count > 0:
self.system.step()
if self.count >= 13:
self.running = False
self.count += 1
@staticmethod
def count_type(model, market, voter_condition):
"""
"""
if not model.system.activate_proposals:
return 0
proposal = model.system.activate_proposals[0]
if voter_condition == 'yes':
# val = market.calc_price(proposal._id, 1, 0)
# val = proposal.yes_token
val = market.calc_current_price(proposal._id, 'yes')
else:
# val = market.calc_price(proposal._id, 0, 1)
val = market.calc_current_price(proposal._id, 'no')
# val = proposal.no_token
return val
@staticmethod
def count_token(model, voter_condition):
"""
"""
# system = model.system
# proposal_idk= model.current_proposal._id
# if voter_condition == 'yes':
# return len(system.vote_actions[proposal_id]['yes'])
# else:
# return len(system.vote_actions[proposal_id]['no'])
count = 0
if voter_condition == 'yes':
for voter in model.schedule.agents:
count += voter.yes_token
else:
for voter in model.schedule.agents:
count += voter.no_token
return count
|
Brain-in-Vat/Aletheia
|
aletheia/aletheiacli.py
|
<filename>aletheia/aletheiacli.py
import argparse
from aletheia.agents import create_agent
def process(args):
if args.subparser == 'agent':
if args.create:
create_agent(args.agent_name, args.target_path)
pass
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser')
parser_agent = subparsers.add_parser('agent')
parser_agent.add_argument(
'-c', '--create', dest='create', default=False, help='create an agent', action='store_true'
)
parser_agent.add_argument(
'-n', '--name', dest='agent_name', default='zlagent', help='the name of agent to create'
)
parser_agent.add_argument(
'-p', '--path', dest='target_path', default='', help='the target path of created agent'
)
args = parser.parse_args()
process(args)
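# Usage sketch added for illustration (an assumption, not from the original file):
#   python -m aletheia.aletheiacli agent --create --name zlagent --path ./agents
# which calls create_agent() to download zlagent.owl and zlagent.py into the given path.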
|
Brain-in-Vat/Aletheia
|
aletheia/agents/__init__.py
|
import os
import sys
from aletheia.settings import DOWNLOAD_URL
import hashlib
__all__ = ['download', 'ZLAGENT']
def _python2_env():
"""function to check python version for compatibility handling"""
if sys.version_info[0] < 3:
return True
else:
return False
def _python3_env():
"""function to check python version for compatibility handling"""
return not _python2_env()
def download(download_url=None, filename_to_save=None):
if download_url is None or download_url == '':
print('[Agent][ERROR] - download URL missing for download()')
return False
    # if not given, use last part of url as filename to save
    if filename_to_save is None or filename_to_save == '':
        download_url_tokens = download_url.split('/')
        filename_to_save = download_url_tokens[-1]
# delete existing file if exist to ensure freshness
if os.path.isfile(filename_to_save):
os.remove(filename_to_save)
# handle case where url is invalid or has no content
try:
if _python2_env():
import urllib
urllib.urlretrieve(download_url, filename_to_save)
else:
import urllib.request
urllib.request.urlretrieve(download_url, filename_to_save)
except Exception as e:
print('[Agent][ERROR] - failed downloading from ' + download_url + '...')
print(str(e))
return False
# take the existence of downloaded file as success
if os.path.isfile(filename_to_save):
return True
else:
print('[Agent][ERROR] - failed downloading to ' + filename_to_save)
return False
ZLAGENT = 'zlagent'
def create_agent(agent_name, path):
download_url = DOWNLOAD_URL + '/' + agent_name + '.owl'
download(download_url, path)
download_url = DOWNLOAD_URL + '/' + agent_name + '.py'
download(download_url, path)
def gen_md5(path):
    md5_hash = hashlib.md5()
    with open(path, 'rb') as a_file:
        content = a_file.read()
    md5_hash.update(content)
    return md5_hash.hexdigest()
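# Usage sketch added for illustration (not part of the original module). It downloads the
# zlagent ontology from DOWNLOAD_URL and prints its md5; the local filename is an
# assumption, and running it obviously requires network access.
if __name__ == '__main__':
    url = DOWNLOAD_URL + '/' + ZLAGENT + '.owl'
    if download(url, 'zlagent.owl'):
        print('md5:', gen_md5('zlagent.owl'))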
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/predict_markets/lmsr.py
|
<gh_stars>0
from . import PredictMarket
import math
from enum import Enum
class TokenType(Enum):
yes_token = 'yes_token'
    no_token = 'no_token'
class LMSRMarket(PredictMarket):
def __init__(self, choices, b=300):
self.choices = choices
self.amounts = {choice: 0 for choice in choices}
self.finished = False
self.answer = []
self.trades = {
choice: {'id': choice, 'trades': []} for choice in choices
}
self.fee = 0.03
self.b = b
self.pool = 0
def set_answer(self, choices):
self.answer = choices
self.finished = True
def buy(self, choice, amount, user_id):
if choice not in self.choices:
raise Exception('unknown choice')
val = self.calc_price(choice, amount)
self.amounts[choice] += amount
fee = self.fee * amount
self.pool += fee
self.trades[choice]['trades'].append(
{'id': user_id, 'amount': amount, 'fee': fee}
)
return val
def sell(self, choice, amount, user_id):
if choice not in self.choices:
raise Exception('unknown choice')
val = self.calc_price(choice, -amount)
self.amounts[choice] -= amount
fee = self.fee * amount
self.pool += fee
self.trades[choice]['trades'].append(
{'id': user_id, 'amount': -amount, 'fee': fee}
)
return val
def calc_price(self, choice, amount):
if self.finished and choice in self.answer:
return amount
elif self.finished:
return 0
tmp_amounts = {key: value for key,value in self.amounts.items()}
tmp_amounts[choice] += amount
        c_n = self.b * math.log(
            sum([math.exp(x/self.b) for x in tmp_amounts.values()])
        )
        c_p = self.b * math.log(
            sum([math.exp(x/self.b) for x in self.amounts.values()])
        )
val = c_n - c_p
val = round(val, 2)
return val
def calc_current_price(self, choice):
c_p = sum([math.exp(x/self.b) for x in self.amounts.values()])
return math.exp(self.amounts[choice]/self.b) / c_p
def price_calcs(self):
prices = [
(choice, self.calc_current_price(choice)) for choice in self.choices
]
return prices
class LMSR(PredictMarket):
def __init__(self, fee=0.02) -> None:
super().__init__()
self.proposal_map = {}
self.fee = fee
def submit(self, agent, proposal):
self.proposals.append(proposal)
self.proposal_map[proposal._id] = proposal
def buy(self, proposal_id, agent, token_type, amount):
proposal = self.proposal_map[proposal_id]
token_type = TokenType(token_type)
val = 0
if token_type == TokenType.yes_token:
val = (1 + self.fee)*self.calc_price(proposal._id, amount, 0)
proposal.yes_token += amount
elif token_type == TokenType.no_token:
val = (1 + self.fee)*self.calc_price(proposal._id, 0, amount)
proposal.no_token += amount
else:
raise Exception('unknown token type')
return val
def sell(self, proposal_id, agent, token_type, amount):
proposal = self.proposal_map[proposal_id]
token_type = TokenType(token_type)
val = 0
if token_type == TokenType.yes_token:
if amount > proposal.yes_token:
return 0
val = (1 - self.fee)*self.calc_price(proposal._id, -amount, 0)
proposal.yes_token -= amount
elif token_type == TokenType.no_token:
if amount > proposal.no_token:
return 0
            val = (1 - self.fee)*self.calc_price(proposal._id, 0, -amount)
proposal.no_token -= amount
else:
raise Exception('unknown token type')
return val
def calc_price(self, proposal_id, yes_token, no_token):
"""compute the price of current proposal
"""
proposal = self.proposal_map[proposal_id]
b = proposal.b_number
p_w = proposal.yes_token
p_l = proposal.no_token
if proposal.state == 2:
return yes_token
elif proposal.state == 3:
return no_token
c_n = b * math.log(math.exp((p_w + yes_token)/b) +
math.exp((p_l + no_token)/b))
c_p = b * math.log(math.exp(p_w/b) + math.exp(p_l/b))
val = c_n - c_p
val = round(val, 2)
return val
def calc_current_price(self, proposal_id, token_type):
proposal = self.proposal_map[proposal_id]
b = proposal.b_number
p_w = proposal.yes_token
p_l = proposal.no_token
# if proposal.state == 2 and token_type == 'yes':
# return 1
# elif proposal.state == 2 and token_type == 'no':
# return 0
# if proposal.state == 3 and token_type == 'yes':
# return 0
# elif proposal.state == 3 and token_type == 'no':
# return 1
yes_part = math.exp(p_w/b)
no_part = math.exp(p_l/b)
if token_type == 'yes':
return yes_part / (yes_part + no_part)
else:
return no_part/(yes_part + no_part)
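# Usage sketch added for illustration (not part of the original module). It exercises the
# per-choice LMSRMarket with assumed choice names and trade sizes: buy() charges the LMSR
# cost-function difference, calc_current_price() is the softmax of amount/b, and after
# set_answer() each winning share pays out its face amount.
if __name__ == '__main__':
    market = LMSRMarket(choices=['yes', 'no'], b=300)
    print('cost of 10 yes shares:', market.buy('yes', 10, user_id=0))
    print('prices:', market.price_calcs())
    market.set_answer(['yes'])
    print('payout for 1 yes share:', market.calc_price('yes', 1))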
|
Brain-in-Vat/Aletheia
|
aletheia/agents/simpleagent.py
|
import math
from mesa import Agent, Model
import random
# class Agent(object):
# def __init__(self, _id, position, states):
# self._id = _id
# self.position = position
# self.states = states
class SpatialAgent(Agent):
"""
    A spatial vote agent.
    An agent whose (x, y) position represents its opinion.
"""
def __init__(self, pos, model, system, forum, knowlege, _id=None):
"""
create a new voter
Args:
pos: the opinon position
model: the model
system: the simluated system
forum: social concact net
knowlege: knowlege represent of this agent
_id: id
each time, the agent will focus on one proposal
"""
self.pos = pos
self.model = model
self.system = system
self.forum = forum
self._id = _id
self.unique_id = _id
self.knowlege = knowlege
self.like_threshold = 0.7
self.hate_threshold = 1
self.condition = 'Unknown'
def observe(self, system):
# infos = system.observe(self)
proposals = system.get_activate_proposals()
favorite_proposal = None
shortest_distance = 10000
for proposal in proposals:
if system.is_voted(self._id, proposal._id):
continue
else:
dist = self.likes(proposal)
if dist < shortest_distance:
shortest_distance = dist
favorite_proposal = proposal
        return favorite_proposal, shortest_distance
def likes(self, proposal):
x = self.pos
y = proposal.pos
sum_result = sum([math.pow(i-j, 2)for i, j in zip(x, y)])
dist = math.sqrt(sum_result)
return dist
def think(self):
"""this step, agent try to reason his belief, intent
"""
pass
def check_intent(self):
pass
def update_intent(self):
pass
def check_plan(self):
pass
def execute_plan(self):
"""
"""
def vote(amount):
pass
pass
def step(self):
print('voted')
proposal, dist = self.observe(self.system)
if proposal and dist < self.like_threshold:
self.system.stack(0.1, self._id, proposal._id, 'yes')
self.condition = 'Yes'
        elif proposal and dist > self.hate_threshold:
            self.system.stack(0.1, self._id, proposal._id, 'no')
            self.condition = 'No'
else:
self.condition = 'Unknown'
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
class FutarchyRandomAgent(Agent):
def __init__(self, unique_id: int, model: Model, market, system, pos, token=10) -> None:
super().__init__(unique_id, model)
self.unique_id = unique_id
self.model = model
self.market = market
self.system = system
self.token = token
self.no_token = 0
self.yes_token = 0
self.pos = pos
self.wealth = self.token
self.voted = False
self.belief = 0.5
def set_type(self):
self._type = 1
def step(self) -> None:
proposals = self.system.observe(self)
if proposals:
target = proposals[0]
else:
return
dice = random.random()
val = 0
if dice < 0.25:
price = self.market.calc_price(target._id, 1, 0)
if price <= self.token:
val = self.market.buy(
target._id, self.unique_id, 'yes_token', 1)
self.yes_token += 1
self.system.vote(self, target._id, 'yes')
self.voted = True
elif dice > 0.25 and dice < 0.5:
if self.yes_token >= 1:
val = self.market.sell(
target._id, self.unique_id, 'yes_token', 1)
self.yes_token -= 1
elif dice > 0.5 and dice < 0.75:
if self.no_token >= 1:
val = self.market.sell(
target._id, self.unique_id, 'no_token', 1)
self.no_token -= 1
elif dice > 0.75:
price = self.market.calc_price(target._id, 0, 1)
if price <= self.token:
val = self.market.buy(
target._id, self.unique_id, 'no_token', 1)
self.no_token += 1
self.system.vote(self, target._id, 'no')
self.voted = True
self.token -= val
self.wealth = self.token + self.no_token * self.market.calc_current_price(target._id, 'no') + \
self.yes_token * self.market.calc_current_price(target._id, 'yes')
# self.wealth = self.token + self.no_token * self.market.calc_price(target._id, 0, 1)
# self.wealth = self.token + self.yes_token * self.market.calc_price(target._id, 1, 0)
# return super().step()
class FutarchyBuyAgent(Agent):
    # join_time defaults to 0 so callers that omit it (e.g. simplefutarchy.py) still work
    def __init__(self, unique_id: int, model: Model, market, system, pos, belief, join_time=0, token=10) -> None:
super().__init__(unique_id, model)
self.unique_id = unique_id
self.model = model
self.market = market
self.system = system
self.token = token
self.no_token = 0
self.yes_token = 0
self.pos = pos
self.wealth = self.token
self.voted = False
self.belief = belief
self.join_time = join_time
self.vote = 'unknown'
self.init_token = token
def set_type(self):
self._type = 2
pass
def step(self):
if self.system.day < self.join_time:
return
proposals = self.system.observe(self)
if proposals:
target = proposals[0]
else:
return
def buy_yes_token(_id, amount):
pay = self.market.calc_price(_id, amount, 0)
pay = (1 + self.market.fee)*pay
if pay <= self.token:
val = self.market.buy(_id, self.unique_id, 'yes_token', amount)
self.yes_token += amount
self.token -= val
def buy_no_token(_id, amount):
price = self.market.calc_price(_id, 0, amount)
price = (1 + self.market.fee)*price
if price <= self.token:
val = self.market.buy(_id, self.unique_id, 'no_token', amount)
self.no_token += amount
self.token -= val
def sell_yes_token(_id, amount):
if self.yes_token >= amount:
val = self.market.sell(
_id, self.unique_id, 'yes_token', amount)
self.yes_token -= amount
self.token -= val
def sell_no_token(_id, amount):
if self.no_token >= amount:
val = self.market.sell(_id, self.unique_id, 'no_token', amount)
self.no_token -= amount
self.token -= val
def vote_yes(_id, amount=None):
if not self.voted:
self.system.vote(self, _id, 'yes')
self.voted = True
self.vote = 'yes'
def vote_no(_id, amount=None):
if not self.voted:
self.system.vote(self, _id, 'no')
self.voted = True
self.vote = 'no'
fee = self.market.fee
yes_price = self.market.calc_current_price(target._id, 'yes')
yes_cost = yes_price + yes_price*fee
yes_sell = (1 - fee)*yes_price
no_price = self.market.calc_current_price(target._id, 'no')
no_cost = no_price + no_price*fee
no_sell = (1-fee)*no_price
if self.belief > 0.5:
if yes_cost < self.belief:
buy_yes_token(target._id, 1)
# print('I amd {}, type buy, has token {},yes cost: {}, beleif: {}, action: buy yes'.format(
# self.unique_id, self.token, yes_cost, self.belief))
elif self.belief < 0.5:
if no_cost < 1 - self.belief:
buy_no_token(target._id, 1)
# print('I am {}, type buy, has token {}, yes cost: {}, beleif: {}, action: buy no'.format(
# self.unique_id, self.token, no_cost, self.belief))
# print('current prices yes: {}, no: {}'.format(self.market.calc_current_price(
# target._id, 'yes'), self.market.calc_current_price(target._id, 'no')))
if self.belief > 0.5:
vote_yes(target._id, 1)
elif self.belief < 0.5:
vote_no(target._id, 1)
# self.token -= val
self.wealth = self.token + self.no_token * self.market.calc_current_price(target._id, 'no') + \
self.yes_token * self.market.calc_current_price(target._id, 'yes')
# self.wealth = self.token + self.no_token * self.market.calc_price(target._id, 0, 1)
# self.wealth = self.token + self.yes_token * self.market.calc_price(target._id, 1, 0)
# return super().step()
class FutarchyBuyAndSellAgent(FutarchyBuyAgent):
def set_type(self):
self._type = 3
def update_belief(self):
proposals = self.system.observe(self)
if proposals:
target = proposals[0]
else:
return
proposal_id = target._id
# agent_number = self.model.agent_number
if self.system.day >= 2:
vote_action = self.system.vote_actions[target._id]
yes_amount = sum(x[1] for x in vote_action['yes'])
no_amount = sum(x[1] for x in vote_action['no'])
if yes_amount + no_amount < self.model.agent_number/10:
self.belief = 0.5
return
self.belief = yes_amount / (yes_amount + no_amount)
if self.belief > 0.55:
self.belief = 1
elif self.belief < 0.45:
self.belief = 0
else:
self.belief = 0.5
def step(self):
if self.system.day < self.join_time:
return
proposals = self.system.observe(self)
if proposals:
target = proposals[0]
else:
return
self.update_belief()
def buy_yes_token(_id, amount):
pay = self.market.calc_price(_id, amount, 0)
pay = (1 + self.market.fee)*pay
if pay <= self.token:
val = self.market.buy(_id, self.unique_id, 'yes_token', amount)
self.yes_token += amount
self.token -= val
def buy_no_token(_id, amount):
price = self.market.calc_price(_id, 0, amount)
price = (1 + self.market.fee)*price
if price <= self.token:
val = self.market.buy(_id, self.unique_id, 'no_token', amount)
self.no_token += amount
self.token -= val
def sell_yes_token(_id, amount):
if self.yes_token >= amount:
val = self.market.sell(
_id, self.unique_id, 'yes_token', amount)
self.yes_token -= amount
self.token -= val
def sell_no_token(_id, amount):
if self.no_token >= amount:
val = self.market.sell(_id, self.unique_id, 'no_token', amount)
self.no_token -= amount
self.token -= val
def vote_yes(_id, amount=None):
if not self.voted:
self.system.vote(self, _id, 'yes')
self.voted = True
self.vote = 'yes'
def vote_no(_id, amount=None):
if not self.voted:
self.system.vote(self, _id, 'no')
self.voted = True
self.vote = 'no'
fee = self.market.fee
yes_price = self.market.calc_current_price(target._id, 'yes')
yes_cost = yes_price + yes_price*fee
yes_sell = (1 - fee)*yes_price
no_price = self.market.calc_current_price(target._id, 'no')
no_cost = no_price + no_price*fee
no_sell = (1-fee)*no_price
if self.belief > 0.5:
if yes_cost < self.belief:
buy_yes_token(target._id, 1)
# print('I am {}, type buy and sell, has token {}, yes cost: {}, beleif: {}, action: buy yes'.format(
# self.unique_id, self.token, yes_cost, self.belief))
if yes_sell > self.belief:
sell_yes_token(target._id, 1)
# print('I am {}, type buy and sell, has token {}, yes sell: {}, beleif: {}, action: sell yes'.format(
# self.unique_id, self.token, yes_sell, self.belief))
elif self.belief < 0.5:
if no_cost < 1 - self.belief:
buy_no_token(target._id, 1)
# print('I am {}, type buy and sell, has token {}, no cost: {}, beleif: {}, action: buy no'.format(
# self.unique_id, self.token, no_cost, self.belief))
if no_sell > 1 - self.belief:
sell_no_token(target._id, 1)
# print('I am {}, type buy and sell, has token {}, no sell: {}, beleif: {}, action: sell no'.format(
# self.unique_id, self.token, no_sell, self.belief))
# print('current prices yes: {}, no: {}'.format(self.market.calc_current_price(
# target._id, 'yes'), self.market.calc_current_price(target._id, 'no')))
if self.belief > 0.5:
vote_yes(target._id, 1)
elif self.belief < 0.5:
vote_no(target._id, 1)
# self.token -= val
self.wealth = self.token + self.no_token * self.market.calc_current_price(target._id, 'no') + \
self.yes_token * self.market.calc_current_price(target._id, 'yes')
|
Brain-in-Vat/Aletheia
|
aletheia/tests/test_download.py
|
<filename>aletheia/tests/test_download.py<gh_stars>0
import os
from aletheia.agents import download, ZLAGENT, gen_md5
from aletheia.settings import DOWNLOAD_URL, BASE_DIR
zlagent_owl = os.path.join(BASE_DIR, 'resources', 'zlagent.owl')
zlagent_py = os.path.join(BASE_DIR, 'resources', 'zlagent.py')
a = gen_md5(zlagent_owl)
print(a)
b = gen_md5(zlagent_py)
print(b)
|
Brain-in-Vat/Aletheia
|
aletheia/examples/agents/randomagent.py
|
<filename>aletheia/examples/agents/randomagent.py
from mesa import Agent, Model
import random
class RandomAgent(Agent):
def __init__(self, unique_id, model, token):
super().__init__(unique_id, model)
self.token = token
self.model = model
def donate(self):
projects = self.model.projects
project = random.choice(projects)
if 'qf_futarchy' in dir(self.model):
self.model.qf_futarchy.lmsr.buy(project, 1, self.unique_id)
elif 'qf' in dir(self.model):
self.model.qf.grant(project, self.unique_id, 1)
def step(self):
dice = random.random()
if dice <= 0.5:
self.donate()
# fixed-belief agent
"""
The agent is controlled by simple zero-intelligence rules:
when the actual price is below its expectation, it buys;
otherwise, it does nothing.
"""
class FixZLAgent(Agent):
def __init__(self, unique_id, model, token, beliefs):
super().__init__(unique_id, model)
self.token = token
self.model = model
self.beliefs = beliefs
self.grant = {}
self.brain_token = 0
def award_token(self, token):
self.token += token
def award_brain_token(self, brain_token):
self.brain_token += brain_token
def donate(self):
def buy(project):
token_cost = self.model.qf_futarchy.lmsr.calc_price(project, 1)
if self.token + self.brain_token > token_cost:
self.model.qf_futarchy.lmsr.buy(project, 1, self.unique_id)
if token_cost >= self.brain_token:
self.brain_token -= token_cost
else:
token_cost = token_cost - self.brain_token
self.brain_token = 0
self.token = self.token - token_cost
        def grant(project):
            grant_cost = self.beliefs[project]
            if self.token > grant_cost:
                self.token = self.token - grant_cost
                self.model.qf.grant(project, self.unique_id, grant_cost)
                self.grant[project] = True  # remember the grant so it is not repeated on later steps
projects = self.model.projects
if 'qf_futarchy' in dir(self.model):
for project in projects:
if self.beliefs[project] > self.model.qf_futarchy.lmsr.calc_current_price(project):
buy(project)
elif 'qf' in dir(self.model):
for project in projects:
                if not self.grant.get(project):
# self.model.qf.grant(project, self.unique_id, self.beliefs[project])
grant(project)
def step(self):
dice = random.random()
if dice <= 0.5:
self.donate()
|
Brain-in-Vat/Aletheia
|
aletheia/algorithm_analyze/__init__.py
|
'''
1. Which states of the system need to be shown to the user?
a. token distribution per address
b. changes of the governance loss function
c. governance-related data
Visualization tools:
https://altair-viz.github.io/gallery/scatter_tooltips.html
https://seaborn.pydata.org/tutorial/relational.html
mesa
dash
'''
'''
shared formats
private formats
'''
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
df = px.data.iris()
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Graph(id="scatter-plot"),
html.P("Petal Width:"),
dcc.RangeSlider(
id='range-slider',
min=0, max=2.5, step=0.1,
marks={0: '0', 2.5: '2.5'},
value=[0.5, 2]
),
])
@app.callback(
Output("scatter-plot", "figure"),
[Input("range-slider", "value")])
def update_bar_chart(slider_range):
low, high = slider_range
mask = (df['petal_width'] > low) & (df['petal_width'] < high)
fig = px.scatter(
df[mask], x="sepal_width", y="sepal_length",
color="species", size='petal_length',
hover_data=['petal_width'])
return fig
if __name__ == '__main__':
    app.run_server(debug=True)
|
Brain-in-Vat/Aletheia
|
aletheia/mechanism_engine/__init__.py
|
<filename>aletheia/mechanism_engine/__init__.py
from mesa import Model
|
JacOng17/serviceinnovationlab.github.io
|
.circleci/make-yaml.py
|
<gh_stars>1-10
#!/usr/bin/env python
# Might need to run `python3 -m pip install pyyaml` if you get a ModuleNotFoundError: No module named 'yaml'
import os
import csv
import yaml
import collections
csv_filename = os.environ['GS_CSV_FILE']
yaml_filename = os.environ['GS_YAML_FILE']
data = []
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
if __name__ == "__main__":
yaml.add_representer(collections.OrderedDict, represent_ordereddict)
yaml.explicit_start = True
with open(csv_filename, "r") as csv_file:
reader = csv.reader(csv_file, delimiter=",")
for i, line in enumerate(reader):
if i == 0:
labels = line
else:
hash = collections.OrderedDict()
column = 0
for label in labels:
value = line[column]
hash[label] = value
column += 1
data.append(hash)
with open(yaml_filename, 'w') as yaml_file:
yaml.dump({'data': data}, yaml_file)
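# Worked example added for illustration (an assumption, not from the original script).
# Given GS_CSV_FILE pointing at a CSV such as
#   name,team
#   Alice,Design
#   Bob,Research
# the script writes GS_YAML_FILE with each row keyed by the header labels, roughly:
#   data:
#   - name: Alice
#     team: Design
#   - name: Bob
#     team: Research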
|
anorth/shadow-wall
|
InterviewPrepScripts/videoCapture_Serial.py
|
import numpy as np
import cv2
import time
import serial
#ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
    if frame is None:
continue
b,g,r = cv2.split(frame)
b_new = cv2.resize(b,(10,10))
g_new = cv2.resize(g,(10,10))
r_new = cv2.resize(r,(10,10))
out = cv2.merge((b_new,g_new,r_new))
cv2.imshow('frame',out)
# for byte in b_new:
# ser.write(byte)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
anorth/shadow-wall
|
Processing2Python/showMovie/recordVideo.py
|
import cv2
INTERNAL_CAMERA = 1
EXTERNAL_CAMERA = 0
PREVIEW_SIZE = (1920/2, 1080/2)
filename = 'testRecordVideo_MakerSpace_withoutFilters_lightsOff.mp4'
cap = cv2.VideoCapture(EXTERNAL_CAMERA)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1080)
frame_width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
frame_height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
frame_rate = 30
fourcc = cv2.cv.CV_FOURCC(*'mp4v')
out = cv2.VideoWriter(filename,fourcc,frame_rate, (int(1920),int(1080)))
while (cap.isOpened()):
have_frame, frame = cap.read()
if have_frame:
out.write(frame)
#cv2.imshow('video frame',frame)
cv2.imshow("video frame", cv2.resize(frame, PREVIEW_SIZE))
else:
break
key = cv2.waitKey(1)
if key == 27: # exit on ESC
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
anorth/shadow-wall
|
InterviewPrepScripts/serialOLD.py
|
import serial
ser = serial.Serial(0) # open first serial port
print(ser.portstr)     # check which port was really used
ser.write("hello") # write a string
ser.close()
|
anorth/shadow-wall
|
Processing2Python/showMovie/imgproc.py
|
#!/usr/bin/env python
import itertools
import random
from collections import Counter
from showMovie.defish import create_fisher
import cv2
import numpy as np
from showMovie.helperFunctions import make_gamma_table
from perspective_transform import four_point_transform
THRESHOLD = int(255 * 0.7)
MIN_CONTOUR_DIMENSION = 20 # In debug window pixels
MORPH_KERNEL = np.ones((3, 3), np.uint8)
COLOURS = colors = itertools.cycle([
(255, 100, 100),
(255, 255, 100),
(255, 100, 255),
(100, 255, 100),
(255, 100, 255),
(100, 100, 255),
])
VERT_OFFSET = 20
TAN_THETA = np.tan(np.deg2rad(85))
# Observed framing of the camera I used
RAW_HEIGHT = 1080
left_crop = 196+10 # Left black region to discard
right_crop = 208+10 # Right black region to discard
diameter = 1920 - (left_crop + right_crop) # Remaining horizontal diameter of projection
vshift = 74 # Amount to vertically shift downwards to recenter
top_margin = ((diameter - RAW_HEIGHT) / 2) + vshift # Top margin to add to create a square
bottom_margin = ((diameter - RAW_HEIGHT) / 2) - vshift # Bottom margin to add
# Processing time can be reduced by shrinking this, but it makes no difference until sending to
# teensys is faster than ~50 ms
DEFISHED_SIZE = 1080
DEFISHED_TOP_MARGIN = 308 # These are measured from post-fisheye image
DEFISHED_BOTTOM_MARGIN = 209
# Length of the longer (top) side of pespective rect
CROP_WIDTH = (DEFISHED_SIZE * 0.70) #.58
# Length of the shorter (bottom) side
PERSPECTIVE_WIDTH = int(CROP_WIDTH * 0.54) #0.84
class Pipeline(object):
def __init__(self, defish, bg=None):
"""
:param defish: whether to defish
"""
if defish:
self.defisher = create_fisher((diameter,diameter), (DEFISHED_SIZE, DEFISHED_SIZE))
else:
self.defisher = None
self.bg = bg
self.prev_drawn = None
self.prev_grey_img = None
self.flowstate = None
def process(self, img, show_debug=False):
# Simplify to grayscale for processing
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
is_color = False
# Correct fisheye projection
if self.defisher:
img = img[0:RAW_HEIGHT, left_crop:-right_crop]
img = cv2.copyMakeBorder(img, top_margin, bottom_margin, 0, 0, cv2.BORDER_CONSTANT)
img = self.defisher.unwarp(img)
img = correct_perspective(img)
if self.bg:
img = self.bg.process(img, show_debug)
### Analysis (in greyscale)
img = morph_cleanup(img)
# Threshold before contours
# img = simple_threshold(img) #, or...
img = adaptive_threshold(img)
contours = find_contours(img)
### Drawing (in colour)
if not is_color:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = draw_contours(img, contours, self.prev_drawn)
self.prev_drawn = img
# draw_rectangles(img, contours) #better identifies individual contours - allows to easily detect 'bottom' of contour for future 'moving down the screen'
# bottom_of_contour(img, contours)
colors = get_draw_contours_colors(img, contours, self.prev_drawn)
new_img = move_contour_y(img, contours, colors) #contours are a bit jumpy...apply smoothing, or lag
new_img2 = enlarge_img(new_img)
# new_img3 = draw_text(new_img2)
if show_debug or True: cv2.imshow("debug", new_img)
if show_debug or True: cv2.imshow("debug2", new_img2)
return new_img2
class BackgroundRejecterMog(object):
def __init__(self):
# length_history = 100
# number_gaussian_mixtures = 6
# background_ratio = 0.9
# noise_strength_sigma = 1
self.fgbg = cv2.BackgroundSubtractorMOG()#history=200, nmixtures=6, backgroundRatio=0.1, noiseSigma=1)
def process(self, frame, show_debug=False):
fgmask = self.fgbg.apply(frame)
if show_debug: cv2.imshow("bg", fgmask)
frame = frame & fgmask
return frame
class BackgroundRejecterAvg(object):
def __init__(self, frame=None):
        self.avg = np.float32(frame) if frame is not None else None
def process(self, frame, show_debug=False):
if self.avg is None:
self.avg = np.float32(frame)
cv2.accumulateWeighted(frame, self.avg, 0.0010)
res = cv2.convertScaleAbs(self.avg)
if show_debug:
cv2.imshow("bg", res)
# Method 1: reject by subtraction. Avoids hard boundaries, only works well when background is dark.
res = np.minimum(res, frame)
frame = frame - res
# Method 2: reject by masking. Leaves more information but creates hard "glow" boundaries at threshold
# mask = np.abs(frame - res) > 10
# frame = np.where(mask, frame, 0)
return frame
def correct_perspective(img):
# Crop and correct perspective
topy = DEFISHED_TOP_MARGIN + 154 # Approx horizon - change if camera moves
boty = DEFISHED_SIZE - (DEFISHED_BOTTOM_MARGIN + 40) # bottom of useful data
topleftx = (DEFISHED_SIZE - CROP_WIDTH) / 2
toprightx = DEFISHED_SIZE - topleftx
botleftx = (DEFISHED_SIZE - PERSPECTIVE_WIDTH) / 2
botrightx = DEFISHED_SIZE - botleftx
# (TL, TR, BR, BL)
pts = np.array([(topleftx, topy), (toprightx, topy), (botrightx, boty), (botleftx, boty)], dtype="float32")
img = four_point_transform(img, pts)
return img
def morph_cleanup(img):
### Morphological cleanup
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
# http://stackoverflow.com/questions/29104091/morphological-reconstruction-in-opencv
# img = cv2.erode(img, morph_kernel)
# img = cv2.dilate(img, morph_kernel)
# Morph open to remove noise
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, MORPH_KERNEL, iterations=1)
# Morph close to fill dark holes
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, MORPH_KERNEL, iterations=3)
# Erode to define edges
# img = cv2.erode(img, MORPH_KERNEL, iterations=2)
# For cool fuzzy edge-style shadow, use gradient
# img = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, MORPH_KERNEL)
return img
def simple_threshold(img):
# Ghibli style
thresh = 12
ret, img = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
return img
def adaptive_threshold(img):
# Simple low threshold first to remove some noise
# ret, img = cv2.threshold(img, 5, 255, cv2.THRESH_BINARY)
thresh_size = 111
thresh_c = -4
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, thresh_size, thresh_c)
return img
def find_contours(img):
#when doing edge detection, remove/denoise image first, then apply Canny
# img = cv2.GaussianBlur(img, (5, 5), 0)
# edges = cv2.Canny(img, 100, 200)
# Contours appropriate for filling with colour
# edges = cv2.Canny(img, 5, 15)
# edges = cv2.GaussianBlur(edges, (5, 5), 0) #consider blurring again, after edge detection
# contours, hchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours, hch = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Discard too-small contours
def is_big_enough(cnt):
x, y, w, h = cv2.boundingRect(cnt)
return w > MIN_CONTOUR_DIMENSION or h > MIN_CONTOUR_DIMENSION
contours = [c for c in contours if is_big_enough(c)]
return contours
def compute_flow(previmg, img, prevflow):
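# Dense Farneback optical flow between consecutive frames; returns the raw flow field
# plus a BGR visualisation of it for debugging.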
pyramidScale = 0.5 # 0.5 is each layer half the size
pyramidLevels = 3
windowSize = 10 # higher is faster, robust, blurrier
iterations = 3
polySize = 5 # typically 5 or 7
polySigma = 1.2 # suggested 5->1.1, 7->1.5
gaussian = False
if previmg is None:
return None, None
flags = 0
if gaussian:
flags |= cv2.OPTFLOW_FARNEBACK_GAUSSIAN
if prevflow is not None:
flags |= cv2.OPTFLOW_USE_INITIAL_FLOW
flow = cv2.calcOpticalFlowFarneback(previmg, img,
pyramidScale, pyramidLevels, windowSize, iterations,
polySize, polySigma, flags, prevflow)
magMax = 8
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
mag = np.clip(mag, 0, magMax)
hsv = np.zeros(img.shape+(3,), 'uint8')
hsv[..., 0] = ang * 180 / np.pi / 2 # hue is angle
hsv[..., 1] = 255 # full saturation
hsv[..., 2] = mag * (255 / magMax) # value is magnitude
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return flow, rgb
def draw_contours(img, contours, prev_drawn=None):
# Draw and fill all contours
drawn = np.zeros_like(img)
for ctrIdx, ctr in enumerate(contours):
color = (0, 0, 0)
if prev_drawn is not None:
# Randomly sample the bounding box of this contour in the previous image and use the most
# common colour.
x, y, w, h = cv2.boundingRect(ctr)
prev_box = prev_drawn[y:y+h, x:x+w]
# cv2.rectangle(drawn, (x, y), (x + w, y + h), (0, 255, 0), 1)
color_counts = Counter()
for _ in range(12): #Make this bigger for more domination by larger blobs
xx = random.randrange(w)
yy = random.randrange(h)
color_counts[tuple(map(int, prev_box[yy, xx]))] += 1
color_counts[(0, 0, 0)] = 0 # Don't choose black if possible
counted = sorted(((count, color) for color, count in color_counts.items()), reverse=True)
color = counted[0][1]
if color == (0, 0, 0):
color = next(COLOURS)
cv2.drawContours(drawn, contours, ctrIdx, color, thickness=-1) # -1 to fill
return drawn
def get_draw_contours_colors(img, contours, prev_drawn=None):
# Draw and fill all contours
colors = []
drawn = np.zeros_like(img)
for ctrIdx, ctr in enumerate(contours):
color = (0, 0, 0)
if prev_drawn is not None:
# Randomly sample the bounding box of this contour in the previous image and use the most
# common colour.
x, y, w, h = cv2.boundingRect(ctr)
prev_box = prev_drawn[y:y+h, x:x+w]
# cv2.rectangle(drawn, (x, y), (x + w, y + h), (0, 255, 0), 1)
color_counts = Counter()
for _ in range(12): #Make this bigger for more domination by larger blobs
xx = random.randrange(w)
yy = random.randrange(h)
color_counts[tuple(map(int, prev_box[yy, xx]))] += 1
color_counts[(0, 0, 0)] = 0 # Don't choose black if possible
counted = sorted(((count, color) for color, count in color_counts.items()), reverse=True)
color = counted[0][1]
if color == (0, 0, 0):
color = next(COLOURS)
cv2.drawContours(drawn, contours, ctrIdx, color, thickness=-1) # -1 to fill
colors.append(color)
return colors
def local_max(img):
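# Grey-scale dilation replaces each pixel with the maximum of its 40x40 neighbourhood,
# so a pixel that still equals the dilated image is a local maximum; CMP_GE returns a mask of those pixels.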
kernel = np.ones((40, 40), np.uint8)
mask = cv2.dilate(img, kernel)
result = cv2.compare(img, mask, cv2.CMP_GE)
return result
def draw_rectangles(img, contours):
#draw the bounding rectangles around contours
for i, ctr in enumerate(contours):
x, y, w, h = cv2.boundingRect(ctr)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
def bottom_of_contour(img, contours, draw=False):
bottom_x = []
bottom_y = []
#get the bounding rectangles around contours
for i, ctr in enumerate(contours):
x, y, w, h = cv2.boundingRect(ctr)
bottom_x.append(x + w/2)
bottom_y.append(y + h)
if draw:
cv2.circle(img,(int(bottom_x[i]),int(bottom_y[i])),3,(255,0,0))
return zip(bottom_x, bottom_y)
def move_contour_y(img, contours, colors):
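# Translate each contour straight down so its lowest point sits on a common baseline at
# (height - VERT_OFFSET), then redraw the filled contours into a fresh image.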
height,width = img.shape[:2]
new_img = np.zeros(img.shape, dtype=np.uint8)
new_ctrs = []
bottom_ctrs = bottom_of_contour(img, contours, draw=False)
if len(bottom_ctrs) == 0:
return new_img
(x_shifts, y_shifts) = zip(*bottom_ctrs)
#x_positions = calc_contour_x_shift(x_shifts, width, height)
for i, ctr in enumerate(contours):
new_ctrs.append(ctr + (0, (height - VERT_OFFSET) - y_shifts[i])) #TODO - move contours in the x direction away from center, depending on how far away they are (pixel row)
for idx, ctr in enumerate(new_ctrs):
cv2.drawContours(new_img,new_ctrs,idx,colors[idx],thickness=cv2.cv.CV_FILLED) #TODO - plot smallest contours first, so that larger ones cover/overplot the smaller ones
return new_img
def calc_contour_x_shift(row_pixels, img_width, img_height):
x_positions = []
for idx, row_pixel in enumerate(row_pixels):
d = img_width - 2*img_height/TAN_THETA
if row_pixel > (img_width/2):
x_positions.append(row_pixel + (img_width/d)*row_pixel)
else:
x_positions.append(row_pixel - (img_width/d)*row_pixel)
return x_positions
def enlarge_img(img):
h,w = img.shape[:2]
#new_img = cv2.resize(img, (2*w, 2*h))
return img[150:h,0:w]
def draw_text(img):
cv2.putText(img,"#ShadowWall", (500,30), cv2.cv.CV_FONT_HERSHEY_SIMPLEX,1,(0,255,0),5,cv2.CV_AA)
return img
|
anorth/shadow-wall
|
Processing2Python_old/showMovie/panel.py
|
from helperFunctions import percentage
# def send_frame_to_led_panels(frame, num_ports, led_area, led_image, led_data, led_layout):
# #Write the frame to panels
# for i in range(0, num_ports):
# #copy a portion of the movie's image to the LED image
# xoffset = percentage(frame.width, led_area[i].x)
# yoffset = percentage(frame.height, led_area[i].y)
# xwidth = percentage(frame.width, led_area[i].width)
# yheight = percentage(frame.height, led_area[i].height)
#
# led_image[i].copy(frame, xoffset, yoffset, xwidth, yheight, 0, 0, led_image[i].width, led_image[i].height)
# # // convert the LED image to raw data
# # byte[] ledData = new byte[(ledImage[i].width * ledImage[i].height * 3) + 3];
# # image2data(ledImage[i], ledData, ledLayout[i]);
# # if (i == 0) {
# # ledData[0] = '*'; // first Teensy is the frame sync master
# # int usec = (int)((1000000.0 / TargetFrameRate) * 0.75);
# # ledData[1] = (byte)(usec); // request the frame sync pulse
# # ledData[2] = (byte)(usec >> 8); // at 75% of the frame time
# # } else {
# # ledData[0] = '%'; // others sync to the master board
# # ledData[1] = 0;
# # ledData[2] = 0;
# # }
# # // send the raw data to the LEDs :-)
# # ledSerial[i].write(ledData);
# # }
# # }
# #
# // image2data converts an image to OctoWS2811's raw data format.
# // The number of vertical pixels in the image must be a multiple
# // of 8. The data array must be the proper size for the image.
# void image2data(PImage image, byte[] data, boolean layout) {
# int offset = 3;
# int x, y, xbegin, xend, xinc, mask;
# int linesPerPin = image.height / 8;
# int pixel[] = new int[8];
#
# for (y = 0; y < linesPerPin; y++) {
# if ((y & 1) == (layout ? 0 : 1)) {
# // even numbered rows are left to right
# xbegin = 0;
# xend = image.width;
# xinc = 1;
# } else {
# // odd numbered rows are right to left
# xbegin = image.width - 1;
# xend = -1;
# xinc = -1;
# }
# for (x = xbegin; x != xend; x += xinc) {
# for (int i=0; i < 8; i++) {
# // fetch 8 pixels from the image, 1 for each pin
# pixel[i] = image.pixels[x + (y + linesPerPin * i) * image.width];
# pixel[i] = convert_RGB_2_GRB(pixel[i]);
# // pixel[i] = pixel[i] % 20;
# // pixel[i] = pixel[i] & 0x1F1F1F;
# pixel[i] = pixel[i] & 0xf0f0f0;
# pixel[i] = pixel[i] >> 4;
# }
# // convert 8 pixels to 24 bytes
# for (mask = 0x800000; mask != 0; mask >>= 1) {
# byte b = 0;
# for (int i=0; i < 8; i++) {
# if ((pixel[i] & mask) != 0) b |= (1 << i);
# }
# data[offset++] = b;
# }
# }
# }
# }
#
# int convert_RGB_2_GRB(int colour) {
# int red = (colour & 0xFF0000) >> 16;
# int green = (colour & 0x00FF00) >> 8;
# int blue = (colour & 0x0000FF);
#
# red = gammaTable[red];
# green = gammaTable[green];
# blue = gammaTable[blue];
#
# return (green << 16) | (red << 8) | (blue);
# }
#
# // ask a Teensy board for its LED configuration, and set up the info for it.
# void serialConfigure(String portName) {
# if (numberOfPortsInUse >= MaximumNumberOfPorts) {
# println("too many serial ports, please increase maxPorts");
# errorCount++;
# return;
# }
# try {
# ledSerial[numberOfPortsInUse] = new Serial(this, portName);
# if (ledSerial[numberOfPortsInUse] == null) throw new NullPointerException();
# ledSerial[numberOfPortsInUse].write('?');
# }
# catch (Throwable e) {
# println("Serial port " + portName + " does not exist or is non-functional");
# errorCount++;
# return;
# }
# delay(50);
# String line = ledSerial[numberOfPortsInUse].readStringUntil(10);
# if (line == null) {
# println("Serial port " + portName + " readStringUntilis not responding.");
# println("Is it really a Teensy 3.0 running VideoDisplay?");
# errorCount++;
# return;
# }
# String param[] = line.split(",");
# if (param.length != 12) {
# println("Error: port " + portName + " did not respond to LED config query");
# errorCount++;
# return;
# }
# // only store the info and increase numPorts if Teensy responds properly
# ledImage[numberOfPortsInUse] = new PImage(Integer.parseInt(param[0]), Integer.parseInt(param[1]), RGB);
# // Note: rows and cols are according to the teensy, which is configured to be mounted rotated π/2
# println("Panel", numberOfPortsInUse, "cols", param[0], "rows", param[1]);
# ledArea[numberOfPortsInUse] = new Rectangle(Integer.parseInt(param[5]), Integer.parseInt(param[6]),
# Integer.parseInt(param[7]), Integer.parseInt(param[8]));
# println("xoff", param[5], "yoff", param[6], "width%", param[7], "height%", param[8]);
# ledLayout[numberOfPortsInUse] = (Integer.parseInt(param[5]) == 0);
# println("layout", param[5]);
# numberOfPortsInUse++;
# }
#
# // scale a number by a percentage, from 0 to 100
# int percentage(int num, int percent) {
# double mult = percentageFloat(percent);
# double output = num * mult;
# return (int)output;
# }
#
# // scale a number by the inverse of a percentage, from 0 to 100
# int percentageInverse(int num, int percent) {
# double div = percentageFloat(percent);
# double output = num / div;
# return (int)output;
# }
#
# // convert an integer from 0 to 100 to a float percentage
# // from 0.0 to 1.0. Special cases for 1/3, 1/6, 1/7, etc
# // are handled automatically to fix integer rounding.
# double percentageFloat(int percent) {
# if (percent == 33) return 1.0 / 3.0;
# if (percent == 17) return 1.0 / 6.0;
# if (percent == 14) return 1.0 / 7.0;
# if (percent == 13) return 1.0 / 8.0;
# if (percent == 11) return 1.0 / 9.0;
# if (percent == 9) return 1.0 / 11.0;
# if (percent == 8) return 1.0 / 12.0;
# return (double)percent / 100.0;
# }
|
anorth/shadow-wall
|
Processing2Python_old/showMovie/helperFunctions.py
|
import numpy as np
import cv2
gamma = 1.7
gamma_table = []
def initialise_gamma_table():
for i in range(0, 256):
gamma_table.append(int(np.power(i / 255.0, gamma) * 255.0 + 0.5))
# convert an integer from 0 to 100 to a float percentage
# from 0.0 to 1.0. Special cases for 1/3, 1/6, 1/7, etc
# are handled automatically to fix integer rounding.
def percentage_float(percent):
if percent == 33:
return 1.0 / 3.0
if percent == 17:
return 1.0 / 6.0
if percent == 14:
return 1.0 / 7.0
if percent == 13:
return 1.0 / 8.0
if percent == 11:
return 1.0 / 9.0
if percent == 9:
return 1.0 / 11.0
if percent == 8:
return 1.0 / 12.0
return np.double(percent / 100.0)
# scale a number by a percentage, from 0 to 100
def percentage(num, percent):
mult = percentage_float(percent)
return int(num * mult)
# scale a number by the inverse of a percentage, from 0 to 100
def percentage_inverse(num, percent):
div = percentage_float(percent)
return int(num / div)
def convert_rgb_2_bgr(color):
red = (color & 0xFF0000) >> 16
green = (color & 0x00FF00) >> 8
blue = (color & 0x0000FF)
red = gamma_table[red]
green = gamma_table[green]
blue = gamma_table[blue]
return (green << 16) | (red << 8) | blue
# image2data converts an image to OctoWS2811's raw data format.
# The number of vertical pixels in the image must be a multiple
# of 8. The data array must be the proper size for the image.
def image_to_data(image, layout):
byte_array = []
byte_array.insert(0, 0)
byte_array.insert(1, 0)
byte_array.insert(2, 0) # reserved values
offset = 3
height, width, depth = image.shape
print image.shape
lines_per_pin = width / 8
pixel = []
for y in range(0, lines_per_pin):
# if (y & 1) == (layout ? 0 : 1): # even numbered columns are left to right
if (y & 1) == layout:
ans = 0
else:
ans = 1
if ans:
xbegin = 0
xend = height # xend = image.width
xinc = 1
else: # odd numbered rows are right to left
xbegin = height - 1 # image.width - 1
xend = -1
xinc = -1
for x in range(xbegin, xend, xinc):
pixel = []  # reset per column so pixel[0..7] hold this column's 8 pin values
for i in range(0, 8): # fetch 8 pixels from the image, 1 for each pin
tmp = np.copy(image[x, (y + lines_per_pin * i), :])
tmp &= 0xF0F0F0
tmp >>= 4
pixel.append(tmp)
# convert 8 pixels to 24 bytes
for i in range(0, 8):
byte_array.insert(offset, pixel[i][0])
byte_array.insert(offset + 1, pixel[i][1])
byte_array.insert(offset + 2, pixel[i][2])
offset += 3
# mask = 0x800000
# num_right_shifts = 6
# for i in range(0, num_right_shifts):
# byte = 0
# for byte_count in range(0, 8):
# if pixel[i] & mask != 0:
# byte |= (1 << i)
#
# byte_array[offset] = byte
# offset += 1
# mask >>= 1
print len(byte_array)
return byte_array
# because one of the panels is different from others, we need to compensate by inserting dummy columns
def add_dummy_columns(image, idx_dummy_columns):
r, g, b = cv2.split(image)
height, width = r.shape
for i in range(0, len(idx_dummy_columns)):
r = np.insert(r, idx_dummy_columns[i], np.zeros(height), axis=1)
g = np.insert(g, idx_dummy_columns[i], np.zeros(height), axis=1)
b = np.insert(b, idx_dummy_columns[i], np.zeros(height), axis=1)
return cv2.merge((b, g, r)) # merging as bgr so as not to call 'convert_rgb_2_bgr'
def resize(frame, width, height, extra_columns_idxs):
res = cv2.resize(frame, (width, height))
# check that res size is 180x120
if extra_columns_idxs is not None:
res = add_dummy_columns(res, extra_columns_idxs)
print res.shape # now image size should be 184x120
return res
|
anorth/shadow-wall
|
Processing2Python/showMovie/helperFunctions.py
|
import time
import cv2
import numpy as np
from bitarray import bitarray
def make_gamma_table(g):
return [int(((i / 255.0) ** g) * 255.0 + 0.5) for i in range(256)]
def reverse_bits(x):
result = 0
for i in xrange(8):
if (x >> i) & 1: result |= 1 << (8 - 1 - i)
return result
led_gamma = 2.8#1.7
led_gamma_table = make_gamma_table(led_gamma) #[int(((i / 255.0) ** led_gamma) * 255.0 + 0.5) for i in range(256)]
# Reduce LED intensity by discarding least significant bits
led_gamma_table = [c >> 3 for c in led_gamma_table]
led_table_np = np.array(led_gamma_table, dtype=np.uint8)
# led_table_np_reversed = np.array([reverse_bits(i) for i in led_gamma_table], dtype=np.uint8)
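# np.packbits fills each output byte MSB-first, which would put pin 0 in bit 7; the Teensy
# protocol (see image_to_data_original) expects pin 0 in bit 0, so packed bytes are bit-reversed
# through this lookup table.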
bitswap_table = np.array([reverse_bits(i) for i in range(256)], dtype=np.uint8)
def bgr2grb(b, g, r):
blue = led_gamma_table[b]
green = led_gamma_table[g]
red = led_gamma_table[r]
return (green << 16) | (red << 8) | blue
# image2data converts an image to OctoWS2811's raw data format.
# The number of vertical pixels in the image must be a multiple
# of 8. The data array must be the proper size for the image.
def image_to_data_original(image, strip_layout_direction):
byte_array = bytearray(11523)
offset = 3
height, width, depth = image.shape
image = cv2.flip(image,1)
lines_per_pin = width / 8
for y in range(0, lines_per_pin):
# Even strips are indexed forward, odd strips backwards.
if y % 2 == strip_layout_direction:
forward = 0
else:
forward = 1
if forward:
xbegin = 0
xend = height # xend = image.width
xinc = 1
else: # odd numbered rows are right to left
xbegin = height - 1 # image.width - 1
xend = -1
xinc = -1
#print 'xbeing: ' + str(xbegin) + ' xend: ' + str(xend) + ' xinc: ' + str(xinc)
for x in range(xbegin, xend, xinc):
pixels = [0] * 8
for i in range(0, 8): # fetch 8 pixels from the image, 1 for each strip
pixel_channels = np.copy(image[x, (y + lines_per_pin * i), :])
pixels[i] = bgr2grb(pixel_channels[0], pixel_channels[1], pixel_channels[2])
# serialise 8 pixels to 24 bytes
mask = 0x800000
while mask != 0:
b = 0
for i in range(0, 8):
if pixels[i] & mask != 0:
b |= (1 << i)
byte_array[offset] = b
offset += 1
mask >>= 1
return byte_array
mean_dur = 0.0105
def image_to_data_fast(image, strip_layout_direction):
global mean_dur
tstart = time.time()
height, width, depth = image.shape
image = cv2.flip(image,1)
teensy_pins = 8
rows_per_pin = width / teensy_pins
# The image is in BGR, roll the channel axis to get GRB
image = np.roll(image, -1, axis=2)
bit_chunks = []
for y in range(0, rows_per_pin):
# Even strips are indexed forward, odd strips backwards.
# Collect one pixel per teensy pin, 8 x 3 bytes (g, r, b), for each "height" row (1st dimension)
if (y % 2) != strip_layout_direction:
pixel_arrs = image[::, y:y + rows_per_pin * teensy_pins:rows_per_pin, :]
else:
pixel_arrs = image[::-1, y:y+rows_per_pin*teensy_pins:rows_per_pin, :]
# Look up gamma-corrected LED values
pixel_arrs = led_table_np[pixel_arrs]
# Unpack to bits so we can re-order them
pixel_bits_np = np.unpackbits(pixel_arrs)
# Serialise pixels to 3 bytes per pin, 1 bit at a time.
# The most significant bit for each pin goes first.
# This relies on teensy_pins <= 8 so it fits in a byte.
bit_chunks.append(pixel_bits_np.reshape((height, 8, 24)).transpose((0, 2, 1)))
all_bits_np = np.zeros((24,), dtype=np.bool)
all_bits_np = np.append(all_bits_np, bit_chunks)
# Pack the bits back up
packed = np.packbits(all_bits_np)
packed = bitswap_table[packed]
bytearr = bytearray(packed.tobytes())
# tend = time.time()
# duration = (tend-tstart)
# mean_dur = (mean_dur * 0.98) + (duration * 0.02)
# print mean_dur*1000, duration*1000
# assert len(bytearr) == 11523 # not true for the last teensy
return bytearr
# because one of the panels is different from others, we need to compensate by inserting dummy columns
def add_dummy_columns(image, idx_dummy_columns):
b, g, r = cv2.split(image)
height, width = r.shape
for i in range(0, len(idx_dummy_columns)):
b = np.insert(b, idx_dummy_columns[i], np.zeros(height), axis=1)
g = np.insert(g, idx_dummy_columns[i], np.zeros(height), axis=1)
r = np.insert(r, idx_dummy_columns[i], np.zeros(height), axis=1)
return cv2.merge((b, g, r))
def resize(frame, width, height, extra_columns_idxs):
res = cv2.resize(frame, (width, height))
# check that res size is 180x120
if extra_columns_idxs is not None:
res = add_dummy_columns(res, extra_columns_idxs)
# print res.shape # now image size should be 184x120
return res
|
anorth/shadow-wall
|
Processing2Python/showMovie/check_panel_time.py
|
import datetime
START_TIME_HOUR = 18
MINUTES_PAST_HOUR = 50
END_TIME_HOUR = 23 #11pm
def good_time_to_play():
now = datetime.datetime.now()
# if now.day == 4:
# return False
return START_TIME_HOUR <= now.hour < END_TIME_HOUR
|
anorth/shadow-wall
|
Processing2Python/showMovie/movie2serial.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy as np
import cv2
import serial
import glob
import time
from MyRectangle import MyRectangle
import helperFunctions as hp
from concurrent.futures import ThreadPoolExecutor
from showMovie.imgproc import Pipeline, BackgroundRejecterMog, BackgroundRejecterAvg
import check_panel_time
from imgproc import draw_text
FAKE_SERIAL = False
DEFISH = True
MAX_NUM_PORTS = 24
TARGET_FRAME_RATE = 30
TEENSY_SYNC = False
CAPTURE_SIZE = (1920, 1080)
PREVIEW_SIZE = (CAPTURE_SIZE[0]/2, CAPTURE_SIZE[1]/2)
led_serial = []
led_image = []
led_area = []
led_layout = []
PANEL_WIDTH = 180
PANEL_HEIGHT = 120
DUMMY_COL_INDICES = list(range(24, 24+16, 4))
send_executor = ThreadPoolExecutor(1)
is_writing = False
FAKE_PORTS = ['/fake/1', '/fake/2', '/fake/3', '/fake/4', '/fake/5', '/fake/6']
class FakeSerial(object):
def readline(self):
return '120,32,0,0,0,152,0,32,120,0,0,0'
def write(self, b):
pass
def close(self):
pass
# ask a Teensy board for its LED configuration, and set up the info for it.
def serial_configure(port_name, port_num, fake=False):
if port_num >= MAX_NUM_PORTS:
print 'Too many serial ports, please increase maxPorts'
return
print 'Port name ' + port_name
port = FakeSerial() if fake else serial.Serial(port_name, timeout=1)
led_serial.append(port)
if led_serial[port_num] is None:
print 'portName: ', port_name, ' returned null'
return
time.sleep(500 / 1000.0)
led_serial[port_num].write('?')
line = led_serial[port_num].readline()
print line
if not line:  # readline() returns an empty string on timeout, never None
print 'Serial port ' + port_name + ' is not responding.'
print 'Is it really a Teensy running VideoDisplay?'
return
params = line.split(",")
if len(params) != 12:
print 'Error: port ' + port_name + ' did not respond to LED config query'
return
# only store the info and increase numPorts if Teensy responds properly
led_image.append(np.zeros((int(params[0]), int(params[1]), 3), np.uint8))
# Note: rows and cols are according to the teensy, which is configured to be mounted rotated π/2
#print 'Panel: ', port_num, ' cols: ', params[0], ' rows: ', params[1]
rect = MyRectangle((int(params[5]), int(params[6])), int(params[7]), int(params[8]))
led_area.append(rect)
#print 'xoff: ', params[5], ' yoff: ', params[6], ' width: ', params[7], '%, height: ', params[8], '%'
led_layout.append(int(params[2]))
def initialise_serial_ports(fake=False):
ports = FAKE_PORTS if fake else glob.glob('/dev/ttyACM*')  # or '/dev/tty.usbmodem*'
print 'Serial Ports: '
print ports
idx = -1
for idx, port in enumerate(ports):
serial_configure(port, idx, fake=fake)
return idx + 1
def close_all_ports(num_ports):
for i in range(0, num_ports):
led_serial[i].close()
def send_frame_to_led_panels(frame, num_ports, show_debug=False):
# Resize to exact dimensions of panels, adding in dummy columns
frame = hp.resize(frame, PANEL_WIDTH, PANEL_HEIGHT, DUMMY_COL_INDICES)
if show_debug: cv2.imshow("panels", frame)
if is_writing:
return # drop the frame that the panels can't keep up with
# Write the frame to panels
for teensy_idx in range(0, num_ports):
# copy a portion of the movie's image to the LED image
xoffset = led_area[teensy_idx].x
yoffset = led_area[teensy_idx].y
twidth = led_area[teensy_idx].width
theight = led_area[teensy_idx].height
# determine what portion of frame to send to given Teensy:
led_image[teensy_idx] = frame[yoffset:yoffset + theight, xoffset:xoffset + twidth, :]
led_data = hp.image_to_data_fast(led_image[teensy_idx], led_layout[teensy_idx])
# verify_led_data(teensy_idx, led_data)
# send byte data to Teensys:
if (not TEENSY_SYNC) or teensy_idx == 0:
led_data[0] = '*' # first Teensy is the frame sync master
usec = int((1000000.0 / TARGET_FRAME_RATE) * 0.75)
led_data[1] = (usec) & 0xff # request the frame sync pulse
led_data[2] = (usec >> 8) & 0xff # at 75% of the frame time
else:
led_data[0] = '%' # others sync to the master board
led_data[1] = 0
led_data[2] = 0
def write(idx, data):
global is_writing
is_writing = True
led_serial[idx].write(data)
is_writing = False
# write(teensy_idx, bytes(led_data))
send_executor.submit(write, teensy_idx, bytes(led_data))
def verify_led_data(teensy_idx, led_data):
if teensy_idx > 4: return
led_data_orig = hp.image_to_data_original(led_image[teensy_idx], led_layout[teensy_idx])
if led_data != led_data_orig:
print(repr(led_data_orig))
print(repr(led_data))
# raise AssertionError
def open_camera():
print "Opening capture from camera at", CAPTURE_SIZE
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, CAPTURE_SIZE[0])
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, CAPTURE_SIZE[1])
# cap.set(cv2.cv.CV_CAP_PROP_GAIN, 1)
# cap.set(cv2.cv.CV_CAP_PROP_EXPOSURE, 1)
return cap
def open_file(path):
print "Opening capture from", path
cap = cv2.VideoCapture(path)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
return cap
# Notes:
# - consider reading in grayscale frame straight away
def main(argv):
filename = None
if argv:
filename = argv[0]
needs_release = False
if filename:
cap = open_file(filename)
else:
cap = open_camera()
needs_release = True
if not cap.isOpened():
print "Failed to open capture"
return
print "Initialising pipeline"
pipeline = Pipeline(DEFISH, bg=BackgroundRejecterAvg())
print "Initialising serial ports"
num_ports = initialise_serial_ports(fake=FAKE_SERIAL)
print "Initialised", num_ports, "ports"
# Open a preview window
cv2.namedWindow("capture")
cv2.namedWindow("debug")
cv2.namedWindow("debug2")
cv2.namedWindow("panels")
# Run until no more frames
run(cap, filename, num_ports, pipeline)
cv2.destroyWindow("capture")
cv2.destroyWindow("debug")
cv2.destroyWindow("debug2")
cv2.destroyWindow("panels")
if needs_release:
cap.release() #release camera
# in case of some crash - set all LEDs to black before closing ports to Teensys
send_black_frame(num_ports)
time.sleep(1)
send_black_frame(num_ports)
send_executor.shutdown(wait=True)
close_all_ports(num_ports)
def run(cap, filename, num_ports, pipeline):
tstart = time.time()
have_frame, frame = cap.read()
framecount = 1
# need to first draw all black frame:
send_black_frame(num_ports)
while have_frame:
show_debug = framecount % (TARGET_FRAME_RATE/2) == 0
if show_debug: cv2.imshow("capture", cv2.resize(frame, PREVIEW_SIZE))
tprocstart = time.time()
frame = pipeline.process(frame, show_debug)
frame = cv2.flip(frame, 1)
#draw_text(frame)
tprocend = time.time()
if check_panel_time.good_time_to_play():
send_frame_to_led_panels(frame, num_ports, show_debug)
else:
send_black_frame(num_ports)
key = cv2.waitKey(1)
if key == 27: # exit on ESC
break
tend = time.time()
if show_debug:
proc_duration = (tprocend - tprocstart)
send_duration = (tend - tprocend)
duration = (tend - tstart)
print "Frame took", duration * 1000, "ms,", proc_duration * 1000, "proc,", \
send_duration * 1000, "send,", (1 / duration), "fps"
tstart = time.time()
have_frame, frame = cap.read()
framecount += 1
if filename and not have_frame:
framecount = 0
cap = open_file(filename)
have_frame, frame = cap.read()
return cap
def send_black_frame(num_ports):
black_frame = np.zeros((184, 120, 3), np.uint8)
black_frame[:] = (0, 0, 0)
send_frame_to_led_panels(black_frame, num_ports)
if __name__ == "__main__":
main(sys.argv[1:])
|
anorth/shadow-wall
|
InterviewPrepScripts/byteSerial.py
|
import numpy as np
import cv2
from time import sleep
import serial
#ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
ser = serial.Serial('/dev/ttyACM1', 115200, timeout=1)
byteR = (255,0,0)
byteG = (0,255,0)
byteB = (0,0,255)
NUM_LEDS = 100
j = 0
while 1:
tdata = ser.read()
if tdata == 'a':
print 'got an a!'
print j
if j == 0:
for i in range(0,NUM_LEDS):
ser.write(np.uint8(byteR))
ser.write(np.uint8(byteR))
ser.write(np.uint8(byteR))
j = 1
continue
if j == 1:
for i in range(0,NUM_LEDS):
ser.write(np.uint8(byteG))
ser.write(np.uint8(byteG))
ser.write(np.uint8(byteG))
j = 2
continue
if j == 2:
for i in range(0,NUM_LEDS):
ser.write(np.uint8(byteB))
ser.write(np.uint8(byteB))
ser.write(np.uint8(byteB))
j = 0
continue
|
anorth/shadow-wall
|
Processing2Python_old/showMovie/MyRectangle.py
|
class MyRectangle(object):
def __init__(self, top_corner, width, height):
self.x = top_corner[0]
self.y = top_corner[1]
self.width = width
self.height = height
def get_bottom_right(self):
d = self.x + self.width
t = self.y + self.height
return d, t
|
anorth/shadow-wall
|
PythonHelperScripts/genGammaCorrection.py
|
#!/usr/local/bin/python
#Date created: 18-JAN-2016
#Author: NG
#code pilfered from LadyAda!
#https://learn.adafruit.com/led-tricks-gamma-correction/the-longer-fix
#Purpose: This table remaps linear input values (the numbers we’d like to use; e.g. 127 = half brightness) to nonlinear gamma-corrected output values (numbers producing the desired effect on the LED; e.g. 36 = half brightness).
#Great news, if we want an LED to appear at 'half' power, instead of writing '127' to the LED, we can write: table[127], which is 37! W00t for power saving :)
import numpy as np
import sys #get print without newline
gamma = 2.8
max_in = 255
max_out = 255
table = []
tmp = 0
for i in range(0, max_in + 1):
if i > 0:
sys.stdout.write(',')
if np.mod(i,15) == 0:
sys.stdout.write('\n ')
tmp = np.uint8(np.round(pow(np.float32(i)/np.float(max_in),gamma) * max_out + 0.5))
table.append(tmp)
sys.stdout.write(str(tmp))
|
anorth/shadow-wall
|
Processing2Python/showMovie/defish.py
|
#!/usr/bin/env python
import cv2
import numpy as np
FISH_FOV = 180.0
class Defisher(object):
def __init__(self, src_size, dst_size, xmap, ymap):
self.src_size = src_size
self.dst_size = dst_size
self.xmap = xmap
self.ymap = ymap
def unwarp(self, img):
assert img.shape[0] == self.src_size[1]
assert img.shape[1] == self.src_size[0]
output = cv2.remap(img, self.xmap, self.ymap, cv2.INTER_LINEAR)
return output
def create_fisher(src_size, dst_size, hfovd=FISH_FOV, vfovd=FISH_FOV):
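# Precompute remap lookup tables that map each pixel of the flat destination image back to a
# source pixel in the hemispherical fisheye view; Defisher.unwarp() then applies them with cv2.remap.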
Ws, Hs = src_size
Wd, Hd = dst_size
# Build the fisheye mapping
map_x = np.zeros((Hd, Wd), np.float32)
map_y = np.zeros((Hd, Wd), np.float32)
vfov = (vfovd / 180.0) * np.pi
hfov = (hfovd / 180.0) * np.pi
vstart = ((180.0 - vfovd) / 180.00) * np.pi / 2.0
hstart = ((180.0 - hfovd) / 180.00) * np.pi / 2.0
count = 0
# need to scale to changed range from our
# smaller circle traced by the fov
xmax = np.sin(np.pi / 2.0) * np.cos(vstart)
xmin = np.sin(np.pi / 2.0) * np.cos(vstart + vfov)
xscale = xmax - xmin
xoff = xscale / 2.0
zmax = np.cos(hstart)
zmin = np.cos(hfov + hstart)
zscale = zmax - zmin
zoff = zscale / 2.0
# Fill in the map, this is slow but
# we could probably speed it up
# since we only calc it once, whatever
for y in range(0, int(Hd)):
theta = hstart + (hfov * ((float(y) / float(Hd))))
zp = ((np.cos(theta)) + zoff) / zscale #
for x in range(0, int(Wd)):
count = count + 1
phi = vstart + (vfov * ((float(x) / float(Wd))))
xp = ((np.sin(theta) * np.cos(phi)) + xoff) / zscale #
xS = Ws - (xp * Ws)
yS = Hs - (zp * Hs)
map_x.itemset((y, x), int(xS))
map_y.itemset((y, x), int(yS))
return Defisher(src_size, dst_size, map_x, map_y)
|
anorth/shadow-wall
|
Processing2Python_old/showMovie/movie2serial.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import serial
import glob
import time
from MyRectangle import MyRectangle
import helperFunctions as hp
cv2.namedWindow("preview")
vc = cv2.VideoCapture()
MaximumNumberOfPorts = 24
target_frame_rate = 30
led_serial = []
led_image = []
led_area = []
led_layout = []
panel_width_in_pixels = 180
panel_height_in_pixels = 120
idx_dummy_columns = np.array([147, 151, 155, 159])
# ask a Teensy board for its LED configuration, and set up the info for it.
def serial_configure(port_name, port_num):
if port_num >= MaximumNumberOfPorts:
print 'Too many serial ports, please increase maxPorts'
return
print 'Port name ' + port_name
led_serial.append(serial.Serial(port_name, timeout=1))
if led_serial[port_num] is None:
print 'portName: ', port_name, ' returned null'
return
time.sleep(50 / 1000.0) # sleep for 50ms
led_serial[port_num].write('?')
line = led_serial[port_num].readline()
print line
if not line:  # readline() returns an empty string on timeout, never None
print 'Serial port ' + port_name + ' is not responding.'
print 'Is it really a Teensy running VideoDisplay?'
return
params = line.split(",")
if len(params) != 12:
print 'Error: port ' + port_name + ' did not respond to LED config query'
return
# only store the info and increase numPorts if Teensy responds properly
led_image.append(np.zeros((int(params[0]), int(params[1]), 3), np.uint8))
# Note: rows and cols are according to the teensy, which is configured to be mounted rotated π/2
print 'Panel: ', port_num, ' cols: ', params[0], ' rows: ', params[1]
rect = MyRectangle((int(params[5]), int(params[6])), int(params[7]), int(params[8]))
led_area.append(rect)
print 'xoff: ', params[5], ' yoff: ', params[6], ' width: ', params[7], '%, height: ', params[8], '%'
led_layout.append(int(params[2]))
print 'layout: ', params[2]
def initialise_serial_ports():
ports = glob.glob('/dev/tty.usbmodem*')
print 'Serial Ports: '
print ports
total_num_ports = 0
for idx, port in enumerate(ports):
serial_configure(port, idx)
total_num_ports += 1
print led_serial
return total_num_ports
def close_all_ports(num_ports):
for i in range(0, num_ports):
led_serial[i].close()
# int initialiseProcessingPipeline():
# opencv = new OpenCV(this, WidthInPixels, HeightInPixels);
# println("Using color?", opencv.getUseColor());
# //opencv.useGray();
#
# blobDetector = new BlobDetection(WidthInPixels, HeightInPixels);
# //BlobDetection.setConstants(5, 20, 60);
# blobDetector.setThreshold(0.5);
# blobDetector.setPosDiscrimination(true); // find highlights, not lowlights
# }
def send_frame_to_led_panels(frame, num_ports):
# Write the frame to panels
[height, width, depth] = frame.shape
for teensy_idx in range(0, num_ports):
# copy a portion of the movie's image to the LED image
xoffset = led_area[teensy_idx].x
yoffset = led_area[teensy_idx].y
twidth = led_area[teensy_idx].width
theight = led_area[teensy_idx].height
print 'xoffset: ' + str(xoffset)
print 'yoffset: ' + str(yoffset)
print 'width: ' + str(twidth)
print 'height: ' + str(theight)
print 'start width: ' + str(xoffset) + ' end width: ' + str(xoffset + twidth)
print 'start height: ' + str(yoffset) + ' end height: ' + str(yoffset + theight)
print frame.shape
# determine what portion of frame to send to given Teensy:
# led_image[teensy_idx] = np.copy(frame[xoffset:xoffset+theight,yoffset:yoffset+twidth,:])
led_image[teensy_idx] = np.copy(frame[yoffset:yoffset + theight, xoffset:xoffset + twidth, :])
# convert the LED image to raw data byte[]
print 'led_image[teensy_idx] ' + str(led_image[teensy_idx].shape)
led_data = hp.image_to_data(led_image[teensy_idx], led_layout[teensy_idx])
# send byte data to Teensys:
if teensy_idx == 0:
led_data[0] = '*' # first Teensy is the frame sync master
usec = int((1000000.0 / target_frame_rate) * 0.75)
led_data[1] = (usec) & 0xff # request the frame sync pulse
led_data[2] = (usec >> 8) & 0xff # at 75% of the frame time
else:
led_data[0] = '%' # others sync to the master board
led_data[1] = 0
led_data[2] = 0
# and finally send the raw data to the LEDs
print led_serial[teensy_idx]
print led_data
led_serial[teensy_idx].write(bytearray(led_data))
def main():
number_of_ports_in_use = initialise_serial_ports()
print number_of_ports_in_use
hp.initialise_gamma_table()
# print hp.gamma_table
# now capture frames from webcam:
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
print 'Frame shape: ' + str(frame.shape)
frame_count = 3
while rval and frame_count > 0:
cv2.imshow("preview", frame)
rval, frame = vc.read()
cv2.imwrite('orig_frame.png', frame)
# resize frame to exactly be the dimensions of LED panel
new_frame = hp.resize(frame, panel_width_in_pixels, panel_height_in_pixels, idx_dummy_columns)
new_frame = cv2.flip(new_frame, 1)
cv2.imwrite('new_frame.png', new_frame)
send_frame_to_led_panels(new_frame, number_of_ports_in_use)
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
frame_count -= 1
cv2.destroyWindow("preview")
close_all_ports(number_of_ports_in_use)
if __name__ == "__main__":
main()
|
anorth/shadow-wall
|
InterviewPrepScripts/videoCapture.py
|
import numpy as np
import cv2
import time
start = time.time()
end = start + 3 #show video for three seconds - I do this to make sure your stream doesn't get stuffed up by a bad exit. Remove in future.
cap = cv2.VideoCapture(0)
#while time.time() < end: #
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if frame is None:
continue
b,g,r = cv2.split(frame)
b_new = cv2.resize(b,(10,10))
g_new = cv2.resize(g,(10,10))
r_new = cv2.resize(r,(10,10))
out = cv2.merge((b_new,g_new,r_new))
cv2.imshow('frame',out)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
anorth/shadow-wall
|
InterviewPrepScripts/byteSerialVideo.py
|
import numpy as np
import cv2
from time import sleep
import serial
#ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
cap = cv2.VideoCapture(0)
NUM_LEDS = 100
SEND_SERIAL = True
while 1:
ret,frame = cap.read()
if frame is None:
continue
# b,g,r = cv2.split(frame)
r,g,b = cv2.split(frame)
b_new = cv2.resize(b,(10,10))
g_new = cv2.resize(g,(10,10))
r_new = cv2.resize(r,(10,10))
out = cv2.merge((b_new,g_new,r_new))
cv2.imshow('frame',out)
b_send = np.array(b_new.flatten())
g_send = np.array(g_new.flatten())
r_send = np.array(r_new.flatten())
if SEND_SERIAL:
tdata = ser.read()
if tdata == 'a':
print 'got an a!'
#send frame
for i in range(0,NUM_LEDS):
colorBytes = (b_send[i]/2,g_send[i]/2,r_send[i]/2)
# colorBytes = (255-b_send[i],255-g_send[i],255-r_send[i])
ser.write(np.uint8(colorBytes))
continue
# for i in range(0,NUM_LEDS):
# print np.uint8(b_send[i])
# print np.uint8(g_send[i])
# print np.uint8(r_send[i])
if cv2.waitKey(1) & 0xFF == ord('q'):
# cap.release()
# cv2.destroyAllWindows()
break
cap.release()
cv2.destroyAllWindows()
|
EdilsonTarcio/funcao_maior_numero
|
maior_valor.py
|
def maior_valor(valores):
return max(valores)
def capturar_numeros():
lista_numeros = []
while True:
numero = int(input("Enter an integer, or zero to quit: "))
if numero == 0:
break
lista_numeros.append(numero)
return maior_valor(lista_numeros)
# Call the function
print(f"Largest value: {capturar_numeros()}")
|
wellcomecollection/aws_utils
|
tests/test_s3_utils.py
|
# -*- encoding: utf-8 -*-
import boto3
from botocore.exceptions import ClientError
import dateutil.parser
from moto import mock_s3
import pytest
from wellcome_aws_utils import s3_utils
def s3_event():
return {
"Records": [
{
"eventVersion": "2.0",
"eventSource": "aws:s3",
"awsRegion": "us-east-1",
"eventTime": "1970-01-01T00:00:00.000Z",
"eventName": "event-type",
"userIdentity": {
"principalId": "CustomerID-who-caused-the-event"
},
"requestParameters": {
"sourceIPAddress": "ip-address-where-request-came-from"
},
"responseElements": {
"x-amz-request-id": "Amazon S3 generated request ID",
"x-amz-id-2": "Amazon S3 host that processed the request"
},
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "ID in bucket notification config",
"bucket": {
"name": "bucket-name",
"ownerIdentity": {
"principalId": "CustomerID-of-bucket-owner"
},
"arn": "bucket-ARN"
},
"object": {
"key": "bucket-name",
"size": 1234,
"eTag": "object eTag",
"versionId": "v2",
"sequencer": "foo"
}
}
}
]
}
class TestIsObject(object):
@mock_s3
def test_detects_existing_object(self):
client = boto3.client('s3')
# Create a bucket and an object
client.create_bucket(Bucket='bukkit')
# First check we don't think the object exists
assert not s3_utils.is_object(bucket='bukkit', key='myfile.txt')
client.put_object(
Bucket='bukkit', Key='myfile.txt', Body=b'hello world'
)
# Now check we can detect its existence
assert s3_utils.is_object(bucket='bukkit', key='myfile.txt')
@mock_s3
def test_does_not_detect_missing_object(self):
client = boto3.client('s3')
client.create_bucket(Bucket='bukkit')
assert not s3_utils.is_object(bucket='bukkit', key='doesnotexist.py')
@mock_s3
def test_other_errors_are_raised(self):
with pytest.raises(ClientError):
s3_utils.is_object(bucket='notabukkit', key='forbidden.txt')
class TestCopyObject(object):
@mock_s3
def test_throws_error_if_src_does_not_exist(self):
client = boto3.client('s3')
client.create_bucket(Bucket='bukkit')
with pytest.raises(ValueError) as err:
s3_utils.copy_object(
src_bucket='bukkit', src_key='doesnotexist.txt',
dst_bucket='bukkit2', dst_key='doesnotexist.txt'
)
assert 'Tried to copy missing object' in err.value.args[0]
@mock_s3
def test_throws_error_if_dst_bucket_does_not_exist(self):
client = boto3.client('s3')
client.create_bucket(Bucket='bukkit')
client.put_object(Bucket='bukkit', Key='f.txt', Body=b'hello world')
with pytest.raises(ClientError):
s3_utils.copy_object(
src_bucket='bukkit', src_key='f.txt',
dst_bucket='doesnotexistbukkit', dst_key='f.txt'
)
@mock_s3
def test_copies_file_if_dst_key_does_not_exist(self):
client = boto3.client('s3')
client.create_bucket(Bucket='bukkit')
client.create_bucket(Bucket='newbukkit')
client.put_object(Bucket='bukkit', Key='f.txt', Body=b'hello world')
s3_utils.copy_object(
src_bucket='bukkit', src_key='f.txt',
dst_bucket='newbukkit', dst_key='f.txt'
)
assert s3_utils.is_object(bucket='newbukkit', key='f.txt')
@pytest.mark.skip(reason="""
Blocked on https://github.com/spulec/moto/issues/1271 as versioning
in moto doesn't work
""")
@mock_s3
@pytest.mark.parametrize('lazy, expected_version', [
(False, '1'),
(True, '0'),
])
def test_copies_file_if_dst_key_exists_but_not_lazy(
self, lazy, expected_version
):
client = boto3.client('s3')
# First create the same file in both buckets. We enable versioning
# so we can check when files change.
for b in ['bukkit', 'newbukkit']:
client.create_bucket(Bucket=b)
client.put_bucket_versioning(
Bucket=b,
VersioningConfiguration={'Status': 'Enabled'}
)
client.put_object(Bucket=b, Key='f.txt', Body=b'hello world')
resp = client.get_object(Bucket='newbukkit', Key='f.txt')
assert resp['VersionId'] == '0'
for _ in range(3):
s3_utils.copy_object(
src_bucket='bukkit', src_key='f.txt',
dst_bucket='newbukkit', dst_key='f.txt'
)
resp = client.get_object(Bucket='newbukkit', Key='f.txt')
assert resp['VersionId'] == expected_version
def test_parse_s3_event():
e = s3_event()
parsed_events = s3_utils.parse_s3_record(e)
expected_datetime = dateutil.parser.parse("1970-01-01T00:00:00.000Z")
expected_events = [{
"event_name": "event-type",
"event_time": expected_datetime,
"bucket_name": "bucket-name",
"object_key": "bucket-name",
"size": 1234,
"versionId": "v2"
}]
assert parsed_events == expected_events
def test_s3_event_with_special_char_in_key():
# We've seen cases where the S3 event has a URL-quoted plus, and this
# gets passed into the Lambda. Check we unpack it correctly.
event = s3_event()
event['Records'][0]['s3']['object']['key'] = 'foo%2Bbar'
parsed_events = s3_utils.parse_s3_record(event)
assert len(parsed_events) == 1
assert parsed_events[0]['object_key'] == 'foo+bar'
@mock_s3
def test_write_objects_to_s3():
client = boto3.client('s3')
client.create_bucket(Bucket='bukkit')
s3_utils.write_objects_to_s3(
bucket='bukkit', key='dicts.txt',
objects=[{'a': 1, 'b': 2}, {'c': 3, 'd': 4}]
)
assert s3_utils.is_object(bucket='bukkit', key='dicts.txt')
body = client.get_object(Bucket='bukkit', Key='dicts.txt')['Body'].read()
assert body == b'{"a":1,"b":2}\n{"c":3,"d":4}'
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/dynamo_utils.py
|
# -*- encoding: utf-8 -*-
def _is_capacity_different(x, desired_capacity):
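# True when either the read or the write capacity of the table/index differs from the desired value.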
read_capacity_units = x['ProvisionedThroughput']['ReadCapacityUnits']
write_capacity_units = x['ProvisionedThroughput']['WriteCapacityUnits']
return (
read_capacity_units != desired_capacity
) or (
write_capacity_units != desired_capacity
)
def change_dynamo_capacity(client, table_name, desired_capacity):
"""
Given the name of a DynamoDB table and a desired capacity, update the
read/write capacity of the table and every secondary index.
"""
response = client.describe_table(TableName=table_name)
filtered_gsis = filter(
lambda x: _is_capacity_different(x, desired_capacity),
response['Table']['GlobalSecondaryIndexes'])
gsi_updates = list(map(
lambda x: {
'Update': {
'IndexName': x['IndexName'],
'ProvisionedThroughput': {
'ReadCapacityUnits': desired_capacity,
'WriteCapacityUnits': desired_capacity
}
}
},
filtered_gsis
))
table_update = _is_capacity_different(response['Table'], desired_capacity)
print(f'table_update: {table_update}')
if gsi_updates and table_update:
resp = client.update_table(
TableName=table_name,
ProvisionedThroughput={
'ReadCapacityUnits': desired_capacity,
'WriteCapacityUnits': desired_capacity
},
GlobalSecondaryIndexUpdates=gsi_updates
)
elif gsi_updates:
resp = client.update_table(
TableName=table_name,
GlobalSecondaryIndexUpdates=gsi_updates
)
elif table_update:
resp = client.update_table(
TableName=table_name,
ProvisionedThroughput={
'ReadCapacityUnits': desired_capacity,
'WriteCapacityUnits': desired_capacity
}
)
else:
return
print(f'DynamoDB response = {resp!r}')
assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
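# Example usage (hypothetical table name), assuming a boto3 DynamoDB client:
#   change_dynamo_capacity(boto3.client('dynamodb'), 'MyTable', desired_capacity=5)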
|
wellcomecollection/aws_utils
|
tests/test_dynamo_event.py
|
# -*- encoding: utf-8 -*-
from wellcome_aws_utils import dynamo_event
event_source_arn = (
"arn:aws:dynamodb:us-east-1:123456789012:"
"table/BarkTable/stream/2016-11-16T20:42:48.104"
)
def create_insert_record(message):
return {
"ApproximateCreationDateTime": 1479499740,
"Keys": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Username": {
"S": "John Doe"
}
},
"NewImage": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": message
},
"Username": {
"S": "John Doe"
}
},
"SequenceNumber": "13021600000000001596893679",
"SizeBytes": 112,
"StreamViewType": "NEW_IMAGE"
}
def create_remove_record(message):
return {
"ApproximateCreationDateTime": 1479499740,
"Keys": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Username": {
"S": "John Doe"
}
},
"OldImage": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": message
},
"Username": {
"S": "John Doe"
}
},
"SequenceNumber": "13021600000000001596893679",
"SizeBytes": 112,
"StreamViewType": "OLD_IMAGE"
}
def create_modify_record(old_message, new_message):
return {
"ApproximateCreationDateTime": 1479499740,
"Keys": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Username": {
"S": "John Doe"
}
},
"OldImage": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": old_message
},
"Username": {
"S": "John Doe"
}
},
"NewImage": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": new_message
},
"Username": {
"S": "John Doe"
}
},
"SequenceNumber": "13021600000000001596893679",
"SizeBytes": 112,
"StreamViewType": "NEW_AND_OLD_IMAGES"
}
def create_modify_record_keys_only():
return {
"ApproximateCreationDateTime": 1479499740,
"Keys": {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Username": {
"S": "John Doe"
}
},
"SequenceNumber": "13021600000000001596893679",
"SizeBytes": 112,
"StreamViewType": "KEYS_ONLY"
}
def create_insert_event(message):
return {
"eventID": "7de3041dd709b024af6f29e4fa13d34c",
"eventName": "INSERT",
"eventVersion": "1.1",
"eventSource": "aws:dynamodb",
"awsRegion": "us-west-2",
"dynamodb": create_insert_record(message),
"eventSourceARN": event_source_arn
}
def create_remove_event(message):
return {
"eventID": "7de3041dd709b024af6f29e4fa13d34c",
"eventName": "REMOVE",
"eventVersion": "1.1",
"eventSource": "aws:dynamodb",
"awsRegion": "us-west-2",
"dynamodb": create_remove_record(message),
"eventSourceARN": event_source_arn
}
def create_modify_event(old_message, new_message):
return {
"eventID": "7de3041dd709b024af6f29e4fa13d34c",
"eventName": "MODIFY",
"eventVersion": "1.1",
"eventSource": "aws:dynamodb",
"awsRegion": "us-west-2",
"dynamodb": create_modify_record(old_message, new_message),
"eventSourceARN": event_source_arn
}
def create_modify_event_keys_only():
return {
"eventID": "7de3041dd709b024af6f29e4fa13d34c",
"eventName": "MODIFY",
"eventVersion": "1.1",
"eventSource": "aws:dynamodb",
"awsRegion": "us-west-2",
"dynamodb": create_modify_record_keys_only(),
"eventSourceARN": event_source_arn
}
def test_get_source_arn():
dynamo_image = dynamo_event.DynamoEvent(create_insert_event('foo'))
assert dynamo_image.event_source_arn == event_source_arn
def test_insert_event():
dynamo_image = dynamo_event.DynamoEvent(create_insert_event('foo'))
expected_image_with_deserialized_values = {
'Message': 'foo',
'Timestamp': '2016-11-18:12:09:36',
'Username': 'John Doe'
}
expected_image = {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": 'foo'
},
"Username": {
"S": "John Doe"
}
}
assert dynamo_image.new_image(
deserialize_values=True
) == expected_image_with_deserialized_values
assert dynamo_image.new_image() == expected_image
def test_remove_event():
dynamo_image = dynamo_event.DynamoEvent(create_remove_event('foo'))
expected_image_with_deserialized_values = {
'Message': 'foo',
'Timestamp': '2016-11-18:12:09:36',
'Username': 'John Doe'
}
expected_image = {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": 'foo'
},
"Username": {
"S": "John Doe"
}
}
assert dynamo_image.new_image(deserialize_values=True) is None
assert dynamo_image.new_image() is None
assert dynamo_image.old_image(
deserialize_values=True
) == expected_image_with_deserialized_values
assert dynamo_image.old_image() == expected_image
def test_modify_event():
dynamo_image = dynamo_event.DynamoEvent(create_modify_event('foo', 'bar'))
expected_old_image_with_deserialized_values = {
'Message': 'foo',
'Timestamp': '2016-11-18:12:09:36',
'Username': 'John Doe'
}
expected_old_image = {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": 'foo'
},
"Username": {
"S": "<NAME>"
}
}
expected_new_image_with_deserialized_values = {
'Message': 'bar',
'Timestamp': '2016-11-18:12:09:36',
'Username': 'John Doe'
}
expected_new_image = {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Message": {
"S": 'bar'
},
"Username": {
"S": "John Doe"
}
}
assert dynamo_image.new_image(
deserialize_values=True
) == expected_new_image_with_deserialized_values
assert dynamo_image.new_image() == expected_new_image
assert dynamo_image.old_image(
deserialize_values=True
) == expected_old_image_with_deserialized_values
assert dynamo_image.old_image() == expected_old_image
def test_modify_event_keys_only():
dynamo_image = dynamo_event.DynamoEvent(create_modify_event_keys_only())
assert dynamo_image.new_image(deserialize_values=True) is None
assert dynamo_image.new_image() is None
assert dynamo_image.old_image(deserialize_values=True) is None
assert dynamo_image.old_image() is None
assert dynamo_image.keys(deserialize_values=True) == {
'Timestamp': '2016-11-18:12:09:36',
'Username': 'John Doe'
}
assert dynamo_image.keys() == {
"Timestamp": {
"S": "2016-11-18:12:09:36"
},
"Username": {
"S": "John Doe"
}
}
|
wellcomecollection/aws_utils
|
tests/test_sqs_utils.py
|
# -*- encoding: utf-8 -*-
import logging
import boto3
import daiquiri
from moto import mock_sqs
import pytest
from wellcome_aws_utils import sqs_utils
daiquiri.setup(level=logging.INFO)
logger = daiquiri.getLogger(__name__)
@pytest.fixture
def queue_url():
with mock_sqs():
client = boto3.client('sqs')
resp = client.create_queue(QueueName='TestQueue')
logger.info('%r', resp)
yield resp['QueueUrl']
client.delete_queue(QueueUrl=resp['QueueUrl'])
@pytest.mark.parametrize('kwargs', [
{},
{'delete': False},
{'delete': True},
{'batch_size': 1},
{'batch_size': 10},
{'delete': True, 'batch_size': 2},
{'delete': False, 'batch_size': 7},
])
def test_get_empty_queue_is_empty(queue_url, kwargs):
assert list(sqs_utils.get_messages(queue_url, **kwargs)) == []
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/ecs_utils.py
|
# -*- encoding: utf-8 -*-
import operator
from botocore.exceptions import ClientError
class EcsThrottleException(Exception):
pass
def identify_cluster_by_app_name(client, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster_arn in get_cluster_arns(client):
for service_arn in get_service_arns(client, cluster_arn=cluster_arn):
# The format of an ECS service ARN is:
#
# arn:aws:ecs:{aws_region}:{account_id}:service/{service_name}
#
# Our ECS cluster is configured so that the name of the ECS cluster
# matches the name of the config in S3. It would be more robust
# to use the describeService API, but this saves us a couple of
# calls on our API quota so we skip it.
_, service_name = service_arn.split('/')
if service_name == app_name:
return cluster_arn
raise RuntimeError(f'Unable to find ECS cluster for {app_name}')
def get_latest_task_definition(client, cluster, service):
"""
Given the name of a cluster and a service, return the ARN
for its latest task definition.
"""
resp = client.describe_services(cluster=cluster, services=[service])
# The top-level structure of a describeServices API response is of the form
#
# {
# "failures": [],
# "services": [
# ...
# ]
# }
#
# Because we only asked for a description of a single service, we expect
# there to only be a single service.
services = resp['services']
assert len(services) == 1, resp
service = services[0]
# Within a 'service' description, the following structure is what we're
# interested in:
#
# "deployments": [
# {
# "createdAt": <date>,
# "taskDefinition": <task definition ARN>,
# "updatedAt": <date>
# ...
# },
# ... other running tasks
# ],
#
# Each "deployment" corresponds to a running task, so we pick the
# container with the most recent creation date.
deployments = service['deployments']
assert len(deployments) > 0, resp
newest_deployment = max(deployments, key=operator.itemgetter('createdAt'))
return newest_deployment['taskDefinition']
def clone_task_definition(client, task_definition):
"""
Given a task definition ARN, clone the associated task.
Returns the new task definition ARN.
"""
resp = client.describe_task_definition(taskDefinition=task_definition)
taskDefinition = resp['taskDefinition']
# The task definition contains two key fields: "family" and
# "containerDefinitions" which full describe the task.
new_task = client.register_task_definition(
family=taskDefinition['family'],
taskRoleArn=taskDefinition['taskRoleArn'],
containerDefinitions=taskDefinition['containerDefinitions'],
volumes=taskDefinition['volumes']
)
return new_task['taskDefinition']['taskDefinitionArn']
def _name_from_arn(arn):
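# Cluster/service ARNs end in ".../{name}", so the name is the segment after the slash.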
return arn.split("/")[1]
def _check_for_throttle_exception(f, *args, **kwargs):
try:
return f(*args, **kwargs)
except ClientError as ex:
if ex.response['Error']['Code'] == 'ThrottlingException':
print(f'ThrottlingException: {ex}')
raise EcsThrottleException(ex)
else:
raise
def get_service_arns(ecs_client, cluster_arn):
"""
Given a cluster ARN, extracts the associated service ARNs.
Returns a list of service ARNS.
"""
return _check_for_throttle_exception(
ecs_client.list_services,
cluster=_name_from_arn(cluster_arn)
)['serviceArns']
def get_cluster_arns(ecs_client):
"""
Extract the list of cluster ARNs in this account.
Returns a list of cluster ARNs.
"""
return _check_for_throttle_exception(
ecs_client.list_clusters
)['clusterArns']
def describe_cluster(ecs_client, cluster_arn):
"""
Given a cluster ARN attempts to find a matching cluster description.
Returns a cluster description.
"""
return _check_for_throttle_exception(
ecs_client.describe_clusters,
clusters=[cluster_arn]
)['clusters'][0]
def describe_service(ecs_client, cluster_arn, service_arn):
"""
Given a cluster ARN and service ARN, attempts to find a matching
service description.
Returns a service description.
"""
return _check_for_throttle_exception(
ecs_client.describe_services,
cluster=_name_from_arn(cluster_arn),
services=[_name_from_arn(service_arn)]
)['services'][0]
def run_task(
ecs_client,
cluster_name,
task_definition,
started_by,
container_name="app",
command=[]):
"""
Run a given command against a named container in a task definition
on a particular cluster.
Returns the response from calling run_task
"""
return ecs_client.run_task(
cluster=cluster_name,
taskDefinition=task_definition,
overrides={
'containerOverrides': [
{
'name': container_name,
'command': command
},
]
},
count=1,
startedBy=started_by,
)
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/__init__.py
|
# -*- encoding: utf-8 -*-
from wellcome_aws_utils import (
deployment_utils,
dynamo_utils,
ecs_utils,
s3_utils,
sns_utils,
sqs_utils,
)
from wellcome_aws_utils.version import __version_info__, __version__
__all__ = [
'__version_info__',
'__version__',
'deployment_utils',
'dynamo_utils',
'ecs_utils',
's3_utils',
'sns_utils',
'sqs_utils',
]
|
wellcomecollection/aws_utils
|
scripts/deploy.py
|
#!/usr/bin/env python
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2017 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
import os
import sys
import shutil
import subprocess
sys.path.append(os.path.dirname(__file__))  # noqa
import hypothesistooling as tools
DIST = os.path.join(tools.ROOT, 'dist')
PENDING_STATUS = ('started', 'created')
if __name__ == '__main__':
last_release = tools.latest_version()
print('Current version: %s. Latest released version: %s' % (
tools.__version__, last_release
))
HEAD = tools.hash_for_name('HEAD')
MASTER = tools.hash_for_name('origin/master')
print('Current head:', HEAD)
print('Current master:', MASTER)
on_master = tools.is_ancestor(HEAD, MASTER)
has_release = tools.has_release()
if has_release:
print('Updating changelog and version')
tools.update_for_pending_release()
print('Building an sdist...')
if os.path.exists(DIST):
shutil.rmtree(DIST)
subprocess.check_call([
sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST,
])
if not on_master:
print('Not deploying due to not being on master')
sys.exit(0)
if not has_release:
print('Not deploying due to no release')
sys.exit(0)
if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':
print("But we don't have the keys to do it")
sys.exit(1)
print('Decrypting secrets')
# We'd normally avoid the use of shell=True, but this is more or less
# intended as an opaque string that was given to us by Travis that happens
# to be a shell command that we run, and there are a number of good reasons
# this particular instance is harmless and would be high effort to
# convert (principally: Lack of programmatic generation of the string and
# extensive use of environment variables in it), so we're making an
# exception here.
subprocess.check_call(
'openssl aes-256-cbc -K $encrypted_83630750896a_key '
'-iv $encrypted_83630750896a_iv -in deploy_key.enc -out deploy_key -d',
shell=True
)
subprocess.check_call(['chmod', '400', 'deploy_key'])
print('Release seems good. Pushing to GitHub now.')
tools.create_tag_and_push()
print('Now uploading to pypi.')
subprocess.check_call([
sys.executable, '-m', 'twine', 'upload',
'--username', os.environ['PYPI_USERNAME'],
'--password', os.environ['PYPI_PASSWORD'],
os.path.join(DIST, '*'),
])
sys.exit(0)
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/reporting_utils.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Get records from the VHS, apply the given transformation to them, and index
them into an Elasticsearch index.
"""
import json
import boto3
import certifi
from attr import attrs, attrib
from elasticsearch import Elasticsearch
from wellcome_aws_utils.lambda_utils import log_on_error
def get_es_credentials(profile_name=None):
session = boto3.session.Session(profile_name=profile_name)
client = session.client(
service_name='secretsmanager',
region_name="eu-west-1"
)
get_secret_value_response = client.get_secret_value(
SecretId="prod/Elasticsearch/ReportingCredentials"
)
secret = get_secret_value_response['SecretString']
return json.loads(secret)
def dict_to_location(d):
return ObjectLocation(**d)
@attrs
class ObjectLocation(object):
namespace = attrib()
path = attrib()
@attrs
class Record(object):
id = attrib()
version = attrib()
payload = attrib(converter=dict_to_location)
@attrs
class ElasticsearchRecord(object):
id = attrib()
doc = attrib()
def extract_sns_messages_from_event(event):
keys_to_keep = ['id', 'version', 'payload']
for record in event["Records"]:
try:
full_message = json.loads(record["Sns"]["Message"])
except KeyError:
# This could be a message from SQS rather than SNS so attempt to
# decode the "body" of the message
event = json.loads(record["body"])
for msg in extract_sns_messages_from_event(event):
yield msg
else:
stripped_message = {
k: v for k, v in full_message.items() if k in keys_to_keep
}
yield stripped_message
def get_dynamo_record(dynamo_table, message):
item = dynamo_table.get_item(Key={"id": message['id']})
return Record(**item["Item"])
def get_s3_objects_from_messages(dynamo_table, s3, messages):
for message in messages:
record = get_dynamo_record(dynamo_table, message)
s3_object = s3.get_object(
Bucket=record.payload.namespace,
Key=record.payload.path
)
yield record.id, s3_object
def unpack_json_from_s3_objects(s3_objects):
for id, s3_object in s3_objects:
data = s3_object["Body"].read().decode("utf-8")
yield id, json.loads(data)
def transform_data_for_es(data, transform):
for id, data_dict in data:
yield ElasticsearchRecord(
id=id,
doc=transform(data_dict)
)
@log_on_error
def process_messages(
event, transform, index, table_name, dynamodb=None, s3_client=None,
es_client=None, credentials=None
):
s3_client = s3_client or boto3.client("s3")
dynamo_table = (dynamodb or boto3.resource("dynamodb")).Table(table_name)
if credentials and not es_client:
es_client = Elasticsearch(
hosts=credentials["url"],
use_ssl=True,
ca_certs=certifi.where(),
http_auth=(credentials['username'], credentials['password'])
)
elif not es_client:
raise ValueError(
'process_messages needs an elasticsearch client or a set of '
'credentials to create one'
)
_process_messages(
event, transform, index, dynamo_table, s3_client, es_client
)
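# Illustrative sketch (not part of the module): wiring process_messages into a
# Lambda handler. The index name, table name and transform below are invented.
#
#     def handler(event, context):
#         credentials = get_es_credentials()
#         process_messages(
#             event,
#             transform=lambda doc: doc,
#             index='reporting',
#             table_name='vhs-table',
#             credentials=credentials
#         )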
def _process_messages(
event, transform, index, dynamo_table, s3_client, es_client
):
messages = extract_sns_messages_from_event(event)
s3_objects = get_s3_objects_from_messages(
dynamo_table, s3_client, messages
)
data = unpack_json_from_s3_objects(s3_objects)
es_records_to_send = transform_data_for_es(data, transform)
for record in es_records_to_send:
es_client.index(
index=index,
doc_type="_doc",
id=record.id,
body=json.dumps(record.doc)
)
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/sqs_utils.py
|
# -*- encoding: utf-8 -*-
import boto3
import daiquiri
logger = daiquiri.getLogger(__name__)
def get_messages(queue_url, delete=False, batch_size=10):
"""
Gets messages from an SQS queue. If ``delete`` is True, the
messages are also deleted after they've been read.
"""
client = boto3.client('sqs')
while True:
# We batch message responses to reduce load on the SQS API.
# Note: 10 is currently the most messages you can read at once.
resp = client.receive_message(
QueueUrl=queue_url,
AttributeNames=['All'],
MaxNumberOfMessages=batch_size
)
# If there's nothing available, the queue is empty. Abort!
try:
logger.info(
'Received %d new messages from %s',
len(resp['Messages']), queue_url)
except KeyError:
logger.info('No messages received from %s; aborting', queue_url)
break
# If we're deleting the messages ourselves, we don't need to send
# the ReceiptHandle to the caller (it's only used for deleting).
# If not, we send the entire response.
if delete:
for m in resp['Messages']:
yield {k: v for k, v in m.items() if k != 'ReceiptHandle'}
else:
yield from resp['Messages']
# Now delete the messages from the queue, so they won't be read
# on the next GET call.
if delete:
logger.info(
'Deleting %d messages from %s',
len(resp['Messages']), queue_url)
client.delete_message_batch(
QueueUrl=queue_url,
Entries=[
{'Id': m['MessageId'], 'ReceiptHandle': m['ReceiptHandle']}
for m in resp['Messages']
]
)
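# Illustrative example (the queue URL below is invented):
#
#     for message in get_messages(
#         queue_url='https://sqs.eu-west-1.amazonaws.com/123456789012/my-queue',
#         delete=True
#     ):
#         print(message['Body'])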
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/deployment_utils.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Shared library to help surface ECS deployment information.
"""
import collections
import datetime
from wellcome_aws_utils.ecs_utils import (
get_cluster_arns, get_service_arns, describe_service
)
Deployment = collections.namedtuple(
'Deployment',
'deployment_key deployment_status color created_at task_definition'
)
DeploymentKey = collections.namedtuple('DeploymentKey', 'id service_arn')
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
def _create_deployment_tuple_from_item(item):
item_date = datetime.datetime.strptime(item['created_at'], DATE_FORMAT)
return Deployment(
DeploymentKey(item['deployment_id'], item['service_arn']),
item['deployment_status'],
item['color'],
item_date,
item['task_definition']
)
def _create_deployment_tuple_from_ecs(service, deployment):
"""Takes AWS ECS API Service & Deployment, return Deployment namedtuple."""
deployment_status = deployment['status']
ongoing_deployment = len(service['deployments']) > 1
if ongoing_deployment and (deployment_status == "PRIMARY"):
color = "green"
else:
color = "blue"
return Deployment(
DeploymentKey(deployment['id'], service['serviceArn']),
deployment_status,
color,
deployment['createdAt'],
deployment['taskDefinition']
)
def _get_service_deployments(ecs_client, cluster_arn, service_arn):
service = describe_service(ecs_client, cluster_arn, service_arn)
return [_create_deployment_tuple_from_ecs(service, deployment)
for deployment in service['deployments']]
def _get_date_string(date):
return date.strftime(DATE_FORMAT)
def delete_deployment_in_dynamo(table, deployment):
return table.delete_item(
Key={
'deployment_id': deployment.deployment_key.id,
'service_arn': deployment.deployment_key.service_arn
}
)
def put_deployment_in_dynamo(table, deployment):
return table.put_item(
Item={
'deployment_id': deployment.deployment_key.id,
'service_arn': deployment.deployment_key.service_arn,
'deployment_status': deployment.deployment_status,
'color': deployment.color,
'created_at': _get_date_string(deployment.created_at),
'task_definition': deployment.task_definition
}
)
def update_deployment_in_dynamo(table, deployment):
return table.update_item(
Key={
'deployment_id': deployment.deployment_key.id,
'service_arn': deployment.deployment_key.service_arn
},
UpdateExpression="""
SET deployment_status = :deployment_status,
color = :color,
created_at = :created_at,
task_definition = :task_definition
""",
ExpressionAttributeValues={
':deployment_status': deployment.deployment_status,
':color': deployment.color,
':created_at': _get_date_string(deployment.created_at),
':task_definition': deployment.task_definition
}
)
def get_deployments_from_dynamo(table):
response = table.scan()
return [_create_deployment_tuple_from_item(d) for d in response['Items']]
def get_deployments_from_ecs(ecs_client):
deployments = []
for cluster_arn in get_cluster_arns(ecs_client):
for service_arn in get_service_arns(ecs_client, cluster_arn):
service_deployments = _get_service_deployments(
ecs_client, cluster_arn, service_arn)
deployments += service_deployments
return deployments
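# Illustrative sketch (the table name below is invented): mirroring the current
# ECS deployments into a DynamoDB table using the helpers above.
#
#     import boto3
#
#     ecs_client = boto3.client('ecs')
#     table = boto3.resource('dynamodb').Table('ecs-deployments')
#     for deployment in get_deployments_from_ecs(ecs_client):
#         put_deployment_in_dynamo(table, deployment)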
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/lambda_utils.py
|
# -*- encoding: utf-8
import functools
import sys
def log_on_error(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception:
print(f'args = {args!r}', file=sys.stderr)
print(f'kwargs = {kwargs!r}', file=sys.stderr)
raise
return wrapper
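# Illustrative example: decorating a Lambda handler so that its arguments are
# printed to stderr if it raises (do_something is a made-up function).
#
#     @log_on_error
#     def handler(event, context):
#         return do_something(event)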
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/dynamo_event.py
|
from enum import Enum
from boto3.dynamodb.types import TypeDeserializer
from wellcome_aws_utils.exceptions import UnWellcomeException
def create_dynamo_events(event):
for record in event['Records']:
yield DynamoEvent(record)
class DynamoEventType(Enum):
REMOVE, INSERT, MODIFY = range(3)
class DynamoEvent:
def _set_event_type(self, record):
if self.record['eventName'] == 'REMOVE':
self.event_type = DynamoEventType.REMOVE
elif self.record['eventName'] == 'INSERT':
self.event_type = DynamoEventType.INSERT
elif self.record['eventName'] == 'MODIFY':
self.event_type = DynamoEventType.MODIFY
else:
raise UnWellcomeException(
f'Unrecognised eventName found in {record}!'
)
def __init__(self, record):
self.record = record
try:
self._set_event_type(record)
self.event_source_arn = record['eventSourceARN']
self._keys = record['dynamodb']['Keys']
self._new_image = record['dynamodb'].get('NewImage')
self._old_image = record['dynamodb'].get('OldImage')
except KeyError as e:
raise UnWellcomeException(
f'{e} not found in {record}!'
)
@staticmethod
def _deserialize_values(image):
td = TypeDeserializer()
return {k: td.deserialize(v) for k, v in image.items()}
def keys(self, deserialize_values=False):
if deserialize_values and self._keys:
return DynamoEvent._deserialize_values(self._keys)
return self._keys
def new_image(self, deserialize_values=False):
if deserialize_values and self._new_image:
return DynamoEvent._deserialize_values(self._new_image)
return self._new_image
def old_image(self, deserialize_values=False):
if deserialize_values and self._old_image:
return DynamoEvent._deserialize_values(self._old_image)
return self._old_image
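# Illustrative example: iterating over a DynamoDB stream event inside a Lambda
# handler (the handler itself is hypothetical).
#
#     def handler(event, context):
#         for dynamo_event in create_dynamo_events(event):
#             if dynamo_event.event_type == DynamoEventType.INSERT:
#                 print(dynamo_event.new_image(deserialize_values=True))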
|
wellcomecollection/aws_utils
|
scripts/hypothesistooling.py
|
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2017 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import subprocess
from datetime import datetime, timedelta
def current_branch():
return subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'
]).decode('ascii').strip()
def tags():
result = [t.decode('ascii') for t in subprocess.check_output([
'git', 'tag'
]).split(b"\n")]
assert len(set(result)) == len(result)
return set(result)
ROOT = subprocess.check_output([
'git', 'rev-parse', '--show-toplevel']).decode('ascii').strip()
SRC = os.path.join(ROOT, 'src')
assert os.path.exists(SRC)
__version__ = None
__version_info__ = None
VERSION_FILE = os.path.join(ROOT, 'src/wellcome_aws_utils/version.py')
DOCS_CONF_FILE = os.path.join(ROOT, 'docs/conf.py')
with open(VERSION_FILE) as o:
exec(o.read())
assert __version__ is not None
assert __version_info__ is not None
def latest_version():
versions = []
for t in tags():
# All versions get tags but not all tags are versions (and there are
# a large number of historic tags with a different format for versions)
# so we parse each tag as a triple of ints (MAJOR, MINOR, PATCH)
# and skip any tag that doesn't match that.
assert t == t.strip()
parts = t.split('.')
if len(parts) != 3:
continue
try:
v = tuple(map(int, parts))
except ValueError:
continue
versions.append((v, t))
_, latest = max(versions)
assert latest in tags()
return latest
def hash_for_name(name):
return subprocess.check_output([
'git', 'rev-parse', name
]).decode('ascii').strip()
def is_ancestor(a, b):
check = subprocess.call([
'git', 'merge-base', '--is-ancestor', a, b
])
assert 0 <= check <= 1
return check == 0
CHANGELOG_FILE = os.path.join(ROOT, 'docs', 'changes.rst')
def changelog():
with open(CHANGELOG_FILE) as i:
return i.read()
def merge_base(a, b):
return subprocess.check_output([
'git', 'merge-base', a, b,
]).strip()
def has_source_changes(version=None):
if version is None:
version = latest_version()
# Check where we branched off from the version. We're only interested
# in whether *we* introduced any source changes, so we check diff from
# there rather than the diff to the other side.
point_of_divergence = merge_base('HEAD', version)
return subprocess.call([
'git', 'diff', '--exit-code', point_of_divergence, 'HEAD', '--', SRC,
]) != 0
def git(*args):
subprocess.check_call(('git',) + args)
def create_tag_and_push():
assert __version__ not in tags()
git('config', 'user.name', 'Travis CI on behalf of Wellcome')
git('config', 'user.email', '<EMAIL>')
git('config', 'core.sshCommand', 'ssh -i deploy_key')
git(
'remote', 'add', 'ssh-origin',
'<EMAIL>:wellcometrust/aws_utils.git'
)
git('tag', __version__)
subprocess.check_call(['git', 'push', 'ssh-origin', 'HEAD:master'])
subprocess.check_call(['git', 'push', 'ssh-origin', '--tags'])
def modified_files():
files = set()
for command in [
['git', 'diff', '--name-only', '--diff-filter=d',
latest_version(), 'HEAD'],
['git', 'diff', '--name-only']
]:
diff_output = subprocess.check_output(command).decode('ascii')
for line in diff_output.split('\n'):
filepath = line.strip()
if filepath:
assert os.path.exists(filepath)
files.add(filepath)
return files
RELEASE_FILE = os.path.join(ROOT, 'RELEASE.rst')
def has_release():
return os.path.exists(RELEASE_FILE)
CHANGELOG_BORDER = re.compile(r"^-+$")
CHANGELOG_HEADER = re.compile(r"^\d+\.\d+\.\d+ - \d\d\d\d-\d\d-\d\d$")
RELEASE_TYPE = re.compile(r"^RELEASE_TYPE: +(major|minor|patch)")
MAJOR = 'major'
MINOR = 'minor'
PATCH = 'patch'
VALID_RELEASE_TYPES = (MAJOR, MINOR, PATCH)
def parse_release_file():
with open(RELEASE_FILE) as i:
release_contents = i.read()
release_lines = release_contents.split('\n')
m = RELEASE_TYPE.match(release_lines[0])
if m is not None:
release_type = m.group(1)
if release_type not in VALID_RELEASE_TYPES:
print('Unrecognised release type %r' % (release_type,))
sys.exit(1)
del release_lines[0]
release_contents = '\n'.join(release_lines).strip()
else:
print(
'RELEASE.rst does not start by specifying release type. The first '
'line of the file should be RELEASE_TYPE: followed by one of '
'major, minor, or patch, to specify the type of release that '
'this is (i.e. which version number to increment). Instead the '
'first line was %r' % (release_lines[0],)
)
sys.exit(1)
return release_type, release_contents
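# For reference, a minimal RELEASE.rst that this parser accepts looks like the
# following (the contents are an invented example):
#
#     RELEASE_TYPE: patch
#
#     Fix a typo in the README.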
def update_changelog_and_version():
global __version_info__
global __version__
with open(CHANGELOG_FILE) as i:
contents = i.read()
assert '\r' not in contents
lines = contents.split('\n')
assert contents == '\n'.join(lines)
for i, l in enumerate(lines):
if CHANGELOG_BORDER.match(l):
assert CHANGELOG_HEADER.match(lines[i + 1]), repr(lines[i + 1])
assert CHANGELOG_BORDER.match(lines[i + 2]), repr(lines[i + 2])
beginning = '\n'.join(lines[:i])
rest = '\n'.join(lines[i:])
assert '\n'.join((beginning, rest)) == contents
break
release_type, release_contents = parse_release_file()
new_version = list(__version_info__)
bump = VALID_RELEASE_TYPES.index(release_type)
new_version[bump] += 1
for i in range(bump + 1, len(new_version)):
new_version[i] = 0
new_version = tuple(new_version)
new_version_string = '.'.join(map(str, new_version))
__version_info__ = new_version
__version__ = new_version_string
for version_file in [VERSION_FILE, DOCS_CONF_FILE]:
with open(version_file) as i:
version_lines = i.read().split('\n')
for i, l in enumerate(version_lines):
if 'version_info' in l:
version_lines[i] = '__version_info__ = %r' % (new_version,)
break
with open(version_file, 'w') as o:
o.write('\n'.join(version_lines))
now = datetime.utcnow()
date = max([
d.strftime('%Y-%m-%d') for d in (now, now + timedelta(hours=1))
])
heading_for_new_version = ' - '.join((new_version_string, date))
border_for_new_version = '-' * len(heading_for_new_version)
new_changelog_parts = [
beginning.strip(),
'',
border_for_new_version,
heading_for_new_version,
border_for_new_version,
'',
release_contents,
'',
rest
]
with open(CHANGELOG_FILE, 'w') as o:
o.write('\n'.join(new_changelog_parts))
def update_for_pending_release():
git('config', 'user.name', 'Travis CI on behalf of Wellcome')
git('config', 'user.email', '<EMAIL>')
update_changelog_and_version()
git('rm', RELEASE_FILE)
git('add', CHANGELOG_FILE, VERSION_FILE)
git(
'commit',
'-m', 'Bump version to %s and update changelog' % (__version__,)
)
|
wellcomecollection/aws_utils
|
docs/build_api_reference.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import subprocess
import tempfile
import textwrap
from bs4 import BeautifulSoup
working_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(working_dir)
tmp_dir = tempfile.mkdtemp()
subprocess.check_call(['sphinx-build', '-b', 'html', '.', tmp_dir])
html_doc = open(os.path.join(tmp_dir, 'api_src.html')).read()
soup = BeautifulSoup(html_doc, 'html.parser')
with open('api.rst', 'w') as f:
f.write('.. This file is autogenerated; edits will be lost.\n')
f.write(' To rebuild this file, run "tox -e check_api_docs".\n\n')
f.write('API reference\n')
f.write('=============\n\n')
reference = soup.find('div', attrs={'id': 'api-reference'})
for section in reference.findAll('div', attrs={'class': 'section'}):
title = section.find('h2')
title.extract()
f.write(title.contents[0] + '\n')
f.write('*' * len(title.contents[0]) + '\n\n')
f.write('.. raw:: html\n\n')
f.write(textwrap.indent(
text=''.join([str(s) for s in section.contents]).strip(),
prefix=' ' * 4
))
id_span = section.find('span')
id_span.extract()
f.write('\n\n')
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/sns_utils.py
|
# -*- encoding: utf-8 -*-
import collections
import datetime
import decimal
import json
import logging
import warnings
from wellcome_aws_utils.exceptions import UnWellcomeException
SNSEvent = collections.namedtuple('SNSEvent', 'subject message')
logger = logging.getLogger(__name__)
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
if float(obj).is_integer():
return int(obj)
else:
return float(obj)
return json.JSONEncoder.default(self, obj)
def publish_sns_message(sns_client,
topic_arn,
message,
subject="default-subject"):
"""
Given a topic ARN and a series of key-value pairs, publish the key-value
data to the SNS topic.
"""
response = sns_client.publish(
TopicArn=topic_arn,
MessageStructure='json',
Message=json.dumps({
'default': json.dumps(
message,
cls=EnhancedJSONEncoder
)
}),
Subject=subject
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
logger.debug('SNS: sent notification %s', response["MessageId"])
else:
raise RuntimeError(repr(response))
return response
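# Illustrative example (the topic ARN below is invented):
#
#     import boto3
#
#     sns_client = boto3.client('sns')
#     publish_sns_message(
#         sns_client,
#         topic_arn='arn:aws:sns:eu-west-1:123456789012:my-topic',
#         message={'id': 'b1234', 'updated': datetime.datetime.now()},
#         subject='record-update'
#     )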
def extract_sns_messages_from_lambda_event(event):
"""
Extracts a JSON message from an SNS event sent to an AWS Lambda.
:param event: An event sent to a Lambda from SNS.
:returns: A generator of SNSEvent instances.
"""
if 'Records' not in event:
raise UnWellcomeException(f'No records found in {event}')
for record in event['Records']:
if record['EventSource'] != 'aws:sns':
raise UnWellcomeException(f'Invalid message source for {record}')
try:
subject = record['Sns']['Subject']
message = json.loads(record['Sns']['Message'])
except KeyError as e:
raise UnWellcomeException(
f'Invalid message structure, missing {e} in {record}'
)
yield SNSEvent(subject=subject, message=message)
def extract_json_message(event):
"""
Extracts a JSON message from an SNS event sent to a Lambda.
Deprecated in favour of extract_sns_messages_from_lambda_event.
"""
warnings.warn(
'Deprecated in favour of extract_sns_messages_from_lambda_event',
DeprecationWarning
)
message = event['Records'][0]['Sns']['Message']
return json.loads(message)
|
wellcomecollection/aws_utils
|
tests/conftest.py
|
# -*- encoding: utf-8 -*-
import boto3
from moto import mock_sns, mock_sqs
import pytest
def pytest_runtest_setup(item):
set_region()
@pytest.fixture()
def set_region():
# Without this, boto3 is complaining about not having a region defined
# in tests (despite one being set in the Travis env variables and passed
# into the image).
# TODO: Investigate this properly.
boto3.setup_default_session(region_name='eu-west-1')
@pytest.fixture()
def moto_start(set_region):
mock_sns().start()
mock_sqs().start()
yield
mock_sns().stop()
mock_sqs().stop()
@pytest.fixture()
def sns_sqs(set_region, moto_start):
fake_sns_client = boto3.client('sns')
fake_sqs_client = boto3.client('sqs')
queue_name = "test-queue"
topic_name = "test-topic"
print(f"Creating topic {topic_name} and queue {queue_name}")
fake_sns_client.create_topic(Name=topic_name)
response = fake_sns_client.list_topics()
topic_arn = response["Topics"][0]['TopicArn']
queue = fake_sqs_client.create_queue(QueueName=queue_name)
fake_sns_client.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=f"arn:aws:sqs:eu-west-1:123456789012:{queue_name}"
)
yield topic_arn, queue['QueueUrl']
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/s3_utils.py
|
# -*- encoding: utf-8 -*-
import json
from urllib.parse import unquote
import boto3
from botocore.exceptions import ClientError
import dateutil.parser
def is_object(bucket, key):
"""
Checks if an object exists in S3. Returns True/False.
:param bucket: Bucket of the object to check.
:param key: Key of the object to check.
"""
client = boto3.client('s3')
try:
client.head_object(Bucket=bucket, Key=key)
except ClientError as err:
if err.response['Error']['Code'] == '404':
return False
else:
raise
else:
return True
def copy_object(src_bucket, src_key, dst_bucket, dst_key, lazy=False):
"""
Copy an object from one S3 bucket to another.
:param src_bucket: Bucket of the source object.
:param src_key: Key of the source object.
:param dst_bucket: Bucket of the destination object.
:param dst_key: Key of the destination object.
:param lazy: Do a lazy copy. This means that the object will only be
copied if the destination object does not exist, or exists but has
a different ETag from the source object.
"""
client = boto3.client('s3')
if not is_object(bucket=src_bucket, key=src_key):
raise ValueError(
f'Tried to copy missing object ({src_bucket}, {src_key})'
)
def should_copy():
if not lazy:
return True
if not is_object(bucket=dst_bucket, key=dst_key):
return True
src_resp = client.head_object(Bucket=src_bucket, Key=src_key)
dst_resp = client.head_object(Bucket=dst_bucket, Key=dst_key)
# Only copy if the ETags differ, i.e. the object has changed.
return src_resp['ETag'] != dst_resp['ETag']
if should_copy():
return client.copy_object(
CopySource={'Bucket': src_bucket, 'Key': src_key},
Bucket=dst_bucket,
Key=dst_key
)
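# Illustrative example (bucket and key names are invented): only copy the
# object if the destination is missing or differs from the source.
#
#     copy_object(
#         src_bucket='source-bucket',
#         src_key='records/b1234.json',
#         dst_bucket='backup-bucket',
#         dst_key='records/b1234.json',
#         lazy=True
#     )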
def _extract_s3_event(record):
event_datetime = dateutil.parser.parse(record["eventTime"])
return {
"event_name": record["eventName"],
"event_time": event_datetime,
"bucket_name": record["s3"]["bucket"]["name"],
"object_key": unquote(record["s3"]["object"]["key"]),
"size": record["s3"]["object"]["size"],
"versionId": record["s3"]["object"].get("versionId")
}
def parse_s3_record(event):
"""
Extracts a simple subset of an S3 update event.
"""
return [_extract_s3_event(record) for record in event["Records"]]
def write_objects_to_s3(bucket, key, objects):
"""
Given an iterable of objects that can be serialised as JSON, serialise
them as JSON, and write them to a file in S3, one per line.
:param bucket: S3 bucket to upload the new file to.
:param key: S3 key to upload the new file to.
:param objects: An iterable of objects that can be serialised as JSON.
"""
# We use sort_keys=True to ensure deterministic results. The separators
# flag allows us to write more compact JSON, which makes things faster!
# See https://twitter.com/raymondh/status/842777864193769472
json_str = b'\n'.join([
json.dumps(m, sort_keys=True, separators=(',', ':')).encode('ascii')
for m in objects
])
client = boto3.client('s3')
client.put_object(Bucket=bucket, Key=key, Body=json_str)
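# Illustrative example (bucket and key are invented): writing a handful of
# dicts as newline-delimited JSON.
#
#     write_objects_to_s3(
#         bucket='reporting-data',
#         key='snapshots/2021-01-01.ndjson',
#         objects=[{'id': 1}, {'id': 2}]
#     )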
__all__ = [
'is_object',
'copy_object',
'parse_s3_record',
'write_objects_to_s3',
]
|
wellcomecollection/aws_utils
|
tests/test_sns_utils.py
|
# -*- encoding: utf-8 -*-
from datetime import datetime
import json
import boto3
from wellcome_aws_utils import sns_utils
def test_publish_sns_message(sns_sqs):
sns_client = boto3.client('sns')
sqs_client = boto3.client('sqs')
topic_arn, queue_url = sns_sqs
test_message = {
'string': 'a',
'number': 1,
'date': datetime.strptime(
'Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p'
)
}
expected_decoded_message = {
'string': 'a',
'number': 1,
'date': '2005-06-01T13:33:00'
}
sns_utils.publish_sns_message(sns_client, topic_arn, test_message)
messages = sqs_client.receive_message(
QueueUrl=queue_url,
MaxNumberOfMessages=1
)
message_body = messages['Messages'][0]['Body']
inner_message = json.loads(message_body)['Message']
actual_decoded_message = json.loads(inner_message)
assert (
json.loads(actual_decoded_message['default']) ==
expected_decoded_message)
def test_extract_json_message():
example_object = {
"foo": "bar",
"baz": ["bat", 0, 0.1, {"boo": "beep"}]
}
example_object_json = json.dumps(example_object)
example_event = {
"Records": [
{
"Sns": {
"Message": example_object_json
}
}
]
}
extracted_object = sns_utils.extract_json_message(example_event)
assert example_object == extracted_object
def test_extract_sns_messages_from_lambda_event():
expected_subject = 'my_subject'
expected_message = {
"foo": "bar",
"baz": ["bat", 0, 0.1, {"boo": "beep"}]
}
expected_message_json = json.dumps(expected_message)
example_event = {
"Records": [
{
"EventSource": 'aws:sns',
"Sns": {
"Message": expected_message_json,
"Subject": expected_subject
}
}
]
}
actual_extracted_message = (
sns_utils.extract_sns_messages_from_lambda_event(example_event)
)
assert list(actual_extracted_message) == [sns_utils.SNSEvent(
message=expected_message,
subject=expected_subject
)]
|
wellcomecollection/aws_utils
|
tests/test_reporting_utils.py
|
import json
import boto3
from moto import mock_s3, mock_dynamodb2
from unittest.mock import patch
from wellcome_aws_utils.reporting_utils import process_messages
def create_sns_message(id):
return {
"Records": [
{
"Sns": {
"Message": (f'{{"id":"{id}","version":1}}'),
"MessageAttributes": {},
"MessageId": "0cf7d798-64c8-45a7-a7bf-a9ebc94d1108",
"Type": "Notification",
}
}
]
}
def given_s3_has(s3_client, bucket, path, data):
s3_client.put_object(
ACL="public-read",
Bucket=bucket,
Key=path,
Body=data,
CacheControl="max-age=0",
ContentType="application/json",
)
def identity_transform(record):
return record
class TestReportingUtils(object):
@mock_s3
@mock_dynamodb2
def test_saves_record_in_es(self):
with patch('elasticsearch.Elasticsearch') as MockElasticsearch:
id = "V0000001"
mock_elasticsearch_client = MockElasticsearch()
elasticsearch_index = "index"
hybrid_data = '{"foo": "bar"}'
path = "00/V0000001/0.json"
bucket = "bukkit"
s3_client = boto3.client('s3')
s3_client.create_bucket(Bucket=bucket)
given_s3_has(
s3_client=s3_client,
bucket=bucket,
path=path,
data=json.dumps(hybrid_data)
)
table_name = 'vhs'
dynamodb = boto3.resource('dynamodb')
dynamodb.create_table(
TableName=table_name,
AttributeDefinitions=[{
"AttributeName": "id",
"AttributeType": "S"
}],
KeySchema=[{
"AttributeName": "id",
"KeyType": "HASH"
}],
ProvisionedThroughput={
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
}
)
dynamodb.Table(table_name).put_item(
Item={
"id": id,
"version": 1,
"payload": {"namespace": bucket, "path": path}
}
)
event = create_sns_message(id)
process_messages(
event,
identity_transform,
elasticsearch_index,
table_name,
dynamodb=dynamodb,
s3_client=s3_client,
es_client=mock_elasticsearch_client
)
mock_elasticsearch_client.index.assert_called_once_with(
body=json.dumps(hybrid_data),
doc_type="_doc",
id=id,
index=elasticsearch_index
)
|
wellcomecollection/aws_utils
|
setup.py
|
# -*- encoding: utf-8 -*-
import os
from setuptools import find_packages, setup
def local_file(name):
return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
SOURCE = local_file('src')
README = local_file('README.rst')
# Assignment to placate pyflakes. The actual version is from the exec that
# follows.
__version__ = None
with open(local_file('src/wellcome_aws_utils/version.py')) as o:
exec(o.read())
assert __version__ is not None
setup(
name='wellcome_aws_utils',
packages=find_packages(SOURCE),
package_dir={'': SOURCE},
version=__version__,
install_requires=[
'boto3',
'daiquiri',
'python-dateutil',
'elasticsearch',
'attrs'
],
python_requires='>=3.6',
description='A collection of AWS utilities',
long_description=open(README).read(),
author='Well<NAME> (Digital Platform Team)',
author_email='<EMAIL>',
url='https://github.com/wellcometrust/aws_utils',
keywords=['aws'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development',
'Topic :: Utilities',
],
)
|
wellcomecollection/aws_utils
|
src/wellcome_aws_utils/exceptions.py
|
# -*- encoding: utf-8 -*-
class UnWellcomeException(Exception):
"""
Base exception for all exceptions raised by this library.
"""
pass
|
wellcomecollection/aws_utils
|
tests/test_ecs_utils.py
|
# -*- encoding: utf-8 -*-
import json
import boto3
from botocore.exceptions import ClientError
from mock import Mock
from moto import mock_ecs, mock_ec2
from moto.ec2 import utils as moto_ec2_utils
import pytest
from wellcome_aws_utils import ecs_utils
cluster_name = 'my_cluster'
service_name = 'my_service'
def ecs_cluster(ecs_client):
cluster_response = ecs_client.create_cluster(clusterName=cluster_name)
cluster_arn = cluster_response['cluster']['clusterArn']
task_definition_response = ecs_client.register_task_definition(
family='my_family',
containerDefinitions=[]
)
task_definition_arn = (
task_definition_response['taskDefinition']['taskDefinitionArn']
)
service_response = ecs_client.create_service(
cluster='my_cluster',
serviceName=service_name,
taskDefinition=task_definition_arn,
desiredCount=0
)
service_arn = service_response['service']['serviceArn']
return task_definition_arn, service_arn, cluster_arn
@mock_ecs
def test_get_cluster_arns():
ecs_client = boto3.client('ecs')
_, _, cluster_arn = ecs_cluster(ecs_client)
actual_cluster_list = ecs_utils.get_cluster_arns(ecs_client)
assert actual_cluster_list == [cluster_arn]
def test_get_cluster_arns_throws_EcsThrottleException():
mock_ecs_client = Mock()
mock_ecs_client.list_clusters.side_effect = ClientError(
error_response={
'Error': {
'Code': 'ThrottlingException'
}
},
operation_name="foo"
)
with pytest.raises(ecs_utils.EcsThrottleException):
ecs_utils.get_cluster_arns(mock_ecs_client)
@mock_ecs
def test_get_service_arns():
ecs_client = boto3.client('ecs')
_, service_arn, cluster_arn = ecs_cluster(ecs_client)
actual_service_list = (
ecs_utils.get_service_arns(ecs_client, cluster_arn)
)
assert actual_service_list == [service_arn]
def test_get_service_arns_throws_EcsThrottleException():
mock_ecs_client = Mock()
mock_ecs_client.list_services.side_effect = ClientError(
error_response={'Error': {
'Code': 'ThrottlingException'
}},
operation_name="foo"
)
with pytest.raises(ecs_utils.EcsThrottleException):
ecs_utils.get_service_arns(mock_ecs_client, 'foo/bar')
@mock_ecs
def test_describe_cluster():
ecs_client = boto3.client('ecs')
_, service_arn, cluster_arn = ecs_cluster(ecs_client)
actual_cluster_description = ecs_utils.describe_cluster(
ecs_client,
cluster_arn
)
actual_cluster_arn = (
actual_cluster_description['clusterArn']
)
assert actual_cluster_arn == cluster_arn
def test_describe_cluster_throws_EcsThrottleException():
mock_ecs_client = Mock()
mock_ecs_client.describe_clusters.side_effect = ClientError(
error_response={'Error': {
'Code': 'ThrottlingException'
}},
operation_name="foo"
)
with pytest.raises(ecs_utils.EcsThrottleException):
ecs_utils.describe_cluster(mock_ecs_client, 'foo/bar')
@mock_ecs
def test_describe_service():
ecs_client = boto3.client('ecs')
_, service_arn, cluster_arn = ecs_cluster(ecs_client)
actual_service_description = ecs_utils.describe_service(
ecs_client,
cluster_arn,
service_arn
)
actual_service_arn = (
actual_service_description['serviceArn']
)
assert actual_service_arn == service_arn
def test_describe_service_throws_EcsThrottleException():
mock_ecs_client = Mock()
mock_ecs_client.describe_services.side_effect = ClientError(
error_response={'Error': {
'Code': 'ThrottlingException'
}},
operation_name="foo"
)
with pytest.raises(ecs_utils.EcsThrottleException):
ecs_utils.describe_service(mock_ecs_client, 'foo/bar', 'bat/baz')
@mock_ec2
@mock_ecs
def test_run_task():
ecs_client = boto3.client('ecs')
ec2 = boto3.resource('ec2', region_name='us-east-1')
cluster_response = ecs_client.create_cluster(clusterName=cluster_name)
cluster_arn = cluster_response['cluster']['clusterArn']
test_instance = ec2.create_instances(
ImageId="ami-1234abcd",
MinCount=1,
MaxCount=1,
)[0]
instance_id_document = json.dumps(
moto_ec2_utils.generate_instance_identity_document(test_instance)
)
ecs_client.register_container_instance(
cluster=cluster_name,
instanceIdentityDocument=instance_id_document
)
task_definition_response = ecs_client.register_task_definition(
family='my_family',
containerDefinitions=[]
)
task_definition_arn = (
task_definition_response['taskDefinition']['taskDefinitionArn']
)
started_by = "started_by"
response = ecs_utils.run_task(
ecs_client,
cluster_name,
task_definition_arn,
started_by)
assert len(response["failures"]) == 0
assert len(response["tasks"]) == 1
assert response["tasks"][0]["taskDefinitionArn"] == task_definition_arn
assert response["tasks"][0]["clusterArn"] == cluster_arn
assert response["tasks"][0]["startedBy"] == started_by
|
wellcomecollection/aws_utils
|
tests/test_lambda_utils.py
|
# -*- encoding: utf-8
from wellcome_aws_utils import lambda_utils as lu
@lu.log_on_error
def return_2():
return 2
def test_log_on_error_preserves_return_value():
assert return_2() == 2
|
oleorhagen/mender-configure-module
|
tests/integration/install-mender-configure.py
|
#!/usr/bin/env python3
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
from ext4_manipulator import get, put, extract_ext4, insert_ext4
def main():
img = "mender-image-full-cmdline-rofs-qemux86-64.uefiimg"
# Extract ext4 image from img.
rootfs = "%s.ext4" % img
extract_ext4(img=img, rootfs=rootfs)
# Install module
put(
local_path="mender-configure",
remote_path="/usr/share/mender/modules/v3/mender-configure",
rootfs=rootfs,
remote_path_mkdir_p=True,
)
# Install inventory script
put(
local_path="mender-inventory-mender-configure",
remote_path="/usr/share/mender/inventory/mender-inventory-mender-configure",
rootfs=rootfs,
remote_path_mkdir_p=True,
)
# create empty folder
tf = tempfile.NamedTemporaryFile(delete=False)
try:
put(
local_path=tf.name,
remote_path="/var/lib/mender-configure/.empty-folder",
rootfs=rootfs,
remote_path_mkdir_p=True,
)
finally:
os.unlink(tf.name)
# Put back ext4 image into img.
insert_ext4(img=img, rootfs=rootfs)
if __name__ == "__main__":
main()
|
oleorhagen/mender-configure-module
|
tests/integration/tests/conftest.py
|
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from os import path
sys.path += [path.join(path.dirname(__file__), "..", "mender_integration")]
import logging
import random
import filelock
import pytest
import urllib3
from mender_integration.testutils.infra.container_manager import docker_compose_manager
from mender_integration.testutils.infra.device import MenderDevice
from mender_integration.tests.MenderAPI import devauth, reset_mender_api
from mender_test_containers.conftest import *
from mender_test_containers.container_props import *
from mender_integration.tests.conftest import pytest_exception_interact
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
logging.getLogger("filelock").setLevel(logging.INFO)
logging.getLogger("invoke").setLevel(logging.INFO)
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
collect_ignore = ["mender_integration"]
docker_lock = filelock.FileLock("docker_lock")
docker_compose_instance = "mender" + str(random.randint(0, 9999999))
inline_logs = False
MenderTestQemux86_64RofsMenderConfigure = ContainerProps(
image_name="mendersoftware/mender-client-qemu-rofs-mender-configure",
append_mender_version=False,
key_filename=path.join(
path.dirname(path.realpath(__file__)),
"../mender_test_containers/docker/ssh-keys/key",
),
)
TEST_CONTAINER_LIST = [MenderTestQemux86_64RofsMenderConfigure]
@pytest.fixture(scope="session", params=TEST_CONTAINER_LIST)
def setup_test_container_props(request):
return request.param
@pytest.fixture(scope="session")
def mender_version():
return "master"
class DockerComposeStandardSetupOneConfigureRofsClient(
docker_compose_manager.DockerComposeNamespace
):
def __init__(
self, extra_compose_file="../docker-compose.client.rofs.configure.yml"
):
compose_files = docker_compose_manager.DockerComposeNamespace.QEMU_CLIENT_FILES
compose_files += [
path.join(
path.dirname(__file__),
"../mender_integration/docker-compose.config.yml",
),
path.join(path.dirname(__file__), extra_compose_file),
]
docker_compose_manager.DockerComposeNamespace.__init__(
self, name="mender", extra_files=compose_files
)
@pytest.fixture(scope="function")
def standard_setup_one_rofs_configure_client(request):
env = DockerComposeStandardSetupOneConfigureRofsClient()
request.addfinalizer(env.teardown)
env.setup()
env.device = MenderDevice(env.get_mender_clients()[0])
env.device.ssh_is_opened()
reset_mender_api(env)
devauth.accept_devices(1)
return env
|