max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
scrapy_toolbox/database.py | IW-Big-Data-Analytics/scrapy-toolbox | 4 | 12769051 | from scrapy import signals
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import database_exists, create_database
from sqlalchemy.orm import object_mapper
from .mapper import ItemsModelMapper
import os
# Single declarative base shared by all ORM model classes; its metadata is
# used by DatabasePipeline.create_tables() to create the tables.
DeclarativeBase = declarative_base()
# https://www.python.org/download/releases/2.2/descrintro/#__new__
class Singleton(object):
    """Base class implementing the singleton pattern via ``__new__``.

    The single instance is cached on the class as ``__it__`` (looked up
    through ``cls.__dict__`` so each subclass gets its own instance rather
    than inheriting the parent's). On first creation, ``init`` is invoked
    with the constructor arguments; subsequent constructions return the
    cached instance. Subclasses should override ``init`` for one-time setup.
    """
    def __new__(cls, *args, **kwds):
        instance = cls.__dict__.get("__it__")
        if instance is None:
            # First construction for this class: create, cache, initialize.
            instance = object.__new__(cls)
            cls.__it__ = instance
            instance.init(*args, **kwds)
        return instance
    def init(self, *args, **kwds):
        # One-time initialization hook; default does nothing.
        pass
class DatabasePipeline(Singleton):
    """Scrapy item pipeline that persists scraped items via SQLAlchemy.

    Inherits ``Singleton`` so all spiders in the process share one pipeline
    (and thus one database session).

    NOTE(review): ``Singleton.__new__`` caches the instance, but Python still
    runs ``__init__`` on every ``DatabasePipeline(...)`` call, so the session
    is re-created on repeated instantiation — confirm this is intended.
    """
    def __init__(self, settings, items=None, model=None, database=None, database_dev=None):
        # Production DB config: explicit argument wins over the scrapy
        # settings entry "DATABASE".
        if database:
            self.database = database
        elif settings:
            self.database = settings.get("DATABASE")
            # Force full-unicode charset for the connection.
            # NOTE(review): assumes the settings dict has a "query" sub-dict
            # (sqlalchemy URL kwargs) — raises TypeError/KeyError otherwise.
            self.database["query"]["charset"] = 'utf8mb4'
        # Development DB config, same precedence rules as above.
        if database_dev:
            self.database_dev = database_dev
        elif settings:
            self.database_dev = settings.get("DATABASE_DEV")
            self.database_dev["query"]["charset"] = 'utf8mb4'
        # Open the session eagerly (also creates database/tables if missing).
        self.session = self.get_session()
        if items and model:
            # Maps scrapy item classes to SQLAlchemy model classes.
            self.mapper = ItemsModelMapper(items=items, model=model)
    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the pipeline from the crawler settings
        and register the spider_closed signal so the session gets closed."""
        pipeline = cls(crawler.settings)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        # Expose the session so spiders can query the DB directly.
        crawler.database_session = pipeline.session
        return pipeline
    def get_session(self):
        """Create engine + tables, then return a new session bound to it."""
        engine = self.create_engine()
        self.create_tables(engine)
        return self.create_session(engine)
    def create_engine(self):
        """Build the engine from the production config when the PRODUCTION
        env var is set, otherwise from the dev config; create the database
        itself if it does not exist yet."""
        if "PRODUCTION" in os.environ:
            engine = create_engine(URL(**self.database))
        else:
            engine = create_engine(URL(**self.database_dev))
        if not database_exists(engine.url):
            create_database(engine.url)
        return engine
    def create_tables(self, engine):
        # checkfirst=True: only create tables that are missing.
        DeclarativeBase.metadata.create_all(engine, checkfirst=True)
    def create_session(self, engine):
        session = sessionmaker(bind=engine, autoflush=False)() # autoflush=False: "This is useful when initializing a series of objects which involve existing database queries, where the uncompleted object should not yet be flushed." for instance when using the Association Object Pattern
        return session
    def spider_closed(self, spider):
        # Signal handler: release the DB connection when the spider finishes.
        self.session.close()
    def process_item(self, item, spider):
        """Map the item to its model, persist it, and copy autogenerated
        primary-key values back onto the item. Returns the item for the
        next pipeline stage."""
        obj = self.mapper.map_to_model(item=item, sess=self.session)
        try:
            self.session.add(obj)
            self.session.commit()
            # Set potentially missing primary keys (autoincrement) for the item
            mapper = object_mapper(obj)
            for key, value in zip(mapper.primary_key, mapper.primary_key_from_instance(obj)):
                item[key.name] = value
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # it re-raises, so no error is swallowed, but `except Exception:`
        # would be the conventional spelling.
        except:
            self.session.rollback()
            raise
        finally:
            # Close after every item; the sessionmaker session can be reused,
            # a new transaction begins on the next operation.
            self.session.close()
        return item
| 2.4375 | 2 |
test/unit/data_factory.py | JoachimC/magicbox_distance | 0 | 12769052 | from geopy import Point
from magicbox_distance import shapefile, distance
# Test fixture: three lat/lon points forming a right-angle path of two legs.
right_angle_start = Point(1.0, 1.0)
right_angle_middle = Point(1.1, 1.0)
right_angle_end = Point(1.1, 1.1)
# Expected total length of the two legs, computed with the same distance
# helper the production code uses, so tests compare like with like.
right_angle_distance = distance.using_latitude_and_longitude(right_angle_start, right_angle_middle) + \
                       distance.using_latitude_and_longitude(right_angle_middle, right_angle_end)
def create_shapefile(roads):
    """Build a list of POLYLINE shapefile records, one per road, with the
    record id taken from the road's position in the input sequence."""
    polyline = shapefile.ShapeType.POLYLINE
    return [
        shapefile.create_record(record_id, polyline, [road])
        for record_id, road in enumerate(roads)
    ]
def create_part(*args):
    """Wrap the given points into a shapefile part with fixed index 1."""
    points = list(args)
    return shapefile.create_part(1, points)
| 2.90625 | 3 |
src/data/dataset_wrapper.py | vkinakh/scatsimclr | 12 | 12769053 | from typing import Tuple
from torch.utils.data import Dataset
from .datasets import get_dataset
from .augmentor import ContrastiveAugmentor
from .base_dataset_wrapper import BaseDatasetWrapper
class UnsupervisedDatasetWrapper(BaseDatasetWrapper):
    """Dataset wrapper for unsupervised image classification.

    Builds the underlying dataset with contrastive augmentations applied,
    delegating loader construction to ``BaseDatasetWrapper``.
    """

    def __init__(self,
                 batch_size: int,
                 valid_size: float,
                 input_shape: Tuple[int, int, int],
                 dataset: str):
        """
        Args:
            batch_size: batch size used by the train and validation loaders
            valid_size: fraction of data reserved for validation, in (0, 1)
            input_shape: image size as (H, W, C) — height, width, channels
            dataset: name of the dataset; must be one of SUPPORTED_DATASETS
        """
        super().__init__(batch_size, valid_size, input_shape, dataset)

    def get_dataset(self, dataset: str) -> Dataset:
        # Attach the contrastive augmentation pipeline sized for this input.
        augmentations = ContrastiveAugmentor(dataset, self._input_size)
        return get_dataset(dataset, True, augmentations, True, True)
| 2.796875 | 3 |
setup.py | ameli/gaussian_proc | 0 | 12769054 | <reponame>ameli/gaussian_proc
#!/usr/bin/env python
# SPDX-FileCopyrightText: Copyright 2021, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
from __future__ import print_function
import os
from os.path import join
import sys
from glob import glob
import subprocess
import codecs
import tempfile
import shutil
from distutils.errors import CompileError, LinkError
import textwrap
import multiprocessing
# ===============
# install package
# ===============
def install_package(package):
    """
    Installs a package with pip, using the interpreter that runs this setup.
    Example:
    .. code-block:: python
        >>> install_package('numpy>1.11')
    :param package: Name of package with or without its version pin.
    :type package: string
    """
    pip_command = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(pip_command)
# =====================
# Import Setup Packages
# =====================
# Bootstrap pattern: each build-time dependency is imported and, if the
# import fails, installed on the fly via pip (install_package above) and
# imported again.
# Import numpy
try:
    import numpy
except ImportError:
    # Install numpy
    install_package('numpy>1.11')
    import numpy
# Check scipy is installed (needed for build, but not required to be imported)
try:
    import scipy  # noqa F401
except ImportError:
    # Install scipy
    install_package('scipy')
# Check special_functions is installed (needed for build)
try:
    import special_functions  # noqa F401
except ImportError:
    # Install special_functions
    install_package('special_functions')
# Import setuptools
try:
    import setuptools
    from setuptools.extension import Extension
except ImportError:
    # Install setuptools
    install_package('setuptools')
    import setuptools
    from setuptools.extension import Extension
# Import Cython (to convert pyx to C code)
try:
    from Cython.Build import cythonize
except ImportError:
    # Install Cython
    install_package('cython')
    from Cython.Build import cythonize
# Import build_ext
try:
    from Cython.Distutils import build_ext
except ImportError:
    # NOTE(review): this imports the *module* distutils.command.build_ext,
    # not the command class; subclassing it below would fail. Confirm the
    # intended fallback is `from distutils.command.build_ext import build_ext`.
    from distutils.command import build_ext
# =========================
# get environment variables
# =========================
"""
To compile with cuda, set ``USE_CUDA`` environment variable.
To compile for debugging, set ``DEBUG_MODE`` environment variable.
::
    # In Unix
    export USE_CUDA=1
    export DEBUG_MODE=1
    # In Windows
    $env:USE_CUDA = "1"
    $env:DEBUG_MODE = "1"
    python setup.py install
If you are using ``sudo``, to pass the environment variable, use ``-E`` option:
::
    sudo -E python setup.py install
"""
# If USE_CUDA is set to "1", the package is compiled with cuda lib using nvcc.
use_cuda = False
if 'USE_CUDA' in os.environ and os.environ.get('USE_CUDA') == '1':
    use_cuda = True
# If DEBUG_MODE is set to "1", the package is compiled with debug mode.
debug_mode = False
if 'DEBUG_MODE' in os.environ and os.environ.get('DEBUG_MODE') == '1':
    debug_mode = True
# ============
# find in path
# ============
def find_in_path(executable_name, path):
    """
    Searches for the executable ``executable_name`` in each directory listed
    in the given ``path`` string (directories separated by ``os.pathsep``)
    and returns the absolute path of the first occurrence found. If no
    executable is found, ``None`` is returned. This is used to find CUDA's
    directories.

    :param executable_name: Base name of the executable to locate.
    :type executable_name: string
    :param path: Search path, e.g. the value of the ``PATH`` environment
        variable.
    :type path: string
    :return: Absolute path of the executable, or ``None`` if not found.
    """
    # 'directory' instead of 'dir' to avoid shadowing the builtin.
    for directory in path.split(os.pathsep):
        executable_path = join(directory, executable_name)
        if os.path.exists(executable_path):
            return os.path.abspath(executable_path)
    return None
# ===========
# locate cuda
# ===========
def locate_cuda():
    """
    Finds the executable ``nvcc`` (or ``nvcc.exe`` on windows). If found,
    creates a dictionary of cuda's executable path, include and lib
    directories and home directory. This is used for GPU.

    :return: Dictionary with keys ``'home'``, ``'nvcc'``, ``'include'`` and
        ``'lib'``.
    :rtype: dict
    :raises EnvironmentError: If called while ``use_cuda`` is disabled, or if
        ``nvcc`` or CUDA's include/lib directories cannot be located.
    """
    if not use_cuda:
        raise EnvironmentError('This function should not be called when '
                               '"USE_CUDA" is not set to "1".')

    # List of environment variables to search for cuda
    environs = ['CUDA_HOME', 'CUDA_ROOT', 'CUDA_PATH']
    cuda_found = False

    # nvcc binary
    nvcc_binary_name = 'nvcc'
    if sys.platform == 'win32':
        nvcc_binary_name = nvcc_binary_name + '.exe'

    # Search in each of the possible environment variables, if they exist
    for env in environs:
        if env in os.environ:
            # Home
            home = os.environ[env]
            if not os.path.exists(home):
                continue

            # nvcc binary
            nvcc = join(home, 'bin', nvcc_binary_name)
            if not os.path.exists(nvcc):
                continue
            else:
                cuda_found = True
                break

    # Brute-force search in all path to find nvcc binary
    if not cuda_found:
        nvcc = find_in_path(nvcc_binary_name, os.environ['PATH'])
        if nvcc is None:
            # Fixed: the original message contained the duplicated word
            # "located located".
            raise EnvironmentError('The "nvcc" binary could not be located '
                                   'in $PATH. Either add it to '
                                   'path or set $CUDA_HOME, or $CUDA_ROOT, '
                                   'or $CUDA_PATH.')
        home = os.path.dirname(os.path.dirname(nvcc))

    # Include directory
    include = join(home, 'include')
    if not os.path.exists(include):
        raise EnvironmentError("The CUDA's include directory could not be " +
                               "located in %s." % include)

    # Library directory: prefer 'lib', fall back to 'lib64'
    lib = join(home, 'lib')
    if not os.path.exists(lib):
        lib64 = join(home, 'lib64')
        if not os.path.exists(lib64):
            raise EnvironmentError("The CUDA's lib directory could not be " +
                                   "located in %s or %s." % (lib, lib64))
        lib = lib64

    # Output dictionary of set of paths
    cuda = {
        'home': home,
        'nvcc': nvcc,
        'include': include,
        'lib': lib
    }

    return cuda
# ================================
# customize unix compiler for nvcc
# ================================
def customize_unix_compiler_for_nvcc(self, cuda):
    """
    Sets compiler to treat 'cpp' and 'cu' file extensions differently. Namely:
    1. A 'cpp' file is treated as usual with the default compiler and the same
       compiler and linker flags as before.
    2. For a 'cu' file, the compiler is switched to 'nvcc' with other compiler
       flags that suit a GPU machine.
    This function should only be called for a 'unix' compiler (``gcc``,
    ``clang`` or similar). For windows ``msvc`` compiler, this function does
    not apply.
    .. note::
        This function should be called when ``USE_CUDA`` is enabled.

    :param self: A distutils compiler object (monkeypatched in place).
    :param cuda: Dictionary of CUDA paths as returned by ``locate_cuda``.
    """
    # Register '.cu' so distutils accepts CUDA sources.
    self.src_extensions.append('.cu')
    # Backup default compiler to call them later
    default_compiler_so = self.compiler_so
    # NOTE(review): ``super`` shadows the builtin here; it simply holds the
    # original bound ``_compile`` method for delegation below.
    super = self._compile
    # =======
    # compile
    # =======
    def _compile(obj, src, ext, cc_args, extra_compile_args, pp_opts):
        """
        Define ``_compile`` method to be called before the original
        ``self.compile`` method. This function modifies the dispatch of the
        compiler depending on the source file extension ('cu', or non 'cu'
        file), then calls the original (backed up) compile function.
        Note: ``extra_compile_args`` is a dictionary with two keys
        ``"nvcc"`` and ``"not_nvcc"``. Respectively, the values of each are
        lists of extra_compile_args for nvcc (to compile .cu files) and other
        compile args to compile other files. This dictionary was created in
        the extra_compile_args when each extension is created (see later in
        this script).
        """
        if os.path.splitext(src)[1] == '.cu':
            # Use nvcc for *.cu files.
            self.set_executable('compiler_so', cuda['nvcc'])
            # Use only a part of extra_postargs dictionary with the key "nvcc"
            _extra_compile_args = extra_compile_args['nvcc']
        else:
            # for any other file extension, use the default compiler. Also,
            # for the extra compile args, use args in "not_nvcc" key
            _extra_compile_args = extra_compile_args['not_nvcc']
        # Pass back to the default compiler
        super(obj, src, ext, cc_args, _extra_compile_args, pp_opts)
        # Return back the previous default compiler to self.compiler_so
        self.compiler_so = default_compiler_so
    self._compile = _compile
# ===================================
# customize windows compiler for nvcc
# ===================================
def customize_windows_compiler_for_nvcc(self, cuda):
    """
    TODO: This function is not yet fully implemented. There is an issue with
    the self.compile of distutil for windows. The issue is that the ``sources``
    argument in ``compile`` method is NOT a single file, rather is a list of
    all files.
    .. note::
        This function should be called when ``USE_CUDA`` is enabled.

    :param self: A distutils MSVC compiler object (monkeypatched in place).
    :param cuda: Dictionary of CUDA paths as returned by ``locate_cuda``.
    """
    # MSVC compilers initialize lazily; make sure attributes exist.
    if not self.initialized:
        self.initialize()
    self.src_extensions.append('.cu')
    # Backup default compiler
    # default_compiler_so = self.compiler_so
    default_cc = self.cc
    # NOTE(review): ``super`` shadows the builtin; it holds the original
    # bound ``compile`` method.
    super = self.compile
    # =======
    # compile
    # =======
    # def compile(obj, src, ext, cc_args, extra_compile_args_dict, pp_opts):
    def compile(sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        """
        Redefine ``_compile`` method to dispatch relevant compiler for each
        file extension. For ``.cu`` files, the ``nvcc`` compiler will be used.
        Note: ``extra_compile_args_dict`` is a dictionary with two keys
        ``"nvcc"`` and ``"gcc"``. Respectively, the values of each are lists of
        extra_compile_args for nvcc (to compile .cu files) and other compile
        args to compile other files. This dictionary was created in the
        extra_compile_args when each extension is created (see later in this
        script).
        """
        # NOTE(review): the `if True:` branch means the non-nvcc path below
        # is currently unreachable — this is the unfinished part of the TODO.
        # if os.path.splitext(src)[1] == '.cu':
        if True:
            # Use nvcc for *.cu files.
            # self.set_executable('compiler_so', cuda['nvcc'])
            # self.set_executable('cc', cuda['nvcc'])
            self.cc = cuda['nvcc']
            # Use only a part of extra_postargs dictionary with the key "nvcc"
            extra_postargs = extra_postargs['nvcc']
        else:
            # for any other file extension, use the default compiler. Also,
            # for the extra compile args, use args in "gcc" key
            extra_postargs = extra_postargs['gcc']
        # Pass back to the default compiler
        # super(obj, src, ext, cc_args, extra_compile_args, pp_opts)
        super(sources, output_dir, macros, include_dirs, debug,
              extra_preargs, extra_postargs, depends)
        # Return back the previous default compiler to self.compiler_so
        # self.compiler_so = default_compiler_so
        self.cc = default_cc
    self.compile = compile
# =======================
# check compiler has flag
# =======================
def check_compiler_has_flag(compiler, compile_flags, link_flags):
    """
    Checks if the C compiler has a given flag. The motivation for this function
    is that:
    * In Linux, the gcc compiler has ``-fopenmp`` flag, which enables compiling
      with OpenMP.
    * In macOS, the clang compiler does not recognize ``-fopenmp`` flag,
      rather, this flag should be passed through the preprocessor using
      ``-Xpreprocessor -fopenmp``.
    Thus, we should know in advance which compiler is employed to provide the
    correct flags. The problem is that in the setup.py script, we cannot
    determine if the compiler is gcc or clang. The closest we can get is to
    call
    .. code-block:: python
        >>> import distutils.ccompiler
        >>> print(distutils.ccompiler.get_default_compiler())
    In both Linux and macOS, the above line returns ``unix``, and in windows it
    returns ``msvc`` for Microsoft Visual C++. In the case of Linux and macOS,
    we cannot figure which compiler is being used as both outputs are the same.
    The safest solution so far is this function, which compiles a small c code
    with the given flags and checks if it compiles successfully. In case
    of ``unix``, if it compiles with ``-fopenmp``, it is gcc on Linux,
    otherwise it is likely to be the ``clang`` compiler on macOS.
    :param compiler: The compiler object from build_ext.compiler
    :type compiler: build_ext.compiler
    :param compile_flags: A list of compile flags, such as
        ``['-Xpreprocessor','-fopenmp']``
    :type compile_flags: list(string)
    :param link_flags: A list of linker flags, such as
        ``['-Xpreprocessor','-fopenmp']``
    :type link_flags: list(string)
    :return: ``True`` if a test program compiles and links with the flags.
    :rtype: bool
    """
    if "PYODIDE_PACKAGE_ABI" in os.environ:
        # pyodide doesn't support OpenMP
        return False

    compile_success = True
    current_working_dir = os.getcwd()
    temp_dir = tempfile.mkdtemp()
    filename = 'test.c'
    code = "#include <omp.h>\nint main(int argc, char** argv) { return(0); }"

    # Considerations for Microsoft visual C++ compiler
    if compiler.compiler_type == "msvc":
        link_flags = link_flags + ['/DLL']

    # Write a code in temp directory
    os.chdir(temp_dir)
    try:
        with open(filename, 'wt') as file_obj:
            file_obj.write(code)
        try:
            # Try to compile
            objects = compiler.compile([filename],
                                       extra_postargs=compile_flags)
            try:
                # Try to link
                compiler.link_shared_lib(
                        objects,
                        "testlib",
                        extra_postargs=link_flags)
            except (LinkError, TypeError):
                # Linker was not successful
                compile_success = False
        except CompileError:
            # Compile was not successful
            compile_success = False
    finally:
        # Fix: restore the working directory and remove the temp directory
        # even if an unexpected exception escapes the compile/link attempt;
        # previously such an exception left the process chdir'd into a
        # leaked temp dir.
        os.chdir(current_working_dir)
        shutil.rmtree(temp_dir)

    return compile_success
# ======================
# Custom Build Extension
# ======================
class CustomBuildExtension(build_ext):
    """
    Customized ``build_ext`` that provides correct compile and linker flags to
    the extensions depending on the compiler and the operating system platform.
    Default compiler names depending on platform:
    * linux: gcc
    * mac: clang (llvm)
    * windows: msvc (Microsoft Visual C++)
    Compiler flags:
    * gcc : -O3 -fno-stack-protector -Wall -fopenmp
    * clang : -O3 -fno-stack-protector -Wall -Xpreprocessor -fopenmp
    * msvc : /O2 /Wall /openmp
    Linker flags:
    * gcc : -fopenmp
    * clang : -Xpreprocessor -fopenmp -lomp
    * msvc : (none)
    Usage:
    This class (CustomBuildExtension) is a child of ``build_ext`` class. To use
    this class, add it to the ``cmdclass`` by:
    .. code-block: python
        >>> setup(
        ...     ...
        ...     # cmdclass = {'build_ext' : }  # default
        ...     cmdclass = {'build_ext' : CustomBuildExtension}  # this class
        ...     ...
        ... )
    """
    # ---------------
    # Build Extension
    # ---------------
    def build_extensions(self):
        """
        Specifies compiler and linker flags depending on the compiler.
        .. warning::
            DO NOT USE '-march=native' flag. By using this flag, the compiler
            optimizes the instructions for the native machine of the build time
            and the executable will not be backward compatible to older CPUs.
            As a result, the package will not be distributable on other
            machines as the installation with the binary wheel crashes on other
            machines with this error:
            'illegal instructions (core dumped).'
            An alternative optimization flag is '-mtune=native', which is
            backward compatible and the package can be installed using wheel
            binary file.
        """
        # Get compiler type. This is "unix" (linux, mac) or "msvc" (windows)
        compiler_type = self.compiler.compiler_type
        # Initialize flags
        extra_compile_args = []
        extra_link_args = []
        if compiler_type == 'msvc':
            # This is Microsoft Windows Visual C++ compiler
            msvc_compile_args = ['/O2', '/Wall', '/openmp']
            msvc_link_args = []
            # Probe the compiler by actually compiling a tiny OpenMP program.
            msvc_has_openmp_flag = check_compiler_has_flag(
                    self.compiler,
                    msvc_compile_args,
                    msvc_link_args)
            if msvc_has_openmp_flag:
                # Add flags
                extra_compile_args += msvc_compile_args
                extra_link_args += msvc_link_args
            else:
                # It does not seem msvc accept -fopenmp flag.
                raise RuntimeError(textwrap.dedent(
                    """
                    OpenMP does not seem to be available on %s compiler.
                    """ % compiler_type))
        else:
            # The compile_type is 'unix'. This is either linux or mac.
            # We add common flags that work both for gcc and mac's clang
            extra_compile_args += ['-O3', '-fno-stack-protector', '-Wall']
            # The option '-Wl, ..' will send arguments to the linker. Here,
            # '--strip-all' will remove all symbols from the shared library.
            # NOTE(review): '-Wl, --strip-all' contains a space after the
            # comma and is passed as a *compile* (not link) arg — confirm
            # the intended spelling is '-Wl,--strip-all' on the linker args.
            if not debug_mode:
                extra_compile_args += ['-g0', '-Wl, --strip-all']
            # Assume compiler is gcc (we do not know yet). Check if the
            # compiler accepts '-fopenmp' flag. Note: clang in mac does not
            # accept this flag alone, but gcc does.
            gcc_compile_args = ['-fopenmp']
            gcc_link_args = ['-fopenmp']
            gcc_has_openmp_flag = check_compiler_has_flag(
                    self.compiler,
                    gcc_compile_args,
                    gcc_link_args)
            if gcc_has_openmp_flag:
                # Assuming this is gcc. Add '-fopenmp' safely.
                extra_compile_args += gcc_compile_args
                extra_link_args += gcc_link_args
            else:
                # Assume compiler is clang (we do not know yet). Check if
                # -fopenmp can be passed through preprocessor. This is how
                # clang compiler accepts -fopenmp.
                clang_compile_args = ['-Xpreprocessor', '-fopenmp']
                clang_link_args = ['-Xpreprocessor', '-fopenmp', '-lomp']
                clang_has_openmp_flag = check_compiler_has_flag(
                        self.compiler,
                        clang_compile_args,
                        clang_link_args)
                if clang_has_openmp_flag:
                    # Assuming this is mac's clang. Add '-fopenmp' through
                    # preprocessor
                    extra_compile_args += clang_compile_args
                    extra_link_args += clang_link_args
                else:
                    # It doesn't seem either gcc or clang accept -fopenmp flag.
                    raise RuntimeError(textwrap.dedent(
                        """
                        OpenMP does not seem to be available on %s compiler.
                        """ % compiler_type))
        # Modify compiler flags for cuda
        if use_cuda:
            # Compile flags for nvcc
            if sys.platform == 'win32':
                extra_compile_args_nvcc = ['/Ox']
            else:
                extra_compile_args_nvcc = ['-arch=sm_35', '--ptxas-options=-v',
                                           '-c', '--compiler-options', '-fPIC',
                                           '-O3', '--verbose', '--shared']
            # The option '-Wl, ..' will send arguments to the linker. Here,
            # '--strip-all' will remove all symbols from the shared library.
            if debug_mode:
                extra_compile_args_nvcc += ['-g', '-G']
            else:
                extra_compile_args_nvcc += ['--linker-options', '--strip-all']
            # Redefine extra_compile_args list to be a dictionary; the
            # per-extension dispatch in customize_*_compiler_for_nvcc picks
            # the right key per source file extension.
            extra_compile_args = {
                'not_nvcc': extra_compile_args,
                'nvcc': extra_compile_args_nvcc
            }
        # Add the flags to all extensions
        for ext in self.extensions:
            ext.extra_compile_args = extra_compile_args
            ext.extra_link_args = extra_link_args
        # Parallel compilation (can also be set via build_ext -j or --parallel)
        # Note: parallel build fails in windows since object files are accessed
        # by race condition.
        # if sys.platform != 'win32':
        #     self.parallel = multiprocessing.cpu_count()
        # Modify compiler for cuda
        if use_cuda:
            cuda = locate_cuda()
            if sys.platform == 'win32':
                customize_windows_compiler_for_nvcc(self.compiler, cuda)
            else:
                customize_unix_compiler_for_nvcc(self.compiler, cuda)
        # Remove warning: command line option '-Wstrict-prototypes' is valid
        # for C/ObjC but not for C++
        try:
            if '-Wstrict-prototypes' in self.compiler.compiler_so:
                self.compiler.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass
        # Call parent class to build
        build_ext.build_extensions(self)
# =========
# Read File
# =========
def read_file(Filename):
    """
    Returns the entire content of ``Filename``, decoded with the latin codec.
    """
    with codecs.open(Filename, 'r', 'latin') as stream:
        content = stream.read()
    return content
# ================
# Read File to RST
# ================
def read_file_to_rst(filename):
    """
    Reads a markdown text file and converts it to an RST string using
    pypandoc. When pypandoc is not installed, falls back to returning the
    raw (latin-decoded) file content.
    """
    try:
        import pypandoc
        rst_filename = "{}.{}".format(os.path.splitext(filename)[0], 'rst')
        pypandoc.convert(
                filename,
                'rst',
                format='markdown',
                outputfile=rst_filename)
        with open(rst_filename, 'r') as rst_file:
            return rst_file.read()
    except ImportError:
        return read_file(filename)
# ======================
# does cuda source exist
# ======================
def does_cuda_source_exist(sources):
    """
    Checks file extensions in the list of files in ``sources``. Returns
    ``True`` if any file has the ``.cu`` extension, otherwise ``False``.

    :param sources: A list of file names.
    :type sources: list
    :return: Whether a CUDA source file is present.
    :rtype: bool
    """
    # any() short-circuits at the first '.cu' match, same as the manual loop.
    return any(os.path.splitext(source)[1] == '.cu' for source in sources)
# ================
# create Extension
# ================
def create_extension(
        package_name,
        subpackage_name,
        other_source_dirs=None,
        other_source_files=None,
        other_include_dirs=None):
    """
    Creates an extension for each of the sub-packages that contain
    ``.pyx`` files.
    How to add a new cython sub-package or module:
        In the :func:`main` function, add the name of cython sub-packages or
        modules in the `subpackages_names` list. Note that only include those
        sub-packages in the input list that have cython's *.pyx files. If a
        sub-package is purely python, it should not be included in that list.
    Compile arguments:
        The compiler and linker flags (``extra_compile_args`` and
        ``extra_link_args``) are set to an empty list. We will fill them using
        ``CustomBuildExtension`` class, which depending on the compiler and
        platform, sets correct flags.
    Parameters:
    :param package_name: Name of the main package
    :type package_name: string
    :param subpackage_name: Name of the subpackage to build its extension.
        In the package_name/subpackage_name directory, all ``pyx``, ``c``,
        ``cpp``, and ``cu`` files (if use_cuda is True) will be added to the
        extension. If there are additional ``c``, ``cpp`` and ``cu`` source
        files in other directories beside the subpackage directory, use
        ``other_source_dirs`` argument.
    :type subpackage_name: string
    :param other_source_dirs: To add any other source files (only ``c``,
        ``cpp``, and ``cu``, but not ``pyx``) that are outside of the
        subpackage directory, use this argument. The ``other_source_dirs`` is
        a list of directories to include their path to ``include_dir`` and
        to add all of the ``c``, ``cpp``, and ``cu`` files to ``sources``.
        Note that the ``pyx`` files in these other directories will not be
        added. To add a ``pyx`` file, use ``subpackage_name`` argument, which
        creates a separate module extension for each ``pyx`` file.
    :type other_source_dirs: list(string)
    :param other_source_files: A list of fullpath names of other source files
        (only ``c``, ``cpp``, and ``cu``), that are not in the
        ``subpackage_name`` directory, neither are in the ``other_source_dirs``
        directory.
    :type other_source_files: list(string)
    :param other_include_dirs: A list of fullpath directories of other source
        files, such as other ``*.cpp`` or ``*.cu`` that are not in the
        directories of ``subpackage_name`` and ``other_source_dirs`` arguments.
    :type other_include_dirs: list(string)
    :return: Extension object for the sub-package (cythonized later).
    :rtype: setuptools.extension.Extension
    """
    # Check directory
    subpackage_dir_name = join(package_name, subpackage_name)
    if not os.path.isdir(subpackage_dir_name):
        raise ValueError('Directory %s does not exists.' % subpackage_dir_name)
    # Whether to create a module for each pyx file or one for all cpp files
    pyx_sources = join('.', package_name, subpackage_name, '*.pyx')
    if glob(pyx_sources) != []:
        # Creates a directory of modules for each pyx file
        name = package_name + '.' + subpackage_name + '.*'
        sources = [pyx_sources]
    else:
        # Create one so file (not a directory) for all source files (cpp, etc)
        name = package_name + '.' + subpackage_name
        sources = []
        sources += glob(join('.', package_name, subpackage_name, '*.cpp'))
        if use_cuda:
            sources += glob(join('.', package_name, subpackage_name, '*.cu'))
    include_dirs = [join('.', package_name, subpackage_name)]
    extra_compile_args = []  # will be filled by CustomBuildExtension class
    extra_link_args = []     # will be filled by CustomBuildExtension class
    library_dirs = []
    runtime_library_dirs = []
    libraries = []
    language = 'c++'
    # Include any additional source files
    if other_source_files is not None:
        # Check source files exist
        for source_file in other_source_files:
            if not os.path.isfile(source_file):
                raise ValueError('File %s does not exists.' % source_file)
        sources += other_source_files
    # Include any additional include directories
    if other_include_dirs is not None:
        # Check if directories exist
        for include_dir in other_include_dirs:
            if not os.path.isdir(include_dir):
                raise ValueError('Directory %s does not exists.' % include_dir)
        include_dirs += other_include_dirs
    # Glob entire source c, cpp and cu files in other source directories
    if other_source_dirs is not None:
        for other_source_dir in other_source_dirs:
            # Check directory exists
            other_source_dirname = join(package_name, other_source_dir)
            if not os.path.isdir(other_source_dirname):
                raise ValueError('Directory %s does not exists.'
                                 % other_source_dirname)
            sources += glob(join(other_source_dirname, '*.c'))
            sources += glob(join(other_source_dirname, '*.cpp'))
            # BUG FIX: the original `include_dirs += join(other_source_dirname)`
            # extended the list with the individual *characters* of the path
            # string. Append the directory path itself instead.
            include_dirs += [other_source_dirname]
            if use_cuda:
                sources += \
                    glob(join(other_source_dirname, '*.cu'))
    # Add cuda info
    if use_cuda:
        cuda = locate_cuda()
        # Check if any '*.cu' files exists in the sources
        has_cuda_source = does_cuda_source_exist(sources)
        # Add cuda libraries only if a cuda source exists. This is necessary
        # to run the non-cuda modules on machines without cuda installed.
        if has_cuda_source:
            include_dirs += [cuda['include']]
            library_dirs += [cuda['lib']]
            runtime_library_dirs += [cuda['lib']]
            libraries += ['cudart', 'cublas', 'cusparse']
    # Create an extension
    extension = Extension(
        name,
        sources=sources,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        runtime_library_dirs=runtime_library_dirs,
        libraries=libraries,
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
        language=language,
        define_macros=[('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')],
    )
    return extension
# ====================
# cythonize extensions
# ====================
def cythonize_extensions(extensions):
    """
    Cythonizes the given list of extensions, choosing in-source vs
    out-of-source build directories and compiler directives from environment
    variables.

    NOTE(review): the local name ``compiler_derivatives`` presumably means
    Cython "compiler directives" (the keyword it is passed to is
    ``compiler_directives``).

    Resolving issue with conda-build:
        If the code is build using "conda-build" to be uploaded on anaconda
        cloud, consider setting this environmental variable:
        ::
            export CYTHON_BUILD_IN_SOURCE='true'
        By setting so, this function sets the build directory ``build_dir``
        to ``None``, which then the ``*.c`` files will be written in the source
        code alongside with the source (where ``*.pyx`` are). If this
        environmental variable does not exist, this function sets ``build_dir``
        to ``build`` directory, which builds the cython files outside of the
        source code.
    Why this matters?
        Apparently, ``conda-build`` has a bug and emerges if the following two
        conditions are met:
        1. conda builds over multiple variants of the conda recipe (by defining
           jinja variables in the file ``/conda/conda_build_config.yaml``),
           such as defining multiple python versions and then using the jinja
           variable ``{{ python }}`` in ``/conda/meta.yaml``.
        2. Cython builds the ``*.c`` files outside of the source. That is, when
           we set ``build_dir`` to anything but its default in
           ``cythonize(build_dir='some_directory')``. When ``build_dir`` is
           set to ``None``, cython builds the ``*.c`` files in source. But when
           ``build_dir`` is set to a directory, the ``*.c`` files will be
           written there.
        Now, when the two above are set, ``conda-build`` faces a race condition
        to build multiple versions of the package for variants of python
        versions (if this is the variant variable), and crashes. Either
        conda-build should build only one variant of the ``meta.yaml`` file
        (that is, defining no variant in ``/conda/conda_build.config.yaml``),
        or cython should build in source.
        To resolve this, set ``CYTHON_BUILD_IN_SOURCE`` whenever the package is
        build with build-conda. Also see the github action
        ``./github/workflow/deploy-conda/yaml``
        ::
            env:
                CYTHON_BUILD_IN_SOURCE: 'true'
    Resolving issue with docstring for documentation:
        To build this package only to generate proper cython docstrings for the
        documentation, set the following environment variable:
        ::
            export CYTHON_BUILD_FOR_DOC='true'
        If the documentation is generated by the github actions, set
        ::
            env:
                CYTHON_BUILD_FOR_DOC: 'true'
    .. warning::
        DO NOT USE `linetrace=True` for a production code. Only use linetrace
        to generate the documentation. This is because of serious cython bugs
        caused by linetrace feature, particularly the behavior of ``prange``
        becomes unpredictable since it often halts execution of the program.
    """
    # Add cython signatures for sphinx
    # for extension in extensions:
    #     extension.cython_directives = {"embedsignature": True}
    # If environment var "CYTHON_BUILD_IN_SOURCE" exists, cython builds *.c
    # files in the source code, otherwise in "/build" directory
    cython_build_in_source = os.environ.get('CYTHON_BUILD_IN_SOURCE', None)
    # If this package is build for the documentation, define the environment
    # variable "CYTHON_BUILD_FOR_DOC". By doing so, two things happen:
    # 1. The cython source will be generated in source (not in build directory)
    # 2. The "linetrace" is added to the cython's compiler directives.
    cython_build_for_doc = os.environ.get('CYTHON_BUILD_FOR_DOC', None)
    # Build in source or out of source
    if bool(cython_build_in_source) or bool(cython_build_for_doc):
        cython_build_dir = None    # builds *.c in source alongside *.pyx files
    else:
        cython_build_dir = 'build'
    # Compiler directives (disable runtime checks for speed; embed signatures
    # so sphinx can document the compiled functions)
    compiler_derivatives = {
        'boundscheck': False,
        'cdivision': True,
        'wraparound': False,
        'nonecheck': False,
        'embedsignature': True,
    }
    # Build for doc or not
    if bool(cython_build_for_doc):
        compiler_derivatives['linetrace'] = True
    # Cythonize
    cythonized_extensions = cythonize(
        extensions,
        build_dir=cython_build_dir,
        include_path=[numpy.get_include(), "."],
        language_level="3",
        nthreads=multiprocessing.cpu_count(),
        compiler_directives=compiler_derivatives
    )
    return cythonized_extensions
# ====
# Main
# ====
def main(argv):
    """Configure and run ``setuptools.setup`` for the glearn package.

    Reads the version, author, requirements and readme from the repository,
    cythonizes the extension modules, then calls ``setuptools.setup``.

    :param argv: command-line arguments; setuptools reads ``sys.argv``
        itself, so this parameter is accepted only for script-style calls.
    """
    directory = os.path.dirname(os.path.realpath(__file__))
    package_name = "glearn"

    # Version: execute __version__.py in an isolated namespace instead of
    # importing the (possibly not yet built) package.
    # Fix: use context managers so file handles are always closed.
    version_dummy = {}
    version_file = join(directory, package_name, '__version__.py')
    with open(version_file, 'r') as f:
        exec(f.read(), version_dummy)
    version = version_dummy['__version__']
    del version_dummy

    # Author
    author_file = join(directory, 'AUTHORS.txt')
    with open(author_file, 'r') as f:
        author = f.read().rstrip()

    # Requirements (one requirement per line)
    requirements_filename = join(directory, "requirements.txt")
    with open(requirements_filename, 'r') as requirements_file:
        requirements = [i.strip() for i in requirements_file.readlines()]

    # ReadMe
    readme_file = join(directory, 'README.rst')
    with open(readme_file, 'r') as f:
        long_description = f.read()

    # Cython extensions
    extensions = []
    extensions.append(create_extension(package_name, 'kernels'))
    extensions.append(create_extension(package_name, '_correlation'))

    # Cythonize
    external_modules = cythonize_extensions(extensions)

    # Description
    description = 'Gaussian Process for Machine Learning'

    # URLs
    url = 'https://github.com/ameli/glearn'
    download_url = url + '/archive/main.zip'
    documentation_url = url + '/blob/main/README.rst'
    tracker_url = url + '/issues'

    # Inputs to setup
    metadata = dict(
        name=package_name,
        version=version,
        author=author,
        author_email='<EMAIL>',
        description=description,
        long_description=long_description,
        long_description_content_type='text/x-rst',
        keywords="""gaussian-process gaussian-process-regression
        machine-learning""",
        url=url,
        download_url=download_url,
        project_urls={
            "Documentation": documentation_url,
            "Source": url,
            "Tracker": tracker_url,
        },
        platforms=['Linux', 'OSX', 'Windows'],
        packages=setuptools.find_packages(exclude=[
            'tests.*',
            'tests',
            'examples.*',
            'examples']
        ),
        ext_modules=external_modules,
        include_dirs=[numpy.get_include()],
        install_requires=requirements,
        python_requires='>=3.6',
        setup_requires=[
            'setuptools',
            'wheel',
            'numpy>1.11',
            'scipy>=1.5',
            'special_functions',
            'cython',
            'pytest-runner'],
        tests_require=[
            'pytest',
            'pytest-cov'],
        include_package_data=True,
        cmdclass={'build_ext': CustomBuildExtension},
        zip_safe=False,  # False: package can be "cimported" by another package
        extras_require={
            'test': [
                'pytest-cov',
                'codecov'
            ],
            'docs': [
                'sphinx',
                'sphinx-math-dollar',
                'sphinx-toggleprompt',
                'sphinx_rtd_theme',
                'graphviz',
                'sphinx-automodapi',
            ]
        },
        classifiers=[
            'Programming Language :: Cython',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Environment :: GPU :: NVIDIA CUDA',
            'License :: OSI Approved :: BSD License',
            'Operating System :: POSIX :: Linux',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: MacOS',
            'Natural Language :: English',
            'Intended Audience :: Science/Research',
            'Intended Audience :: Developers',
            'Topic :: Software Development',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Scientific/Engineering',
            'Topic :: Scientific/Engineering :: Mathematics',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
        ],
    )

    # Setup
    setuptools.setup(**metadata)
# =============
# script's main
# =============
# Run setup only when executed as a script (not when imported).
if __name__ == "__main__":
    main(sys.argv)
| 2.015625 | 2 |
h2o-py/tests/testdir_munging/binop/pyunit_mod.py | ahmedengu/h2o-3 | 6,098 | 12769055 | <gh_stars>1000+
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def frame_as_list():
    """Exercise the modulo operator on whole H2O frames and single columns."""
    prostate_frame = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))
    (prostate_frame % 10).show()
    (prostate_frame[4] % 10).show()
    airlines_frame = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
    (airlines_frame["CRSArrTime"] % 100).show()
# Run through the H2O standalone harness when executed directly; otherwise
# run inline (e.g. when imported by a test collector).
if __name__ == "__main__":
    pyunit_utils.standalone_test(frame_as_list)
else:
    frame_as_list()
| 2.125 | 2 |
models.py | Pycomet/music-bot | 0 | 12769056 | from dataclasses import dataclass
@dataclass
class User:
    """A user record."""
    id: int  # unique numeric identifier (required)
    name: str = ""  # display name; empty when unknown
@dataclass
class Order:
    """A purchase order for a single product."""
    product: str  # name of the ordered product
    price: float  # fix: the original declared ``price: float`` twice; duplicate removed
    description: str
    url: str
    data: str
@dataclass
class Product:
    """A product listing."""
    name: str
    price: float
    description: str
    url: str
| 2.78125 | 3 |
src/ralph_assets/others.py | mkurek/ralph_assets | 0 | 12769057 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.utils.encoding import smart_str
from ralph_assets.models import Asset, Licence
from ralph_assets.models_assets import MODE2ASSET_TYPE
# ORM lookup paths exported for each asset row.
ASSETS_COLUMNS = [
    'id',
    'niw',
    'barcode',
    'sn',
    'model__category__name',
    'model__manufacturer__name',
    'model__name',
    'user__username',
    'user__first_name',
    'user__last_name',
    'owner__username',
    'owner__first_name',
    'owner__last_name',
    'status',
    'service_name__name',
    'property_of',
    'warehouse__name',
    'invoice_date',
    'invoice_no',
    'region__name',
]

# Columns describing a licence itself.
LICENCES_COLUMNS = [
    'niw',
    'software_category',
    'number_bought',
    'price',
    'invoice_date',
    'invoice_no',
]

# Columns of an asset that a licence is assigned to.
LICENCES_ASSETS_COLUMNS = [
    'id',
    'barcode',
    'niw',
    'user__username',
    'user__first_name',
    'user__last_name',
    'owner__username',
    'owner__first_name',
    'owner__last_name',
    'region__name',
]

# Columns of a user that a licence is assigned to.
LICENCES_USERS_COLUMNS = [
    'username',
    'first_name',
    'last_name',
]
def get_licences_rows(filter_type='all', only_assigned=False):
    """Yield report rows describing licences and their asset/user assignments.

    The first yielded row is the header. Each licence then yields its own
    row, followed by one row per assigned asset and one row per assigned
    user.

    :param filter_type: 'all' or a key of ``MODE2ASSET_TYPE`` used to
        restrict the queryset to one asset type.
    :param only_assigned: when True, the bare licence row is yielded only
        for licences with no asset/user assignments.
    """
    if filter_type == 'all':
        queryset = Licence.objects.all()
    else:
        queryset = Licence.objects.filter(
            asset_type=MODE2ASSET_TYPE[filter_type]
        )
    # Header row.
    yield (
        LICENCES_COLUMNS +
        LICENCES_ASSETS_COLUMNS +
        LICENCES_USERS_COLUMNS +
        ['single_cost']
    )
    fill_empty_assets = [''] * len(LICENCES_ASSETS_COLUMNS)
    fill_empty_licences = [''] * len(LICENCES_USERS_COLUMNS)
    for licence in queryset:
        # Fix: removed dead ``row = []`` assignments that were immediately
        # overwritten (here and in the two loops below).
        row = [str(getattr(licence, column)) for column in LICENCES_COLUMNS]
        base_row = row
        row = row + fill_empty_assets + fill_empty_licences
        if only_assigned:
            if not(licence.assets.exists() or licence.users.exists()):
                yield row
        else:
            yield row
        if licence.number_bought > 0 and licence.price:
            single_licence_cost = str(licence.price / licence.number_bought)
        else:
            single_licence_cost = ''
        for asset in licence.assets.all().values(*LICENCES_ASSETS_COLUMNS):
            row = [
                smart_str(
                    asset.get(column),
                ) for column in LICENCES_ASSETS_COLUMNS
            ]
            # NOTE(review): this pads with ``fill_empty_assets`` even though
            # the asset columns are already filled, so asset rows come out
            # wider than the header -- looks like a bug, but the output
            # format is kept as-is here. TODO confirm with consumers.
            yield base_row + row + fill_empty_assets + fill_empty_licences
        for user in licence.users.all().values(*LICENCES_USERS_COLUMNS):
            row = [
                smart_str(
                    user.get(column),
                ) for column in LICENCES_USERS_COLUMNS
            ]
            yield base_row + fill_empty_assets + row + [single_licence_cost]
def get_assets_rows(filter_type='all'):
    """Yield report rows for assets, starting with a header row.

    :param filter_type: 'all' or a key of ``MODE2ASSET_TYPE`` used to
        restrict the queryset to one asset type.
    """
    if filter_type == 'all':
        queryset = Asset.objects.all().values(*ASSETS_COLUMNS)
    else:
        queryset = Asset.objects.filter(
            type=MODE2ASSET_TYPE[filter_type]
        ).values(*ASSETS_COLUMNS)
    yield ASSETS_COLUMNS
    for asset in queryset:
        # Fix: removed dead ``row = []`` assignment that was immediately
        # overwritten; yield the row directly.
        yield [asset.get(column) for column in ASSETS_COLUMNS]
| 1.90625 | 2 |
examples/test_simple.py | VStoilovskyi/agent-python-pytest | 85 | 12769058 | <gh_stars>10-100
"""Simple example test."""
def test_simple():
    """Trivially passing smoke test for the reporting agent."""
    assert 1 == 1
| 1.328125 | 1 |
faker/providers/company.py | kaflesudip/faker | 1 | 12769059 | <reponame>kaflesudip/faker
# coding=utf-8
from __future__ import unicode_literals
from . import BaseProvider
class Provider(BaseProvider):
    """Faker provider generating company names such as 'Acme Ltd'."""

    formats = ['{{last_name}} {{company_suffix}}', ]

    company_suffixes = ['Ltd', ]

    def company(self):
        """Return a randomly generated company name.

        :example 'Acme Ltd'
        """
        # Pick a format at random and let the generator expand its tokens.
        return self.generator.parse(self.random_element(self.formats))

    @classmethod
    def company_suffix(cls):
        """Return a random company suffix.

        :example 'Ltd'
        """
        return cls.random_element(cls.company_suffixes)
| 2.609375 | 3 |
change_detection_pytorch/utils/lr_scheduler.py | yjt2018/change_detection.pytorch | 99 | 12769060 | <gh_stars>10-100
import warnings
from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler
from torch.optim.optimizer import Optimizer
__all__ = ['GradualWarmupScheduler', 'PolyScheduler']
class GradualWarmupScheduler(_LRScheduler):
    """https://github.com/ildoonet/pytorch-gradual-warmup-lr
    Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """

    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            raise ValueError('multiplier should be greater thant or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Becomes True once warm-up ends and `after_scheduler` takes over.
        self.finished = False
        super(GradualWarmupScheduler, self).__init__(optimizer)

    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    # Hand over to the follow-up scheduler: its base LRs
                    # become the fully warmed-up (multiplied) rates.
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_last_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]

        if self.multiplier == 1.0:
            # Linear ramp from 0 up to base_lr.
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            # Linear ramp from base_lr up to base_lr * multiplier.
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]

    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1  # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
            # During warm-up, write the learning rate into the optimizer
            # directly; ReduceLROnPlateau has no base_lrs handover.
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                # Shift the epoch so the follow-up scheduler starts at 0.
                self.after_scheduler.step(metrics, epoch - self.total_epoch)

    def step(self, epoch=None, metrics=None):
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    self.after_scheduler.step(epoch - self.total_epoch)
                self._last_lr = self.after_scheduler.get_last_lr()
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            # ReduceLROnPlateau needs the metric value, so it is handled
            # by the dedicated helper above.
            self.step_ReduceLROnPlateau(metrics, epoch)
class PolyScheduler(_LRScheduler):
    r"""Decays the learning rate of each parameter group using a polynomial LR scheduler.
    When last_epoch=-1, sets initial lr as lr.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        power (float): Polynomial factor of learning rate decay.
        total_steps (int): The total number of steps in the cycle. Note that
            if a value is not provided here, then it must be inferred by providing
            a value for epochs and steps_per_epoch.
            Default: None
        epochs (int): The number of epochs to train for. This is used along
            with steps_per_epoch in order to infer the total number of steps in the cycle
            if a value for total_steps is not provided.
            Default: None
        steps_per_epoch (int): The number of steps per epoch to train for. This is
            used along with epochs in order to infer the total number of steps in the
            cycle if a value for total_steps is not provided.
            Default: None
        by_epoch (bool): If ``True``, the learning rate will be updated with the epoch
            and `steps_per_epoch` and `total_steps` will be ignored. If ``False``,
            the learning rate will be updated with the batch, you must define either
            `total_steps` or (`epochs` and `steps_per_epoch`).
            Default: ``False``.
        min_lr (float or list): A scalar or a list of scalars. A
            lower bound on the learning rate of all param groups
            or each group respectively. Default: 0.
        last_epoch (int): The index of the last batch. This parameter is used when
            resuming a training job. Since `step()` should be invoked after each
            batch instead of after each epoch, this number represents the total
            number of *batches* computed, not the total number of epochs computed.
            When last_epoch=-1, the schedule is started from the beginning.
            Default: -1
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    Example:
        >>> data_loader = torch.utils.data.DataLoader(...)
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> scheduler = torch.optim.lr_scheduler.PolyScheduler(optimizer, power=0.9, steps_per_epoch=len(data_loader), epochs=10)
        >>> for epoch in range(10):
        >>>     for batch in data_loader:
        >>>         train_batch(...)
        >>>         scheduler.step()
        OR
        >>> data_loader = torch.utils.data.DataLoader(...)
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> scheduler = torch.optim.lr_scheduler.PolyScheduler(optimizer, power=0.9, epochs=10, by_epoch=True)
        >>> for epoch in range(10):
        >>>     train_epoch(...)
        >>>     scheduler.step()
    https://github.com/likyoo/change_detection.pytorch/blob/main/change_detection_pytorch/utils/lr_scheduler.py
    """

    def __init__(self,
                 optimizer,
                 power=1.0,
                 total_steps=None,
                 epochs=None,
                 steps_per_epoch=None,
                 by_epoch=False,
                 min_lr=0,
                 last_epoch=-1,
                 verbose=False):
        # Validate optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        self.power = power
        self.by_epoch = by_epoch
        self.min_lr = min_lr
        # Validate total_steps
        if by_epoch:
            if epochs <= 0 or not isinstance(epochs, int):
                raise ValueError("Expected positive integer epochs, but got {}".format(epochs))
            if steps_per_epoch is not None or total_steps is not None:
                warnings.warn("`steps_per_epoch` and `total_steps` will be ignored if `by_epoch` is True, "
                              "please use `epochs`.", UserWarning)
            # One scheduler step per epoch in by_epoch mode.
            self.total_steps = epochs
        elif total_steps is None and epochs is None and steps_per_epoch is None:
            raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
        elif total_steps is not None:
            if total_steps <= 0 or not isinstance(total_steps, int):
                raise ValueError("Expected positive integer total_steps, but got {}".format(total_steps))
            self.total_steps = total_steps
        else:
            if epochs <= 0 or not isinstance(epochs, int):
                raise ValueError("Expected positive integer epochs, but got {}".format(epochs))
            if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
                raise ValueError("Expected positive integer steps_per_epoch, but got {}".format(steps_per_epoch))
            # Total steps inferred from epochs x steps_per_epoch.
            self.total_steps = epochs * steps_per_epoch
        super(PolyScheduler, self).__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        step_num = self.last_epoch
        if step_num > self.total_steps:
            raise ValueError("Tried to step {} times. The specified number of total steps is {}"
                             .format(step_num + 1, self.total_steps))
        if step_num == 0:
            return self.base_lrs
        # Polynomial decay factor; 1 at step 0, 0 at the final step.
        coeff = (1 - step_num / self.total_steps) ** self.power
        return [(base_lr - self.min_lr) * coeff + self.min_lr
                for base_lr in self.base_lrs]
if __name__ == '__main__':
    # https://github.com/ildoonet/pytorch-gradual-warmup-lr
    # import torch
    # from torch.optim.lr_scheduler import StepLR
    # from torch.optim.sgd import SGD
    #
    # model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
    # optim = SGD(model, 0.1)
    #
    # # scheduler_warmup is chained with schduler_steplr
    # scheduler_steplr = StepLR(optim, step_size=10, gamma=0.1)
    # scheduler_warmup = GradualWarmupScheduler(optim, multiplier=1, total_epoch=5, after_scheduler=scheduler_steplr)
    #
    # # this zero gradient update is needed to avoid a warning message, issue #8.
    # optim.zero_grad()
    # optim.step()
    #
    # for epoch in range(1, 20):
    #     scheduler_warmup.step(epoch)
    #     print(epoch, optim.param_groups[0]['lr'])
    #
    #     optim.step()  # backward pass (update network)

    # Demo: plot the PolyScheduler decay curve over EPOCH x LEN_DATA steps.
    import matplotlib.pyplot as plt
    import torch

    EPOCH = 10
    LEN_DATA = 10  # number of batches per epoch in this toy demo

    model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
    optimizer = torch.optim.SGD(params=model, lr=0.1)
    scheduler = PolyScheduler(optimizer, power=0.9, min_lr=1e-4, epochs=EPOCH, steps_per_epoch=LEN_DATA, by_epoch=False)
    plt.figure()
    x = list(range(EPOCH*LEN_DATA))
    y = []
    for epoch in range(EPOCH):
        for batch in range(LEN_DATA):
            print(epoch, 'lr={:.6f}'.format(scheduler.get_last_lr()[0]))
            y.append(scheduler.get_last_lr()[0])
            scheduler.step()
    plt.plot(x, y)
    plt.show()
| 2.25 | 2 |
paper/plot_addmul_performance.py | RobertCsordas/modules | 22 | 12769061 | #!/usr/bin/env python3
from typing import List, Dict
import lib
from lib import StatTracker
from lib.common import group, calc_stat
import collections
import matplotlib.pyplot as plt
import os
# Ordered mapping: plot/table label -> stats-key prefix in the run data.
# "$\neg$" entries come from the inverse-mask test keys.
plots = collections.OrderedDict()
plots["Full"] = "analyzer/baseline/validation/"
plots["$+$"] = "analyzer/add/validation/"
plots["$\\neg +$"] = "inverse_mask_test/add/"
plots["$*$"] = "analyzer/mul/validation/"
plots["$\\neg *$"] = "inverse_mask_test/mul/"
# Column labels and the two operations measured for each group.
names = list(plots.keys())
ops = ["add","mul"]
def plot_both(ff, rnn):
    """Plot add/mul accuracies for FNN and RNN runs and print a LaTeX table.

    Saves a grouped bar chart to ``out/admmul_performance.pdf`` and prints
    the corresponding LaTeX ``tabular`` rows to stdout.

    :param ff: grouped feedforward runs (as returned by ``group``).
    :param rnn: grouped RNN runs (as returned by ``group``).
    """
    # Accuracy stats keyed by "<prefix><op>/accuracy" for each group.
    ff_stats = calc_stat({"a": ff}, lambda k: (k.startswith("analyzer/") and k.endswith("/accuracy") and '/validation/' in k) or (k.startswith("inverse_mask_test/") and k.endswith("/accuracy")))["a"]
    rnn_stats = calc_stat({"a": rnn}, lambda k: (k.startswith("analyzer/") and k.endswith("/accuracy") and '/validation/' in k) or (k.startswith("inverse_mask_test/") and k.endswith("/accuracy")))["a"]

    fig = plt.figure(figsize=[6, 1.6])
    for t in range(2):
        this_ff_stats = [ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
        means_ff = [s.mean * 100 for s in this_ff_stats]
        std_ff = [s.std * 100 for s in this_ff_stats]
        plt.bar([5.5 * r + t * 2.5 for r in range(len(names))], means_ff, yerr=std_ff, align='center')

    for t in range(2):
        this_rnn_stats = [rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
        means_rnn = [s.mean * 100 for s in this_rnn_stats]
        std_rnn = [s.std * 100 for s in this_rnn_stats]
        plt.bar([5.5 * r + 1 + t * 2.5 for r in range(len(names))], means_rnn, yerr=std_rnn, align='center')

    plt.xticks([5.5 * r + 1.75 for r in range(len(names))], names)
    plt.ylabel("Accuracy [\\%]")
    plt.legend(["FNN $+$", "FNN $*$", "RNN $+$", "RNN $*$"])

    fname = "out/admmul_performance.pdf"
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    fig.savefig(fname, bbox_inches='tight')

    # LaTeX table with one line per (architecture, operation pair).
    print("\\begin{tabular}{ll|c|cc|cc}")
    print("\\toprule")
    print(" & ".join(["", ""] + names) + " \\\\")
    print("\\midrule")
    row = ["\\multirow{2}{*}{FNN}"]
    for t in range(2):
        this_stats = [ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
        row.append(f"Pair {t}")
        for m, s in zip([s.mean * 100 for s in this_stats], [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \pm {s:.1f}$")
    print(" & ".join(row) + " \\\\")
    # Fix: removed two dead ``row = [""]`` assignments that were never used.
    print("\\midrule")
    row = ["\\multirow{2}{*}{LSTM}"]
    for t in range(2):
        this_stats = [rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
        row.append(f"Pair {t}")
        for m, s in zip([s.mean * 100 for s in this_stats], [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \pm {s:.1f}$")
    print(" & ".join(row) + " \\\\")
    print("\\bottomrule")
    print("\end{tabular}")
# Load the experiment runs, group them by the relevant hyperparameter, and
# plot the selected configurations against each other.
rnn_runs = lib.get_runs(["addmul_rnn"])
feedforward_runs = lib.get_runs(["addmul_feedforward_big"])
feedforward_runs = group(feedforward_runs, ["layer_sizes"])
rnn_runs = group(rnn_runs, ["tuple.mode"])
plot_both(feedforward_runs["layer_sizes_2000,2000,2000,2000"], rnn_runs["tuple.mode_together"])
| 2.53125 | 3 |
backend/coreapp/serializers.py | kiwi515/decomp.me | 1 | 12769062 | from rest_framework import serializers
from typing import Optional, TYPE_CHECKING
from .models import Profile, Scratch
from .github import GitHubUser
from .middleware import Request
def serialize_profile(request: Request, profile: Profile):
    """Build a JSON-serializable dict describing *profile*.

    Anonymous profiles (no attached user) get a minimal payload; for
    registered users, GitHub details are merged in when available.
    """
    if profile.user is None:
        return {
            "is_you": profile == request.profile,
            "is_anonymous": True,
        }

    user = profile.user
    github = GitHubUser.objects.filter(user=user).first()
    details = github.details() if github else None
    return {
        "is_you": user == request.user,
        "is_anonymous": False,
        "id": user.id,
        "username": user.username,
        "email": user.email,
        # Fall back to the local username when no GitHub profile is linked.
        "name": details.name if details else user.username,
        "avatar_url": details.avatar_url if details else None,
        "github_api_url": details.url if details else None,
        "github_html_url": details.html_url if details else None,
    }
# The subscripted generic RelatedField[...] exists only for type checkers
# (presumably via DRF type stubs -- TODO confirm); at runtime the plain
# class is used.
if TYPE_CHECKING:
    ProfileFieldBaseClass = serializers.RelatedField[Profile, str, str]
else:
    ProfileFieldBaseClass = serializers.RelatedField
class ProfileField(ProfileFieldBaseClass):
    """Read-only serializer field rendering a Profile via serialize_profile."""

    def to_representation(self, profile: Profile):
        # Delegate to the shared profile serializer; the current request is
        # needed to compute the "is_you" flag.
        return serialize_profile(self.context["request"], profile)
class ScratchCreateSerializer(serializers.Serializer[None]):
    """Validates the request payload for creating a new scratch."""
    compiler = serializers.CharField(allow_blank=True, required=True)
    platform = serializers.CharField(allow_blank=True, required=False)
    compiler_flags = serializers.CharField(allow_blank=True, required=False)
    source_code = serializers.CharField(allow_blank=True, required=False)
    target_asm = serializers.CharField(allow_blank=True)
    # TODO: `context` should be renamed; it conflicts with Field.context
    context = serializers.CharField(allow_blank=True) # type: ignore
    diff_label = serializers.CharField(allow_blank=True, required=False)
class ScratchSerializer(serializers.ModelSerializer[Scratch]):
    """Serializes a Scratch without owner/parent metadata (used for create)."""
    class Meta:
        model = Scratch
        fields = ["slug", "name", "description", "compiler", "platform", "compiler_flags", "target_assembly", "source_code", "context", "diff_label", "score", "max_score"]
# XXX: ideally we would just use ScratchSerializer, but adding owner and parent breaks creation
class ScratchWithMetadataSerializer(serializers.ModelSerializer[Scratch]):
    """Read serializer for scratches, including owner and parent metadata."""
    owner = ProfileField(read_only=True)
    # Parent is exposed as a hyperlink to the parent scratch's detail view.
    parent = serializers.HyperlinkedRelatedField( # type: ignore
        read_only=True,
        view_name="scratch-detail",
        lookup_field="slug",
    )
    class Meta:
        model = Scratch
        fields = ["slug", "name", "description", "compiler", "platform", "compiler_flags", "source_code", "context", "owner", "parent", "diff_label", "score", "max_score"]
| 2.171875 | 2 |
spexxy/data/__init__.py | thusser/spexxy | 4 | 12769063 | from .filter import Filter
from .fitsspectrum import FitsSpectrum
from .isochrone import Isochrone, MultiIsochrone
from .losvd import LOSVD
from .lsf import LSF, AnalyticalLSF, EmpiricalLSF
from .resultsfits import ResultsFITS
from .spectrum import Spectrum, SpectrumFits, SpectrumFitsHDU, SpectrumBinTableFITS, SpectrumHiResFITS, SpectrumAscii
| 0.855469 | 1 |
video-streaming/video_streaming/core/constants/status.py | mojtaba-arvin/video-service | 9 | 12769064 | <filename>video-streaming/video_streaming/core/constants/status.py
__all__ = [
'PrimaryStatus',
'InputStatus',
'OutputStatus',
'StopReason'
]
class PrimaryStatus:
    """Overall (job-level) status constants for a video-processing job."""

    QUEUING_CHECKS = "QUEUING_CHECKS"

    # one of checks tasks has been started
    CHECKING = "CHECKING"

    # when all first level tasks has been finished
    CHECKS_FINISHED = "CHECKS_FINISHED"

    # It will be set outside of tasks,
    # when CHECKS_FINISHED and not INPUTS_DOWNLOADING
    QUEUING_INPUTS_DOWNLOADING = "QUEUING_INPUTS_DOWNLOADING"

    # when one of checks tasks has been started
    INPUTS_DOWNLOADING = "INPUTS_DOWNLOADING"

    # when downloading all inputs has been finished
    ALL_INPUTS_DOWNLOADED = "ALL_INPUTS_DOWNLOADED"

    # It will be set outside of tasks,
    # when ALL_INPUTS_DOWNLOADED and not QUEUING_OUTPUTS
    QUEUING_OUTPUTS = "QUEUING_OUTPUTS"

    # when one of level3 chains has been started
    OUTPUTS_PROGRESSING = "OUTPUTS_PROGRESSING"

    # when all outputs are finished
    # means total_outputs == (ready_outputs + revoked_outputs + failed_outputs)
    FINISHED = "FINISHED"

    # when all outputs revoked
    REVOKED = "REVOKED"

    # when one of checks has been failed, one of inputs has been failed,
    # or all outputs has been failed
    FAILED = "FAILED"
class InputStatus:
    """Per-input status constants (download and analysis lifecycle)."""

    # for every input
    PREPARATION_DOWNLOAD = "PREPARATION_DOWNLOAD"
    DOWNLOADING = "DOWNLOADING"
    DOWNLOADING_FINISHED = "DOWNLOADING_FINISHED"
    QUEUING_TO_ANALYZE = "QUEUING_TO_ANALYZE"
    ANALYZING = "ANALYZING"
    ANALYZING_FINISHED = "ANALYZING_FINISHED"
    INPUT_REVOKED = "INPUT_REVOKED"
    INPUT_FAILED = "INPUT_FAILED"
class OutputStatus:
    """Per-output status constants (processing and upload lifecycle)."""

    # for every output
    OUTPUT_REVOKED = "OUTPUT_REVOKED"
    OUTPUT_FAILED = "OUTPUT_FAILED"
    PREPARATION_PROCESSING = "PREPARATION_PROCESSING"
    PROCESSING = "PROCESSING"
    PROCESSING_FINISHED = "PROCESSING_FINISHED"

    # It will be set outside of tasks,
    # when PROCESSING_FINISHED and not UPLOADING
    QUEUING_UPLOADING = "QUEUING_UPLOADING"
    UPLOADING = "UPLOADING"
    UPLOADING_FINISHED = "UPLOADING_FINISHED"
class StopReason:
    """Reasons a job, input or output was stopped, revoked or failed."""

    FORCE_REVOKED = "FORCE_REVOKED"
    INTERNAL_ERROR = "INTERNAL_ERROR"

    # CheckInputKeyTask and DownloadInputTask
    INPUT_VIDEO_ON_S3_IS_404_OR_403 = "INPUT_VIDEO_ON_S3_IS_404_OR_403"

    # CheckInputKeyTask
    FAILED_INPUT_KEY_CHECKING = "FAILED_INPUT_KEY_CHECKING"

    # CheckOutputBucketTask
    FAILED_OUTPUT_BUCKET_CHECKING = "FAILED_OUTPUT_BUCKET_CHECKING"
    OUTPUT_BUCKET_ON_S3_IS_404_OR_403 = "OUTPUT_BUCKET_ON_S3_IS_404_OR_403"

    # CheckOutputKeyTask
    FAILED_OUTPUT_KEY_CHECKING = "FAILED_OUTPUT_KEY_CHECKING"
    OUTPUT_KEY_IS_ALREADY_EXIST = "OUTPUT_KEY_IS_ALREADY_EXIST"

    # DownloadInputTask
    DOWNLOADING_FAILED = "DOWNLOADING_FAILED"

    # AnalyzeInputTask
    FAILED_ANALYZE_INPUT = "FAILED_ANALYZE_INPUT"

    # AnalyzeInputTask
    INPUT_VIDEO_CODEC_TYPE_IN_NOT_VIDEO = "INPUT_VIDEO_CODEC_TYPE_IN_NOT_VIDEO"

    # InputsFunnelTask
    AGGREGATE_INPUTS_FAILED = "AGGREGATE_INPUTS_FAILED"

    # CreatePlaylistTask
    FAILED_CREATE_PLAYLIST = "FAILED_CREATE_PLAYLIST"
    INPUT_VIDEO_SIZE_CAN_NOT_BE_ZERO = "INPUT_VIDEO_SIZE_CAN_NOT_BE_ZERO"
    REPRESENTATION_NEEDS_BOTH_SIZE_AND_BITRATE = "REPRESENTATION_NEEDS_BOTH_SIZE_AND_BITRATE"

    # UploadDirectoryTask
    FAILED_UPLOAD_DIRECTORY = "FAILED_UPLOAD_DIRECTORY"

    # GenerateThumbnailTask
    FAILED_GENERATE_THUMBNAIL = "FAILED_GENERATE_THUMBNAIL"

    # UploadFileTask
    FAILED_UPLOAD_FILE = "FAILED_UPLOAD_FILE"

    # AddWatermarkTask
    FAILED_ADD_WATERMARK = "FAILED_ADD_WATERMARK"

    # InputsFunnelTask
    # when job_details not found
    JOB_TIMEOUT = "JOB_TIMEOUT"
| 2.359375 | 2 |
src/appliance-onboarding-script/appliance_setup/pkgs/_az_cli.py | Azure/ArcOnAVS | 0 | 12769065 | <reponame>Azure/ArcOnAVS
#!/usr/bin/python
# This module is wrapper around az cli command execution
# az_cli and az_cli_with_retries are two functions which
# can be used to execute az commands.
import os, subprocess, logging
from ._utils import bytes_to_string
def az_cli (*args):
    """Execute an Azure CLI (``az``) command.

    :param args: command fragments, joined with spaces after ``az``.
    :return: tuple ``(output, status)`` -- ``status`` is 0 on success and 1
        on failure; ``output`` is the decoded stdout on success, the raw
        value (possibly ``None``) otherwise.

    NOTE: the command is run through the shell (``shell=True``); callers
    must not pass untrusted input in ``args``.
    """
    output = None
    try:
        cmd = 'az ' + ' '.join(args)
        # Propagate our debug verbosity to the az CLI itself.
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            cmd += ' --debug'
        logging.debug(f'Executing command {cmd}')
        try:
            output = subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError as err:
            logging.error(bytes_to_string(err.output))
            return output, 1
        output = bytes_to_string(output)
    except Exception as err:
        logging.exception(err)
        return output, 1
    return output, 0
| 2.296875 | 2 |
apps/users/urls.py | lorenz-bienek/drf-saas-starter | 9 | 12769066 | from rest_framework.routers import DefaultRouter
from django.conf.urls import include, url
from .views import UserViewSet
# URL namespace for this app's routes (used by Django's URL reversing).
app_name = 'apps.users'
# # Here the comments and activity is added manually by including them under the detail view.
#
# urlpatterns = [
#
# url(
# regex=r'^$',
# view=UserViewSet.as_view({'get': 'list'}),
# name='user-list'
# ),
#
# url(
# r'^(?P<pk>[\w.@+-]+)/',
# include([
# url(
# regex=r'^$',
# view=UserViewSet.as_view({'get': 'retrieve'}),
# name='user-detail'
# ),
# url(
# regex=r'^comments/$',
# view=UserViewSet.as_view({'get': 'comments', 'post': 'comments'}),
# name='user-comments'
# ),
# url(
# regex=r'^activities/$',
# view=UserViewSet.as_view({'get': 'activities'}),
# name='user-activities'
# ),
# ])
# ),
#
# ]
# If we use a router the comments and activity is added via
# the @detail_route decorator in the CommentsMixin and ActivitiesMixin.
# This means, that nothing has to be changed here.
# With a router, the extra comment/activity routes come from decorators on
# the viewset itself, so only the viewset needs to be registered here.
router = DefaultRouter()
router.register(r'', UserViewSet)
urlpatterns = router.urls
| 2.34375 | 2 |
tests/test_powrap.py | deronnax/powrap | 1 | 12769067 | from pathlib import Path
import pytest
import os
from powrap import powrap
FIXTURE_DIR = Path(__file__).resolve().parent
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "glossary.po",))
def test_fail_on_bad_wrapping(po_file, capsys):
    """A badly wrapped .po file must fail the check and be named on stderr."""
    assert powrap.check_style([po_file]) == 1
    assert str(po_file) in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_succees_on_good_wrapping(po_file, capsys):
    """Correctly wrapped .po files pass the check with nothing on stderr.

    NOTE(review): the name contains a typo ("succees"); renaming would
    change the collected test id, so it is left as-is.
    """
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "invalid_po_file.po",))
def test_msgcat_error(po_file, capsys):
    """An invalid .po file (msgcat error) is skipped rather than reported."""
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", ("non_existent_file.po",))
def test_fileread_error(po_file, capsys):
    """A missing file is skipped rather than reported as badly wrapped."""
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_wrong_msgcat(po_file):
    """powrap must exit with code 127 when msgcat is not on PATH."""
    environ_saved = os.environ["PATH"]
    os.environ["PATH"] = ""
    try:
        with pytest.raises(SystemExit) as sysexit:
            powrap.check_style([po_file])
    finally:
        # Fix: restore PATH in a finally block so that a failing assertion
        # (e.g. no SystemExit raised) does not leave later tests running
        # with an empty PATH.
        os.environ["PATH"] = environ_saved
    assert sysexit.type == SystemExit
    assert sysexit.value.code == 127
| 2.3125 | 2 |
src/patteRNA/ScoringManager.py | AviranLab/patteRNA | 12 | 12769068 | import exrex
import logging
import os
import multiprocessing
import numpy as np
from scipy.stats import genlogistic
from scipy.ndimage.filters import median_filter, uniform_filter1d
from functools import partial
from patteRNA.LBC import LBC
from patteRNA import rnalib, filelib, timelib, misclib, viennalib
from tqdm import tqdm
# NOTE(review): module-level lock, presumably guarding shared output writes
# from worker processes -- confirm against usage in the tasks.
LOCK = multiprocessing.Lock()
logger = logging.getLogger(__name__)
clock = timelib.Clock()
class ScoringManager:
    def __init__(self, model, run_config):
        """Initialize scoring state from a trained model and run options.

        :param model: trained patteRNA model used for scoring.
        :param run_config: dict of run options; keys read here include
            'n_tasks', 'no_vienna' and 'motif'.
        """
        self.model = model
        self.run_config = run_config
        self.mp_tasks = run_config['n_tasks']
        self.mp_pool = None
        self.motifs = []
        self.cscore_dists = None
        self.dataset = None
        self.no_vienna = run_config['no_vienna']
        self.lbc = LBC()
        # Expand the motif expression up front when one was provided.
        if run_config['motif'] is not None:
            self.parse_motifs()
def parse_motifs(self):
expression = self.run_config['motif']
expression = expression.replace('(', r'\(')
expression = expression.replace('.', r'\.')
expression = expression.replace(')', r'\)')
motifs = exrex.generate(expression)
self.motifs = list(filter(rnalib.valid_db, motifs))
def import_data(self, dataset):
self.dataset = dataset
def execute_scoring(self):
# Compile scoring configuration parameters
scoring_config = {'posteriors': self.run_config['posteriors'],
'hdsl': self.run_config['HDSL'],
'spp': self.run_config['SPP'],
'viterbi': self.run_config['viterbi'],
'suppress_nan': True,
'fp_posteriors': os.path.join(self.run_config['output'], 'posteriors.txt'),
'fp_scores_pre': os.path.join(self.run_config['output'], 'scores_pre'),
'fp_scores': os.path.join(self.run_config['output'], 'scores.txt'),
'fp_hdsl': os.path.join(self.run_config['output'], 'hdsl.txt'),
'fp_spp': os.path.join(self.run_config['output'], 'spp.txt'),
'fp_viterbi': os.path.join(self.run_config['output'], 'viterbi.txt'),
'no_cscores': self.run_config['no_cscores'],
'min_cscores': self.run_config['min_cscores'],
'batch_size': self.run_config['batch_size'],
'motifs': self.motifs,
'path': self.run_config['path'],
'context': self.run_config['context'],
'cscore_dists': None,
'no_vienna': self.no_vienna,
'energy': ~np.any([self.no_vienna,
self.run_config['no_cscores'],
not viennalib.vienna_imported]),
'lbc': self.lbc,
'hdsl_params': self.run_config['hdsl_params']}
self.pool_init() # Initialize parallelized pool
# Prepare score distributions for c-score normalization
if not scoring_config['no_cscores']:
logger.info('Sampling null sites for c-score normalization')
clock.tick()
self.cscore_dists = dict.fromkeys(self.motifs)
cscore_batch = self.make_cscore_batch(scoring_config['min_cscores'])
cscore_batch.pre_process(self.model, scoring=True)
with tqdm(total=len(self.motifs),
leave=False,
unit='motif') as pb_samples:
try:
if scoring_config['path']:
path = np.array(list(scoring_config['path']), dtype=int)
else:
path = None
worker = partial(self.sample_worker, path=path, batch=cscore_batch)
samples_pool = self.mp_pool.imap_unordered(worker, self.motifs)
for (motif, samples) in samples_pool:
params = genlogistic.fit(samples)
self.cscore_dists[motif] = genlogistic(c=params[0], loc=params[1], scale=params[2])
pb_samples.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
scoring_config['cscore_dists'] = self.cscore_dists
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
# Begin formal scoring phase by making batches to save on memory
batches = self.make_batches(scoring_config['batch_size'])
n_batches = len(self.dataset.rnas) // scoring_config['batch_size'] + 1 # Number of batches
if self.motifs:
header = "transcript\tstart score c-score BCE MEL Prob(motif) motif path seq\n"
with open(scoring_config['fp_scores_pre'], 'w') as f:
f.write(header)
logger.info("Executing scoring")
clock.tick()
with tqdm(total=n_batches,
leave=False,
unit='batch',
desc=' Overall') as pbar_batches:
# Process batches sequentially
for i, batch in enumerate(batches):
self.pool_init()
batch.pre_process(self.model)
with tqdm(total=len(batch.rnas),
leave=False,
unit="transcript",
desc="Current batch") as pbar_transcripts:
try:
worker = partial(self.score_worker, model=self.model, config=scoring_config)
jobs = self.mp_pool.imap_unordered(worker, batch.rnas.values())
for _ in jobs:
pbar_transcripts.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
batch.clear()
pbar_batches.update()
# Sort score file
if self.motifs:
scores = filelib.read_score_file(scoring_config['fp_scores_pre'])
if not scores:
os.rename(scoring_config['fp_scores_pre'], scoring_config['fp_scores'])
else:
if scoring_config['no_cscores']:
filelib.write_score_file(sorted(scores, key=lambda score: score['score'], reverse=True),
scoring_config['fp_scores'])
else:
if scoring_config['energy']:
filelib.write_score_file(sorted(scores, key=lambda score: score['Prob(motif)'], reverse=True),
scoring_config['fp_scores'])
else:
filelib.write_score_file(sorted(scores, key=lambda score: score['c-score'], reverse=True),
scoring_config['fp_scores'])
os.remove(scoring_config['fp_scores_pre']) # Clean-up
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
@staticmethod
def sample_worker(motif, path, batch):
if path is None:
path = rnalib.dot2states(motif)
scores = []
for transcript in batch.rnas.values():
scores.extend(get_null_scores(transcript, motif, path))
return motif, scores
@staticmethod
def score_worker(transcript, model, config):
model.e_step(transcript) # Apply model to transcripts
outputs = compute_outputs(transcript, model, config)
with LOCK as _:
write_outputs(outputs, config)
def make_cscore_batch(self, min_sample_size):
"""
Scan through RNAs in provided data and determine how many are needed to sufficiently
estimate null distributions for c-score normalization. Return a new Dataset with just
the RNAs to use for score sampling.
Args:
min_sample_size: Minimum number of samples to estimate the null score distribution for a single motif.
Returns:
Dataset of RNAs which is a subset of the provided data and meets the criteria needed for score sampling.
"""
motif_samples = {motif: 0 for motif in self.motifs}
cscore_rnas = []
for rna in self.dataset.rnas.values():
cscore_rnas.append(rna.name)
for motif in self.motifs:
null_sites = count_null_sites(rna, motif)
motif_samples[motif] += null_sites
if np.all([motif_samples[motif] >= min_sample_size for motif in motif_samples]):
break # No more sites needed
return self.dataset.spawn_set(rnas=cscore_rnas)
def make_batches(self, size):
rnas = list(self.dataset.rnas.keys())
while rnas:
rnas_batch = rnas[:size]
rnas[:size] = []
yield self.dataset.spawn_set(rnas=rnas_batch)
def pool_init(self):
self.mp_pool = multiprocessing.Pool(processes=self.mp_tasks,
maxtasksperchild=1000)
def count_null_sites(transcript, motif):
    """Count candidate start positions that are neither sequence-valid nor NaN-masked.

    Lazily populates the transcript's ``valid_sites``/``nan_sites`` caches on
    first use for this motif / motif length.
    """
    motif_length = len(motif)
    if motif not in transcript.valid_sites:
        transcript.find_valid_sites(motif)
    if motif_length not in transcript.nan_sites:
        transcript.find_nan_sites(motif_length)
    # Sites that are valid or NaN are excluded from the null pool
    excluded = transcript.nan_sites[motif_length] | transcript.valid_sites[motif]
    total_sites = transcript.T - motif_length + 1
    return total_sites - len(excluded)
def get_null_scores(transcript, motif, path):
    """Return scores of all constraint-violating ("null") sites for a motif.

    NaN scores (fully unobserved windows) are dropped.
    """
    # Get sites which violate sequence constraints
    n_starts = transcript.T - len(motif) + 1
    invalid_sites = np.where(~np.in1d(range(n_starts), transcript.valid_sites[motif]))[0]
    sampled = []
    for start in invalid_sites:
        entry = score_path(transcript, start, path, motif, None, lbc=False)
        if not np.isnan(entry['score']):
            sampled.append(entry['score'])
    return sampled
def compute_cscores(scores, dists):
    """Attach a c-score to each score dict using its motif's null distribution."""
    for entry in scores:
        apply_cscore(entry, dists[entry['dot-bracket']])
def apply_cscore(score, dist):
    """Set ``score['c-score']`` to -log10 of the null survival probability.

    A survival probability of exactly 0 maps to +inf (score beyond the fitted
    null support); NaN propagates as NaN.

    BUG FIX: ``np.Inf`` replaced with ``np.inf`` — the capitalized alias was
    removed in NumPy 2.0.
    """
    pv = dist.sf(score['score'])
    if pv == 0:
        log_c = np.inf
    elif np.isnan(pv):
        log_c = np.nan
    else:
        log_c = -np.log10(pv)
    score['c-score'] = log_c
def score_path(transcript, start, path, motif, pt, lbc=True, context=40):
    """Score one candidate placement of ``path`` starting at ``start``.

    The raw score is a log-odds along the hidden-state path, combining the
    transcript's ``alpha`` (start state), per-position emission log-ratios and
    ``beta`` (end state) terms. With ``lbc`` enabled, two classifier features
    are added: MEL (energy gain of folding the local sequence with the motif's
    base pairs hard-constrained vs. unconstrained) and BCE (cross-entropy of
    the pairing posteriors against the 0/1 path).

    :param transcript: transcript object carrying alpha/beta/gamma/log_B_ratio arrays
    :param start: 0-based start index of the candidate site
    :param path: numpy array of 0/1 hidden states for the motif
    :param motif: dot-bracket string (used for reporting only here)
    :param pt: motif base-pair table consumed by the constraint builder (may be None when lbc=False)
    :param lbc: compute the MEL/BCE classifier features
    :param context: flanking sequence length used for the local folds
    :returns: dict with raw score, placeholders for c-score/Prob(motif), and site metadata
    """
    m = len(path)
    end = start + m - 1
    bce = np.nan
    mel = np.nan
    # A window with no observations at all cannot be scored
    if np.all(np.isnan(transcript.obs[start:end + 1])):
        score = np.nan
    else:
        score = 0
        score += np.log(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])
        # Interior positions: signed emission log-ratios (sign flips with the state)
        score += np.sum((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])
        score += np.log(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])
        if lbc:
            # Clamp the folding window to the transcript boundaries
            rstart = int(np.max((0, start - context)))
            rend = int(np.min((len(transcript.seq), end + context)))
            start_shift = start - rstart
            hcs = rnalib.compile_motif_constraints(pt[0], pt[1], start_shift)
            lmfe = viennalib.fold(transcript.seq[rstart:rend])
            lcmfe = viennalib.hc_fold(transcript.seq[rstart:rend], hcs=hcs)
            mel = lmfe - lcmfe
            bce = bce_loss(transcript.gamma[1, start:end + 1], path)
    return {'score': score,
            'c-score': None,
            'start': start,
            'transcript': transcript.name,
            'dot-bracket': motif,
            'path': "".join([str(a) for a in path]),
            'BCE': bce,
            'MEL': mel,
            'Prob(motif)': np.nan,
            'seq': transcript.seq[start:start + m]}
def bce_loss(yhat, y):
    """Binary cross-entropy between predicted probabilities and 0/1 labels.

    The 1e-20 epsilon guards against log(0).
    """
    assert len(yhat) == len(y)
    total = 0
    for predicted, label in zip(yhat, y):
        if label == 1:
            total += -label * np.log(predicted + 1e-20)
        else:
            total += -(1 - label) * np.log(1 - predicted + 1e-20)
    return total
def compute_outputs(transcript, model, config):
    """Build every requested textual output for one scored transcript.

    Returns a dict keyed by output type ('viterbi', 'posteriors', 'spp',
    'scores_pre', 'hdsl'); entries the run did not request stay as empty
    strings so ``write_outputs`` can skip them.
    """
    outputs = {'name': transcript.name,
               'viterbi': '',
               'posteriors': '',
               'spp': '',
               'scores_pre': '',
               'hdsl': ''}  # Initialize outputs dictionary
    if config['viterbi']:
        vp = model.viterbi_decoding(transcript)  # Viterbi algorithm
        outputs['viterbi'] = "> {}\n{}\n".format(transcript.name, "".join([str(i) for i in vp]))
    # Posterior pairing probabilities
    if config['posteriors']:
        # Normalize gamma columns to proper posteriors before reporting
        transcript.gamma /= np.sum(transcript.gamma, axis=0)[np.newaxis, :]
        outputs['posteriors'] = "> {}\n{}\n".format(transcript.name,
                                                    " ".join(["{:1.3f}".format(p) for p in transcript.gamma[0, :]]))
    # Smoothed P(paired) measure --> HDSL without augmentation
    if config['spp']:
        spp_tmp = transcript.gamma[1, :]  # Raw pairing probabilities
        spp_tmp = uniform_filter1d(spp_tmp, size=5)  # Local mean
        spp = median_filter(spp_tmp, size=15)  # Local median
        outputs['spp'] = "> {}\n{}\n".format(transcript.name,
                                             " ".join(["{:1.3f}".format(p) for p in spp]))
    if config['motifs']:
        transcript.compute_log_B_ratios()
        scores = []
        for motif in config['motifs']:
            # An explicit --path overrides the state path derived from the motif
            if config['path'] is not None:
                path = np.array(list(config['path']), dtype=int)
            else:
                path = rnalib.dot2states(motif)
            pt = transcript.find_valid_sites(motif)  # Returns motif base pairing list
            scores_tmp = list(map(lambda start: score_path(transcript, start, path, motif, pt, lbc=config['energy']),
                                  transcript.valid_sites[motif]))
            if config['suppress_nan']:
                scores_tmp = list(filter(lambda s: ~np.isnan(s['score']), scores_tmp))
            if config['cscore_dists'] is not None:
                compute_cscores(scores_tmp, config['cscore_dists'])
            scores += scores_tmp
        if config['energy']:
            config['lbc'].apply_classifier(scores)
        outputs['scores_pre'] = format_scores(scores)
        # Hairpin-derived structure level measure
        if config['hdsl']:
            hdsl_tmp = transcript.gamma[1, :]  # Pairing probabilities
            for score in scores:
                # Profile augmentation with hairpin scores
                # NOTE(review): 'c-score' is None when c-scores were not computed;
                # this comparison assumes HDSL is never combined with --no-cscores
                # (None > float raises TypeError on Python 3) — confirm upstream.
                if score['c-score'] > config['hdsl_params'][1]:
                    end = score['start'] + len(score['dot-bracket'])
                    boost = config['hdsl_params'][0] * (score['c-score'] - config['hdsl_params'][1])
                    hdsl_tmp[score['start']:end] += boost
            # Clipping to [0, 1]
            hdsl_tmp[hdsl_tmp < 0] = 0
            hdsl_tmp[hdsl_tmp > 1] = 1
            # Smoothing steps
            hdsl_tmp = uniform_filter1d(hdsl_tmp, size=5)  # Local mean
            hdsl = median_filter(hdsl_tmp, size=15)  # Local median
            outputs['hdsl'] = "> {}\n{}\n".format(transcript.name, " ".join(["{:1.3f}".format(p) for p in hdsl]))
    return outputs
def format_scores(scores):
    """Render score dicts as whitespace-separated lines (1-based start positions)."""
    template = "{} {} {:1.2f} {:1.2f} {:1.2f} {:1.2f} {:1.3g} {} {} {}\n"
    rendered = []
    for entry in scores:
        rendered.append(template.format(entry['transcript'],
                                        entry['start'] + 1,
                                        entry['score'],
                                        entry['c-score'],
                                        entry['BCE'],
                                        entry['MEL'],
                                        entry['Prob(motif)'],
                                        entry['dot-bracket'],
                                        entry['path'],
                                        entry['seq']))
    return "".join(rendered)
def write_outputs(outputs, config):
    """Append each non-empty formatted output to its configured file path."""
    for output_type in ('viterbi', 'posteriors', 'spp', 'scores_pre', 'hdsl'):
        text = outputs[output_type]
        if not text:
            continue
        with open(config['fp_{}'.format(output_type)], 'a') as handle:
            handle.write(text)
| 1.898438 | 2 |
hardhat/recipes/freefont.py | stangelandcl/hardhat | 0 | 12769069 | import os
from .base import GnuRecipe
class FreeFontRecipe(GnuRecipe):
    """Build recipe for the GNU FreeFont TTF collection.

    The release ships as a zip of ready-made ``.ttf`` files, so the usual
    configure/compile steps are no-ops; installation copies the fonts into
    the prefix and post-install refreshes the fontconfig cache.
    """
    def __init__(self, *args, **kwargs):
        super(FreeFontRecipe, self).__init__(*args, **kwargs)
        self.sha256 = '7c85baf1bf82a1a1845d1322112bc6ca' \
                      '982221b484e3b3925022e25b5cae89af'
        self.depends = ['fontconfig', 'unzip']
        self.name = 'freefont'
        self.version = '20120503'
        self.url = 'ftp://ftp.gnu.org/pub/gnu/freefont/' \
                   'freefont-ttf-$version.zip'
        self.install_args = [['install', '-v', '-d', '-m755',
                              '%s/share/fonts/freefont' % self.prefix_dir],
                             ['install', '-v', '-m644', '*.ttf',
                              '%s/share/fonts/freefont' % self.prefix_dir]]
    def extract(self):
        """Unzip the archive and descend into the versioned directory."""
        self.log_dir('extract', self.directory, 'extracting')
        self.extract_args = ['unzip', self.filename, '-d', self.directory]
        self.run_exe(self.extract_args, self.tarball_dir, self.environment)
        self.directory = os.path.join(self.directory,
                                      'freefont-%s' % self.version)
    def configure(self):
        # Nothing to configure: the archive contains pre-built fonts
        pass
    def compile(self):
        # Nothing to compile
        pass
    def post_install(self):
        """Refresh the fontconfig cache so the new fonts become visible."""
        self.log_dir('post-install', self.directory, 'fc-cache')
        self.run_exe(['fc-cache'], self.directory, self.environment)
| 2.265625 | 2 |
python/src/model/structure.py | LaudateCorpus1/scalu | 0 | 12769070 |
import src.frontend.utility.utility as utility
import src.model.universe as universe
class rd_obj():
    """Base object for reference/declare bookkeeping (see ``rd_list``)."""
    def __init__(self):
        self.name = ''
        # True once the object has been explicitly declared (not just referenced)
        self.declared = False
    def declaration_collision(self):
        """Hook invoked when the same name is declared twice; default is a no-op."""
        pass
class rd_list(list):
    """List of named objects supporting reference-before-declaration semantics.

    ``reference`` returns an existing object by name or registers a fresh,
    undeclared one; ``declare`` marks a name as declared exactly once and
    raises on collisions; ``validate`` rejects names that were referenced but
    never declared.
    """
    def setup(self, obj_constructor):
        """Record the default factory for new objects; returns self for chaining."""
        self.obj_constructor = obj_constructor
        return self
    def _build(self, obj_string, declared, constructor_override):
        # Create, register and return a fresh named object
        factory = constructor_override if constructor_override is not None else self.obj_constructor
        fresh = factory()
        fresh.name = obj_string
        fresh.declared = declared
        self.append(fresh)
        return fresh
    def reference(self, obj_string, constructor_override=None):
        """Return the object named ``obj_string``, creating an undeclared one if absent."""
        for existing in self:
            if existing.name == obj_string:
                return existing
        return self._build(obj_string, False, constructor_override)
    def declare(self, obj_string, constructor_override=None):
        """Declare ``obj_string``; a second declaration of the same name raises."""
        for existing in self:
            if existing.name == obj_string:
                if existing.declared:
                    existing.declaration_collision()
                    raise Exception('object collision')
                existing.declared = True
                return existing
        return self._build(obj_string, True, constructor_override)
    def get_object(self, obj_string):
        """Return the registered object named ``obj_string`` or raise."""
        for candidate in self:
            if candidate.name == obj_string:
                return candidate
        raise Exception('object not accessible')
    def validate(self):
        """Raise if any registered object was referenced but never declared."""
        for candidate in self:
            if not candidate.declared:
                raise Exception(candidate.name + ' has not been declared')
class global_object():
    """Top-level container for a compilation: sandboxes, event maps and the universe."""
    def __init__(self):
        self.sandbox = rd_list().setup(sandbox)
        self.maps = map_collection()
        self.universe = universe.universe()
        self.universe.initialize()
    def resolve(self):
        """Check that every referenced sandbox, variable and service was declared.

        FIX: the loop variable was renamed — it previously shadowed the
        module-level ``sandbox`` class inside this method.
        """
        self.sandbox.validate()
        for box in self.sandbox:
            box.variables.validate()
            box.services.validate()
class map_collection():
    """Collection of events with unique bind keys and unique output files."""
    def __init__(self):
        self.maps = list()
    def add(self, event):
        """Add a new event or merge it into an existing one with the same string.

        FIX: the collision checks raise on failure and never return False, so
        the original ``else`` branch after ``if checks:`` was unreachable dead
        code; the checks are now invoked directly (same short-circuit order).
        """
        self.non_colliding_keys(event)
        self.non_colliding_files(event)
        if event.string in [x.string for x in self.maps]:
            old_event = self.return_matching_event(event)
            self.merge_events(old_event, event)
        else:
            self.maps.append(event)
    def return_matching_event(self, event):
        """Return the stored event with the same identifying string, or None."""
        for maps in self.maps:
            if event.string == maps.string:
                return maps
    def non_colliding_keys(self, event):
        """Return True when the event's key is unused; raise otherwise."""
        if event.key is None or event.key not in [x.key for x in self.maps]:
            return True
        else:
            raise Exception('Cannot add event "' + event.string + '" to collection, key "' + event.key + '" already in collection. The same key cannot be bound to multiple events.')
    def non_colliding_files(self, event):
        """Return True when the event's file is unused; raise otherwise."""
        if event.file is None or event.file not in [x.file for x in self.maps]:
            return True
        else:
            raise Exception('Cannot add event "' + event.string + '" to collection, file "' + event.file + '" already in collection. The same file cannot be bound to multiple events.')
    def merge_events(self, old_event, new_event):
        """Merge a duplicate definition: later key/file win, services accumulate."""
        if new_event.key is not None:
            old_event.key = new_event.key
        if new_event.file is not None:
            old_event.file = new_event.file
        old_event.services = old_event.services + new_event.services
class sandbox(rd_obj):
    """A named namespace holding its own variables and services."""
    def __init__(self):
        rd_obj.__init__(self)
        self.variables = rd_list().setup(variable)
        self.services = rd_list().setup(service)
class variable(rd_obj):
    """A named unsigned integer variable with a fixed word size (default 8 bits)."""
    def __init__(self, name=''):
        rd_obj.__init__(self)
        self.name = name
        self.type = 'int'
        self.value = '0'
        self.word_size = '8'
    def set_value(self, value):
        """Set the value after range-checking it against the word size."""
        numeric = int(value)
        if 0 <= numeric < 2 ** int(self.word_size):
            self.value = value
        else:
            raise Exception('illegal value declaration:' + value + ' . Number not within bounds of the word size')
class constant(variable):
    """A literal integer value modeled as a range-checked variable.

    NOTE(review): the literal is passed to ``variable.__init__`` as the *name*
    argument, so a constant's name equals its value string — presumably
    intentional (constants identified by their literal); confirm.
    """
    def __init__(self, value='0'):
        variable.__init__(self, value)
        self.set_value(value)
class service(rd_obj):
    """A named, ordered sequence of statements."""
    def __init__(self):
        rd_obj.__init__(self)
        # Ordered list of statements executed by this service
        self.sequence = list()
        # True for compiler-generated (unnamed) services
        self.is_anonymous = False
class event():
    """A named event that may be bound to at most one key and one output file.

    FIX: ``== None`` comparisons replaced with the idiomatic ``is None``
    identity test (behavior unchanged for None).
    """
    def __init__(self, string):
        self.string = string
        self.key = None
        self.file = None
        self.services = list()
    def add_key(self, key_string):
        """Bind a key to this event; raises if a key is already bound."""
        if self.key is None:
            self.key = key_string
        else:
            raise Exception('event "' + self.string + '" already has key "' + self.key + '". Cannot assign "' + key_string + '" to "' + self.string + '"')
    def add_file(self, file_string):
        """Bind an output file to this event; raises if a file is already bound."""
        if self.file is None:
            self.file = file_string
        else:
            raise Exception('event "' + self.string + '" already has file "' + self.file + '". Cannot assign "' + file_string + '" to "' + self.string + '"')
class statement():
    """Base class for executable statements: an identifier plus arguments."""
    def __init__(self):
        self.identifier = ''
        self.arg = list()
class assignment(statement):
    """Statement assigning a value; ``arg[0]`` holds the source expression.

    FIX: the base ``__init__`` is now invoked so instances always carry the
    ``identifier`` attribute the base class promises (previously absent).
    """
    def __init__(self):
        statement.__init__(self)
        self.arg = [None]
class service_call(statement):
    """Statement invoking a named service.

    FIX: the base ``__init__`` is now invoked so instances always carry the
    ``arg`` attribute the base class promises (previously absent).
    """
    def __init__(self):
        statement.__init__(self)
        self.identifier = ''
class source_call(statement):
    """Statement reading from a source; ``arg[0]`` holds the operand.

    FIX: the base ``__init__`` is now invoked so instances always carry the
    ``identifier`` attribute the base class promises (previously absent).
    """
    def __init__(self):
        statement.__init__(self)
        self.arg = [None]
class if_statement():
    """Conditional branch: runs one of two services depending on ``condition``."""
    def __init__(self):
        self.true_service = None
        self.false_service = None
        self.condition = None
class jump_statement():
    """Computed jump: selects one of ``services`` based on the value of ``var``."""
    def __init__(self):
        self.var = None
        self.services = list()
class operator():
    """Base class for expression operators.

    BUG FIX: the original ``__init__`` bound ``identity``/``output``/``arg``
    as plain locals (no ``self.``), so instances were created without any of
    these attributes. They are now real instance attributes.
    """
    def __init__(self):
        self.identity = ''
        self.output = ''
        self.arg = list()
class unary_operator(operator):
    """Operator taking a single argument."""
    def __init__(self):
        operator.__init__(self)
        self.arg = [None]
class binary_operator(operator):
    """Operator taking two arguments."""
    def __init__(self):
        operator.__init__(self)
        self.arg = [None] * 2
class conditional(operator):
    """Comparison operator used as an ``if_statement`` condition; two arguments."""
    def __init__(self):
        operator.__init__(self)
        self.arg = [None] * 2
# Type predicates used by compiler passes to dispatch on node kind.
# NOTE(review): ``literal_value`` and ``key`` are not defined in this module;
# unless they are provided elsewhere, ``is_literal_value``/``is_key`` raise
# NameError when called — confirm where these names come from.
def is_assignment(arg):
    return isinstance(arg, assignment)
def is_service_call(arg):
    return isinstance(arg, service_call)
def is_operator(arg):
    return isinstance(arg, operator)
def is_unary_operator(arg):
    return isinstance(arg, unary_operator)
def is_binary_operator(arg):
    return isinstance(arg, binary_operator)
def is_variable(arg):
    return isinstance(arg, variable)
def is_constant(arg):
    return isinstance(arg, constant)
def is_literal_value(arg):
    return isinstance(arg, literal_value)
def is_source_call(arg):
    return isinstance(arg, source_call)
def is_key(arg):
    return isinstance(arg, key)
def is_if_statement(arg):
    return isinstance(arg, if_statement)
def is_jump_statement(arg):
    return isinstance(arg, jump_statement)
def is_conditional(arg):
    return isinstance(arg, conditional)
| 2.4375 | 2 |
get_attribute.py | jalexvig/python_learn | 0 | 12769071 | <filename>get_attribute.py
# __getattr__ is not implemented by default.
# __getattribute__ runs for every attribute access, before any lookup on the object.
# __getattr__ runs only when normal lookup fails to find the attribute.
class Foo(object):
    """Demonstrates the attribute-lookup hooks by logging every access."""
    def __getattribute__(self, item):
        # Called for *every* attribute access on an instance
        print('Foo called __getattribute__ on {0}'.format(item))
        return super().__getattribute__(item)
    def __getattr__(self, item):
        # Called only after normal lookup fails. ``object`` defines no
        # __getattr__, so this super() access itself raises AttributeError —
        # which the attribute machinery treats as "attribute missing".
        print('Foo called __getattr__ on {0}'.format(item))
        return super().__getattr__(item)
class Bar(Foo):
    """Subclass that logs, then delegates each hook up the MRO to Foo."""
    def __getattribute__(self, item):
        print('Bar called __getattribute__ on {0}'.format(item))
        return super().__getattribute__(item)
    def __getattr__(self, item):
        print('Bar called __getattr__ on {0}'.format(item))
        return super().__getattr__(item)
if __name__ == '__main__':
    b = Bar()
    # b.x = 4
    # Accessing a missing attribute exercises the full chain:
    # Bar.__getattribute__ -> Foo.__getattribute__ (lookup fails) ->
    # Bar.__getattr__ -> Foo.__getattr__, ultimately raising AttributeError
    # because object provides no __getattr__.
    b.x
bitex/api/REST/response.py | ligggooo/quant2018 | 312 | 12769072 | # Import Third-Party
from requests import Response
class APIResponse(Response):
    """A ``requests.Response`` clone that also carries a formatted JSON payload."""
    def __init__(self, req_response, formatted_json=None):
        # Adopt the wrapped response's entire state in one shot
        self.__dict__.update(req_response.__dict__)
        self._formatted = formatted_json
    @property
    def formatted(self):
        """The pre-formatted JSON representation of this response."""
        return self._formatted
    @formatted.setter
    def formatted(self, value):
        self._formatted = value
if __name__ == '__main__':
    # Smoke test: fetch the BTC/EUR ticker from Kraken and show both the
    # formatted payload and the raw JSON body.
    from bitex import Kraken
    k = Kraken()
    resp = k.ticker('XXBTZEUR')
    print(resp.formatted)
    print(resp.json())
gluetool_modules_framework/infrastructure/mbs.py | testing-farm/gluetool-modules | 0 | 12769073 | # Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import collections
import urllib
import re
import six
from concurrent.futures import ThreadPoolExecutor, wait
import requests
from jq import jq
import koji
import gluetool
from gluetool import GlueError
from gluetool.action import Action
from gluetool.utils import cached_property, normalize_multistring_option, dict_update
from gluetool.log import LoggerMixin, log_dict
# Type annotations
from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set # noqa
from typing_extensions import TypedDict
#: Information about task architectures.
#:
#: :ivar list(str) arches: List of architectures.
TaskArches = NamedTuple('TaskArches', [('arches', List[str])])
#: Information about MBS.
#:
#: :ivar str api_version: MBS API version.
#: :ivar str auth_method: MBS authentication method.
#: :ivar str version: MBS version.
MBSAbout = NamedTuple('MBSAbout', [
    ('api_version', str),
    ('auth_method', str),
    ('version', str)
])
# regular expressions for nvr and nsvc of a module
# NSVC: name:stream:version:context (colon-separated)
NSVC_REGEX = re.compile(r'^([^:]*):([^:]*):([^:]*):([^:]*)$')
# NVR: name-stream-version.context; the stream encodes '-' as '_' (see nsvc_from_nvr)
NVR_REGEX = re.compile(r'^(.*)-([^-]*)-([^\.]*)\.(.*)$')
# Tuple of (name, stream, version, context)
NSVCType = Tuple[str, str, str, str]
# Shape of the build-info JSON returned by the MBS API
BuildInfoType = TypedDict(
    'BuildInfoType',
    {
        'id': int,
        'name': str,
        'stream': str,
        'version': str,
        'context': str,
        'owner': str,
        'scratch': str,
        'modulemd': str,
        'scmurl': str,
    }
)
def nsvc_from_string(nsvc):
    # type: (str) -> NSVCType
    """
    Parse a ``name:stream:version:context`` string into its four components.

    :param: str nsvc: NSVC string.
    :rtype: tuple
    :returns: Tuple of N, S, V, C.
    :raises: gluetool.GlueError if NSVC not valid.
    """
    parsed = NSVC_REGEX.match(nsvc)
    if parsed is None:
        raise gluetool.GlueError("'{}' is not a valid module nsvc".format(nsvc))
    return cast(NSVCType, parsed.groups())
def nsvc_from_nvr(nvr):
    # type: (str) -> NSVCType
    """
    Parse a Brew/Koji-compatible module NVR into an (N, S, V, C) tuple.

    The NVR encoding replaces '-' in the stream with '_', so the stream is
    translated back after matching.

    :param: str nvr: NVR string.
    :rtype: tuple
    :returns: Tuple of N, S, V, C.
    :raises: gluetool.GlueError if NVR not valid.
    """
    parsed = NVR_REGEX.match(nvr)
    if parsed is None:
        raise gluetool.GlueError("'{}' is not a valid module nvr".format(nvr))
    name, stream, version, context = parsed.groups()
    # underscore in stream number must be converted to '-'
    return (name, stream.replace('_', '-'), version, context)
class MBSApi(object):
    """Thin client over the Module Build Service REST API.

    NOTE(review): uses ``params.iterkeys()`` and ``urllib.urlencode`` — Python 2
    idioms; on Python 3 these raise AttributeError. Confirm the supported
    interpreter version.
    """
    def __init__(self, mbs_api_url, mbs_ui_url, module):
        # type: (str, str, gluetool.Module) -> None
        self.mbs_api_url = mbs_api_url
        self.mbs_ui_url = mbs_ui_url
        self.module = module
    @cached_property
    def about(self):
        # type: () -> MBSAbout
        """
        Returns MBS about endpoint as a namedtuple.
        :rtype: MBSAbout
        :returns: MBS about namedtuple with fields api_version, auth_method and version.
        """
        return MBSAbout(**self._get_json('module-build-service/1/about'))
    def _get_json(self, location, params=None):
        # type: (str, Optional[Dict[str, Any]]) -> Any
        """
        Query MBS API endpoint location and return the JSON reply.
        :param str location: API endpoint to query.
        :param dict params: Query parameters
        :rtype: dict
        :returns: JSON output as a dictionary.
        """
        params = params or {}
        url = '{}/{}'.format(self.mbs_api_url, location)
        if params:
            # keep params sorted in the URL - makes testing possible
            sorted_params = collections.OrderedDict([
                (name, params[name]) for name in sorted(params.iterkeys())
            ])
            url = '{}?{}'.format(url, urllib.urlencode(sorted_params))
        self.module.debug('[MBS API]: {}'.format(url))
        # Action wraps the request for tracing/telemetry purposes
        with Action('query MBS API', parent=Action.current_action(), logger=self.module.logger, tags={
            'location': location,
            'params': params
        }):
            try:
                output = requests.get(url).json()
            except Exception:
                raise gluetool.GlueError('Unable to get: {}'.format(url))
        log_dict(self.module.debug, '[MBS API] output', output)
        return output
    def get_build_info_by_id(self, build_id, verbose=False):
        # type: (int, bool) -> BuildInfoType
        """
        Get MBS build information from build ID.
        :param int build_id: MBS build ID.
        :param boolean verbose: Verbose query.
        :rtype: dict
        :returns: JSON output with given build informations.
        """
        params = {'verbose': 1 if verbose else 0}
        return cast(
            BuildInfoType,
            self._get_json('module-build-service/1/module-builds/{}'.format(build_id), params=params)
        )
    def get_build_info_by_nsvc(self, nsvc_tuple, verbose=False):
        # type: (NSVCType, bool) -> BuildInfoType
        """
        Get MBS build information from NSVC tuple.
        :param tuple nsvc_tuple: Build NSVC as a tuple.
        :param boolean verbose: Verbose query.
        :rtype: dict
        :returns: JSON output with given build informations.
        """
        (name, stream, version, context) = nsvc_tuple
        url = 'module-build-service/1/module-builds/'
        params = {
            'name': name,
            'stream': stream,
            'version': version,
            'context': context,
            'verbose': 1 if verbose else 0
        }
        try:
            # The API returns a paginated list; exactly one match is expected
            return cast(BuildInfoType, self._get_json(url, params=params)['items'][0])
        except (IndexError, KeyError):
            raise gluetool.GlueError(
                "Could not find module with nsvc '{}:{}:{}:{}'".format(name, stream, version, context)
            )
    def get_build_ui_url(self, build_id):
        # type: (int) -> str
        """
        Returns URL to the MBS web interface for the given build ID.
        :param int build_id: MBS build ID.
        :rtype: str
        :returns: URL to web interface of the MBS build.
        """
        return '{}/module/{}'.format(self.mbs_ui_url, build_id)
class MBSTask(LoggerMixin, object):
    # Artifact namespace reported by this provider
    ARTIFACT_NAMESPACE = 'redhat-module'
    def __init__(self, module, build_id=None, nsvc=None, nvr=None):
        # type: (MBS, Optional[int], Optional[str], Optional[str]) -> None
        """Fetch build info from MBS and derive all task attributes.

        Exactly one of ``build_id``, ``nsvc`` or ``nvr`` must be given.

        :raises gluetool.GlueError: when zero or more than one identifier is provided.
        """
        super(MBSTask, self).__init__(module.logger)
        self.module = module
        mbs_api = module.mbs_api()
        # Enforce exactly one identifying parameter
        if sum([bool(param) for param in [build_id, nsvc, nvr]]) != 1:
            raise gluetool.GlueError('module must be initialized only from one of build_id, nsvc or nvr')
        if build_id:
            build_info = mbs_api.get_build_info_by_id(build_id, verbose=True)
        if nsvc:
            build_info = mbs_api.get_build_info_by_nsvc(nsvc_from_string(nsvc), verbose=True)
        if nvr:
            build_info = mbs_api.get_build_info_by_nsvc(nsvc_from_nvr(nvr), verbose=True)
        self._build_info = build_info
        self.id = self.dispatch_id = build_info['id']
        self.name = build_info['name']
        self.component = self.name
        self.stream = build_info['stream']
        self.version = build_info['version']
        self.context = build_info['context']
        self.issuer = build_info['owner']
        self.scratch = build_info['scratch']
        self.nsvc = '{}:{}:{}:{}'.format(self.name, self.stream, self.version, self.context)
        self.tags = []  # type: List[str]
        # `nvr` is:
        #   - often used as unique id of artifact (e.g. in mail notifications)
        #   - same as nvr of module in Brew/Koji
        #   - for modules the nvr is diffrent from NSVC, as it is delimited with '-' instead of ':'
        #     and also in case of stream the character '-' is replaced with '_', see:
        #     https://github.com/release-engineering/resultsdb-updater/pull/73#discussion_r235964781
        #   - if build is scratch, the '+' and id is added to the end
        self.nvr = '{}-{}-{}.{}'.format(self.name, self.stream.replace('-', '_'), self.version, self.context)
        if self.scratch:
            self.nvr = '{}+{}'.format(self.nvr, self.id)
        # make devel module nvr available for convenience
        self.devel_nvr = '{}-devel-{}-{}.{}'.format(
            self.name, self.stream.replace('-', '_'), self.version, self.context
        )
        # build tags from brew, only applicable to non-scratch modules, scratch modules do not have metadata in Brew
        if not self.scratch:
            self.tags = [tag['name'] for tag in self.module.shared('koji_session').listTags(self.nvr)]
        # this string identifies component in static config file
        self.component_id = '{}:{}'.format(self.name, self.stream)
        # the target for modules uses platform stream, which nicely reflects the fact for which
        # release the module is built for, similarly to what build target in Brew/Koji does
        self.target = self.platform_stream
        # required API for our modules providing artifacts, we have no destination_tags for modules, use target
        self.destination_tag = self.target
    @cached_property
    def platform_stream(self):
        # type: () -> str
        """
        :rtype: str
        :returns: Platform stream from the modulemd document.
        :raises gluetool.GlueError: when the modulemd carries no platform stream.
        """
        # Platform stream lives under the MBS build-requires metadata
        query = ".data.xmd.mbs.buildrequires.platform.stream"
        platform_stream = jq(query).transform(self._modulemd)
        if not platform_stream:
            raise gluetool.GlueError('Could not detect platform stream in modulemd document')
        # NOTE(review): .encode('ascii') returns bytes on Python 3 — py2-era code; confirm interpreter.
        return cast(str, platform_stream.encode('ascii'))
    @cached_property
    def _modulemd(self):
        # type: () -> Dict[str, Any]
        """
        Returns ``modulemd`` document if available in build info. Describes details of the artifacts
        used to build the module. It is embedded in a form of string, containing the YAML document.
        This function extracts the string and unpacks its YAML-ness into a data structure it represents.
        :rtype: dict
        :returns: ``modulemd`` structure of ``None`` if there's no ``modulemd`` key in the build info.
        :raises gluetool.GlueError: when the build info carries no modulemd document.
        """
        if 'modulemd' not in self._build_info:
            raise gluetool.GlueError('Artifact build info does not include modulemd document')
        # Use "base" loader, to overcome MBS representing some string-like values as numbers,
        # for example "5.30" may be expressed as a number `5.30` which the default parser yields
        # as a number, `5.3`, which is just misleading. "base" parser yields "5.30", that's
        # better. But, it probably treats *all* fields this way, so some fields we're expected
        # to be numbers are suddenly strings...
        modulemd = gluetool.utils.from_yaml(self._build_info['modulemd'], loader_type='base')
        log_dict(self.debug, 'modulemd', modulemd)
        return cast(Dict[str, Any], modulemd)
    @cached_property
    def has_artifacts(self):
        # type: () -> bool
        """Always True — artifact presence is assumed, never verified."""
        # We believe MBS - and Brew behind it keeps artifacts "forever" - or, at least, long enough to matter to us
        # - therefore we don't even bother to check for their presence.
        return True
    @cached_property
    def task_arches(self):
        # type: () -> TaskArches
        """
        :rtype: TaskArches
        :returns: Information about arches the task was building for
        """
        # jq query: collect the arch list of every RPM component
        query = """
              .data.components.rpms
            | .[]
            | .arches
            | .[]
        """
        # Empty modules do not have components
        if 'components' not in self._modulemd['data']:
            return cast(TaskArches, self.module._default_task_arches)
        all_arches = jq(query).transform(self._modulemd, multiple_output=True)
        log_dict(self.debug, 'gathered module arches', all_arches)
        # Apparently, output from jq is unicode string, despite feeding it ascii-encoded. Encode each arch
        # string to ascii before while we're getting rid of duplicates.
        #
        # ``set`` to filter out duplicities, ``list`` to convert the set back to a list of uniq arches,
        # and ``sorted`` to make it easier to grab & read & test.
        arches = sorted(list(set([arch.encode('ascii') for arch in all_arches])))
        log_dict(self.debug, 'unique module arches', arches)
        return TaskArches(arches)
    @cached_property
    def dependencies(self):
        # type: () -> List[str]
        """Sorted list of runtime dependencies as ``name:stream`` strings.

        Reads the first entry of the modulemd ``dependencies`` list.

        :raises gluetool.GlueError: when the modulemd lacks the expected structure.
        """
        dependencies = []
        try:
            requires = self._modulemd['data']['dependencies'][0]['requires']
        except (AttributeError, KeyError) as error:
            raise gluetool.GlueError('Could not detect module dependecies: {}'.format(error))
        for module_name, module_streams in six.iteritems(requires):
            for stream in module_streams:
                dependencies.append('{}:{}'.format(module_name, stream))
        return sorted(dependencies)
    @cached_property
    def url(self):
        # type: () -> str
        """URL of this build in the MBS web UI."""
        return self.module.mbs_api().get_build_ui_url(self.id)
    @cached_property
    def distgit_ref(self):
        # type: () -> Optional[str]
        """
        Distgit ref id from which package has been built or ``None`` if it's impossible to find it.
        :rtype: str
        :returns: Dist-git ref of the build source.
        """
        try:
            # The ref is the URL fragment of scmurl, e.g. https://.../repo#<ref>
            return self._build_info['scmurl'].split('#')[1].encode('ascii')
        except (AttributeError, IndexError):
            self.debug('Distgit ref not found in scmurl: {}'.format(self._build_info['scmurl']))
        return None
    @cached_property
    def dist_git_repository_name(self):
        # type: () -> str
        """Name of the dist-git repository — identical to the component name."""
        return self.component
    @cached_property
    def baseline(self):
        # type: () -> Optional[str]
        """
        Return baseline task NVR if `baseline-method` specified, otherwise return None.
        :rtype: str
        """
        if not self.module.option('baseline-method'):
            return None
        task = cast(MBSTask, self.baseline_task)
        return task.nvr
@cached_property
def baseline_task(self):
    # type: () -> Optional[MBSTask]
    """
    Return baseline task. For documentation of the baseline methods see the module's help.

    :rtype: MBSTask
    :returns: Initialized task for the baseline build or None if baseline not found.
    :raises gluetool.glue.GlueError: if specific build does not exist or no baseline-method specified.
    """
    method = self.module.option('baseline-method')

    if not method:
        raise GlueError("Cannot get baseline because no 'baseline-method' specified")

    if method == 'previous-released-build':
        # step one level up the tag inheritance, then take the latest build there
        previous_tags = self.previous_tags(tags=self.tags)
        if not previous_tags:
            return None
        return self.latest_released(tags=previous_tags)

    if method == 'previous-build':
        return self.latest_released(tags=self._tags_from_map)

    if method == 'specific-build':
        nvr = self.module.option('baseline-nvr')
        try:
            return self.module.tasks(nvrs=[nvr])[1]
        except GlueError:
            raise GlueError("Specific build with nvr '{}' not found".format(nvr))

    # this really should not happen ...
    self.warn("Unknown baseline method '{}'".format(method), sentry=True)
    return None
def previous_tags(self, tags):
    # type: (List[str]) -> List[str]
    """
    Return previous tags according to the inheritance tag hierarchy to the given tags.

    :param str tags: Tags used for checking.
    :rtype: list(str)
    :returns: List of previous tags, empty list if previous tags not found.
    :raises gluetool.glue.GlueError: In case previous tag search cannot be performed.
    """
    session = self.module.shared('koji_session')

    found = []
    for tag in tags:
        if tag == '<no build target available>':
            raise GlueError('Cannot check for previous tag as build target does not exist')
        try:
            # first entry of the full inheritance chain is the direct parent tag
            found.append(session.getFullInheritance(tag)[0]['name'])
        except (KeyError, IndexError, koji.GenericError):
            self.warn("Failed to find inheritance tree for tag '{}'".format(tag), sentry=True)

    return found
def latest_released(self, tags=None):
    # type: (Optional[List[str]]) -> Optional[MBSTask]
    """
    Returns task of the latest module build tagged with the same build target.
    If no builds are found ``None`` is returned.
    In case the build found is the same as this build, the previous build is returned.
    The tags for checking can be overriden with the ``tags`` parameter. First match wins.

    :param list(str) tags: Tags to use for searching.
    :rtype: :py:class:`MBSTask`
    """
    tags = tags or [self.target]
    session = self.module.shared('koji_session')
    for tag in tags:
        try:
            # latest=2: fetch the two newest builds so we can fall back to the previous
            # one when the newest turns out to be this very build
            builds = session.listTagged(tag, None, True, latest=2, package=self.component)
        except koji.GenericError as error:
            self.warn(
                "ignoring error while listing latest builds tagged to '{}': {}".format(tag, error),
                sentry=True
            )
            continue
        if builds:
            break
    else:
        # for/else: reached only when no tag produced any builds (loop never hit ``break``)
        log_dict(self.debug, "no latest builds found for package '{}' on tags".format(self.component), tags)
        return None
    # for scratch builds the latest released package is the latest tagged
    if self.scratch:
        build = builds[0]
    # for non scratch we return the latest released package, in case it is the same, the previously
    # released package
    else:
        if self.nvr != builds[0]['nvr']:
            build = builds[0]
        else:
            build = builds[1] if len(builds) > 1 else None
    # NOTE(review): ``tasks(...)`` returns the module's whole task list; index ``[1]`` presumably
    # picks the newly appended task -- confirm against ``MBS.tasks`` semantics.
    return self.module.tasks(nvrs=[build['nvr']])[1] if build else None
@cached_property
def _tags_from_map(self):
    # type: () -> List[str]
    """
    Unfortunately tags used for looking up baseline builds need to be resolved from a rules
    file due to their specifics.

    Nice examples for this are:

    * rhel-8 module builds, which have ``target`` set to el8.X.Y, i.e. module platform stream,
      but we need to transform it to Brew module tag ``rhel-8.X.Y-modules-candidate`` for correct
      lookup

    :returns: Tags resolved by evaluating the module's ``baseline_tag_map`` rules.
    """
    self.module.require_shared('evaluate_instructions', 'evaluate_rules')

    # use a dictionary which can be altered in _tags_callback
    # (renamed from ``map`` -- the original shadowed the builtin)
    tag_map = {
        'tags': []
    }  # type: Dict[str, List[str]]

    def _tags_callback(instruction, command, argument, context):
        # type: (str, str, List[str], str) -> None
        # Each argument is a rule; evaluating it yields one tag.
        tag_map['tags'] = []
        for arg in argument:
            tag_map['tags'].append(self.module.shared('evaluate_rules', arg, context=context))

    # expose this task to the rules as TASK
    context = dict_update(self.module.shared('eval_context'), {
        'TASK': self
    })

    commands = {
        'tags': _tags_callback,
    }

    self.module.shared(
        'evaluate_instructions', self.module.baseline_tag_map,
        commands=commands, context=context
    )

    log_dict(self.debug, 'Tags from baseline tag map', tag_map['tags'])

    return tag_map['tags']
class MBS(gluetool.Module):
    """
    Provides information about MBS (Module Build Service) artifacts.

    Initializes ``MBSTask`` instances from build IDs, NSVCs or NVRs (given either via
    options or via the ``tasks`` shared function) and exposes them to other modules
    through the ``primary_task``/``tasks``/``mbs_api`` shared functions.
    """

    name = 'mbs'
    description = 'Provides information about MBS (Module Build Service) artifact'
    supported_dryrun_level = gluetool.glue.DryRunLevels.DRY

    options = [
        ('MBS options', {
            'mbs-ui-url': {
                'help': 'URL of mbs ui server.',
                'type': str
            },
            'mbs-api-url': {
                'help': 'URL of mbs api server.',
                'type': str
            }
        }),
        ('Build initialization options', {
            'build-id': {
                'help': 'Initialize build from MBS build ID (default: none).',
                'action': 'append',
                'default': [],
            },
            'nsvc': {
                'help': 'Initialize build from NSVC (default: none).',
                'action': 'append',
                'default': [],
            },
            'nvr': {
                'help': 'Initialize build from NVR (default: none).',
                'action': 'append',
                'default': [],
            },
        }),
        ('Default options', {
            'default-task-arches': {
                'help': 'Default task arches to use in case of empty modules.',
                'action': 'append'
            }
        }),
        ('Baseline options', {
            'baseline-method': {
                'help': 'Method for choosing the baseline package.',
                'choices': ['previous-build', 'specific-build', 'previous-released-build'],
                'metavar': 'METHOD',
            },
            'baseline-nvr': {
                'help': "NVR of the build to use with 'specific-build' baseline method",
            },
            'baseline-tag-map': {
                'help': 'Optional rules providing tags which are used for finding baseline package'
            }
        })
    ]

    required_options = ('mbs-api-url', 'default-task-arches')
    shared_functions = ['primary_task', 'tasks', 'mbs_api']

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(MBS, self).__init__(*args, **kwargs)

        # tasks initialized so far, in initialization order
        self._tasks = []  # type: List[MBSTask]

    @cached_property
    def _default_task_arches(self):
        # type: () -> TaskArches
        # Fallback arches (``default-task-arches`` option) used for modules with no own arches.
        return TaskArches(gluetool.utils.normalize_multistring_option(self.option('default-task-arches')))

    def primary_task(self):
        # type: () -> Optional[MBSTask]
        """
        Returns a `primary` module build, the first build in the list of current nodules.

        :rtype: :py:class:`MbsTask` or None
        :returns: Instance of an object represeting a module buil or None, if no modules are avaiable.
        """
        log_dict(self.debug, 'primary task - current modules', self._tasks)

        return self._tasks[0] if self._tasks else None

    def _init_mbs_builds(self, build_ids=None, nsvcs=None, nvrs=None):
        # type: (Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> None
        """
        Initializes MBS builds in parallel.

        :param list build_ids: List of module build IDs.
        :param list nsvcs: List of module NSVCs.
        :param list nvrs: List of NVRs of a module (compatible with brew/koji).
        :retype: list(MBSTask)
        :returns: List of initialized MBS builds.
        """
        build_ids = build_ids or []
        nsvcs = nsvcs or []
        nvrs = nvrs or []

        current_action = Action.current_action()

        # Our API routines call `Action.current_action` to get parent for their own actions,
        # and since we're spawning threads for our `MBSTask` calls, we need to provide
        # the initial action in each of those threads.
        def _init_trampoline(**kwargs):
            # type: (**Any) -> MBSTask
            Action.set_thread_root(current_action)

            return MBSTask(self, **kwargs)

        with ThreadPoolExecutor(thread_name_prefix="api_thread") as executor:
            # initialized from build IDs
            futures = {
                executor.submit(_init_trampoline, build_id=build_id)
                for build_id in build_ids
            }

            # initialized from NSVCs
            futures.update({
                executor.submit(_init_trampoline, nsvc=nsvc)
                for nsvc in nsvcs
            })

            # initialized from NVRs
            futures.update({
                executor.submit(_init_trampoline, nvr=nvr)
                for nvr in nvrs
            })

            # wait for all initializations to finish, then collect the resulting tasks
            Wait = NamedTuple('Wait', (('done', Set[Any]), ('not_done', Set[Any])))
            wait_result = cast(Wait, wait(futures))

            for future in wait_result.done:
                self._tasks.append(future.result())

    def tasks(self, build_ids=None, nsvcs=None, nvrs=None):
        # type: (Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> List[MBSTask]
        """
        Returns list of module builds available. If any of the additional parameters
        are provided, modules list is extended with them first.

        :param list build_ids: List of module build IDs.
        :param list nsvcs: List of module NSVCs.
        :param list nvrs: List of NVRs of a module (compatible with brew/koji).
        :rtype: list(MBSTask)
        :returns: List of module builds.
        """
        if any([build_ids, nsvcs, nvrs]):
            self._init_mbs_builds(build_ids=build_ids, nsvcs=nsvcs, nvrs=nvrs)

        return self._tasks

    @property
    def eval_context(self):
        # type: () -> Dict[str, Union[str, MBSTask, List[str], List[MBSTask]]]
        # __content__ documents the variables this module contributes to the eval context.
        __content__ = {  # noqa
            'ARTIFACT_TYPE': """
                             Type of the artifact, ``mbs-build`` in the case of ``mbs`` module.
                             """,
            'BUILD_TARGET': """
                            Build target for modules is the platform module stream name (e.g. el8, el8.1.0, etc).
                            """,
            'PRIMARY_TASK': """
                            Primary task, represented as ``MBSTask`` instance.
                            """,
            'TAGS': """
                    Module Brew/Koji build tags.
                    """,
            'TASKS': """
                     List of all tasks known to this module instance.
                     """
        }

        primary_task = self.primary_task()

        if not primary_task:
            self.debug('No primary task available, cannot pass it to eval_context')
            return {}

        return {
            # common for all artifact providers
            'ARTIFACT_TYPE': primary_task.ARTIFACT_NAMESPACE,
            'BUILD_TARGET': primary_task.target,
            'PRIMARY_TASK': primary_task,
            'TAGS': primary_task.tags,
            'TASKS': self.tasks()
        }

    @cached_property
    def _mbs_api(self):
        # type: () -> MBSApi
        # Lazily constructed MBS API client, shared by all tasks of this module.
        return MBSApi(self.option('mbs-api-url'), self.option('mbs-ui-url'), self)

    @cached_property
    def baseline_tag_map(self):
        # type: () -> Any
        # Rules from the ``baseline-tag-map`` YAML file; empty list when not configured.
        if not self.option('baseline-tag-map'):
            return []

        return gluetool.utils.load_yaml(self.option('baseline-tag-map'))

    def mbs_api(self):
        # type: () -> MBSApi
        """
        Returns MBSApi instance.
        """
        return cast(MBSApi, self._mbs_api)

    def execute(self):
        # type: () -> None
        # Connect to MBS, then initialize builds from command-line options (if any).
        self.info(
            "connected to MBS instance '{}' version '{}'".format(
                self.option('mbs-api-url'),
                self.mbs_api().about.version
            )
        )

        # koji/brew is required to get module tags
        self.require_shared('koji_session')

        if any([self.option(opt) for opt in ['build-id', 'nsvc', 'nvr']]):
            self._init_mbs_builds(
                build_ids=normalize_multistring_option(self.option('build-id')),
                nsvcs=normalize_multistring_option(self.option('nsvc')),
                nvrs=normalize_multistring_option(self.option('nvr'))
            )

        for task in self._tasks:
            self.info('Initialized with {}: {} ({})'.format(task.id, task.nsvc, task.url))

            # init baseline build if requested
            if self.option('baseline-method'):
                if task.baseline_task:
                    self.info('Baseline build: {} ({})'.format(task.baseline_task.nvr, task.baseline_task.url))
                else:
                    self.warn('Baseline build was not found')
| 1.742188 | 2 |
map_to_py.py | russhughes/hershey-scripts | 1 | 12769074 | #!/usr/bin/env python3
"""
Convert Hershey data with hmp file to create DrawBot python module file
"""
from struct import pack
import re
from parse import parse
# Module-level glyph tables, populated by hershey_load() and consumed by map_to_py().
vectors = {}        # glyph number -> vector data string
vectors_count = {}  # glyph number -> number of vectors (record length - 1)
vectors_used = {}   # glyph number -> True once a map file has referenced the glyph

def hershey_load(glyph_file_name):
    """
    Load Hershey glyphs from *glyph_file_name* into the module-level tables.

    A line matching the Hershey record header (5-digit glyph number, 3-digit
    length, then vector data) starts a new glyph; any other line is treated as
    a continuation of the previous glyph's vector data.
    """
    global vectors, vectors_count, vectors_used
    vectors = {}
    vectors_count = {}
    vectors_used = {}
    print(glyph_file_name)

    # Read the glyphs file, handling continuation lines.
    header = re.compile('^([0-9 ]{4}[0-9]{1})([0-9 ]{2}[0-9]{1})(.*)$')
    with open(glyph_file_name, "r") as file:
        for raw_line in file:
            record = header.match(raw_line.rstrip())
            if record:
                glyph_num = int(record.group(1))
                vectors[glyph_num] = record.group(3)
                vectors_count[glyph_num] = int(record.group(2)) - 1
                vectors_used[glyph_num] = False
            else:
                # continuation of the previous glyph's vector data
                vectors[glyph_num] += raw_line.rstrip()
def map_to_py(map_file_name, font_file_name):
    """
    Convert Hershey data with hmp file to create python module file.

    Reads glyph ranges/ids from *map_file_name*, collects the referenced glyphs
    from the module-level ``vectors``/``vectors_count`` tables, and writes a
    python font module (``_font`` bytes, ``_index`` offsets, accessors) to
    *font_file_name*.

    :raises Exception: when a mapped glyph range references a missing glyph.
    """
    global vectors, vectors_count, vectors_used
    offsets = {}
    offset = 0
    font_vectors = {}
    font_count = {}
    print(map_file_name, font_file_name)

    glyph_counter = 0

    def _add_glyph(glyph):
        # Register one glyph's count/vectors/offset in the output tables.
        nonlocal offset, glyph_counter
        vectors_used[glyph] = True
        font_vectors[glyph_counter] = vectors.get(glyph)
        font_count[glyph_counter] = vectors_count.get(glyph)
        offsets[glyph_counter] = offset
        offset += len(font_vectors[glyph_counter]) + 1
        glyph_counter += 1

    # Read the map file and build the font tables.
    with open(map_file_name, "r") as file:
        for raw_line in file:
            hmp_entry = parse("{:d} {:d}", raw_line)
            if hmp_entry:
                # BUGFIX: was ``is not 0`` -- identity comparison with an int literal.
                if hmp_entry[1] != 0:
                    # entry is an inclusive glyph range
                    for glyph in range(hmp_entry[0], hmp_entry[1] + 1):
                        # BUGFIX: was ``vectors[glyph]`` which raised KeyError before
                        # the intended error below could fire.
                        if vectors.get(glyph) is not None:
                            _add_glyph(glyph)
                        else:
                            # BUGFIX: the ``f`` prefix was missing, so {glyph} never interpolated.
                            raise Exception(f"glyph {glyph} referenced but not found.")
                else:
                    # entry is a single glyph id
                    _add_glyph(hmp_entry[0])

    # Write the font data to the output python module.
    with open(font_file_name, "wt") as outfile:
        # number of glyphs in font
        print(f'def glyphs():\n\treturn {glyph_counter}\n', file=outfile)

        def _write_bytes_literal(label, blob):
            # Emit *blob* as a wrapped b'\x..' literal, 15 bytes per line, headed by *label*.
            print(label, file=outfile)
            print("b'", file=outfile, sep='', end='')
            count = 0
            for byte in blob:
                print(f'\\x{byte:02x}', file=outfile, sep='', end='', )
                count += 1
                if count == 15:
                    print("'\\\nb'", file=outfile, sep='', end='')
                    count = 0
            print("'", file=outfile)

        # vectors for each glyph in the font: 1-byte count followed by the vector string
        font_data = bytes()
        for glyph in font_vectors:
            print("cnt:", font_count[glyph], "vect:", font_vectors[glyph])
            print("")
            f_c = bytearray(font_count[glyph].to_bytes(1, byteorder='little'))
            f_v = bytearray(font_vectors[glyph], 'utf-8')
            font_data += f_c + f_v
            print("f_c:", f_c, "f_v", f_v)
        _write_bytes_literal("_font =\\", font_data)

        # 16 bit integer table to the start of the vector data for each glyph in the font
        index_data = bytes()
        for glyph in offsets:  # renamed: the original loop variable shadowed ``offset``
            print("for offset:", offsets[glyph])
            index_data += bytearray(pack('H', offsets[glyph]))
        _write_bytes_literal("\n_index =\\", index_data)

        # accessor boilerplate emitted verbatim into the generated module
        # (indentation inside the template reconstructed -- TODO confirm against generated fonts)
        print ("""
_mvfont = memoryview(_font)

def _chr_addr(ordch):
    offset = 2 * (ordch - 32)
    return int.from_bytes(_index[offset:offset + 2], 'little')

def get_ch(ordch):
    offset = _chr_addr(ordch if 32 <= ordch <= 127 else ord('?'))
    count = _font[offset]
    return _mvfont[offset:offset+(count+2)*2-1]
""", file=outfile)
# --- driver: convert every Hershey map file into a python font module ---

FIXED_FONTS = (
    'astrol', 'cyrilc', 'gotheng', 'gothger', 'gothita', 'greekc', 'greekcs',
    'greeks', 'greekp', 'italicc', 'italiccs', 'italict', 'lowmat', 'marker',
    'meteo', 'music', 'romanc', 'romancs', 'romand', 'romans', 'romant',
    'scriptc', 'scripts', 'symbol', 'uppmat', 'romanp',
)

hershey_load("hershey/hersh-fixed.oc")
for font in FIXED_FONTS:
    map_to_py(f"hershey/{font}.hmp", f"pyfont/{font}.py")

# Collect every glyph no map file referenced into a catch-all 'misc' font.
with open("hershey/misc.hmp", "w") as file:
    for glyph in vectors_used:
        if not vectors_used[glyph]:
            print(f'{glyph} 0', file=file)
map_to_py("hershey/misc.hmp", "pyfont/misc.py")

# Report which character each previously-unused glyph was assigned to.
print('glyph map:')
character = 0
for glyph in vectors_used:
    if not vectors_used[glyph]:
        print(f'{character:X} {glyph}')
        character += 1

# The oriental glyph set provides the japanese font.
hershey_load("hershey/hersh.or")
map_to_py("hershey/japan.hmp", "pyfont/japan.py")
| 3.28125 | 3 |
Assoc/ceas.py | jianTaoLiu-SWJTU2012/taolib | 14 | 12769075 | <filename>Assoc/ceas.py
#!/usr/bin/env python
"""Module Description
Copyright (c) 2008 <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: <NAME>
@contact: <EMAIL>
"""
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import re
import logging
from optparse import OptionParser
from Cistrome.Assoc import *
from Cistrome.Assoc.inout import MYSQL
# ------------------------------------
# constants
# ------------------------------------
# Configure root logging: level 20 (INFO), timestamped messages to stderr.
logging.basicConfig(
    level=20,
    format='%(levelname)-5s @ %(asctime)s: %(message)s ',
    datefmt='%a, %d %b %Y %H:%M:%S',
    stream=sys.stderr,
    filemode="w",
)

# ------------------------------------
# Misc functions
# ------------------------------------
# Short aliases for the root-logger convenience functions.
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Main function
# ------------------------------------
def main():
    """
    CEAS entry point.

    Annotates ChIP regions against the gene annotation table and, when a WIGGLE
    file is supplied, profiles the signal around genes (optionally recomputing
    the genome background from the wig). Writes ``<name>.R``, an R script that
    renders ``<name>.pdf``.
    """
    # read the options and validate them
    options=opt_validate(prepare_optparser())

    # CEAS run
    # read the gene annotation table
    jobcount=1
    info("#%d read the gene table..." %jobcount)

    # read
    GeneT=inout.GeneTable()
    GeneT.read(Host=options.Host,User=options.User,Db=options.Db,annotation='refGene',which=options.which)
    GeneT.sort()
    chroms_GeneT=GeneT.get_chroms()
    jobcount+=1

    # read ChIP regions
    info("#%d read the bed file of ChIP regions..." %jobcount)
    Cbed=inout.Bed()
    Cbed.read(options.bed)
    Csampler=sampler.ChIPSampler()
    ChIP=Csampler.sample(Cbed,resolution=options.chip_res)
    del Cbed
    jobcount+=1

    # read regions of interest if it is given
    if options.ebed:
        info("#%d read the bed file of regions of interest..." %jobcount)
        roi=inout.Bed()
        roi.read(options.ebed)
        jobcount+=1
    else: roi=None

    # if wig profiling is not being run.
    if not options.bg:
        # iterate through chromosomes of the gene table
        info("#%d read the pre-computed genome bg annotation..." %jobcount)
        GenomeBGS=tables.SummaryGBG(name='GenomeBGS')
        GenomeBGS.readdb(Db=options.gdb)
        # NOTE(review): ``gdb`` is undefined in this scope (probably meant ``options.gdb``)
        # and ``_interpoloate_gbg`` looks misspelled -- this line raises NameError when reached.
        GP=_interpoloate_gbg(gdb,options.promoter,options.bipromoter,options.downstream)
        chroms_bg=GP.get_chroms()

        # if any regions of interest are given
        if options.ebed:
            GP=_get_bgroi(GP,GenomeBGS,roi=roi,bg_res=options.bg_res)

        # annotate ChIP regions
        info('#%d annotate the ChIP regions...' %jobcount)
        Annot=annotator.Annotator()
        ChIPA=Annot.annotate(genome_coordinates=ChIP,gene_table=GeneT,roi=roi,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
        CS,CP=Annot.summarize(ChIPA)

        # make the table complete with missing chromsomes, if there are
        annotator.make_table_complete(CS,chroms_bg)
        annotator.make_table_complete(CP,chroms_bg)

        # get the pvalues
        CPval=annotator.estimate_pvals(GP,CS,CP)
        jobcount+=1

        # open outfile
        info('#%d write a R script of CEAS...' %jobcount)
        ofhd=open(options.name+'.R','w')
        pdfname=options.name+'.pdf'

        # the first part of CEAS R script. Because wig profiling is not run, just terminate
        rscript=R.pdf(pdfname,height=11.5,width=8.5)
        rscript+=inout.draw_CEAS(GP,CP,CPval,bg_res=options.bg_res,chip_res=options.chip_res,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
        ofhd.write(rscript) # write CEAS

    # when wig profiling is running
    if options.pf:
        if options.bg:
            # background summary is recomputed chromosome by chromosome from the wig
            GenomeBGS=tables.Summary()

        # if gene groups are give
        if options.gn_groups:
            subsets=inout.read_gene_subsets(options.gn_groups)

        chrom=''
        chrcount=1
        prof=profiler.WigProfiler()
        FIRST=True
        # NOTE(review): ``xreadlines`` is Python 2 only; the file handle is never closed.
        for line in open(options.wig,'r').xreadlines():
            if not line: continue
            # read a chromosome
            if re.search(r'track',line):
                try:
                    description=re.search(r'description="(\w+)"\s',line).group(1)
                except AttributeError:
                    pass
                continue
            if re.search(r'chrom=(\w+)\s',line):
                newchrom=re.search(r'chrom=(\w+)\s',line).group(1)
                try:
                    # normalize chromosome aliases to standard names
                    newchrom=inout.standard_chroms[newchrom]
                except KeyError:
                    pass
                continue
            l=line.strip().split()

            # the beginning
            if chrom=='' and chrom!=newchrom:
                # if the chromosome is not in gene table, continue
                chrom=newchrom
                if chrom in chroms_GeneT: # only if the new chromosome is in the chroms of gene table, a wig object is initiated.
                    info("#%d-%d work on %s..." %(jobcount,chrcount,chrom))
                    input=inout.Wig()
                    input.add_line(chrom,l)
                    chrcount+=1
            elif chrom!='' and chrom!=newchrom: # new chromosome
                if chrom in chroms_GeneT:
                    # do genome BG annotation
                    if options.bg:
                        Sampler=sampler.GenomeSampler()
                        Annotator=annotator.Annotator()
                        GA=Annotator.annotate(Sampler.sample(input,resolution=options.bg_res),GeneT,roi=roi,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
                        tempS,tempP=Annotator.summarize(GA)
                        GenomeBGS.add_row(chrom,tempS.get_row(chrom))

                    # wig profiling
                    names,breaks,upstreams,downstreams,metagene_breaks,metagenes,metaexon_breaks,metaexons,metaintron_breaks,metaintrons=prof.profile(input,GeneT,rel_pos=options.rel_dist,metagenesize=options.metagene_size,step=options.pf_res,which=options.which,exonratio=0.5,emask=options.emask,imask=options.imask)

                    # get average of this chromosome
                    avg_up,upcount=corelib.mean_col_by_col(upstreams,counts=True)
                    avg_down,downcount=corelib.mean_col_by_col(downstreams,counts=True)
                    avg_mg,genecount=corelib.mean_col_by_col(metagenes,counts=True)
                    avg_me,exoncount=corelib.mean_col_by_col(metaexons,counts=True)
                    avg_mi,introncount=corelib.mean_col_by_col(metaintrons,counts=True)

                    if not FIRST: # if not first chromosome
                        # fold this chromosome's averages into the running weighted means
                        avg_upstream,avg_upcount=corelib.weight_mean_col_by_col([avg_upstream,avg_up],[avg_upcount,upcount],counts=True)
                        avg_downstream,avg_downcount=corelib.weight_mean_col_by_col([avg_downstream,avg_down],[avg_downcount,upcount],counts=True)
                        avg_metagene,avg_genecount=corelib.weight_mean_col_by_col([avg_metagene,avg_mg],[avg_genecount,genecount],counts=True)
                        avg_metaexon,avg_exoncount=corelib.weight_mean_col_by_col([avg_metaexon,avg_me],[avg_exoncount,exoncount],counts=True)
                        avg_metaintron,avg_introncount=corelib.weight_mean_col_by_col([avg_metaintron,avg_mi],[avg_introncount,introncount],counts=True)
                        del avg_up,avg_down,avg_mg,avg_me,avg_mi,upcount,downcount,genecount,exoncount,introncount
                        if options.gn_groups: # when gene sub-gropus are given
                            ixs,subsets=profiler.get_gene_indicies(names,subsets)
                            avg_ups,upcs,avg_downs,downcs,avg_mgs,gcs,avg_mes,ecs,avg_mis,ics=profiler.select_profiles_chr_by_chr(ixs,upstreams,downstreams,metagenes,metaexons,metaintrons)
                            avg_upstreams,avg_upcounts=profiler.weight_mean_profiles_chr_by_chr(avg_upstreams,avg_upcounts,avg_ups,upcs)
                            avg_downstreams,avg_downcounts=profiler.weight_mean_profiles_chr_by_chr(avg_downstreams,avg_downcounts,avg_downs,downcs)
                            avg_metagenes,avg_genecounts=profiler.weight_mean_profiles_chr_by_chr(avg_metagenes,avg_genecounts,avg_mgs,gcs)
                            avg_metaexons,avg_exoncounts=profiler.weight_mean_profiles_chr_by_chr(avg_metaexons,avg_exoncounts,avg_mes,ecs)
                            avg_metaintrons,avg_introncounts=profiler.weight_mean_profiles_chr_by_chr(avg_metaintrons,avg_introncounts,avg_mis,ics)
                            del avg_ups,avg_downs,avg_mgs,avg_mes,avg_mis,upcs,downcs,gcs,ecs,ics
                    else: # if first chromosome
                        avg_upstream=avg_up
                        avg_downstream=avg_down
                        avg_metagene=avg_mg
                        avg_metaexon=avg_me
                        avg_metaintron=avg_mi
                        avg_upcount=upcount
                        avg_downcount=downcount
                        avg_genecount=genecount
                        avg_exoncount=exoncount
                        avg_introncount=introncount
                        if options.gn_groups:
                            ixs,subsets=profiler.get_gene_indicies(names,subsets)
                            avg_upstreams,avg_upcounts,avg_downstreams,avg_downcounts,avg_metagenes,avg_genecounts,avg_metaexons,avg_exoncounts,avg_metaintrons,avg_introncounts=profiler.select_profiles_chr_by_chr(ixs,upstreams,downstreams,metagenes,metaexons,metaintrons)
                        FIRST=False
                    del upstreams,downstreams,metagenes,metaexons,metaintrons

                # set chrom to the new chromosome
                chrom=newchrom
                if chrom in chroms_GeneT: # only if the new chromosome is in the chroms of gene table, a wig object is initiated.
                    info("#%d-%d work on %s..." %(jobcount,chrcount,chrom))
                    input=inout.Wig()
                    input.add_line(chrom,l)
                    chrcount+=1
            else: # in the middle of chromosome
                if chrom in chroms_GeneT: # only if the new chromosome is in the chroms of gene table, the wig object is updated.
                    input.add_line(chrom,l)

        # do profiling for the last chromosome
        if chrom in chroms_GeneT:
            if options.bg:
                Sampler=sampler.GenomeSampler()
                Annotator=annotator.Annotator()
                GA=Annotator.annotate(Sampler.sample(input,resolution=options.bg_res),GeneT,roi=roi,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
                tempS,tempP=Annotator.summarize(GA)
                GenomeBGS.add_row(chrom,tempS.get_row(chrom))
                # finalize the freshly computed genome background
                GenomeBGS.summarize()
                GP=GenomeBGS.get_p()
                if options.ebed:
                    GP=_get_bgroi(GP,GenomeBGS,roi=roi,bg_res=options.bg_res)

            # profiling
            names,breaks,upstreams,downstreams,metagene_breaks,metagenes,metaexon_breaks,metaexons,metaintron_breaks,metaintrons=prof.profile(input,GeneT,rel_pos=options.rel_dist,metagenesize=options.metagene_size,step=options.pf_res,which=options.which,exonratio=0.5,emask=options.emask,imask=options.imask)
            del input

            # get average of this chromosome
            avg_up,upcount=corelib.mean_col_by_col(upstreams,counts=True)
            avg_down,downcount=corelib.mean_col_by_col(downstreams,counts=True)
            avg_mg,genecount=corelib.mean_col_by_col(metagenes,counts=True)
            avg_me,exoncount=corelib.mean_col_by_col(metaexons,counts=True)
            avg_mi,introncount=corelib.mean_col_by_col(metaintrons,counts=True)

            if not FIRST: # the first chromosome profiling
                avg_upstream,avg_upcount=corelib.weight_mean_col_by_col([avg_upstream,avg_up],[avg_upcount,upcount],counts=True)
                avg_downstream,avg_downcount=corelib.weight_mean_col_by_col([avg_downstream,avg_down],[avg_downcount,upcount],counts=True)
                avg_metagene,avg_genecount=corelib.weight_mean_col_by_col([avg_metagene,avg_mg],[avg_genecount,genecount],counts=True)
                avg_metaexon,avg_exoncount=corelib.weight_mean_col_by_col([avg_metaexon,avg_me],[avg_exoncount,exoncount],counts=True)
                avg_metaintron,avg_introncount=corelib.weight_mean_col_by_col([avg_metaintron,avg_mi],[avg_introncount,introncount],counts=True)
                del avg_up,avg_down,avg_mg,avg_me,avg_mi,upcount,downcount,genecount,exoncount,introncount
                if options.gn_groups:
                    ixs,subsets=profiler.get_gene_indicies(names,subsets)
                    avg_ups,upcs,avg_downs,downcs,avg_mgs,gcs,avg_mes,ecs,avg_mis,ics=profiler.select_profiles_chr_by_chr(ixs,upstreams,downstreams,metagenes,metaexons,metaintrons)
                    avg_upstreams,avg_upcounts=profiler.weight_mean_profiles_chr_by_chr(avg_upstreams,avg_upcounts,avg_ups,upcs)
                    avg_downstreams,avg_downcounts=profiler.weight_mean_profiles_chr_by_chr(avg_downstreams,avg_downcounts,avg_downs,downcs)
                    avg_metagenes,avg_genecounts=profiler.weight_mean_profiles_chr_by_chr(avg_metagenes,avg_genecounts,avg_mgs,gcs)
                    avg_metaexons,avg_exoncounts=profiler.weight_mean_profiles_chr_by_chr(avg_metaexons,avg_exoncounts,avg_mes,ecs)
                    avg_metaintrons,avg_introncounts=profiler.weight_mean_profiles_chr_by_chr(avg_metaintrons,avg_introncounts,avg_mis,ics)
                    del avg_ups,avg_downs,avg_mgs,avg_mes,avg_mis,upcs,downcs,gcs,ecs,ics
            else:
                avg_upstream=avg_up
                avg_downstream=avg_down
                avg_metagene=avg_mg
                avg_metaexon=avg_me
                avg_metaintron=avg_mi
                avg_upcount=upcount
                avg_downcount=downcount
                avg_genecount=genecount
                avg_exoncount=exoncount
                avg_introncount=introncount
                if options.gn_groups:
                    ixs,subsets=profiler.get_gene_indicies(names,subsets)
                    avg_upstreams,avg_upcounts,avg_downstreams,avg_downcounts,avg_metagenes,avg_genecounts,avg_metaexons,avg_exoncounts,avg_metaintrons,avg_introncounts=profiler.select_profiles_chr_by_chr(ixs,upstreams,downstreams,metagenes,metaexons,metaintrons)
            del upstreams,downstreams,metagenes,metaexons,metaintrons
        jobcount+=1

        if options.bg:
            info('#%d annotate ChIP regions...' %jobcount)
            Annot=annotator.Annotator()
            ChIPA=Annot.annotate(genome_coordinates=ChIP,gene_table=GeneT,roi=roi,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
            CS,CP=Annot.summarize(ChIPA)
            CPval=annotator.estimate_pvals(GP,CS,CP)
            jobcount+=1

            info('#%d write R script of CEAS and wig profiling...' %jobcount)
            ofhd=open(options.name+'.R','w')
            pdfname=options.name+'.pdf'

            # the first part of CEAS R script. Because wig profiling is not run, just terminate
            rscript=R.pdf(pdfname,height=11.5,width=8.5)
            rscript+=inout.draw_CEAS(GP,CP,CPval,bg_res=options.bg_res,chip_res=options.chip_res,prom=options.promoter,biprom=options.bipromoter,down=options.downstream,gene_div=(3,5))
            ofhd.write(rscript) # writing CEAS
        else:
            # ``ofhd`` was already opened (and CEAS part written) in the ``not options.bg`` branch above
            info('#%d append R script of wig profiling...' %jobcount)

        # write R script
        if options.gn_groups:
            # append the profiles of all genes
            avg_upstreams.append(avg_upstream)
            avg_downstreams.append(avg_downstream)
            avg_metagenes.append(avg_metagene)
            avg_metaexons.append(avg_metaexon)
            avg_metaintrons.append(avg_metaintron)
            rscript=inout.draw_profile_plots(breaks,avg_upstreams,avg_downstreams,metagene_breaks,avg_metagenes,metaexon_breaks,avg_metaexons,metaintron_breaks,avg_metaintrons,metagene_breaks_lim=[-1000,1000],legends=options.gn_names)
        else:
            rscript=inout.draw_profile_plot(breaks,avg_upstream,avg_downstream,metagene_breaks,avg_metagene,metaexon_breaks,avg_metaexon,metaintron_breaks,avg_metaintron,metagene_breaks_lim=[-1000,1000])
        ofhd.write(rscript) # write wig profiling

    # close the R device and the script file in all cases
    ofhd.write(R.devoff())
    ofhd.close()

    info ('#... cong! Run R on %s!' %(options.name+'.R'))
# ------------------------------------
# functions
# ------------------------------------
def prepare_optparser():
    """Prepare optparser object. New options will be added in this
    function first.
    """
    usage = "usage: %prog <-b bed -g gdb> [options]"
    description = "CEAS -- Cis-regulatory Element Annotation System"

    optparser = OptionParser(version="%prog 1.0", description=description, usage=usage, add_help_option=False)
    add = optparser.add_option  # shorthand

    add("-h", "--help", action="help", help="Show this help message and exit.")
    add("-b", "--bed", dest="bed", type="string",
        help="BED file of ChIP regions.")
    add("-w", "--wig", dest="wig", type="string",
        help="WIGGLE file for either wig profiling or genome background re-annotation. WARNING: A WIGGLE file must be given for wig profiling.")
    add("-e", "--ebed", dest="ebed", type="string",
        help="BED file of extra regions of interest (eg, non-coding regions)")
    add("-g", "--gene-db", dest="gdb", type="string",
        help="Gene annotation table (a local sqlite3 db file provided by CEAS or species name in UCSC). CEAS searches the designated directory for the the db file. If not find, CEAS looks up UCSC for the table. WARNING: When using UCSC, MySQLdb package must be installed.")
    add("--bg", action="store_true", dest="bg",
        help="Run genome BG annotation. WARNING: This flag is effective only if a wig file is given through -w (--wig). Otherwise, ignored.", default=False)
    add("--name", dest="name",
        help="Experiment name. This will be used to name the output file. If an experiment name is not given, input BED file name will be used instead.")
    add("--chip-res", dest="chip_res", type="int",
        help="ChIP annotation resolution, DEFAULT: 600bp. WARNING: Value less than 600bp turns to be 600 p", default=600)
    add("--promoter", dest="promoter", type="int",
        help="Promoter size for annotation, DEFAULT: 3000bp", default=3000)
    add("--bipromoter", dest="bipromoter", type="int",
        help="Bidirectional-promoter size for annotation, DEFAULT: 5000bp", default=5000)
    add("--downstream", dest="downstream", type="int",
        help="Downstream size for annotation, DEFAULT: 3000bp", default=3000)
    add("--pf-res", dest="pf_res", type="int",
        help="Wig profiling resolution, DEFAULT: 50bp. WARNING: Value smaller than the wig step (resolution) may cause aliasing error.", default=50)
    add("--rel-dist", dest="rel_dist", type="int",
        help="Relative distance to TSS/TTS in wig profiling, DEFAULT: 3000bp", default=3000)
    add("--metagene-size", dest="metagene_size", type="int",
        help="Normalized gene length in wig profiling, DEFAULT: 3000bp. Every gene is normalized to have this length.", default=3000)
    add("--gn-groups", dest="gn_groups", type="string",
        help="Gene-groups of particular interest in wig profiling. Each gene group file must have gene names in the 1st column. The file names are separated by commas w/ no space (eg, --gn-groups=top10.txt,bottom10.txt)")
    add("--gn-group-names", dest="gn_names", type="string",
        help="The names of the gene groups in --gn-groups. The gene group names are separated by commas. (eg, --gn-group-names='top 10%,bottom 10%'). These group names appear in the legends of the wig profiling plots. If no group names given, the groups are represented as 'Group 1, Group2,...Group n'.")
    add("--alt-gn", action="store_true", dest="name2",
        help="Whether alternative gene names (eg, 'name2' in refGene of UCSC) are used in --gn-groups or not. This flag is meaningful only if --gn-groups is set.", default=False)
    add("--verbose", dest="verbose", type="string",
        help="Name of a directory. if set, save verbose information in the direcotry.")

    return optparser
def opt_validate (optparser):
    """Validate options from a OptParser object.
    Ret: Validated options object.

    Exits (sys.exit(1)) on any invalid/missing combination of options.
    """
    (options,args) = optparser.parse_args()
    # input BED file and GDB must be given
    if not (options.bed and options.gdb):
        optparser.print_help()
        sys.exit(1)
    # get gdb lower case
    # A local sqlite3 db file takes precedence; otherwise fall back to the
    # public UCSC MySQL server (requires the MySQLdb package).
    HAVELOCALGDB=os.path.isfile(options.gdb)
    if HAVELOCALGDB:
        options.Host=None
        options.User=None
        options.Db=options.gdb.lower()
    else:
        if MYSQL:
            options.Host="genome-mysql.cse.ucsc.edu"
            options.User="genome"
            options.Db=os.path.split(options.gdb)[-1].lower()
        else:
            error('MySQLdb package needs to be installed to use UCSC or a local sqlite3 db file must exist.')
            error('Check -g (--gdb). No such file or species: %s' %options.gdb)
            sys.exit(1)
    # bg background resolution is set to 100
    options.bg_res=100
    # wig file
    if options.wig:
        HAVEWIG=os.path.isfile(options.wig)
    else: HAVEWIG=False
    # Decide whether genome background (BG) annotation must be (re)computed:
    # it needs both a wig file and the --bg flag; without a local gdb there is
    # no pre-computed BG to fall back on, so any missing piece is fatal.
    REBG=False
    if HAVEWIG and options.bg:
        REBG=True
    elif not HAVELOCALGDB and HAVEWIG and not options.bg:
        error('Genome BG annotation must be run when no pre-computed BG annotation exists. Set --bg.')
        sys.exit(1)
    elif not HAVELOCALGDB and not HAVEWIG and options.bg:
        if not options.wig:
            error('Genome BG annotation must be run when no pre-computed BG annotation exists. Give a WIGGLE file through -w (--wig)')
        else:
            error('Genome BG annotation must be run when no pre-computed BG annotation exists. Check -w (--wig). No such file: %s' %options.wig)
        sys.exit(1)
    elif not HAVELOCALGDB and not HAVEWIG and not options.bg:
        if not options.wig:
            error('Genome BG annotation must be run when no pre-computed BG annotation exists. set --bg and give a WIGGLE file through -w (--wig)')
        else:
            error('Genome BG annotation must be run when no pre-computed BG annotation exists. Set --bg and check -w (--wig). No such file: %s' %options.wig)
        sys.exit(1)
    options.bg=REBG
    # if a WIGGLE file given, do profiling
    if HAVEWIG:
        options.pf=True
    else: options.pf=False
    # non-coding regions
    if options.ebed:
        if not os.path.isfile(options.ebed):
            error('Check -e (--ebed). No such file: %s' %options.ebed)
            sys.exit(1)
    # get name: default to the BED file's basename without its '.bed' suffix
    if not options.name:
        options.name=os.path.split(options.bed)[-1].rsplit('.bed',2)[0]
    # the minimum ChIP annotation resolution is 600
    options.chip_res=max(600,options.chip_res)
    #check if name2 is going to be used instead of name
    if options.name2:
        options.which='name2'
    else:
        options.which='name'
    # check the gene group files
    if options.pf and options.gn_groups:
        parsed=options.gn_groups.split(',')
        for p in parsed:
            if not os.path.isfile(p):
                error('Check --gn-groups. No such file: %s' %p)
                sys.exit(1)
        options.gn_groups=parsed
        if options.gn_names:
            parsed_names=options.gn_names.split(',')
            if len(parsed_names)!=len(options.gn_groups):
                error('There must be the same number of group names as gene groups')
                sys.exit(1)
            options.gn_names=parsed_names
        else:
            # default group labels: 'Group 1', 'Group 2', ...
            options.gn_names=[]
            for i in range(len(options.gn_groups)):
                options.gn_names.append('Group %d' %(i+1))
    # emask and imask -- potentially can be added later
    options.emask=0
    options.imask=0
    return options
def _get_bgroi(GenomeBGP,GenomeBGS,roi,bg_res=100):
    """Get the background annotation for regions of interest given through -e (or --ebed) option
    Parameters:
    1. GenomeGBP: a P object (see inout.py) of genome background annotation. This will be modified by this function and returned.
    2. GenomeGBS: a SummaryGBG object (see inout.py) of genome background annotation
    2. roi: a Bed object of regions of interest
    3. bg_res: genome background annotation resolution (default=100bp)
    """
    # sampler
    Sampler=sampler.ChIPSampler()
    # NOTE(review): 'roisamp' is never used below -- confirm whether
    # ChIPSampler.sample() has required side effects before removing it.
    roisamp=Sampler.sample(bed=roi,resolution=bg_res)
    # chromosomes present in both the genome BG annotation and the ROI bed
    chroms=set(GenomeBGP.get_chroms()).intersection(roi.keys())
    bgroi={}
    whole=0
    # count ROI regions per chromosome, plus a genome-wide total under 'whole'
    for chrom in chroms:
        num_this_chr=len(roi[chrom])
        bgroi[chrom]=num_this_chr
        whole+=num_this_chr
    bgroi['whole']=whole
    # store the ROI fraction per chromosome (count / 'Ns' bins); chromosomes
    # with zero bins or missing keys are silently skipped
    for chrom in bgroi.keys():
        try:
            GenomeBGP[chrom]['roi']=1.0*bgroi[chrom]/GenomeBGS[chrom]['Ns']
        except ZeroDivisionError:
            pass
        except KeyError:
            pass
    return GenomeBGP
def _interpoloate_gbg(gdb,promoter,bipromoter,downstream):
    """In using the pre-computed genome bg model, this function performs linear interpolation of
    genome-wide enrichments of promoter, bidirectional promoter, and downstream.
    Parameters:
    1. gdb: sqlite3 db file. This file must have GenomeBGS and GenomeBGP tables
    2. promoter: promoter length given through options.promoter
    3. bipromoter: bidirectional promoter length given through options.bipromoter
    4. downstream: downstream length given through options.downstream
    Return
    GP: a P object (see tables.py). This object contains genome bg annotation
    """
    GenomeBGP=tables.PGBG(name='GenomeBGP',numprom=11,numbiprom=21,numdown=11)
    GenomeBGP.readdb(Db=gdb)
    GP=tables.P(name='GP')
    # the given promoter, bipromoter, and downstream lengths
    # FIX: 'new' and 'mod' were used without being initialized (NameError),
    # and 'bipromoeter' was a typo for 'bipromoter'.
    new={}
    mod={}
    new['promoter'] = [promoter/3, 2*promoter/3, promoter]
    new['bipromoter'] = [bipromoter/2, bipromoter]
    new['downstream'] = [downstream/3, 2*downstream/3, downstream]
    # the model promoter, bipromoter, and downstream lengths
    mod['promoter']=[0, 500]+seq(fr=1000,to=10000,by=1000)
    mod['bipromoter']=[0, 500]+seq(fr=1000,to=20000,by=1000)
    mod['downstream']=[0, 500]+seq(fr=1000,to=10000,by=1000)
    for chrom in GenomeBGP.get_chroms():
        GP.init_table(chrom)
        for column in GenomeBGP.columns[1:]:
            if column!='promoter' and column!='bipromoter' and column!='downstream':
                # non-length columns are copied over unchanged
                GP[chrom][column]=GenomeBGP[chrom][column]
            else:
                # linearly interpolate the model values at the requested lengths
                vals=[0.0]+GenomeBGP[chrom][column]
                interpol=[]
                for x in new[column]:
                    i=corelib.findbin(x, mod[column])
                    # FIX: 'modproms' and 'p' were undefined names; interpolate
                    # on the current column's model lengths at query point x.
                    interpol.append(corelib.lininterpol([mod[column][i],vals[i]], [mod[column][i+1],vals[i+1]],x))
                GP[chrom][column]=interpol
    return GP
if __name__ == '__main__':
    # Script entry point; KeyboardInterrupt is caught so Ctrl-C exits cleanly.
    try:
        main()
    except KeyboardInterrupt:
        warn("User interrupts me! ;-) See you!")
        sys.exit(0)
| 1.921875 | 2 |
historycontrol.py | bixind/JohnReboot | 2 | 12769076 | <reponame>bixind/JohnReboot
import threading
import time
import datetime as dt
import fileparse as fp
# Guards both the in-memory 'last_update' timestamp and the history files.
historyLock = threading.Lock()
# One day in seconds; history records older than this are compacted away.
day = 24 * 60 * 60
# Load the timestamp of the last history compaction at import time.
with historyLock, open('days/lastupdate.txt') as f:
    last_update = int(f.readline())
def statusChange(upd):
    """Append a status-change record to the history logs.

    ``upd`` is a list whose second element is a negated chat/user id.  The
    record (fields plus a timestamp) is appended to the global 'history.txt'
    and to a per-day, per-chat file under 'days/'.  Once per 'day' interval
    'history.txt' is compacted: only records newer than one day are kept.
    """
    global last_update
    with historyLock:
        # timestamps are taken in a fixed UTC+4 timezone
        d = dt.datetime.now(dt.timezone(dt.timedelta(hours=4)))
        now = round(d.timestamp())
        with open('history.txt', 'a') as f:
            print(*(upd + [now]), file = f)
        curdname = time.strftime('%Y-%m-%d', d.timetuple())
        # chat ids are stored negated in upd[1]; avoid shadowing builtin id()
        chat_id = -upd[1]
        fp.ensure_dir('days/' + curdname)
        with open('days/' + curdname + '/' + str(chat_id) + '.txt', 'a') as f:
            print(*(upd + [now]), file = f)
        if now > last_update + day:
            # Compact history.txt: group records by chat id, rewrite keeping
            # only records from the last day.
            l = dict()
            with open('history.txt') as f:
                for s in f:
                    s = s.split()
                    rec_id = -int(s[1])
                    if rec_id not in l:
                        l[rec_id] = []
                    l[rec_id].append(s)
            with open('history.txt', 'w') as f:
                for rec_id in l:
                    for el in l[rec_id]:
                        if int(el[-1]) >= now - day:
                            print(*el, file = f)
            with open('days/lastupdate.txt', 'w') as f:
                print(now, file = f)
            # BUGFIX: refresh the in-memory timestamp as well; previously only
            # the file was updated, so compaction re-ran on every later call.
            last_update = now
| 2.6875 | 3 |
maskprocessor/__main__.py | Xvezda/python-maskprocessor | 1 | 12769077 | <filename>maskprocessor/__main__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Xvezda <<EMAIL>>
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .core import main
if __name__ == '__main__':
    # Delegate to the package's CLI entry point (maskprocessor.core.main).
    main()
| 1.4375 | 1 |
examples/print.py | tugboat1039/Form1-Hack | 71 | 12769078 | #!/usr/bin/env python
"""
This is a Python script that prints an FLP
"""
import OpenFL.Printer as P
if __name__ == '__main__':
    # FIX: 'argparse' was used below but never imported anywhere in the file.
    import argparse
    parser = argparse.ArgumentParser(description="Print a .flp")
    parser.add_argument('input', metavar='input', type=str,
                        help='source flp file')
    args = parser.parse_args()
    # Upload the FLP file to block 0 on the printer and start the print job.
    p = P.Printer()
    p.initialize()
    p.write_block(0, args.input)
    p.start_printing(0)
| 2.59375 | 3 |
simplegan/__init__.py | grohith327/EasyGAN | 23 | 12769079 | <filename>simplegan/__init__.py
from simplegan import autoencoder, gan, datasets, losses, metrics
__version__ = "v0.2.9"
| 1.085938 | 1 |
blog/migrations/0004_auto_20200301_1133.py | satyamtiwari1004/incognito-beta | 0 | 12769080 | <filename>blog/migrations/0004_auto_20200301_1133.py
# Generated by Django 3.1 on 2020-03-01 06:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switches BlogPost.timestamp to auto_now so it
    # is refreshed on every save.

    dependencies = [
        ('blog', '0003_auto_20090101_0253'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blogpost',
            name='timestamp',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 1.421875 | 1 |
estimate_pass_test.py | FootBrawlers/Passing_Algo | 1 | 12769081 | <reponame>FootBrawlers/Passing_Algo<filename>estimate_pass_test.py
# --------- This script will estimate the best coordinate to pass the ball, if any. ----------------
# here, we are defining some constraints based on field dimensions
maximum_pass_length = 700
ellipse_width_opp_check =40 # 'b'-parameter and 'a' will be (distance between points)*1.5
threshold=ellipse_width_opp_check
import math as mt
gp=[1020,310]
gp_length=140
# FUNCTION for DISTANCE
# argument1 -> (x1,y1) or [x1,y1], argument2 -> (x2,y2) or [x2,y2]
def distance(p1, p2):
    """Euclidean distance between two 2-D points (tuples or lists)."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return mt.sqrt(dx ** 2 + dy ** 2)
# FUNCTION for finding the coordinates to whom the ball can be passed.
# argument1 -> list of coordinates of the form [[x1,y1],[x2,y2],...,[xn,yn]] , argument2 -> Id of the bot which is currently holding the ball
def passable_host_bots(list_host_coords, current_id):
    """Return the coordinates of teammates within passing range of the
    bot at index ``current_id`` (the bot itself is excluded)."""
    origin = list_host_coords[current_id]
    return [
        coord
        for idx, coord in enumerate(list_host_coords)
        if idx != current_id and distance(origin, coord) <= maximum_pass_length
    ]
def in_ellipse(c1, c2, e):
    """True if point e lies inside the ellipse centred on c2 whose semi-major
    axis is |c1-c2| and semi-minor axis is the global 'threshold'."""
    a_sq = distance(c1, c2) ** 2
    b_sq = threshold ** 2
    return ((e[0] - c2[0]) ** 2) / a_sq + ((e[1] - c2[1]) ** 2) / b_sq <= 1
def in_circle(c2, e):
    """True if point e lies within the global 'threshold' radius of c2."""
    return (e[0] - c2[0]) ** 2 + (e[1] - c2[1]) ** 2 <= threshold ** 2
def dist_from_gp(c1):
    """Distance from point c1 to the nearest point of the goal-post segment
    (a vertical segment of length gp_length centred on gp)."""
    upper_end = [gp[0], gp[1] + gp_length / 2]
    lower_end = [gp[0], gp[1] - gp_length / 2]
    y = c1[1]
    if y < lower_end[1]:
        # below the goal mouth: closest point is the lower post
        return distance(c1, lower_end)
    if y > upper_end[1]:
        # above the goal mouth: closest point is the upper post
        return distance(c1, upper_end)
    # level with the goal mouth: horizontal distance to the goal line
    return gp[0] - c1[0]
def sort_array(arr):
    """Sort ``arr`` in place by each coordinate's distance to the goal post.

    Replaces a hand-rolled insertion sort with ``list.sort``; both sorts are
    stable, so the resulting order is identical, and Timsort is O(n log n)
    instead of O(n^2).
    """
    arr.sort(key=dist_from_gp)
def same_side_line(c1, c2, e):
    """Return 1 if c1 and e are on the same side of the line through c2
    perpendicular to the segment c1-c2, else 0."""
    slope = -(c1[0] - c2[0]) / (c1[1] - c2[1])
    side_e = (e[1] - c2[1]) - slope * (e[0] - c2[0])
    side_c1 = (c1[1] - c2[1]) - slope * (c1[0] - c2[0])
    return 0 if side_e * side_c1 < 0 else 1
def in_shape(c1, c2, e):
    """Return 1 if opponent e blocks the pass from c1 to c2, else 0.

    On c1's side of the receiver the blocking region is the pass ellipse;
    behind the receiver it is the 'threshold' circle around c2.
    """
    if same_side_line(c1, c2, e):
        blocked = in_ellipse(c1, c2, e)
    else:
        blocked = in_circle(c2, e)
    return 1 if blocked else 0
'''def chk_pass(cur,l,ec):
for i in range (0,len(l)):
flag=0
for j in range (0,len(ec)):
if(in_shape(cur,l[i],ec[j])==0):
flag+=1
if (flag==6):
return(id(l[i]))
else:
return "none"'''
def chk_pass(cur,l,ec):
    # Return the id of the first candidate receiver in 'l' that no opponent
    # in 'ec' can intercept, or the string "None" if every pass is blocked.
    # NOTE(review): 'id' resolves to the lookup helper defined in the
    # __main__ block below, which shadows the builtin -- confirm intended.
    for i in range (0,len(l)):
        for j in range (0,len(ec)):
            if in_shape(cur,l[i],ec[j]):
                break
        else:
            # inner loop finished without break: no opponent blocks this pass
            return id(l[i])
    else:
        return "None"
if __name__ == "__main__":
    host_cords = []  # this will contain the host coordinates with id as index, i.e., from 1 to 6
    opp_cords = []  # this will contain the opposition coordinates with id as index, i.e., from 11 to 16
    # read the coordinates from "input.txt"
    # data-format in input.txt ---------> ( index, x-cord, y-cord)
    input_file = open("input.txt", "r")
    op = input_file.readlines()
    # first 6 lines are host bots, the rest are opponents
    for i in range(len(op)):
        if i < 6:
            host_cords.append(list(map(int, op[i].strip().split()[1:])))
        else:
            opp_cords.append(list(map(int, op[i].strip().split()[1:])))
    input_file.close()
    # Verifying the input
    print()
    print("Host Coordinates : ", host_cords)
    print("Opponent Coordinates : ", opp_cords)
    print()
    # Verifying the passable points
    print("Passable points with ", host_cords[0], " as current bot : ", passable_host_bots(host_cords, 0))
    print()
    list_passable_host_bots=passable_host_bots(host_cords, 0)
    (sort_array(list_passable_host_bots))
    print(list_passable_host_bots)
    # NOTE(review): 'dict' shadows the builtin; maps 1-based bot ids to coords.
    dict = {}
    for i in range(0, len(host_cords)):
        dict[i + 1] = host_cords[i]
    # NOTE(review): shadows builtin id(); chk_pass() above relies on this
    # global name, so renaming it here would change chk_pass behaviour.
    def id(x):
        # Reverse lookup: 1-based bot id for coordinate pair x.
        for i in range(0, len(host_cords)):
            if (dict[i + 1][0] == x[0] and dict[i + 1][1] == x[1]):
                return (i + 1)
    print(chk_pass(host_cords[0],list_passable_host_bots,opp_cords))
| 2.796875 | 3 |
motrackers/detectors/__init__.py | timseifer/mixed_motion_detection | 570 | 12769082 | from motrackers.detectors.tf import TF_SSDMobileNetV2
from motrackers.detectors.caffe import Caffe_SSDMobileNet
from motrackers.detectors.yolo import YOLOv3
| 1.046875 | 1 |
tests/orm/nodes/data/test_array_bands.py | azadoks/aiida-core | 180 | 12769083 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Tests for the :mod:`aiida.orm.nodes.data.array.bands` module."""
from argparse import Namespace
import pytest
from aiida.common.exceptions import NotExistent
from aiida.orm import BandsData, Group, User
from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure
@pytest.fixture
def alternate_user():
    """Return an alternate ``User`` instance that is not the current default user."""
    email = 'alternate<EMAIL>'
    try:
        return User.objects.get(email=email)
    except NotExistent:
        # FIX: reuse the ``email`` variable instead of repeating the literal,
        # so the looked-up and created users can never diverge.
        return User(email=email).store()
class TestGetBandsAndParentsStructure:
    """Tests for the :meth:`~aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure` function."""

    @staticmethod
    def _get_default_ns():
        """Returns a simple template Namespace"""
        # All filters disabled; individual tests override single attributes.
        args = Namespace()
        args.element = None
        args.element_only = None
        args.formulamode = None
        args.past_days = None
        args.group_name = None
        args.group_pk = None
        args.all_users = False
        return args

    @pytest.mark.parametrize('all_users, expected', ((True, [True, True]), (False, [True, False])))
    @pytest.mark.usefixtures('clear_database_before_test')
    def test_all_users(self, alternate_user, all_users, expected):
        """Test the behavior for the ``all_users`` argument."""
        # One node owned by the default user, one by the alternate user.
        bands_default_user = BandsData().store()
        bands_alternate_user = BandsData(user=alternate_user).store()
        bands = [bands_default_user, bands_alternate_user]
        args = self._get_default_ns()
        args.all_users = all_users
        entries = get_bands_and_parents_structure(args)
        node_pks = [int(e[0]) for e in entries]
        # With all_users=True both nodes appear; otherwise only the default
        # user's node does.
        assert [node.pk in node_pks for node in bands] == expected

    @pytest.mark.parametrize('argument, attribute', (('group_name', 'label'), ('group_pk', 'pk')))
    @pytest.mark.usefixtures('clear_database_before_test')
    def test_identifier(self, argument, attribute):
        """Test the behavior for the ``group_name`` and ``group_pk`` arguments."""
        bands_data_grouped = BandsData().store()
        _ = BandsData().store()
        bands_group = Group('some_bands_data').store()
        bands_group.add_nodes(bands_data_grouped)
        args = self._get_default_ns()
        # Group filters are passed as single-element lists.
        setattr(args, argument, [getattr(bands_group, attribute)])
        entries = get_bands_and_parents_structure(args)
        # Only the node inside the group may be returned.
        assert [int(e[0]) for e in entries] == [bands_data_grouped.pk]
| 1.929688 | 2 |
contents/models.py | hafiztsalavin/personal_web | 4 | 12769084 | from django.contrib.auth.models import User
from django.db import models
from PIL import Image
class Profile(models.Model):
    """One-to-one extension of the auth User with portfolio/contact details."""
    contact_no = models.CharField(max_length=20)
    address = models.CharField(max_length=200)
    image = models.ImageField(help_text='425x425px recommmended', upload_to='profile_pics')
    title = models.CharField(max_length=100, blank=True)
    linkedin_url = models.CharField(max_length=100)
    github_url = models.CharField(max_length=50)
    about_me = models.CharField(max_length=500)
    cv_link = models.CharField(max_length=255, blank=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)

    def __str__(self):
        return f'{self.user.username} Profile'

    # Override the save function in Profile class:
    def save(self, *args, **kwargs):
        """Save the profile, then downscale its image to at most 425x425 px."""
        # run the parent class' save() function:
        super().save(*args, **kwargs)
        # open the image of the current instance:
        img = Image.open(self.image.path)
        if img.height > 425 or img.width > 425:
            output_size = (425, 425)
            # thumbnail() resizes in place, preserving the aspect ratio
            img.thumbnail(output_size)
            img.save(self.image.path)
class Focus(models.Model):
    """An area of professional focus displayed on the site."""
    name = models.CharField(max_length=50)
    icon = models.CharField(max_length=20)
    color = models.CharField(max_length=20, default='white')
    description = models.CharField(max_length=500)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return f'{self.name} - Active: {self.is_active}'


class TechnicalSkill(models.Model):
    """A technical skill with a proficiency percentage."""
    name = models.CharField(max_length=20)
    is_top_skill = models.BooleanField(default=True)
    percentage = models.IntegerField()

    def __str__(self):
        return f'{self.name} - Top Skill: {self.is_top_skill}'


class ProfessionalSkill(models.Model):
    """A soft/professional skill with a proficiency percentage."""
    name = models.CharField(max_length=20)
    percentage = models.IntegerField()

    def __str__(self):
        return self.name
class Education(models.Model):
    """An education history entry."""
    school = models.CharField(max_length=100)
    duration = models.CharField(max_length=15)
    level = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    achievements = models.TextField(max_length=500, blank=True)

    def __str__(self):
        return f'{self.level} - {self.school}'


class WorkExperience(models.Model):
    """A work-experience history entry."""
    position = models.CharField(max_length=100)
    company = models.CharField(max_length=100)
    duration = models.CharField(max_length=30)
    address = models.CharField(max_length=200)
    summary = models.TextField(max_length=500, blank=True)

    def __str__(self):
        return f'{self.position} - {self.company}'


class ProjectCategory(models.Model):
    """A category used to group portfolio projects."""
    name = models.CharField(max_length=30)
    code = models.CharField(max_length=20)

    def __str__(self):
        return self.name
class Project(models.Model):
    """A portfolio project with links, dates and a main image."""
    title = models.CharField(max_length=200)
    code = models.CharField(max_length=20, blank=True)
    description = models.TextField()
    date_started = models.CharField(max_length=20, blank=True)
    date_ended = models.CharField(max_length=20, blank=True)
    main_image = models.ImageField(upload_to='project_images', default='')
    repo_link = models.CharField(max_length=50, blank=True)
    demo_link = models.CharField(max_length=50, blank=True)
    document_link = models.CharField(max_length=255, blank=True)
    project_category = models.ForeignKey(ProjectCategory, on_delete=models.CASCADE, related_name='projects')

    def __str__(self):
        return self.title


class ToolsAndTech(models.Model):
    """A tool/technology, linkable to many projects."""
    name = models.CharField(max_length=30)
    project = models.ManyToManyField(Project, related_name='toolsandtechs')

    def __str__(self):
        return self.name


class ProjectImage(models.Model):
    """An additional gallery image attached to a project."""
    image = models.ImageField(upload_to='project_images')
    caption = models.CharField(max_length=100, blank=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='projectimages')

    def __str__(self):
        return f'{self.project.code} - {self.image.name}'
class Recommendation(models.Model):
    """A testimonial/recommendation displayed on the site."""
    name = models.CharField(max_length=40)
    message = models.CharField(max_length=400)
    image = models.ImageField(upload_to='recommendations', default='recommendations/default')
    summary = models.CharField(max_length=50)

    def __str__(self):
        return f'{self.name} - {self.summary}'


class Certification(models.Model):
    """A professional certification entry."""
    title = models.CharField(max_length=100)
    authority = models.CharField(max_length=30)
    date_issued = models.CharField(max_length=20)
    document_link = models.CharField(max_length=255, blank=True)

    def __str__(self):
        return self.title


class Seminar(models.Model):
    """A seminar/event attendance entry."""
    title = models.CharField(max_length=100)
    organizer = models.CharField(max_length=30)
    event_date = models.CharField(max_length=20)
    link_proof = models.CharField(max_length=200, blank=True)
    link_icon = models.CharField(max_length=20, blank=True)
    document_link = models.CharField(max_length=255, blank=True)

    def __str__(self):
        return self.title
| 2.4375 | 2 |
palindrome_check.py | gtsofa/yt | 0 | 12769085 | <reponame>gtsofa/yt
# palindrome_check.py
# recursion in strings
def isPalindrome(s):
    """Return True if s is a palindrome, ignoring case and non-letter chars."""
    def toChars(s):
        # Lowercase s and keep only ASCII letters.
        # BUGFIX: the letter table previously ended at 'y', so every 'z' was
        # silently dropped (e.g. isPalindrome('za') wrongly returned True).
        s = s.lower()
        ans = ''
        for c in s:
            if c in 'abcdefghijklmnopqrstuvwxyz':
                ans = ans + c
        return ans
    def isPal(s):
        # Recursive check: compare outer characters, recurse on the middle.
        if len(s) <= 1:
            return True
        else:
            return s[0] == s[-1] and isPal(s[1:-1])
    return isPal(toChars(s))
# test goes here:
# Simple smoke test: 'ama' is a palindrome, so this prints True.
test1 = isPalindrome('ama')
print(test1)
| 3.625 | 4 |
pep/MeanComparison2groups.py | Martacsg/Edem2021MDA | 0 | 12769086 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 19 18:29:06 2021
@author: marta
"""
# NOTE(review): 'reset -f' is an IPython/Spyder magic; when this file is run
# as a plain Python script it raises a NameError. Consider removing it.
reset -f

import os # operating system
import pandas as pd # dataframe handling
import numpy as np # numeric python vectors
import matplotlib.pyplot as plt # plotting
from pandas.api.types import CategoricalDtype # handle ordinal variables
import scipy.stats as stats

# NOTE(review): hard-coded local path -- works only on the author's machine.
os.chdir(r'C:\Users\marta\git\Edem2021MDA\pep')
os.getcwd()

wbr = pd.read_csv ('WBR_11_12_denormalized_temp.csv', sep=';', decimal=',')
wbr.shape

#1-Describe the two variables involved int th hypothesis
#Rentals
wbr.cnt.describe()
plt.hist(wbr.cnt)

# Frequency table of the 'yr' (year) variable, as counts and percentages.
mytable = wbr.groupby(['yr']).size()
mytable
n = mytable.sum()
mytable2 = (mytable/n)*100
print(mytable2)
mytable3 = round(mytable2,1)
mytable3

bar_list = ['1: 2011', '2: 2012']
plt.bar(bar_list, mytable2,edgecolor='black')
plt.title('Figure1. Ventas')
plt.ylabel('Percentage')
#plt.text(1.7, 50, 'n: 731')
plt.show()

#2-Perform the numeric test: mean rentals per year (working-day mean is higher)
wbr.groupby('yr').cnt.mean()

#3-Perform the graphic test: plot of the mean
#4-When posible:
# Statistical comparison
# Subsetting
cnt_wd=wbr.loc[(wbr['yr'] == 1), 'cnt'] # group 1, 'cnt' = quantitative variable to test
cnt_nwd=wbr.loc[(wbr['yr'] == 0), 'cnt'] # group 2
#import scipy.stats as stats #pvalue
# Welch's t-test (unequal variances); res[1] is the p-value.
res=stats.ttest_ind(cnt_wd, cnt_nwd, equal_var = False)
print(res[1])
# NOTE(review): 'number_of_digits' and 'result' are undefined here -- this
# line raises NameError; probably leftover scratch code.
("{:.%df}" % number_of_digits(result[1])).format(result[1])

# Graphic comparison: confidence intervals for the means
import seaborn as sns
import matplotlib.pyplot as plt

#CI meanplot version1: basic
ax = sns.pointplot(x="yr", y="cnt", data=wbr,ci=95, join=0)

#CI meanplot version2
ax = sns.pointplot(x="yr", y="cnt", data=wbr,ci=95, join=0)
plt.axhline(y=wbr.cnt.mean(), # draw the overall mean as a reference line
            linewidth=1,
            linestyle= 'dashed',
            color="green") # color of the mean line
ax.set_ylabel('Rentals')
plt.yticks(np.arange(3000, 7000, step=500))
plt.ylim(2800,7000) # y-axis range between 2,800 and 7,000
props = dict(boxstyle='round', facecolor='white', lw=0.5)
plt.text(0.1,6000,'Mean:4504.3''\n''n:731' '\n' 't:1.601' '\n' 'Pval.:0.000', bbox=props)
plt.xlabel('Year')
plt.title('Figure 6. Average rentals by year.''\n')
data/deu-eng/divide.py | lorylei/DARTS-et | 1 | 12769087 | <filename>data/deu-eng/divide.py
# Split the tab-separated 'deu.txt' corpus (english<TAB>german<TAB>meta) into
# parallel 'deu.en' and 'deu.de' files, one sentence per line.
# FIX: use context managers so all three files are closed even on error
# (the input handle 'f' was previously never closed at all).
with open('./deu.txt', 'r') as f, \
        open('./deu.de', 'w') as fwde, \
        open('./deu.en', 'w') as fwen:
    lines = [line for line in f.read().split('\n') if line]
    for line in lines:
        items = line.split('\t')
        assert len(items) == 3
        fwen.write(items[0] + '\n')
        fwde.write(items[1] + '\n')
| 2.8125 | 3 |
talentmap_api/position/migrations/0023_positionbidstatistics_has_handshake_accepted.py | burgwyn/State-TalentMAP-API | 5 | 12769088 | <filename>talentmap_api/position/migrations/0023_positionbidstatistics_has_handshake_accepted.py
# Generated by Django 2.0.4 on 2019-03-11 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the 'has_handshake_accepted' flag
    # (default False) to PositionBidStatistics.

    dependencies = [
        ('position', '0022_positionbidstatistics_has_handshake_offered'),
    ]

    operations = [
        migrations.AddField(
            model_name='positionbidstatistics',
            name='has_handshake_accepted',
            field=models.BooleanField(default=False),
        ),
    ]
| 1.664063 | 2 |
CostControl.py | yusukeyurameshi/CostControlV2 | 0 | 12769089 | #!/usr/bin/python3
##########################################################################
# Tables used:
# - OCI_USAGE - Raw data of the usage reports
# - OCI_USAGE_STATS - Summary Stats of the Usage Report for quick query if only filtered by tenant and date
# - OCI_USAGE_TAG_KEYS - Tag keys of the usage reports
# - OCI_COST - Raw data of the cost reports
# - OCI_COST_STATS - Summary Stats of the Cost Report for quick query if only filtered by tenant and date
# - OCI_COST_TAG_KEYS - Tag keys of the cost reports
# - OCI_COST_REFERENCE - Reference table of the cost filter keys - SERVICE, REGION, COMPARTMENT, PRODUCT, SUBSCRIPTION
# - OCI_PRICE_LIST - Hold the price list and the cost per product
##########################################################################
import sys
import argparse
import datetime
import oci
import gzip
import os
import csv
import requests
import time
import pandas as pd
import json
version = "20.07.28"
usage_report_namespace = "bling"
work_report_dir = os.curdir + "/work_report_dir_temp"

# create the work dir if not exist
# FIX: makedirs(..., exist_ok=True) avoids the check-then-create race of the
# previous 'if not exists: mkdir' pair and creates parents if needed.
os.makedirs(work_report_dir, exist_ok=True)
##########################################################################
# Print header centered
##########################################################################
def print_header(name, category):
    """Print a banner with *name* centered inside a '#' frame.

    ``category`` selects the banner width: 0 -> 90, 1 -> 60, 2 -> 30 chars.
    """
    widths = {0: 90, 1: 60, 2: 30}
    width = int(widths[category])
    border = '#' * width
    print("")
    print(border)
    print("#" + name.center(width - 2, " ") + "#")
    print(border)
##########################################################################
# Get Column from Array
##########################################################################
def get_column_value_from_array(column, array):
    """Return ``array[column]`` when the key is present, otherwise ''."""
    return array[column] if column in array else ""
##########################################################################
# Create signer
##########################################################################
def create_signer(cmd):
    """Build an OCI (config, signer) pair from the parsed command line.

    Uses instance principals when -ip was given; otherwise reads the OCI
    config file/profile (falling back to the SDK defaults).
    """
    # assign default values
    config_file = oci.config.DEFAULT_LOCATION
    config_section = oci.config.DEFAULT_PROFILE

    if cmd.config:
        if cmd.config.name:
            config_file = cmd.config.name

    if cmd.profile:
        config_section = cmd.profile

    if cmd.instance_principals:
        # Authenticate using the compute instance's own identity.
        try:
            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
            config = {'region': signer.region, 'tenancy': signer.tenancy_id}
            return config, signer
        except Exception:
            print_header("Error obtaining instance principals certificate, aborting", 0)
            raise SystemExit
    else:
        # Key-based authentication from the config file section.
        config = oci.config.from_file(config_file, config_section)
        signer = oci.signer.Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )
        return config, signer
##########################################################################
# Load compartments
##########################################################################
def identity_read_compartments(identity, tenancy):
    """Return all ACTIVE compartments of *tenancy* (root included), each as
    {'id', 'name', 'path'}, sorted by their full ' / '-separated path."""
    compartments = []
    #print("Loading Compartments...")
    try:
        # read all compartments to variable
        all_compartments = []
        try:
            all_compartments = oci.pagination.list_call_get_all_results(
                identity.list_compartments,
                tenancy.id,
                compartment_id_in_subtree=True
            ).data
        except oci.exceptions.ServiceError:
            raise

        ###################################################
        # Build Compartments - return nested compartment list
        ###################################################
        def build_compartments_nested(identity_client, cid, path):
            # Depth-first walk: append every ACTIVE child of 'cid', building
            # the human-readable path as we descend.
            try:
                compartment_list = [item for item in all_compartments if str(item.compartment_id) == str(cid)]

                if path != "":
                    path = path + " / "

                for c in compartment_list:
                    if c.lifecycle_state == oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE:
                        cvalue = {'id': str(c.id), 'name': str(c.name), 'path': path + str(c.name)}
                        compartments.append(cvalue)
                        build_compartments_nested(identity_client, c.id, cvalue['path'])

            except Exception as error:
                raise Exception("Error in build_compartments_nested: " + str(error.args))

        ###################################################
        # Add root compartment
        ###################################################
        value = {'id': str(tenancy.id), 'name': str(tenancy.name) + " (root)", 'path': "/ " + str(tenancy.name) + " (root)"}
        compartments.append(value)

        # Build the compartments
        build_compartments_nested(identity, str(tenancy.id), "")

        # sort the compartment
        sorted_compartments = sorted(compartments, key=lambda k: k['path'])
        #print("    Total " + str(len(sorted_compartments)) + " compartments loaded.")
        return sorted_compartments

    except oci.exceptions.RequestException:
        raise
    except Exception as e:
        raise Exception("Error in identity_read_compartments: " + str(e.args))
##########################################################################
# set parser
##########################################################################
def set_parser_arguments():
    """Define and parse the tool's command-line arguments; return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', type=argparse.FileType('r'), dest='config', help="Config File")
    parser.add_argument('-t', default="", dest='profile', help='Config file section to use (tenancy profile)')
    parser.add_argument('-f', default="", dest='fileid', help='File Id to load')
    parser.add_argument('-d', default="", dest='filedate', help='Minimum File Date to load (i.e. yyyy-mm-dd)')
    parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
    parser.add_argument('-su', action='store_true', default=False, dest='skip_usage', help='Skip Load Usage Files')
    parser.add_argument('-sc', action='store_true', default=False, dest='skip_cost', help='Skip Load Cost Files')
    parser.add_argument('-ip', action='store_true', default=False, dest='instance_principals', help='Use Instance Principals for Authentication')
    parser.add_argument('--version', action='version', version='%(prog)s ' + version)
    result = parser.parse_args()
    return result
##########################################################################
# update_cost_stats
##########################################################################
def update_cost_stats(connection):
    """MERGE per-(tenant, file, interval) aggregates of OCI_COST into
    OCI_COST_STATS, updating rows only when the row count changed.

    NOTE(review): 'cx_Oracle' is not among the imports visible in this chunk
    -- confirm it is imported elsewhere in the file.
    """
    try:
        # open cursor
        cursor = connection.cursor()
        #print("\nMerging statistics into OCI_COST_STATS...")

        # run merge to oci_update_stats
        sql = "merge into OCI_COST_STATS a "
        sql += "using "
        sql += "( "
        sql += "    select  "
        sql += "        tenant_name, "
        sql += "        file_id, "
        sql += "        USAGE_INTERVAL_START, "
        sql += "        sum(COST_MY_COST) COST_MY_COST, "
        sql += "        sum(COST_MY_COST_OVERAGE) COST_MY_COST_OVERAGE, "
        sql += "        min(COST_CURRENCY_CODE) COST_CURRENCY_CODE, "
        sql += "        count(*) NUM_ROWS "
        sql += "    from  "
        sql += "        oci_cost "
        sql += "    group by  "
        sql += "        tenant_name, "
        sql += "        file_id, "
        sql += "        USAGE_INTERVAL_START "
        sql += ") b "
        sql += "on (a.tenant_name=b.tenant_name and a.file_id=b.file_id and a.USAGE_INTERVAL_START=b.USAGE_INTERVAL_START) "
        sql += "when matched then update set a.num_rows=b.num_rows, a.COST_MY_COST=b.COST_MY_COST, a.UPDATE_DATE=sysdate, a.AGENT_VERSION=:version,"
        sql += "    a.COST_MY_COST_OVERAGE=b.COST_MY_COST_OVERAGE, a.COST_CURRENCY_CODE=b.COST_CURRENCY_CODE "
        sql += "where a.num_rows <> b.num_rows "
        sql += "when not matched then insert (TENANT_NAME,FILE_ID,USAGE_INTERVAL_START,NUM_ROWS,COST_MY_COST,UPDATE_DATE,AGENT_VERSION,COST_MY_COST_OVERAGE,COST_CURRENCY_CODE) "
        sql += "  values (b.TENANT_NAME,b.FILE_ID,b.USAGE_INTERVAL_START,b.NUM_ROWS,b.COST_MY_COST,sysdate,:version,b.COST_MY_COST_OVERAGE,b.COST_CURRENCY_CODE) "

        cursor.execute(sql, {"version": version})
        connection.commit()
        #print("   Merge Completed, " + str(cursor.rowcount) + " rows merged")
        cursor.close()

    except cx_Oracle.DatabaseError as e:
        print("\nError manipulating database at update_cost_stats() - " + str(e) + "\n")
        raise SystemExit

    except Exception as e:
        raise Exception("\nError manipulating database at update_cost_stats() - " + str(e))
##########################################################################
# update_price_list
##########################################################################
def update_price_list(connection):
    """MERGE the latest unit price per (tenant, SKU) from OCI_COST into
    OCI_PRICE_LIST (most recent usage interval wins, ties broken by the
    highest unit price).

    NOTE(review): 'cx_Oracle' is not among the imports visible in this chunk
    -- confirm it is imported elsewhere in the file.
    """
    try:
        # open cursor
        cursor = connection.cursor()
        #print("\nMerging statistics into OCI_PRICE_LIST...")

        # run merge to oci_update_stats
        sql = "MERGE INTO OCI_PRICE_LIST A "
        sql += "USING "
        sql += "( "
        sql += "    SELECT "
        sql += "        TENANT_NAME, "
        sql += "        COST_PRODUCT_SKU, "
        sql += "        PRD_DESCRIPTION, "
        sql += "        COST_CURRENCY_CODE, "
        sql += "        COST_UNIT_PRICE "
        sql += "    FROM "
        sql += "    ( "
        sql += "        SELECT  "
        sql += "            TENANT_NAME, "
        sql += "            COST_PRODUCT_SKU, "
        sql += "            PRD_DESCRIPTION, "
        sql += "            COST_CURRENCY_CODE, "
        sql += "            COST_UNIT_PRICE, "
        sql += "            ROW_NUMBER() OVER (PARTITION BY TENANT_NAME, COST_PRODUCT_SKU ORDER BY USAGE_INTERVAL_START DESC, COST_UNIT_PRICE DESC) RN "
        sql += "        FROM OCI_COST A  "
        sql += "    )     "
        sql += "    WHERE RN = 1 "
        sql += "    ORDER BY 1,2 "
        sql += ") B "
        sql += "ON (A.TENANT_NAME = B.TENANT_NAME AND A.COST_PRODUCT_SKU = B.COST_PRODUCT_SKU) "
        sql += "WHEN MATCHED THEN UPDATE SET A.PRD_DESCRIPTION=B.PRD_DESCRIPTION, A.COST_CURRENCY_CODE=B.COST_CURRENCY_CODE, A.COST_UNIT_PRICE=B.COST_UNIT_PRICE, COST_LAST_UPDATE = SYSDATE "
        sql += "WHEN NOT MATCHED THEN INSERT (TENANT_NAME,COST_PRODUCT_SKU,PRD_DESCRIPTION,COST_CURRENCY_CODE,COST_UNIT_PRICE,COST_LAST_UPDATE)  "
        sql += "  VALUES (B.TENANT_NAME,B.COST_PRODUCT_SKU,B.PRD_DESCRIPTION,B.COST_CURRENCY_CODE,B.COST_UNIT_PRICE,SYSDATE)"

        cursor.execute(sql)
        connection.commit()
        #print("   Merge Completed, " + str(cursor.rowcount) + " rows merged")
        cursor.close()

    except cx_Oracle.DatabaseError as e:
        print("\nError manipulating database at update_price_list() - " + str(e) + "\n")
        raise SystemExit

    except Exception as e:
        raise Exception("\nError manipulating database at update_price_list() - " + str(e))
##########################################################################
# update_cost_reference
##########################################################################
def update_cost_reference(connection):
    """Insert any new distinct reference values from OCI_COST into OCI_COST_REFERENCE.

    Collects (TENANT_NAME, REF_TYPE, REF_NAME) tuples for services, compartment
    paths/names, regions, subscription ids and SKUs (SKU concatenated with its
    description) and inserts only combinations not already present — the MERGE
    has an insert-only branch, so existing rows are never touched.  Commits on
    success; prints and exits on a database error, re-raises anything else.
    """
    try:
        # open cursor
        cursor = connection.cursor()
        #print("\nMerging statistics into OCI_COST_REFERENCE...")
        # run merge to oci_update_stats
        sql = "merge into OCI_COST_REFERENCE a "
        sql += "using "
        sql += "( "
        sql += "    select TENANT_NAME, REF_TYPE, REF_NAME "
        sql += "    from "
        sql += "    ( "
        sql += "        select distinct TENANT_NAME, 'PRD_SERVICE' as REF_TYPE, PRD_SERVICE as REF_NAME from OCI_COST "
        sql += "        union all "
        # top-level compartment: keep the text before the first '/' of the path
        # NOTE(review): instr(...,' /') searches for "space slash" — looks like
        # it was meant to be '/'; confirm against the path format before changing.
        sql += "        select distinct TENANT_NAME, 'PRD_COMPARTMENT_PATH' as REF_TYPE, "
        sql += "        case when prd_compartment_path like '%/%' then substr(prd_compartment_path,1,instr(prd_compartment_path,' /')-1) "
        sql += "        else prd_compartment_path end as REF_NAME "
        sql += "        from OCI_COST "
        sql += "        union all "
        sql += "        select distinct TENANT_NAME, 'PRD_COMPARTMENT_NAME' as REF_TYPE, PRD_COMPARTMENT_NAME as ref_name from OCI_COST "
        sql += "        union all "
        sql += "        select distinct TENANT_NAME, 'PRD_REGION' as REF_TYPE, PRD_REGION as ref_name from OCI_COST "
        sql += "        union all "
        sql += "        select distinct TENANT_NAME, 'COST_SUBSCRIPTION_ID' as REF_TYPE, to_char(COST_SUBSCRIPTION_ID) as ref_name from OCI_COST "
        sql += "        union all "
        sql += "        select distinct TENANT_NAME, 'COST_PRODUCT_SKU' as REF_TYPE, COST_PRODUCT_SKU || ' '||min(PRD_DESCRIPTION) as ref_name from OCI_COST "
        sql += "        group by TENANT_NAME, COST_PRODUCT_SKU "
        sql += "    ) where ref_name is not null "
        sql += ") b "
        sql += "on (a.TENANT_NAME=b.TENANT_NAME and a.REF_TYPE=b.REF_TYPE and a.REF_NAME=b.REF_NAME) "
        sql += "when not matched then insert (TENANT_NAME,REF_TYPE,REF_NAME) "
        sql += "values (b.TENANT_NAME,b.REF_TYPE,b.REF_NAME)"
        cursor.execute(sql)
        connection.commit()
        #print("   Merge Completed, " + str(cursor.rowcount) + " rows merged")
        cursor.close()
    except cx_Oracle.DatabaseError as e:
        print("\nError manipulating database at update_cost_reference() - " + str(e) + "\n")
        raise SystemExit
    except Exception as e:
        raise Exception("\nError manipulating database at update_cost_reference() - " + str(e))
##########################################################################
# update_public_rates
##########################################################################
def update_public_rates(connection, tenant_name):
    """Fetch the public list price for every SKU in the tenant's OCI_PRICE_LIST.

    Queries the public itas pricing API once per (SKU, currency) row and writes
    the returned display name and PAY_AS_YOU_GO price back into the RATE_*
    columns of OCI_PRICE_LIST.  A per-SKU API failure is logged and the SKU is
    skipped; everything is committed in a single transaction at the end.
    """
    try:
        # open cursor
        num_rows = 0
        cursor = connection.cursor()
        api_url = "https://itra.oraclecloud.com/itas/.anon/myservices/api/v1/products?partNumber="
        #print("\nMerging Public Rates into OCI_RATE_CARD...")
        # retrieve the SKUS to query
        sql = "select COST_PRODUCT_SKU, COST_CURRENCY_CODE from OCI_PRICE_LIST where tenant_name=:tenant_name"
        cursor.execute(sql, {"tenant_name": tenant_name})
        rows = cursor.fetchall()
        if rows:
            for row in rows:
                rate_description = ""
                rate_price = None
                resp = None
                #######################################
                # Call API to fetch the SKU Data
                #######################################
                try:
                    cost_product_sku = str(row[0])
                    country_code = str(row[1])
                    # currency of the returned price is selected via this header
                    resp = requests.get(api_url + cost_product_sku, headers={'X-Oracle-Accept-CurrencyCode': country_code})
                    time.sleep(0.5)  # throttle between API calls
                except Exception as e:
                    print("\nWarning Calling REST API for Public Rate at update_public_rates() - " + str(e))
                    time.sleep(2)  # back off a little longer after a failure
                    continue
                # requests.Response is falsy for HTTP error status codes — skip those
                if not resp:
                    continue
                # last PAY_AS_YOU_GO price / displayName in the payload wins
                # NOTE(review): resp.json() can raise on a malformed body and
                # would abort the whole run — consider guarding; confirm intent.
                for item in resp.json()['items']:
                    rate_description = item["displayName"]
                    for price in item['prices']:
                        if price['model'] == 'PAY_AS_YOU_GO':
                            rate_price = price['value']
                # update database
                sql = "update OCI_PRICE_LIST set "
                sql += "RATE_DESCRIPTION=:rate_description, "
                sql += "RATE_PAYGO_PRICE=:rate_price, "
                sql += "RATE_MONTHLY_FLEX_PRICE=:rate_price, "
                sql += "RATE_UPDATE_DATE=sysdate "
                sql += "where TENANT_NAME=:tenant_name and COST_PRODUCT_SKU=:cost_product_sku "
                # only apply paygo cost after 7/13 oracle change rate
                sql_variables = {
                    "rate_description": rate_description,
                    "rate_price": rate_price,
                    "tenant_name": tenant_name,
                    "cost_product_sku": cost_product_sku
                }
                cursor.execute(sql, sql_variables)
                num_rows += 1
        # Commit
        connection.commit()
        #print("   Update Completed, " + str(num_rows) + " rows updated.")
        cursor.close()
    except cx_Oracle.DatabaseError as e:
        print("\nError manipulating database at update_public_rates() - " + str(e) + "\n")
        raise SystemExit
    except requests.exceptions.ConnectionError as e:
        print("\nError connecting to billing metering API at update_public_rates() - " + str(e))
    except Exception as e:
        raise Exception("\nError manipulating database at update_public_rates() - " + str(e))
##########################################################################
# update_usage_stats
##########################################################################
def update_usage_stats(connection):
    """Aggregate OCI_USAGE per tenant/file/usage-interval and merge into OCI_USAGE_STATS.

    Refreshes a stats row only when the stored row count no longer matches the
    aggregate; inserts unseen combinations.  Commits on success.  A database
    error is printed and terminates the process; other exceptions are
    re-raised with context.
    """
    # :version is bound from the module-level `version` string at execute time.
    merge_sql = (
        "merge into OCI_USAGE_STATS a "
        "using "
        "( "
        "    select "
        "        tenant_name, "
        "        file_id, "
        "        USAGE_INTERVAL_START, "
        "        count(*) NUM_ROWS "
        "    from "
        "        oci_usage "
        "    group by "
        "        tenant_name, "
        "        file_id, "
        "        USAGE_INTERVAL_START "
        ") b "
        "on (a.tenant_name=b.tenant_name and a.file_id=b.file_id and a.USAGE_INTERVAL_START=b.USAGE_INTERVAL_START) "
        "when matched then update set a.num_rows=b.num_rows, a.UPDATE_DATE=sysdate, a.AGENT_VERSION=:version "
        "where a.num_rows <> b.num_rows "
        "when not matched then insert (TENANT_NAME,FILE_ID,USAGE_INTERVAL_START,NUM_ROWS,UPDATE_DATE,AGENT_VERSION) "
        "  values (b.TENANT_NAME,b.FILE_ID,b.USAGE_INTERVAL_START,b.NUM_ROWS,sysdate,:version) "
    )
    try:
        cur = connection.cursor()
        cur.execute(merge_sql, {"version": version})
        connection.commit()
        cur.close()
    except cx_Oracle.DatabaseError as e:
        print("\nError manipulating database at update_usage_stats() - " + str(e) + "\n")
        raise SystemExit
    except Exception as e:
        raise Exception("\nError manipulating database at update_usage_stats() - " + str(e))
#########################################################################
# Load Cost File
##########################################################################
def load_cost_file(object_storage, object_file, max_file_id, cmd, tenancy, compartments):
    """Download one gzipped cost CSV report from Object Storage and ship it to ORDS.

    Steps:
      1. Skip the file when already loaded (<= ``max_file_id``) or filtered out
         by the ``--fileid`` / ``--filedate`` command-line options.
      2. Download and decompress the report, convert it to JSON with pandas and
         POST the whole document to the ``insertjson`` endpoint.
      3. POST every CSV row individually to the ``cost`` endpoint, patching a
         handful of legacy SKUs whose product description is missing.
      4. POST every distinct tag key seen in the file to ``costtags``.

    Returns 1 when the file was processed, 0 when it was skipped.  Any failure
    is printed and terminates the process (SystemExit).

    Fixes vs. original: removed an unreachable duplicate "B88269" elif branch,
    an unused ``csv.DictReader`` in the first decompression pass, and unused
    ``batch_size``/``array_size``/``data`` leftovers from the old bulk-insert
    code path; loop-invariant endpoint URLs are built once.
    """
    num_files = 0
    num_rows = 0
    try:
        o = object_file
        # distinct tag keys seen in this file
        tags_keys = []
        # file name without the object path; file_id drops the ".csv.gz" suffix
        filename = o.name.rsplit('/', 1)[-1]
        file_id = filename[:-7]
        file_time = str(o.time_created)[0:16]
        # if file already loaded, skip (check against max_file_id)
        if str(max_file_id) != "None":
            if file_id <= str(max_file_id):
                return num_files
        # if a specific file id was requested, process only that file
        if cmd.fileid:
            if file_id != cmd.fileid:
                return num_files
        # skip files created at or before the requested date
        if cmd.filedate:
            if file_time <= cmd.filedate:
                return num_files
        path_filename = work_report_dir + '/' + filename
        # download the report object in 1 MB chunks
        object_details = object_storage.get_object(usage_report_namespace, str(tenancy.id), o.name)
        with open(path_filename, 'wb') as f:
            for chunk in object_details.data.raw.stream(1024 * 1024, decode_content=False):
                f.write(chunk)
        # Pass 1: decompress to csv, convert to json and post the whole document
        with gzip.open(path_filename, 'rt') as file_in:
            f = open(path_filename[:-3], "w")
            f.write(file_in.read())
            f.close()
            df = pd.read_csv(path_filename[:-3])
            # path_filename[:-3][:-3] strips ".gz" then "csv", leaving "<name>."
            df.to_json(path_filename[:-3][:-3] + "json")
            f = open(path_filename[:-3][:-3] + "json", "r")
            dado = f.read()
            f.close()
            url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/insertjson'
            myobj = {'id_arquivo': filename[:-3][:-3], 'tenant_name': tenancy.name, 'tp_arquivo': 'cost', 'json': dado}
            x = requests.post(url, data=myobj)
        # Pass 2: re-read the csv and post each row to the cost endpoint
        with gzip.open(path_filename, 'rt') as file_in:
            csv_reader = csv.DictReader(file_in)
            url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/cost/' + str(tenancy.name)
            for row in csv_reader:
                # find compartment path for the row's compartment ocid
                compartment_path = ""
                for c in compartments:
                    if c['id'] == row['product/compartmentId']:
                        compartment_path = c['path']
                # Handle Tags up to 4000 chars with # separator
                tags_data = ""
                for (key, value) in row.items():
                    if 'tags' in key and len(value) > 0:
                        # remove # and = from the tags keys and value
                        keyadj = str(key).replace("tags/", "").replace("#", "").replace("=", "")
                        valueadj = str(value).replace("#", "").replace("=", "")
                        # check if length < 4000 to avoid overflowing the database column
                        if len(tags_data) + len(keyadj) + len(valueadj) + 2 < 4000:
                            tags_data += ("#" if tags_data == "" else "") + keyadj + "=" + valueadj + "#"
                            # remember the tag key for the costtags upload below
                            if keyadj not in tags_keys:
                                tags_keys.append(keyadj)
                # Assign each column to a variable to avoid errors if a column is missing from the file
                lineItem_intervalUsageStart = get_column_value_from_array('lineItem/intervalUsageStart', row)
                lineItem_intervalUsageEnd = get_column_value_from_array('lineItem/intervalUsageEnd', row)
                product_service = get_column_value_from_array('product/service', row)
                product_compartmentId = get_column_value_from_array('product/compartmentId', row)
                product_compartmentName = get_column_value_from_array('product/compartmentName', row)
                product_region = get_column_value_from_array('product/region', row)
                product_availabilityDomain = get_column_value_from_array('product/availabilityDomain', row)
                product_resourceId = get_column_value_from_array('product/resourceId', row)
                usage_billedQuantity = get_column_value_from_array('usage/billedQuantity', row)
                usage_billedQuantityOverage = get_column_value_from_array('usage/billedQuantityOverage', row)
                cost_subscriptionId = get_column_value_from_array('cost/subscriptionId', row)
                cost_productSku = get_column_value_from_array('cost/productSku', row)
                product_Description = get_column_value_from_array('product/Description', row)
                cost_unitPrice = get_column_value_from_array('cost/unitPrice', row)
                cost_unitPriceOverage = get_column_value_from_array('cost/unitPriceOverage', row)
                cost_myCost = get_column_value_from_array('cost/myCost', row)
                cost_myCostOverage = get_column_value_from_array('cost/myCostOverage', row)
                cost_currencyCode = get_column_value_from_array('cost/currencyCode', row)
                cost_overageFlag = get_column_value_from_array('cost/overageFlag', row)
                lineItem_isCorrection = get_column_value_from_array('lineItem/isCorrection', row)
                # OCI changed the column billingUnitReadable to skuUnitDescription
                if 'cost/skuUnitDescription' in row:
                    cost_billingUnitReadable = get_column_value_from_array('cost/skuUnitDescription', row)
                else:
                    cost_billingUnitReadable = get_column_value_from_array('cost/billingUnitReadable', row)
                # Fix OCI data for legacy SKUs that ship without a product description
                # (a duplicate, unreachable second "B88269" branch was removed here)
                if cost_productSku == "B88285" and product_Description == "":
                    product_Description = "Object Storage Classic"
                    cost_billingUnitReadable = "Gigabyte Storage Capacity per Month"
                elif cost_productSku == "B88272" and product_Description == "":
                    product_Description = "Compute Classic - Unassociated Static IP"
                    cost_billingUnitReadable = "IPs"
                elif cost_productSku == "B88166" and product_Description == "":
                    product_Description = "Oracle Identity Cloud - Standard"
                    cost_billingUnitReadable = "Active User per Hour"
                elif cost_productSku == "B88167" and product_Description == "":
                    product_Description = "Oracle Identity Cloud - Basic"
                    cost_billingUnitReadable = "Active User per Hour"
                elif cost_productSku == "B88168" and product_Description == "":
                    product_Description = "Oracle Identity Cloud - Basic - Consumer User"
                    cost_billingUnitReadable = "Active User per Hour"
                elif cost_productSku == "B88274" and product_Description == "":
                    product_Description = "Block Storage Classic"
                    cost_billingUnitReadable = "Gigabyte Storage Capacity per Month"
                elif cost_productSku == "B89164" and product_Description == "":
                    product_Description = "Oracle Security Monitoring and Compliance Edition"
                    cost_billingUnitReadable = "100 Entities Per Hour"
                elif cost_productSku == "B88269" and product_Description == "":
                    product_Description = "Compute Classic"
                    cost_billingUnitReadable = "OCPU Per Hour "
                elif cost_productSku == "B88275" and product_Description == "":
                    product_Description = "Block Storage Classic - High I/O"
                    cost_billingUnitReadable = "Gigabyte Storage Per Month"
                elif cost_productSku == "B88283" and product_Description == "":
                    product_Description = "Object Storage Classic - GET and all other Requests"
                    cost_billingUnitReadable = "10,000 Requests Per Month"
                elif cost_productSku == "B88284" and product_Description == "":
                    product_Description = "Object Storage Classic - PUT, COPY, POST or LIST Requests"
                    cost_billingUnitReadable = "10,000 Requests Per Month"
                num_rows += 1
                # a3/a4 convert "YYYY-MM-DDTHH:MM..." into "YYYY-MM-DD HH:MM"
                myobj = {
                    'a1': str(tenancy.name),
                    'a2': file_id,
                    'a3': lineItem_intervalUsageStart[0:10] + " " + lineItem_intervalUsageStart[11:16],
                    'a4': lineItem_intervalUsageEnd[0:10] + " " + lineItem_intervalUsageEnd[11:16],
                    'a5': product_service,
                    'a6': product_compartmentId,
                    'a7': product_compartmentName,
                    'a8': compartment_path,
                    'a9': product_region,
                    'a10': product_availabilityDomain,
                    'a11': product_resourceId,
                    'a12': usage_billedQuantity,
                    'a13': usage_billedQuantityOverage,
                    'a14': cost_subscriptionId,
                    'a15': cost_productSku,
                    'a16': product_Description,
                    'a17': cost_unitPrice,
                    'a18': cost_unitPriceOverage,
                    'a19': cost_myCost,
                    'a20': cost_myCostOverage,
                    'a21': cost_currencyCode,
                    'a22': cost_billingUnitReadable,
                    'a23': cost_overageFlag,
                    'a24': lineItem_isCorrection,
                    'a25': tags_data
                }
                x = requests.post(url, data=myobj)
        num_files += 1
        # remove the temporary csv.gz, csv and json files
        os.remove(path_filename)
        os.remove(path_filename[:-3])
        os.remove(path_filename[:-3][:-3] + "json")
        #######################################
        # post the distinct tag keys
        #######################################
        tags_url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/costtags/' + str(tenancy.name)
        for tag in tags_keys:
            x = requests.post(tags_url, data={'tag': tag})
        return num_files
    except Exception as e:
        print("\nload_cost_file() - Error Download Usage and insert to database 01 - " + str(e))
        raise SystemExit
#########################################################################
# Load Usage File
##########################################################################
def load_usage_file(object_storage, object_file, max_file_id, cmd, tenancy, compartments):
    """Download one gzipped usage CSV report from Object Storage and ship it to ORDS.

    Skips the file when already loaded (<= ``max_file_id``) or filtered out by
    the ``--fileid`` / ``--filedate`` options, then POSTs every CSV row to the
    ``usage`` endpoint and every distinct tag key to ``usagetags``.

    Returns 1 when the file was processed, 0 when it was skipped.  Any failure
    is printed and terminates the process (SystemExit).

    Fixes vs. original: the file-id filter read ``cmd.file_id`` (attribute does
    not exist — load_cost_file uses ``cmd.fileid``), which raised
    AttributeError whenever ``--fileid`` was supplied; also removed unused
    ``batch_size``/``array_size``/``data``/``row_data`` leftovers from the old
    bulk-insert code path and hoisted the loop-invariant endpoint URL.
    """
    num_files = 0
    num_rows = 0
    try:
        o = object_file
        # distinct tag keys seen in this file
        tags_keys = []
        # file name without the object path; file_id drops the ".csv.gz" suffix
        filename = o.name.rsplit('/', 1)[-1]
        file_id = filename[:-7]
        file_time = str(o.time_created)[0:16]
        # if file already loaded, skip (check against max_usage_file_id)
        if str(max_file_id) != "None":
            if file_id <= str(max_file_id):
                return num_files
        # if a specific file id was requested, process only that file
        # (bug fix: option attribute is cmd.fileid, not cmd.file_id)
        if cmd.fileid:
            if file_id != cmd.fileid:
                return num_files
        # skip files created at or before the requested date
        if cmd.filedate:
            if file_time <= cmd.filedate:
                return num_files
        path_filename = work_report_dir + '/' + filename
        # download the report object in 1 MB chunks
        object_details = object_storage.get_object(usage_report_namespace, str(tenancy.id), o.name)
        with open(path_filename, 'wb') as f:
            for chunk in object_details.data.raw.stream(1024 * 1024, decode_content=False):
                f.write(chunk)
        # read the csv and post each row to the usage endpoint
        with gzip.open(path_filename, 'rt') as file_in:
            csv_reader = csv.DictReader(file_in)
            url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usage/' + str(tenancy.name)
            for row in csv_reader:
                # find compartment path for the row's compartment ocid
                compartment_path = ""
                for c in compartments:
                    if c['id'] == row['product/compartmentId']:
                        compartment_path = c['path']
                # Handle Tags up to 3500 chars with # separator
                tags_data = ""
                for (key, value) in row.items():
                    if 'tags' in key and len(value) > 0:
                        # remove # and = from the tags keys and value
                        keyadj = str(key).replace("tags/", "").replace("#", "").replace("=", "")
                        valueadj = str(value).replace("#", "").replace("=", "")
                        # check if length < 3500 to avoid overflowing the database column
                        if len(tags_data) + len(keyadj) + len(valueadj) + 2 < 3500:
                            tags_data += ("#" if tags_data == "" else "") + keyadj + "=" + valueadj + "#"
                            # remember the tag key for the usagetags upload below
                            if keyadj not in tags_keys:
                                tags_keys.append(keyadj)
                # Assign each column to a variable to avoid errors if a column is missing from the file
                lineItem_intervalUsageStart = get_column_value_from_array('lineItem/intervalUsageStart', row)
                lineItem_intervalUsageEnd = get_column_value_from_array('lineItem/intervalUsageEnd', row)
                product_service = get_column_value_from_array('product/service', row)
                product_resource = get_column_value_from_array('product/resource', row)
                product_compartmentId = get_column_value_from_array('product/compartmentId', row)
                product_compartmentName = get_column_value_from_array('product/compartmentName', row)
                product_region = get_column_value_from_array('product/region', row)
                product_availabilityDomain = get_column_value_from_array('product/availabilityDomain', row)
                product_resourceId = get_column_value_from_array('product/resourceId', row)
                usage_billedQuantity = get_column_value_from_array('usage/billedQuantity', row)
                usage_consumedQuantity = get_column_value_from_array('usage/consumedQuantity', row)
                usage_consumedQuantityUnits = get_column_value_from_array('usage/consumedQuantityUnits', row)
                usage_consumedQuantityMeasure = get_column_value_from_array('usage/consumedQuantityMeasure', row)
                lineItem_isCorrection = get_column_value_from_array('lineItem/isCorrection', row)
                num_rows += 1
                # a3/a4 convert "YYYY-MM-DDTHH:MM..." into "YYYY-MM-DD HH:MM"
                myobj = {
                    'a1': str(tenancy.name),
                    'a2': file_id,
                    'a3': lineItem_intervalUsageStart[0:10] + " " + lineItem_intervalUsageStart[11:16],
                    'a4': lineItem_intervalUsageEnd[0:10] + " " + lineItem_intervalUsageEnd[11:16],
                    'a5': product_service,
                    'a6': product_resource,
                    'a7': product_compartmentId,
                    'a8': product_compartmentName,
                    'a9': compartment_path,
                    'a10': product_region,
                    'a11': product_availabilityDomain,
                    'a12': product_resourceId,
                    'a13': usage_billedQuantity,
                    'a14': usage_consumedQuantity,
                    'a15': usage_consumedQuantityUnits,
                    'a16': usage_consumedQuantityMeasure,
                    'a17': lineItem_isCorrection,
                    'a18': tags_data
                }
                x = requests.post(url, data=myobj)
        num_files += 1
        # remove the downloaded file
        os.remove(path_filename)
        #######################################
        # post the distinct tag keys
        #######################################
        tags_url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usagetags/' + str(tenancy.name)
        for tag in tags_keys:
            x = requests.post(tags_url, data={'tag': tag})
        return num_files
    except Exception as e:
        print("\nload_usage_file() - Error Download Usage and insert to database 02 - " + str(e))
        raise SystemExit
##########################################################################
# Main
##########################################################################
def main_process():
    """Entry point: load OCI usage and cost reports into the remote ORDS database.

    Parses command-line arguments, builds an OCI signer, resolves the tenancy,
    its home region and compartment tree, queries the ORDS endpoints for the
    POC status and the last loaded usage/cost file ids, then downloads and
    loads any newer report files from the usage-report Object Storage bucket.
    """
    cmd = set_parser_arguments()
    if cmd is None:
        exit()
    config, signer = create_signer(cmd)
    ############################################
    # Start
    ############################################
    #print_header("Running Usage Load to ADW", 0)
    #print("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    #print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
    ############################################
    # Identity extract compartments
    ############################################
    compartments = []
    tenancy = None
    try:
        #print("\nConnecting to Identity Service...")
        identity = oci.identity.IdentityClient(config, signer=signer)
        if cmd.proxy:
            identity.base_client.session.proxies = {'https': cmd.proxy}
        tenancy = identity.get_tenancy(config["tenancy"]).data
        tenancy_home_region = ""
        # find home region full name
        subscribed_regions = identity.list_region_subscriptions(tenancy.id).data
        for reg in subscribed_regions:
            if reg.is_home_region:
                tenancy_home_region = str(reg.region_name)
        #print("   Tenant Name : " + str(tenancy.name))
        #print("   Tenant Id   : " + tenancy.id)
        #print("   App Version : " + version)
        #print("   Home Region : " + tenancy_home_region)
        #print("")
        # set signer home region (usage reports live in the home region)
        signer.region = tenancy_home_region
        config['region'] = tenancy_home_region
        # Extract compartments
        compartments = identity_read_compartments(identity, tenancy)
    except Exception as e:
        print("\nError extracting compartments section - " + str(e) + "\n")
        raise SystemExit
    ############################################
    # connect to database
    ############################################
    max_usage_file_id = ""
    max_cost_file_id = ""
    poc_status = ""
    try:
        #print('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/pocstatus/' + str(tenancy.name))
        x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/pocstatus/' + str(tenancy.name))
        response = json.loads(x.text)
        #print(response['status'])
        poc_status = response['status']
        #print(poc_status)
        # Abort the run when the POC status says so.  sys.exit() raises
        # SystemExit, which `except Exception` below does NOT catch, so the
        # abort propagates.  NOTE(review): confirm the endpoint returns a
        # numeric status — if it is the string "3" this never matches.
        if (poc_status==3):
            #print('if')
            sys.exit()
        ###############################
        # fetch max file id processed
        # for usage and cost
        ###############################
        #print("\nChecking Last Loaded File...")
        #sql = "select /*+ full(a) parallel(a,4) */ nvl(max(file_id),'0') as file_id from OCI_USAGE a where TENANT_NAME=:tenant_name"
        #cursor.execute(sql, {"tenant_name": str(tenancy.name)})
        #max_usage_file_id, = cursor.fetchone()
        x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usage/' + str(tenancy.name))
        response = json.loads(x.text)
        #print(response['file_id'])
        max_usage_file_id = response['file_id']
        x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/cost/' + str(tenancy.name))
        response = json.loads(x.text)
        #print(response['file_id'])
        max_cost_file_id = response['file_id']
        #print("   Max Usage File Id Processed = " + str(max_usage_file_id))
        #print("   Max Cost  File Id Processed = " + str(max_cost_file_id))
    except Exception as e:
        raise Exception("\nError manipulating database - " + str(e))
    ############################################
    # Download Usage, cost and insert to database
    ############################################
    try:
        #print("\nConnecting to Object Storage Service...")
        object_storage = oci.object_storage.ObjectStorageClient(config, signer=signer)
        if cmd.proxy:
            object_storage.base_client.session.proxies = {'https': cmd.proxy}
        #print("   Connected")
        #############################
        # Handle Report Usage
        #############################
        usage_num = 0
        if not cmd.skip_usage:
            #print("\nHandling Usage Report...")
            # `start` resumes the lexicographic listing just after the last loaded file.
            # NOTE(review): only a single page (limit=999) is fetched — no
            # next_start_with pagination loop; confirm report counts stay below that.
            objects = object_storage.list_objects(usage_report_namespace, str(tenancy.id), fields="timeCreated,size", limit=999, prefix="reports/usage-csv/", start="reports/usage-csv/" + max_usage_file_id).data
            for object_file in objects.objects:
                usage_num += load_usage_file(object_storage, object_file, max_usage_file_id, cmd, tenancy, compartments)
            #print("\n   Total " + str(usage_num) + " Usage Files Loaded")
        #############################
        # Handle Cost Usage
        #############################
        cost_num = 0
        if not cmd.skip_cost:
            #print("\nHandling Cost Report...")
            objects = object_storage.list_objects(usage_report_namespace, str(tenancy.id), fields="timeCreated,size", limit=999, prefix="reports/cost-csv/", start="reports/cost-csv/" + max_cost_file_id).data
            for object_file in objects.objects:
                cost_num += load_cost_file(object_storage, object_file, max_cost_file_id, cmd, tenancy, compartments)
            #print("\n   Total " + str(cost_num) + " Cost Files Loaded")
        # Handle Index structure if not exist
        #check_database_index_structure_usage(connection)
        #check_database_index_structure_cost(connection)
        # Update oci_usage_stats and oci_cost_stats if there were files
        #if usage_num > 0:
        #    update_usage_stats(connection)
        #if cost_num > 0:
        #    update_cost_stats(connection)
        #    update_cost_reference(connection)
        #    update_price_list(connection)
        #    update_public_rates(connection, tenancy.name)
    except Exception as e:
        print("\nError Download Usage and insert to database 03 - " + str(e))
    ############################################
    # print completed
    ############################################
    #print("\nCompleted at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
##########################################################################
# Execute Main Process
##########################################################################
# NOTE(review): runs at import time as well; consider guarding with
# `if __name__ == "__main__":` so the module can be imported safely.
main_process()
| 1.859375 | 2 |
friendtrend.py | srodriguez1850/friendtrend | 0 | 12769090 | <gh_stars>0
#!/usr/bin/python
import plotly
import plotly.graph_objs as go
import preprocess_data as prepdata
import argparse
import json
import os
from plotly import tools
from collections import defaultdict, Counter
from fnvhash import fnv1a_32
from enum import Enum
## ENUMS ##
class TopKFormat(Enum):
GLOBAL_K = 0
MONTHLY = 1
## DEFINITIONS ##
# Messenger data year bounds — presumably the charted range; confirm at call sites
MESSENGER_START = 2009
MESSENGER_END = 2019
# how many top correspondents to show, and how that top-K set is chosen
TOP_K_PEOPLE = 10
TOP_K_FORMAT = TopKFormat.MONTHLY
# when False, rows whose name starts with 'facebookuser' are skipped (see generate_viz)
INCLUDE_FACEBOOKUSER = False
# when True, the alternate half of the 'name_suffix' key is displayed (see _SCRAMBLE_NAMES)
SCRAMBLE_NAMES = False
# output directory — usage not visible in this chunk; confirm downstream
VIZ_TARGET_DIRECTORY = 'viz'
# plotly marker styling used for the Scattergl traces
MARKER_SIZE = 15
MARKER_OUTLINE_SIZE = 1.5
# if actively editing the code and want a hardcoded location instead of params, edit these
IDE_DEBUGGING = False
IDE_DEBUGGING_NAME = '<NAME>'
## INTERNAL DEFINITIONS ##
# index into recp[0].split('_', 1): 0 keeps the part before the first underscore,
# 1 keeps the remainder — assumes keys look like "name_suffix"; TODO confirm format
_SCRAMBLE_NAMES = 0 if SCRAMBLE_NAMES is False else 1
## HELPER METHODS ##
#region
def name_to_color(name):
    """Deterministically map a name to a CSS ``'rgb(r, g, b)'`` color string.

    The FNV-1a 32-bit hash of the UTF-8 encoded name supplies the three
    high-order bytes as the red, green and blue channels, so the same name
    always gets the same plot color across runs.

    Fixes vs. original: the local variable shadowed the builtin ``hash`` and
    applied redundant ``int()`` conversions to values that are already ints.
    """
    digest = fnv1a_32(name.encode('utf-8'))
    r = (digest & 0xFF000000) >> 24
    g = (digest & 0x00FF0000) >> 16
    b = (digest & 0x0000FF00) >> 8
    return 'rgb({}, {}, {})'.format(r, g, b)
#endregion
def generate_viz(year_data, month_data, filename, title, hidenames, k, kformat):
    """Build and write an interactive Plotly report for one metric.

    Renders two stacked subplots (a line chart on top, a stacked bar chart
    below) of per-person counts, with one button per year to switch to that
    year's monthly view, an "Overview" button for the yearly view, and a
    linear/log toggle for the top y-axis.

    Bug fix: the per-year/overview buttons previously applied only the
    scatter-trace visibility vectors, so the bar traces in the second
    subplot were never toggled (``bbutton_visibility_vectors`` was computed
    but unused).  A Plotly updatemenu 'update' button applies its
    ``visible`` list across *all* traces in figure order, so the scatter
    and bar vectors are now concatenated.

    Args:
        year_data: dict mapping year keys to [(person, count), ...] lists.
        month_data: dict mapping year keys to {month: [(person, count), ...]}.
        filename: output HTML path handed to plotly.offline.plot.
        title: metric name shown in the figure title.
        hidenames: when True, show hashed identifiers instead of real names.
        k: number of top friends to display.
        kformat: truthy selects TopKFormat.GLOBAL_K, falsy TopKFormat.MONTHLY.
    """
    # Load flags (function-local overrides of the module-level defaults).
    SCRAMBLE_NAMES = hidenames
    _SCRAMBLE_NAMES = 0 if SCRAMBLE_NAMES is False else 1
    TOP_K_PEOPLE = k
    TOP_K_FORMAT = TopKFormat.GLOBAL_K if kformat else TopKFormat.MONTHLY

    # Load data
    json_year_count_data = year_data
    json_month_count_data = month_data

    # Parse scatterplot to dictionaries
    # region
    # Parse year counts
    year_count_dict = defaultdict(lambda: defaultdict(list))
    year_count_dict['METADATA']['total_counts'] = {}
    for y, v in json_year_count_data.items():
        for recp in v:
            # Skip deactivated/anonymous accounts unless explicitly included.
            if ((recp[0].split('_')[0] == 'facebookuser') and (INCLUDE_FACEBOOKUSER is False)):
                continue
            year_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]]['x_values'].append(str(y))
            year_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]]['y_values'].append(recp[1])
    for p in year_count_dict:
        if p == 'METADATA':
            continue
        year_count_dict[p]['total_count'] = sum(year_count_dict[p]['y_values'])
        year_count_dict[p]['plot_obj_scatter'] = go.Scattergl(x=year_count_dict[p]['x_values'], y=year_count_dict[p]['y_values'], mode='lines+markers', name=p, visible=True, marker = dict(size=MARKER_SIZE, color=name_to_color(p), line=dict(width=MARKER_OUTLINE_SIZE)), hoverinfo="y+name")
        year_count_dict['METADATA']['total_counts'][p] = year_count_dict[p]['total_count']

    # Parse month counts (12 graphs per month)
    month_count_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for y, v in json_month_count_data.items():
        month_count_dict['METADATA'][y]['total_counts'] = {}
        for m, v0 in v.items():
            for recp in v0:
                if ((recp[0].split('_')[0] == 'facebookuser') and (INCLUDE_FACEBOOKUSER is False)):
                    continue
                month_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]][y]['x_values'].append(str(m))
                month_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]][y]['y_values'].append(recp[1])
    for p in month_count_dict:
        if p == 'METADATA':
            continue
        for y, v in month_count_dict[p].items():
            month_count_dict[p][y]['total_counts'] = sum(month_count_dict[p][y]['y_values'])
            month_count_dict[p][y]['plot_obj_scatter'] = go.Scattergl(x=month_count_dict[p][y]['x_values'], y=month_count_dict[p][y]['y_values'], mode='lines+markers', name=p, visible=False, marker = dict(size=MARKER_SIZE, color=name_to_color(p), line=dict(width=MARKER_OUTLINE_SIZE)), hoverinfo="y+name")
            month_count_dict['METADATA'][y]['total_counts'][p] = month_count_dict[p][y]['total_counts']
    # endregion

    # Parse boxplot to dictionaries
    # region
    # Parse year counts
    byear_count_dict = defaultdict(lambda: defaultdict(list))
    byear_count_dict['METADATA']['total_counts'] = {}
    for y, v in json_year_count_data.items():
        for recp in v:
            if ((recp[0].split('_')[0] == 'facebookuser') and (INCLUDE_FACEBOOKUSER is False)):
                continue
            byear_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]]['x_values'].append(str(y))
            byear_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]]['y_values'].append(recp[1])
    for p in byear_count_dict:
        if p == 'METADATA':
            continue
        byear_count_dict[p]['total_count'] = sum(byear_count_dict[p]['y_values'])
        byear_count_dict[p]['plot_obj_scatter'] = go.Bar(x=byear_count_dict[p]['x_values'], y=byear_count_dict[p]['y_values'], name=p, showlegend=False, visible=True, marker = dict(color=name_to_color(p)), hoverinfo="y+name")
        byear_count_dict['METADATA']['total_counts'][p] = byear_count_dict[p]['total_count']

    # Parse month counts (12 graphs per month)
    bmonth_count_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for y, v in json_month_count_data.items():
        bmonth_count_dict['METADATA'][y]['total_counts'] = {}
        for m, v0 in v.items():
            for recp in v0:
                if ((recp[0].split('_')[0] == 'facebookuser') and (INCLUDE_FACEBOOKUSER is False)):
                    continue
                bmonth_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]][y]['x_values'].append(str(m))
                bmonth_count_dict[recp[0].split('_', 1)[_SCRAMBLE_NAMES]][y]['y_values'].append(recp[1])
    for p in bmonth_count_dict:
        if p == 'METADATA':
            continue
        for y, v in bmonth_count_dict[p].items():
            bmonth_count_dict[p][y]['total_counts'] = sum(bmonth_count_dict[p][y]['y_values'])
            bmonth_count_dict[p][y]['plot_obj_scatter'] = go.Bar(x=bmonth_count_dict[p][y]['x_values'], y=bmonth_count_dict[p][y]['y_values'], name=p, showlegend=False, visible=False, marker = dict(color=name_to_color(p)), hoverinfo="y+name")
            bmonth_count_dict['METADATA'][y]['total_counts'][p] = bmonth_count_dict[p][y]['total_counts']
    # endregion

    # Keep trace statuses for button interactivity
    # region
    trace_status = dict()
    trace_status['year_view'] = []
    trace_status['month_view'] = {}
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        trace_status['month_view'][str(y) + '-01-01'] = []

    btrace_status = dict()
    btrace_status['year_view'] = []
    btrace_status['month_view'] = {}
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        btrace_status['month_view'][str(y) + '-01-01'] = []
    # endregion

    # Generate list of top people (based on option)
    # region
    top_ppl_years = defaultdict(int)
    top_ppl_months = dict()
    btop_ppl_years = defaultdict(int)
    btop_ppl_months = dict()
    if TOP_K_FORMAT is TopKFormat.GLOBAL_K:
        # populate year view
        top_ppl_years = Counter(year_count_dict['METADATA']['total_counts']).most_common(TOP_K_PEOPLE)
        btop_ppl_years = Counter(byear_count_dict['METADATA']['total_counts']).most_common(TOP_K_PEOPLE)
        # populate month view
        for y in month_count_dict['METADATA']:
            top_ppl_months[y] = Counter(month_count_dict['METADATA'][y]['total_counts']).most_common(TOP_K_PEOPLE)
        for y in bmonth_count_dict['METADATA']:
            btop_ppl_months[y] = Counter(bmonth_count_dict['METADATA'][y]['total_counts']).most_common(TOP_K_PEOPLE)
    elif TOP_K_FORMAT is TopKFormat.MONTHLY:
        # populate month view
        for y in month_count_dict['METADATA']:
            top_ppl_months[y] = Counter(month_count_dict['METADATA'][y]['total_counts']).most_common(TOP_K_PEOPLE)
        for y in bmonth_count_dict['METADATA']:
            btop_ppl_months[y] = Counter(bmonth_count_dict['METADATA'][y]['total_counts']).most_common(TOP_K_PEOPLE)
        # use month view to populate year view
        for y in top_ppl_months:
            for p in top_ppl_months[y]:
                top_ppl_years[p[0]] += p[1]
        for y in btop_ppl_months:
            for p in btop_ppl_months[y]:
                btop_ppl_years[p[0]] += p[1]
    # endregion

    # Populate data based on top people
    # region
    # Scatter
    data = list()
    if TOP_K_FORMAT is TopKFormat.GLOBAL_K:
        for p in top_ppl_years:
            data.append(year_count_dict[p[0]]['plot_obj_scatter'])
            trace_status['year_view'].append({
                'person': p,
            })
    elif TOP_K_FORMAT is TopKFormat.MONTHLY:
        for p in sorted(top_ppl_years, key=top_ppl_years.get, reverse=True):
            data.append(year_count_dict[p]['plot_obj_scatter'])
            trace_status['year_view'].append({
                'person': p,
            })
    # Monthly traces are appended for every year regardless of the top-K mode;
    # top_ppl_months is populated in both branches above.
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        y0 = str(y) + '-01-01'
        for j in top_ppl_months[y0]:
            if (month_count_dict[j[0]][y0]['plot_obj_scatter'] == []):
                continue
            data.append(month_count_dict[j[0]][y0]['plot_obj_scatter'])
            trace_status['month_view'][y0].append({
                'person': j[0],
                'year': y
            })

    # Box
    bdata = list()
    if TOP_K_FORMAT is TopKFormat.GLOBAL_K:
        for p in btop_ppl_years:
            bdata.append(byear_count_dict[p[0]]['plot_obj_scatter'])
            btrace_status['year_view'].append({
                'person': p,
            })
    elif TOP_K_FORMAT is TopKFormat.MONTHLY:
        for p in sorted(btop_ppl_years, key=btop_ppl_years.get, reverse=True):
            bdata.append(byear_count_dict[p]['plot_obj_scatter'])
            btrace_status['year_view'].append({
                'person': p,
            })
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        y0 = str(y) + '-01-01'
        for j in btop_ppl_months[y0]:
            if (bmonth_count_dict[j[0]][y0]['plot_obj_scatter'] == []):
                continue
            bdata.append(bmonth_count_dict[j[0]][y0]['plot_obj_scatter'])
            btrace_status['month_view'][y0].append({
                'person': j[0],
                'year': y
            })
    # endregion

    # Generate list of visibilities for button displays
    # region
    # Vector 0 is the "Overview" (year view); vectors 1..N are one per year,
    # each aligned with the order traces were appended above.
    # Scatter
    button_visibility_vectors = list()
    vector = list()
    for t in trace_status['year_view']:  # t is dict
        vector.append(True)
    for t, v in trace_status['month_view'].items():
        for y in v:
            vector.append(False)
    button_visibility_vectors.append(vector)
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        vector = list()
        for t in trace_status['year_view']:  # t is dict
            vector.append(False)
        for t, v in trace_status['month_view'].items():
            for year in v:
                if year['year'] == y:
                    vector.append(True)
                else:
                    vector.append(False)
        button_visibility_vectors.append(vector)

    # Box
    bbutton_visibility_vectors = list()
    vector = list()
    for t in btrace_status['year_view']:  # t is dict
        vector.append(True)
    for t, v in btrace_status['month_view'].items():
        for y in v:
            vector.append(False)
    bbutton_visibility_vectors.append(vector)
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        vector = list()
        for t in btrace_status['year_view']:  # t is dict
            vector.append(False)
        for t, v in btrace_status['month_view'].items():
            for year in v:
                if year['year'] == y:
                    vector.append(True)
                else:
                    vector.append(False)
        bbutton_visibility_vectors.append(vector)
    # endregion

    # Generate buttons, menus, and UI
    # region
    buttons = list()
    for y in range(MESSENGER_START, MESSENGER_END + 1):
        buttons.append(
            dict(
                label = y,
                method = 'update',
                args = [
                    # Fix: concatenate scatter and bar visibilities so the
                    # 'visible' list covers all traces (fig holds data + bdata).
                    {'visible': button_visibility_vectors[y - MESSENGER_START + 1] + bbutton_visibility_vectors[y - MESSENGER_START + 1]},
                    {'title': 'FriendTrend - {}: {}'.format(title, str(y)) }
                ]
            ))
    buttons.append(dict(
        label = 'Overview',
        method = 'update',
        args = [
            {'visible': button_visibility_vectors[0] + bbutton_visibility_vectors[0]},
            {'title': 'FriendTrend - {}'.format(title) }
        ]
    ))
    updatemenus=list([
        dict(
            type = 'buttons',
            buttons=buttons,
            active=11,  # index of the 'Overview' button (after 11 year buttons)
            direction = 'left',
            showactive = True,
            x = 0.5,
            xanchor = 'auto',
            y = -0.1,
            yanchor = 'bottom'
        ),
        dict(
            type='dropdown',
            buttons=list([
                dict(label = 'Linear',
                     method = 'relayout',
                     args = [dict(yaxis=dict(type='linear', autorange=True, domain=[0.51, 1]), yaxis2=dict(domain=[0, 0.49]))]),
                dict(label = 'Log',
                     method = 'relayout',
                     args = [dict(yaxis=dict(type='log', autorange=True, domain=[0.51, 1]), yaxis2=dict(domain=[0, 0.49]))])]),
            direction = 'down',
            showactive = True,
            x = -0.1,
            xanchor = 'left',
            y = 1,
            yanchor = 'top'
        )])

    # Row 1: scatter traces, row 2: bar traces; figure trace order is
    # therefore data followed by bdata, matching the visibility vectors.
    fig = tools.make_subplots(rows=2, cols=1)
    for t in data:
        fig.append_trace(t, 1, 1)
    for t in bdata:
        fig.append_trace(t, 2, 1)
    fig['layout'].update(
        title='FriendTrend - ' + str(title),
        autosize=True,
        updatemenus=updatemenus,
        hovermode='closest',
        barmode='stack',
        showlegend=True,
        legend=dict(traceorder='normal'),
        yaxis1=dict(
            type='linear',
            autorange=True,
            domain=[0.51, 1]
        ),
        yaxis2=dict(
            type='linear',
            autorange=True,
            domain=[0, 0.49]
        ),
        xaxis1=dict(
            type='date',
            tickformat='%b %Y',
            showgrid=True,
            autorange=True,
            visible=True
        ),
        xaxis2=dict(
            type='date',
            tickformat='%b %Y',
            showgrid=True,
            autorange=True,
            visible=True
        ))
    config = {
        'modeBarButtonsToRemove' : ['toImage', 'select2d', 'lasso2d', 'toggleSpikelines']
    }
    # endregion

    # PLOT!
    plotly.offline.plot(fig, auto_open=True, config=config, filename=filename)
if __name__ == "__main__":
    # Ensure the output directory exists before writing any visualization.
    try:
        os.mkdir(VIZ_TARGET_DIRECTORY)
    except FileExistsError:
        pass
    if IDE_DEBUGGING is False:
        parser = argparse.ArgumentParser()
        parser.add_argument('--datapath', default='data/messages/inbox/', help='path to the messages/inbox sub-directory in the Messenger data dump')
        parser.add_argument('--hidenames', default=False, action='store_true', help='whether or not to use real names in the visualization')
        parser.add_argument('--topk', default=10, type=int, help='number of top K friends to show')
        parser.add_argument('--topkglobal', default=False, action='store_true', help='populate the top K friends globally and display <K for each monthly view')
        parser.add_argument('name', help='your Facebook display name (as written)')
        args = parser.parse_args()
        datasets = prepdata.main(args.datapath, args.name, False)
        generate_viz(datasets.messages_yearly, datasets.messages_monthly, VIZ_TARGET_DIRECTORY + '/' + args.name + '-messages.html', 'Total Messages', args.hidenames, args.topk, args.topkglobal)
        generate_viz(datasets.days_interacted_yearly, datasets.days_interacted_monthly, VIZ_TARGET_DIRECTORY + '/' + args.name + '-daysinteracted.html', 'Days Interacted', args.hidenames, args.topk, args.topkglobal)
    else:
        # Bug fix: this branch previously referenced `args`, which only exists
        # in the CLI branch above (NameError), and passed extra positional
        # arguments to prepdata.main().  Use the hardcoded module-level debug
        # settings instead.
        datasets = prepdata.main('data/messages/inbox/', IDE_DEBUGGING_NAME, False)
        generate_viz(datasets.messages_yearly, datasets.messages_monthly, VIZ_TARGET_DIRECTORY + '/' + IDE_DEBUGGING_NAME + '-messages.html', 'Total Messages', SCRAMBLE_NAMES, TOP_K_PEOPLE, TOP_K_FORMAT is TopKFormat.GLOBAL_K)
        generate_viz(datasets.days_interacted_yearly, datasets.days_interacted_monthly, VIZ_TARGET_DIRECTORY + '/' + IDE_DEBUGGING_NAME + '-daysinteracted.html', 'Days Interacted', SCRAMBLE_NAMES, TOP_K_PEOPLE, TOP_K_FORMAT is TopKFormat.GLOBAL_K)
| 2.078125 | 2 |
SUSOD/config.py | whale-net/SUSOD | 1 | 12769091 | """
SUSOD dev config.
Currently holds only minimal development defaults.
"""
import os
# Flask application mount point.
APPLICATION_ROOT = '/'
# NOTE(review): placeholder secret — must be replaced with a securely
# generated value before any non-development deployment.
SECRET_KEY = b'tobegenerated'
SESSION_COOKIE_NAME = 'login_name'
# Directory for file uploads
# currently not used
# Resolves to <repo-parent>/var/uploads relative to this config file.
UPLOAD_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'var',
    'uploads',
)
# Database configuration
DATABASE_HOSTNAME = 'localhost'
DATABASE_NAME = 'dbSUSOD'
DATABASE_USERNAME = 'susod'
# NOTE(review): hardcoded dev credential — supply via environment/secret
# store in production.
DATABASE_PASSWORD = 'password'
| 1.601563 | 2 |
06/06_P16.py | monikuri/2110101_Com_Prog | 0 | 12769092 | <filename>06/06_P16.py
# Read a comma-separated height map and count how many times the height
# comes back up to a non-negative value after a stretch of negative values
# (i.e. the number of negative -> non-negative transitions).
heights = [int(token.strip()) for token in input().strip().split(',')]
transitions = 0
below_zero = False
for level in heights:
    if level < 0:
        below_zero = True
    elif below_zero:
        transitions += 1
        below_zero = False
print(transitions)
polls/tests.py | davidefabbrico/Progetto_School | 1 | 12769093 | import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
| 1.375 | 1 |
tests/main_test.py | NaveenKumarGorantla/cs5293sp21-project0 | 0 | 12769094 | import pytest
#from project0 import project0
from project0 import main
def test_fetchincidents():
    """Downloading a Norman PD incident summary PDF should yield raw bytes."""
    pdf_url = "https://www.normanok.gov/sites/default/files/documents/2021-03/2021-03-03_daily_incident_summary.pdf"
    payload = main.fetchincidents(pdf_url)
    assert type(payload) == bytes
def test_extractincidents():
    """Parsing a downloaded incident PDF should produce a list of records."""
    pdf_url = "https://www.normanok.gov/sites/default/files/documents/2021-03/2021-03-03_daily_incident_summary.pdf"
    rows = main.extractincidents(main.fetchincidents(pdf_url))
    assert type(rows) == list
def test_createdb():
    """createdb should report the expected sqlite database filename."""
    assert main.createdb() == 'normanpd.db'
def test_populatedb():
    # Placeholder: populatedb is not yet covered by a standalone check.
    assert True
def test_status():
    # Placeholder: status output is not yet covered by a standalone check.
    assert True
| 2.5 | 2 |
sensor-button/button.py | sahuyash/raspberrypi-examples | 41 | 12769095 | <filename>sensor-button/button.py
"""
Read button state via GPIO edge-detection callback functions.
"""
import time

import RPi.GPIO as GPIO
# BCM pin numbers for the two push buttons.
btnPin1 = 27
btnPin2 = 22
# Configure both pins as inputs with internal pull-downs, so an unpressed
# button reads low and a press produces a rising edge.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(btnPin1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(btnPin2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def eventB1(channel):
    """Edge-detection callback for button 1; logs the triggering channel."""
    print("Button 1 pressed")
    print(channel)
def eventB2(e):
    """Edge-detection callback for button 2; logs the triggering channel.

    Bug fix: the message previously said "Button 1 pressed" (copy-paste
    error), making the two buttons indistinguishable in the output.
    """
    print("Button 2 pressed")
    print(e)
# Register rising-edge callbacks; bouncetime debounces presses for 200 ms.
GPIO.add_event_detect(btnPin1, GPIO.RISING, bouncetime=200, callback=eventB1)
GPIO.add_event_detect(btnPin2, GPIO.RISING, bouncetime=200, callback=eventB2)
# Idle loop keeping the process alive for the event callbacks.
# NOTE(review): `time` is used here but was not imported at the top of the
# file as originally written — this loop raises NameError without
# `import time`.
while(True):
    time.sleep(0.1)
| 3.421875 | 3 |
options/model_options/__init__.py | chorseng/UMD | 48 | 12769096 | <gh_stars>10-100
from .context_encoder_options import ContextEncoderOption
from .image_encoder_options import ImageEncoderOption, ContextImageEncoderOption, ProductImageEncoderOption
from .mfb_fusion_options import MFBFusionOption, ContextMFBFusionOption, ContextAttentionMFBFusionOption, \
ProductMFBFusionOption, ProductAttentionMFBFusionOption
from .similarity_options import SimilarityOption
from .text_decoder_options import TextDecoderOption
from .text_encoder_options import TextEncoderOption, ContextTextEncoderOption, ProductTextEncoderOption | 1.03125 | 1 |
users/serializers.py | nikolamatijas/DjangoAndReact | 0 | 12769097 | from rest_framework import serializers
from users.models import User, Permission, Role
class RoleRelatedField(serializers.RelatedField):
    """Related field that nests full role data on read and accepts a PK on write."""

    def to_representation(self, instance):
        # Serialize the related Role into its full nested representation.
        return RoleSerializer(instance).data

    def to_internal_value(self, data):
        # Incoming payload carries the role's primary key; resolve it to a
        # Role instance (raises DoesNotExist for an unknown pk).
        return self.queryset.get(pk=data)
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User accounts.

    The password is write-only and is always stored hashed via
    ``set_password`` rather than assigned as a plain model field.
    """
    role = RoleRelatedField(many=False, queryset=Role.objects.all())

    class Meta:
        model = User
        fields = ['id', 'first_name', 'last_name', 'email', 'password', 'role']
        extra_kwargs = {
            'password': {'write_only': True}
        }

    def create(self, validated_data):
        """Create a user, hashing the password before the initial save."""
        password = validated_data.pop('password', None)
        instance = self.Meta.model(**validated_data)
        if password is not None:
            instance.set_password(password)
        instance.save()
        return instance

    def update(self, instance, validated_data):
        """Update a user, hashing any new password.

        Bug fix: the previous implementation only handled the password and
        silently discarded every other validated field (first_name, email,
        role, ...), so non-password updates were never persisted.
        """
        password = validated_data.pop('password', None)
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        if password is not None:
            instance.set_password(password)
        instance.save()
        return instance
class PermissionRelatedField(serializers.StringRelatedField):
    """String-related field that nests full permission data on read."""

    def to_representation(self, value):
        # Serialize the related Permission into its full nested representation.
        return PermissionSerializer(value).data

    def to_internal_value(self, data):
        # NOTE(review): returns the raw payload unchanged (unlike
        # RoleRelatedField, which resolves a pk to an instance).  Presumably
        # the caller passes objects/pks that the m2m manager accepts directly
        # — confirm against RoleSerializer.create usage.
        return data
class PermissionSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every Permission field."""

    class Meta:
        model = Permission
        fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
    """Serializer for Role, nesting full permission details."""
    permissions = PermissionRelatedField(many=True)

    class Meta:
        model = Role
        fields = '__all__'

    def create(self, validated_data):
        """Create a role and attach its permissions.

        Bug fix: the previous ``pop('permissions', None)`` could yield
        ``None``, and ``instance.permissions.add(*None)`` raises TypeError;
        default to an empty list and only touch the m2m relation when
        permissions were supplied.
        """
        permissions = validated_data.pop('permissions', [])
        instance = self.Meta.model(**validated_data)
        instance.save()
        if permissions:
            instance.permissions.add(*permissions)
            instance.save()
        return instance
| 2.234375 | 2 |
hphp/tools/benchy/benchy_config.py | ng0ctrinh/hhvm | 1 | 12769098 | #!/usr/bin/env python
"""Configuration loader for benchy benchmark harness.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
def _load():
    """Initialize and return the singleton config dictionary.

    On first call, derives repo-relative paths from this file's location,
    merges in per-user settings read from ``~/.benchy`` (JSON with
    ``work_dir``, ``build_dir`` and ``platform`` keys), and caches the
    result on the function object; later calls return the cached dict.
    """
    config = _load.config
    if config is not None:
        # Already initialized: return the cached singleton.
        return _load.config
    # Derive directory layout relative to this file:
    # benchy_dir = .../hphp/tools/benchy, up to tools, base (hphp), fbcode.
    benchy_dir = os.path.dirname(os.path.realpath(__file__))
    tools_dir = os.path.dirname(benchy_dir)
    base_dir = os.path.dirname(tools_dir)
    fbcode_dir = os.path.dirname(base_dir)
    benchmark_dir = os.path.join(base_dir, 'benchmarks', 'php-octane')
    _load.config = {
        'ANYMEAN_PATH': os.path.join(benchy_dir, 'any_mean.py'),
        'BENCHMARK_DIR': benchmark_dir,
        'BENCH_ENTRY_PATH': os.path.join(benchmark_dir, 'harness-run.php'),
        # fbcode_dir[1:] strips the leading path separator to make a
        # root-relative path fragment.
        'BUILD_INTERNAL_PATH': os.path.join(fbcode_dir[1:], '_build',
                                            'opt', 'hphp'),
        'HARNESS_PATH': os.path.join(benchy_dir, 'benchy_harness.py'),
        'INCLUDE_PATH': os.path.join(benchmark_dir, 'include.php'),
        'SIGNIFICANCE_PATH': os.path.join(benchy_dir, 'significance.py'),
        'SUITES_PATH': os.path.join(benchmark_dir, 'suites.json'),
        'VERSION': 1,
        'WRAPPER_PATH': os.path.join(tools_dir, 'hhvm_wrapper.php'),
    }
    # Merge per-user settings from ~/.benchy (raises if the file is missing
    # or not valid JSON).
    home_dir = os.path.expanduser('~')
    config_path = os.path.join(home_dir, '.benchy')
    with open(config_path, 'r') as config_file:
        tmp = json.load(config_file)
    work_dir = _load.config['WORK_DIR'] = tmp['work_dir']
    _load.config['BUILD_ROOT'] = tmp['build_dir']
    _load.config['RUNSCRIPT_PATH'] = os.path.join(work_dir, 'runscript')
    _load.config['RUNLOG_PATH'] = os.path.join(work_dir, 'runlog')
    _load.config['PERF_PATH'] = os.path.join(work_dir, 'perf')
    _load.config['TMP_PATH'] = os.path.join(work_dir, 'tmp')
    _load.config['PLATFORM'] = "%s_platform" % tmp['platform']
    return _load.config
# Sentinel marking the singleton as not-yet-initialized.
_load.config = None
def _get(key):
    """Return the config value for *key*, or None when the key is absent."""
    return _load().get(key)
# Module-level convenience constants, resolved once at import time from the
# singleton config (each is None when the underlying key is absent).
ANYMEAN_PATH = _get('ANYMEAN_PATH')
BENCHMARK_DIR = _get('BENCHMARK_DIR')
BENCH_ENTRY_PATH = _get('BENCH_ENTRY_PATH')
BUILD_ROOT = _get('BUILD_ROOT')
BUILD_INTERNAL_PATH = _get('BUILD_INTERNAL_PATH')
HARNESS_PATH = _get('HARNESS_PATH')
INCLUDE_PATH = _get('INCLUDE_PATH')
PERF_PATH = _get('PERF_PATH')
PLATFORM = _get('PLATFORM')
RUNLOG_PATH = _get('RUNLOG_PATH')
RUNSCRIPT_PATH = _get('RUNSCRIPT_PATH')
SIGNIFICANCE_PATH = _get('SIGNIFICANCE_PATH')
SUITES_PATH = _get('SUITES_PATH')
TMP_PATH = _get('TMP_PATH')
VERSION = _get('VERSION')
WORK_DIR = _get('WORK_DIR')
WRAPPER_PATH = _get('WRAPPER_PATH')
| 2.046875 | 2 |
utils.py | existme/har-to-seq | 0 | 12769099 | import json
def read_json_file(filename):
    """Parse *filename* as JSON and return the resulting object.

    Fix: uses a context manager so the file handle is closed even when
    ``json.load`` raises (the original leaked the handle on parse errors).
    """
    with open(filename, 'r') as f:
        return json.load(f)
def find_nth(haystack, needle, n):
    """Return the index of the *n*-th occurrence of *needle* in *haystack*.

    Returns -1 when there are fewer than *n* occurrences.  For n <= 1 this
    behaves like ``haystack.find(needle)``.
    """
    pos = haystack.find(needle)
    for _ in range(n - 1):
        if pos < 0:
            break
        pos = haystack.find(needle, pos + len(needle))
    return pos
def find_base_url(url):
    """Truncate *url* just before its fourth '/' occurrence.

    When fewer than four slashes exist, find_nth returns -1 and the slice
    drops the final character (preserving the original behavior).
    """
    cut = find_nth(url, '/', 4)
    return url[:cut]
| 3.203125 | 3 |
check_diff.py | JoDaTy/pyats-check-os | 6 | 12769100 | <reponame>JoDaTy/pyats-check-os<gh_stars>1-10
import toolbox.pyats_diff as diff
import toolbox.database as db
import argparse
import sys
import json
import pprint
# Parsing the args
description = "Print the differences for a specific test and hostname."
test_help = '''Possible completions:
- route_summary > show ip route summary
- routes > show ip route
- isis > show ip isis neighbors
- xconnect > show xconnect all
- cpu > `show cpu processes`
'''
# Valid values mirrored from test_help; keep the two in sync.
test_help_list = ["route_summary", "routes", "isis", "xconnect", "cpu"]
when_help = '''Possible completions:
- both > for a diff before/after
- after > for a specific output `after`
- before > for a specific output `before`'''
when_help_list = ["both", "after", "before"]
parser = argparse.ArgumentParser(description=description, usage='use "%(prog)s --help" for more information', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--hostname', dest='hostname', help = 'Hostname of the device', required = True)
parser.add_argument('--testname', dest='test_name', help = test_help, required = True)
parser.add_argument('--when', dest='when', default="both", help = when_help)
# parse_known_args lets extra options pass through; leftovers are written
# back into sys.argv for any downstream consumer.
args, sys.argv[1:] = parser.parse_known_args(sys.argv[1:])
hostname = args.hostname
test_name = args.test_name
when = args.when
# Asserts
## String is in the list
# for device in (device for device in testbed if device.is_connected() == True)
# NOTE(review): `assert` statements are stripped when Python runs with -O;
# input validation would be more robust via parser.error()/raise.
assert([item for item in test_help_list if test_name == item]), "Provided `test` is not in the list. Use --help for possible completions."
assert([item for item in when_help_list if when == item]), "Provided `when` is not in the list. Use --help for possible completions."
# Sends a diff
if when == "both":
    # Diff the stored before/after outputs for this host and test.
    diff.compare_output_before_after(hostname, test_name)
else:
    # Pretty-print the single stored output ("before" or "after").
    pp = pprint.PrettyPrinter(indent=1)
    output = db.get_output_test(hostname, test_name, when)
    pp.pprint(json.loads(output))
spotlob/tests/test_feature_filter.py | fa-me/spotlob | 1 | 12769101 | import unittest
import numpy as np
from numpy.testing import assert_array_equal,\
assert_array_almost_equal, assert_almost_equal
from .image_generation import binary_circle_border
from ..spim import Spim, SpimStage
from ..process_opencv import ContourFinderSimple, FeatureFormFilter
class FeatureFilterTestCase(unittest.TestCase):
    """Feature filtering on synthetic binary circles touching an image edge."""

    seed = 0          # forwarded to the image generator for reproducibility
    repetitions = 20  # number of random border picks per test run

    def test_binary_circle_left_border_filter(self):
        """A circle touching any image border must be removed by the filter."""
        h, w = [1000, 2000]

        contour_finder = ContourFinderSimple()
        feature_filter = FeatureFormFilter(size=0,
                                           solidity=0.9,
                                           remove_on_edge=True)

        for i in range(self.repetitions):
            # randomly select a border
            # Bug fix: np.random.randint's `high` bound is exclusive, so the
            # original high=3 could never yield index 3 and the "bottom"
            # border was never exercised; high=4 covers all four borders.
            j = np.random.randint(low=0, high=4)
            border = ["left", "right", "top", "bottom"][j]

            circ_im, exp_pos, exp_radius = binary_circle_border(
                border,
                shape=(h, w),
                val_type=np.uint8,
                seed=self.seed)

            # Generated image must be strictly binary (0/255).
            assert_array_equal(np.sort(np.unique(circ_im)), np.array([0, 255]))

            # make spim, assuming image is already binary
            bin_spim = Spim(image=circ_im,
                            metadata={},
                            stage=SpimStage.binarized,
                            cached=False,
                            predecessors=[])

            cont_spim = bin_spim\
                .extract_features(contour_finder)\
                .filter_features(feature_filter)

            # Every on-edge contour must have been filtered out.
            blobs = cont_spim.metadata["contours"]
            self.assertEqual(len(blobs), 0)
| 2.453125 | 2 |
cal_different.py | Feobi1999/unbiased-teacher | 0 | 12769102 | <reponame>Feobi1999/unbiased-teacher<filename>cal_different.py
import json
import torch
farea = lambda x: (x[:, 2] - x[:, 0]) * (x[:, 3] - x[:, 1])
def bbox_iou(box1, box2):
area1 = farea(box1)
area2 = farea(box2)
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
# load gt
# NOTE(review): hardcoded machine-specific path; the open() handle is never
# closed explicitly (fine for a one-shot script, but worth knowing).
gt_anno = json.load(open('/media/sda2/mzhe/datasets/Cityscapes/cocoAnnotations/cityscapes_val_caronly_cocostyle.json'))
# Per-image ground truth: image_id -> {'bbox': [...], 'label': [...]}.
gt = {img['id']:{'bbox':[], 'label': []} for img in gt_anno['images']}
for anno in gt_anno['annotations']:
    # Crowd regions are excluded from matching.
    if anno['iscrowd'] == 1:
        continue
    # Convert COCO [x, y, w, h] to [x1, y1, x2, y2].
    gt[anno['image_id']]['bbox'].append([anno['bbox'][0], anno['bbox'][1], anno['bbox'][0] + anno['bbox'][2], anno['bbox'][1] + anno['bbox'][3]])
    gt[anno['image_id']]['label'].append(anno['category_id'])
# Tensorize; view(-1, 4) / view(-1) keeps images with zero boxes well-shaped.
for img in gt:
    gt[img]['bbox'] = torch.Tensor(gt[img]['bbox']).view(-1, 4)
    gt[img]['label'] = torch.Tensor(gt[img]['label']).view(-1)
#load pred
def load_pred(fname):
    """Load COCO-style detection results from *fname*.

    Returns image_id -> {'bbox', 'label', 'score'} tensors, with an entry
    for every image in the (module-level) ``gt_anno`` even when it has no
    detections.  Boxes are converted from [x, y, w, h] to [x1, y1, x2, y2].
    """
    pred = {img['id']:{'bbox':[], 'label': [], 'score': []} for img in gt_anno['images']}
    pred_anno = json.load(open(fname))
    for anno in pred_anno:
        pred[anno['image_id']]['bbox'].append([anno['bbox'][0], anno['bbox'][1], anno['bbox'][0] + anno['bbox'][2], anno['bbox'][1] + anno['bbox'][3]])
        pred[anno['image_id']]['label'].append(anno['category_id'])
        pred[anno['image_id']]['score'].append(anno['score'])
    # Tensorize; the view() calls keep zero-detection images well-shaped.
    for img in pred:
        pred[img]['bbox'] = torch.Tensor(pred[img]['bbox']).view(-1, 4)
        pred[img]['label'] = torch.Tensor(pred[img]['label']).view(-1)
        pred[img]['score'] = torch.Tensor(pred[img]['score']).view(-1)
    return pred
#cs_pred = load_pred('cs_coco_mh_cshead.json')
#coco_pred = load_pred('cs_coco_mh_cocohead.json')
# Two detector variants compared throughout the rest of the script.
cs_pred = load_pred('/media/sda2/mzhe/unbiased-teacher/fft_sim10k/inference/coco_instances_results.json')
coco_pred = load_pred('/media/sda2/mzhe/unbiased-teacher/ubteacher_sim10k/inference/coco_instances_results.json')
# Naive union of both prediction sets (simple concatenation per image).
sum_pred = {img:{'bbox': torch.cat((cs_pred[img]['bbox'], coco_pred[img]['bbox'])), 'label': torch.cat((cs_pred[img]['label'], coco_pred[img]['label'])), 'score': torch.cat((cs_pred[img]['score'], coco_pred[img]['score']))} for img in cs_pred}
def merge_pred(preds):
    """Greedily merge overlapping same-class detections per image.

    For each image and each class id 1..8, detections are sorted by score
    descending; for every not-yet-picked box, any later box with IoU > 0.6
    is averaged with it (boxes and scores averaged) and both are marked
    picked, while the box itself is also appended with half its score.

    NOTE(review): a box can pair with several later boxes (one merged entry
    per pair), and a box that merged is still appended solo at half score —
    presumably intentional down-weighting of single-detector boxes; confirm.
    NOTE(review): torch.stack raises for an image with zero kept boxes.
    """
    res = {}
    for img in preds:
        # Progress indicator every 100 image ids.
        if int(img) % 100 == 0:
            print('merging:%d'%int(img))
        res[img] = {}
        res[img]['bbox'] = []
        res[img]['score'] = []
        res[img]['label'] = []
        for cls in range(1, 9):
            cls_ind = (preds[img]['label'] == cls)
            if cls_ind.sum() == 0:
                continue
            boxes = preds[img]['bbox'][cls_ind]
            scores = preds[img]['score'][cls_ind]
            # Sort by confidence so higher-scoring boxes merge first.
            scores, sort_ind = scores.sort(descending=True)
            boxes = boxes[sort_ind]
            ious = bbox_iou(boxes, boxes)
            picked = [False] * len(ious)
            for i in range(ious.size(0)):
                if not picked[i]:
                    picked[i] = True
                else:
                    continue
                for j in range(i + 1, ious.size(0)):
                    if ious[i, j] > 0.6:
                        # Average the overlapping pair's boxes and scores.
                        res[img]['bbox'].append((boxes[i] + boxes[j]) / 2)
                        res[img]['score'].append((scores[i] + scores[j]) / 2)
                        res[img]['label'].append(cls)
                        picked[j] = True
                        continue
                # Box i is also kept on its own with its score halved.
                res[img]['bbox'].append(boxes[i])
                res[img]['score'].append(scores[i] / 2)
                res[img]['label'].append(cls)
        res[img]['bbox'] = torch.stack(res[img]['bbox']).view(-1, 4)
        res[img]['label'] = torch.Tensor(res[img]['label']).view(-1)
        res[img]['score'] = torch.Tensor(res[img]['score']).view(-1)
    return res
# Merged version of the combined predictions (see merge_pred above).
merged_pred = merge_pred(sum_pred)
#test under different confident scores
def match(pred, gt, c_th=0.05, area=[0, 9999999]):
    """Match predictions to ground truth at IoU >= 0.5 and print recall stats.

    Returns (match_res, match_score): a flat 0/1 tensor over all GT boxes
    (1 = matched by some same-class prediction) and the best matching
    prediction score per GT box.

    WARNING: mutates *pred* in place — detections below *c_th* are filtered
    out of the passed-in dict, so successive calls see already-filtered data.
    NOTE(review): *area* is accepted but never used; it is also a mutable
    default argument (harmless here since it is never modified).
    """
    thresh = 0.5
    tp = []
    fn = []
    fp = []
    match_res = []
    match_score = []
    for img in gt:
        #tp.append(0)
        #fn.append(0)
        #fp.append(0)
        ##dealing
        # Confidence filtering (in place on pred, see docstring warning).
        keep_ind = pred[img]['score'] >= c_th
        pred[img]['bbox'] = pred[img]['bbox'][keep_ind]
        pred[img]['label'] = pred[img]['label'][keep_ind]
        pred[img]['score'] = pred[img]['score'][keep_ind]
        ##
        # Image with no GT: every prediction counts as a false positive.
        if len(gt[img]['bbox']) == 0:
            tp.append(0)
            fn.append(0)
            fp.append(len(pred[img]['bbox']))
            continue
        # GT present but no predictions: all GT boxes are misses.
        if len(pred[img]['bbox']) == 0:
            tp.append(0)
            fn.append(len(gt[img]['bbox']))
            fp.append(0)
            match_res.append(torch.zeros(len(gt[img]['bbox'])))
            match_score.append(torch.zeros(len(gt[img]['bbox'])))
            continue
        ious = bbox_iou(gt[img]['bbox'], pred[img]['bbox'])
        scores = torch.zeros(len(gt[img]['bbox']))
        # Binarize the IoU matrix: 1 for same-class pairs above threshold,
        # recording the best prediction score per GT box along the way.
        for i in range(ious.size(0)):
            for j in range(ious.size(1)):
                if ious[i, j] >= thresh and gt[img]['label'][i] == pred[img]['label'][j]:
                    ious[i, j] = 1
                    if pred[img]['score'][j] > scores[i]:
                        scores[i] = pred[img]['score'][j]
                else:
                    ious[i, j] = 0
        match_res.append((ious.sum(dim=1) > 0).float())
        match_score.append(scores)
        # GT rows with any match are TP, rows without are FN; prediction
        # columns matching no GT are FP.
        tp.append((ious.sum(dim=1) > 0).sum().item())
        fn.append((ious.sum(dim=1) == 0).sum().item())
        fp.append((ious.sum(dim=0) == 0).sum().item())
    # Dataset-level recall, image-averaged recall, and FP per image.
    print('total ratio: %.3f'%(sum(tp) / (sum(tp) + sum(fn))))
    avg_f = lambda x: sum(x) / len(x)
    print('avg ratio: %.3f'%(avg_f([tp[i] / (tp[i] + fn[i]) for i in range(len(tp)) if tp[i] + fn[i] > 0])))
    print('fp / img: %.1f'%(sum(fp) / len(fp)))
    return torch.cat(match_res).float(), torch.cat(match_score)
# The triple-quoted blocks below are disabled experiment runs kept as
# runtime no-op string literals.
'''
res_sum, score_sum = match(sum_pred, gt)
res_sum, score_sum = match(sum_pred, gt, 0.2)
res_sum, score_sum = match(sum_pred, gt, 0.3)
res_sum, score_sum = match(sum_pred, gt, 0.35)
res_sum, score_sum = match(sum_pred, gt, 0.4)
res, score = match(merged_pred, gt)
res, score = match(merged_pred, gt, 0.2)
res, score = match(merged_pred, gt, 0.3)
'''
'''
res_sum, score_sum = match(sum_pred, gt, 0.1)
res, score = match(merged_pred, gt, 0.1)
res_sum, score_sum = match(sum_pred, gt, 0.15)
res, score = match(merged_pred, gt, 0.15)
'''
'''
res_sum, score_sum = match(sum_pred, gt, 0.5)
res, score = match(merged_pred, gt, 0.4)
res, score = match(merged_pred, gt, 0.5)
'''
# Compare the two detectors' per-GT match vectors and matched scores.
res1, score1 = match(cs_pred, gt)
res2, score2 = match(coco_pred, gt)
# Agreement rate, both-matched rate, and score-gap statistics.
print((res1 == res2).float().mean())
print((res1 * res2).float().mean())
print((score1 - score2).abs().mean())
print((score1 - score2)[res1 * res2 > 0].abs().mean())
print(score1.mean())
print(score2.mean())
#match(cs_pred, gt, 0.1)
#match(coco_pred, gt, 0.1)
#match(cs_pred, gt, 0.2)
#match(coco_pred, gt, 0.2)
#match(cs_pred, gt, 0.3)
#match(coco_pred, gt, 0.3)
#match(cs_pred, gt, 0.5)
#match(coco_pred, gt, 0.5)
| 1.882813 | 2 |
billboard.py | the-pudding/falsetto-site | 1 | 12769103 | string = "hi there my name is matt"
print(string.count("e"))
| 3.359375 | 3 |
src/setup_custom_envs.py | jqueguiner/ai-api-marketplace- | 0 | 12769104 | #!/usr/bin/env python3
import os
import yaml
rootDir = 'apis'
for dirName, subdirList, fileList in os.walk(rootDir):
if 'env.yaml' in fileList:
print(f"Found env.yaml in {dirName}")
with open(os.path.join(dirName, 'env.yaml'), 'r') as stream:
try:
env_yaml = yaml.safe_load(stream)
print(f"Loaded env.yaml in {dirName}")
except yaml.YAMLError as exc:
print(exc)
print(env_yaml)
try:
os.system(f"cd {dirName} && rm -rf .env Pipfile")
except:
print("Could not remove .env and Pipfile")
packages_to_install = ' '.join(env_yaml['packages']) + ' git+https://github.com/theunifai/unifai-api-utils.git'
os.system(f"cd {dirName} && echo Y | pipenv --python {env_yaml['python']['version']}")
os.system(f"cd {dirName} && pipenv run pip install {packages_to_install}")
| 2.375 | 2 |
ros/src/waypoint_updater/waypoint_updater.py | dayuwater/CarND-Capstone | 0 | 12769105 | <gh_stars>0
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Header
import math
# import a helper module for waypoint updater
import helper
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 20 # Number of waypoints we will publish. You can change this number
class WaypointUpdater(object):
    def __init__(self):
        """Initialize the node, subscriptions, publisher, and state, then spin.

        NOTE: rospy.spin() at the end blocks; all further work happens in
        the subscriber callbacks.
        """
        rospy.init_node('waypoint_updater', log_level=rospy.DEBUG)

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        # TODO: Determine the message type for the two waypoints
        #rospy.Subscriber('/traffic_waypoint', None, self.traffic_cb)
        #rospy.Subscriber('/obstacle_waypoint', None, self.obstacle_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=10)

        # TODO: Add other member variables you need below
        # The position of the car
        self.pos_x = 0
        self.pos_y = 0
        self.orientation = 0 # facing angle
        # All Waypoints
        self.waypoint_msg = ""     # The waypoint message from ROS
        self.waypoint_header = ""  # Last header detected for waypoints
        self.waypoints = []        # Actual waypoint data
        # Waypoints to be published
        self.waypoints_to_publish = []
        # Sequence counter used in published message headers.
        self.seq = 0
        self.start_time = rospy.Time.now()

        # TODO: Publish waypoints here
        rospy.spin()
def pose_cb(self, msg):
'''
This is a callback function when it receives a pose message
It is called about 20Hz ROS time (Actually 21-22Hz)
Do we need to publish this at 50Hz? (This is for waypoints, not actuation commands)
'''
# TODO: Implement
# Decompose message
header = msg.header
pose = msg.pose
pos_x = pose.position.x
pos_y = pose.position.y
pos_z = pose.position.z
ori_z = pose.orientation.z
ori_w = pose.orientation.w
logmsg = "The car is at ({}, {}), facing({})".format(pos_x, pos_y, ori_z)
#rospy.logwarn(logmsg)
rate = rospy.Rate(50)
self.pos_x = pos_x
self.pos_y = pos_y
self.orientation = ori_z
# Determine the waypoints to be published
self.waypoints_to_publish = helper.filter_waypoints(self.pos_x, self.pos_y, self.orientation, self.waypoints, LOOKAHEAD_WPS)
# rospy.logwarn(self.waypoints_to_publish)
msg = Lane()
# Compose the header
header = Header()
header.seq = self.seq
header.stamp = rospy.Time.now()
header.frame_id = "/world"
msg.header = header
# msg.header = self.waypoint_header
msg.waypoints = self.waypoints_to_publish
# rospy.logwarn(msg)
self.final_waypoints_pub.publish(msg)
self.seq += 1
# rospy.logwarn(self.seq)
rate.sleep()
#pass
def waypoints_cb(self, waypoints):
'''
Callback function for waypoint loader
It seems that this function is only called when the program starts
'''
# TODO: Implement
self.waypoint_msg = waypoints
# Parse the waypoints
self.waypoint_header = waypoints.header
self.waypoints = waypoints.waypoints
rospy.logwarn("111")
#pass
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
pass
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        # init_node/spin can raise on shutdown; log instead of a traceback.
        rospy.logerr('Could not start waypoint updater node.')
| 2.921875 | 3 |
# -*-coding:utf-8 -*-
u"""
:创建时间: 2021/12/5 1:40
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
from __future__ import unicode_literals, print_function
import imp
import init
imp.reload(init)
import regex_match_dialog
print(regex_match_dialog.exec_())
| 1.945313 | 2 |
from itertools import takewhile
def fibonacci_series():
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    current, following = 1, 2
    while True:
        yield current
        current, following = following, current + following
def even_fibonacci_series():
    """Yield the even Fibonacci numbers 2, 8, 34, 144, ... indefinitely.

    Every third Fibonacci number is even, so the generator advances the
    sequence three steps between yields.
    """
    previous, current = 0, 1
    while True:
        for _ in range(3):
            previous, current = current, previous + current
        yield previous
if __name__ == "__main__":
n = 4e6
smaller_than_4_millions = takewhile(lambda x: x < n + 1, fibonacci_series())
even_numbers = filter(lambda x: x % 2 == 0, smaller_than_4_millions)
result = sum(list(even_numbers))
print(f"The result is {result}")
smaller_than_4_millions = takewhile(lambda x: x < n + 1, even_fibonacci_series())
result = sum(list(smaller_than_4_millions))
print(f"The result is {result}")
| 3.921875 | 4 |
UnpairedNumber.py | Cynthyah/Exercises | 0 | 12769108 | <reponame>Cynthyah/Exercises<filename>UnpairedNumber.py
# Find value that occurs in odd number of elements.
# Find the number in an non-empty array that does not have pair
# Array [9,3,9,3,9,7,9] pairs
# A[0] = 9 and A[2] = 9
# A[1] = 3 and A[3] = 3
# A[4] = 9 and A[6] = 9
# A[5] = 7 no pair
# the function should return 7, as explained in the example above.
# Important
# N is an odd integer within the range [1..1,000,000];
# each element of array A is an integer within the range [1..1,000,000,000];
# all but one of the values in A occur an even number of times.
def solution(A):
    """Return the value that occurs an odd number of times in A.

    All other values are guaranteed to occur an even number of times, so
    XOR-ing every element cancels the paired values and leaves the unpaired
    one. O(N) time and O(1) extra space, replacing the original sort-based
    O(N*log(N)) approach.

    Args:
        A: non-empty list of integers.

    Returns:
        The single integer whose occurrence count is odd.
    """
    unpaired = 0
    for value in A:
        unpaired ^= value
    return unpaired
# Testing
print(solution([9,3,9,3,9,7,9]))  # expected: 7 (the only value with an odd count)
# Result
# Detected time complexity: O(N) or O(N*log(N))
google/ads/google_ads/v2/proto/services/landing_page_view_service_pb2.py | jiulongw/google-ads-python | 1 | 12769109 | <reponame>jiulongw/google-ads-python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/services/landing_page_view_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import landing_page_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/services/landing_page_view_service.proto',
package='google.ads.googleads.v2.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB\033LandingPageViewServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
serialized_pb=_b('\nFgoogle/ads/googleads_v2/proto/services/landing_page_view_service.proto\x12 google.ads.googleads.v2.services\x1a?google/ads/googleads_v2/proto/resources/landing_page_view.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\"2\n\x19GetLandingPageViewRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xf9\x01\n\x16LandingPageViewService\x12\xc1\x01\n\x12GetLandingPageView\x12;.google.ads.googleads.v2.services.GetLandingPageViewRequest\x1a\x32.google.ads.googleads.v2.resources.LandingPageView\":\x82\xd3\xe4\x93\x02\x34\x12\x32/v2/{resource_name=customers/*/landingPageViews/*}\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x82\x02\n$com.google.ads.googleads.v2.servicesB\x1bLandingPageViewServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
# Generated message descriptor: GetLandingPageViewRequest has a single
# string field, `resource_name` (field number 1).
_GETLANDINGPAGEVIEWREQUEST = _descriptor.Descriptor(
  name='GetLandingPageViewRequest',
  full_name='google.ads.googleads.v2.services.GetLandingPageViewRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='resource_name', full_name='google.ads.googleads.v2.services.GetLandingPageViewRequest.resource_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=228,
  serialized_end=278,
)

# Register the message type on the file descriptor.
DESCRIPTOR.message_types_by_name['GetLandingPageViewRequest'] = _GETLANDINGPAGEVIEWREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetLandingPageViewRequest = _reflection.GeneratedProtocolMessageType('GetLandingPageViewRequest', (_message.Message,), dict(
DESCRIPTOR = _GETLANDINGPAGEVIEWREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.landing_page_view_service_pb2'
,
__doc__ = """Request message for
[LandingPageViewService.GetLandingPageView][google.ads.googleads.v2.services.LandingPageViewService.GetLandingPageView].
Attributes:
resource_name:
The resource name of the landing page view to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.GetLandingPageViewRequest)
))
_sym_db.RegisterMessage(GetLandingPageViewRequest)
DESCRIPTOR._options = None
# Generated service descriptor: LandingPageViewService exposes a single
# GetLandingPageView RPC mapped to GET /v2/{resource_name=customers/*/landingPageViews/*}.
_LANDINGPAGEVIEWSERVICE = _descriptor.ServiceDescriptor(
  name='LandingPageViewService',
  full_name='google.ads.googleads.v2.services.LandingPageViewService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=_b('\312A\030googleads.googleapis.com'),
  serialized_start=281,
  serialized_end=530,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetLandingPageView',
    full_name='google.ads.googleads.v2.services.LandingPageViewService.GetLandingPageView',
    index=0,
    containing_service=None,
    input_type=_GETLANDINGPAGEVIEWREQUEST,
    output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_landing__page__view__pb2._LANDINGPAGEVIEW,
    serialized_options=_b('\202\323\344\223\0024\0222/v2/{resource_name=customers/*/landingPageViews/*}'),
  ),
])
_sym_db.RegisterServiceDescriptor(_LANDINGPAGEVIEWSERVICE)

DESCRIPTOR.services_by_name['LandingPageViewService'] = _LANDINGPAGEVIEWSERVICE

# @@protoc_insertion_point(module_scope)
| 1.5625 | 2 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-10 22:44
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated migration: replaces Slika.lokacija with a FileField `slika`.

    dependencies = [
        ('prodavnica', '0006_slika'),
    ]

    operations = [
        # Drop the old location column.
        migrations.RemoveField(
            model_name='slika',
            name='lokacija',
        ),
        # Add the new file-upload column.
        # NOTE(review): `upload_to` is an absolute Windows path — Django
        # normally expects a path relative to MEDIA_ROOT; confirm intended.
        migrations.AddField(
            model_name='slika',
            name='slika',
            field=models.FileField(blank=True, upload_to='C:\\Python34\\Scripts\\env_site1\\slike'),
        ),
    ]
| 1.328125 | 1 |
# -*- coding: utf-8 -*-
# Odoo module manifest: "Gastos unidos a facturas" (expenses linked to invoices).
{
    'name': 'Gastos unidos a facturas',
    'version': '1.0',
    'category': 'Accounting/Expenses',
    'sequence': 6,
    'summary': 'Gastos unidos a facturas',
    'description': """ Cambios a gastos para unirlas a facturas """,
    'website': 'http://aquih.com',
    'author': '<NAME>',
    # Extends the stock HR expenses module.
    'depends': ['hr_expense'],
    # Views and access rules loaded on install.
    'data': [
        'views/hr_expense_views.xml',
        'views/account_move_views.xml',
        'security/ir.model.access.csv',
    ],
    'qweb': [
    ],
    'installable': True,
    'auto_install': False,
}
dbt_sugar/core/clients/dbt.py | badge/dbt-sugar | 0 | 12769112 | <reponame>badge/dbt-sugar
"""Holds methods to interact with dbt API (we mostly don't for now because not stable) and objects."""
import os
from pathlib import Path
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, Field, root_validator
from dbt_sugar.core.clients.yaml_helpers import open_yaml
from dbt_sugar.core.exceptions import (
DbtProfileFileMissing,
ProfileParsingError,
TargetNameNotProvided,
)
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.logger import GLOBAL_LOGGER as logger
DEFAULT_DBT_PROFILE_PATH = Path(os.getenv("DBT_PROFILES_DIR", default=Path.home().joinpath(".dbt")))
class PostgresDbtProfilesModel(BaseModel):
    """Postgres dbt credentials validation model.

    Field aliases follow the key names used in dbt's profiles.yml
    (``pass``, ``dbname``, ``schema``).
    """

    type: str
    user: str
    password: str = Field(..., alias="pass")
    database: str = Field(..., alias="dbname")
    target_schema: str = Field(..., alias="schema")
    host: str
    port: int
class SnowflakeDbtProfilesModel(BaseModel):
    """Snowflake dbt credentials validation model.

    Authentication requires either a ``password`` or a ``private_key``.
    """

    type: str
    account: str
    user: str
    password: Optional[str]
    private_key: Optional[str]
    database: str
    target_schema: str = Field(..., alias="schema")
    role: str
    warehouse: str

    @root_validator
    def check_password_or_pk(cls, values: Dict[Any, Any]) -> Dict[Any, Any]:
        """Validate that at least one Snowflake authentication method is set.

        ``password`` and ``private_key`` are declared Optional, so pydantic
        always places their keys in ``values`` (possibly as None); the
        original membership test (``"password" not in values``) could never
        fire. Check the actual values instead.

        Args:
            values (Dict[Any, Any]): Dictionary of field name/value pairs.

        Raises:
            ValueError: If neither a password nor a private key was supplied.
        """
        if values.get("password") is None and values.get("private_key") is None:
            raise ValueError("Must pass either password or private key!")
        return values
class DbtProjectModel(BaseModel):
    """Defines pydantic validation schema for a dbt_project.yml file.

    Only the ``profile`` entry is needed by dbt-sugar.
    """

    profile: str
class BaseYamlConfig:
    """Base class for objects which read their configuration from yaml files."""

    def _assert_file_exists(self, dir: Path, filename: str = "profiles.yml") -> bool:
        """Check that `filename` exists in `dir`, raising when it does not.

        Args:
            dir (Path): Directory expected to contain the file.
            filename (str): Name of the file to look for.

        Returns:
            bool: True when the file exists.

        Raises:
            DbtProfileFileMissing: When the file cannot be found in `dir`.
        """
        logger.debug(dir.resolve())
        full_path_to_file = dir / filename
        if full_path_to_file.is_file():
            return True
        # Name the missing file in the error (the message previously contained
        # the literal placeholder "(unknown)" instead of the filename).
        raise DbtProfileFileMissing(f"Could not locate `{filename}` in {dir.resolve()}")
class DbtProject(BaseYamlConfig):
    """Holds parsed dbt project information needed for dbt-sugar such as which db profile to target."""

    # Canonical name of the project configuration file.
    DBT_PROJECT_FILENAME: str = "dbt_project.yml"

    def __init__(self, project_name: str, project_dir: Path) -> None:
        """Constructor for DbtProject.

        Given a project name and a project dir it will parse the relevant dbt_project.yml and
        parse information such as `profile` so dbt-sugar knows which database profile entry from
        /.dbt/profiles.yml to use.

        Args:
            project_name (str): Name of the dbt project to read profile from.
            project_dir (Path): Path object the dbt_project.yml to read from.
        """
        self._project_name = project_name
        self._project_dir = project_dir

        # class "outputs" -- populated by read_project().
        self.project: DbtProjectModel
        self.profile_name: str

    @property
    def _dbt_project_filename(self) -> Path:
        # Full path to this project's dbt_project.yml.
        logger.debug(f"project_dir: {self._project_dir}")
        return Path(self._project_dir).joinpath(type(self).DBT_PROJECT_FILENAME)

    def read_project(self) -> None:
        """Parse dbt_project.yml, validate it, and extract the profile name."""
        _ = self._assert_file_exists(Path(self._project_dir), filename=self.DBT_PROJECT_FILENAME)
        _project_dict = open_yaml(self._dbt_project_filename)

        # pass the dict through pydantic for validation and only getting what we need
        # if the profile is invalid app will crash so no further tests required below.
        logger.debug(f"the project {_project_dict}")
        _project = DbtProjectModel(**_project_dict)
        logger.debug(_project)
        self.project = _project
        self.profile_name = self.project.dict().get("profile", str())
        if not self.profile_name:
            logger.warning(
                f"[yellow]There was no `profile:` entry in {self._dbt_project_filename}. "
                "dbt-sugar will try to find a 'default' profile. This might lead to unexpected"
                "behaviour or an error when no defaulf profile can be found in your dbt profiles.yml"
            )
class DbtProfile(BaseYamlConfig):
    """Holds the parsed and validated profile dict from dbt's profiles.yml."""

    # CLI flags that override values read from profiles.yml.
    CLI_OVERRIDE_FLAGS = [{"cli_arg_name": "schema", "maps_to": "target_schema"}]

    def __init__(
        self,
        flags: FlagParser,
        profile_name: str,
        target_name: str,
        profiles_dir: Optional[Path] = None,
    ) -> None:
        """Reads, validates and holds dbt profile info required by dbt-sugar (mainly db creds).

        Args:
            flags (FlagParser): parsed CLI flags, used to override profile values.
            profile_name (str): name of the dbt profile to read credentials from.
            target_name (str): name of the target entry. This corresponds to what resides below
                "outputs" in the dbt's profile.yml (https://docs.getdbt.com/dbt-cli/configure-your-profile/)
            profiles_dir (Optional[Path]): directory containing profiles.yml; when omitted,
                $DBT_PROFILES_DIR or ~/.dbt is used.
        """
        # attrs parsed from constructor
        self._flags = flags
        self._profile_name = profile_name
        self._target_name = target_name
        self._profiles_dir = profiles_dir

        # attrs populated by class methods
        self.profile: Dict[str, str]

    @property
    def profiles_dir(self):
        """Directory holding profiles.yml (explicit argument wins over the default)."""
        if self._profiles_dir:
            return self._profiles_dir
        return DEFAULT_DBT_PROFILE_PATH

    def _get_target_profile(self, profile_dict: Dict[str, Any]) -> Dict[str, Union[str, int]]:
        """Return the "outputs" entry matching the requested or default target.

        Args:
            profile_dict: a single profile's entry from profiles.yml.

        Raises:
            TargetNameNotProvided: when no target was supplied via CLI and the
                profile has no ``target:`` field of its own.
        """
        if self._target_name:
            return profile_dict["outputs"].get(self._target_name)
        # Fall back to the profile's own `target:` field.
        self._target_name = profile_dict.get("target", str())
        if self._target_name:
            return profile_dict["outputs"].get(self._target_name)
        raise TargetNameNotProvided(
            f"No target name provied in {self._profiles_dir} and none provided via "
            "--target in CLI. Cannot figure out appropriate profile information to load."
        )

    def read_profile(self):
        """Read profiles.yml, validate the target entry, and store it on self.profile.

        Raises:
            ProfileParsingError: when the profile, target, or database type
                cannot be resolved from profiles.yml.
            NotImplementedError: for database types dbt-sugar does not support.
        """
        _ = self._assert_file_exists(
            self.profiles_dir
        )  # this will raise so no need to check exists further
        _profile_dict = open_yaml(self.profiles_dir / "profiles.yml")
        # Fall back to a profile literally named "default" when the requested
        # profile name is missing (the original fallback re-read the same key,
        # which could never change the result — presumably "default" was meant,
        # matching the warning emitted by DbtProject.read_project).
        _profile_dict = _profile_dict.get(self._profile_name, _profile_dict.get("default"))
        if _profile_dict:
            # read target name from args or try to get it from the dbt_profile `target:` field.
            _target_profile = self._get_target_profile(profile_dict=_profile_dict)
            if _target_profile:
                _profile_type = _target_profile.get("type")
                # Dispatch to the right pydantic model: dbt profiles are not
                # consistent across adapters so each db type has its own schema.
                if _profile_type == "snowflake":
                    # uses pydantic to validate profile. It will raise and break app if invalid.
                    _target_profile = SnowflakeDbtProfilesModel(**_target_profile)
                elif _profile_type in ("postgres", "redshift"):
                    # NOTE: the original condition was `== "postgres" or "redshift"`,
                    # which is always truthy and swallowed every other type
                    # (making the None check and NotImplementedError unreachable).
                    _target_profile = PostgresDbtProfilesModel(**_target_profile)
                # if we don't manage to read the db type for some reason.
                elif _profile_type is None:
                    raise ProfileParsingError(
                        f"Could not read or find a database type for {self._profile_name} in your dbt "
                        "profiles.yml. Check that this field is not missing."
                    )
                else:
                    raise NotImplementedError(f"{_profile_type} is not implemented yet.")
                logger.debug(_target_profile)
                self.profile = _target_profile.dict(exclude_unset=True)
                # override profile info with potential CLI args
                self._integrate_cli_flags()
            else:
                raise ProfileParsingError(
                    f"Could not find an entry for target: '{self._target_name}', "
                    f"for the '{self._profile_name}' profile in your dbt profiles.yml."
                )
        else:
            raise ProfileParsingError(
                f"Could not find an entry for '{self._profile_name}' in your profiles.yml"
            )

    def _integrate_cli_flags(self) -> None:
        """Overwrite profile values with any matching CLI flag values."""
        for flag_override_dict in self.CLI_OVERRIDE_FLAGS:
            cli_arg_value = getattr(self._flags, flag_override_dict["cli_arg_name"])
            if cli_arg_value and isinstance(self.profile, dict):
                self.profile[flag_override_dict["maps_to"]] = cli_arg_value
            else:
                logger.debug("No schema passed to CLI will try to read from profile.yml")
| 2.203125 | 2 |
modules/sbin/get_dataframe.py | BiRG/Omics-Dashboard | 1 | 12769113 | <reponame>BiRG/Omics-Dashboard
#!/usr/bin/env python3
"""CLI wrapper: dump an omics collection (HDF5) to a CSV next to the CWD."""
import sys
from os.path import splitext, basename

from omics_dashboard_client.hdf_tools.collection_tools import get_dataframe

# Usage: get_dataframe.py <filename> [numeric_columns] [include_labels] [include_only_labels]
# Boolean flags are parsed from the literal string "true" (case-insensitive).
filename = sys.argv[1]
numeric_columns = sys.argv[2].lower() == 'true' if len(sys.argv) > 2 else True
include_labels = sys.argv[3].lower() == 'true' if len(sys.argv) > 3 else True
include_only_labels = sys.argv[4].lower() == 'true' if len(sys.argv) > 4 else False

# Output CSV is named after the input file's basename.
df = get_dataframe(filename,
                   include_labels=include_labels,
                   numeric_columns=numeric_columns,
                   include_only_labels=include_only_labels)
df.to_csv(f'{splitext(basename(filename))[0]}.csv')
| 2.515625 | 3 |
assessment/views/custom404.py | kenware/Assessment | 0 | 12769114 | <reponame>kenware/Assessment
from rest_framework import viewsets
from rest_framework.response import Response
class custom404(viewsets.ModelViewSet):
    """Catch-all viewset returning a JSON 404 payload for unknown routes."""

    def error_404(self, request):
        """Return a 404 response naming the HTTP method that was used."""
        method = request.method
        return Response({
            'statusCode': 404,
            'error': f'The {method} request resource was not found'
        }, 404)

    # Route every HTTP verb to the same 404 handler.
    http_mapper = {
        'get': 'error_404',
        'post': 'error_404',
        'patch': 'error_404',
        'put': 'error_404',
        'delete': 'error_404',
    }
| 2.34375 | 2 |
datapackage_pipelines/specs/hashers/hash_calculator.py | gperonato/datapackage-pipelines | 109 | 12769115 | <filename>datapackage_pipelines/specs/hashers/hash_calculator.py
import hashlib
from ...utilities.extended_json import json
from ..parsers.base_parser import PipelineSpec
from ..errors import SpecError
from .dependency_resolver import resolve_dependencies
class HashCalculator(object):
    """Computes per-step cache hashes for pipeline specs.

    Tracks every pipeline id seen so far so duplicate ids can be reported
    as validation errors.
    """

    def __init__(self):
        self.all_pipeline_ids = {}

    def calculate_hash(self, spec: PipelineSpec, status_mgr, ignore_missing_deps=False):
        """Chain a hash through every pipeline step and stamp it on the spec.

        Each step's hash covers the previous hash, the executor file's bytes
        and the step's own JSON-serialized definition; the last step's hash
        becomes the spec's cache hash.
        """
        cache_hash = None
        if spec.pipeline_id not in self.all_pipeline_ids:
            # First time we see this id: seed the chain from its dependencies.
            cache_hash = '' if ignore_missing_deps \
                else resolve_dependencies(spec, self.all_pipeline_ids, status_mgr)
            self.all_pipeline_ids[spec.pipeline_id] = spec
        else:
            spec.validation_errors.append(SpecError(
                'Duplicate Pipeline Id',
                'Duplicate key {0} in {1}'.format(spec.pipeline_id, spec.path)))

        if spec.validation_errors:
            return cache_hash

        for step in spec.pipeline_details['pipeline']:
            digest = hashlib.md5()
            digest.update(cache_hash.encode('ascii'))
            with open(step['executor'], 'rb') as executor_file:
                digest.update(executor_file.read())
            digest.update(json.dumps(step, ensure_ascii=True, sort_keys=True)
                          .encode('ascii'))
            cache_hash = digest.hexdigest()
            step['_cache_hash'] = cache_hash

        spec.cache_hash = cache_hash
| 2.328125 | 2 |
notebooks/universals.py | MikeOMa/MV_Prediction | 1 | 12769116 | METHOD_ORDER = ["NGB", "Indep NGB", "skGB", "GB", "NN"]
METRIC_ORDER = ["NLL", "RMSE", "coverage", "area"]
normbench/testing/test_pyScTransform.py | normjam/benchmark | 18 | 12769117 | <gh_stars>10-100
import numpy as np
from normbench.methods import ad2seurat as a2s
from normbench.methods.data import pbmc3k
def test_pyScTransform():
    """Smoke-test pyScTransform normalization on the pbmc3k dataset.

    Checks that a 'normalized' layer is added and that its first value
    matches a known reference output.
    """
    adata = pbmc3k()
    a2s.pyScTransform(adata)
    # Test that it runs
    assert 'normalized' in adata.layers
    # Test functionality
    assert np.isclose(adata.layers['normalized'][0,0], -0.03377807)
| 2.265625 | 2 |
main.py | justinhchae/app_courts | 4 | 12769118 | <filename>main.py
from application.application import Application
if __name__ == '__main__':
    # Build the application once at startup.
    app = Application()

    def run_app():
        # Entry point: delegate to the Application instance.
        app.run_app()

    run_app()
| 1.851563 | 2 |
from typing import List
def transform_vars_data_structure(vars: List[dict]):
    """Flatten GitLab variable records into a simple key -> value mapping.

    Each input record is a dict as returned by the GitLab API, e.g.::

        {'environment_scope': '*', 'key': 'DATA', 'masked': False,
         'protected': True, 'value': 'Dave', 'variable_type': 'env_var'}

    Only the 'key' and 'value' entries are kept, so the example above
    contributes ``{'DATA': 'Dave'}`` to the result.
    """
    return {record['key']: record['value'] for record in vars}
| 3.5 | 4 |
Week 3/source.py | SoloSynth1/data-visualization | 0 | 12769120 | <gh_stars>0
# Use the following data for this assignment:
%matplotlib notebook
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as stats
def get_df(seed_no, index):
    """Build the assignment DataFrame: four index rows of 3650 normal samples."""
    np.random.seed(seed_no)
    distributions = [(32000, 200000), (43000, 100000), (43500, 140000), (48000, 70000)]
    samples = [np.random.normal(mu, sigma, 3650) for mu, sigma in distributions]
    return pd.DataFrame(samples, index=index)
def get_em95(df):
    """Return the 95% confidence-interval error of the mean for each column of df.

    Uses the standard error of the mean and Student's t distribution with
    len(df) - 1 degrees of freedom. (The original also computed an unused
    `df.describe()`, which has been removed.)

    Args:
        df: DataFrame with observations as rows and groups as columns.

    Returns:
        Array with one error-of-the-mean value per column.
    """
    n = len(df)
    # SEM = standard error of the mean, per column.
    sem = stats.sem(df)
    # Two-sided t value for a 95% confidence interval.
    t = -(stats.t.ppf((1 - 0.95) / 2, n - 1))
    # error of the mean at 95% CI
    return (t * sem)
def plot():
    """Create the figure/axes and compute the data needed for the bar chart.

    Reads the module-level `seed` and `index` globals.

    Returns:
        (fig, ax, df, em95, err_range) where `err_range` holds the upper and
        lower 95% CI bound of each year's mean, one column per year.
    """
    df = get_df(seed, index).T
    em95 = get_em95(df)
    fig, ax = plt.subplots()
    df_de = df.describe()
    err_range = pd.DataFrame([df_de.loc['mean']+em95, df_de.loc['mean']-em95])
    return fig, ax, df, em95, err_range
def update(color = ['lightgray' for _ in range(4)]):
    """Redraw the bar chart: one bar per year with 95% CI error bars.

    NOTE(review): the mutable default argument is shared across calls; it is
    never mutated here, but `color=None` with an in-body default would be safer.

    Args:
        color: list of four bar colors (default: all light gray).
    """
    ax.bar(range(1,5,1),
           [df[i].mean() for i in index],
           width=0.95,
           yerr=em95,
           error_kw=dict(ecolor='black', lw=1, capsize=10, capthick=1),
           color=color)
    plt.xticks(range(1,5,1), index)
# Module-level state shared by the event handlers below.
seed = 12345
index = [1992,1993,1994,1995]
click_count = 0  # clicks received so far in the current selection (0 or 1)

fig, ax, df, em95, err_range = plot()
update()
plt.gca().set_title('Please click on the plot.')
def onclick(event):
    """Handle a mouse click: collect two y values, then recolor the bars.

    The first click stores its y coordinate in `prev`; the second click
    completes the [prev, level] range, recolors the bars by similarity to
    that range, and shades the selected band.
    """
    plt.cla()
    update()
    global click_count
    global prev, level, y_range
    level = event.ydata
    plt.axhline(level, lw=1, c='gray')
    plt.gca().set_title('y = {}\nSelect a second point.'.format(event.ydata))
    if click_count == 0:
        click_count += 1
        # store the event.ydata of the first click
        prev = level
    else:
        y_range = pd.Series([prev, level])
        plt.gca().set_title('Selected range =\n[{}, {}]'.format(y_range[0], y_range[1]))
        click_count = 0
        recolor(y_range)
        plt.axhspan(prev, level, alpha=0.3, color='gray')
# do a t-test to check if selected range is similar to the 95%CI range of each mean
def recolor(y_range):
    """Recolor each bar by the t-test p-value between its CI range and y_range.

    Low p (dissimilar) shades toward red, high p (similar) toward blue.
    """
    color = []
    for item in err_range:
        s, p = stats.ttest_ind(err_range[item], y_range)
        color.append(((1-p), 0, p, 0.9))
    update(color)
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
import numpy as np
import modeling.collision_model as cm
import visualization.panda.world as wd
if __name__ == '__main__':
    base = wd.World(cam_pos=np.array([.7, .05, .3]), lookat_pos=np.zeros(3))
    # Reference object: bunny mesh with a box collision primitive and a
    # triangle-mesh collision mesh; the copies below vary the collision setup.
    object_ref = cm.CollisionModel(initor="./objects/bunnysim.stl",
                                   cdprimit_type="box",
                                   cdmesh_type="triangles")
    object_ref.set_rgba([.9, .75, .35, 1])
    # object 1: plain copy, no collision visualization
    object1 = object_ref.copy()
    object1.set_pos(np.array([0, -.18, 0]))
    # object 2: box collision primitive (inherited from the reference)
    object2 = object_ref.copy()
    object2.set_pos(np.array([0, -.09, 0]))
    # object 3: surface-balls collision primitive
    object3 = object_ref.copy()
    object3.change_cdprimitive_type(cdprimitive_type="surface_balls")
    object3.set_pos(np.array([0, .0, 0]))
    # object 4: triangle collision mesh
    object4 = object_ref.copy()
    object4.set_pos(np.array([0, .09, 0]))
    # object 5: convex-hull collision mesh
    object5 = object_ref.copy()
    object5.change_cdmesh_type(cdmesh_type="convex_hull")
    object5.set_pos(np.array([0, .18, 0]))
    # object 1 show
    object1.attach_to(base)
    # object 2 show (with collision-primitive overlay)
    object2.attach_to(base)
    object2.show_cdprimit()
    # object 3 show (with collision-primitive overlay)
    object3.attach_to(base)
    object3.show_cdprimit()
    # object 4 show (with collision-mesh overlay)
    object4.attach_to(base)
    object4.show_cdmesh()
    # object 5 show (with collision-mesh overlay)
    object5.attach_to(base)
    object5.show_cdmesh()
    base.run()
| 2.328125 | 2 |
import FWCore.ParameterSet.Config as cms
# HLT configuration fragment: ESProducer building a Chi2MeasurementEstimator
# used for track-hit compatibility cuts.
hltESPChi2MeasurementEstimator100 = cms.ESProducer("Chi2MeasurementEstimatorESProducer",
    ComponentName = cms.string('hltESPChi2MeasurementEstimator100'),
    MaxChi2 = cms.double(40.0),  # NOTE(review): name says "100" but cut is 40.0 — confirm intended
    MaxDisplacement = cms.double(0.5),
    MaxSagitta = cms.double(2.0),
    MinPtForHitRecoveryInGluedDet = cms.double(1e+12),
    MinimalTolerance = cms.double(0.5),
    appendToDataLabel = cms.string(''),
    nSigma = cms.double(4.0)
)
| 1.257813 | 1 |
import unittest
import zserio
from testutils import getZserioApi
class StructTemplateInTemplateTest(unittest.TestCase):
    """Round-trip test for the struct_template_in_template zserio sample."""

    @classmethod
    def setUpClass(cls):
        # Compile templates.zs once and expose the generated Python API.
        cls.api = getZserioApi(__file__, "templates.zs").struct_template_in_template

    def testReadWrite(self):
        """Write a templated struct to a bit stream and read it back unchanged."""
        structTemplateInTemplate = self.api.StructTemplateInTemplate(
            self.api.Field_uint32(self.api.Compound_uint32(42)),
            self.api.Field_string(self.api.Compound_string("string"))
        )
        writer = zserio.BitStreamWriter()
        structTemplateInTemplate.write(writer)

        reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
        readStructTemplateInTemplate = self.api.StructTemplateInTemplate()
        readStructTemplateInTemplate.read(reader)
        self.assertEqual(structTemplateInTemplate, readStructTemplateInTemplate)
| 2.546875 | 3 |
# Original Source:
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from functools import partial
import jax
from jax import random
import jax.numpy as jnp
from imax import color_transforms
from imax import transforms
DEBUG = False

# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.

# Un-normalized selection weight per augmentation op.
DEFAULT_RANDAUGMENT_VALUES = {
    # function_name -> probability
    # ORDER NEEDS TO BE KEPT THE SAME AS IN level_to_arg
    'AutoContrast': 1.,     # 0
    'Equalize': 1.,         # 1
    'Invert': 0.,           # 2
    'Posterize': 1.,        # 3
    'Solarize': 0.,         # 4
    'SolarizeAdd': 0.,      # 5
    'Color': 1.,            # 6
    'Contrast': 1.,         # 7
    'Brightness': 1.,       # 8
    'Sharpness': 1.,        # 9
    'Rotate': 1.,           # 10
    'ShearX': 1.,           # 11
    'ShearY': 1.,           # 12
    'TranslateX': 1.,       # 13
    'TranslateY': 1.,       # 14
    'FlipX': 1.,            # 15
    'FlipY': 1.,            # 16
    'Cutout': 1.,           # 17
}

# Op indices 0..17 in the order above, and their selection probabilities
# normalized to sum to one.
DEFAULT_OPS = jnp.array(list(range(len(DEFAULT_RANDAUGMENT_VALUES.keys()))))
DEFAULT_PROBS = jnp.array(list(DEFAULT_RANDAUGMENT_VALUES.values())) / \
    sum(list(DEFAULT_RANDAUGMENT_VALUES.values()))
def level_to_arg(cutout_val, translate_val, negate, level, mask_value):
    """
    Translates the level to the argument tuple of each augmentation function.

    Args:
        cutout_val: cutout patch size passed to the Cutout op.
        translate_val: (x, y) translation pair for the Translate ops.
        negate: whether signed magnitudes (rotate/shear/translate) are negated.
        level: augmentation strength in [0, _MAX_LEVEL].
        mask_value: fill value used by the Cutout op.

    Returns:
        A tuple of per-op argument tuples, ordered exactly like
        DEFAULT_RANDAUGMENT_VALUES (so an op index selects its args).
    """
    return tuple({
        'AutoContrast': (),
        'Equalize': (),
        'Invert': (),
        'Posterize': (5 - jnp.min(jnp.array(
            [4, (level / _MAX_LEVEL * 4).astype('uint8')])),),
        'Solarize': (((level / _MAX_LEVEL) * 256).astype('uint8'),),
        'SolarizeAdd': (((level / _MAX_LEVEL) * 110).astype('uint8'),),
        'Color': _enhance_level_to_arg(level),
        'Contrast': _enhance_level_to_arg(level),
        'Brightness': _enhance_level_to_arg(level),
        'Sharpness': _enhance_level_to_arg(level),
        'Rotate': (_rotate_level_to_arg(level, negate),),
        'ShearX': (_shear_level_to_arg(level, negate), 0),
        'ShearY': (0, _shear_level_to_arg(level, negate)),
        'TranslateX': (_translate_level_to_arg(translate_val, negate)[0], 0.),
        'TranslateY': (0., _translate_level_to_arg(translate_val, negate)[1]),
        'FlipX': (True, False),
        'FlipY': (False, True),
        'Cutout': (cutout_val, mask_value),
    }.values())
def _shrink_level_to_arg(level):
    """Converts level to the ratio by which the image content is shrunk.

    Level zero means no shrinking; otherwise the ratio grows linearly with
    the level up to a maximum of 2.9.
    """
    if level == 0:
        return 1.0  # do not shrink the image at all
    return 2. / (_MAX_LEVEL / level) + 0.9
def _enhance_level_to_arg(level):
    """Map `level` linearly onto the [0.1, 1.9] enhancement-factor range."""
    factor = (level / _MAX_LEVEL) * 1.8 + 0.1
    return [factor]
def _rotate_level_to_arg(level, negate):
    """Map `level` onto a rotation angle in [0, pi], negated when `negate`."""
    angle = (level / _MAX_LEVEL) * jnp.pi
    # lax.cond keeps the sign flip traceable under jit.
    return jax.lax.cond(
        negate,
        lambda a: -a,
        lambda a: a,
        angle,
    )
def _shear_level_to_arg(level, negate):
    """Map `level` onto a shear amount in [0, 1], negated when `negate`."""
    amount = (level / _MAX_LEVEL)
    # lax.cond keeps the sign flip traceable under jit.
    return jax.lax.cond(
        negate,
        lambda a: -a,
        lambda a: a,
        amount,
    )
def _translate_level_to_arg(translate_val, negate):
# Flip level to negative with 50% chance.
level = jax.lax.cond(
negate,
lambda t: (-t[0], -t[1]),
lambda t: t,
translate_val
)
return level
def _apply_ops(image, args, selected_op):
    """
    Apply the op selected by index to the image via a traceable lax.switch.

    Color ops (indices 0-9 and 17) transform pixels directly; geometric
    ops (10-16) only produce a 4x4 transform matrix so several of them
    can later be applied in a single resampling pass.

    Args:
        image: input image tensor.
        args: tuple of per-op argument tuples, as built by level_to_arg().
        selected_op: integer index into the branch list below.

    Returns:
        (image, geometric_transform): the possibly color-transformed image
        and the 4x4 geometric transform (identity for color ops).
    """
    geometric_transform = jnp.identity(4)
    # NOTE: the branch order must stay in sync with level_to_arg() /
    # DEFAULT_RANDAUGMENT_VALUES (indices 0..17).
    image, geometric_transform = jax.lax.switch(selected_op, [
        lambda op: (color_transforms.autocontrast(op[0], *op[1][0]),
                    geometric_transform),  # 0
        lambda op: (color_transforms.equalize(op[0], *op[1][1]),
                    geometric_transform),  # 1
        lambda op: (color_transforms.invert(op[0], *op[1][2]),
                    geometric_transform),  # 2
        lambda op: (color_transforms.posterize(op[0], *op[1][3]),
                    geometric_transform),  # 3
        lambda op: (color_transforms.solarize(op[0], *op[1][4]),
                    geometric_transform),  # 4
        lambda op: (color_transforms.solarize_add(op[0], *op[1][5]),
                    geometric_transform),  # 5
        lambda op: (color_transforms.color(op[0], *op[1][6]),
                    geometric_transform),  # 6
        lambda op: (color_transforms.contrast(op[0], *op[1][7]),
                    geometric_transform),  # 7
        lambda op: (color_transforms.brightness(op[0], *op[1][8]),
                    geometric_transform),  # 8
        lambda op: (color_transforms.sharpness(op[0], *op[1][9]),
                    geometric_transform),  # 9
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.rotate(*op[1][10]))),  # 10
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.shear(*op[1][11]))),  # 11
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.shear(*op[1][12]))),  # 12
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.translate(*op[1][13]))),  # 13
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.translate(*op[1][14]))),  # 14
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.flip(*op[1][15]))),  # 15
        lambda op: (op[0], jnp.matmul(geometric_transform,
                                      transforms.flip(*op[1][16]))),  # 16
        lambda op: (color_transforms.cutout(op[0], *op[1][17]),
                    geometric_transform),  # 17
    ], (image, args))
    return image, geometric_transform
# @jax.jit
def _randaugment_inner_for_loop(_, in_args):
    """
    Loop body for randaugment: sample one op and apply it to the image.

    Args:
        _: loop iteration index (unused; all randomness comes from the
            carried random_key)
        in_args: loop carry tuple, see the unpacking below

    Returns:
        updated loop carry tuple with the same structure as ``in_args``
    """
    (image, geometric_transforms, random_key, available_ops, op_probs,
     magnitude, cutout_const, translate_const, join_transforms,
     default_replace_value) = in_args
    # One fresh subkey per random decision taken in this iteration.
    random_keys = random.split(random_key, num=8)
    random_key = random_keys[0]  # keep for next iteration
    op_to_select = random.choice(random_keys[1], available_ops, p=op_probs)
    # Fixed replacement color when requested (> 0), otherwise a random one.
    mask_value = jnp.where(default_replace_value > 0,
                           jnp.ones([image.shape[-1]]) * default_replace_value,
                           random.randint(random_keys[2],
                                          [image.shape[-1]],
                                          minval=-1, maxval=256))
    random_magnitude = random.uniform(random_keys[3], [], minval=0.,
                                      maxval=magnitude)
    cutout_mask = color_transforms.get_random_cutout_mask(
        random_keys[4],
        image.shape,
        cutout_const)
    translate_vals = (random.uniform(random_keys[5], [], minval=0.0,
                                     maxval=1.0) * translate_const,
                      random.uniform(random_keys[6], [], minval=0.0,
                                     maxval=1.0) * translate_const)
    # 50/50 coin flip deciding whether sign-sensitive levels are negated.
    negate = random.randint(random_keys[7], [], minval=0,
                            maxval=2).astype('bool')
    args = level_to_arg(cutout_mask, translate_vals, negate,
                        random_magnitude, mask_value)
    if DEBUG:
        print(op_to_select, args[op_to_select])
    image, geometric_transform = _apply_ops(image, args, op_to_select)
    # Either keep accumulating the geometric transform (join_transforms) or
    # resample immediately and reset the transform to identity.
    # NOTE(review): jnp.all(jnp.not_equal(...)) is True only when EVERY
    # matrix entry differs from the identity, which affine matrices never
    # satisfy (zeros stay zero) -- jnp.any looks intended; verify upstream.
    image, geometric_transform = jax.lax.cond(
        jnp.logical_or(join_transforms, jnp.all(
            jnp.not_equal(geometric_transform, jnp.identity(4)))),
        lambda op: (op[0], op[1]),
        lambda op: (transforms.apply_transform(op[0],
                                               op[1],
                                               mask_value=mask_value),
                    jnp.identity(4)),
        (image, geometric_transform)
    )
    geometric_transforms = jnp.matmul(geometric_transforms, geometric_transform)
    return (image, geometric_transforms, random_key, available_ops, op_probs,
            magnitude, cutout_const, translate_const, join_transforms,
            default_replace_value)
def distort_image_with_randaugment(image,
                                   num_layers,
                                   magnitude,
                                   random_key,
                                   cutout_const=40,
                                   translate_const=50.0,
                                   default_replace_value=-1,
                                   available_ops=DEFAULT_OPS,
                                   op_probs=DEFAULT_PROBS,
                                   join_transforms=False):
    """Applies the RandAugment policy to `image`.

    RandAugment is from the paper https://arxiv.org/abs/1909.13719.

    Args:
        image: `Tensor` of shape [height, width, 3] representing an image.
        num_layers: Integer, the number of augmentation transformations to
            apply sequentially to an image. Represented as (N) in the paper.
            Usually best values will be in the range [1, 3].
        magnitude: Integer, shared magnitude across all augmentation
            operations. Represented as (M) in the paper. Usually best values
            are in the range [5, 30].
        random_key: JAX PRNG key driving all random choices.
        cutout_const: max cutout size int
        translate_const: maximum translation amount int
        default_replace_value: default replacement value for pixels outside
            of the image; non-positive means "use a random color".
        available_ops: available operations
        op_probs: probabilities of operations
        join_transforms: when True, accumulate all geometric transforms and
            apply them in a single resampling pass at the end (more
            efficient but simpler); otherwise apply them per-op.

    Returns:
        The augmented version of `image`.
    """
    geometric_transforms = jnp.identity(4)
    # Loop carry shared with _randaugment_inner_for_loop (same ordering).
    for_i_args = (image, geometric_transforms, random_key, available_ops,
                  op_probs, magnitude, cutout_const, translate_const,
                  join_transforms, default_replace_value)
    if DEBUG:  # un-jitted
        for i in range(num_layers):
            for_i_args = _randaugment_inner_for_loop(i, for_i_args)
    else:  # jitted
        for_i_args = jax.lax.fori_loop(0, num_layers,
                                       _randaugment_inner_for_loop, for_i_args)
    image, geometric_transforms = for_i_args[0], for_i_args[1]
    if join_transforms:
        # Single resampling pass over the accumulated geometric transform.
        replace_value = jnp.where(default_replace_value > 0,
                                  jnp.ones([image.shape[-1]]) * default_replace_value,
                                  random.randint(random_key,
                                                 [image.shape[-1]],
                                                 minval=0,
                                                 maxval=256))
        image = transforms.apply_transform(image, geometric_transforms,
                                           mask_value=replace_value)
    return image
#
# if not DEBUG:
# distort_image_with_randaugment = jax.jit(distort_image_with_randaugment, static_argnames=('default_replace_value', ))
| 1.765625 | 2 |
example_config.py | cyprienruffino/CycleGAN-TensorFlow | 2 | 12769125 | import hashlib
from applications import cyclegan_disc, cyclegan_gen_9
from configs.abstract_config import AbstractConfig
from CycleGAN import CycleGANBase
class CustomConfig(AbstractConfig):
    """Example CycleGAN run configuration.

    The training pipeline reads these attributes by name; edit only the
    values (not the attribute names) per experiment.
    """

    def __init__(self, name):
        super().__init__(name)
        # Edit from here

        # Run metadata
        self.gpus = 1  # Up to 2
        self.name = name
        # Deterministic 8-digit seed derived from a SHA-1 of the run name,
        # so re-running a named experiment is reproducible.
        self.seed = int(hashlib.sha1(name.encode("utf-8")).hexdigest(), 16) % (
            10 ** 8)

        # Dataset size
        self.dataA_channels = 3
        self.dataB_channels = 3
        self.dataset_size = 1500  # Same size for the 2 datasets
        self.resize_size = 500  # The image sizes are first standardized with a resizing
        self.image_size = 200  # Then patches are randomly cropped at training time

        # Training settings
        # These are the standard CycleGAN parameters
        self.cyc_factor = 10  # The Lambda hyperparameter, controls the importance of the reconstruction term
        self.pool_size = 50  # Size of the generated image pool
        self.initial_learning_rate = 0.0002
        self.final_learning_rate = 0.000002  # From mid-training on, learning rate decays linearly
        self.batch_size = 1
        self.epochs = 200

        # Networks setup: constructor callables plus their keyword args.
        self.genA = cyclegan_gen_9.create_network
        self.genA_args = {"channels_out": 3, "name": "GenA"}
        self.genB = cyclegan_gen_9.create_network
        self.genB_args = {"channels_out": 3, "name": "GenB"}
        self.discA = cyclegan_disc.create_network
        self.discA_args = {"channels": 3, "name": "DiscA"}
        self.discB = cyclegan_disc.create_network
        self.discB_args = {"channels": 3, "name": "DiscB"}
| 2.390625 | 2 |
core/polyaxon/operations/tuner.py | erexer/polyaxon | 0 | 12769126 | <filename>core/polyaxon/operations/tuner.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from polyaxon import types
from polyaxon.auxiliaries import get_default_tuner_container
from polyaxon.k8s.k8s_schemas import V1Container
from polyaxon.polyflow import (
V1IO,
V1Bayes,
V1Component,
V1Hyperband,
V1Hyperopt,
V1Matrix,
V1Operation,
V1Param,
V1Plugins,
V1Termination,
V1Tuner,
)
def get_tuner(
    name: str,
    container: V1Container,
    matrix: V1Matrix,
    configs: List[Dict],
    metrics: List[float],
    iteration: int,
) -> V1Operation:
    """Build a tuner V1Operation wired with the given configs, metrics and matrix.

    The component declares configs/metrics/iteration as inputs and emits a
    list of suggestion dicts as its only output.
    """
    params = {
        "configs": V1Param(value=configs),
        "metrics": V1Param(value=metrics),
        "matrix": V1Param(value=matrix),
        "iteration": V1Param(value=iteration),
    }
    # Tuners are side-effect free helpers: keep auth but skip collection.
    plugins = V1Plugins(
        auth=True,
        collect_logs=False,
        collect_artifacts=False,
        collect_resources=False,
        sync_statuses=True,
    )
    inputs = [
        V1IO(name="configs", iotype=types.DICT, is_list=True, is_optional=False),
        V1IO(name="metrics", iotype=types.FLOAT, is_list=True, is_optional=False),
        V1IO(name="iteration", iotype=types.INT, is_list=True, is_optional=True),
    ]
    outputs = [
        V1IO(name="suggestions", iotype=types.DICT, is_list=True, is_optional=False),
    ]
    component = V1Component(
        name=name,
        plugins=plugins,
        inputs=inputs,
        outputs=outputs,
        run=V1Tuner(container=container),
    )
    return V1Operation(
        params=params,
        termination=V1Termination(max_retries=3),
        component=component,
    )
def _build_tuner(
    name: str,
    container_command: List[str],
    matrix,
    configs: List[Dict],
    metrics: List[float],
    iteration: int,
    container: V1Container = None,
) -> V1Operation:
    """Shared builder: fall back to the default tuner container, then delegate.

    Factors out the boilerplate previously triplicated across the three
    public get_*_tuner helpers below.
    """
    container = container or get_default_tuner_container(container_command)
    return get_tuner(
        name=name,
        container=container,
        matrix=matrix,
        configs=configs,
        metrics=metrics,
        iteration=iteration,
    )


def get_bo_tuner(
    matrix: V1Bayes,
    configs: List[Dict],
    metrics: List[float],
    iteration: int,
    container: V1Container = None,
) -> V1Operation:
    """Build a Bayesian-optimization tuner operation."""
    return _build_tuner(
        name="bayesian-tuner",
        container_command=["polyaxon", "tuner", "bo"],
        matrix=matrix,
        configs=configs,
        metrics=metrics,
        iteration=iteration,
        container=container,
    )


def get_hyperband_tuner(
    matrix: V1Hyperband,
    configs: List[Dict],
    metrics: List[float],
    iteration: int,
    container: V1Container = None,
) -> V1Operation:
    """Build a Hyperband tuner operation."""
    return _build_tuner(
        name="hyperband-tuner",
        container_command=["polyaxon", "tuner", "hyperband"],
        matrix=matrix,
        configs=configs,
        metrics=metrics,
        iteration=iteration,
        container=container,
    )


def get_hyperopt_tuner(
    matrix: V1Hyperopt,
    configs: List[Dict],
    metrics: List[float],
    iteration: int,
    container: V1Container = None,
) -> V1Operation:
    """Build a Hyperopt tuner operation."""
    return _build_tuner(
        name="hyperopt-tuner",
        container_command=["polyaxon", "tuner", "hyperopt"],
        matrix=matrix,
        configs=configs,
        metrics=metrics,
        iteration=iteration,
        container=container,
    )
| 1.976563 | 2 |
resippy/photogrammetry/dem/dem_factory.py | BeamIO-Inc/resippy | 11 | 12769127 | from __future__ import division
from resippy.image_objects.earth_overhead.geotiff.geotiff_image_factory import GeotiffImageFactory
from resippy.photogrammetry.dem.geotiff_dem import GeotiffDem
from resippy.photogrammetry.dem.constant_elevation_dem import ConstantElevationDem
class DemFactory:
    """Factory helpers for constructing DEM (digital elevation model) objects."""

    @staticmethod
    def from_gtiff_file(fname,  # type: str
                        nodata_value=None,  # type: float
                        interpolation_method='bilinear',  # type: str
                        ):
        # type: (...) -> GeotiffDem
        """Build a GeotiffDem from a GeoTIFF file.

        Args:
            fname: path to the GeoTIFF elevation raster.
            nodata_value: value marking invalid pixels; when None, the value
                stored in the GeoTIFF metadata is used (which may itself be
                None -- remove_nodata_values then receives None).
            interpolation_method: 'bilinear' or 'nearest'.

        Returns:
            A configured GeotiffDem.

        Raises:
            TypeError: if interpolation_method is not a supported value.
        """
        gtiff = GeotiffImageFactory.from_file(fname)
        gtiff_dem = GeotiffDem()
        gtiff_dem.set_geotiff_image(gtiff)
        # Fall back to the nodata value recorded in the GeoTIFF metadata.
        if nodata_value is None:
            nodata_value = gtiff.get_metadata().get_nodata_val()
        # TODO: optimization needed here, takes a very long time for large datasets
        gtiff_dem.remove_nodata_values(nodata_value)
        if interpolation_method == 'bilinear':
            gtiff_dem.set_interpolation_to_bilinear()
        elif interpolation_method == 'nearest':
            gtiff_dem.set_interpolation_to_nearest()
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, silently leaving the DEM without the requested
            # interpolation configuration.
            raise TypeError(
                "interpolation method should either be 'bilinear' or 'nearest'")
        return gtiff_dem

    @staticmethod
    def constant_elevation(
            elevation=0  # type: float
    ):  # type: (...) -> ConstantElevationDem
        """Return a DEM that reports a single constant elevation everywhere."""
        return ConstantElevationDem(elevation=elevation)
| 2.09375 | 2 |
pyspec/mockobject.py | jyotijaya/pyspec | 1 | 12769128 | <filename>pyspec/mockobject.py
# -*- coding: ascii -*-
"""PySpec mock objects
"""
__pyspec = 1
import pyspec
import copy
from util import create_method_repr
from pyspec.embedded import config
class MockResult(object):
    """Fluent helper returned while recording a MockMethod call.

    Lets the recording script attach a canned return value to the call
    that was just recorded, e.g. ``mock.foo(1) == 42`` or
    ``mock.foo(1).result(42)``.
    """

    def __init__(self, parent_method):
        self.parent = parent_method

    def result(self, result_value):
        # Store the value the recorded call should return during playback.
        self.parent.result_value = result_value
        if self.parent.is_print:
            print " <= %s" % result_value

    def __eq__(self, result_value):
        # ``recorded_call == value`` is sugar for .result(value); it always
        # evaluates to True by design (this is a recorder, not a comparison).
        self.result(result_value)
        return True

    def repeat(self, count):
        # Expect the same call *count* times in total.
        self.parent.repeat(count)
        return self

    def with_any_parameter(self):
        # Disable argument verification for the recorded call.
        self.parent.with_any_parameter = True
        return self
class MockMethod(object):
    """A single expected method call on a mock object.

    Instances live in two phases controlled by ``is_recording``:
    while recording, a call captures the expected args/kwargs; during
    playback (a detached copy), a call verifies the actual arguments
    and returns the canned ``result_value``.
    """

    # __slots__ keeps instances small and guards against typo'd attributes.
    __slots__ = ("name", "is_recording", "is_print", "result_value",
                 "args", "kwargs", "parent", "with_any_parameter")

    def __init__(self, parent, name, is_print=False):
        self.name = name                 # expected method name
        self.is_recording = True         # flipped off in the playback copy
        self.is_print = is_print         # echo activity to stdout
        self.result_value = None         # value returned during playback
        self.args = []                   # expected positional args
        self.kwargs = {}                 # expected keyword args
        self.parent = parent             # owning MockObjectRecorder
        self.with_any_parameter = False  # when True, skip arg verification

    def call(self, name):
        # Playback entry: verify the invoked name matches the recording.
        if name != self.name:
            raise AssertionError("Mock: expected function name is %s(), but was %s()" % (self.name, name))
        return self

    def repeat(self, count):
        # Register this expectation (count - 1) additional times.
        for i in xrange(count - 1):
            self.parent._methods.append(self)

    def __call__(self, *args, **kwargs):
        if self.is_recording:
            # Recording phase: remember the expected arguments.
            if self.is_print:
                print "Recording: %s" % self.str_for_status(args, kwargs)
            self.args = args
            self.kwargs = kwargs
            return MockResult(self)
        else:
            # Playback phase: verify arguments, return the canned result.
            if self.is_print:
                if self.result_value is not None:
                    result = " => %s" % self.result_value
                else:
                    result = ""
                print "Calling: %s%s" \
                    % (self.str_for_status(args, kwargs), result)
            if not self.with_any_parameter:
                if args != self.args:
                    raise AssertionError(
                        "Mock: expected args are %r, but was %r" \
                        % (self.args, args))
                if kwargs != self.kwargs:
                    raise AssertionError(
                        "Mock: expected kwargs are %r, but was %r" \
                        % (self.kwargs, kwargs))
            return self.result_value

    def str_for_status(self, args, kwargs):
        # Human-readable "MockObject.name(args)" string for log output.
        return "MockObject.%s" % create_method_repr(self.name, args, kwargs)

    def __str__(self):
        args = ", ".join((repr(arg) for arg in self.args))
        kwargs = ", ".join(("%s=%s" % (key, repr(value)) \
                            for key, value in self.kwargs.iteritems()))
        if args != "" and kwargs != "":
            argstring = "%s, %s" % (args, kwargs)
        elif args != "":
            argstring = args
        else:
            argstring = kwargs
        return " %s(%s) = %r" % (self.name, argstring, self.result_value)

    def copy(self):
        # Shallow playback copy: detached from the recorder, recording off.
        new_method = copy.copy(self)
        new_method.is_recording = False
        new_method.parent = None
        return new_method
class MockObjectRecorder(object):
    """Records an expected call sequence via attribute access.

    Every attribute lookup that is not a real attribute creates a new
    MockMethod expectation; call ``_get_mock_object_()`` afterwards to
    obtain the playback MockObject.
    """

    def __init__(self, is_print = False):
        self._is_print = is_print
        self._methods = []  # MockMethod expectations, in recording order

    def __getattribute__(self, name):
        try:
            attr = super(MockObjectRecorder, self).__getattribute__(name)
            return attr
        except AttributeError:
            # Introspection probes (e.g. by dir()) must not create
            # spurious expectations.
            if name in ("__members__", "__methods__"):
                raise AttributeError()
            method = MockMethod(self, name, self._is_print)
            self._methods.append(method)
            return method

    def _get_mock_object_(self):
        # Build the playback object from detached copies of the recordings.
        mock_object = MockObject(self._is_print)
        mock_object._methods = [method.copy() for method in self._methods]
        return mock_object
class MockObject(object):
    """Plays back a recorded call sequence, verifying order and names."""

    def __init__(self, is_print = False):
        self._is_print = is_print
        self._methods = []  # playback MockMethod copies, in expected order
        self._current = 0   # index of the next expected call

    def __getattribute__(self, name):
        try:
            attr = super(MockObject, self).__getattribute__(name)
            return attr
        except AttributeError:
            if name in ("__members__", "__methods__"):
                raise AttributeError()
            # Any call beyond the recorded sequence is a test failure.
            if len(self._methods) <= self._current:
                raise AssertionError("Mock: unexpected method call '%s'" % name)
            result = self._methods[self._current]
            self._current += 1
            return result.call(name)

    def _verify_(self):
        # Fail unless every recorded expectation was consumed.
        if len(self._methods) != self._current:
            raise AssertionError("Mock: method %s() must be called."
                                 % self._methods[self._current].name)
        if config.runtime.report_out:
            msg = ["MockObject should be call like this:"]
            if self._is_print:
                for method in self._methods:
                    msg.append(str(method))
            config.runtime.report_out.write((None, "\n".join(msg)))
class MockFile(object):
    """Mock writable file that checks written data against expected contents.

    ``write`` compares each chunk against the next slice of the expected
    string; ``_verify_`` fails if anything remains unwritten.
    """

    def __init__(self, contents):
        self._contents = contents  # full expected output
        self._cursor = 0           # how many chars have been verified so far

    def write(self, actual):
        begin = self._cursor
        stop = begin + len(actual)
        try:
            expected = self._contents[begin:stop]
        except IndexError:
            raise AssertionError("FileMock: last write('%s') is unexpected" % actual)
        if actual != expected:
            raise AssertionError("FileMock: expected is '%s', but was '%s'" % (expected, actual))
        self._cursor = stop

    def _verify_(self):
        remain = len(self._contents) - self._cursor
        if remain > 0:
            # Show at most the first 7 missing characters in the message.
            preview = self._contents[self._cursor:self._cursor + 7]
            if remain > 7:
                preview = preview + "..."
            raise AssertionError("FileMock: %d chars(%s) must be written." % (remain, preview))
class MockSocket(object):
    """Socket stand-in that replays scripted recv data and verifies sends.

    ``recv``/``send`` consume the scripted message lists in order; most
    other socket API methods are absorbed by inert MockMethod stubs.
    """

    def __init__(self, recv=None, send=None):
        self._send_messages = []
        self._recv_messages = []
        self._blocking = 1
        self._timeout = None
        # Accept either a single message string or a list/tuple of them.
        if type(recv) == str:
            self._add_recv_message_(recv)
        elif type(recv) in (list, tuple):
            self._add_recv_message_(*recv)
        if type(send) == str:
            self._add_send_message_(send)
        elif type(send) in (list, tuple):
            self._add_send_message_(*send)

    def _add_recv_message_(self, *message):
        self._recv_messages += list(message)

    def _add_send_message_(self, *message):
        self._send_messages += list(message)

    def recv(self, bufsize, flag=None):
        import socket, errno
        try:
            result = self._recv_messages[0]
            if len(result) > bufsize:
                # Partial read: keep the remainder for the next recv().
                self._recv_messages[0] = result[bufsize:]
                result = result[:bufsize]
            else:
                del self._recv_messages[0]
            return result
        except IndexError:
            # Script exhausted: blocking sockets fail the test immediately,
            # non-blocking ones emulate a timeout.
            if self._blocking == 1:
                raise AssertionError("recv buffer underflow")
            else:
                raise socket.error((errno.ETIMEDOUT, "timeout"))

    def send(self, string, flag=None):
        try:
            if self._send_messages[0] != string:
                raise AssertionError('MockSocket.send(): expected is "%s", but was "%s"' % (self._send_messages[0], string))
            del self._send_messages[0]
            return len(string)
        except IndexError:
            raise AssertionError('MockSocket.send(): unexpected send "%s"' % string)

    def accept(self):
        # Pretend the accepted connection is this very socket.
        return (self, None)

    def setblocking(self, flag):
        # NOTE(review): real sockets map setblocking(1) to timeout=None,
        # not a 1-second timeout -- confirm this simplification is intended.
        if flag == 0:
            self.settimeout(0)
        else:
            self.settimeout(1)

    def settimeout(self, value):
        if value is None:
            self._blocking = 1
            self._timeout = None
        else:
            self._blocking = 0
            self._timeout = float(value)

    def gettimeout(self):
        return self._timeout

    def __getattribute__(self, name):
        try:
            attr = super(MockSocket, self).__getattribute__(name)
            return attr
        except AttributeError:
            # The rest of the socket API becomes inert record-only stubs.
            if name in ["accept", "bind", "close", "connect", "connect_ex",
                        "fileno", "getpeername", "getsockname", "getsockopt",
                        "listen", "makefile", "recvfrom", "sendall", "sendto",
                        "setsockopt", "shutdown"]:
                return MockMethod(None, name)
            raise AttributeError(name)
| 3.03125 | 3 |
socket_client.py | KhalilWong/AIO | 0 | 12769129 | import asyncio
import time
################################################################################
async def main():
    """Connect to the local server on port 8888 and print every received line."""
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
    # Drain the stream line by line until the peer closes the connection.
    while True:
        if reader.at_eof():
            break
        line = await reader.readline()
        print('[{}] Received: {}'.format(time.strftime('%X'), line))
################################################################################
# Entry point: start the event loop only when executed as a script.
if __name__ == '__main__':
    asyncio.run(main())
| 3.078125 | 3 |
tests/test_optimizers/test_inf_nan.py | Wollala/Gradient-Free-Optimizers | 1 | 12769130 | <gh_stars>1-10
import pytest
import random
import numpy as np
from ._parametrize import optimizers
def objective_function_nan(para):
    """Pathological objective: returns 1 or NaN with equal probability."""
    coin = random.randint(0, 1)
    return 1 if coin == 0 else np.nan
def objective_function_m_inf(para):
    """Pathological objective: returns 1 or -inf with equal probability."""
    coin = random.randint(0, 1)
    return 1 if coin == 0 else -np.inf
def objective_function_inf(para):
    """Pathological objective: returns 1 or +inf with equal probability."""
    coin = random.randint(0, 1)
    return 1 if coin == 0 else np.inf
# Single 1-D integer search dimension shared by every test below.
search_space = {"x1": np.arange(0, 20, 1)}

# pytest parametrization payload: each test runs once per pathological
# objective (NaN, -inf, +inf).
objective_para = (
    "objective",
    [
        (objective_function_nan),
        (objective_function_m_inf),
        (objective_function_inf),
    ],
)
@pytest.mark.parametrize(*objective_para)
@pytest.mark.parametrize(*optimizers)
def test_inf_nan_0(Optimizer, objective):
    """Every optimizer must survive objectives returning NaN/inf scores."""
    opt = Optimizer(search_space, initialize={"random": 20})
    opt.search(
        objective,
        n_iter=80,
        verbosity={"print_results": False, "progress_bar": False},
    )
@pytest.mark.parametrize(*objective_para)
@pytest.mark.parametrize(*optimizers)
def test_inf_nan_1(Optimizer, objective):
    """With memory disabled, a healthy share of recorded scores is finite."""
    opt = Optimizer(search_space, initialize={"random": 20})
    opt.search(
        objective,
        n_iter=50,
        memory=False,
        verbosity={"print_results": False, "progress_bar": False},
    )
    search_data = opt.search_data
    print("\n search_data \n", search_data)
    scores = search_data["score"].values
    # Count entries that are neither +/-inf nor NaN.
    finite_count = np.sum(~np.isinf(scores) & ~np.isnan(scores))
    assert 10 < finite_count < 40
| 2.109375 | 2 |
networks/mocogan.py | maua-maua-maua/nvGAN | 0 | 12769131 | <gh_stars>0
import functools
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf
from torch import Tensor
import dnnlib
from networks.stylegan2 import Discriminator as ImageDiscriminator
from torch_utils import persistence
# ----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(nn.Module):
    """
    MoCoGAN discriminator, consisting of 2 parts: a per-frame
    ImageDiscriminator (StyleGAN2) and a 3-D convolutional
    MoCoGANVideoDiscriminator over whole clips.
    """

    def __init__(self, cfg: DictConfig, img_channels: int, img_resolution: int, *img_discr_args, **img_discr_kwargs):
        super().__init__()
        self.cfg = cfg
        # Image branch: StyleGAN2 discriminator configured for single
        # frames (num_frames_per_sample=1) with video features disabled.
        self.image_discr = ImageDiscriminator(
            img_resolution=img_resolution,
            img_channels=img_channels,
            cfg=OmegaConf.create(
                {
                    "num_frames_per_sample": 1,
                    "hyper_type": "no_hyper",
                    "dummy_c": False,
                    "dummy_synth_cfg": {},
                    "predict_dists_weight": 0.0,
                    "contr": {"resolutions": []},
                    # Full-width feature maps only at high resolutions.
                    "fmaps": 1.0 if img_resolution >= 512 else 0.5,
                    "mbstd_group_size": 4,
                    "time_enc_type": "diff",
                    "agg": {"type": "concat", "conca_res": 0},
                }
            ),
            *img_discr_args,
            **img_discr_kwargs,
        )
        # Video branch: classic MoCoGAN 3-D conv discriminator with
        # DCGAN-style weight initialization.
        self.video_discr = MoCoGANVideoDiscriminator(
            n_channels=img_channels,
            n_output_neurons=1,
            bn_use_gamma=True,
            use_noise=True,
            noise_sigma=0.1,
            image_size=img_resolution,
        )
        self.video_discr.apply(weights_init)

    def params_with_lr(self, lr: float) -> List[Dict]:
        """Optimizer parameter groups; the video branch gets a scaled lr."""
        return [
            {"params": self.image_discr.parameters()},
            {"params": self.video_discr.parameters(), "lr": self.cfg.video_discr_lr_multiplier * lr},
        ]

    def forward(self, img: Tensor, c: Tensor, t: Tensor, **img_discr_kwargs) -> Tuple[Tensor, "None"]:
        """
        - img has shape [batch_size * num_frames_per_sample, c, h, w]
        - c has shape [batch_size, c, h, w]
        - t has shape [batch_size, num_frames_per_sample]

        Returns a dict with per-frame image logits and flattened video logits.
        """
        batch_size, num_frames_per_sample = t.shape
        image_logits = self.image_discr(img, c, t, **img_discr_kwargs)["image_logits"]  # [batch_size * num_frames]
        # Preparing input for the video discriminator
        videos = img.view(batch_size, num_frames_per_sample, *img.shape[1:])  # [batch_size, t, c, h, w]
        videos = videos.permute(0, 2, 1, 3, 4).contiguous()  # [batch_size, c, t, h, w]
        video_logits = self.video_discr(videos)  # (num_subdiscrs, num_layers, [batch_size, 1, out_t, out_h, out_w])
        # We return a tuple for backward compatibility
        return {"image_logits": image_logits, "video_logits": video_logits.flatten(start_dim=1)}
# ----------------------------------------------------------------------------
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for conv weights; N(1, 0.02) weight and
    zero bias for BatchNorm3d layers. All other modules are untouched."""
    layer_name = m.__class__.__name__
    if "Conv" in layer_name and hasattr(m, "weight"):
        m.weight.data.normal_(0.0, 0.02)
    elif "BatchNorm3d" in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type="instance"):
    """Return a partially-applied 3-D normalization layer constructor.

    Supported values are "batch" and "instance"; anything else raises
    NotImplementedError.
    """
    if norm_type == "batch":
        return functools.partial(nn.BatchNorm3d, affine=True)
    if norm_type == "instance":
        return functools.partial(
            nn.InstanceNorm3d, affine=False, track_running_stats=True)
    raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
# ----------------------------------------------------------------------------
@persistence.persistent_class
class VideoDiscriminator(nn.Module):
    """Multi-scale 3-D patch discriminator (pix2pixHD style).

    Holds ``num_sub_discrs`` SubVideoDiscriminators, each operating on a
    progressively average-pooled copy of the input video.
    """

    def __init__(
        self,
        num_input_channels,
        ndf=64,
        n_layers=3,
        n_frames_per_sample=16,
        norm_layer=nn.InstanceNorm3d,
        num_sub_discrs=2,
        get_intermediate_feat=True,
    ):
        super().__init__()
        self.num_sub_discrs = num_sub_discrs
        self.n_layers = n_layers
        self.get_intermediate_feat = get_intermediate_feat
        ndf_max = 64
        for i in range(num_sub_discrs):
            # Coarser scales get wider feature maps, capped at ndf_max.
            block = SubVideoDiscriminator(
                num_input_channels=num_input_channels,
                ndf=min(ndf_max, ndf * (2 ** (num_sub_discrs - 1 - i))),
                n_layers=n_layers,
                norm_layer=norm_layer,
                get_intermediate_feat=get_intermediate_feat,
            )
            # Re-register each sub-block's layers as attributes on self so
            # they show up individually in state_dict / persistence.
            if get_intermediate_feat:
                for j in range(n_layers + 2):
                    setattr(self, "scale" + str(i) + "_layer" + str(j), getattr(block, "model" + str(j)))
            else:
                setattr(self, "layer" + str(i), block.model)
        # Do not pool over time for short clips (<= 16 frames).
        stride = 2 if n_frames_per_sample > 16 else [1, 2, 2]
        self.downsample = nn.AvgPool3d(3, stride=stride, padding=[1, 1, 1], count_include_pad=False)

    def singleD_forward(self, model, input):
        # Run one sub-discriminator; optionally collect per-layer features.
        if self.get_intermediate_feat:
            result = [input]
            for i in range(len(model)):
                result.append(model[i](result[-1]))
            return result[1:]
        else:
            return [model(input)]

    def forward(self, x):
        result = []
        x = x
        for block_idx in range(self.num_sub_discrs):
            # Sub-discriminators are traversed from finest to coarsest
            # (attribute indices run in reverse).
            if self.get_intermediate_feat:
                model = [
                    getattr(self, "scale" + str(self.num_sub_discrs - 1 - block_idx) + "_layer" + str(j))
                    for j in range(self.n_layers + 2)
                ]
            else:
                model = getattr(self, "layer" + str(self.num_sub_discrs - 1 - block_idx))
            result.append(self.singleD_forward(model, x))
            # Downsample the video before feeding the next sub-discriminator.
            if block_idx != (self.num_sub_discrs - 1):
                x = self.downsample(x)
        return result
# ----------------------------------------------------------------------------
@persistence.persistent_class
class SubVideoDiscriminator(nn.Module):
    """Single-scale 3-D conv PatchGAN discriminator.

    Layers are stored either as one fused nn.Sequential (``model``) or as
    separate ``model0..modelN`` stages so intermediate features can be
    collected for feature-matching losses.
    """

    def __init__(
        self, num_input_channels, ndf=64, n_layers=3, norm_layer=nn.InstanceNorm3d, get_intermediate_feat=True
    ):
        super().__init__()
        self.get_intermediate_feat = get_intermediate_feat
        self.n_layers = n_layers

        kernel_size = 4
        padw = int(np.ceil((kernel_size - 1.0) / 2))
        # Stage 0: input conv, no normalization.
        sequence = [
            [
                nn.Conv3d(num_input_channels, ndf, kernel_size=kernel_size, stride=2, padding=padw),
                nn.LeakyReLU(0.2, True),
            ]
        ]

        # Stages 1..n_layers-1: strided conv + norm, doubling width (cap 512).
        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [
                [
                    nn.Conv3d(nf_prev, nf, kernel_size=kernel_size, stride=2, padding=padw),
                    norm_layer(nf),
                    nn.LeakyReLU(0.2, True),
                ]
            ]

        # Penultimate stage: stride-1 conv + norm.
        nf_prev = nf
        nf = min(nf * 2, 512)
        sequence += [
            [
                nn.Conv3d(nf_prev, nf, kernel_size=kernel_size, stride=1, padding=padw),
                norm_layer(nf),
                nn.LeakyReLU(0.2, True),
            ]
        ]

        # Final stage: 1-channel patch logits.
        sequence += [[nn.Conv3d(nf, 1, kernel_size=kernel_size, stride=1, padding=padw)]]

        if get_intermediate_feat:
            # Register each stage separately so forward() can tap features.
            for n in range(len(sequence)):
                setattr(self, "model" + str(n), nn.Sequential(*sequence[n]))
        else:
            self.model = nn.Sequential(*[s for ss in sequence for s in ss])

    def forward(self, x):
        if self.get_intermediate_feat:
            res = [x]
            for n in range(self.n_layers + 2):
                model = getattr(self, "model" + str(n))
                res.append(model(res[-1]))
            # Drop the input itself; return one feature map per stage.
            return res[1:]
        else:
            return self.model(x)
class MoCoGANVideoDiscriminator(nn.Module):
    """Original MoCoGAN video discriminator: a stack of strided 3-D convs
    (temporal stride 1, spatial stride 2) with optional input noise,
    ending in a single-logit conv head.
    """

    def __init__(
        self,
        n_channels,
        n_output_neurons=1,
        bn_use_gamma=True,
        use_noise=False,
        noise_sigma=None,
        ndf=64,
        image_size: int = 64,
    ):
        super(MoCoGANVideoDiscriminator, self).__init__()

        self.n_channels = n_channels
        self.n_output_neurons = n_output_neurons
        self.use_noise = use_noise
        self.bn_use_gamma = bn_use_gamma

        # Core trunk: 4 noise+conv stages halving the spatial resolution.
        layers = [
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(n_channels, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.LeakyReLU(0.2, inplace=True),

            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),

            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),

            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(ndf * 4, ndf * 8, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        ]

        # Extra stride-1 stages for 256px inputs to deepen the receptive field.
        if image_size == 256:
            layers.extend(
                [
                    Noise(use_noise, sigma=noise_sigma),
                    nn.Conv3d(ndf * 8, ndf * 8, 3, stride=(1, 1, 1), padding=(1, 1, 1), bias=False),
                    nn.BatchNorm3d(ndf * 8),
                    nn.LeakyReLU(0.2, inplace=True),

                    Noise(use_noise, sigma=noise_sigma),
                    nn.Conv3d(ndf * 8, ndf * 8, 3, stride=(1, 1, 1), padding=(1, 1, 1), bias=False),
                    nn.BatchNorm3d(ndf * 8),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
            )

        # Head: valid conv producing the output logits.
        layers.extend(
            [
                nn.Conv3d(ndf * 8, n_output_neurons, 4, 1, 0, bias=False),
            ]
        )

        self.main = nn.Sequential(*layers)

    def forward(self, input):
        # squeeze() drops all singleton dims of the logit volume.
        return self.main(input).squeeze()
# ----------------------------------------------------------------------------
class Noise(nn.Module):
    """Optionally add Gaussian noise with standard deviation *sigma*."""

    def __init__(self, use_noise, sigma=0.2):
        super(Noise, self).__init__()
        self.use_noise = use_noise
        self.sigma = sigma

    def forward(self, x: Tensor):
        # Pass through untouched when noise injection is disabled.
        if not self.use_noise:
            return x
        return x + self.sigma * torch.randn_like(x)
# ----------------------------------------------------------------------------
| 1.976563 | 2 |
Day2.2.py | m-berk/AdventOfCode2019 | 1 | 12769132 | Data_File = open("Day2_Data.txt")
Data = Data_File.read()
Data = Data.split(',')
for i in range(len(Data)):
Data[i]=int(Data[i])
Data_Backup = Data.copy()
for noun in range (100):
for verb in range(100):
Data = Data_Backup.copy()
Data[1] = noun
Data[2] = verb
Index_Number = 0
while(True):
if(Data[Index_Number] == 1):
Data[Data[Index_Number+3]] = Data[Data[Index_Number+1]] + Data[Data[Index_Number+2]]
Index_Number +=4
elif(Data[Index_Number] ==2):
Data[Data[Index_Number + 3]] = Data[Data[Index_Number + 1]] * Data[Data[Index_Number + 2]]
Index_Number += 4
elif(Data[Index_Number] == 99):
#print("Terminated successfully")
break
else:
#print("Something has gone wrong, read opcode:"+ str(Data[Index_Number]))
#print("Terminated")
break
if(Data[0] == 19690720):
print("noun :" + str(noun) )
print("verb :" + str(verb) )
exit()
| 3.25 | 3 |
ISBN.py | Jeet1243/Mathematizer | 0 | 12769133 | # https://www.facebook.com/roshan.philipines/posts/2769055946717382
# Subscribed by Roshaen
#Implementation of ISBN number
def isbn(n):
    """Validate a 10-character ISBN and print the verdict.

    The ISBN-10 checksum weights the characters 10 down to 1; the weighted
    sum of a valid number is divisible by 11. A trailing 'X' (or 'x')
    stands for the value 10, as allowed by the ISBN-10 standard.

    Args:
        n: the ISBN-10 string without separators.

    Returns:
        True when the checksum is valid, False otherwise. (The original
        printed the verdict and returned None; the print is kept and a
        boolean result is added for programmatic callers.)
    """
    total = 0
    for position, char in enumerate(n):
        # 'X' encodes the value 10 (normally only valid as check digit).
        value = 10 if char in ('X', 'x') else int(char)
        total += value * (10 - position)
    valid = total % 11 == 0
    if valid:
        print("Valid ISBN number")
    else:
        print("Not a valid ISBN number")
    return valid
# Simple CLI driver: read an ISBN from stdin and validate it.
inp = input("Enter isbn number: ")
isbn(inp)
| 3.65625 | 4 |
bin/sofa_record.py | sjeemb/sofa | 0 | 12769134 | #!/usr/bin/env python3
import numpy as np
import csv
import json
import sys
import argparse
import multiprocessing as mp
import glob
import os
from functools import partial
from sofa_print import *
import subprocess
from time import sleep, time
def sofa_record(command, logdir, cfg):
    """Profile *command* and collect system-wide traces into *logdir*.

    Spawns background collectors (tcpdump, mpstat, vmstat and — when nvprof
    is installed — nvidia-smi dmon/topo), then runs `perf record` (wrapped
    in `nvprof` when available) around *command*, and finally terminates
    the collectors.  Requires kernel.kptr_restrict=0 and
    kernel.perf_event_paranoid=-1; exits otherwise.

    :param command: shell command string to profile.
    :param logdir: directory receiving all trace/log files (created if needed).
    :param cfg: config object; only ``cfg.profile_all_cpus`` is read here.
    """
    p_tcpdump = None
    p_mpstat = None
    p_vmstat = None
    p_nvsmi = None
    p_nvtopo = None
    print_info('SOFA_COMMAND: %s' % command)
    sample_freq = 99
    # Preconditions: kernel symbols must be readable and perf events allowed.
    if int(open("/proc/sys/kernel/kptr_restrict").read()) != 0:
        print_error(
            "/proc/kallsyms permission is restricted, please try the command below:")
        print_error("sudo sysctl -w kernel.kptr_restrict=0")
        quit()
    if int(open("/proc/sys/kernel/perf_event_paranoid").read()) != -1:
        print_error('PerfEvent is not avaiable, please try the command below:')
        print_error('sudo sysctl -w kernel.perf_event_paranoid=-1')
        quit()

    if subprocess.call(['mkdir', '-p', logdir]):
        quit()
    # Remove stale artifacts from a previous recording session.
    subprocess.call('rm %s/perf.data > /dev/null 2> /dev/null' % logdir, shell=True )
    subprocess.call('rm %s/sofa.pcap > /dev/null 2> /dev/null' % logdir, shell=True)
    subprocess.call('rm %s/gputrace*.nvvp > /dev/null 2> /dev/null' % logdir, shell=True)
    subprocess.call('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir, shell=True)
    subprocess.call('rm %s/*.csv > /dev/null 2> /dev/null' % logdir, shell=True)
    subprocess.call('rm %s/*.txt > /dev/null 2> /dev/null' % logdir, shell=True)
    try:
        # --- Prolog: start background collectors ---
        print_info("Prolog of Recording...")
        with open(os.devnull, 'w') as FNULL:
            p_tcpdump = subprocess.Popen(["tcpdump",
                                          '-i',
                                          'any',
                                          '-v',
                                          'tcp',
                                          '-w',
                                          '%s/sofa.pcap' % logdir],
                                         stderr=FNULL)

        with open('%s/mpstat.txt' % logdir, 'w') as logfile:
            p_mpstat = subprocess.Popen(
                ['mpstat', '-P', 'ALL', '1', '600'], stdout=logfile)

        with open('%s/vmstat.txt' % logdir, 'w') as logfile:
            p_vmstat = subprocess.Popen(['vmstat', '-w', '1', '600'], stdout=logfile)

        # GPU collectors only when the CUDA toolchain (nvprof) is present.
        if int(os.system('command -v nvprof')) == 0:
            with open('%s/nvsmi.txt' % logdir, 'w') as logfile:
                p_nvsmi = subprocess.Popen(['nvidia-smi', 'dmon', '-s', 'u'], stdout=logfile)
            with open('%s/nvlink_topo.txt' % logdir, 'w') as logfile:
                p_nvtopo = subprocess.Popen(['nvidia-smi', 'topo', '-m'], stdout=logfile)

        # Record wall-clock start time for later trace alignment.
        with open('%s/sofa_time.txt' % logdir, 'w') as logfile:
            logfile.write(str(int(time()))+'\n')

        # --- Recording: run the profiled command under perf (and nvprof) ---
        print_info("Recording...")
        if cfg.profile_all_cpus == True:
            perf_options = '-a'
        else:
            perf_options = ''

        subprocess.call('cp /proc/kallsyms %s/' % (logdir), shell=True )
        subprocess.call('chmod +w %s/kallsyms' % (logdir), shell=True )
        if int(os.system('command -v nvprof')) == 0:
            profile_command = 'nvprof --profile-child-processes -o %s/gputrace%%p.nvvp perf record -e cycles,bus-cycles -o %s/perf.data -F %s %s -- %s ' % (logdir, logdir, sample_freq, perf_options, command)
        else:
            print_warning('Profile without NVPROF')
            profile_command = 'perf record -o %s/perf.data -e cycles,bus-cycles -F %s %s -- %s' % (logdir, sample_freq, perf_options, command)

        print_info( profile_command)
        subprocess.call(profile_command.split())

        # --- Epilog: stop every collector that was started ---
        print_info("Epilog of Recording...")
        if p_tcpdump != None:
            p_tcpdump.terminate()
            print_info("tried terminating tcpdump")
        if p_vmstat != None:
            p_vmstat.terminate()
            print_info("tried terminating vmstat")
        if p_mpstat != None:
            p_mpstat.terminate()
            print_info("tried terminating mpstat")
        if p_nvtopo != None:
            p_nvtopo.terminate()
            print_info("tried terminating nvidia-smi topo")
        if p_nvsmi != None:
            p_nvsmi.terminate()
            print_info("tried terminating nvidia-smi dmon")
        #os.system('pkill tcpdump')
        #os.system('pkill mpstat')
        #os.system('pkill vmstat')
        #os.system('pkill nvidia-smi')
    except BaseException:
        # On any failure (including KeyboardInterrupt) kill the collectors
        # hard and re-raise so the caller sees the original error.
        print("Unexpected error:", sys.exc_info()[0])
        if p_tcpdump != None:
            p_tcpdump.kill()
            print_info("tried killing tcpdump")
        if p_vmstat != None:
            p_vmstat.kill()
            print_info("tried killing vmstat")
        if p_mpstat != None:
            p_mpstat.kill()
            print_info("tried killing mpstat")
        if p_nvtopo != None:
            p_nvtopo.kill()
            print_info("tried killing nvidia-smi topo")
        if p_nvsmi != None:
            p_nvsmi.kill()
            print_info("tried killing nvidia-smi dmon")
        raise
    print_info("End of Recording")
| 2.046875 | 2 |
src/third_party/beaengine/tests/0fc4.py | CrackerCat/rp | 1 | 12769135 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : <EMAIL>
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly checks for opcode 0F C4 (PINSRW / VPINSRW variants)."""

    def test(self):
        def disasm(hex_bytes):
            # Decode one instruction from a hex string.
            engine = Disasm(bytes.fromhex(hex_bytes))
            engine.read()
            return engine

        def check(engine, opcode, mnemonic, representation):
            # Common assertions for every decoded instruction.
            assert_equal(engine.infos.Instruction.Opcode, opcode)
            assert_equal(engine.infos.Instruction.Mnemonic, mnemonic)
            assert_equal(engine.repr(), representation)

        # NP 0F C4 /r ib1
        # PINSRW mm, r32/m16, imm8
        engine = disasm('0fc42022')
        check(engine, 0xfc4, b'pinsrw', 'pinsrw mm4, word ptr [rax], 22h')
        assert_equal(engine.infos.Instruction.Immediat, 0x22)

        engine = disasm('0fc4c022')
        check(engine, 0xfc4, b'pinsrw', 'pinsrw mm0, eax, 22h')
        assert_equal(engine.infos.Instruction.Immediat, 0x22)

        # 66 0F C4 /r ib
        # PINSRW xmm, r32/m16, imm8
        engine = disasm('660fc42022')
        check(engine, 0xfc4, b'pinsrw', 'pinsrw xmm4, word ptr [rax], 22h')
        assert_equal(engine.infos.Instruction.Immediat, 0x22)

        # VEX.NDS.128.66.0F.W0 C4 /r ib
        # VPINSRW xmm1, xmm2, r32/m16, imm8
        myVEX = VEX('VEX.NDS.128.66.0F.W0')
        engine = disasm('{}c410f0'.format(myVEX.c4()))
        check(engine, 0xc4, b'vpinsrw', 'vpinsrw xmm10, xmm15, word ptr [r8], F0h')

        myVEX = VEX('VEX.NDS.128.66.0F.W0')
        engine = disasm('{}c4c0f0'.format(myVEX.c4()))
        check(engine, 0xc4, b'vpinsrw', 'vpinsrw xmm8, xmm15, r8d, F0h')

        # EVEX.NDS.128.66.0F.WIG C4 /r ib
        # VPINSRW xmm1, xmm2, r32/m16, imm8
        myEVEX = EVEX('EVEX.NDS.128.66.0F.WIG')
        engine = disasm('{}c416bb'.format(myEVEX.prefix()))
        check(engine, 0xc4, b'vpinsrw', 'vpinsrw xmm26, xmm31, word ptr [r14], BBh')
| 2.109375 | 2 |
shopping-cart-with-discounts/discounts.py | DeepNinja07x/python | 3 | 12769136 | <reponame>DeepNinja07x/python<gh_stars>1-10
from abc import (ABC, abstractmethod)
from typing import (Dict, List, Optional)
from product import Product
class Discounts():
    """Wires individual discounts into a chain and totals their amounts."""

    def __init__(self, discounts: List["BaseDiscount"], available_products: Dict[str, Product]) -> None:
        self.discounts = discounts
        self.available_products = available_products
        # Link each discount to its successor (chain of responsibility).
        for current, successor in zip(self.discounts, self.discounts[1:]):
            current.set_next(successor)

    def calculate_total_discount(self, products: Dict[str, int]) -> int:
        """Return the aggregated discount for a {product_code: quantity} cart."""
        if not self.discounts:
            raise RuntimeError("Need to have setup at least one discount")
        return self.discounts[0].execute(products, self.available_products)
class BaseDiscount(ABC):
    """Abstract link in the discount chain.

    Subclasses implement ``calculate_discount``; ``execute`` sums this
    link's discount with the rest of the chain.
    """

    def __init__(self) -> None:
        self.next_discount = None  # type: Optional["BaseDiscount"]

    def set_next(self, discount: "BaseDiscount") -> None:
        self.next_discount = discount

    @abstractmethod
    def calculate_discount(self, products: Dict[str, int], available_products: Dict[str, Product]) -> int:
        pass

    def execute(self, products: Dict[str, int], available_products: Dict[str, Product]) -> int:
        own_discount = self.calculate_discount(products, available_products)
        if self.next_discount is None:
            return own_discount
        return own_discount + self.next_discount.execute(products, available_products)
# --- Available discount implementations: ---
class NoDiscount(BaseDiscount):
    """No-op discount: always contributes zero.

    Useful as a placeholder when no real discount is active, or in tests.
    """

    def calculate_discount(self, products: Dict[str, int], available_products: Dict[str, Product]) -> int:
        return 0
class BuyNGetOneFreeDiscount(BaseDiscount):
    """Buy N units and one of them becomes free, e.g. 2x1.

    For every full batch of ``amount`` units in the cart, one unit's price
    is discounted.
    """

    def __init__(self, product_code: str, amount: int) -> None:
        super().__init__()
        self.product_code = product_code
        self.amount = amount

    def calculate_discount(self, products: Dict[str, int], available_products: Dict[str, Product]) -> int:
        """Return one unit price per complete batch of ``amount`` units.

        Computed arithmetically in O(1); the original subtract-in-a-loop
        version was O(quantity) and spun forever when ``amount <= 0``.
        """
        if self.product_code not in products:
            return 0
        if self.amount <= 0:
            # Misconfigured batch size: no discount instead of hanging.
            return 0
        quantity = products[self.product_code]
        free_units = quantity // self.amount
        return free_units * available_products[self.product_code].price
class BulkPurchasePriceDiscount(BaseDiscount):
    """Buy at least N units and every unit costs a reduced price."""

    def __init__(self, product_code: str, amount: int, reduced_price: int) -> None:
        super().__init__()
        self.product_code = product_code
        self.amount = amount
        self.reduced_price = reduced_price

    def calculate_discount(self, products: Dict[str, int], available_products: Dict[str, Product]) -> int:
        """Return the gap between the regular and reduced totals, or 0."""
        if self.product_code not in products:
            return 0
        quantity = products[self.product_code]
        if quantity < self.amount:
            return 0
        regular_total = available_products[self.product_code].price * quantity
        return regular_total - self.reduced_price * quantity
| 3.390625 | 3 |
lib/ControlFile.py | cedadev/mistamover | 0 | 12769137 | # BSD Licence
# Copyright (c) 2012, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
from AbstractControlFile import *
class ControlFile(AbstractControlFile):
    """Reads/writes a stager control file.

    read() and create() live in the base class; encode() and decode() are
    the helpers they call to translate between field values and file lines.

    Lines of control file are:
        magic1
        data_file_name
        expected_size_in_bytes
        expected_md5_checksum
        basename_requested_for_receipt_file
        magic2
    """

    magic1 = "_start_stager_ctrl_data_"
    magic2 = "_end_stager_ctrl_data_"

    def encode(self, filename, size, cksum, rcptname=None):
        # Default the receipt name to "<data file>.rcpt" when not supplied.
        rcptname = rcptname or (filename + ".rcpt")
        return [filename, size, cksum, rcptname]

    def decode(self, lines):
        name, size_text, checksum, receipt_name = lines
        return [name, int(size_text), checksum, receipt_name]

    def getFileName(self):
        return self.data[0]

    def getFileSize(self):
        return self.data[1]

    def getFileChecksum(self):
        return self.data[2]

    def getRcptName(self):
        return self.data[3]
if __name__ == '__main__':
    # Ad-hoc smoke test (note: Python 2 print syntax): write a control file,
    # dump it to stdout, then read it back and print the decoded fields.
    import os
    fname = "myfile.tmp"
    os.system("rm -f %s" % fname)
    a1 = ControlFile(fname)
    a1.create("foo", 34, "a25902q5390", "foo.rcpt")
    os.system("cat %s" % fname)
    a2 = ControlFile(fname)
    path, size, cksum, rcptfile = a2.read()
    print "-- %s -- %d -- %s -- %s --" % (path, size, cksum, rcptfile)
    print a2.getFileSize()
| 2.75 | 3 |
tests/test_managers/test_build_job.py | DXist/polyaxon-cli | 0 | 12769138 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.build_job import BuildJobManager
from polyaxon_cli.schemas import BuildJobConfig
class TestBuildJobManager(TestCase):
    """Sanity checks on BuildJobManager's class-level configuration."""

    def test_default_props(self):
        manager = BuildJobManager
        assert manager.IS_GLOBAL is False
        assert manager.IS_POLYAXON_DIR is True
        assert manager.CONFIG_FILE_NAME == '.polyaxonbuild'
        assert manager.CONFIG == BuildJobConfig
| 1.773438 | 2 |
src/apps/core/models/publication_model.py | zhiyuli/HydroLearn | 0 | 12769139 | <reponame>zhiyuli/HydroLearn
from django.db import models
from src.apps.core.models.module_models import Module
class Publication(models.Model):
    """Links a draft Module to its published copy (if one exists yet)."""
    class Meta:
        app_label = 'core'
        verbose_name_plural = 'Publications'
    # map to a user and a module
    # user = models.ForeignKey(User, null=False, blank=False, related_name='shared_with')
    # module = models.ForeignKey(Module, null=False, blank=False, related_name='shared_module')
    # record the date the module was shared with the user
    # date = models.DateTimeField(auto_now_add=True)
    # The editable working copy; always required.
    draft = models.ForeignKey(Module, null=False, blank=False, related_name='draft_copy')
    # The published snapshot; null until the draft has been published once.
    published = models.ForeignKey(Module, null=True, related_name='published_copy')
class PublishableModelMixin(models.Model):
    """Abstract base adding a draft/published flag to publishable models."""
    class Meta:
        abstract = True
    # True while the instance is an unpublished draft.
    is_draft = models.BooleanField(default=True)
| 2.28125 | 2 |
pyecore/resources/xmi.py | bmjjr/pyecore | 0 | 12769140 | """
The xmi module introduces XMI resource and XMI parsing.
"""
from enum import unique, Enum
from defusedxml.lxml import _etree as etree
from .resource import Resource
from .. import ecore as Ecore
XSI = 'xsi'
XSI_URL = 'http://www.w3.org/2001/XMLSchema-instance'
XMI = 'xmi'
XMI_URL = 'http://www.omg.org/XMI'
@unique
class XMIOptions(Enum):
    """Options accepted by XMIResource.load()/save()."""
    OPTION_USE_XMI_TYPE = 0  # on save, emit xmi:type instead of xsi:type
    SERIALIZE_DEFAULT_VALUES = 1  # on save, also write features left at their default
class XMIResource(Resource):
    """Resource implementation that loads and saves models in XMI format.

    Decoding is two-pass: the containment tree and attributes are built
    first (non-containment references are queued in ``self._later``), then
    cross-references are resolved once every object exists.

    Fix over the previous revision: ``_decode_attribute`` compared the
    attribute key against a corrupted ``<KEY>`` token (a syntax error);
    it is restored to ``self.xmiid``, matching ``_init_modelroot``.
    """

    def __init__(self, uri=None, use_uuid=False):
        super().__init__(uri, use_uuid)
        self._later = []          # [(eobject, [(reference, raw value)])] awaiting resolution
        self.prefixes = {}        # prefix -> namespace URI
        self.reverse_nsmap = {}   # namespace URI -> prefix

    def load(self, options=None):
        """Parse the XMI document behind ``self.uri`` and populate contents."""
        self.options = options or {}
        tree = etree.parse(self.uri.create_instream())
        xmlroot = tree.getroot()
        self.prefixes.update(xmlroot.nsmap)
        self.reverse_nsmap = {v: k for k, v in self.prefixes.items()}

        # Pre-compute the qualified attribute names used by this document.
        self.xsitype = '{{{0}}}type'.format(self.prefixes.get(XSI))
        self.xmiid = '{{{0}}}id'.format(self.prefixes.get(XMI))
        self.schema_tag = '{{{0}}}schemaLocation'.format(
            self.prefixes.get(XSI))

        # Decode the XMI: an xmi:XMI wrapper holds several roots.
        if '{{{0}}}XMI'.format(self.prefixes.get(XMI)) == xmlroot.tag:
            real_roots = xmlroot
        else:
            real_roots = [xmlroot]

        def grouper(iterable):
            # Yield (prefix, path) pairs from the flat schemaLocation list.
            args = [iter(iterable)] * 2
            return zip(*args)

        self.schema_locations = {}
        schema_tag_list = xmlroot.attrib.get(self.schema_tag, '')
        for prefix, path in grouper(schema_tag_list.split()):
            if '#' not in path:
                # Ensure the location has a fragment separator.
                path = path + '#'
            self.schema_locations[prefix] = Ecore.EProxy(path, self)

        for root in real_roots:
            modelroot = self._init_modelroot(root)
            for child in root:
                self._decode_eobject(child, modelroot)
        if self.contents:
            self._decode_ereferences()
        self._clean_registers()
        self.uri.close_stream()

    def xsi_type_url(self):
        """Return the namespace URL used for type attributes on save."""
        if self.options.get(XMIOptions.OPTION_USE_XMI_TYPE, False):
            return XMI_URL
        return XSI_URL

    @staticmethod
    def extract_namespace(tag):
        """Split a qualified XML tag into (namespace, local name)."""
        qname = etree.QName(tag)
        return qname.namespace, qname.localname

    def _type_attribute(self, node):
        """Return the node's type attribute, trying xsi:type then xmi:type."""
        type_ = node.get(self.xsitype)
        if type_ is None:
            xmi_type_url = '{{{0}}}type'.format(self.prefixes.get(XMI))
            type_ = node.get(xmi_type_url)
            if type_ is not None:
                # Remember that this document uses xmi:type from now on.
                self.xsitype = xmi_type_url
        return type_

    def _get_metaclass(self, nsURI, eclass_name):
        """Look up an EClassifier, falling back to schemaLocation proxies."""
        try:
            return self.get_metamodel(nsURI).getEClassifier(eclass_name)
        except Exception as e:
            proxy = self.schema_locations[nsURI]
            try:
                return proxy.getEClassifier(eclass_name)
            except Exception:
                # Re-raise the original lookup error, not the proxy's.
                raise e

    def _init_modelroot(self, xmlroot):
        """Create the model's root EObject, decode its attributes and register it."""
        nsURI, eclass_name = self.extract_namespace(xmlroot.tag)
        eclass = self._get_metaclass(nsURI, eclass_name)
        if not eclass:
            raise TypeError('"{0}" EClass does not exists'.format(eclass_name))
        modelroot = eclass()
        modelroot._eresource = self
        # Presence of an xmi:id on the root drives uuid usage for the file.
        self.use_uuid = xmlroot.get(self.xmiid) is not None
        self.contents.append(modelroot)

        erefs = []
        for key, value in xmlroot.attrib.items():
            namespace, att_name = self.extract_namespace(key)
            if key == self.xmiid:
                modelroot._internal_id = value
                self.uuid_dict[value] = modelroot
            # Do stuff with this
            # elif namespace:
            #     try:
            #
            #         # metaclass = self.get_metamodel(namespace)
            #         pass
            #     except KeyError:
            #         pass
            elif not namespace:
                feature = self._find_feature(modelroot.eClass, key)
                if not feature:
                    continue
                if isinstance(feature, Ecore.EAttribute):
                    self._decode_eattribute_value(modelroot, feature, value)
                else:
                    erefs.append((feature, value))
        if erefs:
            self._later.append((modelroot, erefs))
        return modelroot

    @staticmethod
    def _decode_eattribute_value(eobject, eattribute, value, from_tag=False):
        """Convert *value* from its string form and set it on *eobject*.

        Many-valued attributes serialized as XML attributes hold a
        whitespace-separated list; ones serialized as tags (``from_tag``)
        hold one value per tag.
        """
        is_many = eattribute.many
        if is_many and not from_tag:
            values = value.split()
            from_string = eattribute.eType.from_string
            results = [from_string(x) for x in values]
            eobject.__getattribute__(eattribute.name).extend(results)
        elif is_many:
            value = eattribute.eType.from_string(value)
            eobject.__getattribute__(eattribute.name).append(value)
        else:
            val = eattribute.eType.from_string(value)
            eobject.__setattr__(eattribute.name, val)

    def _decode_eobject(self, current_node, parent_eobj):
        """Recursively decode *current_node* and attach it under *parent_eobj*."""
        eobject_info = self._decode_node(parent_eobj, current_node)
        feat_container, eobject, eatts, erefs, from_tag = eobject_info

        # deal with eattributes and ereferences
        for eattribute, value in eatts:
            self._decode_eattribute_value(eobject, eattribute, value, from_tag)
        if erefs:
            self._later.append((eobject, erefs))
        if not feat_container:
            return

        # attach the new eobject to the parent one
        if feat_container.many:
            parent_eobj.__getattribute__(feat_container.name).append(eobject)
        else:
            parent_eobj.__setattr__(feat_container.name, eobject)

        # iterate on children
        for child in current_node:
            self._decode_eobject(child, eobject)

    def _is_none_node(self, node):
        """Return True if the node carries xsi:nil."""
        return '{{{}}}nil'.format(self.prefixes.get(XSI)) in node.attrib

    def _decode_node(self, parent_eobj, node):
        """Decode one node into (containment feature, eobject, eatts, erefs, from_tag)."""
        if node.tag == 'eGenericType':  # Special case, TODO
            return (None, None, [], [], False)
        _, node_tag = self.extract_namespace(node.tag)
        feature_container = self._find_feature(parent_eobj.eClass, node_tag)
        if not feature_container:
            raise ValueError('Feature "{0}" is unknown for {1}, line {2}'
                             .format(node_tag,
                                     parent_eobj.eClass.name,
                                     node.sourceline,))
        if self._is_none_node(node):
            parent_eobj.__setattr__(feature_container.name, None)
            return (None, None, [], [], False)
        if node.get('href'):
            # External reference: build a lazy proxy instead of an instance.
            ref = node.get('href')
            proxy = Ecore.EProxy(path=ref, resource=self)
            return (feature_container, proxy, [], [], False)
        if self._type_attribute(node):
            prefix, _type = self._type_attribute(node).split(':')
            if not prefix:
                raise ValueError('Prefix {0} is not registered, line {1}'
                                 .format(prefix, node.tag))
            epackage = self.prefix2epackage(prefix)
            etype = epackage.getEClassifier(_type)
            if not etype:
                raise ValueError('Type {0} is unknown in {1}, line{2}'
                                 .format(_type, epackage, node.tag))
        else:
            etype = feature_container.eType
        if isinstance(etype, Ecore.EProxy):
            etype.force_resolve()

        # we create the instance
        if etype is Ecore.EClass or etype is Ecore.EClass.eClass:
            name = node.get('name')
            eobject = etype(name)
        elif (etype is Ecore.EStringToStringMapEntry
              or etype is Ecore.EStringToStringMapEntry.eClass) \
                and feature_container is Ecore.EAnnotation.details:
            # Annotation detail entries go straight into the parent's dict.
            annotation_key = node.get('key')
            annotation_value = node.get('value')
            parent_eobj.details[annotation_key] = annotation_value
            if annotation_key == 'documentation':
                container = parent_eobj.eContainer()
                if hasattr(container, 'python_class'):
                    container = container.python_class
                container.__doc__ = annotation_value
            return (None, None, tuple(), tuple(), False)
        elif isinstance(etype, Ecore.EDataType):
            # Attribute serialized as a tag with text content.
            key = node.tag
            value = node.text if node.text else ''
            feature = self._decode_attribute(parent_eobj, key, value)
            return (None, parent_eobj, ((feature, value),), tuple(), True)
        else:
            # idref = node.get('{{{}}}idref'.format(XMI_URL))
            # if idref:
            #     return (None, parent_eobj, [],
            #             [(feature_container, idref)], True)
            eobject = etype()

        # we sort the node feature (no containments)
        eatts = []
        erefs = []
        for key, value in node.attrib.items():
            feature = self._decode_attribute(eobject, key, value)
            if not feature:
                continue  # we skip the unknown features
            if etype is Ecore.EClass and feature.name == 'name':
                continue  # we skip the name for metamodel import
            if isinstance(feature, Ecore.EAttribute):
                eatts.append((feature, value))
                if feature.iD:
                    self.uuid_dict[value] = eobject
            else:
                erefs.append((feature, value))
        return (feature_container, eobject, eatts, erefs, False)

    def _decode_attribute(self, owner, key, value):
        """Decode one XML attribute and return its EStructuralFeature.

        Returns None for attributes handled specially (xmi:id, xsi:/xmi:type,
        href) so the caller skips them.
        """
        namespace, att_name = self.extract_namespace(key)
        prefix = self.reverse_nsmap[namespace] if namespace else None
        # This is a special case, we are working with uuids
        # (restored 'self.xmiid'; previous text held a corrupted '<KEY>' token).
        if key == self.xmiid:
            owner._internal_id = value
            self.uuid_dict[value] = owner
        elif prefix in ('xsi', 'xmi') and att_name == 'type':
            # type has already been handled
            pass
        # elif namespace:
        #     pass
        elif not namespace:
            if att_name == 'href':
                return
            feature = self._find_feature(owner.eClass, att_name)
            if not feature:
                raise ValueError('Feature {0} does not exists for type {1}'
                                 .format(att_name, owner.eClass.name))
            return feature

    def _decode_ereferences(self):
        """Second pass: resolve every non-containment reference queued earlier."""
        opposite = []
        for eobject, erefs in self._later:
            for ref, value in erefs:
                name = ref.name
                if name == 'eOpposite':
                    # eOpposite can only be set once both ends exist.
                    opposite.append((eobject, ref, value))
                    continue
                if ref.many:
                    values = [self.normalize(x) for x in value.split()]
                else:
                    values = [value]
                for value in values:
                    resolved_value = self._resolve_nonhref(value)
                    if not resolved_value:
                        raise ValueError('EObject for {0} is unknown'
                                         .format(value))
                    if not hasattr(resolved_value, '_inverse_rels'):
                        resolved_value = resolved_value.eClass
                    if ref.many:
                        eobject.__getattribute__(name).append(resolved_value)
                    else:
                        eobject.__setattr__(name, resolved_value)

        for eobject, ref, value in opposite:
            resolved_value = self._resolve_nonhref(value)
            if not resolved_value:
                raise ValueError('EObject for {0} is unknown'.format(value))
            eobject.__setattr__(ref.name, resolved_value)

    def _resolve_nonhref(self, path):
        """Resolve a fragment path, memoizing results (proxies for external URIs)."""
        uri, fragment = self._is_external(path)
        if fragment in self._resolve_mem:
            return self._resolve_mem[fragment]
        if uri:
            cleaned_uri = uri + '#' + fragment
            if cleaned_uri in self._resolve_mem:
                return self._resolve_mem[cleaned_uri]
            proxy = Ecore.EProxy(path=cleaned_uri, resource=self)
            self._resolve_mem[cleaned_uri] = proxy
            return proxy
        return self.resolve(fragment)

    def _clean_registers(self):
        """Drop decode-time caches once loading is finished."""
        self._later.clear()
        self._feature_cache.clear()
        self._resolve_mem.clear()

    def register_nsmap(self, prefix, uri):
        """Register a prefix/URI pair, deriving a unique prefix on collision."""
        if uri in self.reverse_nsmap:
            return
        if prefix not in self.prefixes:
            self.prefixes[prefix] = uri
            self.reverse_nsmap[uri] = prefix
            return
        # Prefix already taken by a different URI: suffix it to make it unique.
        same_prefix = [x for x in self.prefixes.keys() if x.startswith(prefix)]
        prefix = '{0}_{1}'.format(prefix, len(same_prefix))
        self.prefixes[prefix] = uri
        self.reverse_nsmap[uri] = prefix

    def register_eobject_epackage(self, eobj):
        """Register the namespace of *eobj*'s metamodel package."""
        epackage = eobj.eClass.ePackage
        prefix = epackage.nsPrefix
        nsURI = epackage.nsURI
        self.register_nsmap(prefix, nsURI)

    def save(self, output=None, options=None):
        """Serialize the resource contents as XMI to *output* (or self.uri)."""
        self.options = options or {}
        output = self.open_out_stream(output)
        self.prefixes.clear()
        self.reverse_nsmap.clear()

        serialize_default = \
            self.options.get(XMIOptions.SERIALIZE_DEFAULT_VALUES,
                             False)
        nsmap = {XMI: XMI_URL,
                 XSI: XSI_URL}

        if len(self.contents) == 1:
            root = self.contents[0]
            self.register_eobject_epackage(root)
            tmp_xmi_root = self._go_across(root, serialize_default)
        else:
            # Several roots need a wrapping xmi:XMI element.
            tag = etree.QName(XMI_URL, 'XMI')
            tmp_xmi_root = etree.Element(tag)
            for root in self.contents:
                root_node = self._go_across(root, serialize_default)
                tmp_xmi_root.append(root_node)

        # update nsmap with prefixes register during the nodes creation
        nsmap.update(self.prefixes)
        xmi_root = etree.Element(tmp_xmi_root.tag, nsmap=nsmap)
        xmi_root[:] = tmp_xmi_root[:]
        xmi_root.attrib.update(tmp_xmi_root.attrib)
        xmi_version = etree.QName(XMI_URL, 'version')
        xmi_root.attrib[xmi_version] = '2.0'
        tree = etree.ElementTree(xmi_root)
        tree.write(output,
                   pretty_print=True,
                   xml_declaration=True,
                   encoding=tree.docinfo.encoding)
        output.flush()
        self.uri.close_stream()

    def _add_explicit_type(self, node, obj):
        """Annotate *node* with an explicit xsi:/xmi:type for *obj*."""
        xsi_type = etree.QName(self.xsi_type_url(), 'type')
        uri = obj.eClass.ePackage.nsURI
        if uri not in self.reverse_nsmap:
            epackage = self.get_metamodel(uri)
            self.register_nsmap(epackage.nsPrefix, uri)
        prefix = self.reverse_nsmap[uri]
        node.attrib[xsi_type] = '{0}:{1}'.format(prefix, obj.eClass.name)

    def _build_none_node(self, feature_name):
        """Build an element marking a feature explicitly set to None (xsi:nil)."""
        sub = etree.Element(feature_name)
        xsi_null = etree.QName(self.xsi_type_url(), 'nil')
        sub.attrib[xsi_null] = 'true'
        return sub

    def _go_across(self, obj, serialize_default=False):
        """Recursively serialize *obj* and its containment tree to an XML element."""
        self.register_eobject_epackage(obj)
        eclass = obj.eClass
        if not obj.eContainmentFeature():  # obj is the root
            epackage = eclass.ePackage
            nsURI = epackage.nsURI
            tag = etree.QName(nsURI, eclass.name) if nsURI else eclass.name
            node = etree.Element(tag)
        else:
            node = etree.Element(obj.eContainmentFeature().name)
            if obj.eContainmentFeature().eType != eclass:
                self._add_explicit_type(node, obj)
        if self.use_uuid:
            self._assign_uuid(obj)
            xmi_id = '{{{0}}}id'.format(XMI_URL)
            node.attrib[xmi_id] = obj._internal_id

        for feat in obj._isset:
            if feat.derived or feat.transient:
                continue
            feat_name = feat.name
            value = obj.__getattribute__(feat_name)
            if hasattr(feat.eType, 'eType') and feat.eType.eType is dict:
                # Map-entry features are serialized as key/value sub-elements.
                for key, val in value.items():
                    entry = etree.Element(feat_name)
                    entry.attrib['key'] = key
                    entry.attrib['value'] = val
                    node.append(entry)
            elif isinstance(feat, Ecore.EAttribute):
                etype = feat.eType
                if feat.many and value:
                    to_str = etype.to_string
                    has_special_char = False
                    result_list = []
                    for v in value:
                        string = to_str(v)
                        if any(x.isspace() for x in string):
                            has_special_char = True
                        result_list.append(string)
                    if has_special_char:
                        # Whitespace-bearing values cannot share one attribute;
                        # write one sub-element per value instead.
                        for v in result_list:
                            sub = etree.SubElement(node, feat_name)
                            sub.text = v
                    else:
                        node.attrib[feat_name] = ' '.join(result_list)
                    continue
                default_value = feat.get_default_value()
                if value != default_value or serialize_default:
                    if value is None:
                        node.append(self._build_none_node(feat_name))
                    else:
                        node.attrib[feat_name] = etype.to_string(value)
                continue
            elif isinstance(feat, Ecore.EReference) and \
                    feat.eOpposite and feat.eOpposite.containment:
                # The container side is implied by document structure.
                continue
            elif isinstance(feat, Ecore.EReference) \
                    and not feat.containment:
                if not value:
                    if serialize_default and value is None:
                        node.append(self._build_none_node(feat_name))
                    continue
                if feat.many:
                    results = [self._build_path_from(x) for x in value]
                    embedded = []
                    crossref = []
                    for i, result in enumerate(results):
                        frag, cref = result
                        if cref:
                            crossref.append((i, frag))
                        else:
                            embedded.append(frag)
                    if embedded:
                        result = ' '.join(embedded)
                        node.attrib[feat_name] = result
                    for i, ref in crossref:
                        # Cross-resource references get href sub-elements.
                        sub = etree.SubElement(node, feat_name)
                        sub.attrib['href'] = ref
                        self._add_explicit_type(sub, value[i])
                else:
                    frag, is_crossref = self._build_path_from(value)
                    if is_crossref:
                        sub = etree.SubElement(node, feat_name)
                        sub.attrib['href'] = frag
                        self._add_explicit_type(sub, value)
                    else:
                        node.attrib[feat_name] = frag
            if isinstance(feat, Ecore.EReference) and feat.containment:
                children = obj.__getattribute__(feat_name)
                children = children if feat.many else [children]
                for child in children:
                    node.append(self._go_across(child, serialize_default))
        return node
| 2.265625 | 2 |
tools/queue_producer.py | saucetray/st2 | 2 | 12769141 | #!/usr/bin/env python
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A utility script which sends test messages to a queue.
"""
from __future__ import absolute_import
import argparse
import eventlet
from kombu import Exchange
from st2common import config
from st2common.transport.publishers import PoolPublisher
def main(exchange, routing_key, payload):
    """Publish *payload* to a topic *exchange* using *routing_key*."""
    topic_exchange = Exchange(exchange, type='topic')
    PoolPublisher().publish(payload=payload, exchange=topic_exchange,
                            routing_key=routing_key)
    # Brief pause so the eventlet-driven publisher can flush before exit.
    eventlet.sleep(0.5)
if __name__ == '__main__':
    # Load st2 configuration (transport settings etc.) with no extra CLI args.
    config.parse_args(args={})
    parser = argparse.ArgumentParser(description='Queue producer')
    parser.add_argument('--exchange', required=True,
                        help='Exchange to publish the message to')
    parser.add_argument('--routing-key', required=True,
                        help='Routing key to use')
    parser.add_argument('--payload', required=True,
                        help='Message payload')
    args = parser.parse_args()
    main(exchange=args.exchange, routing_key=args.routing_key,
         payload=args.payload)
| 2.25 | 2 |
tests/linux_benchmarks/beam_integration_benchmark_test.py | robfrut135/PerfKitBenchmarker | 3 | 12769142 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_integration_benchmark."""
import unittest
from perfkitbenchmarker import beam_pipeline_options
class BeamArgsOptionsTestCase(unittest.TestCase):
  """Tests for beam_pipeline_options option-list generation."""

  def testNoFlagsPassed(self):
    # With no IT args, no IT options, no kv pairs and no dynamic options,
    # the generated list is empty.
    options_list = beam_pipeline_options.GenerateAllPipelineOptions(
        None, None, [], [])
    self.assertListEqual(options_list, [])

  def testAllFlagsPassed(self):
    # Exercises all four inputs at once: comma-separated IT args, a JSON
    # list of IT options, key/value dicts, and dynamic pipeline options.
    # NOTE(review): "<PASSWORD>" looks like a redaction placeholder, not a
    # real fixture value — confirm against the upstream repository.
    options_list = beam_pipeline_options.GenerateAllPipelineOptions(
        "--itargone=anarg,--itargtwo=anotherarg",
        "[\"--project=testProj\","
        "\"--gcpTempLocation=gs://test-bucket/staging\"]",
        [{"postgresUsername": "postgres"}, {"postgresPassword": "<PASSWORD>"}],
        [{"name": "aTestVal", "type": "TestValue", "value": "this_is_a_test"},
         {"name": "testier", "type": "TestValue", "value": "another_test"}]
    )
    self.assertListEqual(options_list,
                         ["\"--itargone=anarg\"",
                          "\"--itargtwo=anotherarg\"",
                          "\"--project=testProj\"",
                          "\"--gcpTempLocation=gs://test-bucket/staging\"",
                          "\"--aTestVal=this_is_a_test\"",
                          "\"--testier=another_test\"",
                          "\"--postgresUsername=postgres\"",
                          "\"--postgresPassword=<PASSWORD>\""])

  def testItOptionsWithSpaces(self):
    # Whitespace inside the JSON option list must not leak into the output.
    options_list = beam_pipeline_options.GenerateAllPipelineOptions(
        None,
        "[\"--project=testProj\", "
        "\"--gcpTempLocation=gs://test-bucket/staging\"]",
        [],
        [])
    self.assertListEqual(options_list,
                         ["\"--project=testProj\"",
                          "\"--gcpTempLocation=gs://test-bucket/staging\""])

  def testDynamicPipelineOpionsWithFormat(self):
    # A "format" template substitutes the {{Type}} token with the value.
    dynamic_options = [
        {
            "name": "test_value_A",
            "type": "TestValue",
            "value": "a_value",
            "format": "other representation of {{TestValue}}",
        },
        {
            "name": "test_value_B",
            "type": "TestValue",
            "value": "b_value"
        }
    ]
    self.assertListEqual(
        beam_pipeline_options.EvaluateDynamicPipelineOptions(dynamic_options),
        [
            ("test_value_A", "other representation of a_value"),
            ("test_value_B", "b_value"),
        ]
    )

  def dynamicPipelineOptions(self):
    # NOTE(review): no "test" prefix, so unittest never collects this method;
    # it also calls EvaluateDynamicPipelineOptions without its argument —
    # likely leftover/dead code to confirm and remove upstream.
    beam_pipeline_options.EvaluateDynamicPipelineOptions()
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 1.9375 | 2 |
Config/Defaults.py | jeroanan/Gyroscope | 1 | 12769143 | def get_defaults():
return {
"acceptable_size": 100,
"acceptable_time": 3,
"get_pages": True,
"get_assets": True,
"get_images": True,
"get_scripts": True,
"get_stylesheets": True,
"give_second_chance": True,
"logfile_location": "gyroscope.log",
"logfile_mode": "w",
"log_level": 30,
"log_too_big": True,
"log_too_slow": True,
"sites_file": "sites.json",
"asset_types": []
} | 1.570313 | 2 |
django_mobile_app_distribution/admin.py | chingmeng/django-mobile-app-distribution | 38 | 12769144 | <reponame>chingmeng/django-mobile-app-distribution
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.contrib import admin, messages
from django.contrib.auth.models import User
try:
from django.contrib.sites.models import get_current_site
except ImportError:
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
import django
if django.VERSION >= (1, 10):
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django_mobile_app_distribution import settings as _settings
from django_mobile_app_distribution.models import IosApp, AndroidApp
from django_mobile_app_distribution.forms import iOSAppAdminForm, AndroidAppAdminForm
from django_mobile_app_distribution.models import UserInfo
logger = logging.getLogger(__name__)
class UserInfoAdmin(admin.ModelAdmin):
    """Admin for per-user settings; language is editable from the change list."""
    model = UserInfo
    list_display = ['user', 'language']
    list_editable = ['language']
    search_fields = ['user__username']
class NotifiableModelAdmin(admin.ModelAdmin):
    """Base admin for app models that can email users/groups about a new build.

    Subclasses (iOS/Android app admins) inherit the ``notify_client`` action
    and the user/groups list-display columns.
    """
    actions = ['notify_client']
    search_fields = ['name', 'user__username', 'groups__name', 'comment']
    def notify_client(self, request, queryset):
        """Admin action: email each selected app's owner and group members a download link."""
        for app in queryset.all():
            recipients = []
            # Direct owner of the app, if they have an email address set.
            if app.user and app.user.email:
                recipients.append(app.user.email)
            # Members of any group the app is shared with (deduplicated).
            if app.groups.count() > 0:
                group_users = User.objects.filter(groups__in=app.groups.all())
                for user in group_users:
                    if user.email and user.email not in recipients:
                        recipients.append(user.email)
            if recipients:
                # Count before appending the acting admin, so the success
                # message reflects only actual clients notified.
                recipient_count = len(recipients)
                if request.user.email and request.user.email not in recipients:
                    recipients.append(request.user.email)
                try:
                    # Try and send email in client's preferred language
                    # This doesn't make much sense for apps distributed to groups
                    # hence the catch all except clause
                    lang = app.user.userinfo.language
                    translation.activate(lang)
                except Exception:
                    pass
                domain = get_current_site(request).domain
                index_url = reverse('django_mobile_app_distribution_index')
                data = {
                    'email_link_color_hex': _settings.EMAIL_LINK_COLOR_HEX,
                    'app_name': app.name,
                    'app_version': app.version,
                    'os': app.operating_system,
                    # Join domain and path without doubling or losing slashes.
                    'download_url': '/'.join(s.strip('/') for s in (domain, index_url))
                }
                email = EmailMultiAlternatives()
                # All recipients go on BCC so clients can't see each other's addresses.
                email.bcc = recipients
                email.subject = _('Version %(app_version)s of %(app_name)s for %(os)s is available for download') % data
                email.body = _(
                    'Version %(app_version)s of %(app_name)s for %(os)s is available for download.\n'
                    'Please visit %(download_url)s to install the app.'
                ) % data
                email.attach_alternative(
                    render_to_string('django_mobile_app_distribution/email_notification.html', data),
                    'text/html'
                )
                # Reset to system language
                translation.deactivate()
                email.send(fail_silently=False)
                messages.add_message(
                    request,
                    messages.INFO, ungettext_lazy(
                        '%(recipient_count)s user was notified of %(app_name)s %(app_version)s availability.',
                        '%(recipient_count)s users were notified of %(app_name)s %(app_version)s availability.',
                        recipient_count) % {
                        'recipient_count' : recipient_count,
                        'app_name' : app.name,
                        'app_version': app.version
                    },
                    fail_silently=True)
            else:
                messages.add_message(
                    request, messages.ERROR, _('Nobody was notified by email because nobody\'s email address is set.'),
                    fail_silently=True
                )
    notify_client.short_description = _('Notify clients of app availability')
    def user_display_name(self, instance):
        """Change-list column: owning user's username, or '' when unassigned."""
        if instance.user:
            return instance.user.username
        else:
            return ''
    user_display_name.short_description = _('User')
    user_display_name.admin_order_field = 'user'
    def groups_display_name(self, instance):
        """Change-list column: comma-separated group names, or '' when none."""
        if instance.groups.count() > 0:
            return ", ".join(str(group) for group in instance.groups.all())
        else:
            return ''
    groups_display_name.short_description = _('Groups')
class IosAppAdmin(NotifiableModelAdmin):
    """Admin for iOS apps; adds the iOS 9 OTA image fields to the form."""
    form = iOSAppAdminForm
    list_display = ('name', 'user_display_name', 'groups_display_name', 'version', 'comment', 'updatedAt')
    filter_horizontal = ['groups']
    fieldsets = (
        (_('App info'), {
            'fields': ('user', 'groups', 'name', 'version', 'bundle_identifier', 'app_binary', 'comment')
        }),
        (_('Provide these deploy on iOS 9'), {
            'fields': ('display_image', 'full_size_image')
        }),
    )
class AndroidAppAdmin(NotifiableModelAdmin):
    """Admin for Android apps (no bundle identifier or OTA images needed)."""
    form = AndroidAppAdminForm
    list_display = ('name', 'user_display_name', 'groups_display_name', 'version', 'comment', 'updatedAt')
    filter_horizontal = ['groups']
    fieldsets = (
        (_('App info'), {
            'fields': ('user', 'groups', 'name', 'version', 'app_binary', 'comment')
        }),
    )
# Wire the model admins into the default admin site.
admin.site.register(IosApp, IosAppAdmin)
admin.site.register(AndroidApp, AndroidAppAdmin)
admin.site.register(UserInfo, UserInfoAdmin)
| 1.734375 | 2 |
parse.py | starswan/www-hertschess-com | 0 | 12769145 | from utils import commaSep
# Maps the column headings scraped from the league table (plus the synthetic
# POSITION/DIVISION columns) to the short labels used in the CSV output.
HEADER_TRANS = { 'TEAM':'Team Name', 'PLAYED':'P', 'WON':'W', 'DRAWN':'D', 'LOST':'L',
    'GAMES':'For','POINTS':'Ps', 'POSITION':'Position', 'DIVISION':'Division'}
def celldata(cells, division, index):
    """Extract the text for one output column from a parsed table row.

    Column 1 is the team name (text of its first link, with the division
    appended). Indices above 7 are the synthetic position/division strings
    the caller appended as plain values. Everything else is a normal cell,
    with the half-point glyph normalised to ".5".
    """
    cell = cells[index]
    if index == 1:
        first_link = cell.findAll('a')[0]
        return first_link.contents[0] + division
    if index > 7:
        return cell
    return cell.contents[0].replace('½', '.5')
def make_header_dict(table):
    """Map each non-empty <th> heading text to its column index.

    Three synthetic entries are pre-seeded: TEAM at 1, plus POSITION (8)
    and DIVISION (9), which correspond to the values the caller appends
    to each row before formatting.
    """
    headers = {'TEAM': 1, 'POSITION': 8, 'DIVISION': 9}
    for position, header in enumerate(table.findAll('th')):
        if header.contents:
            headers[header.contents[0]] = position
    return headers
def print_headers(table):
    """Print the CSV heading row for a division table (Python 2 script)."""
    header_dict = make_header_dict(table)
    # Translate the scraped headings to their short CSV labels.
    print commaSep([HEADER_TRANS[h] for h in header_dict])
def parse(division, table):
    """Print CSV rows for the top five teams of one division's table.

    Each row gains two synthetic trailing cells -- league position (1-based)
    and the division name -- which celldata() reads via indices 8 and 9.
    """
    header_dict = make_header_dict(table)
    index = 1
    # [1:6]: skip the heading row, then take the top five teams only.
    for row in table.findAll('tr')[1:6]:
        cells = row.findAll('td')
        cells.append(str(index))
        cells.append(division)
        data = [celldata(cells, division, header_dict[h]) for h in header_dict]
        print commaSep(data)
        index = index + 1
if __name__ == '__main__':
    # Usage: python parse.py <division>  (expects <division>.html in the cwd).
    # NOTE(review): `file()` and the print statements are Python 2 only.
    import sys, BeautifulSoup
    division = sys.argv[1]
    table = BeautifulSoup.BeautifulSoup(file(division + '.html').read())
    print_headers(table)
    parse(division, table)
| 3.015625 | 3 |
Keras-test/k-mlp-softmax.py | ruanyangry/Keras_NLP_- | 1 | 12769146 | <reponame>ruanyangry/Keras_NLP_-<gh_stars>1-10
# _*_ coding:gbk _*_
'''
Minimal Keras example: train a 10-class MLP with a softmax output on
randomly generated data, then evaluate it on a random hold-out set.

Author: <NAME>
Email: <EMAIL>
Reference: https://keras-cn.readthedocs.io/en/latest/getting_started/sequential_model/
'''
import keras
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation
from keras.optimizers import SGD

# Generate dummy data: 1000 train / 100 test rows of 20 random features,
# with random integer labels one-hot encoded over 10 classes.
import numpy as np
x_train=np.random.random((1000,20))
y_train=keras.utils.to_categorical(np.random.randint(10,size=(1000,1)),\
    num_classes=10)
x_test=np.random.random((100,20))
y_test=keras.utils.to_categorical(np.random.randint(10,size=(100,1)),\
    num_classes=10)

model=Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# In the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64,activation="relu",input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64,activation="relu"))
model.add(Dropout(0.5))
# Softmax output: one probability per class.
model.add(Dense(10,activation="softmax"))

# Define the optimizer: SGD with weight decay and Nesterov momentum.
sgd=SGD(lr=0.01,decay=1e-6,momentum=0.9,nesterov=True)

# compile the model
model.compile(loss="categorical_crossentropy",optimizer=sgd,\
    metrics=["accuracy"])

# fit the model
model.fit(x_train,y_train,epochs=20,batch_size=128)

# get the results: [loss, accuracy] on the hold-out data
score=model.evaluate(x_test,y_test,batch_size=128)
print("#----------------------------------#")
print(score)
print("#----------------------------------#")
print("\n")
cli/src/pcluster/cli/commands/configure/command.py | maclema/aws-parallelcluster | 279 | 12769147 | # Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=import-outside-toplevel
from typing import List
import argparse
from argparse import Namespace
from pcluster.cli.commands.common import CliCommand
class ConfigureCommand(CliCommand):
    """Implement pcluster configure command."""
    # CLI
    name = "configure"
    help = "Start the AWS ParallelCluster configuration."
    description = help
    def __init__(self, subparsers):
        super().__init__(subparsers, name=self.name, help=self.help, description=self.description)
    def register_command_args(self, parser: argparse.ArgumentParser) -> None:  # noqa: D102
        parser.add_argument("-c", "--config", help="Path to output the generated config file.", required=True)
    def execute(self, args: Namespace, extra_args: List[str]) -> None:  # noqa: D102 #pylint: disable=unused-argument
        # Imported lazily (see the module-level pylint disable) so the heavy
        # configure machinery is only loaded when this subcommand runs.
        from pcluster.cli.commands.configure.easyconfig import configure
        configure(args)
| 1.9375 | 2 |
src/data/change_only_ypred.py | onurerkin/prohack | 0 | 12769148 | import pandas as pd
import numpy as np  # NOTE(review): imported but unused here — confirm and remove
# One-off submission fixer: take the May 23 combined submission and replace
# only its `pred` column with the May 24 predictions, leaving all other
# columns untouched, then write the result out as a new CSV.
test_submission_guardians_may_24 = pd.read_csv('data/processed/test_submission_guardians_may_24.csv')
two_sub_combined_submission_may_23 = pd.read_csv('data/processed/two_sub_combined_submission_may_23.csv')
# NOTE(review): plain assignment — both names alias the same DataFrame object.
two_sub_combined_submission_may_23_y_pred_may_24 = two_sub_combined_submission_may_23
two_sub_combined_submission_may_23_y_pred_may_24['pred'] = test_submission_guardians_may_24['pred']
two_sub_combined_submission_may_23_y_pred_may_24.to_csv('data/processed/two_sub_combined_submission_may_23_y_pred_may_24.csv',index=False)
PE010.py | CaptainSora/Python-Project-Euler | 0 | 12769149 | <reponame>CaptainSora/Python-Project-Euler
from _prime_tools import sieve
def summation_of_primes(ceiling):
    """Return the sum of every prime strictly below ``ceiling``."""
    primes = sieve(ceiling)
    return sum(primes)
def solve(vol=0):
    """Project Euler 10: sum of all the primes below two million."""
    limit = 2 * 10**6
    return summation_of_primes(limit)
| 3.53125 | 4 |
examples/joined.py | SilverOS/Silbot-Py | 4 | 12769150 | import silbot
from silbot.helper import InlineKBMarkup, inlineKBRow, inlineKBData
"""
This is an example of how to use new methods added with silbot 1.1
This is a simple bot that will check if a user is in a channel or is admin of that channel
"""
# --- Configuration ---
token = "<KEY>" # Put bot token here
channelid = -1001086416281 # Change the channel ID; the bot must be an admin of the channel
# Start the bot and verify the token via getMe; abort on a bad token.
bot = silbot.botapi.BotApi(token, "HTML")
r, response = bot.getMe()
if not response.ok:
    print("Error, wrong bot Token")
    exit()
else:
    print("Bot @" + r.username + " started")
def updateH(update: silbot.types.Update, bot: silbot.botapi.BotApi):
    """Handle one Telegram update.

    /start replies with an inline keyboard offering two checks; tapping a
    button answers the callback with whether the tapping user is a member
    or an admin of the configured channel.
    """
    if update.message is not None:
        message = update.message
        chat = message.chat
        if message.text == "/start":
            # Two inline buttons on one row; their callback_data is routed
            # in the callback_query branch below.
            kb = InlineKBMarkup(
                inlineKBRow(
                    inlineKBData("Join Check", "/join"),
                    inlineKBData("Admin Check", "/admin")
                )
            )
            bot.sendMessage(chat.id,
                            "<b>Silbot Py Example</b>\n\nClick the button to check if you are admin/member of the channel defined in the config",
                            kb)
    elif update.callback_query is not None:
        callback = update.callback_query
        user = callback.user
        if callback.data == "/join":
            r = user.isMember(bot, channelid)
            if r:
                callback.answer(bot, "You joined the channel")
            elif not r:
                callback.answer(bot, "You have not joined the channel")
        elif callback.data == "/admin":
            r = user.isAdmin(bot, channelid)
            if r:
                callback.answer(bot, "You are an admin of the channel")
            elif not r:
                callback.answer(bot, "You are not an admin of the channel")
# Blocking long-poll loop: fetches updates and dispatches each one to updateH.
silbot.GetUpdatesLoop(bot, updateH)
| 2.953125 | 3 |